diff --git a/ckpts/universal/global_step40/zero/17.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/17.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..b8ee0b94d6c2b5a0f955a772576c2c855d9cbc57 --- /dev/null +++ b/ckpts/universal/global_step40/zero/17.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91fbdb66a9e4da6e4b659c893ed62fe150b3b2fcb87a69e4e49e4ed1325ac032 +size 16778396 diff --git a/ckpts/universal/global_step40/zero/17.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/17.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..4fb41ae5a7586319c6a7b8a4062ec260ada6997a --- /dev/null +++ b/ckpts/universal/global_step40/zero/17.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ba9630f2a533f3ac28f6e10d9492ebec38926e4f4d8cf4741efe3f441ade36b +size 16778317 diff --git a/ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..4dae6030efdf889a49719c1ed382223ad753a903 --- /dev/null +++ b/ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:996520494b7c161147d9550c30ae99f763cc6a7301debafb1dd60c41e0b6e3d9 +size 33555612 diff --git a/ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..1a2678ebd6867cac2021439d001fac6ffc794301 --- /dev/null +++ b/ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:085aff1a306423579c8d3cd7f2d2390fad5beb8dd1ab99a49ea246b2814aad75 +size 33555533 diff --git a/venv/lib/python3.10/site-packages/nltk/chat/__init__.py b/venv/lib/python3.10/site-packages/nltk/chat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..462f0b517068657d149662cf990414f203491caf --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chat/__init__.py @@ -0,0 +1,48 @@ +# Natural Language Toolkit: Chatbots +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# Based on an Eliza implementation by Joe Strout , +# Jeff Epler and Jez Higgins . + +""" +A class for simple chatbots. These perform simple pattern matching on sentences +typed by users, and respond with automatically generated sentences. + +These chatbots may not work using the windows command line or the +windows IDLE GUI. 
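For illustration, a minimal sketch driving the underlying ``nltk.chat.util.Chat`` class directly (the pattern/response pair here is invented, not one of the bundled bots):

    >>> from nltk.chat.util import Chat, reflections
    >>> pairs = ((r'I like (.*)', ('Why do you like %1?',)),)
    >>> chatbot = Chat(pairs, reflections)
    >>> chatbot.respond('I like Python')
    'Why do you like python?'

The matched group is passed through the reflection table and lowercased before being substituted for ``%1``.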
+""" + +from nltk.chat.eliza import eliza_chat +from nltk.chat.iesha import iesha_chat +from nltk.chat.rude import rude_chat +from nltk.chat.suntsu import suntsu_chat +from nltk.chat.util import Chat +from nltk.chat.zen import zen_chat + +bots = [ + (eliza_chat, "Eliza (psycho-babble)"), + (iesha_chat, "Iesha (teen anime junky)"), + (rude_chat, "Rude (abusive bot)"), + (suntsu_chat, "Suntsu (Chinese sayings)"), + (zen_chat, "Zen (gems of wisdom)"), +] + + +def chatbots(): + print("Which chatbot would you like to talk to?") + botcount = len(bots) + for i in range(botcount): + print(" %d: %s" % (i + 1, bots[i][1])) + while True: + choice = input(f"\nEnter a number in the range 1-{botcount}: ").strip() + if choice.isdigit() and (int(choice) - 1) in range(botcount): + break + else: + print(" Error: bad chatbot number") + + chatbot = bots[int(choice) - 1][0] + chatbot() diff --git a/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fad1b17972fa88e52b590dd836c88272e7dba3d2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e674b5f6a20bab0f75f9d22c3f3d2b133ba96ecd Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/eliza.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cba4180a61cd195251f3f407d486523af67b0fa4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/iesha.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93f91f09f59e13fc6fcac8bd6a988d9cc0702ef4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/rude.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e8d14aa7d24ca7e5f82961bda9b2a2dd97be2b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/suntsu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfd5135150e9ba30c74ee3b2da1908a37c35b9c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f172c4f8923179f1606de751c41cd9a2da35622b Binary files /dev/null 
and b/venv/lib/python3.10/site-packages/nltk/chat/__pycache__/zen.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chat/eliza.py b/venv/lib/python3.10/site-packages/nltk/chat/eliza.py new file mode 100644 index 0000000000000000000000000000000000000000..5dfb4a4be2caa084c89a169f4861bd7a4b3eacf3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chat/eliza.py @@ -0,0 +1,337 @@ +# Natural Language Toolkit: Eliza +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +# Based on an Eliza implementation by Joe Strout , +# Jeff Epler and Jez Higgins . + +# a translation table used to convert things you say into things the +# computer says back, e.g. "I am" --> "you are" + +from nltk.chat.util import Chat, reflections + +# a table of response pairs, where each pair consists of a +# regular expression, and a list of possible responses, +# with group-macros labelled as %1, %2. + +pairs = ( + ( + r"I need (.*)", + ( + "Why do you need %1?", + "Would it really help you to get %1?", + "Are you sure you need %1?", + ), + ), + ( + r"Why don\'t you (.*)", + ( + "Do you really think I don't %1?", + "Perhaps eventually I will %1.", + "Do you really want me to %1?", + ), + ), + ( + r"Why can\'t I (.*)", + ( + "Do you think you should be able to %1?", + "If you could %1, what would you do?", + "I don't know -- why can't you %1?", + "Have you really tried?", + ), + ), + ( + r"I can\'t (.*)", + ( + "How do you know you can't %1?", + "Perhaps you could %1 if you tried.", + "What would it take for you to %1?", + ), + ), + ( + r"I am (.*)", + ( + "Did you come to me because you are %1?", + "How long have you been %1?", + "How do you feel about being %1?", + ), + ), + ( + r"I\'m (.*)", + ( + "How does being %1 make you feel?", + "Do you enjoy being %1?", + "Why do you tell me you're %1?", + "Why do you think you're %1?", + ), + ), + ( + r"Are you (.*)", + ( + "Why does it matter whether I am %1?", + "Would you prefer it if I were not %1?", + "Perhaps you believe I am %1.", + "I may be %1 -- what do you think?", + ), + ), + ( + r"What (.*)", + ( + "Why do you ask?", + "How would an answer to that help you?", + "What do you think?", + ), + ), + ( + r"How (.*)", + ( + "How do you suppose?", + "Perhaps you can answer your own question.", + "What is it you're really asking?", + ), + ), + ( + r"Because (.*)", + ( + "Is that the real reason?", + "What other reasons come to mind?", + "Does that reason apply to anything else?", + "If %1, what else must be true?", + ), + ), + ( + r"(.*) sorry (.*)", + ( + "There are many times when no apology is needed.", + "What feelings do you have when you apologize?", + ), + ), + ( + r"Hello(.*)", + ( + "Hello... I'm glad you could drop by today.", + "Hi there... 
how are you today?", + "Hello, how are you feeling today?", + ), + ), + ( + r"I think (.*)", + ("Do you doubt %1?", "Do you really think so?", "But you're not sure %1?"), + ), + ( + r"(.*) friend (.*)", + ( + "Tell me more about your friends.", + "When you think of a friend, what comes to mind?", + "Why don't you tell me about a childhood friend?", + ), + ), + (r"Yes", ("You seem quite sure.", "OK, but can you elaborate a bit?")), + ( + r"(.*) computer(.*)", + ( + "Are you really talking about me?", + "Does it seem strange to talk to a computer?", + "How do computers make you feel?", + "Do you feel threatened by computers?", + ), + ), + ( + r"Is it (.*)", + ( + "Do you think it is %1?", + "Perhaps it's %1 -- what do you think?", + "If it were %1, what would you do?", + "It could well be that %1.", + ), + ), + ( + r"It is (.*)", + ( + "You seem very certain.", + "If I told you that it probably isn't %1, what would you feel?", + ), + ), + ( + r"Can you (.*)", + ( + "What makes you think I can't %1?", + "If I could %1, then what?", + "Why do you ask if I can %1?", + ), + ), + ( + r"Can I (.*)", + ( + "Perhaps you don't want to %1.", + "Do you want to be able to %1?", + "If you could %1, would you?", + ), + ), + ( + r"You are (.*)", + ( + "Why do you think I am %1?", + "Does it please you to think that I'm %1?", + "Perhaps you would like me to be %1.", + "Perhaps you're really talking about yourself?", + ), + ), + ( + r"You\'re (.*)", + ( + "Why do you say I am %1?", + "Why do you think I am %1?", + "Are we talking about you, or me?", + ), + ), + ( + r"I don\'t (.*)", + ("Don't you really %1?", "Why don't you %1?", "Do you want to %1?"), + ), + ( + r"I feel (.*)", + ( + "Good, tell me more about these feelings.", + "Do you often feel %1?", + "When do you usually feel %1?", + "When you feel %1, what do you do?", + ), + ), + ( + r"I have (.*)", + ( + "Why do you tell me that you've %1?", + "Have you really %1?", + "Now that you have %1, what will you do next?", + ), + ), + ( + r"I would (.*)", + ( + "Could you explain why you would %1?", + "Why would you %1?", + "Who else knows that you would %1?", + ), + ), + ( + r"Is there (.*)", + ( + "Do you think there is %1?", + "It's likely that there is %1.", + "Would you like there to be %1?", + ), + ), + ( + r"My (.*)", + ( + "I see, your %1.", + "Why do you say that your %1?", + "When your %1, how do you feel?", + ), + ), + ( + r"You (.*)", + ( + "We should be discussing you, not me.", + "Why do you say that about me?", + "Why do you care whether I %1?", + ), + ), + (r"Why (.*)", ("Why don't you tell me the reason why %1?", "Why do you think %1?")), + ( + r"I want (.*)", + ( + "What would it mean to you if you got %1?", + "Why do you want %1?", + "What would you do if you got %1?", + "If you got %1, then what would you do?", + ), + ), + ( + r"(.*) mother(.*)", + ( + "Tell me more about your mother.", + "What was your relationship with your mother like?", + "How do you feel about your mother?", + "How does this relate to your feelings today?", + "Good family relations are important.", + ), + ), + ( + r"(.*) father(.*)", + ( + "Tell me more about your father.", + "How did your father make you feel?", + "How do you feel about your father?", + "Does your relationship with your father relate to your feelings today?", + "Do you have trouble showing affection with your family?", + ), + ), + ( + r"(.*) child(.*)", + ( + "Did you have close friends as a child?", + "What is your favorite childhood memory?", + "Do you remember any dreams or nightmares from 
childhood?", + "Did the other children sometimes tease you?", + "How do you think your childhood experiences relate to your feelings today?", + ), + ), + ( + r"(.*)\?", + ( + "Why do you ask that?", + "Please consider whether you can answer your own question.", + "Perhaps the answer lies within yourself?", + "Why don't you tell me?", + ), + ), + ( + r"quit", + ( + "Thank you for talking with me.", + "Good-bye.", + "Thank you, that will be $150. Have a good day!", + ), + ), + ( + r"(.*)", + ( + "Please tell me more.", + "Let's change focus a bit... Tell me about your family.", + "Can you elaborate on that?", + "Why do you say that %1?", + "I see.", + "Very interesting.", + "%1.", + "I see. And what does that tell you?", + "How does that make you feel?", + "How do you feel when you say that?", + ), + ), +) + +eliza_chatbot = Chat(pairs, reflections) + + +def eliza_chat(): + print("Therapist\n---------") + print("Talk to the program by typing in plain English, using normal upper-") + print('and lower-case letters and punctuation. Enter "quit" when done.') + print("=" * 72) + print("Hello. How are you feeling today?") + + eliza_chatbot.converse() + + +def demo(): + eliza_chat() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/chat/iesha.py b/venv/lib/python3.10/site-packages/nltk/chat/iesha.py new file mode 100644 index 0000000000000000000000000000000000000000..552870caa30927f30b96c5dbdfd2ccb459cf48a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chat/iesha.py @@ -0,0 +1,160 @@ +# Natural Language Toolkit: Teen Chatbot +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Selina Dennis +# URL: +# For license information, see LICENSE.TXT + +""" +This chatbot is a tongue-in-cheek take on the average teen +anime junky that frequents YahooMessenger or MSNM. +All spelling mistakes and flawed grammar are intentional. +""" + +from nltk.chat.util import Chat + +reflections = { + "am": "r", + "was": "were", + "i": "u", + "i'd": "u'd", + "i've": "u'v", + "ive": "u'v", + "i'll": "u'll", + "my": "ur", + "are": "am", + "you're": "im", + "you've": "ive", + "you'll": "i'll", + "your": "my", + "yours": "mine", + "you": "me", + "u": "me", + "ur": "my", + "urs": "mine", + "me": "u", +} + +# Note: %1/2/etc are used without spaces prior as the chat bot seems +# to add a superfluous space when matching. + +pairs = ( + ( + r"I\'m (.*)", + ( + "ur%1?? that's so cool! kekekekeke ^_^ tell me more!", + "ur%1? neat!! kekeke >_<", + ), + ), + ( + r"(.*) don\'t you (.*)", + ( + r"u think I can%2??! really?? kekeke \<_\<", + "what do u mean%2??!", + "i could if i wanted, don't you think!! kekeke", + ), + ), + (r"ye[as] [iI] (.*)", ("u%1? cool!! how?", "how come u%1??", "u%1? so do i!!")), + ( + r"do (you|u) (.*)\??", + ("do i%2? only on tuesdays! kekeke *_*", "i dunno! do u%2??"), + ), + ( + r"(.*)\?", + ( + "man u ask lots of questions!", + "booooring! how old r u??", + "boooooring!! ur not very fun", + ), + ), + ( + r"(cos|because) (.*)", + ("hee! i don't believe u! >_<", "nuh-uh! >_<", "ooooh i agree!"), + ), + ( + r"why can\'t [iI] (.*)", + ( + "i dunno! y u askin me for!", + "try harder, silly! hee! ^_^", + "i dunno! but when i can't%1 i jump up and down!", + ), + ), + ( + r"I can\'t (.*)", + ( + "u can't what??! >_<", + "that's ok! i can't%1 either! kekekekeke ^_^", + "try harder, silly! hee! ^&^", + ), + ), + ( + r"(.*) (like|love|watch) anime", + ( + "omg i love anime!! do u like sailor moon??! ^&^", + "anime yay! anime rocks sooooo much!", + "oooh anime! 
i love anime more than anything!", + "anime is the bestest evar! evangelion is the best!", + "hee anime is the best! do you have ur fav??", + ), + ), + ( + r"I (like|love|watch|play) (.*)", + ("yay! %2 rocks!", "yay! %2 is neat!", "cool! do u like other stuff?? ^_^"), + ), + ( + r"anime sucks|(.*) (hate|detest) anime", + ( + "ur a liar! i'm not gonna talk to u nemore if u h8 anime *;*", + "no way! anime is the best ever!", + "nuh-uh, anime is the best!", + ), + ), + ( + r"(are|r) (you|u) (.*)", + ("am i%1??! how come u ask that!", "maybe! y shud i tell u?? kekeke >_>"), + ), + ( + r"what (.*)", + ("hee u think im gonna tell u? .v.", "booooooooring! ask me somethin else!"), + ), + (r"how (.*)", ("not tellin!! kekekekekeke ^_^",)), + (r"(hi|hello|hey) (.*)", ("hi!!! how r u!!",)), + ( + r"quit", + ( + "mom says i have to go eat dinner now :,( bye!!", + "awww u have to go?? see u next time!!", + "how to see u again soon! ^_^", + ), + ), + ( + r"(.*)", + ( + "ur funny! kekeke", + "boooooring! talk about something else! tell me wat u like!", + "do u like anime??", + "do u watch anime? i like sailor moon! ^_^", + "i wish i was a kitty!! kekekeke ^_^", + ), + ), +) + +iesha_chatbot = Chat(pairs, reflections) + + +def iesha_chat(): + print("Iesha the TeenBoT\n---------") + print("Talk to the program by typing in plain English, using normal upper-") + print('and lower-case letters and punctuation. Enter "quit" when done.') + print("=" * 72) + print("hi!! i'm iesha! who r u??!") + + iesha_chatbot.converse() + + +def demo(): + iesha_chat() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/chat/rude.py b/venv/lib/python3.10/site-packages/nltk/chat/rude.py new file mode 100644 index 0000000000000000000000000000000000000000..77404e42bc4d4c9c279540a7bac18fa47d78b9cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chat/rude.py @@ -0,0 +1,125 @@ +# Natural Language Toolkit: Rude Chatbot +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Spiller +# URL: +# For license information, see LICENSE.TXT + +from nltk.chat.util import Chat, reflections + +pairs = ( + ( + r"We (.*)", + ( + "What do you mean, 'we'?", + "Don't include me in that!", + "I wouldn't be so sure about that.", + ), + ), + ( + r"You should (.*)", + ("Don't tell me what to do, buddy.", "Really? I should, should I?"), + ), + ( + r"You\'re(.*)", + ( + "More like YOU'RE %1!", + "Hah! Look who's talking.", + "Come over here and tell me I'm %1.", + ), + ), + ( + r"You are(.*)", + ( + "More like YOU'RE %1!", + "Hah! Look who's talking.", + "Come over here and tell me I'm %1.", + ), + ), + ( + r"I can\'t(.*)", + ( + "You do sound like the type who can't %1.", + "Hear that splashing sound? That's my heart bleeding for you.", + "Tell somebody who might actually care.", + ), + ), + ( + r"I think (.*)", + ( + "I wouldn't think too hard if I were you.", + "You actually think? I'd never have guessed...", + ), + ), + ( + r"I (.*)", + ( + "I'm getting a bit tired of hearing about you.", + "How about we talk about me instead?", + "Me, me, me... Frankly, I don't care.", + ), + ), + ( + r"How (.*)", + ( + "How do you think?", + "Take a wild guess.", + "I'm not even going to dignify that with an answer.", + ), + ), + (r"What (.*)", ("Do I look like an encyclopedia?", "Figure it out yourself.")), + ( + r"Why (.*)", + ( + "Why not?", + "That's so obvious I thought even you'd have already figured it out.", + ), + ), + ( + r"(.*)shut up(.*)", + ( + "Make me.", + "Getting angry at a feeble NLP assignment? 
Somebody's losing it.", + "Say that again, I dare you.", + ), + ), + ( + r"Shut up(.*)", + ( + "Make me.", + "Getting angry at a feeble NLP assignment? Somebody's losing it.", + "Say that again, I dare you.", + ), + ), + ( + r"Hello(.*)", + ("Oh good, somebody else to talk to. Joy.", "'Hello'? How original..."), + ), + ( + r"(.*)", + ( + "I'm getting bored here. Become more interesting.", + "Either become more thrilling or get lost, buddy.", + "Change the subject before I die of fatal boredom.", + ), + ), +) + +rude_chatbot = Chat(pairs, reflections) + + +def rude_chat(): + print("Talk to the program by typing in plain English, using normal upper-") + print('and lower-case letters and punctuation. Enter "quit" when done.') + print("=" * 72) + print("I suppose I should say hello.") + + rude_chatbot.converse() + + +def demo(): + rude_chat() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/chat/suntsu.py b/venv/lib/python3.10/site-packages/nltk/chat/suntsu.py new file mode 100644 index 0000000000000000000000000000000000000000..2130c7da1d630a2d8f78412d4b02d518d540af9f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chat/suntsu.py @@ -0,0 +1,140 @@ +# Natural Language Toolkit: Sun Tsu-Bot +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sam Huston 2007 +# URL: +# For license information, see LICENSE.TXT + +""" +Tsu bot responds to all queries with a Sun Tsu sayings + +Quoted from Sun Tsu's The Art of War +Translated by LIONEL GILES, M.A. 1910 +Hosted by the Gutenberg Project +https://www.gutenberg.org/ +""" + +from nltk.chat.util import Chat, reflections + +pairs = ( + (r"quit", ("Good-bye.", "Plan well", "May victory be your future")), + ( + r"[^\?]*\?", + ( + "Please consider whether you can answer your own question.", + "Ask me no questions!", + ), + ), + ( + r"[0-9]+(.*)", + ( + "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.", + "There are five essentials for victory", + ), + ), + ( + r"[A-Ca-c](.*)", + ( + "The art of war is of vital importance to the State.", + "All warfare is based on deception.", + "If your opponent is secure at all points, be prepared for him. 
If he is in superior strength, evade him.", + "If the campaign is protracted, the resources of the State will not be equal to the strain.", + "Attack him where he is unprepared, appear where you are not expected.", + "There is no instance of a country having benefited from prolonged warfare.", + ), + ), + ( + r"[D-Fd-f](.*)", + ( + "The skillful soldier does not raise a second levy, neither are his supply-wagons loaded more than twice.", + "Bring war material with you from home, but forage on the enemy.", + "In war, then, let your great object be victory, not lengthy campaigns.", + "To fight and conquer in all your battles is not supreme excellence; supreme excellence consists in breaking the enemy's resistance without fighting.", + ), + ), + ( + r"[G-Ig-i](.*)", + ( + "Heaven signifies night and day, cold and heat, times and seasons.", + "It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.", + "The good fighters of old first put themselves beyond the possibility of defeat, and then waited for an opportunity of defeating the enemy.", + "One may know how to conquer without being able to do it.", + ), + ), + ( + r"[J-Lj-l](.*)", + ( + "There are three ways in which a ruler can bring misfortune upon his army.", + "By commanding the army to advance or to retreat, being ignorant of the fact that it cannot obey. This is called hobbling the army.", + "By attempting to govern an army in the same way as he administers a kingdom, being ignorant of the conditions which obtain in an army. This causes restlessness in the soldier's minds.", + "By employing the officers of his army without discrimination, through ignorance of the military principle of adaptation to circumstances. This shakes the confidence of the soldiers.", + "There are five essentials for victory", + "He will win who knows when to fight and when not to fight.", + "He will win who knows how to handle both superior and inferior forces.", + "He will win whose army is animated by the same spirit throughout all its ranks.", + "He will win who, prepared himself, waits to take the enemy unprepared.", + "He will win who has military capacity and is not interfered with by the sovereign.", + ), + ), + ( + r"[M-Om-o](.*)", + ( + "If you know the enemy and know yourself, you need not fear the result of a hundred battles.", + "If you know yourself but not the enemy, for every victory gained you will also suffer a defeat.", + "If you know neither the enemy nor yourself, you will succumb in every battle.", + "The control of a large force is the same principle as the control of a few men: it is merely a question of dividing up their numbers.", + ), + ), + ( + r"[P-Rp-r](.*)", + ( + "Security against defeat implies defensive tactics; ability to defeat the enemy means taking the offensive.", + "Standing on the defensive indicates insufficient strength; attacking, a superabundance of strength.", + "He wins his battles by making no mistakes. 
Making no mistakes is what establishes the certainty of victory, for it means conquering an enemy that is already defeated.", + "A victorious army opposed to a routed one, is as a pound's weight placed in the scale against a single grain.", + "The onrush of a conquering force is like the bursting of pent-up waters into a chasm a thousand fathoms deep.", + ), + ), + ( + r"[S-Us-u](.*)", + ( + "What the ancients called a clever fighter is one who not only wins, but excels in winning with ease.", + "Hence his victories bring him neither reputation for wisdom nor credit for courage.", + "Hence the skillful fighter puts himself into a position which makes defeat impossible, and does not miss the moment for defeating the enemy.", + "In war the victorious strategist only seeks battle after the victory has been won, whereas he who is destined to defeat first fights and afterwards looks for victory.", + "There are not more than five musical notes, yet the combinations of these five give rise to more melodies than can ever be heard.", + "Appear at points which the enemy must hasten to defend; march swiftly to places where you are not expected.", + ), + ), + ( + r"[V-Zv-z](.*)", + ( + "It is a matter of life and death, a road either to safety or to ruin.", + "Hold out baits to entice the enemy. Feign disorder, and crush him.", + "All men can see the tactics whereby I conquer, but what none can see is the strategy out of which victory is evolved.", + "Do not repeat the tactics which have gained you one victory, but let your methods be regulated by the infinite variety of circumstances.", + "So in war, the way is to avoid what is strong and to strike at what is weak.", + "Just as water retains no constant shape, so in warfare there are no constant conditions.", + ), + ), + (r"(.*)", ("Your statement insults me.", "")), +) + +suntsu_chatbot = Chat(pairs, reflections) + + +def suntsu_chat(): + print("Talk to the program by typing in plain English, using normal upper-") + print('and lower-case letters and punctuation. Enter "quit" when done.') + print("=" * 72) + print("You seek enlightenment?") + + suntsu_chatbot.converse() + + +def demo(): + suntsu_chat() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/chat/util.py b/venv/lib/python3.10/site-packages/nltk/chat/util.py new file mode 100644 index 0000000000000000000000000000000000000000..ddcb246ce3b74a15cd4c87bb180811553849af1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chat/util.py @@ -0,0 +1,124 @@ +# Natural Language Toolkit: Chatbot Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# Based on an Eliza implementation by Joe Strout , +# Jeff Epler and Jez Higgins . + +import random +import re + +reflections = { + "i am": "you are", + "i was": "you were", + "i": "you", + "i'm": "you are", + "i'd": "you would", + "i've": "you have", + "i'll": "you will", + "my": "your", + "you are": "I am", + "you were": "I was", + "you've": "I have", + "you'll": "I will", + "your": "my", + "yours": "mine", + "you": "me", + "me": "you", +} + + +class Chat: + def __init__(self, pairs, reflections={}): + """ + Initialize the chatbot. Pairs is a list of patterns and responses. Each + pattern is a regular expression matching the user's statement or question, + e.g. r'I like (.*)'. For each such pattern a list of possible responses + is given, e.g. ['Why do you like %1', 'Did you ever dislike %1']. 
Material + which is matched by parenthesized sections of the patterns (e.g. .*) is mapped to + the numbered positions in the responses, e.g. %1. + + :type pairs: list of tuple + :param pairs: The patterns and responses + :type reflections: dict + :param reflections: A mapping between first and second person expressions + :rtype: None + """ + + self._pairs = [(re.compile(x, re.IGNORECASE), y) for (x, y) in pairs] + self._reflections = reflections + self._regex = self._compile_reflections() + + def _compile_reflections(self): + sorted_refl = sorted(self._reflections, key=len, reverse=True) + return re.compile( + r"\b({})\b".format("|".join(map(re.escape, sorted_refl))), re.IGNORECASE + ) + + def _substitute(self, str): + """ + Substitute words in the string, according to the specified reflections, + e.g. "I'm" -> "you are" + + :type str: str + :param str: The string to be mapped + :rtype: str + """ + + return self._regex.sub( + lambda mo: self._reflections[mo.string[mo.start() : mo.end()]], str.lower() + ) + + def _wildcards(self, response, match): + pos = response.find("%") + while pos >= 0: + num = int(response[pos + 1 : pos + 2]) + response = ( + response[:pos] + + self._substitute(match.group(num)) + + response[pos + 2 :] + ) + pos = response.find("%") + return response + + def respond(self, str): + """ + Generate a response to the user input. + + :type str: str + :param str: The string to be mapped + :rtype: str + """ + + # check each pattern + for (pattern, response) in self._pairs: + match = pattern.match(str) + + # did the pattern match? + if match: + resp = random.choice(response) # pick a random response + resp = self._wildcards(resp, match) # process wildcards + + # fix munged punctuation at the end + if resp[-2:] == "?.": + resp = resp[:-2] + "." + if resp[-2:] == "??": + resp = resp[:-2] + "?" + return resp + + # Hold a conversation with a chatbot + def converse(self, quit="quit"): + user_input = "" + while user_input != quit: + user_input = quit + try: + user_input = input(">") + except EOFError: + print(user_input) + if user_input: + while user_input[-1] in "!.": + user_input = user_input[:-1] + print(self.respond(user_input)) diff --git a/venv/lib/python3.10/site-packages/nltk/chat/zen.py b/venv/lib/python3.10/site-packages/nltk/chat/zen.py new file mode 100644 index 0000000000000000000000000000000000000000..5972d47764056b388fca1a24acb88bd87369274f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chat/zen.py @@ -0,0 +1,329 @@ +# Natural Language Toolkit: Zen Chatbot +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Amy Holland +# URL: +# For license information, see LICENSE.TXT + +""" +Zen Chatbot talks in gems of Zen wisdom. + +This is a sample conversation with Zen Chatbot: +ZC: Welcome, my child. +me: Good afternoon. +ZC: Ask the question you have come to ask. +me: How can I achieve enlightenment? +ZC: How do you suppose? +me: Through meditation. +ZC: Form is emptiness, and emptiness form. +me: How can I empty my mind of worldly troubles? +ZC: Will an answer to that really help in your search for enlightenment? +me: Yes. +ZC: It is better to be right than to be certain. +me: I seek truth and wisdom. +ZC: The search for truth is a long journey. +me: Are you sure? +ZC: Maybe sure, maybe not sure. + + +The chatbot structure is based on that of chat.eliza. Thus, it uses +a translation table to convert from question to response +i.e. "I am" --> "you are" + +Of course, since Zen Chatbot does not understand the meaning of any words, +responses are very limited. 
Zen Chatbot will usually answer very vaguely, or +respond to a question by asking a different question, in much the same way +as Eliza. +""" + +from nltk.chat.util import Chat, reflections + +# responses are matched top to bottom, so non-specific matches occur later +# for each match, a list of possible responses is provided +responses = ( + # Zen Chatbot opens with the line "Welcome, my child." The usual + # response will be a greeting problem: 'good' matches "good morning", + # "good day" etc, but also "good grief!" and other sentences starting + # with the word 'good' that may not be a greeting + ( + r"(hello(.*))|(good [a-zA-Z]+)", + ( + "The path to enlightenment is often difficult to see.", + "Greetings. I sense your mind is troubled. Tell me of your troubles.", + "Ask the question you have come to ask.", + "Hello. Do you seek englightenment?", + ), + ), + # "I need" and "I want" can be followed by a thing (eg 'help') + # or an action (eg 'to see you') + # + # This is a problem with this style of response - + # person: "I need you" + # chatbot: "me can be achieved by hard work and dedication of the mind" + # i.e. 'you' is not really a thing that can be mapped this way, so this + # interpretation only makes sense for some inputs + # + ( + r"i need (.*)", + ( + "%1 can be achieved by hard work and dedication of the mind.", + "%1 is not a need, but a desire of the mind. Clear your mind of such concerns.", + "Focus your mind on%1, and you will find what you need.", + ), + ), + ( + r"i want (.*)", + ( + "Desires of the heart will distract you from the path to enlightenment.", + "Will%1 help you attain enlightenment?", + "Is%1 a desire of the mind, or of the heart?", + ), + ), + # why questions are separated into three types: + # "why..I" e.g. "why am I here?" "Why do I like cake?" + # "why..you" e.g. "why are you here?" "Why won't you tell me?" + # "why..." e.g. "Why is the sky blue?" + # problems: + # person: "Why can't you tell me?" + # chatbot: "Are you sure I tell you?" + # - this style works for positives (e.g. "why do you like cake?") + # but does not work for negatives (e.g. "why don't you like cake?") + (r"why (.*) i (.*)\?", ("You%1%2?", "Perhaps you only think you%1%2")), + (r"why (.*) you(.*)\?", ("Why%1 you%2?", "%2 I%1", "Are you sure I%2?")), + (r"why (.*)\?", ("I cannot tell you why%1.", "Why do you think %1?")), + # e.g. "are you listening?", "are you a duck" + ( + r"are you (.*)\?", + ("Maybe%1, maybe not%1.", "Whether I am%1 or not is God's business."), + ), + # e.g. "am I a duck?", "am I going to die?" + ( + r"am i (.*)\?", + ("Perhaps%1, perhaps not%1.", "Whether you are%1 or not is not for me to say."), + ), + # what questions, e.g. "what time is it?" + # problems: + # person: "What do you want?" + # chatbot: "Seek truth, not what do me want." + (r"what (.*)\?", ("Seek truth, not what%1.", "What%1 should not concern you.")), + # how questions, e.g. "how do you do?" + ( + r"how (.*)\?", + ( + "How do you suppose?", + "Will an answer to that really help in your search for enlightenment?", + "Ask yourself not how, but why.", + ), + ), + # can questions, e.g. "can you run?", "can you come over here please?" + ( + r"can you (.*)\?", + ( + "I probably can, but I may not.", + "Maybe I can%1, and maybe I cannot.", + "I can do all, and I can do nothing.", + ), + ), + # can questions, e.g. "can I have some cake?", "can I know truth?" 
+ ( + r"can i (.*)\?", + ( + "You can%1 if you believe you can%1, and have a pure spirit.", + "Seek truth and you will know if you can%1.", + ), + ), + # e.g. "It is raining" - implies the speaker is certain of a fact + ( + r"it is (.*)", + ( + "How can you be certain that%1, when you do not even know yourself?", + "Whether it is%1 or not does not change the way the world is.", + ), + ), + # e.g. "is there a doctor in the house?" + ( + r"is there (.*)\?", + ("There is%1 if you believe there is.", "It is possible that there is%1."), + ), + # e.g. "is it possible?", "is this true?" + (r"is(.*)\?", ("%1 is not relevant.", "Does this matter?")), + # non-specific question + ( + r"(.*)\?", + ( + "Do you think %1?", + "You seek the truth. Does the truth seek you?", + "If you intentionally pursue the answers to your questions, the answers become hard to see.", + "The answer to your question cannot be told. It must be experienced.", + ), + ), + # expression of hate of form "I hate you" or "Kelly hates cheese" + ( + r"(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)", + ( + "Perhaps it is not about hating %2, but about hate from within.", + "Weeds only grow when we dislike them", + "Hate is a very strong emotion.", + ), + ), + # statement containing the word 'truth' + ( + r"(.*) truth(.*)", + ( + "Seek truth, and truth will seek you.", + "Remember, it is not the spoon which bends - only yourself.", + "The search for truth is a long journey.", + ), + ), + # desire to do an action + # e.g. "I want to go shopping" + ( + r"i want to (.*)", + ("You may %1 if your heart truly desires to.", "You may have to %1."), + ), + # desire for an object + # e.g. "I want a pony" + ( + r"i want (.*)", + ( + "Does your heart truly desire %1?", + "Is this a desire of the heart, or of the mind?", + ), + ), + # e.g. "I can't wait" or "I can't do this" + ( + r"i can\'t (.*)", + ( + "What we can and can't do is a limitation of the mind.", + "There are limitations of the body, and limitations of the mind.", + "Have you tried to%1 with a clear mind?", + ), + ), + # "I think.." indicates uncertainty. e.g. "I think so." + # problem: exceptions... + # e.g. "I think, therefore I am" + ( + r"i think (.*)", + ( + "Uncertainty in an uncertain world.", + "Indeed, how can we be certain of anything in such uncertain times.", + "Are you not, in fact, certain that%1?", + ), + ), + # "I feel...emotions/sick/light-headed..." + ( + r"i feel (.*)", + ( + "Your body and your emotions are both symptoms of your mind." + "What do you believe is the root of such feelings?", + "Feeling%1 can be a sign of your state-of-mind.", + ), + ), + # exclaimation mark indicating emotion + # e.g. "Wow!" or "No!" + ( + r"(.*)!", + ( + "I sense that you are feeling emotional today.", + "You need to calm your emotions.", + ), + ), + # because [statement] + # e.g. "because I said so" + ( + r"because (.*)", + ( + "Does knowning the reasons behind things help you to understand" + " the things themselves?", + "If%1, what else must be true?", + ), + ), + # yes or no - raise an issue of certainty/correctness + ( + r"(yes)|(no)", + ( + "Is there certainty in an uncertain world?", + "It is better to be right than to be certain.", + ), + ), + # sentence containing word 'love' + ( + r"(.*)love(.*)", + ( + "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. 
Let your heart be like the trees.", + "Free love!", + ), + ), + # sentence containing word 'understand' - r + ( + r"(.*)understand(.*)", + ( + "If you understand, things are just as they are;" + " if you do not understand, things are just as they are.", + "Imagination is more important than knowledge.", + ), + ), + # 'I', 'me', 'my' - person is talking about themself. + # this breaks down when words contain these - eg 'Thyme', 'Irish' + ( + r"(.*)(me )|( me)|(my)|(mine)|(i)(.*)", + ( + "'I', 'me', 'my'... these are selfish expressions.", + "Have you ever considered that you might be a selfish person?", + "Try to consider others, not just yourself.", + "Think not just of yourself, but of others.", + ), + ), + # 'you' starting a sentence + # e.g. "you stink!" + ( + r"you (.*)", + ("My path is not of concern to you.", "I am but one, and you but one more."), + ), + # say goodbye with some extra Zen wisdom. + ( + r"exit", + ( + "Farewell. The obstacle is the path.", + "Farewell. Life is a journey, not a destination.", + "Good bye. We are cups, constantly and quietly being filled." + "\nThe trick is knowning how to tip ourselves over and let the beautiful stuff out.", + ), + ), + # fall through case - + # when stumped, respond with generic zen wisdom + # + ( + r"(.*)", + ( + "When you're enlightened, every word is wisdom.", + "Random talk is useless.", + "The reverse side also has a reverse side.", + "Form is emptiness, and emptiness is form.", + "I pour out a cup of water. Is the cup empty?", + ), + ), +) + +zen_chatbot = Chat(responses, reflections) + + +def zen_chat(): + print("*" * 75) + print("Zen Chatbot!".center(75)) + print("*" * 75) + print('"Look beyond mere words and letters - look into your mind"'.center(75)) + print("* Talk your way to truth with Zen Chatbot.") + print("* Type 'quit' when you have had enough.") + print("*" * 75) + print("Welcome, my child.") + + zen_chatbot.converse() + + +def demo(): + zen_chat() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/classify/__init__.py b/venv/lib/python3.10/site-packages/nltk/classify/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..238522fd6f7cedce69faf8bfb3384b22cc509cbb --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/__init__.py @@ -0,0 +1,101 @@ +# Natural Language Toolkit: Classifiers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Classes and interfaces for labeling tokens with category labels (or +"class labels"). Typically, labels are represented with strings +(such as ``'health'`` or ``'sports'``). Classifiers can be used to +perform a wide range of classification tasks. For example, +classifiers can be used... + +- to classify documents by topic +- to classify ambiguous words by which word sense is intended +- to classify acoustic signals by which phoneme they represent +- to classify sentences by their author + +Features +======== +In order to decide which category label is appropriate for a given +token, classifiers examine one or more 'features' of the token. These +"features" are typically chosen by hand, and indicate which aspects +of the token are relevant to the classification decision. For +example, a document classifier might use a separate feature for each +word, recording how often that word occurred in the document. 
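For illustration, a hedged sketch of such a count-valued featureset (the helper below is invented, not part of NLTK):

    >>> from collections import Counter
    >>> def count_features(document):
    ...     return {'count(%s)' % w: n for w, n in Counter(document).items()}
    >>> count_features(['the', 'cat', 'sat', 'on', 'the', 'mat']) == {
    ...     'count(the)': 2, 'count(cat)': 1, 'count(sat)': 1,
    ...     'count(on)': 1, 'count(mat)': 1}
    True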
+ +Featuresets +=========== +The features describing a token are encoded using a "featureset", +which is a dictionary that maps from "feature names" to "feature +values". Feature names are unique strings that indicate what aspect +of the token is encoded by the feature. Examples include +``'prevword'``, for a feature whose value is the previous word; and +``'contains-word(library)'`` for a feature that is true when a document +contains the word ``'library'``. Feature values are typically +booleans, numbers, or strings, depending on which feature they +describe. + +Featuresets are typically constructed using a "feature detector" +(also known as a "feature extractor"). A feature detector is a +function that takes a token (and sometimes information about its +context) as its input, and returns a featureset describing that token. +For example, the following feature detector converts a document +(stored as a list of words) to a featureset describing the set of +words included in the document: + + >>> # Define a feature detector function. + >>> def document_features(document): + ... return dict([('contains-word(%s)' % w, True) for w in document]) + +Feature detectors are typically applied to each token before it is fed +to the classifier: + + >>> # Classify each Gutenberg document. + >>> from nltk.corpus import gutenberg + >>> for fileid in gutenberg.fileids(): # doctest: +SKIP + ... doc = gutenberg.words(fileid) # doctest: +SKIP + ... print(fileid, classifier.classify(document_features(doc))) # doctest: +SKIP + +The parameters that a feature detector expects will vary, depending on +the task and the needs of the feature detector. For example, a +feature detector for word sense disambiguation (WSD) might take as its +input a sentence, and the index of a word that should be classified, +and return a featureset for that word. The following feature detector +for WSD includes features describing the left and right contexts of +the target word: + + >>> def wsd_features(sentence, index): + ... featureset = {} + ... for i in range(max(0, index-3), index): + ... featureset['left-context(%s)' % sentence[i]] = True + ... for i in range(index, max(index+3, len(sentence))): + ... featureset['right-context(%s)' % sentence[i]] = True + ... return featureset + +Training Classifiers +==================== +Most classifiers are built by training them on a list of hand-labeled +examples, known as the "training set". Training sets are represented +as lists of ``(featuredict, label)`` tuples. 
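For illustration, a minimal sketch of this training-set format fed to ``NaiveBayesClassifier`` (the toy featuresets and labels are invented):

    >>> from nltk.classify import NaiveBayesClassifier
    >>> train_set = [
    ...     ({'contains-word(ball)': True, 'contains-word(goal)': True}, 'sports'),
    ...     ({'contains-word(doctor)': True, 'contains-word(flu)': True}, 'health'),
    ... ]
    >>> classifier = NaiveBayesClassifier.train(train_set)
    >>> classifier.classify({'contains-word(goal)': True}) # doctest: +SKIP
    'sports'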
+""" + +from nltk.classify.api import ClassifierI, MultiClassifierI +from nltk.classify.decisiontree import DecisionTreeClassifier +from nltk.classify.maxent import ( + BinaryMaxentFeatureEncoding, + ConditionalExponentialClassifier, + MaxentClassifier, + TypedMaxentFeatureEncoding, +) +from nltk.classify.megam import call_megam, config_megam +from nltk.classify.naivebayes import NaiveBayesClassifier +from nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier +from nltk.classify.rte_classify import RTEFeatureExtractor, rte_classifier, rte_features +from nltk.classify.scikitlearn import SklearnClassifier +from nltk.classify.senna import Senna +from nltk.classify.textcat import TextCat +from nltk.classify.util import accuracy, apply_features, log_likelihood +from nltk.classify.weka import WekaClassifier, config_weka diff --git a/venv/lib/python3.10/site-packages/nltk/classify/api.py b/venv/lib/python3.10/site-packages/nltk/classify/api.py new file mode 100644 index 0000000000000000000000000000000000000000..f2c1e25990ecc91dec7d5dd79a69e9a6d4e9fbec --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/api.py @@ -0,0 +1,195 @@ +# Natural Language Toolkit: Classifier Interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT + +""" +Interfaces for labeling tokens with category labels (or "class labels"). + +``ClassifierI`` is a standard interface for "single-category +classification", in which the set of categories is known, the number +of categories is finite, and each text belongs to exactly one +category. + +``MultiClassifierI`` is a standard interface for "multi-category +classification", which is like single-category classification except +that each text belongs to zero or more categories. +""" +from nltk.internals import overridden + +##////////////////////////////////////////////////////// +# { Classification Interfaces +##////////////////////////////////////////////////////// + + +class ClassifierI: + """ + A processing interface for labeling tokens with a single category + label (or "class"). Labels are typically strs or + ints, but can be any immutable type. The set of labels + that the classifier chooses from must be fixed and finite. + + Subclasses must define: + - ``labels()`` + - either ``classify()`` or ``classify_many()`` (or both) + + Subclasses may define: + - either ``prob_classify()`` or ``prob_classify_many()`` (or both) + """ + + def labels(self): + """ + :return: the list of category labels used by this classifier. + :rtype: list of (immutable) + """ + raise NotImplementedError() + + def classify(self, featureset): + """ + :return: the most appropriate label for the given featureset. + :rtype: label + """ + if overridden(self.classify_many): + return self.classify_many([featureset])[0] + else: + raise NotImplementedError() + + def prob_classify(self, featureset): + """ + :return: a probability distribution over labels for the given + featureset. + :rtype: ProbDistI + """ + if overridden(self.prob_classify_many): + return self.prob_classify_many([featureset])[0] + else: + raise NotImplementedError() + + def classify_many(self, featuresets): + """ + Apply ``self.classify()`` to each element of ``featuresets``. 
I.e.: + + return [self.classify(fs) for fs in featuresets] + + :rtype: list(label) + """ + return [self.classify(fs) for fs in featuresets] + + def prob_classify_many(self, featuresets): + """ + Apply ``self.prob_classify()`` to each element of ``featuresets``. I.e.: + + return [self.prob_classify(fs) for fs in featuresets] + + :rtype: list(ProbDistI) + """ + return [self.prob_classify(fs) for fs in featuresets] + + +class MultiClassifierI: + """ + A processing interface for labeling tokens with zero or more + category labels (or "labels"). Labels are typically strs + or ints, but can be any immutable type. The set of labels + that the multi-classifier chooses from must be fixed and finite. + + Subclasses must define: + - ``labels()`` + - either ``classify()`` or ``classify_many()`` (or both) + + Subclasses may define: + - either ``prob_classify()`` or ``prob_classify_many()`` (or both) + """ + + def labels(self): + """ + :return: the list of category labels used by this classifier. + :rtype: list of (immutable) + """ + raise NotImplementedError() + + def classify(self, featureset): + """ + :return: the most appropriate set of labels for the given featureset. + :rtype: set(label) + """ + if overridden(self.classify_many): + return self.classify_many([featureset])[0] + else: + raise NotImplementedError() + + def prob_classify(self, featureset): + """ + :return: a probability distribution over sets of labels for the + given featureset. + :rtype: ProbDistI + """ + if overridden(self.prob_classify_many): + return self.prob_classify_many([featureset])[0] + else: + raise NotImplementedError() + + def classify_many(self, featuresets): + """ + Apply ``self.classify()`` to each element of ``featuresets``. I.e.: + + return [self.classify(fs) for fs in featuresets] + + :rtype: list(set(label)) + """ + return [self.classify(fs) for fs in featuresets] + + def prob_classify_many(self, featuresets): + """ + Apply ``self.prob_classify()`` to each element of ``featuresets``. I.e.: + + return [self.prob_classify(fs) for fs in featuresets] + + :rtype: list(ProbDistI) + """ + return [self.prob_classify(fs) for fs in featuresets] + + +# # [XX] IN PROGRESS: +# class SequenceClassifierI: +# """ +# A processing interface for labeling sequences of tokens with a +# single category label (or "class"). Labels are typically +# strs or ints, but can be any immutable type. The set +# of labels that the classifier chooses from must be fixed and +# finite. +# """ +# def labels(self): +# """ +# :return: the list of category labels used by this classifier. +# :rtype: list of (immutable) +# """ +# raise NotImplementedError() + +# def prob_classify(self, featureset): +# """ +# Return a probability distribution over labels for the given +# featureset. + +# If ``featureset`` is a list of featuresets, then return a +# corresponding list containing the probability distribution +# over labels for each of the given featuresets, where the +# *i*\ th element of this list is the most appropriate label for +# the *i*\ th element of ``featuresets``. +# """ +# raise NotImplementedError() + +# def classify(self, featureset): +# """ +# Return the most appropriate label for the given featureset. + +# If ``featureset`` is a list of featuresets, then return a +# corresponding list containing the most appropriate label for +# each of the given featuresets, where the *i*\ th element of +# this list is the most appropriate label for the *i*\ th element +# of ``featuresets``. 
+# """ +# raise NotImplementedError() diff --git a/venv/lib/python3.10/site-packages/nltk/classify/decisiontree.py b/venv/lib/python3.10/site-packages/nltk/classify/decisiontree.py new file mode 100644 index 0000000000000000000000000000000000000000..41d04e8ad8fe15e5ac1f2319f2e28f83f5dcfe7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/decisiontree.py @@ -0,0 +1,349 @@ +# Natural Language Toolkit: Decision Tree Classifiers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A classifier model that decides which label to assign to a token on +the basis of a tree structure, where branches correspond to conditions +on feature values, and leaves correspond to label assignments. +""" + +from collections import defaultdict + +from nltk.classify.api import ClassifierI +from nltk.probability import FreqDist, MLEProbDist, entropy + + +class DecisionTreeClassifier(ClassifierI): + def __init__(self, label, feature_name=None, decisions=None, default=None): + """ + :param label: The most likely label for tokens that reach + this node in the decision tree. If this decision tree + has no children, then this label will be assigned to + any token that reaches this decision tree. + :param feature_name: The name of the feature that this + decision tree selects for. + :param decisions: A dictionary mapping from feature values + for the feature identified by ``feature_name`` to + child decision trees. + :param default: The child that will be used if the value of + feature ``feature_name`` does not match any of the keys in + ``decisions``. This is used when constructing binary + decision trees. + """ + self._label = label + self._fname = feature_name + self._decisions = decisions + self._default = default + + def labels(self): + labels = [self._label] + if self._decisions is not None: + for dt in self._decisions.values(): + labels.extend(dt.labels()) + if self._default is not None: + labels.extend(self._default.labels()) + return list(set(labels)) + + def classify(self, featureset): + # Decision leaf: + if self._fname is None: + return self._label + + # Decision tree: + fval = featureset.get(self._fname) + if fval in self._decisions: + return self._decisions[fval].classify(featureset) + elif self._default is not None: + return self._default.classify(featureset) + else: + return self._label + + def error(self, labeled_featuresets): + errors = 0 + for featureset, label in labeled_featuresets: + if self.classify(featureset) != label: + errors += 1 + return errors / len(labeled_featuresets) + + def pretty_format(self, width=70, prefix="", depth=4): + """ + Return a string containing a pretty-printed version of this + decision tree. Each line in this string corresponds to a + single decision tree node or leaf, and indentation is used to + display the structure of the decision tree. + """ + # [xx] display default!! + if self._fname is None: + n = width - len(prefix) - 15 + return "{}{} {}\n".format(prefix, "." * n, self._label) + s = "" + for i, (fval, result) in enumerate( + sorted( + self._decisions.items(), + key=lambda item: (item[0] in [None, False, True], str(item[0]).lower()), + ) + ): + hdr = f"{prefix}{self._fname}={fval}? " + n = width - 15 - len(hdr) + s += "{}{} {}\n".format(hdr, "." 
* (n), result._label) + if result._fname is not None and depth > 1: + s += result.pretty_format(width, prefix + " ", depth - 1) + if self._default is not None: + n = width - len(prefix) - 21 + s += "{}else: {} {}\n".format(prefix, "." * n, self._default._label) + if self._default._fname is not None and depth > 1: + s += self._default.pretty_format(width, prefix + " ", depth - 1) + return s + + def pseudocode(self, prefix="", depth=4): + """ + Return a string representation of this decision tree that + expresses the decisions it makes as a nested set of pseudocode + if statements. + """ + if self._fname is None: + return f"{prefix}return {self._label!r}\n" + s = "" + for (fval, result) in sorted( + self._decisions.items(), + key=lambda item: (item[0] in [None, False, True], str(item[0]).lower()), + ): + s += f"{prefix}if {self._fname} == {fval!r}: " + if result._fname is not None and depth > 1: + s += "\n" + result.pseudocode(prefix + " ", depth - 1) + else: + s += f"return {result._label!r}\n" + if self._default is not None: + if len(self._decisions) == 1: + s += "{}if {} != {!r}: ".format( + prefix, self._fname, list(self._decisions.keys())[0] + ) + else: + s += f"{prefix}else: " + if self._default._fname is not None and depth > 1: + s += "\n" + self._default.pseudocode(prefix + " ", depth - 1) + else: + s += f"return {self._default._label!r}\n" + return s + + def __str__(self): + return self.pretty_format() + + @staticmethod + def train( + labeled_featuresets, + entropy_cutoff=0.05, + depth_cutoff=100, + support_cutoff=10, + binary=False, + feature_values=None, + verbose=False, + ): + """ + :param binary: If true, then treat all feature/value pairs as + individual binary features, rather than using a single n-way + branch for each feature. + """ + # Collect a list of all feature names. + feature_names = set() + for featureset, label in labeled_featuresets: + for fname in featureset: + feature_names.add(fname) + + # Collect a list of the values each feature can take. + if feature_values is None and binary: + feature_values = defaultdict(set) + for featureset, label in labeled_featuresets: + for fname, fval in featureset.items(): + feature_values[fname].add(fval) + + # Start with a stump. + if not binary: + tree = DecisionTreeClassifier.best_stump( + feature_names, labeled_featuresets, verbose + ) + else: + tree = DecisionTreeClassifier.best_binary_stump( + feature_names, labeled_featuresets, feature_values, verbose + ) + + # Refine the stump. + tree.refine( + labeled_featuresets, + entropy_cutoff, + depth_cutoff - 1, + support_cutoff, + binary, + feature_values, + verbose, + ) + + # Return it + return tree + + @staticmethod + def leaf(labeled_featuresets): + label = FreqDist(label for (featureset, label) in labeled_featuresets).max() + return DecisionTreeClassifier(label) + + @staticmethod + def stump(feature_name, labeled_featuresets): + label = FreqDist(label for (featureset, label) in labeled_featuresets).max() + + # Find the best label for each value. 
+ freqs = defaultdict(FreqDist) # freq(label|value) + for featureset, label in labeled_featuresets: + feature_value = featureset.get(feature_name) + freqs[feature_value][label] += 1 + + decisions = {val: DecisionTreeClassifier(freqs[val].max()) for val in freqs} + return DecisionTreeClassifier(label, feature_name, decisions) + + def refine( + self, + labeled_featuresets, + entropy_cutoff, + depth_cutoff, + support_cutoff, + binary=False, + feature_values=None, + verbose=False, + ): + if len(labeled_featuresets) <= support_cutoff: + return + if self._fname is None: + return + if depth_cutoff <= 0: + return + for fval in self._decisions: + fval_featuresets = [ + (featureset, label) + for (featureset, label) in labeled_featuresets + if featureset.get(self._fname) == fval + ] + + label_freqs = FreqDist(label for (featureset, label) in fval_featuresets) + if entropy(MLEProbDist(label_freqs)) > entropy_cutoff: + self._decisions[fval] = DecisionTreeClassifier.train( + fval_featuresets, + entropy_cutoff, + depth_cutoff, + support_cutoff, + binary, + feature_values, + verbose, + ) + if self._default is not None: + default_featuresets = [ + (featureset, label) + for (featureset, label) in labeled_featuresets + if featureset.get(self._fname) not in self._decisions + ] + label_freqs = FreqDist(label for (featureset, label) in default_featuresets) + if entropy(MLEProbDist(label_freqs)) > entropy_cutoff: + self._default = DecisionTreeClassifier.train( + default_featuresets, + entropy_cutoff, + depth_cutoff, + support_cutoff, + binary, + feature_values, + verbose, + ) + + @staticmethod + def best_stump(feature_names, labeled_featuresets, verbose=False): + best_stump = DecisionTreeClassifier.leaf(labeled_featuresets) + best_error = best_stump.error(labeled_featuresets) + for fname in feature_names: + stump = DecisionTreeClassifier.stump(fname, labeled_featuresets) + stump_error = stump.error(labeled_featuresets) + if stump_error < best_error: + best_error = stump_error + best_stump = stump + if verbose: + print( + "best stump for {:6d} toks uses {:20} err={:6.4f}".format( + len(labeled_featuresets), best_stump._fname, best_error + ) + ) + return best_stump + + @staticmethod + def binary_stump(feature_name, feature_value, labeled_featuresets): + label = FreqDist(label for (featureset, label) in labeled_featuresets).max() + + # Find the best label for each value. + pos_fdist = FreqDist() + neg_fdist = FreqDist() + for featureset, label in labeled_featuresets: + if featureset.get(feature_name) == feature_value: + pos_fdist[label] += 1 + else: + neg_fdist[label] += 1 + + decisions = {} + default = label + # But hopefully we have observations! 
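+        # Use the most frequent label on each side of the split, when that
+        # side has observations.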
+ if pos_fdist.N() > 0: + decisions = {feature_value: DecisionTreeClassifier(pos_fdist.max())} + if neg_fdist.N() > 0: + default = DecisionTreeClassifier(neg_fdist.max()) + + return DecisionTreeClassifier(label, feature_name, decisions, default) + + @staticmethod + def best_binary_stump( + feature_names, labeled_featuresets, feature_values, verbose=False + ): + best_stump = DecisionTreeClassifier.leaf(labeled_featuresets) + best_error = best_stump.error(labeled_featuresets) + for fname in feature_names: + for fval in feature_values[fname]: + stump = DecisionTreeClassifier.binary_stump( + fname, fval, labeled_featuresets + ) + stump_error = stump.error(labeled_featuresets) + if stump_error < best_error: + best_error = stump_error + best_stump = stump + if verbose: + if best_stump._decisions: + descr = "{}={}".format( + best_stump._fname, list(best_stump._decisions.keys())[0] + ) + else: + descr = "(default)" + print( + "best stump for {:6d} toks uses {:20} err={:6.4f}".format( + len(labeled_featuresets), descr, best_error + ) + ) + return best_stump + + +##////////////////////////////////////////////////////// +## Demo +##////////////////////////////////////////////////////// + + +def f(x): + return DecisionTreeClassifier.train(x, binary=True, verbose=True) + + +def demo(): + from nltk.classify.util import binary_names_demo_features, names_demo + + classifier = names_demo( + f, binary_names_demo_features # DecisionTreeClassifier.train, + ) + print(classifier.pretty_format(depth=7)) + print(classifier.pseudocode(depth=7)) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/classify/maxent.py b/venv/lib/python3.10/site-packages/nltk/classify/maxent.py new file mode 100644 index 0000000000000000000000000000000000000000..e9f66503756a0768ece53179cf3ff8f231c2aab1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/maxent.py @@ -0,0 +1,1569 @@ +# Natural Language Toolkit: Maximum Entropy Classifiers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Dmitry Chichkov (TypedMaxentFeatureEncoding) +# URL: +# For license information, see LICENSE.TXT + +""" +A classifier model based on maximum entropy modeling framework. This +framework considers all of the probability distributions that are +empirically consistent with the training data; and chooses the +distribution with the highest entropy. A probability distribution is +"empirically consistent" with a set of training data if its estimated +frequency with which a class and a feature vector value co-occur is +equal to the actual frequency in the data. + +Terminology: 'feature' +====================== +The term *feature* is usually used to refer to some property of an +unlabeled token. For example, when performing word sense +disambiguation, we might define a ``'prevword'`` feature whose value is +the word preceding the target word. However, in the context of +maxent modeling, the term *feature* is typically used to refer to a +property of a "labeled" token. In order to prevent confusion, we +will introduce two distinct terms to disambiguate these two different +concepts: + + - An "input-feature" is a property of an unlabeled token. + - A "joint-feature" is a property of a labeled token. + +In the rest of the ``nltk.classify`` module, the term "features" is +used to refer to what we will call "input-features" in this module. 
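+
+For instance (a minimal sketch, reusing the hypothetical ``'prevword'``
+input-feature from above and two made-up labels), one input-feature value
+yields a separate joint-feature for each label:
+
+    >>> featureset = {'prevword': 'the'}              # an input-feature
+    >>> def joint_feat(fs, label):                    # one derived joint-feature
+    ...     return int(fs.get('prevword') == 'the' and label == 'NOUN')
+    >>> joint_feat(featureset, 'NOUN'), joint_feat(featureset, 'VERB')
+    (1, 0)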
+ +In literature that describes and discusses maximum entropy models, +input-features are typically called "contexts", and joint-features +are simply referred to as "features". + +Converting Input-Features to Joint-Features +------------------------------------------- +In maximum entropy models, joint-features are required to have numeric +values. Typically, each input-feature ``input_feat`` is mapped to a +set of joint-features of the form: + +| joint_feat(token, label) = { 1 if input_feat(token) == feat_val +| { and label == some_label +| { +| { 0 otherwise + +For all values of ``feat_val`` and ``some_label``. This mapping is +performed by classes that implement the ``MaxentFeatureEncodingI`` +interface. +""" +try: + import numpy +except ImportError: + pass + +import os +import tempfile +from collections import defaultdict + +from nltk.classify.api import ClassifierI +from nltk.classify.megam import call_megam, parse_megam_weights, write_megam_file +from nltk.classify.tadm import call_tadm, parse_tadm_weights, write_tadm_file +from nltk.classify.util import CutoffChecker, accuracy, log_likelihood +from nltk.data import gzip_open_unicode +from nltk.probability import DictionaryProbDist +from nltk.util import OrderedDict + +__docformat__ = "epytext en" + +###################################################################### +# { Classifier Model +###################################################################### + + +class MaxentClassifier(ClassifierI): + """ + A maximum entropy classifier (also known as a "conditional + exponential classifier"). This classifier is parameterized by a + set of "weights", which are used to combine the joint-features + that are generated from a featureset by an "encoding". In + particular, the encoding maps each ``(featureset, label)`` pair to + a vector. The probability of each label is then computed using + the following equation:: + + dotprod(weights, encode(fs,label)) + prob(fs|label) = --------------------------------------------------- + sum(dotprod(weights, encode(fs,l)) for l in labels) + + Where ``dotprod`` is the dot product:: + + dotprod(a,b) = sum(x*y for (x,y) in zip(a,b)) + """ + + def __init__(self, encoding, weights, logarithmic=True): + """ + Construct a new maxent classifier model. Typically, new + classifier models are created using the ``train()`` method. + + :type encoding: MaxentFeatureEncodingI + :param encoding: An encoding that is used to convert the + featuresets that are given to the ``classify`` method into + joint-feature vectors, which are used by the maxent + classifier model. + + :type weights: list of float + :param weights: The feature weight vector for this classifier. + + :type logarithmic: bool + :param logarithmic: If false, then use non-logarithmic weights. + """ + self._encoding = encoding + self._weights = weights + self._logarithmic = logarithmic + # self._logarithmic = False + assert encoding.length() == len(weights) + + def labels(self): + return self._encoding.labels() + + def set_weights(self, new_weights): + """ + Set the feature weight vector for this classifier. + :param new_weights: The new feature weight vector. + :type new_weights: list of float + """ + self._weights = new_weights + assert self._encoding.length() == len(new_weights) + + def weights(self): + """ + :return: The feature weight vector for this classifier. 
+ :rtype: list of float + """ + return self._weights + + def classify(self, featureset): + return self.prob_classify(featureset).max() + + def prob_classify(self, featureset): + prob_dict = {} + for label in self._encoding.labels(): + feature_vector = self._encoding.encode(featureset, label) + + if self._logarithmic: + total = 0.0 + for (f_id, f_val) in feature_vector: + total += self._weights[f_id] * f_val + prob_dict[label] = total + + else: + prod = 1.0 + for (f_id, f_val) in feature_vector: + prod *= self._weights[f_id] ** f_val + prob_dict[label] = prod + + # Normalize the dictionary to give a probability distribution + return DictionaryProbDist(prob_dict, log=self._logarithmic, normalize=True) + + def explain(self, featureset, columns=4): + """ + Print a table showing the effect of each of the features in + the given feature set, and how they combine to determine the + probabilities of each label for that featureset. + """ + descr_width = 50 + TEMPLATE = " %-" + str(descr_width - 2) + "s%s%8.3f" + + pdist = self.prob_classify(featureset) + labels = sorted(pdist.samples(), key=pdist.prob, reverse=True) + labels = labels[:columns] + print( + " Feature".ljust(descr_width) + + "".join("%8s" % (("%s" % l)[:7]) for l in labels) + ) + print(" " + "-" * (descr_width - 2 + 8 * len(labels))) + sums = defaultdict(int) + for i, label in enumerate(labels): + feature_vector = self._encoding.encode(featureset, label) + feature_vector.sort( + key=lambda fid__: abs(self._weights[fid__[0]]), reverse=True + ) + for (f_id, f_val) in feature_vector: + if self._logarithmic: + score = self._weights[f_id] * f_val + else: + score = self._weights[f_id] ** f_val + descr = self._encoding.describe(f_id) + descr = descr.split(" and label is ")[0] # hack + descr += " (%s)" % f_val # hack + if len(descr) > 47: + descr = descr[:44] + "..." + print(TEMPLATE % (descr, i * 8 * " ", score)) + sums[label] += score + print(" " + "-" * (descr_width - 1 + 8 * len(labels))) + print( + " TOTAL:".ljust(descr_width) + "".join("%8.3f" % sums[l] for l in labels) + ) + print( + " PROBS:".ljust(descr_width) + + "".join("%8.3f" % pdist.prob(l) for l in labels) + ) + + def most_informative_features(self, n=10): + """ + Generates the ranked list of informative features from most to least. + """ + if hasattr(self, "_most_informative_features"): + return self._most_informative_features[:n] + else: + self._most_informative_features = sorted( + list(range(len(self._weights))), + key=lambda fid: abs(self._weights[fid]), + reverse=True, + ) + return self._most_informative_features[:n] + + def show_most_informative_features(self, n=10, show="all"): + """ + :param show: all, neg, or pos (for negative-only or positive-only) + :type show: str + :param n: The no. of top features + :type n: int + """ + # Use None the full list of ranked features. + fids = self.most_informative_features(None) + if show == "pos": + fids = [fid for fid in fids if self._weights[fid] > 0] + elif show == "neg": + fids = [fid for fid in fids if self._weights[fid] < 0] + for fid in fids[:n]: + print(f"{self._weights[fid]:8.3f} {self._encoding.describe(fid)}") + + def __repr__(self): + return "" % ( + len(self._encoding.labels()), + self._encoding.length(), + ) + + #: A list of the algorithm names that are accepted for the + #: ``train()`` method's ``algorithm`` parameter. 
+ ALGORITHMS = ["GIS", "IIS", "MEGAM", "TADM"] + + @classmethod + def train( + cls, + train_toks, + algorithm=None, + trace=3, + encoding=None, + labels=None, + gaussian_prior_sigma=0, + **cutoffs, + ): + """ + Train a new maxent classifier based on the given corpus of + training samples. This classifier will have its weights + chosen to maximize entropy while remaining empirically + consistent with the training corpus. + + :rtype: MaxentClassifier + :return: The new maxent classifier + + :type train_toks: list + :param train_toks: Training data, represented as a list of + pairs, the first member of which is a featureset, + and the second of which is a classification label. + + :type algorithm: str + :param algorithm: A case-insensitive string, specifying which + algorithm should be used to train the classifier. The + following algorithms are currently available. + + - Iterative Scaling Methods: Generalized Iterative Scaling (``'GIS'``), + Improved Iterative Scaling (``'IIS'``) + - External Libraries (requiring megam): + LM-BFGS algorithm, with training performed by Megam (``'megam'``) + + The default algorithm is ``'IIS'``. + + :type trace: int + :param trace: The level of diagnostic tracing output to produce. + Higher values produce more verbose output. + :type encoding: MaxentFeatureEncodingI + :param encoding: A feature encoding, used to convert featuresets + into feature vectors. If none is specified, then a + ``BinaryMaxentFeatureEncoding`` will be built based on the + features that are attested in the training corpus. + :type labels: list(str) + :param labels: The set of possible labels. If none is given, then + the set of all labels attested in the training data will be + used instead. + :param gaussian_prior_sigma: The sigma value for a gaussian + prior on model weights. Currently, this is supported by + ``megam``. For other algorithms, its value is ignored. + :param cutoffs: Arguments specifying various conditions under + which the training should be halted. (Some of the cutoff + conditions are not supported by some algorithms.) + + - ``max_iter=v``: Terminate after ``v`` iterations. + - ``min_ll=v``: Terminate after the negative average + log-likelihood drops under ``v``. + - ``min_lldelta=v``: Terminate if a single iteration improves + log likelihood by less than ``v``. + """ + if algorithm is None: + algorithm = "iis" + for key in cutoffs: + if key not in ( + "max_iter", + "min_ll", + "min_lldelta", + "max_acc", + "min_accdelta", + "count_cutoff", + "norm", + "explicit", + "bernoulli", + ): + raise TypeError("Unexpected keyword arg %r" % key) + algorithm = algorithm.lower() + if algorithm == "iis": + return train_maxent_classifier_with_iis( + train_toks, trace, encoding, labels, **cutoffs + ) + elif algorithm == "gis": + return train_maxent_classifier_with_gis( + train_toks, trace, encoding, labels, **cutoffs + ) + elif algorithm == "megam": + return train_maxent_classifier_with_megam( + train_toks, trace, encoding, labels, gaussian_prior_sigma, **cutoffs + ) + elif algorithm == "tadm": + kwargs = cutoffs + kwargs["trace"] = trace + kwargs["encoding"] = encoding + kwargs["labels"] = labels + kwargs["gaussian_prior_sigma"] = gaussian_prior_sigma + return TadmMaxentClassifier.train(train_toks, **kwargs) + else: + raise ValueError("Unknown algorithm %s" % algorithm) + + +#: Alias for MaxentClassifier. 
+ConditionalExponentialClassifier = MaxentClassifier + + +###################################################################### +# { Feature Encodings +###################################################################### + + +class MaxentFeatureEncodingI: + """ + A mapping that converts a set of input-feature values to a vector + of joint-feature values, given a label. This conversion is + necessary to translate featuresets into a format that can be used + by maximum entropy models. + + The set of joint-features used by a given encoding is fixed, and + each index in the generated joint-feature vectors corresponds to a + single joint-feature. The length of the generated joint-feature + vectors is therefore constant (for a given encoding). + + Because the joint-feature vectors generated by + ``MaxentFeatureEncodingI`` are typically very sparse, they are + represented as a list of ``(index, value)`` tuples, specifying the + value of each non-zero joint-feature. + + Feature encodings are generally created using the ``train()`` + method, which generates an appropriate encoding based on the + input-feature values and labels that are present in a given + corpus. + """ + + def encode(self, featureset, label): + """ + Given a (featureset, label) pair, return the corresponding + vector of joint-feature values. This vector is represented as + a list of ``(index, value)`` tuples, specifying the value of + each non-zero joint-feature. + + :type featureset: dict + :rtype: list(tuple(int, int)) + """ + raise NotImplementedError() + + def length(self): + """ + :return: The size of the fixed-length joint-feature vectors + that are generated by this encoding. + :rtype: int + """ + raise NotImplementedError() + + def labels(self): + """ + :return: A list of the \"known labels\" -- i.e., all labels + ``l`` such that ``self.encode(fs,l)`` can be a nonzero + joint-feature vector for some value of ``fs``. + :rtype: list + """ + raise NotImplementedError() + + def describe(self, fid): + """ + :return: A string describing the value of the joint-feature + whose index in the generated feature vectors is ``fid``. + :rtype: str + """ + raise NotImplementedError() + + def train(cls, train_toks): + """ + Construct and return new feature encoding, based on a given + training corpus ``train_toks``. + + :type train_toks: list(tuple(dict, str)) + :param train_toks: Training data, represented as a list of + pairs, the first member of which is a feature dictionary, + and the second of which is a classification label. + """ + raise NotImplementedError() + + +class FunctionBackedMaxentFeatureEncoding(MaxentFeatureEncodingI): + """ + A feature encoding that calls a user-supplied function to map a + given featureset/label pair to a sparse joint-feature vector. + """ + + def __init__(self, func, length, labels): + """ + Construct a new feature encoding based on the given function. + + :type func: (callable) + :param func: A function that takes two arguments, a featureset + and a label, and returns the sparse joint feature vector + that encodes them:: + + func(featureset, label) -> feature_vector + + This sparse joint feature vector (``feature_vector``) is a + list of ``(index,value)`` tuples. + + :type length: int + :param length: The size of the fixed-length joint-feature + vectors that are generated by this encoding. 
+ + :type labels: list + :param labels: A list of the \"known labels\" for this + encoding -- i.e., all labels ``l`` such that + ``self.encode(fs,l)`` can be a nonzero joint-feature vector + for some value of ``fs``. + """ + self._length = length + self._func = func + self._labels = labels + + def encode(self, featureset, label): + return self._func(featureset, label) + + def length(self): + return self._length + + def labels(self): + return self._labels + + def describe(self, fid): + return "no description available" + + +class BinaryMaxentFeatureEncoding(MaxentFeatureEncodingI): + """ + A feature encoding that generates vectors containing a binary + joint-features of the form: + + | joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label) + | { + | { 0 otherwise + + Where ``fname`` is the name of an input-feature, ``fval`` is a value + for that input-feature, and ``label`` is a label. + + Typically, these features are constructed based on a training + corpus, using the ``train()`` method. This method will create one + feature for each combination of ``fname``, ``fval``, and ``label`` + that occurs at least once in the training corpus. + + The ``unseen_features`` parameter can be used to add "unseen-value + features", which are used whenever an input feature has a value + that was not encountered in the training corpus. These features + have the form: + + | joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname]) + | { and l == label + | { + | { 0 otherwise + + Where ``is_unseen(fname, fval)`` is true if the encoding does not + contain any joint features that are true when ``fs[fname]==fval``. + + The ``alwayson_features`` parameter can be used to add "always-on + features", which have the form:: + + | joint_feat(fs, l) = { 1 if (l == label) + | { + | { 0 otherwise + + These always-on features allow the maxent model to directly model + the prior probabilities of each label. + """ + + def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False): + """ + :param labels: A list of the \"known labels\" for this encoding. + + :param mapping: A dictionary mapping from ``(fname,fval,label)`` + tuples to corresponding joint-feature indexes. These + indexes must be the set of integers from 0...len(mapping). + If ``mapping[fname,fval,label]=id``, then + ``self.encode(..., fname:fval, ..., label)[id]`` is 1; + otherwise, it is 0. + + :param unseen_features: If true, then include unseen value + features in the generated joint-feature vectors. + + :param alwayson_features: If true, then include always-on + features in the generated joint-feature vectors. + """ + if set(mapping.values()) != set(range(len(mapping))): + raise ValueError( + "Mapping values must be exactly the " + "set of integers from 0...len(mapping)" + ) + + self._labels = list(labels) + """A list of attested labels.""" + + self._mapping = mapping + """dict mapping from (fname,fval,label) -> fid""" + + self._length = len(mapping) + """The length of generated joint feature vectors.""" + + self._alwayson = None + """dict mapping from label -> fid""" + + self._unseen = None + """dict mapping from fname -> fid""" + + if alwayson_features: + self._alwayson = { + label: i + self._length for (i, label) in enumerate(labels) + } + self._length += len(self._alwayson) + + if unseen_features: + fnames = {fname for (fname, fval, label) in mapping} + self._unseen = {fname: i + self._length for (i, fname) in enumerate(fnames)} + self._length += len(fnames) + + def encode(self, featureset, label): + # Inherit docs. 
+ encoding = [] + + # Convert input-features to joint-features: + for fname, fval in featureset.items(): + # Known feature name & value: + if (fname, fval, label) in self._mapping: + encoding.append((self._mapping[fname, fval, label], 1)) + + # Otherwise, we might want to fire an "unseen-value feature". + elif self._unseen: + # Have we seen this fname/fval combination with any label? + for label2 in self._labels: + if (fname, fval, label2) in self._mapping: + break # we've seen this fname/fval combo + # We haven't -- fire the unseen-value feature + else: + if fname in self._unseen: + encoding.append((self._unseen[fname], 1)) + + # Add always-on features: + if self._alwayson and label in self._alwayson: + encoding.append((self._alwayson[label], 1)) + + return encoding + + def describe(self, f_id): + # Inherit docs. + if not isinstance(f_id, int): + raise TypeError("describe() expected an int") + try: + self._inv_mapping + except AttributeError: + self._inv_mapping = [-1] * len(self._mapping) + for (info, i) in self._mapping.items(): + self._inv_mapping[i] = info + + if f_id < len(self._mapping): + (fname, fval, label) = self._inv_mapping[f_id] + return f"{fname}=={fval!r} and label is {label!r}" + elif self._alwayson and f_id in self._alwayson.values(): + for (label, f_id2) in self._alwayson.items(): + if f_id == f_id2: + return "label is %r" % label + elif self._unseen and f_id in self._unseen.values(): + for (fname, f_id2) in self._unseen.items(): + if f_id == f_id2: + return "%s is unseen" % fname + else: + raise ValueError("Bad feature id") + + def labels(self): + # Inherit docs. + return self._labels + + def length(self): + # Inherit docs. + return self._length + + @classmethod + def train(cls, train_toks, count_cutoff=0, labels=None, **options): + """ + Construct and return new feature encoding, based on a given + training corpus ``train_toks``. See the class description + ``BinaryMaxentFeatureEncoding`` for a description of the + joint-features that will be included in this encoding. + + :type train_toks: list(tuple(dict, str)) + :param train_toks: Training data, represented as a list of + pairs, the first member of which is a feature dictionary, + and the second of which is a classification label. + + :type count_cutoff: int + :param count_cutoff: A cutoff value that is used to discard + rare joint-features. If a joint-feature's value is 1 + fewer than ``count_cutoff`` times in the training corpus, + then that joint-feature is not included in the generated + encoding. + + :type labels: list + :param labels: A list of labels that should be used by the + classifier. If not specified, then the set of labels + attested in ``train_toks`` will be used. + + :param options: Extra parameters for the constructor, such as + ``unseen_features`` and ``alwayson_features``. + """ + mapping = {} # maps (fname, fval, label) -> fid + seen_labels = set() # The set of labels we've encountered + count = defaultdict(int) # maps (fname, fval) -> count + + for (tok, label) in train_toks: + if labels and label not in labels: + raise ValueError("Unexpected label %s" % label) + seen_labels.add(label) + + # Record each of the features. + for (fname, fval) in tok.items(): + + # If a count cutoff is given, then only add a joint + # feature once the corresponding (fname, fval, label) + # tuple exceeds that cutoff. 
+ count[fname, fval] += 1 + if count[fname, fval] >= count_cutoff: + if (fname, fval, label) not in mapping: + mapping[fname, fval, label] = len(mapping) + + if labels is None: + labels = seen_labels + return cls(labels, mapping, **options) + + +class GISEncoding(BinaryMaxentFeatureEncoding): + """ + A binary feature encoding which adds one new joint-feature to the + joint-features defined by ``BinaryMaxentFeatureEncoding``: a + correction feature, whose value is chosen to ensure that the + sparse vector always sums to a constant non-negative number. This + new feature is used to ensure two preconditions for the GIS + training algorithm: + + - At least one feature vector index must be nonzero for every + token. + - The feature vector must sum to a constant non-negative number + for every token. + """ + + def __init__( + self, labels, mapping, unseen_features=False, alwayson_features=False, C=None + ): + """ + :param C: The correction constant. The value of the correction + feature is based on this value. In particular, its value is + ``C - sum([v for (f,v) in encoding])``. + :seealso: ``BinaryMaxentFeatureEncoding.__init__`` + """ + BinaryMaxentFeatureEncoding.__init__( + self, labels, mapping, unseen_features, alwayson_features + ) + if C is None: + C = len({fname for (fname, fval, label) in mapping}) + 1 + self._C = C + + @property + def C(self): + """The non-negative constant that all encoded feature vectors + will sum to.""" + return self._C + + def encode(self, featureset, label): + # Get the basic encoding. + encoding = BinaryMaxentFeatureEncoding.encode(self, featureset, label) + base_length = BinaryMaxentFeatureEncoding.length(self) + + # Add a correction feature. + total = sum(v for (f, v) in encoding) + if total >= self._C: + raise ValueError("Correction feature is not high enough!") + encoding.append((base_length, self._C - total)) + + # Return the result + return encoding + + def length(self): + return BinaryMaxentFeatureEncoding.length(self) + 1 + + def describe(self, f_id): + if f_id == BinaryMaxentFeatureEncoding.length(self): + return "Correction feature (%s)" % self._C + else: + return BinaryMaxentFeatureEncoding.describe(self, f_id) + + +class TadmEventMaxentFeatureEncoding(BinaryMaxentFeatureEncoding): + def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False): + self._mapping = OrderedDict(mapping) + self._label_mapping = OrderedDict() + BinaryMaxentFeatureEncoding.__init__( + self, labels, self._mapping, unseen_features, alwayson_features + ) + + def encode(self, featureset, label): + encoding = [] + for feature, value in featureset.items(): + if (feature, label) not in self._mapping: + self._mapping[(feature, label)] = len(self._mapping) + if value not in self._label_mapping: + if not isinstance(value, int): + self._label_mapping[value] = len(self._label_mapping) + else: + self._label_mapping[value] = value + encoding.append( + (self._mapping[(feature, label)], self._label_mapping[value]) + ) + return encoding + + def labels(self): + return self._labels + + def describe(self, fid): + for (feature, label) in self._mapping: + if self._mapping[(feature, label)] == fid: + return (feature, label) + + def length(self): + return len(self._mapping) + + @classmethod + def train(cls, train_toks, count_cutoff=0, labels=None, **options): + mapping = OrderedDict() + if not labels: + labels = [] + + # This gets read twice, so compute the values in case it's lazy. 
+ train_toks = list(train_toks) + + for (featureset, label) in train_toks: + if label not in labels: + labels.append(label) + + for (featureset, label) in train_toks: + for label in labels: + for feature in featureset: + if (feature, label) not in mapping: + mapping[(feature, label)] = len(mapping) + + return cls(labels, mapping, **options) + + +class TypedMaxentFeatureEncoding(MaxentFeatureEncodingI): + """ + A feature encoding that generates vectors containing integer, + float and binary joint-features of the form: + + Binary (for string and boolean features): + + | joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label) + | { + | { 0 otherwise + + Value (for integer and float features): + + | joint_feat(fs, l) = { fval if (fs[fname] == type(fval)) + | { and (l == label) + | { + | { not encoded otherwise + + Where ``fname`` is the name of an input-feature, ``fval`` is a value + for that input-feature, and ``label`` is a label. + + Typically, these features are constructed based on a training + corpus, using the ``train()`` method. + + For string and boolean features [type(fval) not in (int, float)] + this method will create one feature for each combination of + ``fname``, ``fval``, and ``label`` that occurs at least once in the + training corpus. + + For integer and float features [type(fval) in (int, float)] this + method will create one feature for each combination of ``fname`` + and ``label`` that occurs at least once in the training corpus. + + For binary features the ``unseen_features`` parameter can be used + to add "unseen-value features", which are used whenever an input + feature has a value that was not encountered in the training + corpus. These features have the form: + + | joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname]) + | { and l == label + | { + | { 0 otherwise + + Where ``is_unseen(fname, fval)`` is true if the encoding does not + contain any joint features that are true when ``fs[fname]==fval``. + + The ``alwayson_features`` parameter can be used to add "always-on + features", which have the form: + + | joint_feat(fs, l) = { 1 if (l == label) + | { + | { 0 otherwise + + These always-on features allow the maxent model to directly model + the prior probabilities of each label. + """ + + def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False): + """ + :param labels: A list of the \"known labels\" for this encoding. + + :param mapping: A dictionary mapping from ``(fname,fval,label)`` + tuples to corresponding joint-feature indexes. These + indexes must be the set of integers from 0...len(mapping). + If ``mapping[fname,fval,label]=id``, then + ``self.encode({..., fname:fval, ...``, label)[id]} is 1; + otherwise, it is 0. + + :param unseen_features: If true, then include unseen value + features in the generated joint-feature vectors. + + :param alwayson_features: If true, then include always-on + features in the generated joint-feature vectors. 
+ """ + if set(mapping.values()) != set(range(len(mapping))): + raise ValueError( + "Mapping values must be exactly the " + "set of integers from 0...len(mapping)" + ) + + self._labels = list(labels) + """A list of attested labels.""" + + self._mapping = mapping + """dict mapping from (fname,fval,label) -> fid""" + + self._length = len(mapping) + """The length of generated joint feature vectors.""" + + self._alwayson = None + """dict mapping from label -> fid""" + + self._unseen = None + """dict mapping from fname -> fid""" + + if alwayson_features: + self._alwayson = { + label: i + self._length for (i, label) in enumerate(labels) + } + self._length += len(self._alwayson) + + if unseen_features: + fnames = {fname for (fname, fval, label) in mapping} + self._unseen = {fname: i + self._length for (i, fname) in enumerate(fnames)} + self._length += len(fnames) + + def encode(self, featureset, label): + # Inherit docs. + encoding = [] + + # Convert input-features to joint-features: + for fname, fval in featureset.items(): + if isinstance(fval, (int, float)): + # Known feature name & value: + if (fname, type(fval), label) in self._mapping: + encoding.append((self._mapping[fname, type(fval), label], fval)) + else: + # Known feature name & value: + if (fname, fval, label) in self._mapping: + encoding.append((self._mapping[fname, fval, label], 1)) + + # Otherwise, we might want to fire an "unseen-value feature". + elif self._unseen: + # Have we seen this fname/fval combination with any label? + for label2 in self._labels: + if (fname, fval, label2) in self._mapping: + break # we've seen this fname/fval combo + # We haven't -- fire the unseen-value feature + else: + if fname in self._unseen: + encoding.append((self._unseen[fname], 1)) + + # Add always-on features: + if self._alwayson and label in self._alwayson: + encoding.append((self._alwayson[label], 1)) + + return encoding + + def describe(self, f_id): + # Inherit docs. + if not isinstance(f_id, int): + raise TypeError("describe() expected an int") + try: + self._inv_mapping + except AttributeError: + self._inv_mapping = [-1] * len(self._mapping) + for (info, i) in self._mapping.items(): + self._inv_mapping[i] = info + + if f_id < len(self._mapping): + (fname, fval, label) = self._inv_mapping[f_id] + return f"{fname}=={fval!r} and label is {label!r}" + elif self._alwayson and f_id in self._alwayson.values(): + for (label, f_id2) in self._alwayson.items(): + if f_id == f_id2: + return "label is %r" % label + elif self._unseen and f_id in self._unseen.values(): + for (fname, f_id2) in self._unseen.items(): + if f_id == f_id2: + return "%s is unseen" % fname + else: + raise ValueError("Bad feature id") + + def labels(self): + # Inherit docs. + return self._labels + + def length(self): + # Inherit docs. + return self._length + + @classmethod + def train(cls, train_toks, count_cutoff=0, labels=None, **options): + """ + Construct and return new feature encoding, based on a given + training corpus ``train_toks``. See the class description + ``TypedMaxentFeatureEncoding`` for a description of the + joint-features that will be included in this encoding. + + Note: recognized feature values types are (int, float), over + types are interpreted as regular binary features. + + :type train_toks: list(tuple(dict, str)) + :param train_toks: Training data, represented as a list of + pairs, the first member of which is a feature dictionary, + and the second of which is a classification label. 
+ + :type count_cutoff: int + :param count_cutoff: A cutoff value that is used to discard + rare joint-features. If a joint-feature's value is 1 + fewer than ``count_cutoff`` times in the training corpus, + then that joint-feature is not included in the generated + encoding. + + :type labels: list + :param labels: A list of labels that should be used by the + classifier. If not specified, then the set of labels + attested in ``train_toks`` will be used. + + :param options: Extra parameters for the constructor, such as + ``unseen_features`` and ``alwayson_features``. + """ + mapping = {} # maps (fname, fval, label) -> fid + seen_labels = set() # The set of labels we've encountered + count = defaultdict(int) # maps (fname, fval) -> count + + for (tok, label) in train_toks: + if labels and label not in labels: + raise ValueError("Unexpected label %s" % label) + seen_labels.add(label) + + # Record each of the features. + for (fname, fval) in tok.items(): + if type(fval) in (int, float): + fval = type(fval) + # If a count cutoff is given, then only add a joint + # feature once the corresponding (fname, fval, label) + # tuple exceeds that cutoff. + count[fname, fval] += 1 + if count[fname, fval] >= count_cutoff: + if (fname, fval, label) not in mapping: + mapping[fname, fval, label] = len(mapping) + + if labels is None: + labels = seen_labels + return cls(labels, mapping, **options) + + +###################################################################### +# { Classifier Trainer: Generalized Iterative Scaling +###################################################################### + + +def train_maxent_classifier_with_gis( + train_toks, trace=3, encoding=None, labels=None, **cutoffs +): + """ + Train a new ``ConditionalExponentialClassifier``, using the given + training samples, using the Generalized Iterative Scaling + algorithm. This ``ConditionalExponentialClassifier`` will encode + the model that maximizes entropy from all the models that are + empirically consistent with ``train_toks``. + + :see: ``train_maxent_classifier()`` for parameter descriptions. + """ + cutoffs.setdefault("max_iter", 100) + cutoffchecker = CutoffChecker(cutoffs) + + # Construct an encoding from the training data. + if encoding is None: + encoding = GISEncoding.train(train_toks, labels=labels) + + if not hasattr(encoding, "C"): + raise TypeError( + "The GIS algorithm requires an encoding that " + "defines C (e.g., GISEncoding)." + ) + + # Cinv is the inverse of the sum of each joint feature vector. + # This controls the learning rate: higher Cinv (or lower C) gives + # faster learning. + Cinv = 1.0 / encoding.C + + # Count how many times each feature occurs in the training data. + empirical_fcount = calculate_empirical_fcount(train_toks, encoding) + + # Check for any features that are not attested in train_toks. + unattested = set(numpy.nonzero(empirical_fcount == 0)[0]) + + # Build the classifier. Start with weight=0 for each attested + # feature, and weight=-infinity for each unattested feature. + weights = numpy.zeros(len(empirical_fcount), "d") + for fid in unattested: + weights[fid] = numpy.NINF + classifier = ConditionalExponentialClassifier(encoding, weights) + + # Take the log of the empirical fcount. + log_empirical_fcount = numpy.log2(empirical_fcount) + del empirical_fcount + + if trace > 0: + print(" ==> Training (%d iterations)" % cutoffs["max_iter"]) + if trace > 2: + print() + print(" Iteration Log Likelihood Accuracy") + print(" ---------------------------------------") + + # Train the classifier. 
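+    # Each iteration nudges every weight by Cinv * (log2 empirical count -
+    # log2 estimated count), i.e. the classic Generalized Iterative Scaling update.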
+ try: + while True: + if trace > 2: + ll = cutoffchecker.ll or log_likelihood(classifier, train_toks) + acc = cutoffchecker.acc or accuracy(classifier, train_toks) + iternum = cutoffchecker.iter + print(" %9d %14.5f %9.3f" % (iternum, ll, acc)) + + # Use the model to estimate the number of times each + # feature should occur in the training data. + estimated_fcount = calculate_estimated_fcount( + classifier, train_toks, encoding + ) + + # Take the log of estimated fcount (avoid taking log(0).) + for fid in unattested: + estimated_fcount[fid] += 1 + log_estimated_fcount = numpy.log2(estimated_fcount) + del estimated_fcount + + # Update the classifier weights + weights = classifier.weights() + weights += (log_empirical_fcount - log_estimated_fcount) * Cinv + classifier.set_weights(weights) + + # Check the log-likelihood & accuracy cutoffs. + if cutoffchecker.check(classifier, train_toks): + break + + except KeyboardInterrupt: + print(" Training stopped: keyboard interrupt") + except: + raise + + if trace > 2: + ll = log_likelihood(classifier, train_toks) + acc = accuracy(classifier, train_toks) + print(f" Final {ll:14.5f} {acc:9.3f}") + + # Return the classifier. + return classifier + + +def calculate_empirical_fcount(train_toks, encoding): + fcount = numpy.zeros(encoding.length(), "d") + + for tok, label in train_toks: + for (index, val) in encoding.encode(tok, label): + fcount[index] += val + + return fcount + + +def calculate_estimated_fcount(classifier, train_toks, encoding): + fcount = numpy.zeros(encoding.length(), "d") + + for tok, label in train_toks: + pdist = classifier.prob_classify(tok) + for label in pdist.samples(): + prob = pdist.prob(label) + for (fid, fval) in encoding.encode(tok, label): + fcount[fid] += prob * fval + + return fcount + + +###################################################################### +# { Classifier Trainer: Improved Iterative Scaling +###################################################################### + + +def train_maxent_classifier_with_iis( + train_toks, trace=3, encoding=None, labels=None, **cutoffs +): + """ + Train a new ``ConditionalExponentialClassifier``, using the given + training samples, using the Improved Iterative Scaling algorithm. + This ``ConditionalExponentialClassifier`` will encode the model + that maximizes entropy from all the models that are empirically + consistent with ``train_toks``. + + :see: ``train_maxent_classifier()`` for parameter descriptions. + """ + cutoffs.setdefault("max_iter", 100) + cutoffchecker = CutoffChecker(cutoffs) + + # Construct an encoding from the training data. + if encoding is None: + encoding = BinaryMaxentFeatureEncoding.train(train_toks, labels=labels) + + # Count how many times each feature occurs in the training data. + empirical_ffreq = calculate_empirical_fcount(train_toks, encoding) / len(train_toks) + + # Find the nf map, and related variables nfarray and nfident. + # nf is the sum of the features for a given labeled text. + # nfmap compresses this sparse set of values to a dense list. + # nfarray performs the reverse operation. nfident is + # nfarray multiplied by an identity matrix. + nfmap = calculate_nfmap(train_toks, encoding) + nfarray = numpy.array(sorted(nfmap, key=nfmap.__getitem__), "d") + nftranspose = numpy.reshape(nfarray, (len(nfarray), 1)) + + # Check for any features that are not attested in train_toks. + unattested = set(numpy.nonzero(empirical_ffreq == 0)[0]) + + # Build the classifier. 
Start with weight=0 for each attested + # feature, and weight=-infinity for each unattested feature. + weights = numpy.zeros(len(empirical_ffreq), "d") + for fid in unattested: + weights[fid] = numpy.NINF + classifier = ConditionalExponentialClassifier(encoding, weights) + + if trace > 0: + print(" ==> Training (%d iterations)" % cutoffs["max_iter"]) + if trace > 2: + print() + print(" Iteration Log Likelihood Accuracy") + print(" ---------------------------------------") + + # Train the classifier. + try: + while True: + if trace > 2: + ll = cutoffchecker.ll or log_likelihood(classifier, train_toks) + acc = cutoffchecker.acc or accuracy(classifier, train_toks) + iternum = cutoffchecker.iter + print(" %9d %14.5f %9.3f" % (iternum, ll, acc)) + + # Calculate the deltas for this iteration, using Newton's method. + deltas = calculate_deltas( + train_toks, + classifier, + unattested, + empirical_ffreq, + nfmap, + nfarray, + nftranspose, + encoding, + ) + + # Use the deltas to update our weights. + weights = classifier.weights() + weights += deltas + classifier.set_weights(weights) + + # Check the log-likelihood & accuracy cutoffs. + if cutoffchecker.check(classifier, train_toks): + break + + except KeyboardInterrupt: + print(" Training stopped: keyboard interrupt") + except: + raise + + if trace > 2: + ll = log_likelihood(classifier, train_toks) + acc = accuracy(classifier, train_toks) + print(f" Final {ll:14.5f} {acc:9.3f}") + + # Return the classifier. + return classifier + + +def calculate_nfmap(train_toks, encoding): + """ + Construct a map that can be used to compress ``nf`` (which is + typically sparse). + + *nf(feature_vector)* is the sum of the feature values for + *feature_vector*. + + This represents the number of features that are active for a + given labeled text. This method finds all values of *nf(t)* + that are attested for at least one token in the given list of + training tokens; and constructs a dictionary mapping these + attested values to a continuous range *0...N*. For example, + if the only values of *nf()* that were attested were 3, 5, and + 7, then ``_nfmap`` might return the dictionary ``{3:0, 5:1, 7:2}``. + + :return: A map that can be used to compress ``nf`` to a dense + vector. + :rtype: dict(int -> int) + """ + # Map from nf to indices. This allows us to use smaller arrays. + nfset = set() + for tok, _ in train_toks: + for label in encoding.labels(): + nfset.add(sum(val for (id, val) in encoding.encode(tok, label))) + return {nf: i for (i, nf) in enumerate(nfset)} + + +def calculate_deltas( + train_toks, + classifier, + unattested, + ffreq_empirical, + nfmap, + nfarray, + nftranspose, + encoding, +): + r""" + Calculate the update values for the classifier weights for + this iteration of IIS. These update weights are the value of + ``delta`` that solves the equation:: + + ffreq_empirical[i] + = + SUM[fs,l] (classifier.prob_classify(fs).prob(l) * + feature_vector(fs,l)[i] * + exp(delta[i] * nf(feature_vector(fs,l)))) + + Where: + - *(fs,l)* is a (featureset, label) tuple from ``train_toks`` + - *feature_vector(fs,l)* = ``encoding.encode(fs,l)`` + - *nf(vector)* = ``sum([val for (id,val) in vector])`` + + This method uses Newton's method to solve this equation for + *delta[i]*. 
In particular, it starts with a guess of + ``delta[i]`` = 1; and iteratively updates ``delta`` with: + + | delta[i] -= (ffreq_empirical[i] - sum1[i])/(-sum2[i]) + + until convergence, where *sum1* and *sum2* are defined as: + + | sum1[i](delta) = SUM[fs,l] f[i](fs,l,delta) + | sum2[i](delta) = SUM[fs,l] (f[i](fs,l,delta).nf(feature_vector(fs,l))) + | f[i](fs,l,delta) = (classifier.prob_classify(fs).prob(l) . + | feature_vector(fs,l)[i] . + | exp(delta[i] . nf(feature_vector(fs,l)))) + + Note that *sum1* and *sum2* depend on ``delta``; so they need + to be re-computed each iteration. + + The variables ``nfmap``, ``nfarray``, and ``nftranspose`` are + used to generate a dense encoding for *nf(ltext)*. This + allows ``_deltas`` to calculate *sum1* and *sum2* using + matrices, which yields a significant performance improvement. + + :param train_toks: The set of training tokens. + :type train_toks: list(tuple(dict, str)) + :param classifier: The current classifier. + :type classifier: ClassifierI + :param ffreq_empirical: An array containing the empirical + frequency for each feature. The *i*\ th element of this + array is the empirical frequency for feature *i*. + :type ffreq_empirical: sequence of float + :param unattested: An array that is 1 for features that are + not attested in the training data; and 0 for features that + are attested. In other words, ``unattested[i]==0`` iff + ``ffreq_empirical[i]==0``. + :type unattested: sequence of int + :param nfmap: A map that can be used to compress ``nf`` to a dense + vector. + :type nfmap: dict(int -> int) + :param nfarray: An array that can be used to uncompress ``nf`` + from a dense vector. + :type nfarray: array(float) + :param nftranspose: The transpose of ``nfarray`` + :type nftranspose: array(float) + """ + # These parameters control when we decide that we've + # converged. It probably should be possible to set these + # manually, via keyword arguments to train. + NEWTON_CONVERGE = 1e-12 + MAX_NEWTON = 300 + + deltas = numpy.ones(encoding.length(), "d") + + # Precompute the A matrix: + # A[nf][id] = sum ( p(fs) * p(label|fs) * f(fs,label) ) + # over all label,fs s.t. num_features[label,fs]=nf + A = numpy.zeros((len(nfmap), encoding.length()), "d") + + for tok, label in train_toks: + dist = classifier.prob_classify(tok) + + for label in encoding.labels(): + # Generate the feature vector + feature_vector = encoding.encode(tok, label) + # Find the number of active features + nf = sum(val for (id, val) in feature_vector) + # Update the A matrix + for (id, val) in feature_vector: + A[nfmap[nf], id] += dist.prob(label) * val + A /= len(train_toks) + + # Iteratively solve for delta. Use the following variables: + # - nf_delta[x][y] = nfarray[x] * delta[y] + # - exp_nf_delta[x][y] = exp(nf[x] * delta[y]) + # - nf_exp_nf_delta[x][y] = nf[x] * exp(nf[x] * delta[y]) + # - sum1[i][nf] = sum p(fs)p(label|fs)f[i](label,fs) + # exp(delta[i]nf) + # - sum2[i][nf] = sum p(fs)p(label|fs)f[i](label,fs) + # nf exp(delta[i]nf) + for rangenum in range(MAX_NEWTON): + nf_delta = numpy.outer(nfarray, deltas) + exp_nf_delta = 2**nf_delta + nf_exp_nf_delta = nftranspose * exp_nf_delta + sum1 = numpy.sum(exp_nf_delta * A, axis=0) + sum2 = numpy.sum(nf_exp_nf_delta * A, axis=0) + + # Avoid division by zero. + for fid in unattested: + sum2[fid] += 1 + + # Update the deltas. + deltas -= (ffreq_empirical - sum1) / -sum2 + + # We can stop once we converge. 
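+        # n_error: remaining gap between empirical and estimated feature
+        # frequencies, measured relative to the size of the current deltas.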
+ n_error = numpy.sum(abs(ffreq_empirical - sum1)) / numpy.sum(abs(deltas)) + if n_error < NEWTON_CONVERGE: + return deltas + + return deltas + + +###################################################################### +# { Classifier Trainer: megam +###################################################################### + +# [xx] possible extension: add support for using implicit file format; +# this would need to put requirements on what encoding is used. But +# we may need this for other maxent classifier trainers that require +# implicit formats anyway. +def train_maxent_classifier_with_megam( + train_toks, trace=3, encoding=None, labels=None, gaussian_prior_sigma=0, **kwargs +): + """ + Train a new ``ConditionalExponentialClassifier``, using the given + training samples, using the external ``megam`` library. This + ``ConditionalExponentialClassifier`` will encode the model that + maximizes entropy from all the models that are empirically + consistent with ``train_toks``. + + :see: ``train_maxent_classifier()`` for parameter descriptions. + :see: ``nltk.classify.megam`` + """ + + explicit = True + bernoulli = True + if "explicit" in kwargs: + explicit = kwargs["explicit"] + if "bernoulli" in kwargs: + bernoulli = kwargs["bernoulli"] + + # Construct an encoding from the training data. + if encoding is None: + # Count cutoff can also be controlled by megam with the -minfc + # option. Not sure where the best place for it is. + count_cutoff = kwargs.get("count_cutoff", 0) + encoding = BinaryMaxentFeatureEncoding.train( + train_toks, count_cutoff, labels=labels, alwayson_features=True + ) + elif labels is not None: + raise ValueError("Specify encoding or labels, not both") + + # Write a training file for megam. + try: + fd, trainfile_name = tempfile.mkstemp(prefix="nltk-") + with open(trainfile_name, "w") as trainfile: + write_megam_file( + train_toks, encoding, trainfile, explicit=explicit, bernoulli=bernoulli + ) + os.close(fd) + except (OSError, ValueError) as e: + raise ValueError("Error while creating megam training file: %s" % e) from e + + # Run megam on the training file. + options = [] + options += ["-nobias", "-repeat", "10"] + if explicit: + options += ["-explicit"] + if not bernoulli: + options += ["-fvals"] + if gaussian_prior_sigma: + # Lambda is just the precision of the Gaussian prior, i.e. it's the + # inverse variance, so the parameter conversion is 1.0/sigma**2. + # See https://users.umiacs.umd.edu/~hal/docs/daume04cg-bfgs.pdf + inv_variance = 1.0 / gaussian_prior_sigma**2 + else: + inv_variance = 0 + options += ["-lambda", "%.2f" % inv_variance, "-tune"] + if trace < 3: + options += ["-quiet"] + if "max_iter" in kwargs: + options += ["-maxi", "%s" % kwargs["max_iter"]] + if "ll_delta" in kwargs: + # [xx] this is actually a perplexity delta, not a log + # likelihood delta + options += ["-dpp", "%s" % abs(kwargs["ll_delta"])] + if hasattr(encoding, "cost"): + options += ["-multilabel"] # each possible la + options += ["multiclass", trainfile_name] + stdout = call_megam(options) + # print('./megam_i686.opt ', ' '.join(options)) + # Delete the training file + try: + os.remove(trainfile_name) + except OSError as e: + print(f"Warning: unable to delete {trainfile_name}: {e}") + + # Parse the generated weight vector. + weights = parse_megam_weights(stdout, encoding.length(), explicit) + + # Convert from base-e to base-2 weights. 
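+    # (The classifier sums weights as base-2 log values, so rescale megam's
+    # natural-log weights by log2(e).)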
+ weights *= numpy.log2(numpy.e) + + # Build the classifier + return MaxentClassifier(encoding, weights) + + +###################################################################### +# { Classifier Trainer: tadm +###################################################################### + + +class TadmMaxentClassifier(MaxentClassifier): + @classmethod + def train(cls, train_toks, **kwargs): + algorithm = kwargs.get("algorithm", "tao_lmvm") + trace = kwargs.get("trace", 3) + encoding = kwargs.get("encoding", None) + labels = kwargs.get("labels", None) + sigma = kwargs.get("gaussian_prior_sigma", 0) + count_cutoff = kwargs.get("count_cutoff", 0) + max_iter = kwargs.get("max_iter") + ll_delta = kwargs.get("min_lldelta") + + # Construct an encoding from the training data. + if not encoding: + encoding = TadmEventMaxentFeatureEncoding.train( + train_toks, count_cutoff, labels=labels + ) + + trainfile_fd, trainfile_name = tempfile.mkstemp( + prefix="nltk-tadm-events-", suffix=".gz" + ) + weightfile_fd, weightfile_name = tempfile.mkstemp(prefix="nltk-tadm-weights-") + + trainfile = gzip_open_unicode(trainfile_name, "w") + write_tadm_file(train_toks, encoding, trainfile) + trainfile.close() + + options = [] + options.extend(["-monitor"]) + options.extend(["-method", algorithm]) + if sigma: + options.extend(["-l2", "%.6f" % sigma**2]) + if max_iter: + options.extend(["-max_it", "%d" % max_iter]) + if ll_delta: + options.extend(["-fatol", "%.6f" % abs(ll_delta)]) + options.extend(["-events_in", trainfile_name]) + options.extend(["-params_out", weightfile_name]) + if trace < 3: + options.extend(["2>&1"]) + else: + options.extend(["-summary"]) + + call_tadm(options) + + with open(weightfile_name) as weightfile: + weights = parse_tadm_weights(weightfile) + + os.remove(trainfile_name) + os.remove(weightfile_name) + + # Convert from base-e to base-2 weights. + weights *= numpy.log2(numpy.e) + + # Build the classifier + return cls(encoding, weights) + + +###################################################################### +# { Demo +###################################################################### +def demo(): + from nltk.classify.util import names_demo + + classifier = names_demo(MaxentClassifier.train) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/classify/megam.py b/venv/lib/python3.10/site-packages/nltk/classify/megam.py new file mode 100644 index 0000000000000000000000000000000000000000..ce9690cecad7cb767cc95925fc1b12026200720e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/megam.py @@ -0,0 +1,184 @@ +# Natural Language Toolkit: Interface to Megam Classifier +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A set of functions used to interface with the external megam_ maxent +optimization package. Before megam can be used, you should tell NLTK where it +can find the megam binary, using the ``config_megam()`` function. Typical +usage: + + >>> from nltk.classify import megam + >>> megam.config_megam() # pass path to megam if not found in PATH # doctest: +SKIP + [Found megam: ...] + +Use with MaxentClassifier. Example below, see MaxentClassifier documentation +for details. + + nltk.classify.MaxentClassifier.train(corpus, 'megam') + +.. 
_megam: https://www.umiacs.umd.edu/~hal/megam/index.html +""" +import subprocess + +from nltk.internals import find_binary + +try: + import numpy +except ImportError: + numpy = None + +###################################################################### +# { Configuration +###################################################################### + +_megam_bin = None + + +def config_megam(bin=None): + """ + Configure NLTK's interface to the ``megam`` maxent optimization + package. + + :param bin: The full path to the ``megam`` binary. If not specified, + then nltk will search the system for a ``megam`` binary; and if + one is not found, it will raise a ``LookupError`` exception. + :type bin: str + """ + global _megam_bin + _megam_bin = find_binary( + "megam", + bin, + env_vars=["MEGAM"], + binary_names=["megam.opt", "megam", "megam_686", "megam_i686.opt"], + url="https://www.umiacs.umd.edu/~hal/megam/index.html", + ) + + +###################################################################### +# { Megam Interface Functions +###################################################################### + + +def write_megam_file(train_toks, encoding, stream, bernoulli=True, explicit=True): + """ + Generate an input file for ``megam`` based on the given corpus of + classified tokens. + + :type train_toks: list(tuple(dict, str)) + :param train_toks: Training data, represented as a list of + pairs, the first member of which is a feature dictionary, + and the second of which is a classification label. + + :type encoding: MaxentFeatureEncodingI + :param encoding: A feature encoding, used to convert featuresets + into feature vectors. May optionally implement a cost() method + in order to assign different costs to different class predictions. + + :type stream: stream + :param stream: The stream to which the megam input file should be + written. + + :param bernoulli: If true, then use the 'bernoulli' format. I.e., + all joint features have binary values, and are listed iff they + are true. Otherwise, list feature values explicitly. If + ``bernoulli=False``, then you must call ``megam`` with the + ``-fvals`` option. + + :param explicit: If true, then use the 'explicit' format. I.e., + list the features that would fire for any of the possible + labels, for each token. If ``explicit=True``, then you must + call ``megam`` with the ``-explicit`` option. + """ + # Look up the set of labels. + labels = encoding.labels() + labelnum = {label: i for (i, label) in enumerate(labels)} + + # Write the file, which contains one line per instance. + for featureset, label in train_toks: + # First, the instance number (or, in the weighted multiclass case, the cost of each label). + if hasattr(encoding, "cost"): + stream.write( + ":".join(str(encoding.cost(featureset, label, l)) for l in labels) + ) + else: + stream.write("%d" % labelnum[label]) + + # For implicit file formats, just list the features that fire + # for this instance's actual label. + if not explicit: + _write_megam_features(encoding.encode(featureset, label), stream, bernoulli) + + # For explicit formats, list the features that would fire for + # any of the possible labels. + else: + for l in labels: + stream.write(" #") + _write_megam_features(encoding.encode(featureset, l), stream, bernoulli) + + # End of the instance. + stream.write("\n") + + +def parse_megam_weights(s, features_count, explicit=True): + """ + Given the stdout output generated by ``megam`` when training a + model, return a ``numpy`` array containing the corresponding weight + vector. 
This function does not currently handle bias features. + """ + if numpy is None: + raise ValueError("This function requires that numpy be installed") + assert explicit, "non-explicit not supported yet" + lines = s.strip().split("\n") + weights = numpy.zeros(features_count, "d") + for line in lines: + if line.strip(): + fid, weight = line.split() + weights[int(fid)] = float(weight) + return weights + + +def _write_megam_features(vector, stream, bernoulli): + if not vector: + raise ValueError( + "MEGAM classifier requires the use of an " "always-on feature." + ) + for (fid, fval) in vector: + if bernoulli: + if fval == 1: + stream.write(" %s" % fid) + elif fval != 0: + raise ValueError( + "If bernoulli=True, then all" "features must be binary." + ) + else: + stream.write(f" {fid} {fval}") + + +def call_megam(args): + """ + Call the ``megam`` binary with the given arguments. + """ + if isinstance(args, str): + raise TypeError("args should be a list of strings") + if _megam_bin is None: + config_megam() + + # Call megam via a subprocess + cmd = [_megam_bin] + args + p = subprocess.Popen(cmd, stdout=subprocess.PIPE) + (stdout, stderr) = p.communicate() + + # Check the return code. + if p.returncode != 0: + print() + print(stderr) + raise OSError("megam command failed!") + + if isinstance(stdout, str): + return stdout + else: + return stdout.decode("utf-8") diff --git a/venv/lib/python3.10/site-packages/nltk/classify/naivebayes.py b/venv/lib/python3.10/site-packages/nltk/classify/naivebayes.py new file mode 100644 index 0000000000000000000000000000000000000000..caf4b73dd6a13ddcbc1f078441ec06254be4a6d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/naivebayes.py @@ -0,0 +1,260 @@ +# Natural Language Toolkit: Naive Bayes Classifiers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A classifier based on the Naive Bayes algorithm. In order to find the +probability for a label, this algorithm first uses the Bayes rule to +express P(label|features) in terms of P(label) and P(features|label): + +| P(label) * P(features|label) +| P(label|features) = ------------------------------ +| P(features) + +The algorithm then makes the 'naive' assumption that all features are +independent, given the label: + +| P(label) * P(f1|label) * ... * P(fn|label) +| P(label|features) = -------------------------------------------- +| P(features) + +Rather than computing P(features) explicitly, the algorithm just +calculates the numerator for each label, and normalizes them so they +sum to one: + +| P(label) * P(f1|label) * ... * P(fn|label) +| P(label|features) = -------------------------------------------- +| SUM[l]( P(l) * P(f1|l) * ... * P(fn|l) ) +""" + +from collections import defaultdict + +from nltk.classify.api import ClassifierI +from nltk.probability import DictionaryProbDist, ELEProbDist, FreqDist, sum_logs + +##////////////////////////////////////////////////////// +## Naive Bayes Classifier +##////////////////////////////////////////////////////// + + +class NaiveBayesClassifier(ClassifierI): + """ + A Naive Bayes classifier. Naive Bayes classifiers are + paramaterized by two probability distributions: + + - P(label) gives the probability that an input will receive each + label, given no information about the input's features. + + - P(fname=fval|label) gives the probability that a given feature + (fname) will receive a given value (fval), given that the + label (label). 
+ + If the classifier encounters an input with a feature that has + never been seen with any label, then rather than assigning a + probability of 0 to all labels, it will ignore that feature. + + The feature value 'None' is reserved for unseen feature values; + you generally should not use 'None' as a feature value for one of + your own features. + """ + + def __init__(self, label_probdist, feature_probdist): + """ + :param label_probdist: P(label), the probability distribution + over labels. It is expressed as a ``ProbDistI`` whose + samples are labels. I.e., P(label) = + ``label_probdist.prob(label)``. + + :param feature_probdist: P(fname=fval|label), the probability + distribution for feature values, given labels. It is + expressed as a dictionary whose keys are ``(label, fname)`` + pairs and whose values are ``ProbDistI`` objects over feature + values. I.e., P(fname=fval|label) = + ``feature_probdist[label,fname].prob(fval)``. If a given + ``(label,fname)`` is not a key in ``feature_probdist``, then + it is assumed that the corresponding P(fname=fval|label) + is 0 for all values of ``fval``. + """ + self._label_probdist = label_probdist + self._feature_probdist = feature_probdist + self._labels = list(label_probdist.samples()) + + def labels(self): + return self._labels + + def classify(self, featureset): + return self.prob_classify(featureset).max() + + def prob_classify(self, featureset): + # Discard any feature names that we've never seen before. + # Otherwise, we'll just assign a probability of 0 to + # everything. + featureset = featureset.copy() + for fname in list(featureset.keys()): + for label in self._labels: + if (label, fname) in self._feature_probdist: + break + else: + # print('Ignoring unseen feature %s' % fname) + del featureset[fname] + + # Find the log probability of each label, given the features. + # Start with the log probability of the label itself. + logprob = {} + for label in self._labels: + logprob[label] = self._label_probdist.logprob(label) + + # Then add in the log probability of features given labels. + for label in self._labels: + for (fname, fval) in featureset.items(): + if (label, fname) in self._feature_probdist: + feature_probs = self._feature_probdist[label, fname] + logprob[label] += feature_probs.logprob(fval) + else: + # nb: This case will never come up if the + # classifier was created by + # NaiveBayesClassifier.train(). + logprob[label] += sum_logs([]) # = -INF. + + return DictionaryProbDist(logprob, normalize=True, log=True) + + def show_most_informative_features(self, n=10): + # Determine the most relevant features, and display them. + cpdist = self._feature_probdist + print("Most Informative Features") + + for (fname, fval) in self.most_informative_features(n): + + def labelprob(l): + return cpdist[l, fname].prob(fval) + + labels = sorted( + (l for l in self._labels if fval in cpdist[l, fname].samples()), + key=lambda element: (-labelprob(element), element), + reverse=True, + ) + if len(labels) == 1: + continue + l0 = labels[0] + l1 = labels[-1] + if cpdist[l0, fname].prob(fval) == 0: + ratio = "INF" + else: + ratio = "%8.1f" % ( + cpdist[l1, fname].prob(fval) / cpdist[l0, fname].prob(fval) + ) + print( + "%24s = %-14r %6s : %-6s = %s : 1.0" + % (fname, fval, ("%s" % l1)[:6], ("%s" % l0)[:6], ratio) + ) + + def most_informative_features(self, n=100): + """ + Return a list of the 'most informative' features used by this + classifier. 
For the purpose of this function, the + informativeness of a feature ``(fname,fval)`` is equal to the + highest value of P(fname=fval|label), for any label, divided by + the lowest value of P(fname=fval|label), for any label: + + | max[ P(fname=fval|label1) / P(fname=fval|label2) ] + """ + if hasattr(self, "_most_informative_features"): + return self._most_informative_features[:n] + else: + # The set of (fname, fval) pairs used by this classifier. + features = set() + # The max & min probability associated w/ each (fname, fval) + # pair. Maps (fname,fval) -> float. + maxprob = defaultdict(lambda: 0.0) + minprob = defaultdict(lambda: 1.0) + + for (label, fname), probdist in self._feature_probdist.items(): + for fval in probdist.samples(): + feature = (fname, fval) + features.add(feature) + p = probdist.prob(fval) + maxprob[feature] = max(p, maxprob[feature]) + minprob[feature] = min(p, minprob[feature]) + if minprob[feature] == 0: + features.discard(feature) + + # Convert features to a list, & sort it by how informative + # features are. + self._most_informative_features = sorted( + features, + key=lambda feature_: ( + minprob[feature_] / maxprob[feature_], + feature_[0], + feature_[1] in [None, False, True], + str(feature_[1]).lower(), + ), + ) + return self._most_informative_features[:n] + + @classmethod + def train(cls, labeled_featuresets, estimator=ELEProbDist): + """ + :param labeled_featuresets: A list of classified featuresets, + i.e., a list of tuples ``(featureset, label)``. + """ + label_freqdist = FreqDist() + feature_freqdist = defaultdict(FreqDist) + feature_values = defaultdict(set) + fnames = set() + + # Count up how many times each feature value occurred, given + # the label and featurename. + for featureset, label in labeled_featuresets: + label_freqdist[label] += 1 + for fname, fval in featureset.items(): + # Increment freq(fval|label, fname) + feature_freqdist[label, fname][fval] += 1 + # Record that fname can take the value fval. + feature_values[fname].add(fval) + # Keep a list of all feature names. + fnames.add(fname) + + # If a feature didn't have a value given for an instance, then + # we assume that it gets the implicit value 'None.' This loop + # counts up the number of 'missing' feature values for each + # (label,fname) pair, and increments the count of the fval + # 'None' by that amount. + for label in label_freqdist: + num_samples = label_freqdist[label] + for fname in fnames: + count = feature_freqdist[label, fname].N() + # Only add a None key when necessary, i.e. if there are + # any samples with feature 'fname' missing. 
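+ # Illustrative sketch of the step below (hypothetical numbers, not from the
+ # source): if label 'male' has num_samples = 100 but feature 'last_letter'
+ # was only observed in 90 of those instances, then count = 90 and
+ # freq(None | 'male', 'last_letter') is incremented by 10.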
+ if num_samples - count > 0: + feature_freqdist[label, fname][None] += num_samples - count + feature_values[fname].add(None) + + # Create the P(label) distribution + label_probdist = estimator(label_freqdist) + + # Create the P(fval|label, fname) distribution + feature_probdist = {} + for ((label, fname), freqdist) in feature_freqdist.items(): + probdist = estimator(freqdist, bins=len(feature_values[fname])) + feature_probdist[label, fname] = probdist + + return cls(label_probdist, feature_probdist) + + +##////////////////////////////////////////////////////// +## Demo +##////////////////////////////////////////////////////// + + +def demo(): + from nltk.classify.util import names_demo + + classifier = names_demo(NaiveBayesClassifier.train) + classifier.show_most_informative_features() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/classify/positivenaivebayes.py b/venv/lib/python3.10/site-packages/nltk/classify/positivenaivebayes.py new file mode 100644 index 0000000000000000000000000000000000000000..23797f0970848ce9e3617b16dbf54352e5f1523c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/positivenaivebayes.py @@ -0,0 +1,180 @@ +# Natural Language Toolkit: Positive Naive Bayes Classifier +# +# Copyright (C) 2012 NLTK Project +# Author: Alessandro Presta +# URL: +# For license information, see LICENSE.TXT + +""" +A variant of the Naive Bayes Classifier that performs binary classification with +partially-labeled training sets. In other words, assume we want to build a classifier +that assigns each example to one of two complementary classes (e.g., male names and +female names). +If we have a training set with labeled examples for both classes, we can use a +standard Naive Bayes Classifier. However, consider the case when we only have labeled +examples for one of the classes, and other, unlabeled, examples. +Then, assuming a prior distribution on the two labels, we can use the unlabeled set +to estimate the frequencies of the various features. + +Let the two possible labels be 1 and 0, and let's say we only have examples labeled 1 +and unlabeled examples. We are also given an estimate of P(1). + +We compute P(feature|1) exactly as in the standard case. + +To compute P(feature|0), we first estimate P(feature) from the unlabeled set (we are +assuming that the unlabeled examples are drawn according to the given prior distribution) +and then express the conditional probability as: + +| P(feature) - P(feature|1) * P(1) +| P(feature|0) = ---------------------------------- +| P(0) + +Example: + + >>> from nltk.classify import PositiveNaiveBayesClassifier + +Some sentences about sports: + + >>> sports_sentences = [ 'The team dominated the game', + ... 'They lost the ball', + ... 'The game was intense', + ... 'The goalkeeper catched the ball', + ... 'The other team controlled the ball' ] + +Mixed topics, including sports: + + >>> various_sentences = [ 'The President did not comment', + ... 'I lost the keys', + ... 'The team won the game', + ... 'Sara has two kids', + ... 'The ball went off the court', + ... 'They had the ball for the whole game', + ... 'The show is over' ] + +The features of a sentence are simply the words it contains: + + >>> def features(sentence): + ... words = sentence.lower().split() + ... 
return dict(('contains(%s)' % w, True) for w in words) + +We use the sports sentences as positive examples, the mixed ones ad unlabeled examples: + + >>> positive_featuresets = map(features, sports_sentences) + >>> unlabeled_featuresets = map(features, various_sentences) + >>> classifier = PositiveNaiveBayesClassifier.train(positive_featuresets, + ... unlabeled_featuresets) + +Is the following sentence about sports? + + >>> classifier.classify(features('The cat is on the table')) + False + +What about this one? + + >>> classifier.classify(features('My team lost the game')) + True +""" + +from collections import defaultdict + +from nltk.classify.naivebayes import NaiveBayesClassifier +from nltk.probability import DictionaryProbDist, ELEProbDist, FreqDist + +##////////////////////////////////////////////////////// +## Positive Naive Bayes Classifier +##////////////////////////////////////////////////////// + + +class PositiveNaiveBayesClassifier(NaiveBayesClassifier): + @staticmethod + def train( + positive_featuresets, + unlabeled_featuresets, + positive_prob_prior=0.5, + estimator=ELEProbDist, + ): + """ + :param positive_featuresets: An iterable of featuresets that are known as positive + examples (i.e., their label is ``True``). + + :param unlabeled_featuresets: An iterable of featuresets whose label is unknown. + + :param positive_prob_prior: A prior estimate of the probability of the label + ``True`` (default 0.5). + """ + positive_feature_freqdist = defaultdict(FreqDist) + unlabeled_feature_freqdist = defaultdict(FreqDist) + feature_values = defaultdict(set) + fnames = set() + + # Count up how many times each feature value occurred in positive examples. + num_positive_examples = 0 + for featureset in positive_featuresets: + for fname, fval in featureset.items(): + positive_feature_freqdist[fname][fval] += 1 + feature_values[fname].add(fval) + fnames.add(fname) + num_positive_examples += 1 + + # Count up how many times each feature value occurred in unlabeled examples. + num_unlabeled_examples = 0 + for featureset in unlabeled_featuresets: + for fname, fval in featureset.items(): + unlabeled_feature_freqdist[fname][fval] += 1 + feature_values[fname].add(fval) + fnames.add(fname) + num_unlabeled_examples += 1 + + # If a feature didn't have a value given for an instance, then we assume that + # it gets the implicit value 'None'. + for fname in fnames: + count = positive_feature_freqdist[fname].N() + positive_feature_freqdist[fname][None] += num_positive_examples - count + feature_values[fname].add(None) + + for fname in fnames: + count = unlabeled_feature_freqdist[fname].N() + unlabeled_feature_freqdist[fname][None] += num_unlabeled_examples - count + feature_values[fname].add(None) + + negative_prob_prior = 1.0 - positive_prob_prior + + # Create the P(label) distribution. + label_probdist = DictionaryProbDist( + {True: positive_prob_prior, False: negative_prob_prior} + ) + + # Create the P(fval|label, fname) distribution. 
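+ # Worked instance of the formula from the module docstring (hypothetical
+ # numbers): with P(1) = 0.5, if the unlabeled set gives P(feature) = 0.4 and
+ # the positive set gives P(feature|1) = 0.6, then
+ #     P(feature|0) = (0.4 - 0.6 * 0.5) / 0.5 = 0.2,
+ # which is what the loop over unlabeled_feature_freqdist computes below.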
+ feature_probdist = {} + for fname, freqdist in positive_feature_freqdist.items(): + probdist = estimator(freqdist, bins=len(feature_values[fname])) + feature_probdist[True, fname] = probdist + + for fname, freqdist in unlabeled_feature_freqdist.items(): + global_probdist = estimator(freqdist, bins=len(feature_values[fname])) + negative_feature_probs = {} + for fval in feature_values[fname]: + prob = ( + global_probdist.prob(fval) + - positive_prob_prior * feature_probdist[True, fname].prob(fval) + ) / negative_prob_prior + # TODO: We need to add some kind of smoothing here, instead of + # setting negative probabilities to zero and normalizing. + negative_feature_probs[fval] = max(prob, 0.0) + feature_probdist[False, fname] = DictionaryProbDist( + negative_feature_probs, normalize=True + ) + + return PositiveNaiveBayesClassifier(label_probdist, feature_probdist) + + +##////////////////////////////////////////////////////// +## Demo +##////////////////////////////////////////////////////// + + +def demo(): + from nltk.classify.util import partial_names_demo + + classifier = partial_names_demo(PositiveNaiveBayesClassifier.train) + classifier.show_most_informative_features() diff --git a/venv/lib/python3.10/site-packages/nltk/classify/rte_classify.py b/venv/lib/python3.10/site-packages/nltk/classify/rte_classify.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb416633eb65576d3c8241d93e23880e1b3f73b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/rte_classify.py @@ -0,0 +1,183 @@ +# Natural Language Toolkit: RTE Classifier +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +Simple classifier for RTE corpus. + +It calculates the overlap in words and named entities between text and +hypothesis, and also whether there are words / named entities in the +hypothesis which fail to occur in the text, since this is an indicator that +the hypothesis is more informative than (i.e not entailed by) the text. + +TO DO: better Named Entity classification +TO DO: add lemmatization +""" + +from nltk.classify.maxent import MaxentClassifier +from nltk.classify.util import accuracy +from nltk.tokenize import RegexpTokenizer + + +class RTEFeatureExtractor: + """ + This builds a bag of words for both the text and the hypothesis after + throwing away some stopwords, then calculates overlap and difference. + """ + + def __init__(self, rtepair, stop=True, use_lemmatize=False): + """ + :param rtepair: a ``RTEPair`` from which features should be extracted + :param stop: if ``True``, stopwords are thrown away. + :type stop: bool + """ + self.stop = stop + self.stopwords = { + "a", + "the", + "it", + "they", + "of", + "in", + "to", + "is", + "have", + "are", + "were", + "and", + "very", + ".", + ",", + } + + self.negwords = {"no", "not", "never", "failed", "rejected", "denied"} + # Try to tokenize so that abbreviations, monetary amounts, email + # addresses, URLs are single tokens. 
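+ # Illustrative behaviour of the pattern defined below (assumed example, not
+ # from the source): "Send $10.50 to a@b.com" would be tokenized as
+ # ['Send', '$10.50', 'to', 'a@b.com'].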
+ tokenizer = RegexpTokenizer(r"[\w.@:/]+|\w+|\$[\d.]+") + + # Get the set of word types for text and hypothesis + self.text_tokens = tokenizer.tokenize(rtepair.text) + self.hyp_tokens = tokenizer.tokenize(rtepair.hyp) + self.text_words = set(self.text_tokens) + self.hyp_words = set(self.hyp_tokens) + + if use_lemmatize: + self.text_words = {self._lemmatize(token) for token in self.text_tokens} + self.hyp_words = {self._lemmatize(token) for token in self.hyp_tokens} + + if self.stop: + self.text_words = self.text_words - self.stopwords + self.hyp_words = self.hyp_words - self.stopwords + + self._overlap = self.hyp_words & self.text_words + self._hyp_extra = self.hyp_words - self.text_words + self._txt_extra = self.text_words - self.hyp_words + + def overlap(self, toktype, debug=False): + """ + Compute the overlap between text and hypothesis. + + :param toktype: distinguish Named Entities from ordinary words + :type toktype: 'ne' or 'word' + """ + ne_overlap = {token for token in self._overlap if self._ne(token)} + if toktype == "ne": + if debug: + print("ne overlap", ne_overlap) + return ne_overlap + elif toktype == "word": + if debug: + print("word overlap", self._overlap - ne_overlap) + return self._overlap - ne_overlap + else: + raise ValueError("Type not recognized:'%s'" % toktype) + + def hyp_extra(self, toktype, debug=True): + """ + Compute the extraneous material in the hypothesis. + + :param toktype: distinguish Named Entities from ordinary words + :type toktype: 'ne' or 'word' + """ + ne_extra = {token for token in self._hyp_extra if self._ne(token)} + if toktype == "ne": + return ne_extra + elif toktype == "word": + return self._hyp_extra - ne_extra + else: + raise ValueError("Type not recognized: '%s'" % toktype) + + @staticmethod + def _ne(token): + """ + This just assumes that words in all caps or titles are + named entities. + + :type token: str + """ + if token.istitle() or token.isupper(): + return True + return False + + @staticmethod + def _lemmatize(word): + """ + Use morphy from WordNet to find the base form of verbs. + """ + from nltk.corpus import wordnet as wn + + lemma = wn.morphy(word, pos=wn.VERB) + if lemma is not None: + return lemma + return word + + +def rte_features(rtepair): + extractor = RTEFeatureExtractor(rtepair) + features = {} + features["alwayson"] = True + features["word_overlap"] = len(extractor.overlap("word")) + features["word_hyp_extra"] = len(extractor.hyp_extra("word")) + features["ne_overlap"] = len(extractor.overlap("ne")) + features["ne_hyp_extra"] = len(extractor.hyp_extra("ne")) + features["neg_txt"] = len(extractor.negwords & extractor.text_words) + features["neg_hyp"] = len(extractor.negwords & extractor.hyp_words) + return features + + +def rte_featurize(rte_pairs): + return [(rte_features(pair), pair.value) for pair in rte_pairs] + + +def rte_classifier(algorithm, sample_N=None): + from nltk.corpus import rte as rte_corpus + + train_set = rte_corpus.pairs(["rte1_dev.xml", "rte2_dev.xml", "rte3_dev.xml"]) + test_set = rte_corpus.pairs(["rte1_test.xml", "rte2_test.xml", "rte3_test.xml"]) + + if sample_N is not None: + train_set = train_set[:sample_N] + test_set = test_set[:sample_N] + + featurized_train_set = rte_featurize(train_set) + featurized_test_set = rte_featurize(test_set) + + # Train the classifier + print("Training classifier...") + if algorithm in ["megam"]: # MEGAM based algorithms. 
+ clf = MaxentClassifier.train(featurized_train_set, algorithm) + elif algorithm in ["GIS", "IIS"]: # Use default GIS/IIS MaxEnt algorithm + clf = MaxentClassifier.train(featurized_train_set, algorithm) + else: + err_msg = str( + "RTEClassifier only supports these algorithms:\n " + "'megam', 'GIS', 'IIS'.\n" + ) + raise Exception(err_msg) + print("Testing classifier...") + acc = accuracy(clf, featurized_test_set) + print("Accuracy: %6.4f" % acc) + return clf diff --git a/venv/lib/python3.10/site-packages/nltk/classify/scikitlearn.py b/venv/lib/python3.10/site-packages/nltk/classify/scikitlearn.py new file mode 100644 index 0000000000000000000000000000000000000000..c1a35a416e2aebc873dad0559b75f85be3ad8200 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/scikitlearn.py @@ -0,0 +1,143 @@ +# Natural Language Toolkit: Interface to scikit-learn classifiers +# +# Author: Lars Buitinck +# URL: +# For license information, see LICENSE.TXT +""" +scikit-learn (https://scikit-learn.org) is a machine learning library for +Python. It supports many classification algorithms, including SVMs, +Naive Bayes, logistic regression (MaxEnt) and decision trees. + +This package implements a wrapper around scikit-learn classifiers. To use this +wrapper, construct a scikit-learn estimator object, then use that to construct +a SklearnClassifier. E.g., to wrap a linear SVM with default settings: + +>>> from sklearn.svm import LinearSVC +>>> from nltk.classify.scikitlearn import SklearnClassifier +>>> classif = SklearnClassifier(LinearSVC()) + +A scikit-learn classifier may include preprocessing steps when it's wrapped +in a Pipeline object. The following constructs and wraps a Naive Bayes text +classifier with tf-idf weighting and chi-square feature selection to get the +best 1000 features: + +>>> from sklearn.feature_extraction.text import TfidfTransformer +>>> from sklearn.feature_selection import SelectKBest, chi2 +>>> from sklearn.naive_bayes import MultinomialNB +>>> from sklearn.pipeline import Pipeline +>>> pipeline = Pipeline([('tfidf', TfidfTransformer()), +... ('chi2', SelectKBest(chi2, k=1000)), +... ('nb', MultinomialNB())]) +>>> classif = SklearnClassifier(pipeline) +""" + +from nltk.classify.api import ClassifierI +from nltk.probability import DictionaryProbDist + +try: + from sklearn.feature_extraction import DictVectorizer + from sklearn.preprocessing import LabelEncoder +except ImportError: + pass + +__all__ = ["SklearnClassifier"] + + +class SklearnClassifier(ClassifierI): + """Wrapper for scikit-learn classifiers.""" + + def __init__(self, estimator, dtype=float, sparse=True): + """ + :param estimator: scikit-learn classifier object. + + :param dtype: data type used when building feature array. + scikit-learn estimators work exclusively on numeric data. The + default value should be fine for almost all situations. + + :param sparse: Whether to use sparse matrices internally. + The estimator must support these; not all scikit-learn classifiers + do (see their respective documentation and look for "sparse + matrix"). The default value is True, since most NLP problems + involve sparse feature sets. Setting this to False may take a + great amount of memory. + :type sparse: boolean. + """ + self._clf = estimator + self._encoder = LabelEncoder() + self._vectorizer = DictVectorizer(dtype=dtype, sparse=sparse) + + def __repr__(self): + return "" % self._clf + + def classify_many(self, featuresets): + """Classify a batch of samples. 
+ + :param featuresets: An iterable over featuresets, each a dict mapping + strings to either numbers, booleans or strings. + :return: The predicted class label for each input sample. + :rtype: list + """ + X = self._vectorizer.transform(featuresets) + classes = self._encoder.classes_ + return [classes[i] for i in self._clf.predict(X)] + + def prob_classify_many(self, featuresets): + """Compute per-class probabilities for a batch of samples. + + :param featuresets: An iterable over featuresets, each a dict mapping + strings to either numbers, booleans or strings. + :rtype: list of ``ProbDistI`` + """ + X = self._vectorizer.transform(featuresets) + y_proba_list = self._clf.predict_proba(X) + return [self._make_probdist(y_proba) for y_proba in y_proba_list] + + def labels(self): + """The class labels used by this classifier. + + :rtype: list + """ + return list(self._encoder.classes_) + + def train(self, labeled_featuresets): + """ + Train (fit) the scikit-learn estimator. + + :param labeled_featuresets: A list of ``(featureset, label)`` + where each ``featureset`` is a dict mapping strings to either + numbers, booleans or strings. + """ + + X, y = list(zip(*labeled_featuresets)) + X = self._vectorizer.fit_transform(X) + y = self._encoder.fit_transform(y) + self._clf.fit(X, y) + + return self + + def _make_probdist(self, y_proba): + classes = self._encoder.classes_ + return DictionaryProbDist({classes[i]: p for i, p in enumerate(y_proba)}) + + +if __name__ == "__main__": + from sklearn.linear_model import LogisticRegression + from sklearn.naive_bayes import BernoulliNB + + from nltk.classify.util import names_demo, names_demo_features + + # Bernoulli Naive Bayes is designed for binary classification. We set the + # binarize option to False since we know we're passing boolean features. + print("scikit-learn Naive Bayes:") + names_demo( + SklearnClassifier(BernoulliNB(binarize=False)).train, + features=names_demo_features, + ) + + # The C parameter on logistic regression (MaxEnt) controls regularization. + # The higher it's set, the less regularized the classifier is. + print("\n\nscikit-learn logistic regression:") + names_demo( + SklearnClassifier(LogisticRegression(C=1000)).train, + features=names_demo_features, + ) diff --git a/venv/lib/python3.10/site-packages/nltk/classify/senna.py b/venv/lib/python3.10/site-packages/nltk/classify/senna.py new file mode 100644 index 0000000000000000000000000000000000000000..a1099ffd668574c636412b1bec9c94fd07865651 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/senna.py @@ -0,0 +1,176 @@ +# Natural Language Toolkit: Senna Interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Rami Al-Rfou' +# URL: +# For license information, see LICENSE.TXT + +""" +A general interface to the SENNA pipeline that supports any of the +operations specified in SUPPORTED_OPERATIONS. + +Applying multiple operations at once has the speed advantage. For example, +Senna will automatically determine POS tags if you are extracting named +entities. Applying both of the operations will cost only the time of +extracting the named entities. + +The SENNA pipeline has a fixed maximum size of the sentences that it can read. +By default it is 1024 token/sentence. If you have larger sentences, changing +the MAX_SENTENCE_SIZE value in SENNA_main.c should be considered and your +system specific binary should be rebuilt. Otherwise this could introduce +misalignment errors. + +The input is: + +- path to the directory that contains SENNA executables. 
If the path is incorrect, + Senna will automatically search for executable file specified in SENNA environment variable +- List of the operations needed to be performed. +- (optionally) the encoding of the input data (default:utf-8) + +Note: Unit tests for this module can be found in test/unit/test_senna.py + +>>> from nltk.classify import Senna +>>> pipeline = Senna('/usr/share/senna-v3.0', ['pos', 'chk', 'ner']) # doctest: +SKIP +>>> sent = 'Dusseldorf is an international business center'.split() +>>> [(token['word'], token['chk'], token['ner'], token['pos']) for token in pipeline.tag(sent)] # doctest: +SKIP +[('Dusseldorf', 'B-NP', 'B-LOC', 'NNP'), ('is', 'B-VP', 'O', 'VBZ'), ('an', 'B-NP', 'O', 'DT'), +('international', 'I-NP', 'O', 'JJ'), ('business', 'I-NP', 'O', 'NN'), ('center', 'I-NP', 'O', 'NN')] +""" + +from os import environ, path, sep +from platform import architecture, system +from subprocess import PIPE, Popen + +from nltk.tag.api import TaggerI + + +class Senna(TaggerI): + + SUPPORTED_OPERATIONS = ["pos", "chk", "ner"] + + def __init__(self, senna_path, operations, encoding="utf-8"): + self._encoding = encoding + self._path = path.normpath(senna_path) + sep + + # Verifies the existence of the executable on the self._path first + # senna_binary_file_1 = self.executable(self._path) + exe_file_1 = self.executable(self._path) + if not path.isfile(exe_file_1): + # Check for the system environment + if "SENNA" in environ: + # self._path = path.join(environ['SENNA'],'') + self._path = path.normpath(environ["SENNA"]) + sep + exe_file_2 = self.executable(self._path) + if not path.isfile(exe_file_2): + raise LookupError( + "Senna executable expected at %s or %s but not found" + % (exe_file_1, exe_file_2) + ) + + self.operations = operations + + def executable(self, base_path): + """ + The function that determines the system specific binary that should be + used in the pipeline. In case, the system is not known the default senna binary will + be used. + """ + os_name = system() + if os_name == "Linux": + bits = architecture()[0] + if bits == "64bit": + return path.join(base_path, "senna-linux64") + return path.join(base_path, "senna-linux32") + if os_name == "Windows": + return path.join(base_path, "senna-win32.exe") + if os_name == "Darwin": + return path.join(base_path, "senna-osx") + return path.join(base_path, "senna") + + def _map(self): + """ + A method that calculates the order of the columns that SENNA pipeline + will output the tags into. This depends on the operations being ordered. + """ + _map = {} + i = 1 + for operation in Senna.SUPPORTED_OPERATIONS: + if operation in self.operations: + _map[operation] = i + i += 1 + return _map + + def tag(self, tokens): + """ + Applies the specified operation(s) on a list of tokens. + """ + return self.tag_sents([tokens])[0] + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return a + list of dictionaries. Every dictionary will contain a word with its + calculated annotations/tags. 
+ """ + encoding = self._encoding + + if not path.isfile(self.executable(self._path)): + raise LookupError( + "Senna executable expected at %s but not found" + % self.executable(self._path) + ) + + # Build the senna command to run the tagger + _senna_cmd = [ + self.executable(self._path), + "-path", + self._path, + "-usrtokens", + "-iobtags", + ] + _senna_cmd.extend(["-" + op for op in self.operations]) + + # Serialize the actual sentences to a temporary string + _input = "\n".join(" ".join(x) for x in sentences) + "\n" + if isinstance(_input, str) and encoding: + _input = _input.encode(encoding) + + # Run the tagger and get the output + p = Popen(_senna_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) + (stdout, stderr) = p.communicate(input=_input) + senna_output = stdout + + # Check the return code. + if p.returncode != 0: + raise RuntimeError("Senna command failed! Details: %s" % stderr) + + if encoding: + senna_output = stdout.decode(encoding) + + # Output the tagged sentences + map_ = self._map() + tagged_sentences = [[]] + sentence_index = 0 + token_index = 0 + for tagged_word in senna_output.strip().split("\n"): + if not tagged_word: + tagged_sentences.append([]) + sentence_index += 1 + token_index = 0 + continue + tags = tagged_word.split("\t") + result = {} + for tag in map_: + result[tag] = tags[map_[tag]].strip() + try: + result["word"] = sentences[sentence_index][token_index] + except IndexError as e: + raise IndexError( + "Misalignment error occurred at sentence number %d. Possible reason" + " is that the sentence size exceeded the maximum size. Check the " + "documentation of Senna class for more information." + % sentence_index + ) from e + tagged_sentences[-1].append(result) + token_index += 1 + return tagged_sentences diff --git a/venv/lib/python3.10/site-packages/nltk/classify/svm.py b/venv/lib/python3.10/site-packages/nltk/classify/svm.py new file mode 100644 index 0000000000000000000000000000000000000000..65b852c2748b0df620a4bc4340197ea1026e6728 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/svm.py @@ -0,0 +1,17 @@ +# Natural Language Toolkit: SVM-based classifier +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Leon Derczynski +# +# URL: +# For license information, see LICENSE.TXT +""" +nltk.classify.svm was deprecated. For classification based +on support vector machines SVMs use nltk.classify.scikitlearn +(or `scikit-learn `_ directly). +""" + + +class SvmClassifier: + def __init__(self, *args, **kwargs): + raise NotImplementedError(__doc__) diff --git a/venv/lib/python3.10/site-packages/nltk/classify/tadm.py b/venv/lib/python3.10/site-packages/nltk/classify/tadm.py new file mode 100644 index 0000000000000000000000000000000000000000..f8eb4b3daa2b7e904856b3fa2b4f16378427715f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/tadm.py @@ -0,0 +1,122 @@ +# Natural Language Toolkit: Interface to TADM Classifier +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Joseph Frazee +# URL: +# For license information, see LICENSE.TXT + +import subprocess +import sys + +from nltk.internals import find_binary + +try: + import numpy +except ImportError: + pass + +_tadm_bin = None + + +def config_tadm(bin=None): + global _tadm_bin + _tadm_bin = find_binary( + "tadm", bin, env_vars=["TADM"], binary_names=["tadm"], url="http://tadm.sf.net" + ) + + +def write_tadm_file(train_toks, encoding, stream): + """ + Generate an input file for ``tadm`` based on the given corpus of + classified tokens. 
+ + :type train_toks: list(tuple(dict, str)) + :param train_toks: Training data, represented as a list of + pairs, the first member of which is a feature dictionary, + and the second of which is a classification label. + :type encoding: TadmEventMaxentFeatureEncoding + :param encoding: A feature encoding, used to convert featuresets + into feature vectors. + :type stream: stream + :param stream: The stream to which the ``tadm`` input file should be + written. + """ + # See the following for a file format description: + # + # https://sf.net/forum/forum.php?thread_id=1391502&forum_id=473054 + # https://sf.net/forum/forum.php?thread_id=1675097&forum_id=473054 + labels = encoding.labels() + for featureset, label in train_toks: + length_line = "%d\n" % len(labels) + stream.write(length_line) + for known_label in labels: + v = encoding.encode(featureset, known_label) + line = "%d %d %s\n" % ( + int(label == known_label), + len(v), + " ".join("%d %d" % u for u in v), + ) + stream.write(line) + + +def parse_tadm_weights(paramfile): + """ + Given the stdout output generated by ``tadm`` when training a + model, return a ``numpy`` array containing the corresponding weight + vector. + """ + weights = [] + for line in paramfile: + weights.append(float(line.strip())) + return numpy.array(weights, "d") + + +def call_tadm(args): + """ + Call the ``tadm`` binary with the given arguments. + """ + if isinstance(args, str): + raise TypeError("args should be a list of strings") + if _tadm_bin is None: + config_tadm() + + # Call tadm via a subprocess + cmd = [_tadm_bin] + args + p = subprocess.Popen(cmd, stdout=sys.stdout) + (stdout, stderr) = p.communicate() + + # Check the return code. + if p.returncode != 0: + print() + print(stderr) + raise OSError("tadm command failed!") + + +def names_demo(): + from nltk.classify.maxent import TadmMaxentClassifier + from nltk.classify.util import names_demo + + classifier = names_demo(TadmMaxentClassifier.train) + + +def encoding_demo(): + import sys + + from nltk.classify.maxent import TadmEventMaxentFeatureEncoding + + tokens = [ + ({"f0": 1, "f1": 1, "f3": 1}, "A"), + ({"f0": 1, "f2": 1, "f4": 1}, "B"), + ({"f0": 2, "f2": 1, "f3": 1, "f4": 1}, "A"), + ] + encoding = TadmEventMaxentFeatureEncoding.train(tokens) + write_tadm_file(tokens, encoding, sys.stdout) + print() + for i in range(encoding.length()): + print("%s --> %d" % (encoding.describe(i), i)) + print() + + +if __name__ == "__main__": + encoding_demo() + names_demo() diff --git a/venv/lib/python3.10/site-packages/nltk/classify/textcat.py b/venv/lib/python3.10/site-packages/nltk/classify/textcat.py new file mode 100644 index 0000000000000000000000000000000000000000..b8176d3b668d2d233023cd22e7ae3854df8043d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/textcat.py @@ -0,0 +1,197 @@ +# Natural Language Toolkit: Language ID module using TextCat algorithm +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Avital Pekker +# +# URL: +# For license information, see LICENSE.TXT + +""" +A module for language identification using the TextCat algorithm. +An implementation of the text categorization algorithm +presented in Cavnar, W. B. and J. M. Trenkle, +"N-Gram-Based Text Categorization". + +The algorithm takes advantage of Zipf's law and uses +n-gram frequencies to profile languages and text-yet to +be identified-then compares using a distance measure. + +Language n-grams are provided by the "An Crubadan" +project. A corpus reader was created separately to read +those files. 
+ +For details regarding the algorithm, see: +https://www.let.rug.nl/~vannoord/TextCat/textcat.pdf + +For details about An Crubadan, see: +https://borel.slu.edu/crubadan/index.html +""" + +from sys import maxsize + +from nltk.util import trigrams + +# Note: this is NOT "re" you're likely used to. The regex module +# is an alternative to the standard re module that supports +# Unicode codepoint properties with the \p{} syntax. +# You may have to "pip install regx" +try: + import regex as re +except ImportError: + re = None +###################################################################### +## Language identification using TextCat +###################################################################### + + +class TextCat: + + _corpus = None + fingerprints = {} + _START_CHAR = "<" + _END_CHAR = ">" + + last_distances = {} + + def __init__(self): + if not re: + raise OSError( + "classify.textcat requires the regex module that " + "supports unicode. Try '$ pip install regex' and " + "see https://pypi.python.org/pypi/regex for " + "further details." + ) + + from nltk.corpus import crubadan + + self._corpus = crubadan + # Load all language ngrams into cache + for lang in self._corpus.langs(): + self._corpus.lang_freq(lang) + + def remove_punctuation(self, text): + """Get rid of punctuation except apostrophes""" + return re.sub(r"[^\P{P}\']+", "", text) + + def profile(self, text): + """Create FreqDist of trigrams within text""" + from nltk import FreqDist, word_tokenize + + clean_text = self.remove_punctuation(text) + tokens = word_tokenize(clean_text) + + fingerprint = FreqDist() + for t in tokens: + token_trigram_tuples = trigrams(self._START_CHAR + t + self._END_CHAR) + token_trigrams = ["".join(tri) for tri in token_trigram_tuples] + + for cur_trigram in token_trigrams: + if cur_trigram in fingerprint: + fingerprint[cur_trigram] += 1 + else: + fingerprint[cur_trigram] = 1 + + return fingerprint + + def calc_dist(self, lang, trigram, text_profile): + """Calculate the "out-of-place" measure between the + text and language profile for a single trigram""" + + lang_fd = self._corpus.lang_freq(lang) + dist = 0 + + if trigram in lang_fd: + idx_lang_profile = list(lang_fd.keys()).index(trigram) + idx_text = list(text_profile.keys()).index(trigram) + + # print(idx_lang_profile, ", ", idx_text) + dist = abs(idx_lang_profile - idx_text) + else: + # Arbitrary but should be larger than + # any possible trigram file length + # in terms of total lines + dist = maxsize + + return dist + + def lang_dists(self, text): + """Calculate the "out-of-place" measure between + the text and all languages""" + + distances = {} + profile = self.profile(text) + # For all the languages + for lang in self._corpus._all_lang_freq.keys(): + # Calculate distance metric for every trigram in + # input text to be identified + lang_dist = 0 + for trigram in profile: + lang_dist += self.calc_dist(lang, trigram, profile) + + distances[lang] = lang_dist + + return distances + + def guess_language(self, text): + """Find the language with the min distance + to the text and return its ISO 639-3 code""" + self.last_distances = self.lang_dists(text) + + return min(self.last_distances, key=self.last_distances.get) + #################################################') + + +def demo(): + from nltk.corpus import udhr + + langs = [ + "Kurdish-UTF8", + "Abkhaz-UTF8", + "Farsi_Persian-UTF8", + "Hindi-UTF8", + "Hawaiian-UTF8", + "Russian-UTF8", + "Vietnamese-UTF8", + "Serbian_Srpski-UTF8", + "Esperanto-UTF8", + ] + + friendly = { + "kmr": 
"Northern Kurdish", + "abk": "Abkhazian", + "pes": "Iranian Persian", + "hin": "Hindi", + "haw": "Hawaiian", + "rus": "Russian", + "vie": "Vietnamese", + "srp": "Serbian", + "epo": "Esperanto", + } + + tc = TextCat() + + for cur_lang in langs: + # Get raw data from UDHR corpus + raw_sentences = udhr.sents(cur_lang) + rows = len(raw_sentences) - 1 + cols = list(map(len, raw_sentences)) + + sample = "" + + # Generate a sample text of the language + for i in range(0, rows): + cur_sent = "" + for j in range(0, cols[i]): + cur_sent += " " + raw_sentences[i][j] + + sample += cur_sent + + # Try to detect what it is + print("Language snippet: " + sample[0:140] + "...") + guess = tc.guess_language(sample) + print(f"Language detection: {guess} ({friendly[guess]})") + print("#" * 140) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/classify/util.py b/venv/lib/python3.10/site-packages/nltk/classify/util.py new file mode 100644 index 0000000000000000000000000000000000000000..f6ada2c3e30a97debe6c6f03ee5c7be06f5e9d2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/util.py @@ -0,0 +1,346 @@ +# Natural Language Toolkit: Classifier Utility Functions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT + +""" +Utility functions and classes for classifiers. +""" + +import math + +# from nltk.util import Deprecated +import nltk.classify.util # for accuracy & log_likelihood +from nltk.util import LazyMap + +###################################################################### +# { Helper Functions +###################################################################### + +# alternative name possibility: 'map_featurefunc()'? +# alternative name possibility: 'detect_features()'? +# alternative name possibility: 'map_featuredetect()'? +# or.. just have users use LazyMap directly? +def apply_features(feature_func, toks, labeled=None): + """ + Use the ``LazyMap`` class to construct a lazy list-like + object that is analogous to ``map(feature_func, toks)``. In + particular, if ``labeled=False``, then the returned list-like + object's values are equal to:: + + [feature_func(tok) for tok in toks] + + If ``labeled=True``, then the returned list-like object's values + are equal to:: + + [(feature_func(tok), label) for (tok, label) in toks] + + The primary purpose of this function is to avoid the memory + overhead involved in storing all the featuresets for every token + in a corpus. Instead, these featuresets are constructed lazily, + as-needed. The reduction in memory overhead can be especially + significant when the underlying list of tokens is itself lazy (as + is the case with many corpus readers). + + :param feature_func: The function that will be applied to each + token. It should return a featureset -- i.e., a dict + mapping feature names to feature values. + :param toks: The list of tokens to which ``feature_func`` should be + applied. If ``labeled=True``, then the list elements will be + passed directly to ``feature_func()``. If ``labeled=False``, + then the list elements should be tuples ``(tok,label)``, and + ``tok`` will be passed to ``feature_func()``. + :param labeled: If true, then ``toks`` contains labeled tokens -- + i.e., tuples of the form ``(tok, label)``. (Default: + auto-detect based on types.) 
+ """ + if labeled is None: + labeled = toks and isinstance(toks[0], (tuple, list)) + if labeled: + + def lazy_func(labeled_token): + return (feature_func(labeled_token[0]), labeled_token[1]) + + return LazyMap(lazy_func, toks) + else: + return LazyMap(feature_func, toks) + + +def attested_labels(tokens): + """ + :return: A list of all labels that are attested in the given list + of tokens. + :rtype: list of (immutable) + :param tokens: The list of classified tokens from which to extract + labels. A classified token has the form ``(token, label)``. + :type tokens: list + """ + return tuple({label for (tok, label) in tokens}) + + +def log_likelihood(classifier, gold): + results = classifier.prob_classify_many([fs for (fs, l) in gold]) + ll = [pdist.prob(l) for ((fs, l), pdist) in zip(gold, results)] + return math.log(sum(ll) / len(ll)) + + +def accuracy(classifier, gold): + results = classifier.classify_many([fs for (fs, l) in gold]) + correct = [l == r for ((fs, l), r) in zip(gold, results)] + if correct: + return sum(correct) / len(correct) + else: + return 0 + + +class CutoffChecker: + """ + A helper class that implements cutoff checks based on number of + iterations and log likelihood. + + Accuracy cutoffs are also implemented, but they're almost never + a good idea to use. + """ + + def __init__(self, cutoffs): + self.cutoffs = cutoffs.copy() + if "min_ll" in cutoffs: + cutoffs["min_ll"] = -abs(cutoffs["min_ll"]) + if "min_lldelta" in cutoffs: + cutoffs["min_lldelta"] = abs(cutoffs["min_lldelta"]) + self.ll = None + self.acc = None + self.iter = 1 + + def check(self, classifier, train_toks): + cutoffs = self.cutoffs + self.iter += 1 + if "max_iter" in cutoffs and self.iter >= cutoffs["max_iter"]: + return True # iteration cutoff. + + new_ll = nltk.classify.util.log_likelihood(classifier, train_toks) + if math.isnan(new_ll): + return True + + if "min_ll" in cutoffs or "min_lldelta" in cutoffs: + if "min_ll" in cutoffs and new_ll >= cutoffs["min_ll"]: + return True # log likelihood cutoff + if ( + "min_lldelta" in cutoffs + and self.ll + and ((new_ll - self.ll) <= abs(cutoffs["min_lldelta"])) + ): + return True # log likelihood delta cutoff + self.ll = new_ll + + if "max_acc" in cutoffs or "min_accdelta" in cutoffs: + new_acc = nltk.classify.util.log_likelihood(classifier, train_toks) + if "max_acc" in cutoffs and new_acc >= cutoffs["max_acc"]: + return True # log likelihood cutoff + if ( + "min_accdelta" in cutoffs + and self.acc + and ((new_acc - self.acc) <= abs(cutoffs["min_accdelta"])) + ): + return True # log likelihood delta cutoff + self.acc = new_acc + + return False # no cutoff reached. 
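+ # Usage sketch for CutoffChecker (illustrative; not part of this module): an
+ # iterative trainer typically builds one checker from its cutoff keywords and
+ # calls check() once per training iteration, stopping when it returns True:
+ #
+ #     checker = CutoffChecker({'max_iter': 100, 'min_lldelta': 0.1})
+ #     while not checker.check(classifier, train_toks):
+ #         ...  # run one more optimization pass, then re-check the cutoffs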
+ + +###################################################################### +# { Demos +###################################################################### + + +def names_demo_features(name): + features = {} + features["alwayson"] = True + features["startswith"] = name[0].lower() + features["endswith"] = name[-1].lower() + for letter in "abcdefghijklmnopqrstuvwxyz": + features["count(%s)" % letter] = name.lower().count(letter) + features["has(%s)" % letter] = letter in name.lower() + return features + + +def binary_names_demo_features(name): + features = {} + features["alwayson"] = True + features["startswith(vowel)"] = name[0].lower() in "aeiouy" + features["endswith(vowel)"] = name[-1].lower() in "aeiouy" + for letter in "abcdefghijklmnopqrstuvwxyz": + features["count(%s)" % letter] = name.lower().count(letter) + features["has(%s)" % letter] = letter in name.lower() + features["startswith(%s)" % letter] = letter == name[0].lower() + features["endswith(%s)" % letter] = letter == name[-1].lower() + return features + + +def names_demo(trainer, features=names_demo_features): + import random + + from nltk.corpus import names + + # Construct a list of classified names, using the names corpus. + namelist = [(name, "male") for name in names.words("male.txt")] + [ + (name, "female") for name in names.words("female.txt") + ] + + # Randomly split the names into a test & train set. + random.seed(123456) + random.shuffle(namelist) + train = namelist[:5000] + test = namelist[5000:5500] + + # Train up a classifier. + print("Training classifier...") + classifier = trainer([(features(n), g) for (n, g) in train]) + + # Run the classifier on the test data. + print("Testing classifier...") + acc = accuracy(classifier, [(features(n), g) for (n, g) in test]) + print("Accuracy: %6.4f" % acc) + + # For classifiers that can find probabilities, show the log + # likelihood and some sample probability distributions. + try: + test_featuresets = [features(n) for (n, g) in test] + pdists = classifier.prob_classify_many(test_featuresets) + ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)] + print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test))) + print() + print("Unseen Names P(Male) P(Female)\n" + "-" * 40) + for ((name, gender), pdist) in list(zip(test, pdists))[:5]: + if gender == "male": + fmt = " %-15s *%6.4f %6.4f" + else: + fmt = " %-15s %6.4f *%6.4f" + print(fmt % (name, pdist.prob("male"), pdist.prob("female"))) + except NotImplementedError: + pass + + # Return the classifier + return classifier + + +def partial_names_demo(trainer, features=names_demo_features): + import random + + from nltk.corpus import names + + male_names = names.words("male.txt") + female_names = names.words("female.txt") + + random.seed(654321) + random.shuffle(male_names) + random.shuffle(female_names) + + # Create a list of male names to be used as positive-labeled examples for training + positive = map(features, male_names[:2000]) + + # Create a list of male and female names to be used as unlabeled examples + unlabeled = map(features, male_names[2000:2500] + female_names[:500]) + + # Create a test set with correctly-labeled male and female names + test = [(name, True) for name in male_names[2500:2750]] + [ + (name, False) for name in female_names[500:750] + ] + + random.shuffle(test) + + # Train up a classifier. + print("Training classifier...") + classifier = trainer(positive, unlabeled) + + # Run the classifier on the test data. 
+ print("Testing classifier...") + acc = accuracy(classifier, [(features(n), m) for (n, m) in test]) + print("Accuracy: %6.4f" % acc) + + # For classifiers that can find probabilities, show the log + # likelihood and some sample probability distributions. + try: + test_featuresets = [features(n) for (n, m) in test] + pdists = classifier.prob_classify_many(test_featuresets) + ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)] + print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test))) + print() + print("Unseen Names P(Male) P(Female)\n" + "-" * 40) + for ((name, is_male), pdist) in zip(test, pdists)[:5]: + if is_male == True: + fmt = " %-15s *%6.4f %6.4f" + else: + fmt = " %-15s %6.4f *%6.4f" + print(fmt % (name, pdist.prob(True), pdist.prob(False))) + except NotImplementedError: + pass + + # Return the classifier + return classifier + + +_inst_cache = {} + + +def wsd_demo(trainer, word, features, n=1000): + import random + + from nltk.corpus import senseval + + # Get the instances. + print("Reading data...") + global _inst_cache + if word not in _inst_cache: + _inst_cache[word] = [(i, i.senses[0]) for i in senseval.instances(word)] + instances = _inst_cache[word][:] + if n > len(instances): + n = len(instances) + senses = list({l for (i, l) in instances}) + print(" Senses: " + " ".join(senses)) + + # Randomly split the names into a test & train set. + print("Splitting into test & train...") + random.seed(123456) + random.shuffle(instances) + train = instances[: int(0.8 * n)] + test = instances[int(0.8 * n) : n] + + # Train up a classifier. + print("Training classifier...") + classifier = trainer([(features(i), l) for (i, l) in train]) + + # Run the classifier on the test data. + print("Testing classifier...") + acc = accuracy(classifier, [(features(i), l) for (i, l) in test]) + print("Accuracy: %6.4f" % acc) + + # For classifiers that can find probabilities, show the log + # likelihood and some sample probability distributions. + try: + test_featuresets = [features(i) for (i, n) in test] + pdists = classifier.prob_classify_many(test_featuresets) + ll = [pdist.logprob(gold) for ((name, gold), pdist) in zip(test, pdists)] + print("Avg. log likelihood: %6.4f" % (sum(ll) / len(test))) + except NotImplementedError: + pass + + # Return the classifier + return classifier + + +def check_megam_config(): + """ + Checks whether the MEGAM binary is configured. + """ + try: + _megam_bin + except NameError as e: + err_msg = str( + "Please configure your megam binary first, e.g.\n" + ">>> nltk.config_megam('/usr/bin/local/megam')" + ) + raise NameError(err_msg) from e diff --git a/venv/lib/python3.10/site-packages/nltk/classify/weka.py b/venv/lib/python3.10/site-packages/nltk/classify/weka.py new file mode 100644 index 0000000000000000000000000000000000000000..b02505f0139bc4e0c516d0384b6c9d3224297dbd --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/classify/weka.py @@ -0,0 +1,377 @@ +# Natural Language Toolkit: Interface to Weka Classsifiers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Classifiers that make use of the external 'Weka' package. 
+""" + +import os +import re +import subprocess +import tempfile +import time +import zipfile +from sys import stdin + +from nltk.classify.api import ClassifierI +from nltk.internals import config_java, java +from nltk.probability import DictionaryProbDist + +_weka_classpath = None +_weka_search = [ + ".", + "/usr/share/weka", + "/usr/local/share/weka", + "/usr/lib/weka", + "/usr/local/lib/weka", +] + + +def config_weka(classpath=None): + global _weka_classpath + + # Make sure java's configured first. + config_java() + + if classpath is not None: + _weka_classpath = classpath + + if _weka_classpath is None: + searchpath = _weka_search + if "WEKAHOME" in os.environ: + searchpath.insert(0, os.environ["WEKAHOME"]) + + for path in searchpath: + if os.path.exists(os.path.join(path, "weka.jar")): + _weka_classpath = os.path.join(path, "weka.jar") + version = _check_weka_version(_weka_classpath) + if version: + print(f"[Found Weka: {_weka_classpath} (version {version})]") + else: + print("[Found Weka: %s]" % _weka_classpath) + _check_weka_version(_weka_classpath) + + if _weka_classpath is None: + raise LookupError( + "Unable to find weka.jar! Use config_weka() " + "or set the WEKAHOME environment variable. " + "For more information about Weka, please see " + "https://www.cs.waikato.ac.nz/ml/weka/" + ) + + +def _check_weka_version(jar): + try: + zf = zipfile.ZipFile(jar) + except (SystemExit, KeyboardInterrupt): + raise + except: + return None + try: + try: + return zf.read("weka/core/version.txt") + except KeyError: + return None + finally: + zf.close() + + +class WekaClassifier(ClassifierI): + def __init__(self, formatter, model_filename): + self._formatter = formatter + self._model = model_filename + + def prob_classify_many(self, featuresets): + return self._classify_many(featuresets, ["-p", "0", "-distribution"]) + + def classify_many(self, featuresets): + return self._classify_many(featuresets, ["-p", "0"]) + + def _classify_many(self, featuresets, options): + # Make sure we can find java & weka. + config_weka() + + temp_dir = tempfile.mkdtemp() + try: + # Write the test data file. + test_filename = os.path.join(temp_dir, "test.arff") + self._formatter.write(test_filename, featuresets) + + # Call weka to classify the data. + cmd = [ + "weka.classifiers.bayes.NaiveBayes", + "-l", + self._model, + "-T", + test_filename, + ] + options + (stdout, stderr) = java( + cmd, + classpath=_weka_classpath, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + # Check if something went wrong: + if stderr and not stdout: + if "Illegal options: -distribution" in stderr: + raise ValueError( + "The installed version of weka does " + "not support probability distribution " + "output." + ) + else: + raise ValueError("Weka failed to generate output:\n%s" % stderr) + + # Parse weka's output. 
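+ # parse_weka_output() (defined below) expects Weka's tabular prediction
+ # listing; with the options used here the header line is typically
+ #     inst#     actual  predicted error prediction
+ # (or ending in "distribution" when -distribution is passed), followed by
+ # one row per test instance.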
+ return self.parse_weka_output(stdout.decode(stdin.encoding).split("\n")) + + finally: + for f in os.listdir(temp_dir): + os.remove(os.path.join(temp_dir, f)) + os.rmdir(temp_dir) + + def parse_weka_distribution(self, s): + probs = [float(v) for v in re.split("[*,]+", s) if v.strip()] + probs = dict(zip(self._formatter.labels(), probs)) + return DictionaryProbDist(probs) + + def parse_weka_output(self, lines): + # Strip unwanted text from stdout + for i, line in enumerate(lines): + if line.strip().startswith("inst#"): + lines = lines[i:] + break + + if lines[0].split() == ["inst#", "actual", "predicted", "error", "prediction"]: + return [line.split()[2].split(":")[1] for line in lines[1:] if line.strip()] + elif lines[0].split() == [ + "inst#", + "actual", + "predicted", + "error", + "distribution", + ]: + return [ + self.parse_weka_distribution(line.split()[-1]) + for line in lines[1:] + if line.strip() + ] + + # is this safe:? + elif re.match(r"^0 \w+ [01]\.[0-9]* \?\s*$", lines[0]): + return [line.split()[1] for line in lines if line.strip()] + + else: + for line in lines[:10]: + print(line) + raise ValueError( + "Unhandled output format -- your version " + "of weka may not be supported.\n" + " Header: %s" % lines[0] + ) + + # [xx] full list of classifiers (some may be abstract?): + # ADTree, AODE, BayesNet, ComplementNaiveBayes, ConjunctiveRule, + # DecisionStump, DecisionTable, HyperPipes, IB1, IBk, Id3, J48, + # JRip, KStar, LBR, LeastMedSq, LinearRegression, LMT, Logistic, + # LogisticBase, M5Base, MultilayerPerceptron, + # MultipleClassifiersCombiner, NaiveBayes, NaiveBayesMultinomial, + # NaiveBayesSimple, NBTree, NNge, OneR, PaceRegression, PART, + # PreConstructedLinearModel, Prism, RandomForest, + # RandomizableClassifier, RandomTree, RBFNetwork, REPTree, Ridor, + # RuleNode, SimpleLinearRegression, SimpleLogistic, + # SingleClassifierEnhancer, SMO, SMOreg, UserClassifier, VFI, + # VotedPerceptron, Winnow, ZeroR + + _CLASSIFIER_CLASS = { + "naivebayes": "weka.classifiers.bayes.NaiveBayes", + "C4.5": "weka.classifiers.trees.J48", + "log_regression": "weka.classifiers.functions.Logistic", + "svm": "weka.classifiers.functions.SMO", + "kstar": "weka.classifiers.lazy.KStar", + "ripper": "weka.classifiers.rules.JRip", + } + + @classmethod + def train( + cls, + model_filename, + featuresets, + classifier="naivebayes", + options=[], + quiet=True, + ): + # Make sure we can find java & weka. + config_weka() + + # Build an ARFF formatter. + formatter = ARFF_Formatter.from_train(featuresets) + + temp_dir = tempfile.mkdtemp() + try: + # Write the training data file. + train_filename = os.path.join(temp_dir, "train.arff") + formatter.write(train_filename, featuresets) + + if classifier in cls._CLASSIFIER_CLASS: + javaclass = cls._CLASSIFIER_CLASS[classifier] + elif classifier in cls._CLASSIFIER_CLASS.values(): + javaclass = classifier + else: + raise ValueError("Unknown classifier %s" % classifier) + + # Train the weka model. + cmd = [javaclass, "-d", model_filename, "-t", train_filename] + cmd += list(options) + if quiet: + stdout = subprocess.PIPE + else: + stdout = None + java(cmd, classpath=_weka_classpath, stdout=stdout) + + # Return the new classifier. + return WekaClassifier(formatter, model_filename) + + finally: + for f in os.listdir(temp_dir): + os.remove(os.path.join(temp_dir, f)) + os.rmdir(temp_dir) + + +class ARFF_Formatter: + """ + Converts featuresets and labeled featuresets to ARFF-formatted + strings, appropriate for input into Weka. 
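A hedged end-to-end sketch of the class above: train() writes an ARFF file, shells out to Java to build the model, and returns a WekaClassifier wrapping the model file. It assumes a working Java install and a locally available weka.jar; the model path and toy featuresets are illustrative only:

from nltk.classify.weka import WekaClassifier, config_weka

config_weka()  # locates weka.jar via the search path or $WEKAHOME; pass a path to override
train_set = [
    ({"last_letter": "a"}, "female"),
    ({"last_letter": "k"}, "male"),
    ({"last_letter": "e"}, "female"),
    ({"last_letter": "n"}, "male"),
]
clf = WekaClassifier.train("/tmp/names.model", train_set, classifier="C4.5")
print(clf.classify_many([{"last_letter": "o"}, {"last_letter": "a"}]))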
+ + Features and classes can be specified manually in the constructor, or may + be determined from data using ``from_train``. + """ + + def __init__(self, labels, features): + """ + :param labels: A list of all class labels that can be generated. + :param features: A list of feature specifications, where + each feature specification is a tuple (fname, ftype); + and ftype is an ARFF type string such as NUMERIC or + STRING. + """ + self._labels = labels + self._features = features + + def format(self, tokens): + """Returns a string representation of ARFF output for the given data.""" + return self.header_section() + self.data_section(tokens) + + def labels(self): + """Returns the list of classes.""" + return list(self._labels) + + def write(self, outfile, tokens): + """Writes ARFF data to a file for the given data.""" + if not hasattr(outfile, "write"): + outfile = open(outfile, "w") + outfile.write(self.format(tokens)) + outfile.close() + + @staticmethod + def from_train(tokens): + """ + Constructs an ARFF_Formatter instance with class labels and feature + types determined from the given data. Handles boolean, numeric and + string (note: not nominal) types. + """ + # Find the set of all attested labels. + labels = {label for (tok, label) in tokens} + + # Determine the types of all features. + features = {} + for tok, label in tokens: + for (fname, fval) in tok.items(): + if issubclass(type(fval), bool): + ftype = "{True, False}" + elif issubclass(type(fval), (int, float, bool)): + ftype = "NUMERIC" + elif issubclass(type(fval), str): + ftype = "STRING" + elif fval is None: + continue # can't tell the type. + else: + raise ValueError("Unsupported value type %r" % ftype) + + if features.get(fname, ftype) != ftype: + raise ValueError("Inconsistent type for %s" % fname) + features[fname] = ftype + features = sorted(features.items()) + + return ARFF_Formatter(labels, features) + + def header_section(self): + """Returns an ARFF header as a string.""" + # Header comment. + s = ( + "% Weka ARFF file\n" + + "% Generated automatically by NLTK\n" + + "%% %s\n\n" % time.ctime() + ) + + # Relation name + s += "@RELATION rel\n\n" + + # Input attribute specifications + for fname, ftype in self._features: + s += "@ATTRIBUTE %-30r %s\n" % (fname, ftype) + + # Label attribute specification + s += "@ATTRIBUTE %-30r {%s}\n" % ("-label-", ",".join(self._labels)) + + return s + + def data_section(self, tokens, labeled=None): + """ + Returns the ARFF data section for the given data. + + :param tokens: a list of featuresets (dicts) or labelled featuresets + which are tuples (featureset, label). + :param labeled: Indicates whether the given tokens are labeled + or not. If None, then the tokens will be assumed to be + labeled if the first token's value is a tuple or list. + """ + # Check if the tokens are labeled or unlabeled. If unlabeled, + # then use 'None' + if labeled is None: + labeled = tokens and isinstance(tokens[0], (tuple, list)) + if not labeled: + tokens = [(tok, None) for tok in tokens] + + # Data section + s = "\n@DATA\n" + for (tok, label) in tokens: + for fname, ftype in self._features: + s += "%s," % self._fmt_arff_val(tok.get(fname)) + s += "%s\n" % self._fmt_arff_val(label) + + return s + + def _fmt_arff_val(self, fval): + if fval is None: + return "?" 
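The ARFF conversion can also be exercised on its own. from_train() infers one @ATTRIBUTE per feature (boolean, NUMERIC or STRING) plus a nominal -label- attribute, and format() returns the header and data sections as a single string; the toy featuresets below are illustrative:

from nltk.classify.weka import ARFF_Formatter

toks = [
    ({"length": 4, "capitalized": True}, "yes"),
    ({"length": 9, "capitalized": False}, "no"),
]
fmt = ARFF_Formatter.from_train(toks)
print(fmt.format(toks))  # @RELATION/@ATTRIBUTE header followed by the @DATA rows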
+ elif isinstance(fval, (bool, int)): + return "%s" % fval + elif isinstance(fval, float): + return "%r" % fval + else: + return "%r" % fval + + +if __name__ == "__main__": + from nltk.classify.util import binary_names_demo_features, names_demo + + def make_classifier(featuresets): + return WekaClassifier.train("/tmp/name.model", featuresets, "C4.5") + + classifier = names_demo(make_classifier, binary_names_demo_features) diff --git a/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d615ec3b9d73441a52dbd125c849a5c518a61aac Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0265077990d3e0a479c02751a585e384f43b106b Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/babelfish.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd77177faf55c3103648def9ccd9fc1f73a04d5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/chomsky.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1980893b8f0663a00a68a77206e045104ec2995f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/minimalset.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4a17faad1083961ff91e77aee42ad411914ab2d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/sort.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__init__.py b/venv/lib/python3.10/site-packages/nltk/stem/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f46ec0c26303eea6837bc070d8e77b56b48e29f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/__init__.py @@ -0,0 +1,34 @@ +# Natural Language Toolkit: Stemmers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Stemmers + +Interfaces used to remove morphological affixes from words, leaving +only the word stem. Stemming algorithms aim to remove those affixes +required for eg. grammatical role, tense, derivational morphology +leaving only the stem of the word. This is a difficult problem due to +irregular words (eg. common verbs in English), complicated +morphological rules, and part-of-speech and sense ambiguities +(eg. ``ceil-`` is not the stem of ``ceiling``). + +StemmerI defines a standard interface for stemmers. 
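A quick sketch of the package in use: every stemmer exported here implements the same StemmerI.stem() method, but the algorithms differ in aggressiveness, as the classic Porter-vs-Snowball comparison shows:

from nltk.stem import PorterStemmer, SnowballStemmer

word = "generously"
print(PorterStemmer().stem(word))             # 'gener'
print(SnowballStemmer("english").stem(word))  # 'generous'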
+""" + +from nltk.stem.api import StemmerI +from nltk.stem.arlstem import ARLSTem +from nltk.stem.arlstem2 import ARLSTem2 +from nltk.stem.cistem import Cistem +from nltk.stem.isri import ISRIStemmer +from nltk.stem.lancaster import LancasterStemmer +from nltk.stem.porter import PorterStemmer +from nltk.stem.regexp import RegexpStemmer +from nltk.stem.rslp import RSLPStemmer +from nltk.stem.snowball import SnowballStemmer +from nltk.stem.wordnet import WordNetLemmatizer diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10f11599d9e367a1dc041bc8443b4cb39543e191 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c9b2efaed12aa4ab12c5f075b786e0f895ac04d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e8ea3a7513f1c7097945a445392c9a6a6436072 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0232bcd462e02184a7c01b06f2fc03f7485f92b1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36cf91d0fed65818fc6426af33842117704ae6e3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f386784e4d53b94b8e090aaf5114ef596dd92e51 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3438ced8b0e4f74f52ff8af75072f817a4b15575 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37b4ceb4e772ce8b33f2c3e845825cddd5da6e8c 
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cc6b6758938ae5200618c3b5aff43595bee542f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37c2201f4a99b9d59d55b87502d60219b94559e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f70489cb3a41a8fb2e6fa2032e9483473d69d27 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e8ca9a677f198d6bd56042688114d59495cfeae Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/wordnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/wordnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8804ed464ee87d833999e6c4040020c523a99a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/stem/__pycache__/wordnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/api.py b/venv/lib/python3.10/site-packages/nltk/stem/api.py new file mode 100644 index 0000000000000000000000000000000000000000..7a58c059a10ca2649faeb695d042a0c6cbb9ec69 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/api.py @@ -0,0 +1,27 @@ +# Natural Language Toolkit: Stemmer Interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod + + +class StemmerI(metaclass=ABCMeta): + """ + A processing interface for removing morphological affixes from + words. This process is known as stemming. + + """ + + @abstractmethod + def stem(self, token): + """ + Strip affixes from the token and return the stem. + + :param token: The token that should be stemmed. 
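Because StemmerI is an abstract base class with a single abstract method, a custom stemmer only has to provide stem(). A toy illustration (SuffixStripper is hypothetical and not part of NLTK):

from nltk.stem.api import StemmerI

class SuffixStripper(StemmerI):
    # Toy stemmer: strips one of a fixed set of English suffixes.
    def __init__(self, suffixes=("ing", "ed", "s")):
        self._suffixes = suffixes

    def stem(self, token):
        for suffix in self._suffixes:
            if token.endswith(suffix) and len(token) > len(suffix) + 2:
                return token[: -len(suffix)]
        return token

print(SuffixStripper().stem("walking"))  # -> 'walk'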
+ :type token: str + """ diff --git a/venv/lib/python3.10/site-packages/nltk/stem/arlstem.py b/venv/lib/python3.10/site-packages/nltk/stem/arlstem.py new file mode 100644 index 0000000000000000000000000000000000000000..566a4dd36b37e148a24eb840b5e9478dd24d4b55 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/arlstem.py @@ -0,0 +1,361 @@ +# +# Natural Language Toolkit: ARLSTem Stemmer +# +# Copyright (C) 2001-2023 NLTK Project +# +# Author: Kheireddine Abainia (x-programer) +# Algorithms: Kheireddine Abainia +# Siham Ouamour +# Halim Sayoud +# URL: +# For license information, see LICENSE.TXT + + +""" +ARLSTem Arabic Stemmer +The details about the implementation of this algorithm are described in: +K. Abainia, S. Ouamour and H. Sayoud, A Novel Robust Arabic Light Stemmer , +Journal of Experimental & Theoretical Artificial Intelligence (JETAI'17), +Vol. 29, No. 3, 2017, pp. 557-573. +The ARLSTem is a light Arabic stemmer that is based on removing the affixes +from the word (i.e. prefixes, suffixes and infixes). It was evaluated and +compared to several other stemmers using Paice's parameters (under-stemming +index, over-stemming index and stemming weight), and the results showed that +ARLSTem is promising and producing high performances. This stemmer is not +based on any dictionary and can be used on-line effectively. +""" +import re + +from nltk.stem.api import StemmerI + + +class ARLSTem(StemmerI): + """ + ARLSTem stemmer : a light Arabic Stemming algorithm without any dictionary. + Department of Telecommunication & Information Processing. USTHB University, + Algiers, Algeria. + ARLSTem.stem(token) returns the Arabic stem for the input token. + The ARLSTem Stemmer requires that all tokens are encoded using Unicode + encoding. + """ + + def __init__(self): + # different Alif with hamza + self.re_hamzated_alif = re.compile(r"[\u0622\u0623\u0625]") + self.re_alifMaqsura = re.compile(r"[\u0649]") + self.re_diacritics = re.compile(r"[\u064B-\u065F]") + + # Alif Laam, Laam Laam, Fa Laam, Fa Ba + self.pr2 = ["\u0627\u0644", "\u0644\u0644", "\u0641\u0644", "\u0641\u0628"] + # Ba Alif Laam, Kaaf Alif Laam, Waaw Alif Laam + self.pr3 = ["\u0628\u0627\u0644", "\u0643\u0627\u0644", "\u0648\u0627\u0644"] + # Fa Laam Laam, Waaw Laam Laam + self.pr32 = ["\u0641\u0644\u0644", "\u0648\u0644\u0644"] + # Fa Ba Alif Laam, Waaw Ba Alif Laam, Fa Kaaf Alif Laam + self.pr4 = [ + "\u0641\u0628\u0627\u0644", + "\u0648\u0628\u0627\u0644", + "\u0641\u0643\u0627\u0644", + ] + + # Kaf Yaa, Kaf Miim + self.su2 = ["\u0643\u064A", "\u0643\u0645"] + # Ha Alif, Ha Miim + self.su22 = ["\u0647\u0627", "\u0647\u0645"] + # Kaf Miim Alif, Kaf Noon Shadda + self.su3 = ["\u0643\u0645\u0627", "\u0643\u0646\u0651"] + # Ha Miim Alif, Ha Noon Shadda + self.su32 = ["\u0647\u0645\u0627", "\u0647\u0646\u0651"] + + # Alif Noon, Ya Noon, Waaw Noon + self.pl_si2 = ["\u0627\u0646", "\u064A\u0646", "\u0648\u0646"] + # Taa Alif Noon, Taa Ya Noon + self.pl_si3 = ["\u062A\u0627\u0646", "\u062A\u064A\u0646"] + + # Alif Noon, Waaw Noon + self.verb_su2 = ["\u0627\u0646", "\u0648\u0646"] + # Siin Taa, Siin Yaa + self.verb_pr2 = ["\u0633\u062A", "\u0633\u064A"] + # Siin Alif, Siin Noon + self.verb_pr22 = ["\u0633\u0627", "\u0633\u0646"] + # Lam Noon, Lam Taa, Lam Yaa, Lam Hamza + self.verb_pr33 = [ + "\u0644\u0646", + "\u0644\u062A", + "\u0644\u064A", + "\u0644\u0623", + ] + # Taa Miim Alif, Taa Noon Shadda + self.verb_suf3 = ["\u062A\u0645\u0627", "\u062A\u0646\u0651"] + # Noon Alif, Taa Miim, Taa Alif, Waaw Alif + self.verb_suf2 = [ + 
"\u0646\u0627", + "\u062A\u0645", + "\u062A\u0627", + "\u0648\u0627", + ] + # Taa, Alif, Noon + self.verb_suf1 = ["\u062A", "\u0627", "\u0646"] + + def stem(self, token): + """ + call this function to get the word's stem based on ARLSTem . + """ + try: + if token is None: + raise ValueError( + "The word could not be stemmed, because \ + it is empty !" + ) + # remove Arabic diacritics and replace some letters with others + token = self.norm(token) + # strip common prefixes of the nouns + pre = self.pref(token) + if pre is not None: + token = pre + # strip the suffixes which are common to nouns and verbs + token = self.suff(token) + # transform a plural noun to a singular noun + ps = self.plur2sing(token) + if ps is None: + # transform from the feminine form to the masculine form + fm = self.fem2masc(token) + if fm is not None: + return fm + else: + if pre is None: # if the prefixes are not stripped + # strip the verb prefixes and suffixes + return self.verb(token) + else: + return ps + return token + except ValueError as e: + print(e) + + def norm(self, token): + """ + normalize the word by removing diacritics, replacing hamzated Alif + with Alif replacing AlifMaqsura with Yaa and removing Waaw at the + beginning. + """ + # strip Arabic diacritics + token = self.re_diacritics.sub("", token) + # replace Hamzated Alif with Alif bare + token = self.re_hamzated_alif.sub("\u0627", token) + # replace alifMaqsura with Yaa + token = self.re_alifMaqsura.sub("\u064A", token) + # strip the Waaw from the word beginning if the remaining is 3 letters + # at least + if token.startswith("\u0648") and len(token) > 3: + token = token[1:] + return token + + def pref(self, token): + """ + remove prefixes from the words' beginning. + """ + if len(token) > 5: + for p3 in self.pr3: + if token.startswith(p3): + return token[3:] + if len(token) > 6: + for p4 in self.pr4: + if token.startswith(p4): + return token[4:] + if len(token) > 5: + for p3 in self.pr32: + if token.startswith(p3): + return token[3:] + if len(token) > 4: + for p2 in self.pr2: + if token.startswith(p2): + return token[2:] + + def suff(self, token): + """ + remove suffixes from the word's end. + """ + if token.endswith("\u0643") and len(token) > 3: + return token[:-1] + if len(token) > 4: + for s2 in self.su2: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su3: + if token.endswith(s3): + return token[:-3] + if token.endswith("\u0647") and len(token) > 3: + token = token[:-1] + return token + if len(token) > 4: + for s2 in self.su22: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su32: + if token.endswith(s3): + return token[:-3] + if token.endswith("\u0646\u0627") and len(token) > 4: + return token[:-2] + return token + + def fem2masc(self, token): + """ + transform the word from the feminine form to the masculine form. + """ + if token.endswith("\u0629") and len(token) > 3: + return token[:-1] + + def plur2sing(self, token): + """ + transform the word from the plural form to the singular form. 
+ """ + if len(token) > 4: + for ps2 in self.pl_si2: + if token.endswith(ps2): + return token[:-2] + if len(token) > 5: + for ps3 in self.pl_si3: + if token.endswith(ps3): + return token[:-3] + if len(token) > 3 and token.endswith("\u0627\u062A"): + return token[:-2] + if len(token) > 3 and token.startswith("\u0627") and token[2] == "\u0627": + return token[:2] + token[3:] + if len(token) > 4 and token.startswith("\u0627") and token[-2] == "\u0627": + return token[1:-2] + token[-1] + + def verb(self, token): + """ + stem the verb prefixes and suffixes or both + """ + vb = self.verb_t1(token) + if vb is not None: + return vb + vb = self.verb_t2(token) + if vb is not None: + return vb + vb = self.verb_t3(token) + if vb is not None: + return vb + vb = self.verb_t4(token) + if vb is not None: + return vb + vb = self.verb_t5(token) + if vb is not None: + return vb + return self.verb_t6(token) + + def verb_t1(self, token): + """ + stem the present prefixes and suffixes + """ + if len(token) > 5 and token.startswith("\u062A"): # Taa + for s2 in self.pl_si2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 5 and token.startswith("\u064A"): # Yaa + for s2 in self.verb_su2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 4 and token.startswith("\u0627"): # Alif + # Waaw Alif + if len(token) > 5 and token.endswith("\u0648\u0627"): + return token[1:-2] + # Yaa + if token.endswith("\u064A"): + return token[1:-1] + # Alif + if token.endswith("\u0627"): + return token[1:-1] + # Noon + if token.endswith("\u0646"): + return token[1:-1] + # ^Yaa, Noon$ + if len(token) > 4 and token.startswith("\u064A") and token.endswith("\u0646"): + return token[1:-1] + # ^Taa, Noon$ + if len(token) > 4 and token.startswith("\u062A") and token.endswith("\u0646"): + return token[1:-1] + + def verb_t2(self, token): + """ + stem the future prefixes and suffixes + """ + if len(token) > 6: + for s2 in self.pl_si2: + # ^Siin Taa + if token.startswith(self.verb_pr2[0]) and token.endswith(s2): + return token[2:-2] + # ^Siin Yaa, Alif Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[0]): + return token[2:-2] + # ^Siin Yaa, Waaw Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[2]): + return token[2:-2] + # ^Siin Taa, Noon$ + if ( + len(token) > 5 + and token.startswith(self.verb_pr2[0]) + and token.endswith("\u0646") + ): + return token[2:-1] + # ^Siin Yaa, Noon$ + if ( + len(token) > 5 + and token.startswith(self.verb_pr2[1]) + and token.endswith("\u0646") + ): + return token[2:-1] + + def verb_t3(self, token): + """ + stem the present suffixes + """ + if len(token) > 5: + for su3 in self.verb_suf3: + if token.endswith(su3): + return token[:-3] + if len(token) > 4: + for su2 in self.verb_suf2: + if token.endswith(su2): + return token[:-2] + if len(token) > 3: + for su1 in self.verb_suf1: + if token.endswith(su1): + return token[:-1] + + def verb_t4(self, token): + """ + stem the present prefixes + """ + if len(token) > 3: + for pr1 in self.verb_suf1: + if token.startswith(pr1): + return token[1:] + if token.startswith("\u064A"): + return token[1:] + + def verb_t5(self, token): + """ + stem the future prefixes + """ + if len(token) > 4: + for pr2 in self.verb_pr22: + if token.startswith(pr2): + return token[2:] + for pr2 in self.verb_pr2: + if token.startswith(pr2): + return token[2:] + return token + + def verb_t6(self, token): + """ + stem the order prefixes + """ + if len(token) > 4: + for pr3 in self.verb_pr33: + if token.startswith(pr3): + 
return token[2:] + return token diff --git a/venv/lib/python3.10/site-packages/nltk/stem/arlstem2.py b/venv/lib/python3.10/site-packages/nltk/stem/arlstem2.py new file mode 100644 index 0000000000000000000000000000000000000000..a2d9e9551ecffff219821bb570f96b21f588a6f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/arlstem2.py @@ -0,0 +1,457 @@ +# +# Natural Language Toolkit: ARLSTem Stemmer v2 +# +# Copyright (C) 2001-2023 NLTK Project +# +# Author: Kheireddine Abainia (x-programer) +# Algorithms: Kheireddine Abainia +# Hamza Rebbani +# URL: +# For license information, see LICENSE.TXT + + +""" +ARLSTem2 Arabic Light Stemmer +The details about the implementation of this algorithm are described in: +K. Abainia and H. Rebbani, Comparing the Effectiveness of the Improved ARLSTem +Algorithm with Existing Arabic Light Stemmers, International Conference on +Theoretical and Applicative Aspects of Computer Science (ICTAACS'19), Skikda, +Algeria, December 15-16, 2019. +ARLSTem2 is an Arabic light stemmer based on removing the affixes from +the words (i.e. prefixes, suffixes and infixes). It is an improvement +of the previous Arabic light stemmer (ARLSTem). The new version was compared to +the original algorithm and several existing Arabic light stemmers, where the +results showed that the new version considerably improves the under-stemming +errors that are common to light stemmers. Both ARLSTem and ARLSTem2 can be run +online and do not use any dictionary. +""" +import re + +from nltk.stem.api import StemmerI + + +class ARLSTem2(StemmerI): + """ + Return a stemmed Arabic word after removing affixes. This an improved + version of the previous algorithm, which reduces under-stemming errors. + Typically used in Arabic search engine, information retrieval and NLP. 
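For comparison, the older ARLSTem above can be exercised the same way; it ships without a doctest, so this sketch mirrors the example given next for ARLSTem2 (the commented output reflects a reading of its affix tables, not an official test):

from nltk.stem.arlstem import ARLSTem

stemmer = ARLSTem()
print(stemmer.stem("يعمل"))  # the verb prefix is stripped, leaving عمل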
+ + >>> from nltk.stem import arlstem2 + >>> stemmer = ARLSTem2() + >>> word = stemmer.stem('يعمل') + >>> print(word) + عمل + + :param token: The input Arabic word (unicode) to be stemmed + :type token: unicode + :return: A unicode Arabic word + """ + + def __init__(self): + # different Alif with hamza + self.re_hamzated_alif = re.compile(r"[\u0622\u0623\u0625]") + self.re_alifMaqsura = re.compile(r"[\u0649]") + self.re_diacritics = re.compile(r"[\u064B-\u065F]") + + # Alif Laam, Laam Laam, Fa Laam, Fa Ba + self.pr2 = ["\u0627\u0644", "\u0644\u0644", "\u0641\u0644", "\u0641\u0628"] + # Ba Alif Laam, Kaaf Alif Laam, Waaw Alif Laam + self.pr3 = ["\u0628\u0627\u0644", "\u0643\u0627\u0644", "\u0648\u0627\u0644"] + # Fa Laam Laam, Waaw Laam Laam + self.pr32 = ["\u0641\u0644\u0644", "\u0648\u0644\u0644"] + # Fa Ba Alif Laam, Waaw Ba Alif Laam, Fa Kaaf Alif Laam + self.pr4 = [ + "\u0641\u0628\u0627\u0644", + "\u0648\u0628\u0627\u0644", + "\u0641\u0643\u0627\u0644", + ] + + # Kaf Yaa, Kaf Miim + self.su2 = ["\u0643\u064A", "\u0643\u0645"] + # Ha Alif, Ha Miim + self.su22 = ["\u0647\u0627", "\u0647\u0645"] + # Kaf Miim Alif, Kaf Noon Shadda + self.su3 = ["\u0643\u0645\u0627", "\u0643\u0646\u0651"] + # Ha Miim Alif, Ha Noon Shadda + self.su32 = ["\u0647\u0645\u0627", "\u0647\u0646\u0651"] + + # Alif Noon, Ya Noon, Waaw Noon + self.pl_si2 = ["\u0627\u0646", "\u064A\u0646", "\u0648\u0646"] + # Taa Alif Noon, Taa Ya Noon + self.pl_si3 = ["\u062A\u0627\u0646", "\u062A\u064A\u0646"] + + # Alif Noon, Waaw Noon + self.verb_su2 = ["\u0627\u0646", "\u0648\u0646"] + # Siin Taa, Siin Yaa + self.verb_pr2 = ["\u0633\u062A", "\u0633\u064A"] + # Siin Alif, Siin Noon + self.verb_pr22 = ["\u0633\u0627", "\u0633\u0646"] + # Lam Noon, Lam Taa, Lam Yaa, Lam Hamza + self.verb_pr33 = [ + "\u0644\u0646", + "\u0644\u062A", + "\u0644\u064A", + "\u0644\u0623", + ] + # Taa Miim Alif, Taa Noon Shadda + self.verb_suf3 = ["\u062A\u0645\u0627", "\u062A\u0646\u0651"] + # Noon Alif, Taa Miim, Taa Alif, Waaw Alif + self.verb_suf2 = [ + "\u0646\u0627", + "\u062A\u0645", + "\u062A\u0627", + "\u0648\u0627", + ] + # Taa, Alif, Noon + self.verb_suf1 = ["\u062A", "\u0627", "\u0646"] + + def stem1(self, token): + """ + call this function to get the first stem + """ + try: + if token is None: + raise ValueError( + "The word could not be stemmed, because \ + it is empty !" + ) + self.is_verb = False + # remove Arabic diacritics and replace some letters with others + token = self.norm(token) + # strip the common noun prefixes + pre = self.pref(token) + if pre is not None: + token = pre + # transform the feminine form to masculine form + fm = self.fem2masc(token) + if fm is not None: + return fm + # strip the adjective affixes + adj = self.adjective(token) + if adj is not None: + return adj + # strip the suffixes that are common to nouns and verbs + token = self.suff(token) + # transform a plural noun to a singular noun + ps = self.plur2sing(token) + if ps is None: + if pre is None: # if the noun prefixes are not stripped + # strip the verb prefixes and suffixes + verb = self.verb(token) + if verb is not None: + self.is_verb = True + return verb + else: + return ps + return token + except ValueError as e: + print(e) + + def stem(self, token): + # stem the input word + try: + if token is None: + raise ValueError( + "The word could not be stemmed, because \ + it is empty !" 
+ ) + # run the first round of stemming + token = self.stem1(token) + # check if there is some additional noun affixes + if len(token) > 4: + # ^Taa, $Yaa + char + if token.startswith("\u062A") and token[-2] == "\u064A": + token = token[1:-2] + token[-1] + return token + # ^Miim, $Waaw + char + if token.startswith("\u0645") and token[-2] == "\u0648": + token = token[1:-2] + token[-1] + return token + if len(token) > 3: + # !^Alif, $Yaa + if not token.startswith("\u0627") and token.endswith("\u064A"): + token = token[:-1] + return token + # $Laam + if token.startswith("\u0644"): + return token[1:] + return token + except ValueError as e: + print(e) + + def norm(self, token): + """ + normalize the word by removing diacritics, replace hamzated Alif + with Alif bare, replace AlifMaqsura with Yaa and remove Waaw at the + beginning. + """ + # strip Arabic diacritics + token = self.re_diacritics.sub("", token) + # replace Hamzated Alif with Alif bare + token = self.re_hamzated_alif.sub("\u0627", token) + # replace alifMaqsura with Yaa + token = self.re_alifMaqsura.sub("\u064A", token) + # strip the Waaw from the word beginning if the remaining is + # tri-literal at least + if token.startswith("\u0648") and len(token) > 3: + token = token[1:] + return token + + def pref(self, token): + """ + remove prefixes from the words' beginning. + """ + if len(token) > 5: + for p3 in self.pr3: + if token.startswith(p3): + return token[3:] + if len(token) > 6: + for p4 in self.pr4: + if token.startswith(p4): + return token[4:] + if len(token) > 5: + for p3 in self.pr32: + if token.startswith(p3): + return token[3:] + if len(token) > 4: + for p2 in self.pr2: + if token.startswith(p2): + return token[2:] + + def adjective(self, token): + """ + remove the infixes from adjectives + """ + # ^Alif, Alif, $Yaa + if len(token) > 5: + if ( + token.startswith("\u0627") + and token[-3] == "\u0627" + and token.endswith("\u064A") + ): + return token[:-3] + token[-2] + + def suff(self, token): + """ + remove the suffixes from the word's ending. + """ + if token.endswith("\u0643") and len(token) > 3: + return token[:-1] + if len(token) > 4: + for s2 in self.su2: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su3: + if token.endswith(s3): + return token[:-3] + if token.endswith("\u0647") and len(token) > 3: + token = token[:-1] + return token + if len(token) > 4: + for s2 in self.su22: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su32: + if token.endswith(s3): + return token[:-3] + # $Noon and Alif + if token.endswith("\u0646\u0627") and len(token) > 4: + return token[:-2] + return token + + def fem2masc(self, token): + """ + transform the word from the feminine form to the masculine form. 
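stem() differs from stem1() only by a second pass over residual noun affixes. A hedged sketch of where the two diverge; the example word and the commented behaviour are a reading of the code above, not an official doctest:

from nltk.stem.arlstem2 import ARLSTem2

stemmer = ARLSTem2()
word = "مكتوب"
print(stemmer.stem1(word))  # first pass: the noun pattern is left intact
print(stemmer.stem(word))   # second pass: strips the Miim prefix and Waaw infix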
+ """ + if len(token) > 6: + # ^Taa, Yaa, $Yaa and Taa Marbuta + if ( + token.startswith("\u062A") + and token[-4] == "\u064A" + and token.endswith("\u064A\u0629") + ): + return token[1:-4] + token[-3] + # ^Alif, Yaa, $Yaa and Taa Marbuta + if ( + token.startswith("\u0627") + and token[-4] == "\u0627" + and token.endswith("\u064A\u0629") + ): + return token[:-4] + token[-3] + # $Alif, Yaa and Taa Marbuta + if token.endswith("\u0627\u064A\u0629") and len(token) > 5: + return token[:-2] + if len(token) > 4: + # Alif, $Taa Marbuta + if token[1] == "\u0627" and token.endswith("\u0629"): + return token[0] + token[2:-1] + # $Yaa and Taa Marbuta + if token.endswith("\u064A\u0629"): + return token[:-2] + # $Taa Marbuta + if token.endswith("\u0629") and len(token) > 3: + return token[:-1] + + def plur2sing(self, token): + """ + transform the word from the plural form to the singular form. + """ + # ^Haa, $Noon, Waaw + if len(token) > 5: + if token.startswith("\u0645") and token.endswith("\u0648\u0646"): + return token[1:-2] + if len(token) > 4: + for ps2 in self.pl_si2: + if token.endswith(ps2): + return token[:-2] + if len(token) > 5: + for ps3 in self.pl_si3: + if token.endswith(ps3): + return token[:-3] + if len(token) > 4: + # $Alif, Taa + if token.endswith("\u0627\u062A"): + return token[:-2] + # ^Alif Alif + if token.startswith("\u0627") and token[2] == "\u0627": + return token[:2] + token[3:] + # ^Alif Alif + if token.startswith("\u0627") and token[-2] == "\u0627": + return token[1:-2] + token[-1] + + def verb(self, token): + """ + stem the verb prefixes and suffixes or both + """ + vb = self.verb_t1(token) + if vb is not None: + return vb + vb = self.verb_t2(token) + if vb is not None: + return vb + vb = self.verb_t3(token) + if vb is not None: + return vb + vb = self.verb_t4(token) + if vb is not None: + return vb + vb = self.verb_t5(token) + if vb is not None: + return vb + vb = self.verb_t6(token) + return vb + + def verb_t1(self, token): + """ + stem the present tense co-occurred prefixes and suffixes + """ + if len(token) > 5 and token.startswith("\u062A"): # Taa + for s2 in self.pl_si2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 5 and token.startswith("\u064A"): # Yaa + for s2 in self.verb_su2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 4 and token.startswith("\u0627"): # Alif + # Waaw Alif + if len(token) > 5 and token.endswith("\u0648\u0627"): + return token[1:-2] + # Yaa + if token.endswith("\u064A"): + return token[1:-1] + # Alif + if token.endswith("\u0627"): + return token[1:-1] + # Noon + if token.endswith("\u0646"): + return token[1:-1] + # ^Yaa, Noon$ + if len(token) > 4 and token.startswith("\u064A") and token.endswith("\u0646"): + return token[1:-1] + # ^Taa, Noon$ + if len(token) > 4 and token.startswith("\u062A") and token.endswith("\u0646"): + return token[1:-1] + + def verb_t2(self, token): + """ + stem the future tense co-occurred prefixes and suffixes + """ + if len(token) > 6: + for s2 in self.pl_si2: + # ^Siin Taa + if token.startswith(self.verb_pr2[0]) and token.endswith(s2): + return token[2:-2] + # ^Siin Yaa, Alif Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[0]): + return token[2:-2] + # ^Siin Yaa, Waaw Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[2]): + return token[2:-2] + # ^Siin Taa, Noon$ + if ( + len(token) > 5 + and token.startswith(self.verb_pr2[0]) + and token.endswith("\u0646") + ): + return token[2:-1] + # ^Siin Yaa, Noon$ + if ( + len(token) > 5 + 
and token.startswith(self.verb_pr2[1]) + and token.endswith("\u0646") + ): + return token[2:-1] + + def verb_t3(self, token): + """ + stem the present tense suffixes + """ + if len(token) > 5: + for su3 in self.verb_suf3: + if token.endswith(su3): + return token[:-3] + if len(token) > 4: + for su2 in self.verb_suf2: + if token.endswith(su2): + return token[:-2] + if len(token) > 3: + for su1 in self.verb_suf1: + if token.endswith(su1): + return token[:-1] + + def verb_t4(self, token): + """ + stem the present tense prefixes + """ + if len(token) > 3: + for pr1 in self.verb_suf1: + if token.startswith(pr1): + return token[1:] + if token.startswith("\u064A"): + return token[1:] + + def verb_t5(self, token): + """ + stem the future tense prefixes + """ + if len(token) > 4: + for pr2 in self.verb_pr22: + if token.startswith(pr2): + return token[2:] + for pr2 in self.verb_pr2: + if token.startswith(pr2): + return token[2:] + + def verb_t6(self, token): + """ + stem the imperative tense prefixes + """ + if len(token) > 4: + for pr3 in self.verb_pr33: + if token.startswith(pr3): + return token[2:] + + return token diff --git a/venv/lib/python3.10/site-packages/nltk/stem/cistem.py b/venv/lib/python3.10/site-packages/nltk/stem/cistem.py new file mode 100644 index 0000000000000000000000000000000000000000..69c07a42a373cec1eca9d75e9d474c4c1063e70b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/cistem.py @@ -0,0 +1,209 @@ +# Natural Language Toolkit: CISTEM Stemmer for German +# Copyright (C) 2001-2023 NLTK Project +# Author: Leonie Weissweiler +# Tom Aarsen <> (modifications) +# Algorithm: Leonie Weissweiler +# Alexander Fraser +# URL: +# For license information, see LICENSE.TXT + +import re +from typing import Tuple + +from nltk.stem.api import StemmerI + + +class Cistem(StemmerI): + """ + CISTEM Stemmer for German + + This is the official Python implementation of the CISTEM stemmer. + It is based on the paper + Leonie Weissweiler, Alexander Fraser (2017). Developing a Stemmer for German + Based on a Comparative Analysis of Publicly Available Stemmers. + In Proceedings of the German Society for Computational Linguistics and Language + Technology (GSCL) + which can be read here: + https://www.cis.lmu.de/~weissweiler/cistem/ + + In the paper, we conducted an analysis of publicly available stemmers, + developed two gold standards for German stemming and evaluated the stemmers + based on the two gold standards. We then proposed the stemmer implemented here + and show that it achieves slightly better f-measure than the other stemmers and + is thrice as fast as the Snowball stemmer for German while being about as fast + as most other stemmers. + + case_insensitive is a a boolean specifying if case-insensitive stemming + should be used. Case insensitivity improves performance only if words in the + text may be incorrectly upper case. For all-lowercase and correctly cased + text, best performance is achieved by setting case_insensitive for false. + + :param case_insensitive: if True, the stemming is case insensitive. False by default. 
+ :type case_insensitive: bool + """ + + strip_ge = re.compile(r"^ge(.{4,})") + repl_xx = re.compile(r"(.)\1") + strip_emr = re.compile(r"e[mr]$") + strip_nd = re.compile(r"nd$") + strip_t = re.compile(r"t$") + strip_esn = re.compile(r"[esn]$") + repl_xx_back = re.compile(r"(.)\*") + + def __init__(self, case_insensitive: bool = False): + self._case_insensitive = case_insensitive + + @staticmethod + def replace_to(word: str) -> str: + word = word.replace("sch", "$") + word = word.replace("ei", "%") + word = word.replace("ie", "&") + word = Cistem.repl_xx.sub(r"\1*", word) + + return word + + @staticmethod + def replace_back(word: str) -> str: + word = Cistem.repl_xx_back.sub(r"\1\1", word) + word = word.replace("%", "ei") + word = word.replace("&", "ie") + word = word.replace("$", "sch") + + return word + + def stem(self, word: str) -> str: + """Stems the input word. + + :param word: The word that is to be stemmed. + :type word: str + :return: The stemmed word. + :rtype: str + + >>> from nltk.stem.cistem import Cistem + >>> stemmer = Cistem() + >>> s1 = "Speicherbehältern" + >>> stemmer.stem(s1) + 'speicherbehalt' + >>> s2 = "Grenzpostens" + >>> stemmer.stem(s2) + 'grenzpost' + >>> s3 = "Ausgefeiltere" + >>> stemmer.stem(s3) + 'ausgefeilt' + >>> stemmer = Cistem(True) + >>> stemmer.stem(s1) + 'speicherbehal' + >>> stemmer.stem(s2) + 'grenzpo' + >>> stemmer.stem(s3) + 'ausgefeil' + """ + if len(word) == 0: + return word + + upper = word[0].isupper() + word = word.lower() + + word = word.replace("ü", "u") + word = word.replace("ö", "o") + word = word.replace("ä", "a") + word = word.replace("ß", "ss") + + word = Cistem.strip_ge.sub(r"\1", word) + + return self._segment_inner(word, upper)[0] + + def segment(self, word: str) -> Tuple[str, str]: + """ + This method works very similarly to stem (:func:'cistem.stem'). The difference is that in + addition to returning the stem, it also returns the rest that was removed at + the end. To be able to return the stem unchanged so the stem and the rest + can be concatenated to form the original word, all subsitutions that altered + the stem in any other way than by removing letters at the end were left out. + + :param word: The word that is to be stemmed. + :type word: str + :return: A tuple of the stemmed word and the removed suffix. + :rtype: Tuple[str, str] + + >>> from nltk.stem.cistem import Cistem + >>> stemmer = Cistem() + >>> s1 = "Speicherbehältern" + >>> stemmer.segment(s1) + ('speicherbehält', 'ern') + >>> s2 = "Grenzpostens" + >>> stemmer.segment(s2) + ('grenzpost', 'ens') + >>> s3 = "Ausgefeiltere" + >>> stemmer.segment(s3) + ('ausgefeilt', 'ere') + >>> stemmer = Cistem(True) + >>> stemmer.segment(s1) + ('speicherbehäl', 'tern') + >>> stemmer.segment(s2) + ('grenzpo', 'stens') + >>> stemmer.segment(s3) + ('ausgefeil', 'tere') + """ + if len(word) == 0: + return ("", "") + + upper = word[0].isupper() + word = word.lower() + + return self._segment_inner(word, upper) + + def _segment_inner(self, word: str, upper: bool): + """Inner method for iteratively applying the code stemming regexes. + This method receives a pre-processed variant of the word to be stemmed, + or the word to be segmented, and returns a tuple of the word and the + removed suffix. + + :param word: A pre-processed variant of the word that is to be stemmed. + :type word: str + :param upper: Whether the original word started with a capital letter. + :type upper: bool + :return: A tuple of the stemmed word and the removed suffix. 
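As the docstrings above note, segment() only removes material from the end of the word, so the stem and the rest always re-concatenate to the lower-cased input. A small check of that invariant, reusing the doctest's own example word:

from nltk.stem.cistem import Cistem

stemmer = Cistem()
word = "Speicherbehältern"
stem, rest = stemmer.segment(word)
assert stem + rest == word.lower()
print(stem, "+", rest)  # speicherbehält + ern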
+ :rtype: Tuple[str, str] + """ + + rest_length = 0 + word_copy = word[:] + + # Pre-processing before applying the substitution patterns + word = Cistem.replace_to(word) + rest = "" + + # Apply the substitution patterns + while len(word) > 3: + if len(word) > 5: + word, n = Cistem.strip_emr.subn("", word) + if n != 0: + rest_length += 2 + continue + + word, n = Cistem.strip_nd.subn("", word) + if n != 0: + rest_length += 2 + continue + + if not upper or self._case_insensitive: + word, n = Cistem.strip_t.subn("", word) + if n != 0: + rest_length += 1 + continue + + word, n = Cistem.strip_esn.subn("", word) + if n != 0: + rest_length += 1 + continue + else: + break + + # Post-processing after applying the substitution patterns + word = Cistem.replace_back(word) + + if rest_length: + rest = word_copy[-rest_length:] + + return (word, rest) diff --git a/venv/lib/python3.10/site-packages/nltk/stem/isri.py b/venv/lib/python3.10/site-packages/nltk/stem/isri.py new file mode 100644 index 0000000000000000000000000000000000000000..4ae91f1fafaf713330ce78696873e258487d2d0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/isri.py @@ -0,0 +1,395 @@ +# +# Natural Language Toolkit: The ISRI Arabic Stemmer +# +# Copyright (C) 2001-2023 NLTK Project +# Algorithm: Kazem Taghva, Rania Elkhoury, and Jeffrey Coombs (2005) +# Author: Hosam Algasaier +# URL: +# For license information, see LICENSE.TXT + +""" +ISRI Arabic Stemmer + +The algorithm for this stemmer is described in: + +Taghva, K., Elkoury, R., and Coombs, J. 2005. Arabic Stemming without a root dictionary. +Information Science Research Institute. University of Nevada, Las Vegas, USA. + +The Information Science Research Institute’s (ISRI) Arabic stemmer shares many features +with the Khoja stemmer. However, the main difference is that ISRI stemmer does not use root +dictionary. Also, if a root is not found, ISRI stemmer returned normalized form, rather than +returning the original unmodified word. + +Additional adjustments were made to improve the algorithm: + +1- Adding 60 stop words. +2- Adding the pattern (تفاعيل) to ISRI pattern set. +3- The step 2 in the original algorithm was normalizing all hamza. This step is discarded because it +increases the word ambiguities and changes the original root. + +""" +import re + +from nltk.stem.api import StemmerI + + +class ISRIStemmer(StemmerI): + """ + ISRI Arabic stemmer based on algorithm: Arabic Stemming without a root dictionary. + Information Science Research Institute. University of Nevada, Las Vegas, USA. + + A few minor modifications have been made to ISRI basic algorithm. + See the source code of this module for more information. + + isri.stem(token) returns Arabic root for the given token. + + The ISRI Stemmer requires that all tokens have Unicode string types. + If you use Python IDLE on Arabic Windows you have to decode text first + using Arabic '1256' coding. 
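A brief usage sketch of the ISRIStemmer class above: unlike the light stemmers earlier in this package it extracts a root, so a conjugated verb collapses to its tri-literal radical (the commented output follows from the affix tables below and is illustrative rather than an official doctest):

from nltk.stem.isri import ISRIStemmer

stemmer = ISRIStemmer()
print(stemmer.stem("يكتبون"))  # strips the ون suffix and ي prefix, leaving the root كتب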
+ """ + + def __init__(self): + # length three prefixes + self.p3 = [ + "\u0643\u0627\u0644", + "\u0628\u0627\u0644", + "\u0648\u0644\u0644", + "\u0648\u0627\u0644", + ] + + # length two prefixes + self.p2 = ["\u0627\u0644", "\u0644\u0644"] + + # length one prefixes + self.p1 = [ + "\u0644", + "\u0628", + "\u0641", + "\u0633", + "\u0648", + "\u064a", + "\u062a", + "\u0646", + "\u0627", + ] + + # length three suffixes + self.s3 = [ + "\u062a\u0645\u0644", + "\u0647\u0645\u0644", + "\u062a\u0627\u0646", + "\u062a\u064a\u0646", + "\u0643\u0645\u0644", + ] + + # length two suffixes + self.s2 = [ + "\u0648\u0646", + "\u0627\u062a", + "\u0627\u0646", + "\u064a\u0646", + "\u062a\u0646", + "\u0643\u0645", + "\u0647\u0646", + "\u0646\u0627", + "\u064a\u0627", + "\u0647\u0627", + "\u062a\u0645", + "\u0643\u0646", + "\u0646\u064a", + "\u0648\u0627", + "\u0645\u0627", + "\u0647\u0645", + ] + + # length one suffixes + self.s1 = ["\u0629", "\u0647", "\u064a", "\u0643", "\u062a", "\u0627", "\u0646"] + + # groups of length four patterns + self.pr4 = { + 0: ["\u0645"], + 1: ["\u0627"], + 2: ["\u0627", "\u0648", "\u064A"], + 3: ["\u0629"], + } + + # Groups of length five patterns and length three roots + self.pr53 = { + 0: ["\u0627", "\u062a"], + 1: ["\u0627", "\u064a", "\u0648"], + 2: ["\u0627", "\u062a", "\u0645"], + 3: ["\u0645", "\u064a", "\u062a"], + 4: ["\u0645", "\u062a"], + 5: ["\u0627", "\u0648"], + 6: ["\u0627", "\u0645"], + } + + self.re_short_vowels = re.compile(r"[\u064B-\u0652]") + self.re_hamza = re.compile(r"[\u0621\u0624\u0626]") + self.re_initial_hamza = re.compile(r"^[\u0622\u0623\u0625]") + + self.stop_words = [ + "\u064a\u0643\u0648\u0646", + "\u0648\u0644\u064a\u0633", + "\u0648\u0643\u0627\u0646", + "\u0643\u0630\u0644\u0643", + "\u0627\u0644\u062a\u064a", + "\u0648\u0628\u064a\u0646", + "\u0639\u0644\u064a\u0647\u0627", + "\u0645\u0633\u0627\u0621", + "\u0627\u0644\u0630\u064a", + "\u0648\u0643\u0627\u0646\u062a", + "\u0648\u0644\u0643\u0646", + "\u0648\u0627\u0644\u062a\u064a", + "\u062a\u0643\u0648\u0646", + "\u0627\u0644\u064a\u0648\u0645", + "\u0627\u0644\u0644\u0630\u064a\u0646", + "\u0639\u0644\u064a\u0647", + "\u0643\u0627\u0646\u062a", + "\u0644\u0630\u0644\u0643", + "\u0623\u0645\u0627\u0645", + "\u0647\u0646\u0627\u0643", + "\u0645\u0646\u0647\u0627", + "\u0645\u0627\u0632\u0627\u0644", + "\u0644\u0627\u0632\u0627\u0644", + "\u0644\u0627\u064a\u0632\u0627\u0644", + "\u0645\u0627\u064a\u0632\u0627\u0644", + "\u0627\u0635\u0628\u062d", + "\u0623\u0635\u0628\u062d", + "\u0623\u0645\u0633\u0649", + "\u0627\u0645\u0633\u0649", + "\u0623\u0636\u062d\u0649", + "\u0627\u0636\u062d\u0649", + "\u0645\u0627\u0628\u0631\u062d", + "\u0645\u0627\u0641\u062a\u0626", + "\u0645\u0627\u0627\u0646\u0641\u0643", + "\u0644\u0627\u0633\u064a\u0645\u0627", + "\u0648\u0644\u0627\u064a\u0632\u0627\u0644", + "\u0627\u0644\u062d\u0627\u0644\u064a", + "\u0627\u0644\u064a\u0647\u0627", + "\u0627\u0644\u0630\u064a\u0646", + "\u0641\u0627\u0646\u0647", + "\u0648\u0627\u0644\u0630\u064a", + "\u0648\u0647\u0630\u0627", + "\u0644\u0647\u0630\u0627", + "\u0641\u0643\u0627\u0646", + "\u0633\u062a\u0643\u0648\u0646", + "\u0627\u0644\u064a\u0647", + "\u064a\u0645\u0643\u0646", + "\u0628\u0647\u0630\u0627", + "\u0627\u0644\u0630\u0649", + ] + + def stem(self, token): + """ + Stemming a word token using the ISRI stemmer. 
+ """ + token = self.norm( + token, 1 + ) # remove diacritics which representing Arabic short vowels + if token in self.stop_words: + return token # exclude stop words from being processed + token = self.pre32( + token + ) # remove length three and length two prefixes in this order + token = self.suf32( + token + ) # remove length three and length two suffixes in this order + token = self.waw( + token + ) # remove connective ‘و’ if it precedes a word beginning with ‘و’ + token = self.norm(token, 2) # normalize initial hamza to bare alif + # if 4 <= word length <= 7, then stem; otherwise, no stemming + if len(token) == 4: # length 4 word + token = self.pro_w4(token) + elif len(token) == 5: # length 5 word + token = self.pro_w53(token) + token = self.end_w5(token) + elif len(token) == 6: # length 6 word + token = self.pro_w6(token) + token = self.end_w6(token) + elif len(token) == 7: # length 7 word + token = self.suf1(token) + if len(token) == 7: + token = self.pre1(token) + if len(token) == 6: + token = self.pro_w6(token) + token = self.end_w6(token) + return token + + def norm(self, word, num=3): + """ + normalization: + num=1 normalize diacritics + num=2 normalize initial hamza + num=3 both 1&2 + """ + if num == 1: + word = self.re_short_vowels.sub("", word) + elif num == 2: + word = self.re_initial_hamza.sub("\u0627", word) + elif num == 3: + word = self.re_short_vowels.sub("", word) + word = self.re_initial_hamza.sub("\u0627", word) + return word + + def pre32(self, word): + """remove length three and length two prefixes in this order""" + if len(word) >= 6: + for pre3 in self.p3: + if word.startswith(pre3): + return word[3:] + if len(word) >= 5: + for pre2 in self.p2: + if word.startswith(pre2): + return word[2:] + return word + + def suf32(self, word): + """remove length three and length two suffixes in this order""" + if len(word) >= 6: + for suf3 in self.s3: + if word.endswith(suf3): + return word[:-3] + if len(word) >= 5: + for suf2 in self.s2: + if word.endswith(suf2): + return word[:-2] + return word + + def waw(self, word): + """remove connective ‘و’ if it precedes a word beginning with ‘و’""" + if len(word) >= 4 and word[:2] == "\u0648\u0648": + word = word[1:] + return word + + def pro_w4(self, word): + """process length four patterns and extract length three roots""" + if word[0] in self.pr4[0]: # مفعل + word = word[1:] + elif word[1] in self.pr4[1]: # فاعل + word = word[:1] + word[2:] + elif word[2] in self.pr4[2]: # فعال - فعول - فعيل + word = word[:2] + word[3] + elif word[3] in self.pr4[3]: # فعلة + word = word[:-1] + else: + word = self.suf1(word) # do - normalize short sufix + if len(word) == 4: + word = self.pre1(word) # do - normalize short prefix + return word + + def pro_w53(self, word): + """process length five patterns and extract length three roots""" + if word[2] in self.pr53[0] and word[0] == "\u0627": # افتعل - افاعل + word = word[1] + word[3:] + elif word[3] in self.pr53[1] and word[0] == "\u0645": # مفعول - مفعال - مفعيل + word = word[1:3] + word[4] + elif word[0] in self.pr53[2] and word[4] == "\u0629": # مفعلة - تفعلة - افعلة + word = word[1:4] + elif word[0] in self.pr53[3] and word[2] == "\u062a": # مفتعل - يفتعل - تفتعل + word = word[1] + word[3:] + elif word[0] in self.pr53[4] and word[2] == "\u0627": # مفاعل - تفاعل + word = word[1] + word[3:] + elif word[2] in self.pr53[5] and word[4] == "\u0629": # فعولة - فعالة + word = word[:2] + word[3] + elif word[0] in self.pr53[6] and word[1] == "\u0646": # انفعل - منفعل + word = word[2:] + elif word[3] == 
"\u0627" and word[0] == "\u0627": # افعال + word = word[1:3] + word[4] + elif word[4] == "\u0646" and word[3] == "\u0627": # فعلان + word = word[:3] + elif word[3] == "\u064a" and word[0] == "\u062a": # تفعيل + word = word[1:3] + word[4] + elif word[3] == "\u0648" and word[1] == "\u0627": # فاعول + word = word[0] + word[2] + word[4] + elif word[2] == "\u0627" and word[1] == "\u0648": # فواعل + word = word[0] + word[3:] + elif word[3] == "\u0626" and word[2] == "\u0627": # فعائل + word = word[:2] + word[4] + elif word[4] == "\u0629" and word[1] == "\u0627": # فاعلة + word = word[0] + word[2:4] + elif word[4] == "\u064a" and word[2] == "\u0627": # فعالي + word = word[:2] + word[3] + else: + word = self.suf1(word) # do - normalize short sufix + if len(word) == 5: + word = self.pre1(word) # do - normalize short prefix + return word + + def pro_w54(self, word): + """process length five patterns and extract length four roots""" + if word[0] in self.pr53[2]: # تفعلل - افعلل - مفعلل + word = word[1:] + elif word[4] == "\u0629": # فعللة + word = word[:4] + elif word[2] == "\u0627": # فعالل + word = word[:2] + word[3:] + return word + + def end_w5(self, word): + """ending step (word of length five)""" + if len(word) == 4: + word = self.pro_w4(word) + elif len(word) == 5: + word = self.pro_w54(word) + return word + + def pro_w6(self, word): + """process length six patterns and extract length three roots""" + if word.startswith("\u0627\u0633\u062a") or word.startswith( + "\u0645\u0633\u062a" + ): # مستفعل - استفعل + word = word[3:] + elif ( + word[0] == "\u0645" and word[3] == "\u0627" and word[5] == "\u0629" + ): # مفعالة + word = word[1:3] + word[4] + elif ( + word[0] == "\u0627" and word[2] == "\u062a" and word[4] == "\u0627" + ): # افتعال + word = word[1] + word[3] + word[5] + elif ( + word[0] == "\u0627" and word[3] == "\u0648" and word[2] == word[4] + ): # افعوعل + word = word[1] + word[4:] + elif ( + word[0] == "\u062a" and word[2] == "\u0627" and word[4] == "\u064a" + ): # تفاعيل new pattern + word = word[1] + word[3] + word[5] + else: + word = self.suf1(word) # do - normalize short sufix + if len(word) == 6: + word = self.pre1(word) # do - normalize short prefix + return word + + def pro_w64(self, word): + """process length six patterns and extract length four roots""" + if word[0] == "\u0627" and word[4] == "\u0627": # افعلال + word = word[1:4] + word[5] + elif word.startswith("\u0645\u062a"): # متفعلل + word = word[2:] + return word + + def end_w6(self, word): + """ending step (word of length six)""" + if len(word) == 5: + word = self.pro_w53(word) + word = self.end_w5(word) + elif len(word) == 6: + word = self.pro_w64(word) + return word + + def suf1(self, word): + """normalize short sufix""" + for sf1 in self.s1: + if word.endswith(sf1): + return word[:-1] + return word + + def pre1(self, word): + """normalize short prefix""" + for sp1 in self.p1: + if word.startswith(sp1): + return word[1:] + return word diff --git a/venv/lib/python3.10/site-packages/nltk/stem/lancaster.py b/venv/lib/python3.10/site-packages/nltk/stem/lancaster.py new file mode 100644 index 0000000000000000000000000000000000000000..40a87331848c9f25332e5e655bc24d85b563c2c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/lancaster.py @@ -0,0 +1,343 @@ +# Natural Language Toolkit: Stemmers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Tomcavage +# URL: +# For license information, see LICENSE.TXT + +""" +A word stemmer based on the Lancaster (Paice/Husk) stemming algorithm. +Paice, Chris D. 
"Another Stemmer." ACM SIGIR Forum 24.3 (1990): 56-61. +""" +import re + +from nltk.stem.api import StemmerI + + +class LancasterStemmer(StemmerI): + """ + Lancaster Stemmer + + >>> from nltk.stem.lancaster import LancasterStemmer + >>> st = LancasterStemmer() + >>> st.stem('maximum') # Remove "-um" when word is intact + 'maxim' + >>> st.stem('presumably') # Don't remove "-um" when word is not intact + 'presum' + >>> st.stem('multiply') # No action taken if word ends with "-ply" + 'multiply' + >>> st.stem('provision') # Replace "-sion" with "-j" to trigger "j" set of rules + 'provid' + >>> st.stem('owed') # Word starting with vowel must contain at least 2 letters + 'ow' + >>> st.stem('ear') # ditto + 'ear' + >>> st.stem('saying') # Words starting with consonant must contain at least 3 + 'say' + >>> st.stem('crying') # letters and one of those letters must be a vowel + 'cry' + >>> st.stem('string') # ditto + 'string' + >>> st.stem('meant') # ditto + 'meant' + >>> st.stem('cement') # ditto + 'cem' + >>> st_pre = LancasterStemmer(strip_prefix_flag=True) + >>> st_pre.stem('kilometer') # Test Prefix + 'met' + >>> st_custom = LancasterStemmer(rule_tuple=("ssen4>", "s1t.")) + >>> st_custom.stem("ness") # Change s to t + 'nest' + """ + + # The rule list is static since it doesn't change between instances + default_rule_tuple = ( + "ai*2.", # -ia > - if intact + "a*1.", # -a > - if intact + "bb1.", # -bb > -b + "city3s.", # -ytic > -ys + "ci2>", # -ic > - + "cn1t>", # -nc > -nt + "dd1.", # -dd > -d + "dei3y>", # -ied > -y + "deec2ss.", # -ceed >", -cess + "dee1.", # -eed > -ee + "de2>", # -ed > - + "dooh4>", # -hood > - + "e1>", # -e > - + "feil1v.", # -lief > -liev + "fi2>", # -if > - + "gni3>", # -ing > - + "gai3y.", # -iag > -y + "ga2>", # -ag > - + "gg1.", # -gg > -g + "ht*2.", # -th > - if intact + "hsiug5ct.", # -guish > -ct + "hsi3>", # -ish > - + "i*1.", # -i > - if intact + "i1y>", # -i > -y + "ji1d.", # -ij > -id -- see nois4j> & vis3j> + "juf1s.", # -fuj > -fus + "ju1d.", # -uj > -ud + "jo1d.", # -oj > -od + "jeh1r.", # -hej > -her + "jrev1t.", # -verj > -vert + "jsim2t.", # -misj > -mit + "jn1d.", # -nj > -nd + "j1s.", # -j > -s + "lbaifi6.", # -ifiabl > - + "lbai4y.", # -iabl > -y + "lba3>", # -abl > - + "lbi3.", # -ibl > - + "lib2l>", # -bil > -bl + "lc1.", # -cl > c + "lufi4y.", # -iful > -y + "luf3>", # -ful > - + "lu2.", # -ul > - + "lai3>", # -ial > - + "lau3>", # -ual > - + "la2>", # -al > - + "ll1.", # -ll > -l + "mui3.", # -ium > - + "mu*2.", # -um > - if intact + "msi3>", # -ism > - + "mm1.", # -mm > -m + "nois4j>", # -sion > -j + "noix4ct.", # -xion > -ct + "noi3>", # -ion > - + "nai3>", # -ian > - + "na2>", # -an > - + "nee0.", # protect -een + "ne2>", # -en > - + "nn1.", # -nn > -n + "pihs4>", # -ship > - + "pp1.", # -pp > -p + "re2>", # -er > - + "rae0.", # protect -ear + "ra2.", # -ar > - + "ro2>", # -or > - + "ru2>", # -ur > - + "rr1.", # -rr > -r + "rt1>", # -tr > -t + "rei3y>", # -ier > -y + "sei3y>", # -ies > -y + "sis2.", # -sis > -s + "si2>", # -is > - + "ssen4>", # -ness > - + "ss0.", # protect -ss + "suo3>", # -ous > - + "su*2.", # -us > - if intact + "s*1>", # -s > - if intact + "s0.", # -s > -s + "tacilp4y.", # -plicat > -ply + "ta2>", # -at > - + "tnem4>", # -ment > - + "tne3>", # -ent > - + "tna3>", # -ant > - + "tpir2b.", # -ript > -rib + "tpro2b.", # -orpt > -orb + "tcud1.", # -duct > -duc + "tpmus2.", # -sumpt > -sum + "tpec2iv.", # -cept > -ceiv + "tulo2v.", # -olut > -olv + "tsis0.", # protect -sist + "tsi3>", # -ist > - + "tt1.", # -tt > -t + "uqi3.", # 
-iqu > - + "ugo1.", # -ogu > -og + "vis3j>", # -siv > -j + "vie0.", # protect -eiv + "vi2>", # -iv > - + "ylb1>", # -bly > -bl + "yli3y>", # -ily > -y + "ylp0.", # protect -ply + "yl2>", # -ly > - + "ygo1.", # -ogy > -og + "yhp1.", # -phy > -ph + "ymo1.", # -omy > -om + "ypo1.", # -opy > -op + "yti3>", # -ity > - + "yte3>", # -ety > - + "ytl2.", # -lty > -l + "yrtsi5.", # -istry > - + "yra3>", # -ary > - + "yro3>", # -ory > - + "yfi3.", # -ify > - + "ycn2t>", # -ncy > -nt + "yca3>", # -acy > - + "zi2>", # -iz > - + "zy1s.", # -yz > -ys + ) + + def __init__(self, rule_tuple=None, strip_prefix_flag=False): + """Create an instance of the Lancaster stemmer.""" + # Setup an empty rule dictionary - this will be filled in later + self.rule_dictionary = {} + # Check if a user wants to strip prefix + self._strip_prefix = strip_prefix_flag + # Check if a user wants to use his/her own rule tuples. + self._rule_tuple = rule_tuple if rule_tuple else self.default_rule_tuple + + def parseRules(self, rule_tuple=None): + """Validate the set of rules used in this stemmer. + + If this function is called as an individual method, without using stem + method, rule_tuple argument will be compiled into self.rule_dictionary. + If this function is called within stem, self._rule_tuple will be used. + + """ + # If there is no argument for the function, use class' own rule tuple. + rule_tuple = rule_tuple if rule_tuple else self._rule_tuple + valid_rule = re.compile(r"^[a-z]+\*?\d[a-z]*[>\.]?$") + # Empty any old rules from the rule set before adding new ones + self.rule_dictionary = {} + + for rule in rule_tuple: + if not valid_rule.match(rule): + raise ValueError(f"The rule {rule} is invalid") + first_letter = rule[0:1] + if first_letter in self.rule_dictionary: + self.rule_dictionary[first_letter].append(rule) + else: + self.rule_dictionary[first_letter] = [rule] + + def stem(self, word): + """Stem a word using the Lancaster stemmer.""" + # Lower-case the word, since all the rules are lower-cased + word = word.lower() + word = self.__stripPrefix(word) if self._strip_prefix else word + + # Save a copy of the original word + intact_word = word + + # If rule dictionary is empty, parse rule tuple. 
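+        # parseRules() indexes each rule under its first character; since a
+        # rule spells its suffix in reverse (e.g. "gni3>" for "-ing"), that
+        # character is the suffix's last letter, so __doStemming can look up
+        # candidate rules by the final letter of the word being stemmed.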
+ if not self.rule_dictionary: + self.parseRules() + + return self.__doStemming(word, intact_word) + + def __doStemming(self, word, intact_word): + """Perform the actual word stemming""" + + valid_rule = re.compile(r"^([a-z]+)(\*?)(\d)([a-z]*)([>\.]?)$") + + proceed = True + + while proceed: + + # Find the position of the last letter of the word to be stemmed + last_letter_position = self.__getLastLetter(word) + + # Only stem the word if it has a last letter and a rule matching that last letter + if ( + last_letter_position < 0 + or word[last_letter_position] not in self.rule_dictionary + ): + proceed = False + + else: + rule_was_applied = False + + # Go through each rule that matches the word's final letter + for rule in self.rule_dictionary[word[last_letter_position]]: + rule_match = valid_rule.match(rule) + if rule_match: + ( + ending_string, + intact_flag, + remove_total, + append_string, + cont_flag, + ) = rule_match.groups() + + # Convert the number of chars to remove when stemming + # from a string to an integer + remove_total = int(remove_total) + + # Proceed if word's ending matches rule's word ending + if word.endswith(ending_string[::-1]): + if intact_flag: + if word == intact_word and self.__isAcceptable( + word, remove_total + ): + word = self.__applyRule( + word, remove_total, append_string + ) + rule_was_applied = True + if cont_flag == ".": + proceed = False + break + elif self.__isAcceptable(word, remove_total): + word = self.__applyRule( + word, remove_total, append_string + ) + rule_was_applied = True + if cont_flag == ".": + proceed = False + break + # If no rules apply, the word doesn't need any more stemming + if rule_was_applied == False: + proceed = False + return word + + def __getLastLetter(self, word): + """Get the zero-based index of the last alphabetic character in this string""" + last_letter = -1 + for position in range(len(word)): + if word[position].isalpha(): + last_letter = position + else: + break + return last_letter + + def __isAcceptable(self, word, remove_total): + """Determine if the word is acceptable for stemming.""" + word_is_acceptable = False + # If the word starts with a vowel, it must be at least 2 + # characters long to be stemmed + if word[0] in "aeiouy": + if len(word) - remove_total >= 2: + word_is_acceptable = True + # If the word starts with a consonant, it must be at least 3 + # characters long (including one vowel) to be stemmed + elif len(word) - remove_total >= 3: + if word[1] in "aeiouy": + word_is_acceptable = True + elif word[2] in "aeiouy": + word_is_acceptable = True + return word_is_acceptable + + def __applyRule(self, word, remove_total, append_string): + """Apply the stemming rule to the word""" + # Remove letters from the end of the word + new_word_length = len(word) - remove_total + word = word[0:new_word_length] + + # And add new letters to the end of the truncated word + if append_string: + word += append_string + return word + + def __stripPrefix(self, word): + """Remove prefix from a word. + + This function originally taken from Whoosh. 
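+        For example, with strip_prefix_flag=True the word 'kilometer' is
+        reduced to 'meter' here before the suffix rules run, which is why
+        the class doctest above stems it to 'met'.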
+ + """ + for prefix in ( + "kilo", + "micro", + "milli", + "intra", + "ultra", + "mega", + "nano", + "pico", + "pseudo", + ): + if word.startswith(prefix): + return word[len(prefix) :] + return word + + def __repr__(self): + return "" diff --git a/venv/lib/python3.10/site-packages/nltk/stem/porter.py b/venv/lib/python3.10/site-packages/nltk/stem/porter.py new file mode 100644 index 0000000000000000000000000000000000000000..c84402d8083677ea9e727f5f5b0998529ad96ba6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/porter.py @@ -0,0 +1,715 @@ +""" +Porter Stemmer + +This is the Porter stemming algorithm. It follows the algorithm +presented in + +Porter, M. "An algorithm for suffix stripping." Program 14.3 (1980): 130-137. + +with some optional deviations that can be turned on or off with the +`mode` argument to the constructor. + +Martin Porter, the algorithm's inventor, maintains a web page about the +algorithm at + + https://www.tartarus.org/~martin/PorterStemmer/ + +which includes another Python implementation and other implementations +in many languages. +""" + +__docformat__ = "plaintext" + +import re + +from nltk.stem.api import StemmerI + + +class PorterStemmer(StemmerI): + """ + A word stemmer based on the Porter stemming algorithm. + + Porter, M. "An algorithm for suffix stripping." + Program 14.3 (1980): 130-137. + + See https://www.tartarus.org/~martin/PorterStemmer/ for the homepage + of the algorithm. + + Martin Porter has endorsed several modifications to the Porter + algorithm since writing his original paper, and those extensions are + included in the implementations on his website. Additionally, others + have proposed further improvements to the algorithm, including NLTK + contributors. There are thus three modes that can be selected by + passing the appropriate constant to the class constructor's `mode` + attribute: + + - PorterStemmer.ORIGINAL_ALGORITHM + + An implementation that is faithful to the original paper. + + Note that Martin Porter has deprecated this version of the + algorithm. Martin distributes implementations of the Porter + Stemmer in many languages, hosted at: + + https://www.tartarus.org/~martin/PorterStemmer/ + + and all of these implementations include his extensions. He + strongly recommends against using the original, published + version of the algorithm; only use this mode if you clearly + understand why you are choosing to do so. + + - PorterStemmer.MARTIN_EXTENSIONS + + An implementation that only uses the modifications to the + algorithm that are included in the implementations on Martin + Porter's website. He has declared Porter frozen, so the + behaviour of those implementations should never change. + + - PorterStemmer.NLTK_EXTENSIONS (default) + + An implementation that includes further improvements devised by + NLTK contributors or taken from other modified implementations + found on the web. + + For the best stemming, you should use the default NLTK_EXTENSIONS + version. However, if you need to get the same results as either the + original algorithm or one of Martin Porter's hosted versions for + compatibility with an existing implementation or dataset, you can use + one of the other modes instead. 
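+    A minimal usage sketch; the outputs below follow from the default
+    NLTK_EXTENSIONS mode (including its table of irregular forms) and from
+    the MARTIN_EXTENSIONS rules as implemented in this module:
+
+    >>> from nltk.stem.porter import PorterStemmer
+    >>> PorterStemmer().stem('dying')
+    'die'
+    >>> PorterStemmer(mode=PorterStemmer.MARTIN_EXTENSIONS).stem('running')
+    'run'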
+ """ + + # Modes the Stemmer can be instantiated in + NLTK_EXTENSIONS = "NLTK_EXTENSIONS" + MARTIN_EXTENSIONS = "MARTIN_EXTENSIONS" + ORIGINAL_ALGORITHM = "ORIGINAL_ALGORITHM" + + def __init__(self, mode=NLTK_EXTENSIONS): + if mode not in ( + self.NLTK_EXTENSIONS, + self.MARTIN_EXTENSIONS, + self.ORIGINAL_ALGORITHM, + ): + raise ValueError( + "Mode must be one of PorterStemmer.NLTK_EXTENSIONS, " + "PorterStemmer.MARTIN_EXTENSIONS, or " + "PorterStemmer.ORIGINAL_ALGORITHM" + ) + + self.mode = mode + + if self.mode == self.NLTK_EXTENSIONS: + # This is a table of irregular forms. It is quite short, + # but still reflects the errors actually drawn to Martin + # Porter's attention over a 20 year period! + irregular_forms = { + "sky": ["sky", "skies"], + "die": ["dying"], + "lie": ["lying"], + "tie": ["tying"], + "news": ["news"], + "inning": ["innings", "inning"], + "outing": ["outings", "outing"], + "canning": ["cannings", "canning"], + "howe": ["howe"], + "proceed": ["proceed"], + "exceed": ["exceed"], + "succeed": ["succeed"], + } + + self.pool = {} + for key in irregular_forms: + for val in irregular_forms[key]: + self.pool[val] = key + + self.vowels = frozenset(["a", "e", "i", "o", "u"]) + + def _is_consonant(self, word, i): + """Returns True if word[i] is a consonant, False otherwise + + A consonant is defined in the paper as follows: + + A consonant in a word is a letter other than A, E, I, O or + U, and other than Y preceded by a consonant. (The fact that + the term `consonant' is defined to some extent in terms of + itself does not make it ambiguous.) So in TOY the consonants + are T and Y, and in SYZYGY they are S, Z and G. If a letter + is not a consonant it is a vowel. + """ + if word[i] in self.vowels: + return False + if word[i] == "y": + if i == 0: + return True + else: + return not self._is_consonant(word, i - 1) + return True + + def _measure(self, stem): + r"""Returns the 'measure' of stem, per definition in the paper + + From the paper: + + A consonant will be denoted by c, a vowel by v. A list + ccc... of length greater than 0 will be denoted by C, and a + list vvv... of length greater than 0 will be denoted by V. + Any word, or part of a word, therefore has one of the four + forms: + + CVCV ... C + CVCV ... V + VCVC ... C + VCVC ... V + + These may all be represented by the single form + + [C]VCVC ... [V] + + where the square brackets denote arbitrary presence of their + contents. Using (VC){m} to denote VC repeated m times, this + may again be written as + + [C](VC){m}[V]. + + m will be called the \measure\ of any word or word part when + represented in this form. The case m = 0 covers the null + word. Here are some examples: + + m=0 TR, EE, TREE, Y, BY. + m=1 TROUBLE, OATS, TREES, IVY. + m=2 TROUBLES, PRIVATE, OATEN, ORRERY. + """ + cv_sequence = "" + + # Construct a string of 'c's and 'v's representing whether each + # character in `stem` is a consonant or a vowel. + # e.g. 
'falafel' becomes 'cvcvcvc', + # 'architecture' becomes 'vcccvcvccvcv' + for i in range(len(stem)): + if self._is_consonant(stem, i): + cv_sequence += "c" + else: + cv_sequence += "v" + + # Count the number of 'vc' occurrences, which is equivalent to + # the number of 'VC' occurrences in Porter's reduced form in the + # docstring above, which is in turn equivalent to `m` + return cv_sequence.count("vc") + + def _has_positive_measure(self, stem): + return self._measure(stem) > 0 + + def _contains_vowel(self, stem): + """Returns True if stem contains a vowel, else False""" + for i in range(len(stem)): + if not self._is_consonant(stem, i): + return True + return False + + def _ends_double_consonant(self, word): + """Implements condition *d from the paper + + Returns True if word ends with a double consonant + """ + return ( + len(word) >= 2 + and word[-1] == word[-2] + and self._is_consonant(word, len(word) - 1) + ) + + def _ends_cvc(self, word): + """Implements condition *o from the paper + + From the paper: + + *o - the stem ends cvc, where the second c is not W, X or Y + (e.g. -WIL, -HOP). + """ + return ( + len(word) >= 3 + and self._is_consonant(word, len(word) - 3) + and not self._is_consonant(word, len(word) - 2) + and self._is_consonant(word, len(word) - 1) + and word[-1] not in ("w", "x", "y") + ) or ( + self.mode == self.NLTK_EXTENSIONS + and len(word) == 2 + and not self._is_consonant(word, 0) + and self._is_consonant(word, 1) + ) + + def _replace_suffix(self, word, suffix, replacement): + """Replaces `suffix` of `word` with `replacement""" + assert word.endswith(suffix), "Given word doesn't end with given suffix" + if suffix == "": + return word + replacement + else: + return word[: -len(suffix)] + replacement + + def _apply_rule_list(self, word, rules): + """Applies the first applicable suffix-removal rule to the word + + Takes a word and a list of suffix-removal rules represented as + 3-tuples, with the first element being the suffix to remove, + the second element being the string to replace it with, and the + final element being the condition for the rule to be applicable, + or None if the rule is unconditional. 
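+        For illustration, an unconditional rule such as ('sses', 'ss', None)
+        maps 'caresses' to 'caress', while a conditional rule only fires if
+        its condition holds for the stripped stem:
+
+        >>> PorterStemmer()._apply_rule_list('caresses', [('sses', 'ss', None)])
+        'caress'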
+ """ + for rule in rules: + suffix, replacement, condition = rule + if suffix == "*d" and self._ends_double_consonant(word): + stem = word[:-2] + if condition is None or condition(stem): + return stem + replacement + else: + # Don't try any further rules + return word + if word.endswith(suffix): + stem = self._replace_suffix(word, suffix, "") + if condition is None or condition(stem): + return stem + replacement + else: + # Don't try any further rules + return word + + return word + + def _step1a(self, word): + """Implements Step 1a from "An algorithm for suffix stripping" + + From the paper: + + SSES -> SS caresses -> caress + IES -> I ponies -> poni + ties -> ti + SS -> SS caress -> caress + S -> cats -> cat + """ + # this NLTK-only rule extends the original algorithm, so + # that 'flies'->'fli' but 'dies'->'die' etc + if self.mode == self.NLTK_EXTENSIONS: + if word.endswith("ies") and len(word) == 4: + return self._replace_suffix(word, "ies", "ie") + + return self._apply_rule_list( + word, + [ + ("sses", "ss", None), # SSES -> SS + ("ies", "i", None), # IES -> I + ("ss", "ss", None), # SS -> SS + ("s", "", None), # S -> + ], + ) + + def _step1b(self, word): + """Implements Step 1b from "An algorithm for suffix stripping" + + From the paper: + + (m>0) EED -> EE feed -> feed + agreed -> agree + (*v*) ED -> plastered -> plaster + bled -> bled + (*v*) ING -> motoring -> motor + sing -> sing + + If the second or third of the rules in Step 1b is successful, + the following is done: + + AT -> ATE conflat(ed) -> conflate + BL -> BLE troubl(ed) -> trouble + IZ -> IZE siz(ed) -> size + (*d and not (*L or *S or *Z)) + -> single letter + hopp(ing) -> hop + tann(ed) -> tan + fall(ing) -> fall + hiss(ing) -> hiss + fizz(ed) -> fizz + (m=1 and *o) -> E fail(ing) -> fail + fil(ing) -> file + + The rule to map to a single letter causes the removal of one of + the double letter pair. The -E is put back on -AT, -BL and -IZ, + so that the suffixes -ATE, -BLE and -IZE can be recognised + later. This E may be removed in step 4. 
+ """ + # this NLTK-only block extends the original algorithm, so that + # 'spied'->'spi' but 'died'->'die' etc + if self.mode == self.NLTK_EXTENSIONS: + if word.endswith("ied"): + if len(word) == 4: + return self._replace_suffix(word, "ied", "ie") + else: + return self._replace_suffix(word, "ied", "i") + + # (m>0) EED -> EE + if word.endswith("eed"): + stem = self._replace_suffix(word, "eed", "") + if self._measure(stem) > 0: + return stem + "ee" + else: + return word + + rule_2_or_3_succeeded = False + + for suffix in ["ed", "ing"]: + if word.endswith(suffix): + intermediate_stem = self._replace_suffix(word, suffix, "") + if self._contains_vowel(intermediate_stem): + rule_2_or_3_succeeded = True + break + + if not rule_2_or_3_succeeded: + return word + + return self._apply_rule_list( + intermediate_stem, + [ + ("at", "ate", None), # AT -> ATE + ("bl", "ble", None), # BL -> BLE + ("iz", "ize", None), # IZ -> IZE + # (*d and not (*L or *S or *Z)) + # -> single letter + ( + "*d", + intermediate_stem[-1], + lambda stem: intermediate_stem[-1] not in ("l", "s", "z"), + ), + # (m=1 and *o) -> E + ( + "", + "e", + lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)), + ), + ], + ) + + def _step1c(self, word): + """Implements Step 1c from "An algorithm for suffix stripping" + + From the paper: + + Step 1c + + (*v*) Y -> I happy -> happi + sky -> sky + """ + + def nltk_condition(stem): + """ + This has been modified from the original Porter algorithm so + that y->i is only done when y is preceded by a consonant, + but not if the stem is only a single consonant, i.e. + + (*c and not c) Y -> I + + So 'happy' -> 'happi', but + 'enjoy' -> 'enjoy' etc + + This is a much better rule. Formerly 'enjoy'->'enjoi' and + 'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but + with this modification that no longer really matters. + + Also, the removal of the contains_vowel(z) condition means + that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and + conflate with 'spied', 'tried', 'flies' ... 
+ """ + return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1) + + def original_condition(stem): + return self._contains_vowel(stem) + + return self._apply_rule_list( + word, + [ + ( + "y", + "i", + nltk_condition + if self.mode == self.NLTK_EXTENSIONS + else original_condition, + ) + ], + ) + + def _step2(self, word): + """Implements Step 2 from "An algorithm for suffix stripping" + + From the paper: + + Step 2 + + (m>0) ATIONAL -> ATE relational -> relate + (m>0) TIONAL -> TION conditional -> condition + rational -> rational + (m>0) ENCI -> ENCE valenci -> valence + (m>0) ANCI -> ANCE hesitanci -> hesitance + (m>0) IZER -> IZE digitizer -> digitize + (m>0) ABLI -> ABLE conformabli -> conformable + (m>0) ALLI -> AL radicalli -> radical + (m>0) ENTLI -> ENT differentli -> different + (m>0) ELI -> E vileli - > vile + (m>0) OUSLI -> OUS analogousli -> analogous + (m>0) IZATION -> IZE vietnamization -> vietnamize + (m>0) ATION -> ATE predication -> predicate + (m>0) ATOR -> ATE operator -> operate + (m>0) ALISM -> AL feudalism -> feudal + (m>0) IVENESS -> IVE decisiveness -> decisive + (m>0) FULNESS -> FUL hopefulness -> hopeful + (m>0) OUSNESS -> OUS callousness -> callous + (m>0) ALITI -> AL formaliti -> formal + (m>0) IVITI -> IVE sensitiviti -> sensitive + (m>0) BILITI -> BLE sensibiliti -> sensible + """ + + if self.mode == self.NLTK_EXTENSIONS: + # Instead of applying the ALLI -> AL rule after '(a)bli' per + # the published algorithm, instead we apply it first, and, + # if it succeeds, run the result through step2 again. + if word.endswith("alli") and self._has_positive_measure( + self._replace_suffix(word, "alli", "") + ): + return self._step2(self._replace_suffix(word, "alli", "al")) + + bli_rule = ("bli", "ble", self._has_positive_measure) + abli_rule = ("abli", "able", self._has_positive_measure) + + rules = [ + ("ational", "ate", self._has_positive_measure), + ("tional", "tion", self._has_positive_measure), + ("enci", "ence", self._has_positive_measure), + ("anci", "ance", self._has_positive_measure), + ("izer", "ize", self._has_positive_measure), + abli_rule if self.mode == self.ORIGINAL_ALGORITHM else bli_rule, + ("alli", "al", self._has_positive_measure), + ("entli", "ent", self._has_positive_measure), + ("eli", "e", self._has_positive_measure), + ("ousli", "ous", self._has_positive_measure), + ("ization", "ize", self._has_positive_measure), + ("ation", "ate", self._has_positive_measure), + ("ator", "ate", self._has_positive_measure), + ("alism", "al", self._has_positive_measure), + ("iveness", "ive", self._has_positive_measure), + ("fulness", "ful", self._has_positive_measure), + ("ousness", "ous", self._has_positive_measure), + ("aliti", "al", self._has_positive_measure), + ("iviti", "ive", self._has_positive_measure), + ("biliti", "ble", self._has_positive_measure), + ] + + if self.mode == self.NLTK_EXTENSIONS: + rules.append(("fulli", "ful", self._has_positive_measure)) + + # The 'l' of the 'logi' -> 'log' rule is put with the stem, + # so that short stems like 'geo' 'theo' etc work like + # 'archaeo' 'philo' etc. 
+ rules.append( + ("logi", "log", lambda stem: self._has_positive_measure(word[:-3])) + ) + + if self.mode == self.MARTIN_EXTENSIONS: + rules.append(("logi", "log", self._has_positive_measure)) + + return self._apply_rule_list(word, rules) + + def _step3(self, word): + """Implements Step 3 from "An algorithm for suffix stripping" + + From the paper: + + Step 3 + + (m>0) ICATE -> IC triplicate -> triplic + (m>0) ATIVE -> formative -> form + (m>0) ALIZE -> AL formalize -> formal + (m>0) ICITI -> IC electriciti -> electric + (m>0) ICAL -> IC electrical -> electric + (m>0) FUL -> hopeful -> hope + (m>0) NESS -> goodness -> good + """ + return self._apply_rule_list( + word, + [ + ("icate", "ic", self._has_positive_measure), + ("ative", "", self._has_positive_measure), + ("alize", "al", self._has_positive_measure), + ("iciti", "ic", self._has_positive_measure), + ("ical", "ic", self._has_positive_measure), + ("ful", "", self._has_positive_measure), + ("ness", "", self._has_positive_measure), + ], + ) + + def _step4(self, word): + """Implements Step 4 from "An algorithm for suffix stripping" + + Step 4 + + (m>1) AL -> revival -> reviv + (m>1) ANCE -> allowance -> allow + (m>1) ENCE -> inference -> infer + (m>1) ER -> airliner -> airlin + (m>1) IC -> gyroscopic -> gyroscop + (m>1) ABLE -> adjustable -> adjust + (m>1) IBLE -> defensible -> defens + (m>1) ANT -> irritant -> irrit + (m>1) EMENT -> replacement -> replac + (m>1) MENT -> adjustment -> adjust + (m>1) ENT -> dependent -> depend + (m>1 and (*S or *T)) ION -> adoption -> adopt + (m>1) OU -> homologou -> homolog + (m>1) ISM -> communism -> commun + (m>1) ATE -> activate -> activ + (m>1) ITI -> angulariti -> angular + (m>1) OUS -> homologous -> homolog + (m>1) IVE -> effective -> effect + (m>1) IZE -> bowdlerize -> bowdler + + The suffixes are now removed. All that remains is a little + tidying up. + """ + measure_gt_1 = lambda stem: self._measure(stem) > 1 + + return self._apply_rule_list( + word, + [ + ("al", "", measure_gt_1), + ("ance", "", measure_gt_1), + ("ence", "", measure_gt_1), + ("er", "", measure_gt_1), + ("ic", "", measure_gt_1), + ("able", "", measure_gt_1), + ("ible", "", measure_gt_1), + ("ant", "", measure_gt_1), + ("ement", "", measure_gt_1), + ("ment", "", measure_gt_1), + ("ent", "", measure_gt_1), + # (m>1 and (*S or *T)) ION -> + ( + "ion", + "", + lambda stem: self._measure(stem) > 1 and stem[-1] in ("s", "t"), + ), + ("ou", "", measure_gt_1), + ("ism", "", measure_gt_1), + ("ate", "", measure_gt_1), + ("iti", "", measure_gt_1), + ("ous", "", measure_gt_1), + ("ive", "", measure_gt_1), + ("ize", "", measure_gt_1), + ], + ) + + def _step5a(self, word): + """Implements Step 5a from "An algorithm for suffix stripping" + + From the paper: + + Step 5a + + (m>1) E -> probate -> probat + rate -> rate + (m=1 and not *o) E -> cease -> ceas + """ + # Note that Martin's test vocabulary and reference + # implementations are inconsistent in how they handle the case + # where two rules both refer to a suffix that matches the word + # to be stemmed, but only the condition of the second one is + # true. + # Earlier in step2b we had the rules: + # (m>0) EED -> EE + # (*v*) ED -> + # but the examples in the paper included "feed"->"feed", even + # though (*v*) is true for "fe" and therefore the second rule + # alone would map "feed"->"fe". + # However, in THIS case, we need to handle the consecutive rules + # differently and try both conditions (obviously; the second + # rule here would be redundant otherwise). 
Martin's paper makes + # no explicit mention of the inconsistency; you have to infer it + # from the examples. + # For this reason, we can't use _apply_rule_list here. + if word.endswith("e"): + stem = self._replace_suffix(word, "e", "") + if self._measure(stem) > 1: + return stem + if self._measure(stem) == 1 and not self._ends_cvc(stem): + return stem + return word + + def _step5b(self, word): + """Implements Step 5a from "An algorithm for suffix stripping" + + From the paper: + + Step 5b + + (m > 1 and *d and *L) -> single letter + controll -> control + roll -> roll + """ + return self._apply_rule_list( + word, [("ll", "l", lambda stem: self._measure(word[:-1]) > 1)] + ) + + def stem(self, word, to_lowercase=True): + """ + :param to_lowercase: if `to_lowercase=True` the word always lowercase + """ + stem = word.lower() if to_lowercase else word + + if self.mode == self.NLTK_EXTENSIONS and word in self.pool: + return self.pool[stem] + + if self.mode != self.ORIGINAL_ALGORITHM and len(word) <= 2: + # With this line, strings of length 1 or 2 don't go through + # the stemming process, although no mention is made of this + # in the published algorithm. + return stem + + stem = self._step1a(stem) + stem = self._step1b(stem) + stem = self._step1c(stem) + stem = self._step2(stem) + stem = self._step3(stem) + stem = self._step4(stem) + stem = self._step5a(stem) + stem = self._step5b(stem) + + return stem + + def __repr__(self): + return "" + + +def demo(): + """ + A demonstration of the porter stemmer on a sample from + the Penn Treebank corpus. + """ + + from nltk import stem + from nltk.corpus import treebank + + stemmer = stem.PorterStemmer() + + orig = [] + stemmed = [] + for item in treebank.fileids()[:3]: + for (word, tag) in treebank.tagged_words(item): + orig.append(word) + stemmed.append(stemmer.stem(word)) + + # Convert the results to a string, and word-wrap them. + results = " ".join(stemmed) + results = re.sub(r"(.{,70})\s", r"\1\n", results + " ").rstrip() + + # Convert the original to a string, and word wrap it. + original = " ".join(orig) + original = re.sub(r"(.{,70})\s", r"\1\n", original + " ").rstrip() + + # Print the results. + print("-Original-".center(70).replace(" ", "*").replace("-", " ")) + print(original) + print("-Results-".center(70).replace(" ", "*").replace("-", " ")) + print(results) + print("*" * 70) diff --git a/venv/lib/python3.10/site-packages/nltk/stem/regexp.py b/venv/lib/python3.10/site-packages/nltk/stem/regexp.py new file mode 100644 index 0000000000000000000000000000000000000000..473b42bd4a194bc11a51db9db7a00178a945862a --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/regexp.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: Stemmers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT +import re + +from nltk.stem.api import StemmerI + + +class RegexpStemmer(StemmerI): + """ + A stemmer that uses regular expressions to identify morphological + affixes. Any substrings that match the regular expressions will + be removed. + + >>> from nltk.stem import RegexpStemmer + >>> st = RegexpStemmer('ing$|s$|e$|able$', min=4) + >>> st.stem('cars') + 'car' + >>> st.stem('mass') + 'mas' + >>> st.stem('was') + 'was' + >>> st.stem('bee') + 'bee' + >>> st.stem('compute') + 'comput' + >>> st.stem('advisable') + 'advis' + + :type regexp: str or regexp + :param regexp: The regular expression that should be used to + identify morphological affixes. 
+ :type min: int + :param min: The minimum length of string to stem + """ + + def __init__(self, regexp, min=0): + + if not hasattr(regexp, "pattern"): + regexp = re.compile(regexp) + self._regexp = regexp + self._min = min + + def stem(self, word): + if len(word) < self._min: + return word + else: + return self._regexp.sub("", word) + + def __repr__(self): + return f"" diff --git a/venv/lib/python3.10/site-packages/nltk/stem/rslp.py b/venv/lib/python3.10/site-packages/nltk/stem/rslp.py new file mode 100644 index 0000000000000000000000000000000000000000..b1dfeb35e09643e2e75af68cac3bcc7632fc2245 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/rslp.py @@ -0,0 +1,137 @@ +# Natural Language Toolkit: RSLP Stemmer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tiago Tresoldi +# URL: +# For license information, see LICENSE.TXT + +# This code is based on the algorithm presented in the paper "A Stemming +# Algorithm for the Portuguese Language" by Viviane Moreira Orengo and +# Christian Huyck, which unfortunately I had no access to. The code is a +# Python version, with some minor modifications of mine, to the description +# presented at https://www.webcitation.org/5NnvdIzOb and to the C source code +# available at http://www.inf.ufrgs.br/~arcoelho/rslp/integrando_rslp.html. +# Please note that this stemmer is intended for demonstration and educational +# purposes only. Feel free to write me for any comments, including the +# development of a different and/or better stemmer for Portuguese. I also +# suggest using NLTK's mailing list for Portuguese for any discussion. + +# Este código é baseado no algoritmo apresentado no artigo "A Stemming +# Algorithm for the Portuguese Language" de Viviane Moreira Orengo e +# Christian Huyck, o qual infelizmente não tive a oportunidade de ler. O +# código é uma conversão para Python, com algumas pequenas modificações +# minhas, daquele apresentado em https://www.webcitation.org/5NnvdIzOb e do +# código para linguagem C disponível em +# http://www.inf.ufrgs.br/~arcoelho/rslp/integrando_rslp.html. Por favor, +# lembre-se de que este stemmer foi desenvolvido com finalidades unicamente +# de demonstração e didáticas. Sinta-se livre para me escrever para qualquer +# comentário, inclusive sobre o desenvolvimento de um stemmer diferente +# e/ou melhor para o português. Também sugiro utilizar-se a lista de discussão +# do NLTK para o português para qualquer debate. + +from nltk.data import load +from nltk.stem.api import StemmerI + + +class RSLPStemmer(StemmerI): + """ + A stemmer for Portuguese. + + >>> from nltk.stem import RSLPStemmer + >>> st = RSLPStemmer() + >>> # opening lines of Erico Verissimo's "Música ao Longe" + >>> text = ''' + ... Clarissa risca com giz no quadro-negro a paisagem que os alunos + ... devem copiar . Uma casinha de porta e janela , em cima duma + ... coxilha .''' + >>> for token in text.split(): # doctest: +NORMALIZE_WHITESPACE + ... print(st.stem(token)) + clariss risc com giz no quadro-negr a pais que os alun dev copi . + uma cas de port e janel , em cim dum coxilh . 
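+    Each of the seven rule files loaded in the constructor (step0.pt through
+    step6.pt) holds tab-separated rules that read_rule() parses as: a quoted
+    suffix, a minimum stem size, a quoted replacement, and a comma-separated
+    list of exception words.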
+ """ + + def __init__(self): + self._model = [] + + self._model.append(self.read_rule("step0.pt")) + self._model.append(self.read_rule("step1.pt")) + self._model.append(self.read_rule("step2.pt")) + self._model.append(self.read_rule("step3.pt")) + self._model.append(self.read_rule("step4.pt")) + self._model.append(self.read_rule("step5.pt")) + self._model.append(self.read_rule("step6.pt")) + + def read_rule(self, filename): + rules = load("nltk:stemmers/rslp/" + filename, format="raw").decode("utf8") + lines = rules.split("\n") + + lines = [line for line in lines if line != ""] # remove blank lines + lines = [line for line in lines if line[0] != "#"] # remove comments + + # NOTE: a simple but ugly hack to make this parser happy with double '\t's + lines = [line.replace("\t\t", "\t") for line in lines] + + # parse rules + rules = [] + for line in lines: + rule = [] + tokens = line.split("\t") + + # text to be searched for at the end of the string + rule.append(tokens[0][1:-1]) # remove quotes + + # minimum stem size to perform the replacement + rule.append(int(tokens[1])) + + # text to be replaced into + rule.append(tokens[2][1:-1]) # remove quotes + + # exceptions to this rule + rule.append([token[1:-1] for token in tokens[3].split(",")]) + + # append to the results + rules.append(rule) + + return rules + + def stem(self, word): + word = word.lower() + + # the word ends in 's'? apply rule for plural reduction + if word[-1] == "s": + word = self.apply_rule(word, 0) + + # the word ends in 'a'? apply rule for feminine reduction + if word[-1] == "a": + word = self.apply_rule(word, 1) + + # augmentative reduction + word = self.apply_rule(word, 3) + + # adverb reduction + word = self.apply_rule(word, 2) + + # noun reduction + prev_word = word + word = self.apply_rule(word, 4) + if word == prev_word: + # verb reduction + prev_word = word + word = self.apply_rule(word, 5) + if word == prev_word: + # vowel removal + word = self.apply_rule(word, 6) + + return word + + def apply_rule(self, word, rule_index): + rules = self._model[rule_index] + for rule in rules: + suffix_length = len(rule[0]) + if word[-suffix_length:] == rule[0]: # if suffix matches + if len(word) >= suffix_length + rule[1]: # if we have minimum size + if word not in rule[3]: # if not an exception + word = word[:-suffix_length] + rule[2] + break + + return word diff --git a/venv/lib/python3.10/site-packages/nltk/stem/snowball.py b/venv/lib/python3.10/site-packages/nltk/stem/snowball.py new file mode 100644 index 0000000000000000000000000000000000000000..08cd9e76993213eafb0d1698f3f9b019af21068d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/snowball.py @@ -0,0 +1,5946 @@ +# +# Natural Language Toolkit: Snowball Stemmer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Michael Stahl +# Peter Ljunglof (revisions) +# Lakhdar Benzahia (co-writer) +# Assem Chelli (reviewer arabicstemmer) +# Abdelkrim Aries (reviewer arabicstemmer) +# Algorithms: Dr Martin Porter +# Assem Chelli arabic stemming algorithm +# Benzahia Lakhdar +# URL: +# For license information, see LICENSE.TXT + +""" +Snowball stemmers + +This module provides a port of the Snowball stemmers +developed by Martin Porter. + +There is also a demo function: `snowball.demo()`. 
+ +""" + +import re + +from nltk.corpus import stopwords +from nltk.stem import porter +from nltk.stem.api import StemmerI +from nltk.stem.util import prefix_replace, suffix_replace + + +class SnowballStemmer(StemmerI): + + """ + Snowball Stemmer + + The following languages are supported: + Arabic, Danish, Dutch, English, Finnish, French, German, + Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian, + Spanish and Swedish. + + The algorithm for English is documented here: + + Porter, M. \"An algorithm for suffix stripping.\" + Program 14.3 (1980): 130-137. + + The algorithms have been developed by Martin Porter. + These stemmers are called Snowball, because Porter created + a programming language with this name for creating + new stemming algorithms. There is more information available + at http://snowball.tartarus.org/ + + The stemmer is invoked as shown below: + + >>> from nltk.stem import SnowballStemmer # See which languages are supported + >>> print(" ".join(SnowballStemmer.languages)) # doctest: +NORMALIZE_WHITESPACE + arabic danish dutch english finnish french german hungarian + italian norwegian porter portuguese romanian russian + spanish swedish + >>> stemmer = SnowballStemmer("german") # Choose a language + >>> stemmer.stem("Autobahnen") # Stem a word + 'autobahn' + + Invoking the stemmers that way is useful if you do not know the + language to be stemmed at runtime. Alternatively, if you already know + the language, then you can invoke the language specific stemmer directly: + + >>> from nltk.stem.snowball import GermanStemmer + >>> stemmer = GermanStemmer() + >>> stemmer.stem("Autobahnen") + 'autobahn' + + :param language: The language whose subclass is instantiated. + :type language: str or unicode + :param ignore_stopwords: If set to True, stopwords are + not stemmed and returned unchanged. + Set to False by default. + :type ignore_stopwords: bool + :raise ValueError: If there is no stemmer for the specified + language, a ValueError is raised. + """ + + languages = ( + "arabic", + "danish", + "dutch", + "english", + "finnish", + "french", + "german", + "hungarian", + "italian", + "norwegian", + "porter", + "portuguese", + "romanian", + "russian", + "spanish", + "swedish", + ) + + def __init__(self, language, ignore_stopwords=False): + if language not in self.languages: + raise ValueError(f"The language '{language}' is not supported.") + stemmerclass = globals()[language.capitalize() + "Stemmer"] + self.stemmer = stemmerclass(ignore_stopwords) + self.stem = self.stemmer.stem + self.stopwords = self.stemmer.stopwords + + def stem(self, token): + return self.stemmer.stem(self, token) + + +class _LanguageSpecificStemmer(StemmerI): + + """ + This helper subclass offers the possibility + to invoke a specific stemmer directly. + This is useful if you already know the language to be stemmed at runtime. + + Create an instance of the Snowball stemmer. + + :param ignore_stopwords: If set to True, stopwords are + not stemmed and returned unchanged. + Set to False by default. + :type ignore_stopwords: bool + """ + + def __init__(self, ignore_stopwords=False): + # The language is the name of the class, minus the final "Stemmer". + language = type(self).__name__.lower() + if language.endswith("stemmer"): + language = language[:-7] + + self.stopwords = set() + if ignore_stopwords: + try: + for word in stopwords.words(language): + self.stopwords.add(word) + except OSError as e: + raise ValueError( + "{!r} has no list of stopwords. 
Please set" + " 'ignore_stopwords' to 'False'.".format(self) + ) from e + + def __repr__(self): + """ + Print out the string representation of the respective class. + + """ + return f"<{type(self).__name__}>" + + +class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer): + """ + A word stemmer based on the original Porter stemming algorithm. + + Porter, M. \"An algorithm for suffix stripping.\" + Program 14.3 (1980): 130-137. + + A few minor modifications have been made to Porter's basic + algorithm. See the source code of the module + nltk.stem.porter for more information. + + """ + + def __init__(self, ignore_stopwords=False): + _LanguageSpecificStemmer.__init__(self, ignore_stopwords) + porter.PorterStemmer.__init__(self) + + +class _ScandinavianStemmer(_LanguageSpecificStemmer): + + """ + This subclass encapsulates a method for defining the string region R1. + It is used by the Danish, Norwegian, and Swedish stemmer. + + """ + + def _r1_scandinavian(self, word, vowels): + """ + Return the region R1 that is used by the Scandinavian stemmers. + + R1 is the region after the first non-vowel following a vowel, + or is the null region at the end of the word if there is no + such non-vowel. But then R1 is adjusted so that the region + before it contains at least three letters. + + :param word: The word whose region R1 is determined. + :type word: str or unicode + :param vowels: The vowels of the respective language that are + used to determine the region R1. + :type vowels: unicode + :return: the region R1 for the respective word. + :rtype: unicode + :note: This helper method is invoked by the respective stem method of + the subclasses DanishStemmer, NorwegianStemmer, and + SwedishStemmer. It is not to be invoked directly! + + """ + r1 = "" + for i in range(1, len(word)): + if word[i] not in vowels and word[i - 1] in vowels: + if 3 > len(word[: i + 1]) > 0: + r1 = word[3:] + elif len(word[: i + 1]) >= 3: + r1 = word[i + 1 :] + else: + return word + break + + return r1 + + +class _StandardStemmer(_LanguageSpecificStemmer): + + """ + This subclass encapsulates two methods for defining the standard versions + of the string regions R1, R2, and RV. + + """ + + def _r1r2_standard(self, word, vowels): + """ + Return the standard interpretations of the string regions R1 and R2. + + R1 is the region after the first non-vowel following a vowel, + or is the null region at the end of the word if there is no + such non-vowel. + + R2 is the region after the first non-vowel following a vowel + in R1, or is the null region at the end of the word if there + is no such non-vowel. + + :param word: The word whose regions R1 and R2 are determined. + :type word: str or unicode + :param vowels: The vowels of the respective language that are + used to determine the regions R1 and R2. + :type vowels: unicode + :return: (r1,r2), the regions R1 and R2 for the respective word. + :rtype: tuple + :note: This helper method is invoked by the respective stem method of + the subclasses DutchStemmer, FinnishStemmer, + FrenchStemmer, GermanStemmer, ItalianStemmer, + PortugueseStemmer, RomanianStemmer, and SpanishStemmer. + It is not to be invoked directly! 
+ :note: A detailed description of how to define R1 and R2 + can be found at http://snowball.tartarus.org/texts/r1r2.html + + """ + r1 = "" + r2 = "" + for i in range(1, len(word)): + if word[i] not in vowels and word[i - 1] in vowels: + r1 = word[i + 1 :] + break + + for i in range(1, len(r1)): + if r1[i] not in vowels and r1[i - 1] in vowels: + r2 = r1[i + 1 :] + break + + return (r1, r2) + + def _rv_standard(self, word, vowels): + """ + Return the standard interpretation of the string region RV. + + If the second letter is a consonant, RV is the region after the + next following vowel. If the first two letters are vowels, RV is + the region after the next following consonant. Otherwise, RV is + the region after the third letter. + + :param word: The word whose region RV is determined. + :type word: str or unicode + :param vowels: The vowels of the respective language that are + used to determine the region RV. + :type vowels: unicode + :return: the region RV for the respective word. + :rtype: unicode + :note: This helper method is invoked by the respective stem method of + the subclasses ItalianStemmer, PortugueseStemmer, + RomanianStemmer, and SpanishStemmer. It is not to be + invoked directly! + + """ + rv = "" + if len(word) >= 2: + if word[1] not in vowels: + for i in range(2, len(word)): + if word[i] in vowels: + rv = word[i + 1 :] + break + + elif word[0] in vowels and word[1] in vowels: + for i in range(2, len(word)): + if word[i] not in vowels: + rv = word[i + 1 :] + break + else: + rv = word[3:] + + return rv + + +class ArabicStemmer(_StandardStemmer): + """ + https://github.com/snowballstem/snowball/blob/master/algorithms/arabic/stem_Unicode.sbl (Original Algorithm) + The Snowball Arabic light Stemmer + Algorithm: + + - Assem Chelli + - Abdelkrim Aries + - Lakhdar Benzahia + + NLTK Version Author: + + - Lakhdar Benzahia + """ + + # Normalize_pre stes + __vocalization = re.compile( + r"[\u064b-\u064c-\u064d-\u064e-\u064f-\u0650-\u0651-\u0652]" + ) # ً، ٌ، ٍ، َ، ُ، ِ، ّ، ْ + + __kasheeda = re.compile(r"[\u0640]") # ـ tatweel/kasheeda + + __arabic_punctuation_marks = re.compile(r"[\u060C-\u061B-\u061F]") # ؛ ، ؟ + + # Normalize_post + __last_hamzat = ("\u0623", "\u0625", "\u0622", "\u0624", "\u0626") # أ، إ، آ، ؤ، ئ + + # normalize other hamza's + __initial_hamzat = re.compile(r"^[\u0622\u0623\u0625]") # أ، إ، آ + + __waw_hamza = re.compile(r"[\u0624]") # ؤ + + __yeh_hamza = re.compile(r"[\u0626]") # ئ + + __alefat = re.compile(r"[\u0623\u0622\u0625]") # أ، إ، آ + + # Checks + __checks1 = ( + "\u0643\u0627\u0644", + "\u0628\u0627\u0644", # بال، كال + "\u0627\u0644", + "\u0644\u0644", # لل، ال + ) + + __checks2 = ("\u0629", "\u0627\u062a") # ة # female plural ات + + # Suffixes + __suffix_noun_step1a = ( + "\u064a", + "\u0643", + "\u0647", # ي، ك، ه + "\u0646\u0627", + "\u0643\u0645", + "\u0647\u0627", + "\u0647\u0646", + "\u0647\u0645", # نا، كم، ها، هن، هم + "\u0643\u0645\u0627", + "\u0647\u0645\u0627", # كما، هما + ) + + __suffix_noun_step1b = "\u0646" # ن + + __suffix_noun_step2a = ("\u0627", "\u064a", "\u0648") # ا، ي، و + + __suffix_noun_step2b = "\u0627\u062a" # ات + + __suffix_noun_step2c1 = "\u062a" # ت + + __suffix_noun_step2c2 = "\u0629" # ة + + __suffix_noun_step3 = "\u064a" # ي + + __suffix_verb_step1 = ( + "\u0647", + "\u0643", # ه، ك + "\u0646\u064a", + "\u0646\u0627", + "\u0647\u0627", + "\u0647\u0645", # ني، نا، ها، هم + "\u0647\u0646", + "\u0643\u0645", + "\u0643\u0646", # هن، كم، كن + "\u0647\u0645\u0627", + "\u0643\u0645\u0627", + "\u0643\u0645\u0648", # هما، 
كما، كمو + ) + + __suffix_verb_step2a = ( + "\u062a", + "\u0627", + "\u0646", + "\u064a", # ت، ا، ن، ي + "\u0646\u0627", + "\u062a\u0627", + "\u062a\u0646", # نا، تا، تن Past + "\u0627\u0646", + "\u0648\u0646", + "\u064a\u0646", # ان، هن، ين Present + "\u062a\u0645\u0627", # تما + ) + + __suffix_verb_step2b = ("\u0648\u0627", "\u062a\u0645") # وا، تم + + __suffix_verb_step2c = ("\u0648", "\u062a\u0645\u0648") # و # تمو + + __suffix_all_alef_maqsura = "\u0649" # ى + + # Prefixes + __prefix_step1 = ( + "\u0623", # أ + "\u0623\u0623", + "\u0623\u0622", + "\u0623\u0624", + "\u0623\u0627", + "\u0623\u0625", # أأ، أآ، أؤ، أا، أإ + ) + + __prefix_step2a = ("\u0641\u0627\u0644", "\u0648\u0627\u0644") # فال، وال + + __prefix_step2b = ("\u0641", "\u0648") # ف، و + + __prefix_step3a_noun = ( + "\u0627\u0644", + "\u0644\u0644", # لل، ال + "\u0643\u0627\u0644", + "\u0628\u0627\u0644", # بال، كال + ) + + __prefix_step3b_noun = ( + "\u0628", + "\u0643", + "\u0644", # ب، ك، ل + "\u0628\u0628", + "\u0643\u0643", # بب، كك + ) + + __prefix_step3_verb = ( + "\u0633\u064a", + "\u0633\u062a", + "\u0633\u0646", + "\u0633\u0623", + ) # سي، ست، سن، سأ + + __prefix_step4_verb = ( + "\u064a\u0633\u062a", + "\u0646\u0633\u062a", + "\u062a\u0633\u062a", + ) # يست، نست، تست + + # Suffixes added due to Conjugation Verbs + __conjugation_suffix_verb_1 = ("\u0647", "\u0643") # ه، ك + + __conjugation_suffix_verb_2 = ( + "\u0646\u064a", + "\u0646\u0627", + "\u0647\u0627", # ني، نا، ها + "\u0647\u0645", + "\u0647\u0646", + "\u0643\u0645", # هم، هن، كم + "\u0643\u0646", # كن + ) + __conjugation_suffix_verb_3 = ( + "\u0647\u0645\u0627", + "\u0643\u0645\u0627", + "\u0643\u0645\u0648", + ) # هما، كما، كمو + + __conjugation_suffix_verb_4 = ("\u0627", "\u0646", "\u064a") # ا، ن، ي + + __conjugation_suffix_verb_past = ( + "\u0646\u0627", + "\u062a\u0627", + "\u062a\u0646", + ) # نا، تا، تن + + __conjugation_suffix_verb_present = ( + "\u0627\u0646", + "\u0648\u0646", + "\u064a\u0646", + ) # ان، ون، ين + + # Suffixes added due to derivation Names + __conjugation_suffix_noun_1 = ("\u064a", "\u0643", "\u0647") # ي، ك، ه + + __conjugation_suffix_noun_2 = ( + "\u0646\u0627", + "\u0643\u0645", # نا، كم + "\u0647\u0627", + "\u0647\u0646", + "\u0647\u0645", # ها، هن، هم + ) + + __conjugation_suffix_noun_3 = ( + "\u0643\u0645\u0627", + "\u0647\u0645\u0627", + ) # كما، هما + + # Prefixes added due to derivation Names + __prefixes1 = ("\u0648\u0627", "\u0641\u0627") # فا، وا + + __articles_3len = ("\u0643\u0627\u0644", "\u0628\u0627\u0644") # بال كال + + __articles_2len = ("\u0627\u0644", "\u0644\u0644") # ال لل + + # Prepositions letters + __prepositions1 = ("\u0643", "\u0644") # ك، ل + __prepositions2 = ("\u0628\u0628", "\u0643\u0643") # بب، كك + + is_verb = True + is_noun = True + is_defined = False + + suffixes_verb_step1_success = False + suffix_verb_step2a_success = False + suffix_verb_step2b_success = False + suffix_noun_step2c2_success = False + suffix_noun_step1a_success = False + suffix_noun_step2a_success = False + suffix_noun_step2b_success = False + suffixe_noun_step1b_success = False + prefix_step2a_success = False + prefix_step3a_noun_success = False + prefix_step3b_noun_success = False + + def __normalize_pre(self, token): + """ + :param token: string + :return: normalized token type string + """ + # strip diacritics + token = self.__vocalization.sub("", token) + # strip kasheeda + token = self.__kasheeda.sub("", token) + # strip punctuation marks + token = self.__arabic_punctuation_marks.sub("", token) + return token + + def 
__normalize_post(self, token): + # normalize last hamza + for hamza in self.__last_hamzat: + if token.endswith(hamza): + token = suffix_replace(token, hamza, "\u0621") + break + # normalize other hamzat + token = self.__initial_hamzat.sub("\u0627", token) + token = self.__waw_hamza.sub("\u0648", token) + token = self.__yeh_hamza.sub("\u064a", token) + token = self.__alefat.sub("\u0627", token) + return token + + def __checks_1(self, token): + for prefix in self.__checks1: + if token.startswith(prefix): + if prefix in self.__articles_3len and len(token) > 4: + self.is_noun = True + self.is_verb = False + self.is_defined = True + break + + if prefix in self.__articles_2len and len(token) > 3: + self.is_noun = True + self.is_verb = False + self.is_defined = True + break + + def __checks_2(self, token): + for suffix in self.__checks2: + if token.endswith(suffix): + if suffix == "\u0629" and len(token) > 2: + self.is_noun = True + self.is_verb = False + break + + if suffix == "\u0627\u062a" and len(token) > 3: + self.is_noun = True + self.is_verb = False + break + + def __Suffix_Verb_Step1(self, token): + for suffix in self.__suffix_verb_step1: + if token.endswith(suffix): + if suffix in self.__conjugation_suffix_verb_1 and len(token) >= 4: + token = token[:-1] + self.suffixes_verb_step1_success = True + break + + if suffix in self.__conjugation_suffix_verb_2 and len(token) >= 5: + token = token[:-2] + self.suffixes_verb_step1_success = True + break + + if suffix in self.__conjugation_suffix_verb_3 and len(token) >= 6: + token = token[:-3] + self.suffixes_verb_step1_success = True + break + return token + + def __Suffix_Verb_Step2a(self, token): + for suffix in self.__suffix_verb_step2a: + if token.endswith(suffix) and len(token) > 3: + if suffix == "\u062a" and len(token) >= 4: + token = token[:-1] + self.suffix_verb_step2a_success = True + break + + if suffix in self.__conjugation_suffix_verb_4 and len(token) >= 4: + token = token[:-1] + self.suffix_verb_step2a_success = True + break + + if suffix in self.__conjugation_suffix_verb_past and len(token) >= 5: + token = token[:-2] # past + self.suffix_verb_step2a_success = True + break + + if suffix in self.__conjugation_suffix_verb_present and len(token) > 5: + token = token[:-2] # present + self.suffix_verb_step2a_success = True + break + + if suffix == "\u062a\u0645\u0627" and len(token) >= 6: + token = token[:-3] + self.suffix_verb_step2a_success = True + break + return token + + def __Suffix_Verb_Step2c(self, token): + for suffix in self.__suffix_verb_step2c: + if token.endswith(suffix): + if suffix == "\u062a\u0645\u0648" and len(token) >= 6: + token = token[:-3] + break + + if suffix == "\u0648" and len(token) >= 4: + token = token[:-1] + break + return token + + def __Suffix_Verb_Step2b(self, token): + for suffix in self.__suffix_verb_step2b: + if token.endswith(suffix) and len(token) >= 5: + token = token[:-2] + self.suffix_verb_step2b_success = True + break + return token + + def __Suffix_Noun_Step2c2(self, token): + for suffix in self.__suffix_noun_step2c2: + if token.endswith(suffix) and len(token) >= 3: + token = token[:-1] + self.suffix_noun_step2c2_success = True + break + return token + + def __Suffix_Noun_Step1a(self, token): + for suffix in self.__suffix_noun_step1a: + if token.endswith(suffix): + if suffix in self.__conjugation_suffix_noun_1 and len(token) >= 4: + token = token[:-1] + self.suffix_noun_step1a_success = True + break + + if suffix in self.__conjugation_suffix_noun_2 and len(token) >= 5: + token = token[:-2] + 
self.suffix_noun_step1a_success = True + break + + if suffix in self.__conjugation_suffix_noun_3 and len(token) >= 6: + token = token[:-3] + self.suffix_noun_step1a_success = True + break + return token + + def __Suffix_Noun_Step2a(self, token): + for suffix in self.__suffix_noun_step2a: + if token.endswith(suffix) and len(token) > 4: + token = token[:-1] + self.suffix_noun_step2a_success = True + break + return token + + def __Suffix_Noun_Step2b(self, token): + for suffix in self.__suffix_noun_step2b: + if token.endswith(suffix) and len(token) >= 5: + token = token[:-2] + self.suffix_noun_step2b_success = True + break + return token + + def __Suffix_Noun_Step2c1(self, token): + for suffix in self.__suffix_noun_step2c1: + if token.endswith(suffix) and len(token) >= 4: + token = token[:-1] + break + return token + + def __Suffix_Noun_Step1b(self, token): + for suffix in self.__suffix_noun_step1b: + if token.endswith(suffix) and len(token) > 5: + token = token[:-1] + self.suffixe_noun_step1b_success = True + break + return token + + def __Suffix_Noun_Step3(self, token): + for suffix in self.__suffix_noun_step3: + if token.endswith(suffix) and len(token) >= 3: + token = token[:-1] # ya' nisbiya + break + return token + + def __Suffix_All_alef_maqsura(self, token): + for suffix in self.__suffix_all_alef_maqsura: + if token.endswith(suffix): + token = suffix_replace(token, suffix, "\u064a") + return token + + def __Prefix_Step1(self, token): + for prefix in self.__prefix_step1: + if token.startswith(prefix) and len(token) > 3: + if prefix == "\u0623\u0623": + token = prefix_replace(token, prefix, "\u0623") + break + + elif prefix == "\u0623\u0622": + token = prefix_replace(token, prefix, "\u0622") + break + + elif prefix == "\u0623\u0624": + token = prefix_replace(token, prefix, "\u0624") + break + + elif prefix == "\u0623\u0627": + token = prefix_replace(token, prefix, "\u0627") + break + + elif prefix == "\u0623\u0625": + token = prefix_replace(token, prefix, "\u0625") + break + return token + + def __Prefix_Step2a(self, token): + for prefix in self.__prefix_step2a: + if token.startswith(prefix) and len(token) > 5: + token = token[len(prefix) :] + self.prefix_step2a_success = True + break + return token + + def __Prefix_Step2b(self, token): + for prefix in self.__prefix_step2b: + if token.startswith(prefix) and len(token) > 3: + if token[:2] not in self.__prefixes1: + token = token[len(prefix) :] + break + return token + + def __Prefix_Step3a_Noun(self, token): + for prefix in self.__prefix_step3a_noun: + if token.startswith(prefix): + if prefix in self.__articles_2len and len(token) > 4: + token = token[len(prefix) :] + self.prefix_step3a_noun_success = True + break + if prefix in self.__articles_3len and len(token) > 5: + token = token[len(prefix) :] + break + return token + + def __Prefix_Step3b_Noun(self, token): + for prefix in self.__prefix_step3b_noun: + if token.startswith(prefix): + if len(token) > 3: + if prefix == "\u0628": + token = token[len(prefix) :] + self.prefix_step3b_noun_success = True + break + + if prefix in self.__prepositions2: + token = prefix_replace(token, prefix, prefix[1]) + self.prefix_step3b_noun_success = True + break + + if prefix in self.__prepositions1 and len(token) > 4: + token = token[len(prefix) :] # BUG: cause confusion + self.prefix_step3b_noun_success = True + break + return token + + def __Prefix_Step3_Verb(self, token): + for prefix in self.__prefix_step3_verb: + if token.startswith(prefix) and len(token) > 4: + token = prefix_replace(token, prefix, 
prefix[1]) + break + return token + + def __Prefix_Step4_Verb(self, token): + for prefix in self.__prefix_step4_verb: + if token.startswith(prefix) and len(token) > 4: + token = prefix_replace(token, prefix, "\u0627\u0633\u062a") + self.is_verb = True + self.is_noun = False + break + return token + + def stem(self, word): + """ + Stem an Arabic word and return the stemmed form. + + :param word: string + :return: string + """ + # set initial values + self.is_verb = True + self.is_noun = True + self.is_defined = False + + self.suffix_verb_step2a_success = False + self.suffix_verb_step2b_success = False + self.suffix_noun_step2c2_success = False + self.suffix_noun_step1a_success = False + self.suffix_noun_step2a_success = False + self.suffix_noun_step2b_success = False + self.suffixe_noun_step1b_success = False + self.prefix_step2a_success = False + self.prefix_step3a_noun_success = False + self.prefix_step3b_noun_success = False + + modified_word = word + # guess type and properties + # checks1 + self.__checks_1(modified_word) + # checks2 + self.__checks_2(modified_word) + # Pre_Normalization + modified_word = self.__normalize_pre(modified_word) + # Avoid stopwords + if modified_word in self.stopwords or len(modified_word) <= 2: + return modified_word + # Start stemming + if self.is_verb: + modified_word = self.__Suffix_Verb_Step1(modified_word) + if self.suffixes_verb_step1_success: + modified_word = self.__Suffix_Verb_Step2a(modified_word) + if not self.suffix_verb_step2a_success: + modified_word = self.__Suffix_Verb_Step2c(modified_word) + # or next TODO: How to deal with or next instruction + else: + modified_word = self.__Suffix_Verb_Step2b(modified_word) + if not self.suffix_verb_step2b_success: + modified_word = self.__Suffix_Verb_Step2a(modified_word) + if self.is_noun: + modified_word = self.__Suffix_Noun_Step2c2(modified_word) + if not self.suffix_noun_step2c2_success: + if not self.is_defined: + modified_word = self.__Suffix_Noun_Step1a(modified_word) + # if self.suffix_noun_step1a_success: + modified_word = self.__Suffix_Noun_Step2a(modified_word) + if not self.suffix_noun_step2a_success: + modified_word = self.__Suffix_Noun_Step2b(modified_word) + if ( + not self.suffix_noun_step2b_success + and not self.suffix_noun_step2a_success + ): + modified_word = self.__Suffix_Noun_Step2c1(modified_word) + # or next ? 
todo : how to deal with or next + else: + modified_word = self.__Suffix_Noun_Step1b(modified_word) + if self.suffixe_noun_step1b_success: + modified_word = self.__Suffix_Noun_Step2a(modified_word) + if not self.suffix_noun_step2a_success: + modified_word = self.__Suffix_Noun_Step2b(modified_word) + if ( + not self.suffix_noun_step2b_success + and not self.suffix_noun_step2a_success + ): + modified_word = self.__Suffix_Noun_Step2c1(modified_word) + else: + if not self.is_defined: + modified_word = self.__Suffix_Noun_Step2a(modified_word) + modified_word = self.__Suffix_Noun_Step2b(modified_word) + modified_word = self.__Suffix_Noun_Step3(modified_word) + if not self.is_noun and self.is_verb: + modified_word = self.__Suffix_All_alef_maqsura(modified_word) + + # prefixes + modified_word = self.__Prefix_Step1(modified_word) + modified_word = self.__Prefix_Step2a(modified_word) + if not self.prefix_step2a_success: + modified_word = self.__Prefix_Step2b(modified_word) + modified_word = self.__Prefix_Step3a_Noun(modified_word) + if not self.prefix_step3a_noun_success and self.is_noun: + modified_word = self.__Prefix_Step3b_Noun(modified_word) + else: + if not self.prefix_step3b_noun_success and self.is_verb: + modified_word = self.__Prefix_Step3_Verb(modified_word) + modified_word = self.__Prefix_Step4_Verb(modified_word) + + # post normalization stemming + modified_word = self.__normalize_post(modified_word) + stemmed_word = modified_word + return stemmed_word + + +class DanishStemmer(_ScandinavianStemmer): + + """ + The Danish Snowball stemmer. + + :cvar __vowels: The Danish vowels. + :type __vowels: unicode + :cvar __consonants: The Danish consonants. + :type __consonants: unicode + :cvar __double_consonants: The Danish double consonants. + :type __double_consonants: tuple + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Danish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/danish/stemmer.html + + """ + + # The language's vowels and other important characters are defined. + __vowels = "aeiouy\xE6\xE5\xF8" + __consonants = "bcdfghjklmnpqrstvwxz" + __double_consonants = ( + "bb", + "cc", + "dd", + "ff", + "gg", + "hh", + "jj", + "kk", + "ll", + "mm", + "nn", + "pp", + "qq", + "rr", + "ss", + "tt", + "vv", + "ww", + "xx", + "zz", + ) + __s_ending = "abcdfghjklmnoprtvyz\xE5" + + # The different suffixes, divided into the algorithm's steps + # and organized by length, are listed in tuples. + __step1_suffixes = ( + "erendes", + "erende", + "hedens", + "ethed", + "erede", + "heden", + "heder", + "endes", + "ernes", + "erens", + "erets", + "ered", + "ende", + "erne", + "eren", + "erer", + "heds", + "enes", + "eres", + "eret", + "hed", + "ene", + "ere", + "ens", + "ers", + "ets", + "en", + "er", + "es", + "et", + "e", + "s", + ) + __step2_suffixes = ("gd", "dt", "gt", "kt") + __step3_suffixes = ("elig", "l\xF8st", "lig", "els", "ig") + + def stem(self, word): + """ + Stem a Danish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + # Every word is put into lower case for normalization. + word = word.lower() + + if word in self.stopwords: + return word + + # After this, the required regions are generated + # by the respective helper method. + r1 = self._r1_scandinavian(word, self.__vowels) + + # Then the actual stemming process starts. + # Every new step is explicitly indicated + # according to the descriptions on the Snowball website. + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "s": + if word[-2] in self.__s_ending: + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 3 + if r1.endswith("igst"): + word = word[:-2] + r1 = r1[:-2] + + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix == "l\xF8st": + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + + if r1.endswith(self.__step2_suffixes): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 4: Undouble + for double_cons in self.__double_consonants: + if word.endswith(double_cons) and len(word) > 3: + word = word[:-1] + break + + return word + + +class DutchStemmer(_StandardStemmer): + + """ + The Dutch Snowball stemmer. + + :cvar __vowels: The Dutch vowels. + :type __vowels: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm. + :type __step3b_suffixes: tuple + :note: A detailed description of the Dutch + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/dutch/stemmer.html + + """ + + __vowels = "aeiouy\xE8" + __step1_suffixes = ("heden", "ene", "en", "se", "s") + __step3b_suffixes = ("baar", "lijk", "bar", "end", "ing", "ig") + + def stem(self, word): + """ + Stem a Dutch word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step2_success = False + + # Vowel accents are removed. + word = ( + word.replace("\xE4", "a") + .replace("\xE1", "a") + .replace("\xEB", "e") + .replace("\xE9", "e") + .replace("\xED", "i") + .replace("\xEF", "i") + .replace("\xF6", "o") + .replace("\xF3", "o") + .replace("\xFC", "u") + .replace("\xFA", "u") + ) + + # An initial 'y', a 'y' after a vowel, + # and an 'i' between self.__vowels is put into upper case. + # As from now these are treated as consonants. + if word.startswith("y"): + word = "".join(("Y", word[1:])) + + for i in range(1, len(word)): + if word[i - 1] in self.__vowels and word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + for i in range(1, len(word) - 1): + if ( + word[i - 1] in self.__vowels + and word[i] == "i" + and word[i + 1] in self.__vowels + ): + word = "".join((word[:i], "I", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # R1 is adjusted so that the region before it + # contains at least 3 letters. 
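+        # (Illustrative example: for "appel" the standard R1 would be "pel";
+        #  the loop below shortens it to "el" so that "app" precedes it.)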
+ for i in range(1, len(word)): + if word[i] not in self.__vowels and word[i - 1] in self.__vowels: + if 3 > len(word[: i + 1]) > 0: + r1 = word[3:] + elif len(word[: i + 1]) == 0: + return word + break + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "heden": + word = suffix_replace(word, suffix, "heid") + r1 = suffix_replace(r1, suffix, "heid") + if r2.endswith("heden"): + r2 = suffix_replace(r2, suffix, "heid") + + elif ( + suffix in ("ene", "en") + and not word.endswith("heden") + and word[-len(suffix) - 1] not in self.__vowels + and word[-len(suffix) - 3 : -len(suffix)] != "gem" + ): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif ( + suffix in ("se", "s") + and word[-len(suffix) - 1] not in self.__vowels + and word[-len(suffix) - 1] != "j" + ): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 2 + if r1.endswith("e") and word[-2] not in self.__vowels: + step2_success = True + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + # STEP 3a + if r2.endswith("heid") and word[-5] != "c": + word = word[:-4] + r1 = r1[:-4] + r2 = r2[:-4] + + if ( + r1.endswith("en") + and word[-3] not in self.__vowels + and word[-5:-2] != "gem" + ): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + # STEP 3b: Derivational suffixes + for suffix in self.__step3b_suffixes: + if r2.endswith(suffix): + if suffix in ("end", "ing"): + word = word[:-3] + r2 = r2[:-3] + + if r2.endswith("ig") and word[-3] != "e": + word = word[:-2] + else: + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + + elif suffix == "ig" and word[-3] != "e": + word = word[:-2] + + elif suffix == "lijk": + word = word[:-4] + r1 = r1[:-4] + + if r1.endswith("e") and word[-2] not in self.__vowels: + word = word[:-1] + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + + elif suffix == "baar": + word = word[:-4] + + elif suffix == "bar" and step2_success: + word = word[:-3] + break + + # STEP 4: Undouble vowel + if len(word) >= 4: + if word[-1] not in self.__vowels and word[-1] != "I": + if word[-3:-1] in ("aa", "ee", "oo", "uu"): + if word[-4] not in self.__vowels: + word = "".join((word[:-3], word[-3], word[-1])) + + # All occurrences of 'I' and 'Y' are put back into lower case. + word = word.replace("I", "i").replace("Y", "y") + + return word + + +class EnglishStemmer(_StandardStemmer): + + """ + The English Snowball stemmer. + + :cvar __vowels: The English vowels. + :type __vowels: unicode + :cvar __double_consonants: The English double consonants. + :type __double_consonants: tuple + :cvar __li_ending: Letters that may directly appear before a word final 'li'. + :type __li_ending: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. + :type __step0_suffixes: tuple + :cvar __step1a_suffixes: Suffixes to be deleted in step 1a of the algorithm. + :type __step1a_suffixes: tuple + :cvar __step1b_suffixes: Suffixes to be deleted in step 1b of the algorithm. + :type __step1b_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. 
+ :type __step3_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm. + :type __step5_suffixes: tuple + :cvar __special_words: A dictionary containing words + which have to be stemmed specially. + :type __special_words: dict + :note: A detailed description of the English + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/english/stemmer.html + """ + + __vowels = "aeiouy" + __double_consonants = ("bb", "dd", "ff", "gg", "mm", "nn", "pp", "rr", "tt") + __li_ending = "cdeghkmnrt" + __step0_suffixes = ("'s'", "'s", "'") + __step1a_suffixes = ("sses", "ied", "ies", "us", "ss", "s") + __step1b_suffixes = ("eedly", "ingly", "edly", "eed", "ing", "ed") + __step2_suffixes = ( + "ization", + "ational", + "fulness", + "ousness", + "iveness", + "tional", + "biliti", + "lessli", + "entli", + "ation", + "alism", + "aliti", + "ousli", + "iviti", + "fulli", + "enci", + "anci", + "abli", + "izer", + "ator", + "alli", + "bli", + "ogi", + "li", + ) + __step3_suffixes = ( + "ational", + "tional", + "alize", + "icate", + "iciti", + "ative", + "ical", + "ness", + "ful", + ) + __step4_suffixes = ( + "ement", + "ance", + "ence", + "able", + "ible", + "ment", + "ant", + "ent", + "ism", + "ate", + "iti", + "ous", + "ive", + "ize", + "ion", + "al", + "er", + "ic", + ) + __step5_suffixes = ("e", "l") + __special_words = { + "skis": "ski", + "skies": "sky", + "dying": "die", + "lying": "lie", + "tying": "tie", + "idly": "idl", + "gently": "gentl", + "ugly": "ugli", + "early": "earli", + "only": "onli", + "singly": "singl", + "sky": "sky", + "news": "news", + "howe": "howe", + "atlas": "atlas", + "cosmos": "cosmos", + "bias": "bias", + "andes": "andes", + "inning": "inning", + "innings": "inning", + "outing": "outing", + "outings": "outing", + "canning": "canning", + "cannings": "canning", + "herring": "herring", + "herrings": "herring", + "earring": "earring", + "earrings": "earring", + "proceed": "proceed", + "proceeds": "proceed", + "proceeded": "proceed", + "proceeding": "proceed", + "exceed": "exceed", + "exceeds": "exceed", + "exceeded": "exceed", + "exceeding": "exceed", + "succeed": "succeed", + "succeeds": "succeed", + "succeeded": "succeed", + "succeeding": "succeed", + } + + def stem(self, word): + + """ + Stem an English word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords or len(word) <= 2: + return word + + elif word in self.__special_words: + return self.__special_words[word] + + # Map the different apostrophe characters to a single consistent one + word = ( + word.replace("\u2019", "\x27") + .replace("\u2018", "\x27") + .replace("\u201B", "\x27") + ) + + if word.startswith("\x27"): + word = word[1:] + + if word.startswith("y"): + word = "".join(("Y", word[1:])) + + for i in range(1, len(word)): + if word[i - 1] in self.__vowels and word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + step1a_vowel_found = False + step1b_vowel_found = False + + r1 = "" + r2 = "" + + if word.startswith(("gener", "commun", "arsen")): + if word.startswith(("gener", "arsen")): + r1 = word[5:] + else: + r1 = word[6:] + + for i in range(1, len(r1)): + if r1[i] not in self.__vowels and r1[i - 1] in self.__vowels: + r2 = r1[i + 1 :] + break + else: + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # STEP 0 + for suffix in self.__step0_suffixes: + if word.endswith(suffix): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 1a + for suffix in self.__step1a_suffixes: + if word.endswith(suffix): + + if suffix == "sses": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix in ("ied", "ies"): + if len(word[: -len(suffix)]) > 1: + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + else: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif suffix == "s": + for letter in word[:-2]: + if letter in self.__vowels: + step1a_vowel_found = True + break + + if step1a_vowel_found: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + break + + # STEP 1b + for suffix in self.__step1b_suffixes: + if word.endswith(suffix): + if suffix in ("eed", "eedly"): + + if r1.endswith(suffix): + word = suffix_replace(word, suffix, "ee") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ee") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ee") + else: + r2 = "" + else: + for letter in word[: -len(suffix)]: + if letter in self.__vowels: + step1b_vowel_found = True + break + + if step1b_vowel_found: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + + if word.endswith(("at", "bl", "iz")): + word = "".join((word, "e")) + r1 = "".join((r1, "e")) + + if len(word) > 5 or len(r1) >= 3: + r2 = "".join((r2, "e")) + + elif word.endswith(self.__double_consonants): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif ( + r1 == "" + and len(word) >= 3 + and word[-1] not in self.__vowels + and word[-1] not in "wxY" + and word[-2] in self.__vowels + and word[-3] not in self.__vowels + ) or ( + r1 == "" + and len(word) == 2 + and word[0] in self.__vowels + and word[1] not in self.__vowels + ): + + word = "".join((word, "e")) + + if len(r1) > 0: + r1 = "".join((r1, "e")) + + if len(r2) > 0: + r2 = "".join((r2, "e")) + break + + # STEP 1c + if len(word) > 2 and word[-1] in "yY" and word[-2] not in self.__vowels: + word = "".join((word[:-1], "i")) + if len(r1) >= 1: + r1 = "".join((r1[:-1], "i")) + else: + r1 = "" + + if len(r2) >= 1: + r2 = "".join((r2[:-1], "i")) + else: + r2 = "" + + # STEP 2 + for suffix in self.__step2_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix == "tional": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix in ("enci", "anci", "abli"): + word = "".join((word[:-1], "e")) + + if len(r1) >= 1: + r1 = "".join((r1[:-1], 
"e")) + else: + r1 = "" + + if len(r2) >= 1: + r2 = "".join((r2[:-1], "e")) + else: + r2 = "" + + elif suffix == "entli": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix in ("izer", "ization"): + word = suffix_replace(word, suffix, "ize") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ize") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ize") + else: + r2 = "" + + elif suffix in ("ational", "ation", "ator"): + word = suffix_replace(word, suffix, "ate") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ate") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ate") + else: + r2 = "e" + + elif suffix in ("alism", "aliti", "alli"): + word = suffix_replace(word, suffix, "al") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "al") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "al") + else: + r2 = "" + + elif suffix == "fulness": + word = word[:-4] + r1 = r1[:-4] + r2 = r2[:-4] + + elif suffix in ("ousli", "ousness"): + word = suffix_replace(word, suffix, "ous") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ous") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ous") + else: + r2 = "" + + elif suffix in ("iveness", "iviti"): + word = suffix_replace(word, suffix, "ive") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ive") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ive") + else: + r2 = "e" + + elif suffix in ("biliti", "bli"): + word = suffix_replace(word, suffix, "ble") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ble") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ble") + else: + r2 = "" + + elif suffix == "ogi" and word[-4] == "l": + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif suffix in ("fulli", "lessli"): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "li" and word[-3] in self.__li_ending: + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + break + + # STEP 3 + for suffix in self.__step3_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix == "tional": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "ational": + word = suffix_replace(word, suffix, "ate") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ate") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ate") + else: + r2 = "" + + elif suffix == "alize": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + + elif suffix in ("icate", "iciti", "ical"): + word = suffix_replace(word, suffix, "ic") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ic") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ic") + else: + r2 = "" + + elif suffix in ("ful", "ness"): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + + elif suffix == "ative" and r2.endswith(suffix): + word = word[:-5] + r1 = r1[:-5] + r2 = r2[:-5] + break + + # STEP 4 + for suffix in self.__step4_suffixes: + if word.endswith(suffix): + if r2.endswith(suffix): + if suffix == "ion": + if word[-4] in "st": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 5 + if r2.endswith("l") and word[-2] == "l": + word = word[:-1] + elif 
r2.endswith("e"): + word = word[:-1] + elif r1.endswith("e"): + if len(word) >= 4 and ( + word[-2] in self.__vowels + or word[-2] in "wxY" + or word[-3] not in self.__vowels + or word[-4] in self.__vowels + ): + word = word[:-1] + + word = word.replace("Y", "y") + + return word + + +class FinnishStemmer(_StandardStemmer): + + """ + The Finnish Snowball stemmer. + + :cvar __vowels: The Finnish vowels. + :type __vowels: unicode + :cvar __restricted_vowels: A subset of the Finnish vowels. + :type __restricted_vowels: unicode + :cvar __long_vowels: The Finnish vowels in their long forms. + :type __long_vowels: tuple + :cvar __consonants: The Finnish consonants. + :type __consonants: unicode + :cvar __double_consonants: The Finnish double consonants. + :type __double_consonants: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :note: A detailed description of the Finnish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/finnish/stemmer.html + """ + + __vowels = "aeiouy\xE4\xF6" + __restricted_vowels = "aeiou\xE4\xF6" + __long_vowels = ("aa", "ee", "ii", "oo", "uu", "\xE4\xE4", "\xF6\xF6") + __consonants = "bcdfghjklmnpqrstvwxz" + __double_consonants = ( + "bb", + "cc", + "dd", + "ff", + "gg", + "hh", + "jj", + "kk", + "ll", + "mm", + "nn", + "pp", + "qq", + "rr", + "ss", + "tt", + "vv", + "ww", + "xx", + "zz", + ) + __step1_suffixes = ( + "kaan", + "k\xE4\xE4n", + "sti", + "kin", + "han", + "h\xE4n", + "ko", + "k\xF6", + "pa", + "p\xE4", + ) + __step2_suffixes = ("nsa", "ns\xE4", "mme", "nne", "si", "ni", "an", "\xE4n", "en") + __step3_suffixes = ( + "siin", + "tten", + "seen", + "han", + "hen", + "hin", + "hon", + "h\xE4n", + "h\xF6n", + "den", + "tta", + "tt\xE4", + "ssa", + "ss\xE4", + "sta", + "st\xE4", + "lla", + "ll\xE4", + "lta", + "lt\xE4", + "lle", + "ksi", + "ine", + "ta", + "t\xE4", + "na", + "n\xE4", + "a", + "\xE4", + "n", + ) + __step4_suffixes = ( + "impi", + "impa", + "imp\xE4", + "immi", + "imma", + "imm\xE4", + "mpi", + "mpa", + "mp\xE4", + "mmi", + "mma", + "mm\xE4", + "eja", + "ej\xE4", + ) + + def stem(self, word): + """ + Stem a Finnish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step3_success = False + + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # STEP 1: Particles etc. 
+ for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "sti": + if suffix in r2: + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + else: + if word[-len(suffix) - 1] in "ntaeiouy\xE4\xF6": + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 2: Possessives + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + if suffix == "si": + if word[-3] != "k": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "ni": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + if word.endswith("kse"): + word = suffix_replace(word, "kse", "ksi") + + if r1.endswith("kse"): + r1 = suffix_replace(r1, "kse", "ksi") + + if r2.endswith("kse"): + r2 = suffix_replace(r2, "kse", "ksi") + + elif suffix == "an": + if word[-4:-2] in ("ta", "na") or word[-5:-2] in ( + "ssa", + "sta", + "lla", + "lta", + ): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "\xE4n": + if word[-4:-2] in ("t\xE4", "n\xE4") or word[-5:-2] in ( + "ss\xE4", + "st\xE4", + "ll\xE4", + "lt\xE4", + ): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "en": + if word[-5:-2] in ("lle", "ine"): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + else: + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + break + + # STEP 3: Cases + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix in ("han", "hen", "hin", "hon", "h\xE4n", "h\xF6n"): + if ( + (suffix == "han" and word[-4] == "a") + or (suffix == "hen" and word[-4] == "e") + or (suffix == "hin" and word[-4] == "i") + or (suffix == "hon" and word[-4] == "o") + or (suffix == "h\xE4n" and word[-4] == "\xE4") + or (suffix == "h\xF6n" and word[-4] == "\xF6") + ): + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + step3_success = True + + elif suffix in ("siin", "den", "tten"): + if ( + word[-len(suffix) - 1] == "i" + and word[-len(suffix) - 2] in self.__restricted_vowels + ): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + step3_success = True + else: + continue + + elif suffix == "seen": + if word[-6:-4] in self.__long_vowels: + word = word[:-4] + r1 = r1[:-4] + r2 = r2[:-4] + step3_success = True + else: + continue + + elif suffix in ("a", "\xE4"): + if word[-2] in self.__vowels and word[-3] in self.__consonants: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + step3_success = True + + elif suffix in ("tta", "tt\xE4"): + if word[-4] == "e": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + step3_success = True + + elif suffix == "n": + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + step3_success = True + + if word[-2:] == "ie" or word[-2:] in self.__long_vowels: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + step3_success = True + break + + # STEP 4: Other endings + for suffix in self.__step4_suffixes: + if r2.endswith(suffix): + if suffix in ("mpi", "mpa", "mp\xE4", "mmi", "mma", "mm\xE4"): + if word[-5:-3] != "po": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 5: Plurals + if step3_success and len(r1) >= 1 and r1[-1] in "ij": + word = word[:-1] + r1 = r1[:-1] + + elif ( + not step3_success + and len(r1) >= 2 + and r1[-1] == "t" + and r1[-2] in self.__vowels + ): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + if r2.endswith("imma"): + word = word[:-4] + r1 = r1[:-4] + elif r2.endswith("mma") and r2[-5:-3] 
!= "po": + word = word[:-3] + r1 = r1[:-3] + + # STEP 6: Tidying up + if r1[-2:] in self.__long_vowels: + word = word[:-1] + r1 = r1[:-1] + + if len(r1) >= 2 and r1[-2] in self.__consonants and r1[-1] in "a\xE4ei": + word = word[:-1] + r1 = r1[:-1] + + if r1.endswith(("oj", "uj")): + word = word[:-1] + r1 = r1[:-1] + + if r1.endswith("jo"): + word = word[:-1] + r1 = r1[:-1] + + # If the word ends with a double consonant + # followed by zero or more vowels, the last consonant is removed. + for i in range(1, len(word)): + if word[-i] in self.__vowels: + continue + else: + if i == 1: + if word[-i - 1 :] in self.__double_consonants: + word = word[:-1] + else: + if word[-i - 1 : -i + 1] in self.__double_consonants: + word = "".join((word[:-i], word[-i + 1 :])) + break + + return word + + +class FrenchStemmer(_StandardStemmer): + + """ + The French Snowball stemmer. + + :cvar __vowels: The French vowels. + :type __vowels: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm. + :type __step2a_suffixes: tuple + :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm. + :type __step2b_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :note: A detailed description of the French + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/french/stemmer.html + """ + + __vowels = "aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9" + __step1_suffixes = ( + "issements", + "issement", + "atrices", + "atrice", + "ateurs", + "ations", + "logies", + "usions", + "utions", + "ements", + "amment", + "emment", + "ances", + "iqUes", + "ismes", + "ables", + "istes", + "ateur", + "ation", + "logie", + "usion", + "ution", + "ences", + "ement", + "euses", + "ments", + "ance", + "iqUe", + "isme", + "able", + "iste", + "ence", + "it\xE9s", + "ives", + "eaux", + "euse", + "ment", + "eux", + "it\xE9", + "ive", + "ifs", + "aux", + "if", + ) + __step2a_suffixes = ( + "issaIent", + "issantes", + "iraIent", + "issante", + "issants", + "issions", + "irions", + "issais", + "issait", + "issant", + "issent", + "issiez", + "issons", + "irais", + "irait", + "irent", + "iriez", + "irons", + "iront", + "isses", + "issez", + "\xEEmes", + "\xEEtes", + "irai", + "iras", + "irez", + "isse", + "ies", + "ira", + "\xEEt", + "ie", + "ir", + "is", + "it", + "i", + ) + __step2b_suffixes = ( + "eraIent", + "assions", + "erions", + "assent", + "assiez", + "\xE8rent", + "erais", + "erait", + "eriez", + "erons", + "eront", + "aIent", + "antes", + "asses", + "ions", + "erai", + "eras", + "erez", + "\xE2mes", + "\xE2tes", + "ante", + "ants", + "asse", + "\xE9es", + "era", + "iez", + "ais", + "ait", + "ant", + "\xE9e", + "\xE9s", + "er", + "ez", + "\xE2t", + "ai", + "as", + "\xE9", + "a", + ) + __step4_suffixes = ("i\xE8re", "I\xE8re", "ion", "ier", "Ier", "e", "\xEB") + + def stem(self, word): + """ + Stem a French word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + rv_ending_found = False + step2a_success = False + step2b_success = False + + # Every occurrence of 'u' after 'q' is put into upper case. 
+ for i in range(1, len(word)): + if word[i - 1] == "q" and word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + # Every occurrence of 'u' and 'i' + # between vowels is put into upper case. + # Every occurrence of 'y' preceded or + # followed by a vowel is also put into upper case. + for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "i": + word = "".join((word[:i], "I", word[i + 1 :])) + + if word[i - 1] in self.__vowels or word[i + 1] in self.__vowels: + if word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self.__rv_french(word, self.__vowels) + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix == "eaux": + word = word[:-1] + step1_success = True + + elif suffix in ("euse", "euses"): + if suffix in r2: + word = word[: -len(suffix)] + step1_success = True + + elif suffix in r1: + word = suffix_replace(word, suffix, "eux") + step1_success = True + + elif suffix in ("ement", "ements") and suffix in rv: + word = word[: -len(suffix)] + step1_success = True + + if word[-2:] == "iv" and "iv" in r2: + word = word[:-2] + + if word[-2:] == "at" and "at" in r2: + word = word[:-2] + + elif word[-3:] == "eus": + if "eus" in r2: + word = word[:-3] + elif "eus" in r1: + word = "".join((word[:-1], "x")) + + elif word[-3:] in ("abl", "iqU"): + if "abl" in r2 or "iqU" in r2: + word = word[:-3] + + elif word[-3:] in ("i\xE8r", "I\xE8r"): + if "i\xE8r" in rv or "I\xE8r" in rv: + word = "".join((word[:-3], "i")) + + elif suffix == "amment" and suffix in rv: + word = suffix_replace(word, "amment", "ant") + rv = suffix_replace(rv, "amment", "ant") + rv_ending_found = True + + elif suffix == "emment" and suffix in rv: + word = suffix_replace(word, "emment", "ent") + rv_ending_found = True + + elif ( + suffix in ("ment", "ments") + and suffix in rv + and not rv.startswith(suffix) + and rv[rv.rindex(suffix) - 1] in self.__vowels + ): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + rv_ending_found = True + + elif suffix == "aux" and suffix in r1: + word = "".join((word[:-2], "l")) + step1_success = True + + elif ( + suffix in ("issement", "issements") + and suffix in r1 + and word[-len(suffix) - 1] not in self.__vowels + ): + word = word[: -len(suffix)] + step1_success = True + + elif ( + suffix + in ( + "ance", + "iqUe", + "isme", + "able", + "iste", + "eux", + "ances", + "iqUes", + "ismes", + "ables", + "istes", + ) + and suffix in r2 + ): + word = word[: -len(suffix)] + step1_success = True + + elif ( + suffix + in ("atrice", "ateur", "ation", "atrices", "ateurs", "ations") + and suffix in r2 + ): + word = word[: -len(suffix)] + step1_success = True + + if word[-2:] == "ic": + if "ic" in r2: + word = word[:-2] + else: + word = "".join((word[:-2], "iqU")) + + elif suffix in ("logie", "logies") and suffix in r2: + word = suffix_replace(word, suffix, "log") + step1_success = True + + elif suffix in ("usion", "ution", "usions", "utions") and suffix in r2: + word = suffix_replace(word, suffix, "u") + step1_success = True + + elif suffix in ("ence", "ences") and suffix in r2: + word = suffix_replace(word, suffix, "ent") + step1_success = True + + elif suffix in ("it\xE9", "it\xE9s") and suffix in r2: + word = word[: -len(suffix)] + step1_success = True + + if word[-4:] == "abil": + if "abil" in r2: + word = 
word[:-4] + else: + word = "".join((word[:-2], "l")) + + elif word[-2:] == "ic": + if "ic" in r2: + word = word[:-2] + else: + word = "".join((word[:-2], "iqU")) + + elif word[-2:] == "iv": + if "iv" in r2: + word = word[:-2] + + elif suffix in ("if", "ive", "ifs", "ives") and suffix in r2: + word = word[: -len(suffix)] + step1_success = True + + if word[-2:] == "at" and "at" in r2: + word = word[:-2] + + if word[-2:] == "ic": + if "ic" in r2: + word = word[:-2] + else: + word = "".join((word[:-2], "iqU")) + break + + # STEP 2a: Verb suffixes beginning 'i' + if not step1_success or rv_ending_found: + for suffix in self.__step2a_suffixes: + if word.endswith(suffix): + if ( + suffix in rv + and len(rv) > len(suffix) + and rv[rv.rindex(suffix) - 1] not in self.__vowels + ): + word = word[: -len(suffix)] + step2a_success = True + break + + # STEP 2b: Other verb suffixes + if not step2a_success: + for suffix in self.__step2b_suffixes: + if rv.endswith(suffix): + if suffix == "ions" and "ions" in r2: + word = word[:-4] + step2b_success = True + + elif suffix in ( + "eraIent", + "erions", + "\xE8rent", + "erais", + "erait", + "eriez", + "erons", + "eront", + "erai", + "eras", + "erez", + "\xE9es", + "era", + "iez", + "\xE9e", + "\xE9s", + "er", + "ez", + "\xE9", + ): + word = word[: -len(suffix)] + step2b_success = True + + elif suffix in ( + "assions", + "assent", + "assiez", + "aIent", + "antes", + "asses", + "\xE2mes", + "\xE2tes", + "ante", + "ants", + "asse", + "ais", + "ait", + "ant", + "\xE2t", + "ai", + "as", + "a", + ): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + step2b_success = True + if rv.endswith("e"): + word = word[:-1] + break + + # STEP 3 + if step1_success or step2a_success or step2b_success: + if word[-1] == "Y": + word = "".join((word[:-1], "i")) + elif word[-1] == "\xE7": + word = "".join((word[:-1], "c")) + + # STEP 4: Residual suffixes + else: + if len(word) >= 2 and word[-1] == "s" and word[-2] not in "aiou\xE8s": + word = word[:-1] + + for suffix in self.__step4_suffixes: + if word.endswith(suffix): + if suffix in rv: + if suffix == "ion" and suffix in r2 and rv[-4] in "st": + word = word[:-3] + + elif suffix in ("ier", "i\xE8re", "Ier", "I\xE8re"): + word = suffix_replace(word, suffix, "i") + + elif suffix == "e": + word = word[:-1] + + elif suffix == "\xEB" and word[-3:-1] == "gu": + word = word[:-1] + break + + # STEP 5: Undouble + if word.endswith(("enn", "onn", "ett", "ell", "eill")): + word = word[:-1] + + # STEP 6: Un-accent + for i in range(1, len(word)): + if word[-i] not in self.__vowels: + i += 1 + else: + if i != 1 and word[-i] in ("\xE9", "\xE8"): + word = "".join((word[:-i], "e", word[-i + 1 :])) + break + + word = word.replace("I", "i").replace("U", "u").replace("Y", "y") + + return word + + def __rv_french(self, word, vowels): + """ + Return the region RV that is used by the French stemmer. + + If the word begins with two vowels, RV is the region after + the third letter. Otherwise, it is the region after the first + vowel not at the beginning of the word, or the end of the word + if these positions cannot be found. (Exceptionally, u'par', + u'col' or u'tap' at the beginning of a word is also taken to + define RV as the region to their right.) + + :param word: The French word whose region RV is determined. + :type word: str or unicode + :param vowels: The French vowels that are used to determine + the region RV. + :type vowels: unicode + :return: the region RV for the respective French word. 
+ :rtype: unicode + :note: This helper method is invoked by the stem method of + the subclass FrenchStemmer. It is not to be invoked directly! + + """ + rv = "" + if len(word) >= 2: + if word.startswith(("par", "col", "tap")) or ( + word[0] in vowels and word[1] in vowels + ): + rv = word[3:] + else: + for i in range(1, len(word)): + if word[i] in vowels: + rv = word[i + 1 :] + break + + return rv + + +class GermanStemmer(_StandardStemmer): + + """ + The German Snowball stemmer. + + :cvar __vowels: The German vowels. + :type __vowels: unicode + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __st_ending: Letter that may directly appear before a word final 'st'. + :type __st_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the German + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/german/stemmer.html + + """ + + __vowels = "aeiouy\xE4\xF6\xFC" + __s_ending = "bdfghklmnrt" + __st_ending = "bdfghklmnt" + + __step1_suffixes = ("ern", "em", "er", "en", "es", "e", "s") + __step2_suffixes = ("est", "en", "er", "st") + __step3_suffixes = ("isch", "lich", "heit", "keit", "end", "ung", "ig", "ik") + + def stem(self, word): + """ + Stem a German word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + word = word.replace("\xDF", "ss") + + # Every occurrence of 'u' and 'y' + # between vowels is put into upper case. + for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # R1 is adjusted so that the region before it + # contains at least 3 letters. 
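+        # (Illustrative example: for "arbeit" the standard R1 would be "beit";
+        #  the loop below shortens it to "eit" so that "arb" precedes it.)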
+ for i in range(1, len(word)): + if word[i] not in self.__vowels and word[i - 1] in self.__vowels: + if 3 > len(word[: i + 1]) > 0: + r1 = word[3:] + elif len(word[: i + 1]) == 0: + return word + break + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if ( + suffix in ("en", "es", "e") + and word[-len(suffix) - 4 : -len(suffix)] == "niss" + ): + word = word[: -len(suffix) - 1] + r1 = r1[: -len(suffix) - 1] + r2 = r2[: -len(suffix) - 1] + + elif suffix == "s": + if word[-2] in self.__s_ending: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + if suffix == "st": + if word[-3] in self.__st_ending and len(word[:-3]) >= 3: + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 3: Derivational suffixes + for suffix in self.__step3_suffixes: + if r2.endswith(suffix): + if suffix in ("end", "ung"): + if ( + "ig" in r2[-len(suffix) - 2 : -len(suffix)] + and "e" not in r2[-len(suffix) - 3 : -len(suffix) - 2] + ): + word = word[: -len(suffix) - 2] + else: + word = word[: -len(suffix)] + + elif ( + suffix in ("ig", "ik", "isch") + and "e" not in r2[-len(suffix) - 1 : -len(suffix)] + ): + word = word[: -len(suffix)] + + elif suffix in ("lich", "heit"): + if ( + "er" in r1[-len(suffix) - 2 : -len(suffix)] + or "en" in r1[-len(suffix) - 2 : -len(suffix)] + ): + word = word[: -len(suffix) - 2] + else: + word = word[: -len(suffix)] + + elif suffix == "keit": + if "lich" in r2[-len(suffix) - 4 : -len(suffix)]: + word = word[: -len(suffix) - 4] + + elif "ig" in r2[-len(suffix) - 2 : -len(suffix)]: + word = word[: -len(suffix) - 2] + else: + word = word[: -len(suffix)] + break + + # Umlaut accents are removed and + # 'u' and 'y' are put back into lower case. + word = ( + word.replace("\xE4", "a") + .replace("\xF6", "o") + .replace("\xFC", "u") + .replace("U", "u") + .replace("Y", "y") + ) + + return word + + +class HungarianStemmer(_LanguageSpecificStemmer): + + """ + The Hungarian Snowball stemmer. + + :cvar __vowels: The Hungarian vowels. + :type __vowels: unicode + :cvar __digraphs: The Hungarian digraphs. + :type __digraphs: tuple + :cvar __double_consonants: The Hungarian double consonants. + :type __double_consonants: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm. + :type __step5_suffixes: tuple + :cvar __step6_suffixes: Suffixes to be deleted in step 6 of the algorithm. + :type __step6_suffixes: tuple + :cvar __step7_suffixes: Suffixes to be deleted in step 7 of the algorithm. + :type __step7_suffixes: tuple + :cvar __step8_suffixes: Suffixes to be deleted in step 8 of the algorithm. + :type __step8_suffixes: tuple + :cvar __step9_suffixes: Suffixes to be deleted in step 9 of the algorithm. 
+ :type __step9_suffixes: tuple + :note: A detailed description of the Hungarian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/hungarian/stemmer.html + + """ + + __vowels = "aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB" + __digraphs = ("cs", "dz", "dzs", "gy", "ly", "ny", "ty", "zs") + __double_consonants = ( + "bb", + "cc", + "ccs", + "dd", + "ff", + "gg", + "ggy", + "jj", + "kk", + "ll", + "lly", + "mm", + "nn", + "nny", + "pp", + "rr", + "ss", + "ssz", + "tt", + "tty", + "vv", + "zz", + "zzs", + ) + + __step1_suffixes = ("al", "el") + __step2_suffixes = ( + "k\xE9ppen", + "onk\xE9nt", + "enk\xE9nt", + "ank\xE9nt", + "k\xE9pp", + "k\xE9nt", + "ban", + "ben", + "nak", + "nek", + "val", + "vel", + "t\xF3l", + "t\xF5l", + "r\xF3l", + "r\xF5l", + "b\xF3l", + "b\xF5l", + "hoz", + "hez", + "h\xF6z", + "n\xE1l", + "n\xE9l", + "\xE9rt", + "kor", + "ba", + "be", + "ra", + "re", + "ig", + "at", + "et", + "ot", + "\xF6t", + "ul", + "\xFCl", + "v\xE1", + "v\xE9", + "en", + "on", + "an", + "\xF6n", + "n", + "t", + ) + __step3_suffixes = ("\xE1nk\xE9nt", "\xE1n", "\xE9n") + __step4_suffixes = ( + "astul", + "est\xFCl", + "\xE1stul", + "\xE9st\xFCl", + "stul", + "st\xFCl", + ) + __step5_suffixes = ("\xE1", "\xE9") + __step6_suffixes = ( + "ok\xE9", + "\xF6k\xE9", + "ak\xE9", + "ek\xE9", + "\xE1k\xE9", + "\xE1\xE9i", + "\xE9k\xE9", + "\xE9\xE9i", + "k\xE9", + "\xE9i", + "\xE9\xE9", + "\xE9", + ) + __step7_suffixes = ( + "\xE1juk", + "\xE9j\xFCk", + "\xFCnk", + "unk", + "juk", + "j\xFCk", + "\xE1nk", + "\xE9nk", + "nk", + "uk", + "\xFCk", + "em", + "om", + "am", + "od", + "ed", + "ad", + "\xF6d", + "ja", + "je", + "\xE1m", + "\xE1d", + "\xE9m", + "\xE9d", + "m", + "d", + "a", + "e", + "o", + "\xE1", + "\xE9", + ) + __step8_suffixes = ( + "jaitok", + "jeitek", + "jaink", + "jeink", + "aitok", + "eitek", + "\xE1itok", + "\xE9itek", + "jaim", + "jeim", + "jaid", + "jeid", + "eink", + "aink", + "itek", + "jeik", + "jaik", + "\xE1ink", + "\xE9ink", + "aim", + "eim", + "aid", + "eid", + "jai", + "jei", + "ink", + "aik", + "eik", + "\xE1im", + "\xE1id", + "\xE1ik", + "\xE9im", + "\xE9id", + "\xE9ik", + "im", + "id", + "ai", + "ei", + "ik", + "\xE1i", + "\xE9i", + "i", + ) + __step9_suffixes = ("\xE1k", "\xE9k", "\xF6k", "ok", "ek", "ak", "k") + + def stem(self, word): + """ + Stem an Hungarian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs) + + # STEP 1: Remove instrumental case + if r1.endswith(self.__step1_suffixes): + for double_cons in self.__double_consonants: + if word[-2 - len(double_cons) : -2] == double_cons: + word = "".join((word[:-4], word[-3])) + + if r1[-2 - len(double_cons) : -2] == double_cons: + r1 = "".join((r1[:-4], r1[-3])) + break + + # STEP 2: Remove frequent cases + for suffix in self.__step2_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + + if r1.endswith("\xE1"): + word = "".join((word[:-1], "a")) + r1 = suffix_replace(r1, "\xE1", "a") + + elif r1.endswith("\xE9"): + word = "".join((word[:-1], "e")) + r1 = suffix_replace(r1, "\xE9", "e") + break + + # STEP 3: Remove special cases + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix == "\xE9n": + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + break + + # STEP 4: Remove other cases + for suffix in self.__step4_suffixes: + if r1.endswith(suffix): + if suffix == "\xE1stul": + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix == "\xE9st\xFCl": + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 5: Remove factive case + for suffix in self.__step5_suffixes: + if r1.endswith(suffix): + for double_cons in self.__double_consonants: + if word[-1 - len(double_cons) : -1] == double_cons: + word = "".join((word[:-3], word[-2])) + + if r1[-1 - len(double_cons) : -1] == double_cons: + r1 = "".join((r1[:-3], r1[-2])) + break + + # STEP 6: Remove owned + for suffix in self.__step6_suffixes: + if r1.endswith(suffix): + if suffix in ("\xE1k\xE9", "\xE1\xE9i"): + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix in ("\xE9k\xE9", "\xE9\xE9i", "\xE9\xE9"): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 7: Remove singular owner suffixes + for suffix in self.__step7_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix in ("\xE1nk", "\xE1juk", "\xE1m", "\xE1d", "\xE1"): + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix in ("\xE9nk", "\xE9j\xFCk", "\xE9m", "\xE9d", "\xE9"): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 8: Remove plural owner suffixes + for suffix in self.__step8_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix in ( + "\xE1im", + "\xE1id", + "\xE1i", + "\xE1ink", + "\xE1itok", + "\xE1ik", + ): + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix in ( + "\xE9im", + "\xE9id", + "\xE9i", + "\xE9ink", + "\xE9itek", + "\xE9ik", + ): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 9: Remove plural suffixes + for suffix in self.__step9_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if 
suffix == "\xE1k": + word = suffix_replace(word, suffix, "a") + elif suffix == "\xE9k": + word = suffix_replace(word, suffix, "e") + else: + word = word[: -len(suffix)] + break + + return word + + def __r1_hungarian(self, word, vowels, digraphs): + """ + Return the region R1 that is used by the Hungarian stemmer. + + If the word begins with a vowel, R1 is defined as the region + after the first consonant or digraph (= two letters stand for + one phoneme) in the word. If the word begins with a consonant, + it is defined as the region after the first vowel in the word. + If the word does not contain both a vowel and consonant, R1 + is the null region at the end of the word. + + :param word: The Hungarian word whose region R1 is determined. + :type word: str or unicode + :param vowels: The Hungarian vowels that are used to determine + the region R1. + :type vowels: unicode + :param digraphs: The digraphs that are used to determine the + region R1. + :type digraphs: tuple + :return: the region R1 for the respective word. + :rtype: unicode + :note: This helper method is invoked by the stem method of the subclass + HungarianStemmer. It is not to be invoked directly! + + """ + r1 = "" + if word[0] in vowels: + for digraph in digraphs: + if digraph in word[1:]: + r1 = word[word.index(digraph[-1]) + 1 :] + return r1 + + for i in range(1, len(word)): + if word[i] not in vowels: + r1 = word[i + 1 :] + break + else: + for i in range(1, len(word)): + if word[i] in vowels: + r1 = word[i + 1 :] + break + + return r1 + + +class ItalianStemmer(_StandardStemmer): + + """ + The Italian Snowball stemmer. + + :cvar __vowels: The Italian vowels. + :type __vowels: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. + :type __step0_suffixes: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. 
+ :type __step2_suffixes: tuple + :note: A detailed description of the Italian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/italian/stemmer.html + + """ + + __vowels = "aeiou\xE0\xE8\xEC\xF2\xF9" + __step0_suffixes = ( + "gliela", + "gliele", + "glieli", + "glielo", + "gliene", + "sene", + "mela", + "mele", + "meli", + "melo", + "mene", + "tela", + "tele", + "teli", + "telo", + "tene", + "cela", + "cele", + "celi", + "celo", + "cene", + "vela", + "vele", + "veli", + "velo", + "vene", + "gli", + "ci", + "la", + "le", + "li", + "lo", + "mi", + "ne", + "si", + "ti", + "vi", + ) + __step1_suffixes = ( + "atrice", + "atrici", + "azione", + "azioni", + "uzione", + "uzioni", + "usione", + "usioni", + "amento", + "amenti", + "imento", + "imenti", + "amente", + "abile", + "abili", + "ibile", + "ibili", + "mente", + "atore", + "atori", + "logia", + "logie", + "anza", + "anze", + "iche", + "ichi", + "ismo", + "ismi", + "ista", + "iste", + "isti", + "ist\xE0", + "ist\xE8", + "ist\xEC", + "ante", + "anti", + "enza", + "enze", + "ico", + "ici", + "ica", + "ice", + "oso", + "osi", + "osa", + "ose", + "it\xE0", + "ivo", + "ivi", + "iva", + "ive", + ) + __step2_suffixes = ( + "erebbero", + "irebbero", + "assero", + "assimo", + "eranno", + "erebbe", + "eremmo", + "ereste", + "eresti", + "essero", + "iranno", + "irebbe", + "iremmo", + "ireste", + "iresti", + "iscano", + "iscono", + "issero", + "arono", + "avamo", + "avano", + "avate", + "eremo", + "erete", + "erono", + "evamo", + "evano", + "evate", + "iremo", + "irete", + "irono", + "ivamo", + "ivano", + "ivate", + "ammo", + "ando", + "asse", + "assi", + "emmo", + "enda", + "ende", + "endi", + "endo", + "erai", + "erei", + "Yamo", + "iamo", + "immo", + "irai", + "irei", + "isca", + "isce", + "isci", + "isco", + "ano", + "are", + "ata", + "ate", + "ati", + "ato", + "ava", + "avi", + "avo", + "er\xE0", + "ere", + "er\xF2", + "ete", + "eva", + "evi", + "evo", + "ir\xE0", + "ire", + "ir\xF2", + "ita", + "ite", + "iti", + "ito", + "iva", + "ivi", + "ivo", + "ono", + "uta", + "ute", + "uti", + "uto", + "ar", + "ir", + ) + + def stem(self, word): + """ + Stem an Italian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + + # All acute accents are replaced by grave accents. + word = ( + word.replace("\xE1", "\xE0") + .replace("\xE9", "\xE8") + .replace("\xED", "\xEC") + .replace("\xF3", "\xF2") + .replace("\xFA", "\xF9") + ) + + # Every occurrence of 'u' after 'q' + # is put into upper case. + for i in range(1, len(word)): + if word[i - 1] == "q" and word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + # Every occurrence of 'u' and 'i' + # between vowels is put into upper case. 
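+        # (e.g. "noia" becomes "noIa"; the upper-case "I" is not in __vowels,
+        #  so the R1/R2/RV regions computed below treat it as a consonant)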
+ for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "i": + word = "".join((word[:i], "I", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 0: Attached pronoun + for suffix in self.__step0_suffixes: + if rv.endswith(suffix): + if rv[-len(suffix) - 4 : -len(suffix)] in ("ando", "endo"): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + elif rv[-len(suffix) - 2 : -len(suffix)] in ("ar", "er", "ir"): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + r2 = suffix_replace(r2, suffix, "e") + rv = suffix_replace(rv, suffix, "e") + break + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix == "amente" and r1.endswith(suffix): + step1_success = True + word = word[:-6] + r2 = r2[:-6] + rv = rv[:-6] + + if r2.endswith("iv"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(("os", "ic")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith("abil"): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("amento", "amenti", "imento", "imenti") and rv.endswith( + suffix + ): + step1_success = True + word = word[:-6] + rv = rv[:-6] + + elif r2.endswith(suffix): + step1_success = True + if suffix in ("azione", "azioni", "atore", "atori"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith("ic"): + word = word[:-2] + rv = rv[:-2] + + elif suffix in ("logia", "logie"): + word = word[:-2] + rv = word[:-2] + + elif suffix in ("uzione", "uzioni", "usione", "usioni"): + word = word[:-5] + rv = rv[:-5] + + elif suffix in ("enza", "enze"): + word = suffix_replace(word, suffix, "te") + rv = suffix_replace(rv, suffix, "te") + + elif suffix == "it\xE0": + word = word[:-3] + r2 = r2[:-3] + rv = rv[:-3] + + if r2.endswith(("ic", "iv")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith("abil"): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("ivo", "ivi", "iva", "ive"): + word = word[:-3] + r2 = r2[:-3] + rv = rv[:-3] + + if r2.endswith("at"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("ic"): + word = word[:-2] + rv = rv[:-2] + else: + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2: Verb suffixes + if not step1_success: + for suffix in self.__step2_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 3a + if rv.endswith(("a", "e", "i", "o", "\xE0", "\xE8", "\xEC", "\xF2")): + word = word[:-1] + rv = rv[:-1] + + if rv.endswith("i"): + word = word[:-1] + rv = rv[:-1] + + # STEP 3b + if rv.endswith(("ch", "gh")): + word = word[:-1] + + word = word.replace("I", "i").replace("U", "u") + + return word + + +class NorwegianStemmer(_ScandinavianStemmer): + + """ + The Norwegian Snowball stemmer. + + :cvar __vowels: The Norwegian vowels. + :type __vowels: unicode + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. 
+ :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Norwegian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/norwegian/stemmer.html + + """ + + __vowels = "aeiouy\xE6\xE5\xF8" + __s_ending = "bcdfghjlmnoprtvyz" + __step1_suffixes = ( + "hetenes", + "hetene", + "hetens", + "heter", + "heten", + "endes", + "ande", + "ende", + "edes", + "enes", + "erte", + "ede", + "ane", + "ene", + "ens", + "ers", + "ets", + "het", + "ast", + "ert", + "en", + "ar", + "er", + "as", + "es", + "et", + "a", + "e", + "s", + ) + + __step2_suffixes = ("dt", "vt") + + __step3_suffixes = ( + "hetslov", + "eleg", + "elig", + "elov", + "slov", + "leg", + "eig", + "lig", + "els", + "lov", + "ig", + ) + + def stem(self, word): + """ + Stem a Norwegian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + r1 = self._r1_scandinavian(word, self.__vowels) + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix in ("erte", "ert"): + word = suffix_replace(word, suffix, "er") + r1 = suffix_replace(r1, suffix, "er") + + elif suffix == "s": + if word[-2] in self.__s_ending or ( + word[-2] == "k" and word[-3] not in self.__vowels + ): + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 3 + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + word = word[: -len(suffix)] + break + + return word + + +class PortugueseStemmer(_StandardStemmer): + + """ + The Portuguese Snowball stemmer. + + :cvar __vowels: The Portuguese vowels. + :type __vowels: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. 
+ :type __step4_suffixes: tuple + :note: A detailed description of the Portuguese + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/portuguese/stemmer.html + + """ + + __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4" + __step1_suffixes = ( + "amentos", + "imentos", + "uço~es", + "amento", + "imento", + "adoras", + "adores", + "a\xE7o~es", + "logias", + "\xEAncias", + "amente", + "idades", + "an\xE7as", + "ismos", + "istas", + "adora", + "a\xE7a~o", + "antes", + "\xE2ncia", + "logia", + "uça~o", + "\xEAncia", + "mente", + "idade", + "an\xE7a", + "ezas", + "icos", + "icas", + "ismo", + "\xE1vel", + "\xEDvel", + "ista", + "osos", + "osas", + "ador", + "ante", + "ivas", + "ivos", + "iras", + "eza", + "ico", + "ica", + "oso", + "osa", + "iva", + "ivo", + "ira", + ) + __step2_suffixes = ( + "ar\xEDamos", + "er\xEDamos", + "ir\xEDamos", + "\xE1ssemos", + "\xEAssemos", + "\xEDssemos", + "ar\xEDeis", + "er\xEDeis", + "ir\xEDeis", + "\xE1sseis", + "\xE9sseis", + "\xEDsseis", + "\xE1ramos", + "\xE9ramos", + "\xEDramos", + "\xE1vamos", + "aremos", + "eremos", + "iremos", + "ariam", + "eriam", + "iriam", + "assem", + "essem", + "issem", + "ara~o", + "era~o", + "ira~o", + "arias", + "erias", + "irias", + "ardes", + "erdes", + "irdes", + "asses", + "esses", + "isses", + "astes", + "estes", + "istes", + "\xE1reis", + "areis", + "\xE9reis", + "ereis", + "\xEDreis", + "ireis", + "\xE1veis", + "\xEDamos", + "armos", + "ermos", + "irmos", + "aria", + "eria", + "iria", + "asse", + "esse", + "isse", + "aste", + "este", + "iste", + "arei", + "erei", + "irei", + "aram", + "eram", + "iram", + "avam", + "arem", + "erem", + "irem", + "ando", + "endo", + "indo", + "adas", + "idas", + "ar\xE1s", + "aras", + "er\xE1s", + "eras", + "ir\xE1s", + "avas", + "ares", + "eres", + "ires", + "\xEDeis", + "ados", + "idos", + "\xE1mos", + "amos", + "emos", + "imos", + "iras", + "ada", + "ida", + "ar\xE1", + "ara", + "er\xE1", + "era", + "ir\xE1", + "ava", + "iam", + "ado", + "ido", + "ias", + "ais", + "eis", + "ira", + "ia", + "ei", + "am", + "em", + "ar", + "er", + "ir", + "as", + "es", + "is", + "eu", + "iu", + "ou", + ) + __step4_suffixes = ("os", "a", "i", "o", "\xE1", "\xED", "\xF3") + + def stem(self, word): + """ + Stem a Portuguese word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + step2_success = False + + word = ( + word.replace("\xE3", "a~") + .replace("\xF5", "o~") + .replace("q\xFC", "qu") + .replace("g\xFC", "gu") + ) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix == "amente" and r1.endswith(suffix): + step1_success = True + + word = word[:-6] + r2 = r2[:-6] + rv = rv[:-6] + + if r2.endswith("iv"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(("os", "ic", "ad")): + word = word[:-2] + rv = rv[:-2] + + elif ( + suffix in ("ira", "iras") + and rv.endswith(suffix) + and word[-len(suffix) - 1 : -len(suffix)] == "e" + ): + step1_success = True + + word = suffix_replace(word, suffix, "ir") + rv = suffix_replace(rv, suffix, "ir") + + elif r2.endswith(suffix): + step1_success = True + + if suffix in ("logia", "logias"): + word = suffix_replace(word, suffix, "log") + rv = suffix_replace(rv, suffix, "log") + + elif suffix in ("uça~o", "uço~es"): + word = suffix_replace(word, suffix, "u") + rv = suffix_replace(rv, suffix, "u") + + elif suffix in ("\xEAncia", "\xEAncias"): + word = suffix_replace(word, suffix, "ente") + rv = suffix_replace(rv, suffix, "ente") + + elif suffix == "mente": + word = word[:-5] + r2 = r2[:-5] + rv = rv[:-5] + + if r2.endswith(("ante", "avel", "ivel")): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("idade", "idades"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith(("ic", "iv")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith("abil"): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("iva", "ivo", "ivas", "ivos"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + else: + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2: Verb suffixes + if not step1_success: + for suffix in self.__step2_suffixes: + if rv.endswith(suffix): + step2_success = True + + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 3 + if step1_success or step2_success: + if rv.endswith("i") and word[-2] == "c": + word = word[:-1] + rv = rv[:-1] + + ### STEP 4: Residual suffix + if not step1_success and not step2_success: + for suffix in self.__step4_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 5 + if rv.endswith(("e", "\xE9", "\xEA")): + word = word[:-1] + rv = rv[:-1] + + if (word.endswith("gu") and rv.endswith("u")) or ( + word.endswith("ci") and rv.endswith("i") + ): + word = word[:-1] + + elif word.endswith("\xE7"): + word = suffix_replace(word, "\xE7", "c") + + word = word.replace("a~", "\xE3").replace("o~", "\xF5") + + return word + + +class RomanianStemmer(_StandardStemmer): + + """ + The Romanian Snowball stemmer. + + :cvar __vowels: The Romanian vowels. + :type __vowels: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. + :type __step0_suffixes: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. 
+ :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Romanian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/romanian/stemmer.html + + """ + + __vowels = "aeiou\u0103\xE2\xEE" + __step0_suffixes = ( + "iilor", + "ului", + "elor", + "iile", + "ilor", + "atei", + "a\u0163ie", + "a\u0163ia", + "aua", + "ele", + "iua", + "iei", + "ile", + "ul", + "ea", + "ii", + ) + __step1_suffixes = ( + "abilitate", + "abilitati", + "abilit\u0103\u0163i", + "ibilitate", + "abilit\u0103i", + "ivitate", + "ivitati", + "ivit\u0103\u0163i", + "icitate", + "icitati", + "icit\u0103\u0163i", + "icatori", + "ivit\u0103i", + "icit\u0103i", + "icator", + "a\u0163iune", + "atoare", + "\u0103toare", + "i\u0163iune", + "itoare", + "iciva", + "icive", + "icivi", + "iciv\u0103", + "icala", + "icale", + "icali", + "ical\u0103", + "ativa", + "ative", + "ativi", + "ativ\u0103", + "atori", + "\u0103tori", + "itiva", + "itive", + "itivi", + "itiv\u0103", + "itori", + "iciv", + "ical", + "ativ", + "ator", + "\u0103tor", + "itiv", + "itor", + ) + __step2_suffixes = ( + "abila", + "abile", + "abili", + "abil\u0103", + "ibila", + "ibile", + "ibili", + "ibil\u0103", + "atori", + "itate", + "itati", + "it\u0103\u0163i", + "abil", + "ibil", + "oasa", + "oas\u0103", + "oase", + "anta", + "ante", + "anti", + "ant\u0103", + "ator", + "it\u0103i", + "iune", + "iuni", + "isme", + "ista", + "iste", + "isti", + "ist\u0103", + "i\u015Fti", + "ata", + "at\u0103", + "ati", + "ate", + "uta", + "ut\u0103", + "uti", + "ute", + "ita", + "it\u0103", + "iti", + "ite", + "ica", + "ice", + "ici", + "ic\u0103", + "osi", + "o\u015Fi", + "ant", + "iva", + "ive", + "ivi", + "iv\u0103", + "ism", + "ist", + "at", + "ut", + "it", + "ic", + "os", + "iv", + ) + __step3_suffixes = ( + "seser\u0103\u0163i", + "aser\u0103\u0163i", + "iser\u0103\u0163i", + "\xE2ser\u0103\u0163i", + "user\u0103\u0163i", + "seser\u0103m", + "aser\u0103m", + "iser\u0103m", + "\xE2ser\u0103m", + "user\u0103m", + "ser\u0103\u0163i", + "sese\u015Fi", + "seser\u0103", + "easc\u0103", + "ar\u0103\u0163i", + "ur\u0103\u0163i", + "ir\u0103\u0163i", + "\xE2r\u0103\u0163i", + "ase\u015Fi", + "aser\u0103", + "ise\u015Fi", + "iser\u0103", + "\xe2se\u015Fi", + "\xE2ser\u0103", + "use\u015Fi", + "user\u0103", + "ser\u0103m", + "sesem", + "indu", + "\xE2ndu", + "eaz\u0103", + "e\u015Fti", + "e\u015Fte", + "\u0103\u015Fti", + "\u0103\u015Fte", + "ea\u0163i", + "ia\u0163i", + "ar\u0103m", + "ur\u0103m", + "ir\u0103m", + "\xE2r\u0103m", + "asem", + "isem", + "\xE2sem", + "usem", + "se\u015Fi", + "ser\u0103", + "sese", + "are", + "ere", + "ire", + "\xE2re", + "ind", + "\xE2nd", + "eze", + "ezi", + "esc", + "\u0103sc", + "eam", + "eai", + "eau", + "iam", + "iai", + "iau", + "a\u015Fi", + "ar\u0103", + "u\u015Fi", + "ur\u0103", + "i\u015Fi", + "ir\u0103", + "\xE2\u015Fi", + "\xe2r\u0103", + "ase", + "ise", + "\xE2se", + "use", + "a\u0163i", + "e\u0163i", + "i\u0163i", + "\xe2\u0163i", + "sei", + "ez", + "am", + "ai", + "au", + "ea", + "ia", + "ui", + "\xE2i", + "\u0103m", + "em", + "im", + "\xE2m", + "se", + ) + + def stem(self, word): + """ + Stem a Romanian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + step2_success = False + + for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "i": + word = "".join((word[:i], "I", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 0: Removal of plurals and other simplifications + for suffix in self.__step0_suffixes: + if word.endswith(suffix): + if suffix in r1: + if suffix in ("ul", "ului"): + word = word[: -len(suffix)] + + if suffix in rv: + rv = rv[: -len(suffix)] + else: + rv = "" + + elif ( + suffix == "aua" + or suffix == "atei" + or (suffix == "ile" and word[-5:-3] != "ab") + ): + word = word[:-2] + + elif suffix in ("ea", "ele", "elor"): + word = suffix_replace(word, suffix, "e") + + if suffix in rv: + rv = suffix_replace(rv, suffix, "e") + else: + rv = "" + + elif suffix in ("ii", "iua", "iei", "iile", "iilor", "ilor"): + word = suffix_replace(word, suffix, "i") + + if suffix in rv: + rv = suffix_replace(rv, suffix, "i") + else: + rv = "" + + elif suffix in ("a\u0163ie", "a\u0163ia"): + word = word[:-1] + break + + # STEP 1: Reduction of combining suffixes + while True: + + replacement_done = False + + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix in r1: + step1_success = True + replacement_done = True + + if suffix in ( + "abilitate", + "abilitati", + "abilit\u0103i", + "abilit\u0103\u0163i", + ): + word = suffix_replace(word, suffix, "abil") + + elif suffix == "ibilitate": + word = word[:-5] + + elif suffix in ( + "ivitate", + "ivitati", + "ivit\u0103i", + "ivit\u0103\u0163i", + ): + word = suffix_replace(word, suffix, "iv") + + elif suffix in ( + "icitate", + "icitati", + "icit\u0103i", + "icit\u0103\u0163i", + "icator", + "icatori", + "iciv", + "iciva", + "icive", + "icivi", + "iciv\u0103", + "ical", + "icala", + "icale", + "icali", + "ical\u0103", + ): + word = suffix_replace(word, suffix, "ic") + + elif suffix in ( + "ativ", + "ativa", + "ative", + "ativi", + "ativ\u0103", + "a\u0163iune", + "atoare", + "ator", + "atori", + "\u0103toare", + "\u0103tor", + "\u0103tori", + ): + word = suffix_replace(word, suffix, "at") + + if suffix in r2: + r2 = suffix_replace(r2, suffix, "at") + + elif suffix in ( + "itiv", + "itiva", + "itive", + "itivi", + "itiv\u0103", + "i\u0163iune", + "itoare", + "itor", + "itori", + ): + word = suffix_replace(word, suffix, "it") + + if suffix in r2: + r2 = suffix_replace(r2, suffix, "it") + else: + step1_success = False + break + + if not replacement_done: + break + + # STEP 2: Removal of standard suffixes + for suffix in self.__step2_suffixes: + if word.endswith(suffix): + if suffix in r2: + step2_success = True + + if suffix in ("iune", "iuni"): + if word[-5] == "\u0163": + word = "".join((word[:-5], "t")) + + elif suffix in ( + "ism", + "isme", + "ist", + "ista", + "iste", + "isti", + "ist\u0103", + "i\u015Fti", + ): + word = suffix_replace(word, suffix, "ist") + + else: + word = word[: -len(suffix)] + break + + # STEP 3: Removal of verb suffixes + if not step1_success and not step2_success: + for suffix in self.__step3_suffixes: + if word.endswith(suffix): + if suffix in rv: + if suffix in ( + "seser\u0103\u0163i", + "seser\u0103m", + "ser\u0103\u0163i", + "sese\u015Fi", + "seser\u0103", + "ser\u0103m", + "sesem", + "se\u015Fi", + "ser\u0103", + 
"sese", + "a\u0163i", + "e\u0163i", + "i\u0163i", + "\xE2\u0163i", + "sei", + "\u0103m", + "em", + "im", + "\xE2m", + "se", + ): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + else: + if ( + not rv.startswith(suffix) + and rv[rv.index(suffix) - 1] not in "aeio\u0103\xE2\xEE" + ): + word = word[: -len(suffix)] + break + + # STEP 4: Removal of final vowel + for suffix in ("ie", "a", "e", "i", "\u0103"): + if word.endswith(suffix): + if suffix in rv: + word = word[: -len(suffix)] + break + + word = word.replace("I", "i").replace("U", "u") + + return word + + +class RussianStemmer(_LanguageSpecificStemmer): + + """ + The Russian Snowball stemmer. + + :cvar __perfective_gerund_suffixes: Suffixes to be deleted. + :type __perfective_gerund_suffixes: tuple + :cvar __adjectival_suffixes: Suffixes to be deleted. + :type __adjectival_suffixes: tuple + :cvar __reflexive_suffixes: Suffixes to be deleted. + :type __reflexive_suffixes: tuple + :cvar __verb_suffixes: Suffixes to be deleted. + :type __verb_suffixes: tuple + :cvar __noun_suffixes: Suffixes to be deleted. + :type __noun_suffixes: tuple + :cvar __superlative_suffixes: Suffixes to be deleted. + :type __superlative_suffixes: tuple + :cvar __derivational_suffixes: Suffixes to be deleted. + :type __derivational_suffixes: tuple + :note: A detailed description of the Russian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/russian/stemmer.html + + """ + + __perfective_gerund_suffixes = ( + "ivshis'", + "yvshis'", + "vshis'", + "ivshi", + "yvshi", + "vshi", + "iv", + "yv", + "v", + ) + __adjectival_suffixes = ( + "ui^ushchi^ui^u", + "ui^ushchi^ai^a", + "ui^ushchimi", + "ui^ushchymi", + "ui^ushchego", + "ui^ushchogo", + "ui^ushchemu", + "ui^ushchomu", + "ui^ushchikh", + "ui^ushchykh", + "ui^ushchui^u", + "ui^ushchaia", + "ui^ushchoi^u", + "ui^ushchei^u", + "i^ushchi^ui^u", + "i^ushchi^ai^a", + "ui^ushchee", + "ui^ushchie", + "ui^ushchye", + "ui^ushchoe", + "ui^ushchei`", + "ui^ushchii`", + "ui^ushchyi`", + "ui^ushchoi`", + "ui^ushchem", + "ui^ushchim", + "ui^ushchym", + "ui^ushchom", + "i^ushchimi", + "i^ushchymi", + "i^ushchego", + "i^ushchogo", + "i^ushchemu", + "i^ushchomu", + "i^ushchikh", + "i^ushchykh", + "i^ushchui^u", + "i^ushchai^a", + "i^ushchoi^u", + "i^ushchei^u", + "i^ushchee", + "i^ushchie", + "i^ushchye", + "i^ushchoe", + "i^ushchei`", + "i^ushchii`", + "i^ushchyi`", + "i^ushchoi`", + "i^ushchem", + "i^ushchim", + "i^ushchym", + "i^ushchom", + "shchi^ui^u", + "shchi^ai^a", + "ivshi^ui^u", + "ivshi^ai^a", + "yvshi^ui^u", + "yvshi^ai^a", + "shchimi", + "shchymi", + "shchego", + "shchogo", + "shchemu", + "shchomu", + "shchikh", + "shchykh", + "shchui^u", + "shchai^a", + "shchoi^u", + "shchei^u", + "ivshimi", + "ivshymi", + "ivshego", + "ivshogo", + "ivshemu", + "ivshomu", + "ivshikh", + "ivshykh", + "ivshui^u", + "ivshai^a", + "ivshoi^u", + "ivshei^u", + "yvshimi", + "yvshymi", + "yvshego", + "yvshogo", + "yvshemu", + "yvshomu", + "yvshikh", + "yvshykh", + "yvshui^u", + "yvshai^a", + "yvshoi^u", + "yvshei^u", + "vshi^ui^u", + "vshi^ai^a", + "shchee", + "shchie", + "shchye", + "shchoe", + "shchei`", + "shchii`", + "shchyi`", + "shchoi`", + "shchem", + "shchim", + "shchym", + "shchom", + "ivshee", + "ivshie", + "ivshye", + "ivshoe", + "ivshei`", + "ivshii`", + "ivshyi`", + "ivshoi`", + "ivshem", + "ivshim", + "ivshym", + "ivshom", + "yvshee", + "yvshie", + "yvshye", + "yvshoe", + "yvshei`", + "yvshii`", + "yvshyi`", + "yvshoi`", + "yvshem", + "yvshim", + "yvshym", + "yvshom", + "vshimi", + 
"vshymi", + "vshego", + "vshogo", + "vshemu", + "vshomu", + "vshikh", + "vshykh", + "vshui^u", + "vshai^a", + "vshoi^u", + "vshei^u", + "emi^ui^u", + "emi^ai^a", + "nni^ui^u", + "nni^ai^a", + "vshee", + "vshie", + "vshye", + "vshoe", + "vshei`", + "vshii`", + "vshyi`", + "vshoi`", + "vshem", + "vshim", + "vshym", + "vshom", + "emimi", + "emymi", + "emego", + "emogo", + "ememu", + "emomu", + "emikh", + "emykh", + "emui^u", + "emai^a", + "emoi^u", + "emei^u", + "nnimi", + "nnymi", + "nnego", + "nnogo", + "nnemu", + "nnomu", + "nnikh", + "nnykh", + "nnui^u", + "nnai^a", + "nnoi^u", + "nnei^u", + "emee", + "emie", + "emye", + "emoe", + "emei`", + "emii`", + "emyi`", + "emoi`", + "emem", + "emim", + "emym", + "emom", + "nnee", + "nnie", + "nnye", + "nnoe", + "nnei`", + "nnii`", + "nnyi`", + "nnoi`", + "nnem", + "nnim", + "nnym", + "nnom", + "i^ui^u", + "i^ai^a", + "imi", + "ymi", + "ego", + "ogo", + "emu", + "omu", + "ikh", + "ykh", + "ui^u", + "ai^a", + "oi^u", + "ei^u", + "ee", + "ie", + "ye", + "oe", + "ei`", + "ii`", + "yi`", + "oi`", + "em", + "im", + "ym", + "om", + ) + __reflexive_suffixes = ("si^a", "s'") + __verb_suffixes = ( + "esh'", + "ei`te", + "ui`te", + "ui^ut", + "ish'", + "ete", + "i`te", + "i^ut", + "nno", + "ila", + "yla", + "ena", + "ite", + "ili", + "yli", + "ilo", + "ylo", + "eno", + "i^at", + "uet", + "eny", + "it'", + "yt'", + "ui^u", + "la", + "na", + "li", + "em", + "lo", + "no", + "et", + "ny", + "t'", + "ei`", + "ui`", + "il", + "yl", + "im", + "ym", + "en", + "it", + "yt", + "i^u", + "i`", + "l", + "n", + ) + __noun_suffixes = ( + "ii^ami", + "ii^akh", + "i^ami", + "ii^am", + "i^akh", + "ami", + "iei`", + "i^am", + "iem", + "akh", + "ii^u", + "'i^u", + "ii^a", + "'i^a", + "ev", + "ov", + "ie", + "'e", + "ei", + "ii", + "ei`", + "oi`", + "ii`", + "em", + "am", + "om", + "i^u", + "i^a", + "a", + "e", + "i", + "i`", + "o", + "u", + "y", + "'", + ) + __superlative_suffixes = ("ei`she", "ei`sh") + __derivational_suffixes = ("ost'", "ost") + + def stem(self, word): + """ + Stem a Russian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + if word in self.stopwords: + return word + + chr_exceeded = False + for i in range(len(word)): + if ord(word[i]) > 255: + chr_exceeded = True + break + + if not chr_exceeded: + return word + + word = self.__cyrillic_to_roman(word) + + step1_success = False + adjectival_removed = False + verb_removed = False + undouble_success = False + superlative_removed = False + + rv, r2 = self.__regions_russian(word) + + # Step 1 + for suffix in self.__perfective_gerund_suffixes: + if rv.endswith(suffix): + if suffix in ("v", "vshi", "vshis'"): + if ( + rv[-len(suffix) - 3 : -len(suffix)] == "i^a" + or rv[-len(suffix) - 1 : -len(suffix)] == "a" + ): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + step1_success = True + break + else: + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + step1_success = True + break + + if not step1_success: + for suffix in self.__reflexive_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + for suffix in self.__adjectival_suffixes: + if rv.endswith(suffix): + if suffix in ( + "i^ushchi^ui^u", + "i^ushchi^ai^a", + "i^ushchui^u", + "i^ushchai^a", + "i^ushchoi^u", + "i^ushchei^u", + "i^ushchimi", + "i^ushchymi", + "i^ushchego", + "i^ushchogo", + "i^ushchemu", + "i^ushchomu", + "i^ushchikh", + "i^ushchykh", + "shchi^ui^u", + "shchi^ai^a", + "i^ushchee", + "i^ushchie", + "i^ushchye", + "i^ushchoe", + "i^ushchei`", + "i^ushchii`", + "i^ushchyi`", + "i^ushchoi`", + "i^ushchem", + "i^ushchim", + "i^ushchym", + "i^ushchom", + "vshi^ui^u", + "vshi^ai^a", + "shchui^u", + "shchai^a", + "shchoi^u", + "shchei^u", + "emi^ui^u", + "emi^ai^a", + "nni^ui^u", + "nni^ai^a", + "shchimi", + "shchymi", + "shchego", + "shchogo", + "shchemu", + "shchomu", + "shchikh", + "shchykh", + "vshui^u", + "vshai^a", + "vshoi^u", + "vshei^u", + "shchee", + "shchie", + "shchye", + "shchoe", + "shchei`", + "shchii`", + "shchyi`", + "shchoi`", + "shchem", + "shchim", + "shchym", + "shchom", + "vshimi", + "vshymi", + "vshego", + "vshogo", + "vshemu", + "vshomu", + "vshikh", + "vshykh", + "emui^u", + "emai^a", + "emoi^u", + "emei^u", + "nnui^u", + "nnai^a", + "nnoi^u", + "nnei^u", + "vshee", + "vshie", + "vshye", + "vshoe", + "vshei`", + "vshii`", + "vshyi`", + "vshoi`", + "vshem", + "vshim", + "vshym", + "vshom", + "emimi", + "emymi", + "emego", + "emogo", + "ememu", + "emomu", + "emikh", + "emykh", + "nnimi", + "nnymi", + "nnego", + "nnogo", + "nnemu", + "nnomu", + "nnikh", + "nnykh", + "emee", + "emie", + "emye", + "emoe", + "emei`", + "emii`", + "emyi`", + "emoi`", + "emem", + "emim", + "emym", + "emom", + "nnee", + "nnie", + "nnye", + "nnoe", + "nnei`", + "nnii`", + "nnyi`", + "nnoi`", + "nnem", + "nnim", + "nnym", + "nnom", + ): + if ( + rv[-len(suffix) - 3 : -len(suffix)] == "i^a" + or rv[-len(suffix) - 1 : -len(suffix)] == "a" + ): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + adjectival_removed = True + break + else: + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + adjectival_removed = True + break + + if not adjectival_removed: + for suffix in self.__verb_suffixes: + if rv.endswith(suffix): + if suffix in ( + "la", + "na", + "ete", + "i`te", + "li", + "i`", + "l", + "em", + "n", + "lo", + "no", + "et", + "i^ut", + "ny", + "t'", + "esh'", + "nno", + ): + if ( + rv[-len(suffix) - 3 : -len(suffix)] == "i^a" + or rv[-len(suffix) - 1 : -len(suffix)] == "a" + ): + 
word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + verb_removed = True + break + else: + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + verb_removed = True + break + + if not adjectival_removed and not verb_removed: + for suffix in self.__noun_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # Step 2 + if rv.endswith("i"): + word = word[:-1] + r2 = r2[:-1] + + # Step 3 + for suffix in self.__derivational_suffixes: + if r2.endswith(suffix): + word = word[: -len(suffix)] + break + + # Step 4 + if word.endswith("nn"): + word = word[:-1] + undouble_success = True + + if not undouble_success: + for suffix in self.__superlative_suffixes: + if word.endswith(suffix): + word = word[: -len(suffix)] + superlative_removed = True + break + if word.endswith("nn"): + word = word[:-1] + + if not undouble_success and not superlative_removed: + if word.endswith("'"): + word = word[:-1] + + word = self.__roman_to_cyrillic(word) + + return word + + def __regions_russian(self, word): + """ + Return the regions RV and R2 which are used by the Russian stemmer. + + In any word, RV is the region after the first vowel, + or the end of the word if it contains no vowel. + + R2 is the region after the first non-vowel following + a vowel in R1, or the end of the word if there is no such non-vowel. + + R1 is the region after the first non-vowel following a vowel, + or the end of the word if there is no such non-vowel. + + :param word: The Russian word whose regions RV and R2 are determined. + :type word: str or unicode + :return: the regions RV and R2 for the respective Russian word. + :rtype: tuple + :note: This helper method is invoked by the stem method of the subclass + RussianStemmer. It is not to be invoked directly! + + """ + r1 = "" + r2 = "" + rv = "" + + vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y") + word = word.replace("i^a", "A").replace("i^u", "U").replace("e`", "E") + + for i in range(1, len(word)): + if word[i] not in vowels and word[i - 1] in vowels: + r1 = word[i + 1 :] + break + + for i in range(1, len(r1)): + if r1[i] not in vowels and r1[i - 1] in vowels: + r2 = r1[i + 1 :] + break + + for i in range(len(word)): + if word[i] in vowels: + rv = word[i + 1 :] + break + + r2 = r2.replace("A", "i^a").replace("U", "i^u").replace("E", "e`") + rv = rv.replace("A", "i^a").replace("U", "i^u").replace("E", "e`") + + return (rv, r2) + + def __cyrillic_to_roman(self, word): + """ + Transliterate a Russian word into the Roman alphabet. + + A Russian word whose letters consist of the Cyrillic + alphabet are transliterated into the Roman alphabet + in order to ease the forthcoming stemming process. + + :param word: The word that is transliterated. + :type word: unicode + :return: the transliterated word. + :rtype: unicode + :note: This helper method is invoked by the stem method of the subclass + RussianStemmer. It is not to be invoked directly! 
+ + """ + word = ( + word.replace("\u0410", "a") + .replace("\u0430", "a") + .replace("\u0411", "b") + .replace("\u0431", "b") + .replace("\u0412", "v") + .replace("\u0432", "v") + .replace("\u0413", "g") + .replace("\u0433", "g") + .replace("\u0414", "d") + .replace("\u0434", "d") + .replace("\u0415", "e") + .replace("\u0435", "e") + .replace("\u0401", "e") + .replace("\u0451", "e") + .replace("\u0416", "zh") + .replace("\u0436", "zh") + .replace("\u0417", "z") + .replace("\u0437", "z") + .replace("\u0418", "i") + .replace("\u0438", "i") + .replace("\u0419", "i`") + .replace("\u0439", "i`") + .replace("\u041A", "k") + .replace("\u043A", "k") + .replace("\u041B", "l") + .replace("\u043B", "l") + .replace("\u041C", "m") + .replace("\u043C", "m") + .replace("\u041D", "n") + .replace("\u043D", "n") + .replace("\u041E", "o") + .replace("\u043E", "o") + .replace("\u041F", "p") + .replace("\u043F", "p") + .replace("\u0420", "r") + .replace("\u0440", "r") + .replace("\u0421", "s") + .replace("\u0441", "s") + .replace("\u0422", "t") + .replace("\u0442", "t") + .replace("\u0423", "u") + .replace("\u0443", "u") + .replace("\u0424", "f") + .replace("\u0444", "f") + .replace("\u0425", "kh") + .replace("\u0445", "kh") + .replace("\u0426", "t^s") + .replace("\u0446", "t^s") + .replace("\u0427", "ch") + .replace("\u0447", "ch") + .replace("\u0428", "sh") + .replace("\u0448", "sh") + .replace("\u0429", "shch") + .replace("\u0449", "shch") + .replace("\u042A", "''") + .replace("\u044A", "''") + .replace("\u042B", "y") + .replace("\u044B", "y") + .replace("\u042C", "'") + .replace("\u044C", "'") + .replace("\u042D", "e`") + .replace("\u044D", "e`") + .replace("\u042E", "i^u") + .replace("\u044E", "i^u") + .replace("\u042F", "i^a") + .replace("\u044F", "i^a") + ) + + return word + + def __roman_to_cyrillic(self, word): + """ + Transliterate a Russian word back into the Cyrillic alphabet. + + A Russian word formerly transliterated into the Roman alphabet + in order to ease the stemming process, is transliterated back + into the Cyrillic alphabet, its original form. + + :param word: The word that is transliterated. + :type word: str or unicode + :return: word, the transliterated word. + :rtype: unicode + :note: This helper method is invoked by the stem method of the subclass + RussianStemmer. It is not to be invoked directly! + + """ + word = ( + word.replace("i^u", "\u044E") + .replace("i^a", "\u044F") + .replace("shch", "\u0449") + .replace("kh", "\u0445") + .replace("t^s", "\u0446") + .replace("ch", "\u0447") + .replace("e`", "\u044D") + .replace("i`", "\u0439") + .replace("sh", "\u0448") + .replace("k", "\u043A") + .replace("e", "\u0435") + .replace("zh", "\u0436") + .replace("a", "\u0430") + .replace("b", "\u0431") + .replace("v", "\u0432") + .replace("g", "\u0433") + .replace("d", "\u0434") + .replace("e", "\u0435") + .replace("z", "\u0437") + .replace("i", "\u0438") + .replace("l", "\u043B") + .replace("m", "\u043C") + .replace("n", "\u043D") + .replace("o", "\u043E") + .replace("p", "\u043F") + .replace("r", "\u0440") + .replace("s", "\u0441") + .replace("t", "\u0442") + .replace("u", "\u0443") + .replace("f", "\u0444") + .replace("''", "\u044A") + .replace("y", "\u044B") + .replace("'", "\u044C") + ) + + return word + + +class SpanishStemmer(_StandardStemmer): + + """ + The Spanish Snowball stemmer. + + :cvar __vowels: The Spanish vowels. + :type __vowels: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. 
+ :type __step0_suffixes: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm. + :type __step2a_suffixes: tuple + :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm. + :type __step2b_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Spanish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/spanish/stemmer.html + + """ + + __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xFC" + __step0_suffixes = ( + "selas", + "selos", + "sela", + "selo", + "las", + "les", + "los", + "nos", + "me", + "se", + "la", + "le", + "lo", + ) + __step1_suffixes = ( + "amientos", + "imientos", + "amiento", + "imiento", + "acion", + "aciones", + "uciones", + "adoras", + "adores", + "ancias", + "log\xEDas", + "encias", + "amente", + "idades", + "anzas", + "ismos", + "ables", + "ibles", + "istas", + "adora", + "aci\xF3n", + "antes", + "ancia", + "log\xEDa", + "uci\xf3n", + "encia", + "mente", + "anza", + "icos", + "icas", + "ismo", + "able", + "ible", + "ista", + "osos", + "osas", + "ador", + "ante", + "idad", + "ivas", + "ivos", + "ico", + "ica", + "oso", + "osa", + "iva", + "ivo", + ) + __step2a_suffixes = ( + "yeron", + "yendo", + "yamos", + "yais", + "yan", + "yen", + "yas", + "yes", + "ya", + "ye", + "yo", + "y\xF3", + ) + __step2b_suffixes = ( + "ar\xEDamos", + "er\xEDamos", + "ir\xEDamos", + "i\xE9ramos", + "i\xE9semos", + "ar\xEDais", + "aremos", + "er\xEDais", + "eremos", + "ir\xEDais", + "iremos", + "ierais", + "ieseis", + "asteis", + "isteis", + "\xE1bamos", + "\xE1ramos", + "\xE1semos", + "ar\xEDan", + "ar\xEDas", + "ar\xE9is", + "er\xEDan", + "er\xEDas", + "er\xE9is", + "ir\xEDan", + "ir\xEDas", + "ir\xE9is", + "ieran", + "iesen", + "ieron", + "iendo", + "ieras", + "ieses", + "abais", + "arais", + "aseis", + "\xE9amos", + "ar\xE1n", + "ar\xE1s", + "ar\xEDa", + "er\xE1n", + "er\xE1s", + "er\xEDa", + "ir\xE1n", + "ir\xE1s", + "ir\xEDa", + "iera", + "iese", + "aste", + "iste", + "aban", + "aran", + "asen", + "aron", + "ando", + "abas", + "adas", + "idas", + "aras", + "ases", + "\xEDais", + "ados", + "idos", + "amos", + "imos", + "emos", + "ar\xE1", + "ar\xE9", + "er\xE1", + "er\xE9", + "ir\xE1", + "ir\xE9", + "aba", + "ada", + "ida", + "ara", + "ase", + "\xEDan", + "ado", + "ido", + "\xEDas", + "\xE1is", + "\xE9is", + "\xEDa", + "ad", + "ed", + "id", + "an", + "i\xF3", + "ar", + "er", + "ir", + "as", + "\xEDs", + "en", + "es", + ) + __step3_suffixes = ("os", "a", "e", "o", "\xE1", "\xE9", "\xED", "\xF3") + + def stem(self, word): + """ + Stem a Spanish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 0: Attached pronoun + for suffix in self.__step0_suffixes: + if not (word.endswith(suffix) and rv.endswith(suffix)): + continue + + if ( + rv[: -len(suffix)].endswith( + ( + "ando", + "\xE1ndo", + "ar", + "\xE1r", + "er", + "\xE9r", + "iendo", + "i\xE9ndo", + "ir", + "\xEDr", + ) + ) + ) or ( + rv[: -len(suffix)].endswith("yendo") + and word[: -len(suffix)].endswith("uyendo") + ): + + word = self.__replace_accented(word[: -len(suffix)]) + r1 = self.__replace_accented(r1[: -len(suffix)]) + r2 = self.__replace_accented(r2[: -len(suffix)]) + rv = self.__replace_accented(rv[: -len(suffix)]) + break + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if not word.endswith(suffix): + continue + + if suffix == "amente" and r1.endswith(suffix): + step1_success = True + word = word[:-6] + r2 = r2[:-6] + rv = rv[:-6] + + if r2.endswith("iv"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(("os", "ic", "ad")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(suffix): + step1_success = True + if suffix in ( + "adora", + "ador", + "aci\xF3n", + "adoras", + "adores", + "acion", + "aciones", + "ante", + "antes", + "ancia", + "ancias", + ): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith("ic"): + word = word[:-2] + rv = rv[:-2] + + elif suffix in ("log\xEDa", "log\xEDas"): + word = suffix_replace(word, suffix, "log") + rv = suffix_replace(rv, suffix, "log") + + elif suffix in ("uci\xF3n", "uciones"): + word = suffix_replace(word, suffix, "u") + rv = suffix_replace(rv, suffix, "u") + + elif suffix in ("encia", "encias"): + word = suffix_replace(word, suffix, "ente") + rv = suffix_replace(rv, suffix, "ente") + + elif suffix == "mente": + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith(("ante", "able", "ible")): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("idad", "idades"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + for pre_suff in ("abil", "ic", "iv"): + if r2.endswith(pre_suff): + word = word[: -len(pre_suff)] + rv = rv[: -len(pre_suff)] + + elif suffix in ("ivo", "iva", "ivos", "ivas"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + else: + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2a: Verb suffixes beginning 'y' + if not step1_success: + for suffix in self.__step2a_suffixes: + if rv.endswith(suffix) and word[-len(suffix) - 1 : -len(suffix)] == "u": + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2b: Other verb suffixes + for suffix in self.__step2b_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + if suffix in ("en", "es", "\xE9is", "emos"): + if word.endswith("gu"): + word = word[:-1] + + if rv.endswith("gu"): + rv = rv[:-1] + break + + # STEP 3: Residual suffix + for suffix in self.__step3_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + if suffix in ("e", "\xE9"): + rv = rv[: -len(suffix)] + + if word[-2:] == "gu" and rv.endswith("u"): + word = word[:-1] + break + + word = 
self.__replace_accented(word) + + return word + + def __replace_accented(self, word): + """ + Replaces all accented letters on a word with their non-accented + counterparts. + + :param word: A spanish word, with or without accents + :type word: str or unicode + :return: a word with the accented letters (á, é, í, ó, ú) replaced with + their non-accented counterparts (a, e, i, o, u) + :rtype: str or unicode + """ + return ( + word.replace("\xE1", "a") + .replace("\xE9", "e") + .replace("\xED", "i") + .replace("\xF3", "o") + .replace("\xFA", "u") + ) + + +class SwedishStemmer(_ScandinavianStemmer): + + """ + The Swedish Snowball stemmer. + + :cvar __vowels: The Swedish vowels. + :type __vowels: unicode + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Swedish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/swedish/stemmer.html + + """ + + __vowels = "aeiouy\xE4\xE5\xF6" + __s_ending = "bcdfghjklmnoprtvy" + __step1_suffixes = ( + "heterna", + "hetens", + "heter", + "heten", + "anden", + "arnas", + "ernas", + "ornas", + "andes", + "andet", + "arens", + "arna", + "erna", + "orna", + "ande", + "arne", + "aste", + "aren", + "ades", + "erns", + "ade", + "are", + "ern", + "ens", + "het", + "ast", + "ad", + "en", + "ar", + "er", + "or", + "as", + "es", + "at", + "a", + "e", + "s", + ) + __step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt") + __step3_suffixes = ("fullt", "l\xF6st", "els", "lig", "ig") + + def stem(self, word): + """ + Stem a Swedish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + r1 = self._r1_scandinavian(word, self.__vowels) + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "s": + if word[-2] in self.__s_ending: + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 3 + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix in ("els", "lig", "ig"): + word = word[: -len(suffix)] + elif suffix in ("fullt", "l\xF6st"): + word = word[:-1] + break + + return word + + +def demo(): + """ + This function provides a demonstration of the Snowball stemmers. + + After invoking this function and specifying a language, + it stems an excerpt of the Universal Declaration of Human Rights + (which is a part of the NLTK corpus collection) and then prints + out the original and the stemmed text. 
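+    For example, choosing ``spanish`` stems the first 300 words of the
+    Spanish UDHR excerpt and prints the original text followed by the
+    stemmed text.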
+ + """ + + from nltk.corpus import udhr + + udhr_corpus = { + "arabic": "Arabic_Alarabia-Arabic", + "danish": "Danish_Dansk-Latin1", + "dutch": "Dutch_Nederlands-Latin1", + "english": "English-Latin1", + "finnish": "Finnish_Suomi-Latin1", + "french": "French_Francais-Latin1", + "german": "German_Deutsch-Latin1", + "hungarian": "Hungarian_Magyar-UTF8", + "italian": "Italian_Italiano-Latin1", + "norwegian": "Norwegian-Latin1", + "porter": "English-Latin1", + "portuguese": "Portuguese_Portugues-Latin1", + "romanian": "Romanian_Romana-Latin2", + "russian": "Russian-UTF8", + "spanish": "Spanish-Latin1", + "swedish": "Swedish_Svenska-Latin1", + } + + print("\n") + print("******************************") + print("Demo for the Snowball stemmers") + print("******************************") + + while True: + + language = input( + "Please enter the name of the language " + + "to be demonstrated\n" + + "/".join(SnowballStemmer.languages) + + "\n" + + "(enter 'exit' in order to leave): " + ) + + if language == "exit": + break + + if language not in SnowballStemmer.languages: + print( + "\nOops, there is no stemmer for this language. " + + "Please try again.\n" + ) + continue + + stemmer = SnowballStemmer(language) + excerpt = udhr.words(udhr_corpus[language])[:300] + + stemmed = " ".join(stemmer.stem(word) for word in excerpt) + stemmed = re.sub(r"(.{,70})\s", r"\1\n", stemmed + " ").rstrip() + excerpt = " ".join(excerpt) + excerpt = re.sub(r"(.{,70})\s", r"\1\n", excerpt + " ").rstrip() + + print("\n") + print("-" * 70) + print("ORIGINAL".center(70)) + print(excerpt) + print("\n\n") + print("STEMMED RESULTS".center(70)) + print(stemmed) + print("-" * 70) + print("\n") diff --git a/venv/lib/python3.10/site-packages/nltk/stem/util.py b/venv/lib/python3.10/site-packages/nltk/stem/util.py new file mode 100644 index 0000000000000000000000000000000000000000..71d3a661e582e5dd60ef0a2bdcb2674ef29d472c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/util.py @@ -0,0 +1,25 @@ +# Natural Language Toolkit: Stemmer Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Helder +# URL: +# For license information, see LICENSE.TXT + + +def suffix_replace(original, old, new): + """ + Replaces the old suffix of the original string by a new suffix + """ + return original[: -len(old)] + new + + +def prefix_replace(original, old, new): + """ + Replaces the old prefix of the original string by a new suffix + + :param original: string + :param old: string + :param new: string + :return: string + """ + return new + original[len(old) :] diff --git a/venv/lib/python3.10/site-packages/nltk/stem/wordnet.py b/venv/lib/python3.10/site-packages/nltk/stem/wordnet.py new file mode 100644 index 0000000000000000000000000000000000000000..0ccb056f7e2172f46d03236d76942c3080d2f107 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/stem/wordnet.py @@ -0,0 +1,49 @@ +# Natural Language Toolkit: WordNet stemmer interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus import wordnet as wn + + +class WordNetLemmatizer: + """ + WordNet Lemmatizer + + Lemmatize using WordNet's built-in morphy function. + Returns the input word unchanged if it cannot be found in WordNet. 
+ + >>> from nltk.stem import WordNetLemmatizer + >>> wnl = WordNetLemmatizer() + >>> print(wnl.lemmatize('dogs')) + dog + >>> print(wnl.lemmatize('churches')) + church + >>> print(wnl.lemmatize('aardwolves')) + aardwolf + >>> print(wnl.lemmatize('abaci')) + abacus + >>> print(wnl.lemmatize('hardrock')) + hardrock + """ + + def lemmatize(self, word: str, pos: str = "n") -> str: + """Lemmatize `word` using WordNet's built-in morphy function. + Returns the input word unchanged if it cannot be found in WordNet. + + :param word: The input word to lemmatize. + :type word: str + :param pos: The Part Of Speech tag. Valid options are `"n"` for nouns, + `"v"` for verbs, `"a"` for adjectives, `"r"` for adverbs and `"s"` + for satellite adjectives. + :param pos: str + :return: The lemma of `word`, for the given `pos`. + """ + lemmas = wn._morphy(word, pos) + return min(lemmas, key=len) if lemmas else word + + def __repr__(self): + return "" diff --git a/venv/lib/python3.10/site-packages/nltk/tree/__init__.py b/venv/lib/python3.10/site-packages/nltk/tree/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..39eb7195124c71050b5a7fb4dd030fdadf410967 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tree/__init__.py @@ -0,0 +1,52 @@ +# Natural Language Toolkit: Machine Translation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Tree Package + +This package may be used for representing hierarchical language +structures, such as syntax trees and morphological trees. +""" + +# TODO: add LabelledTree (can be used for dependency trees) + +from nltk.tree.immutable import ( + ImmutableMultiParentedTree, + ImmutableParentedTree, + ImmutableProbabilisticTree, + ImmutableTree, +) +from nltk.tree.parented import MultiParentedTree, ParentedTree +from nltk.tree.parsing import bracket_parse, sinica_parse +from nltk.tree.prettyprinter import TreePrettyPrinter +from nltk.tree.probabilistic import ProbabilisticTree +from nltk.tree.transforms import ( + chomsky_normal_form, + collapse_unary, + un_chomsky_normal_form, +) +from nltk.tree.tree import Tree + +__all__ = [ + "ImmutableMultiParentedTree", + "ImmutableParentedTree", + "ImmutableProbabilisticTree", + "ImmutableTree", + "MultiParentedTree", + "ParentedTree", + "bracket_parse", + "sinica_parse", + "TreePrettyPrinter", + "ProbabilisticTree", + "chomsky_normal_form", + "collapse_unary", + "un_chomsky_normal_form", + "Tree", +] diff --git a/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a7afda61b4c8f60d7a4e6a4eaff4a84cfad9009 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/immutable.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/immutable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab4698cd864927b5b339958bfd84f9f3d7c514e5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/immutable.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/parented.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/parented.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..d0999ae273634552e6211e6ca481d91a774124a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/parented.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/parsing.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/parsing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd318ad783cdfdc2417d10b2bd64cb07925d4119 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/parsing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/prettyprinter.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/prettyprinter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df152667da6d4e8d36b2994556cdc0993873a0c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/prettyprinter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/probabilistic.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/probabilistic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e147280d2ba3c43183ecdbeed07b7446eb7de638 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/probabilistic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/transforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5f9ca1fcda3aeec1bf13d6f661ba962d5315fcf Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/transforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a56f2d773616197b39e983a1b01453b3075fbac Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tree/__pycache__/tree.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tree/immutable.py b/venv/lib/python3.10/site-packages/nltk/tree/immutable.py new file mode 100644 index 0000000000000000000000000000000000000000..94e7ef9473a9c02b988db5318f3b282eb153e439 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tree/immutable.py @@ -0,0 +1,124 @@ +# Natural Language Toolkit: Text Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +from nltk.probability import ProbabilisticMixIn +from nltk.tree.parented import MultiParentedTree, ParentedTree +from nltk.tree.tree import Tree + + +class ImmutableTree(Tree): + def __init__(self, node, children=None): + super().__init__(node, children) + # Precompute our hash value. This ensures that we're really + # immutable. It also means we only have to calculate it once. 
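+        # hash() fails with a TypeError if the label or any child is
+        # unhashable (e.g. a list leaf); this is re-raised below as a
+        # ValueError stating that node values and children must be
+        # immutable.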
+ try: + self._hash = hash((self._label, tuple(self))) + except (TypeError, ValueError) as e: + raise ValueError( + "%s: node value and children " "must be immutable" % type(self).__name__ + ) from e + + def __setitem__(self, index, value): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __setslice__(self, i, j, value): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __delitem__(self, index): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __delslice__(self, i, j): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __iadd__(self, other): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __imul__(self, other): + raise ValueError("%s may not be modified" % type(self).__name__) + + def append(self, v): + raise ValueError("%s may not be modified" % type(self).__name__) + + def extend(self, v): + raise ValueError("%s may not be modified" % type(self).__name__) + + def pop(self, v=None): + raise ValueError("%s may not be modified" % type(self).__name__) + + def remove(self, v): + raise ValueError("%s may not be modified" % type(self).__name__) + + def reverse(self): + raise ValueError("%s may not be modified" % type(self).__name__) + + def sort(self): + raise ValueError("%s may not be modified" % type(self).__name__) + + def __hash__(self): + return self._hash + + def set_label(self, value): + """ + Set the node label. This will only succeed the first time the + node label is set, which should occur in ImmutableTree.__init__(). + """ + if hasattr(self, "_label"): + raise ValueError("%s may not be modified" % type(self).__name__) + self._label = value + + +class ImmutableProbabilisticTree(ImmutableTree, ProbabilisticMixIn): + def __init__(self, node, children=None, **prob_kwargs): + ImmutableTree.__init__(self, node, children) + ProbabilisticMixIn.__init__(self, **prob_kwargs) + self._hash = hash((self._label, tuple(self), self.prob())) + + # We have to patch up these methods to make them work right: + def _frozen_class(self): + return ImmutableProbabilisticTree + + def __repr__(self): + return f"{Tree.__repr__(self)} [{self.prob()}]" + + def __str__(self): + return f"{self.pformat(margin=60)} [{self.prob()}]" + + def copy(self, deep=False): + if not deep: + return type(self)(self._label, self, prob=self.prob()) + else: + return type(self).convert(self) + + @classmethod + def convert(cls, val): + if isinstance(val, Tree): + children = [cls.convert(child) for child in val] + if isinstance(val, ProbabilisticMixIn): + return cls(val._label, children, prob=val.prob()) + else: + return cls(val._label, children, prob=1.0) + else: + return val + + +class ImmutableParentedTree(ImmutableTree, ParentedTree): + pass + + +class ImmutableMultiParentedTree(ImmutableTree, MultiParentedTree): + pass + + +__all__ = [ + "ImmutableProbabilisticTree", + "ImmutableTree", + "ImmutableParentedTree", + "ImmutableMultiParentedTree", +] diff --git a/venv/lib/python3.10/site-packages/nltk/tree/parented.py b/venv/lib/python3.10/site-packages/nltk/tree/parented.py new file mode 100644 index 0000000000000000000000000000000000000000..c43788f1a79902d02c10b0f699cd2f6026b2d646 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tree/parented.py @@ -0,0 +1,590 @@ +# Natural Language Toolkit: Text Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +import warnings +from abc 
import ABCMeta, abstractmethod + +from nltk.tree.tree import Tree +from nltk.util import slice_bounds + + +###################################################################### +## Parented trees +###################################################################### +class AbstractParentedTree(Tree, metaclass=ABCMeta): + """ + An abstract base class for a ``Tree`` that automatically maintains + pointers to parent nodes. These parent pointers are updated + whenever any change is made to a tree's structure. Two subclasses + are currently defined: + + - ``ParentedTree`` is used for tree structures where each subtree + has at most one parent. This class should be used in cases + where there is no"sharing" of subtrees. + + - ``MultiParentedTree`` is used for tree structures where a + subtree may have zero or more parents. This class should be + used in cases where subtrees may be shared. + + Subclassing + =========== + The ``AbstractParentedTree`` class redefines all operations that + modify a tree's structure to call two methods, which are used by + subclasses to update parent information: + + - ``_setparent()`` is called whenever a new child is added. + - ``_delparent()`` is called whenever a child is removed. + """ + + def __init__(self, node, children=None): + super().__init__(node, children) + # If children is None, the tree is read from node, and + # all parents will be set during parsing. + if children is not None: + # Otherwise we have to set the parent of the children. + # Iterate over self, and *not* children, because children + # might be an iterator. + for i, child in enumerate(self): + if isinstance(child, Tree): + self._setparent(child, i, dry_run=True) + for i, child in enumerate(self): + if isinstance(child, Tree): + self._setparent(child, i) + + # //////////////////////////////////////////////////////////// + # Parent management + # //////////////////////////////////////////////////////////// + @abstractmethod + def _setparent(self, child, index, dry_run=False): + """ + Update the parent pointer of ``child`` to point to ``self``. This + method is only called if the type of ``child`` is ``Tree``; + i.e., it is not called when adding a leaf to a tree. This method + is always called before the child is actually added to the + child list of ``self``. + + :type child: Tree + :type index: int + :param index: The index of ``child`` in ``self``. + :raise TypeError: If ``child`` is a tree with an impropriate + type. Typically, if ``child`` is a tree, then its type needs + to match the type of ``self``. This prevents mixing of + different tree types (single-parented, multi-parented, and + non-parented). + :param dry_run: If true, the don't actually set the child's + parent pointer; just check for any error conditions, and + raise an exception if one is found. + """ + + @abstractmethod + def _delparent(self, child, index): + """ + Update the parent pointer of ``child`` to not point to self. This + method is only called if the type of ``child`` is ``Tree``; i.e., it + is not called when removing a leaf from a tree. This method + is always called before the child is actually removed from the + child list of ``self``. + + :type child: Tree + :type index: int + :param index: The index of ``child`` in ``self``. 
+ """ + + # //////////////////////////////////////////////////////////// + # Methods that add/remove children + # //////////////////////////////////////////////////////////// + # Every method that adds or removes a child must make + # appropriate calls to _setparent() and _delparent(). + + def __delitem__(self, index): + # del ptree[start:stop] + if isinstance(index, slice): + start, stop, step = slice_bounds(self, index, allow_step=True) + # Clear all the children pointers. + for i in range(start, stop, step): + if isinstance(self[i], Tree): + self._delparent(self[i], i) + # Delete the children from our child list. + super().__delitem__(index) + + # del ptree[i] + elif isinstance(index, int): + if index < 0: + index += len(self) + if index < 0: + raise IndexError("index out of range") + # Clear the child's parent pointer. + if isinstance(self[index], Tree): + self._delparent(self[index], index) + # Remove the child from our child list. + super().__delitem__(index) + + elif isinstance(index, (list, tuple)): + # del ptree[()] + if len(index) == 0: + raise IndexError("The tree position () may not be deleted.") + # del ptree[(i,)] + elif len(index) == 1: + del self[index[0]] + # del ptree[i1, i2, i3] + else: + del self[index[0]][index[1:]] + + else: + raise TypeError( + "%s indices must be integers, not %s" + % (type(self).__name__, type(index).__name__) + ) + + def __setitem__(self, index, value): + # ptree[start:stop] = value + if isinstance(index, slice): + start, stop, step = slice_bounds(self, index, allow_step=True) + # make a copy of value, in case it's an iterator + if not isinstance(value, (list, tuple)): + value = list(value) + # Check for any error conditions, so we can avoid ending + # up in an inconsistent state if an error does occur. + for i, child in enumerate(value): + if isinstance(child, Tree): + self._setparent(child, start + i * step, dry_run=True) + # clear the child pointers of all parents we're removing + for i in range(start, stop, step): + if isinstance(self[i], Tree): + self._delparent(self[i], i) + # set the child pointers of the new children. We do this + # after clearing *all* child pointers, in case we're e.g. + # reversing the elements in a tree. + for i, child in enumerate(value): + if isinstance(child, Tree): + self._setparent(child, start + i * step) + # finally, update the content of the child list itself. + super().__setitem__(index, value) + + # ptree[i] = value + elif isinstance(index, int): + if index < 0: + index += len(self) + if index < 0: + raise IndexError("index out of range") + # if the value is not changing, do nothing. + if value is self[index]: + return + # Set the new child's parent pointer. + if isinstance(value, Tree): + self._setparent(value, index) + # Remove the old child's parent pointer + if isinstance(self[index], Tree): + self._delparent(self[index], index) + # Update our child list. 
+ super().__setitem__(index, value) + + elif isinstance(index, (list, tuple)): + # ptree[()] = value + if len(index) == 0: + raise IndexError("The tree position () may not be assigned to.") + # ptree[(i,)] = value + elif len(index) == 1: + self[index[0]] = value + # ptree[i1, i2, i3] = value + else: + self[index[0]][index[1:]] = value + + else: + raise TypeError( + "%s indices must be integers, not %s" + % (type(self).__name__, type(index).__name__) + ) + + def append(self, child): + if isinstance(child, Tree): + self._setparent(child, len(self)) + super().append(child) + + def extend(self, children): + for child in children: + if isinstance(child, Tree): + self._setparent(child, len(self)) + super().append(child) + + def insert(self, index, child): + # Handle negative indexes. Note that if index < -len(self), + # we do *not* raise an IndexError, unlike __getitem__. This + # is done for consistency with list.__getitem__ and list.index. + if index < 0: + index += len(self) + if index < 0: + index = 0 + # Set the child's parent, and update our child list. + if isinstance(child, Tree): + self._setparent(child, index) + super().insert(index, child) + + def pop(self, index=-1): + if index < 0: + index += len(self) + if index < 0: + raise IndexError("index out of range") + if isinstance(self[index], Tree): + self._delparent(self[index], index) + return super().pop(index) + + # n.b.: like `list`, this is done by equality, not identity! + # To remove a specific child, use del ptree[i]. + def remove(self, child): + index = self.index(child) + if isinstance(self[index], Tree): + self._delparent(self[index], index) + super().remove(child) + + # We need to implement __getslice__ and friends, even though + # they're deprecated, because otherwise list.__getslice__ will get + # called (since we're subclassing from list). Just delegate to + # __getitem__ etc., but use max(0, start) and max(0, stop) because + # because negative indices are already handled *before* + # __getslice__ is called; and we don't want to double-count them. + if hasattr(list, "__getslice__"): + + def __getslice__(self, start, stop): + return self.__getitem__(slice(max(0, start), max(0, stop))) + + def __delslice__(self, start, stop): + return self.__delitem__(slice(max(0, start), max(0, stop))) + + def __setslice__(self, start, stop, value): + return self.__setitem__(slice(max(0, start), max(0, stop)), value) + + def __getnewargs__(self): + """Method used by the pickle module when un-pickling. + This method provides the arguments passed to ``__new__`` + upon un-pickling. Without this method, ParentedTree instances + cannot be pickled and unpickled in Python 3.7+ onwards. + + :return: Tuple of arguments for ``__new__``, i.e. the label + and the children of this node. + :rtype: Tuple[Any, List[AbstractParentedTree]] + """ + return (self._label, list(self)) + + +class ParentedTree(AbstractParentedTree): + """ + A ``Tree`` that automatically maintains parent pointers for + single-parented trees. The following are methods for querying + the structure of a parented tree: ``parent``, ``parent_index``, + ``left_sibling``, ``right_sibling``, ``root``, ``treeposition``. + + Each ``ParentedTree`` may have at most one parent. In + particular, subtrees may not be shared. Any attempt to reuse a + single ``ParentedTree`` as a child of more than one parent (or + as multiple children of the same parent) will cause a + ``ValueError`` exception to be raised. 
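# Illustrative sketch (not part of the upstream patch): how the automatic
# parent bookkeeping described above behaves for single-parented trees.
# Only the public nltk.tree API is assumed; the tree string is a made-up example.
from nltk.tree import ParentedTree

ptree = ParentedTree.fromstring("(S (NP I) (VP (V saw) (NP him)))")
vp = ptree[1]
print(vp.parent() is ptree)   # True: parent pointer was set automatically
print(vp.parent_index())      # 1
print(vp.left_sibling())      # (NP I)
print(vp.treeposition())      # (1,)

# Reusing a subtree that already has a parent is rejected, since each
# ParentedTree may have at most one parent.
try:
    ptree.append(vp)
except ValueError as err:
    print(err)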
+ + ``ParentedTrees`` should never be used in the same tree as ``Trees`` + or ``MultiParentedTrees``. Mixing tree implementations may result + in incorrect parent pointers and in ``TypeError`` exceptions. + """ + + def __init__(self, node, children=None): + self._parent = None + """The parent of this Tree, or None if it has no parent.""" + super().__init__(node, children) + if children is None: + # If children is None, the tree is read from node. + # After parsing, the parent of the immediate children + # will point to an intermediate tree, not self. + # We fix this by brute force: + for i, child in enumerate(self): + if isinstance(child, Tree): + child._parent = None + self._setparent(child, i) + + def _frozen_class(self): + from nltk.tree.immutable import ImmutableParentedTree + + return ImmutableParentedTree + + def copy(self, deep=False): + if not deep: + warnings.warn( + f"{self.__class__.__name__} objects do not support shallow copies. Defaulting to a deep copy." + ) + return super().copy(deep=True) + + # ///////////////////////////////////////////////////////////////// + # Methods + # ///////////////////////////////////////////////////////////////// + + def parent(self): + """The parent of this tree, or None if it has no parent.""" + return self._parent + + def parent_index(self): + """ + The index of this tree in its parent. I.e., + ``ptree.parent()[ptree.parent_index()] is ptree``. Note that + ``ptree.parent_index()`` is not necessarily equal to + ``ptree.parent.index(ptree)``, since the ``index()`` method + returns the first child that is equal to its argument. + """ + if self._parent is None: + return None + for i, child in enumerate(self._parent): + if child is self: + return i + assert False, "expected to find self in self._parent!" + + def left_sibling(self): + """The left sibling of this tree, or None if it has none.""" + parent_index = self.parent_index() + if self._parent and parent_index > 0: + return self._parent[parent_index - 1] + return None # no left sibling + + def right_sibling(self): + """The right sibling of this tree, or None if it has none.""" + parent_index = self.parent_index() + if self._parent and parent_index < (len(self._parent) - 1): + return self._parent[parent_index + 1] + return None # no right sibling + + def root(self): + """ + The root of this tree. I.e., the unique ancestor of this tree + whose parent is None. If ``ptree.parent()`` is None, then + ``ptree`` is its own root. + """ + root = self + while root.parent() is not None: + root = root.parent() + return root + + def treeposition(self): + """ + The tree position of this tree, relative to the root of the + tree. I.e., ``ptree.root[ptree.treeposition] is ptree``. + """ + if self.parent() is None: + return () + else: + return self.parent().treeposition() + (self.parent_index(),) + + # ///////////////////////////////////////////////////////////////// + # Parent Management + # ///////////////////////////////////////////////////////////////// + + def _delparent(self, child, index): + # Sanity checks + assert isinstance(child, ParentedTree) + assert self[index] is child + assert child._parent is self + + # Delete child's parent pointer. + child._parent = None + + def _setparent(self, child, index, dry_run=False): + # If the child's type is incorrect, then complain. + if not isinstance(child, ParentedTree): + raise TypeError("Can not insert a non-ParentedTree into a ParentedTree") + + # If child already has a parent, then complain. 
+ if hasattr(child, "_parent") and child._parent is not None: + raise ValueError("Can not insert a subtree that already has a parent.") + + # Set child's parent pointer & index. + if not dry_run: + child._parent = self + + +class MultiParentedTree(AbstractParentedTree): + """ + A ``Tree`` that automatically maintains parent pointers for + multi-parented trees. The following are methods for querying the + structure of a multi-parented tree: ``parents()``, ``parent_indices()``, + ``left_siblings()``, ``right_siblings()``, ``roots``, ``treepositions``. + + Each ``MultiParentedTree`` may have zero or more parents. In + particular, subtrees may be shared. If a single + ``MultiParentedTree`` is used as multiple children of the same + parent, then that parent will appear multiple times in its + ``parents()`` method. + + ``MultiParentedTrees`` should never be used in the same tree as + ``Trees`` or ``ParentedTrees``. Mixing tree implementations may + result in incorrect parent pointers and in ``TypeError`` exceptions. + """ + + def __init__(self, node, children=None): + self._parents = [] + """A list of this tree's parents. This list should not + contain duplicates, even if a parent contains this tree + multiple times.""" + super().__init__(node, children) + if children is None: + # If children is None, the tree is read from node. + # After parsing, the parent(s) of the immediate children + # will point to an intermediate tree, not self. + # We fix this by brute force: + for i, child in enumerate(self): + if isinstance(child, Tree): + child._parents = [] + self._setparent(child, i) + + def _frozen_class(self): + from nltk.tree.immutable import ImmutableMultiParentedTree + + return ImmutableMultiParentedTree + + # ///////////////////////////////////////////////////////////////// + # Methods + # ///////////////////////////////////////////////////////////////// + + def parents(self): + """ + The set of parents of this tree. If this tree has no parents, + then ``parents`` is the empty set. To check if a tree is used + as multiple children of the same parent, use the + ``parent_indices()`` method. + + :type: list(MultiParentedTree) + """ + return list(self._parents) + + def left_siblings(self): + """ + A list of all left siblings of this tree, in any of its parent + trees. A tree may be its own left sibling if it is used as + multiple contiguous children of the same parent. A tree may + appear multiple times in this list if it is the left sibling + of this tree with respect to multiple parents. + + :type: list(MultiParentedTree) + """ + return [ + parent[index - 1] + for (parent, index) in self._get_parent_indices() + if index > 0 + ] + + def right_siblings(self): + """ + A list of all right siblings of this tree, in any of its parent + trees. A tree may be its own right sibling if it is used as + multiple contiguous children of the same parent. A tree may + appear multiple times in this list if it is the right sibling + of this tree with respect to multiple parents. + + :type: list(MultiParentedTree) + """ + return [ + parent[index + 1] + for (parent, index) in self._get_parent_indices() + if index < (len(parent) - 1) + ] + + def _get_parent_indices(self): + return [ + (parent, index) + for parent in self._parents + for index, child in enumerate(parent) + if child is self + ] + + def roots(self): + """ + The set of all roots of this tree. This set is formed by + tracing all possible parent paths until trees with no parents + are found. 
+ + :type: list(MultiParentedTree) + """ + return list(self._get_roots_helper({}).values()) + + def _get_roots_helper(self, result): + if self._parents: + for parent in self._parents: + parent._get_roots_helper(result) + else: + result[id(self)] = self + return result + + def parent_indices(self, parent): + """ + Return a list of the indices where this tree occurs as a child + of ``parent``. If this child does not occur as a child of + ``parent``, then the empty list is returned. The following is + always true:: + + for parent_index in ptree.parent_indices(parent): + parent[parent_index] is ptree + """ + if parent not in self._parents: + return [] + else: + return [index for (index, child) in enumerate(parent) if child is self] + + def treepositions(self, root): + """ + Return a list of all tree positions that can be used to reach + this multi-parented tree starting from ``root``. I.e., the + following is always true:: + + for treepos in ptree.treepositions(root): + root[treepos] is ptree + """ + if self is root: + return [()] + else: + return [ + treepos + (index,) + for parent in self._parents + for treepos in parent.treepositions(root) + for (index, child) in enumerate(parent) + if child is self + ] + + # ///////////////////////////////////////////////////////////////// + # Parent Management + # ///////////////////////////////////////////////////////////////// + + def _delparent(self, child, index): + # Sanity checks + assert isinstance(child, MultiParentedTree) + assert self[index] is child + assert len([p for p in child._parents if p is self]) == 1 + + # If the only copy of child in self is at index, then delete + # self from child's parent list. + for i, c in enumerate(self): + if c is child and i != index: + break + else: + child._parents.remove(self) + + def _setparent(self, child, index, dry_run=False): + # If the child's type is incorrect, then complain. + if not isinstance(child, MultiParentedTree): + raise TypeError( + "Can not insert a non-MultiParentedTree into a MultiParentedTree" + ) + + # Add self as a parent pointer if it's not already listed. + if not dry_run: + for parent in child._parents: + if parent is self: + break + else: + child._parents.append(self) + + +__all__ = [ + "ParentedTree", + "MultiParentedTree", +] diff --git a/venv/lib/python3.10/site-packages/nltk/tree/parsing.py b/venv/lib/python3.10/site-packages/nltk/tree/parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..c2df4e166832b940eb5e961a9e7bf685d95b5e63 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tree/parsing.py @@ -0,0 +1,66 @@ +# Natural Language Toolkit: Text Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +import re + +from nltk.tree.tree import Tree + +###################################################################### +## Parsing +###################################################################### + + +def bracket_parse(s): + """ + Use Tree.read(s, remove_empty_top_bracketing=True) instead. + """ + raise NameError("Use Tree.read(s, remove_empty_top_bracketing=True) instead.") + + +def sinica_parse(s): + """ + Parse a Sinica Treebank string and return a tree. 
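# Illustrative sketch (not part of the upstream patch): bracket_parse() above
# only redirects callers to Tree.fromstring(); the equivalent call looks like
# this. The doubly-bracketed string is a made-up example of the extra
# top-level bracketing found in some treebank files.
from nltk.tree import Tree

t = Tree.fromstring(
    "((S (NP I) (VP (V saw) (NP him))))",
    remove_empty_top_bracketing=True,
)
print(t.label())   # 'S' -- the empty outer bracketing has been removed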
Trees are represented as nested brackettings, + as shown in the following example (X represents a Chinese character): + S(goal:NP(Head:Nep:XX)|theme:NP(Head:Nhaa:X)|quantity:Dab:X|Head:VL2:X)#0(PERIODCATEGORY) + + :return: A tree corresponding to the string representation. + :rtype: Tree + :param s: The string to be converted + :type s: str + """ + tokens = re.split(r"([()| ])", s) + for i in range(len(tokens)): + if tokens[i] == "(": + tokens[i - 1], tokens[i] = ( + tokens[i], + tokens[i - 1], + ) # pull nonterminal inside parens + elif ":" in tokens[i]: + fields = tokens[i].split(":") + if len(fields) == 2: # non-terminal + tokens[i] = fields[1] + else: + tokens[i] = "(" + fields[-2] + " " + fields[-1] + ")" + elif tokens[i] == "|": + tokens[i] = "" + + treebank_string = " ".join(tokens) + return Tree.fromstring(treebank_string, remove_empty_top_bracketing=True) + + +# s = re.sub(r'^#[^\s]*\s', '', s) # remove leading identifier +# s = re.sub(r'\w+:', '', s) # remove role tags + +# return s + +__all__ = [ + "bracket_parse", + "sinica_parse", +] diff --git a/venv/lib/python3.10/site-packages/nltk/tree/prettyprinter.py b/venv/lib/python3.10/site-packages/nltk/tree/prettyprinter.py new file mode 100644 index 0000000000000000000000000000000000000000..33e1a93208b17a48e3ff4bbcbbb1d4017e89f198 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tree/prettyprinter.py @@ -0,0 +1,627 @@ +# Natural Language Toolkit: ASCII visualization of NLTK trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Andreas van Cranenburgh +# Peter Ljunglöf +# URL: +# For license information, see LICENSE.TXT + +""" +Pretty-printing of discontinuous trees. +Adapted from the disco-dop project, by Andreas van Cranenburgh. +https://github.com/andreasvc/disco-dop + +Interesting reference (not used for this code): +T. Eschbach et al., Orth. Hypergraph Drawing, Journal of +Graph Algorithms and Applications, 10(2) 141--157 (2006)149. +https://jgaa.info/accepted/2006/EschbachGuentherBecker2006.10.2.pdf +""" + +import re + +try: + from html import escape +except ImportError: + from cgi import escape + +from collections import defaultdict +from operator import itemgetter + +from nltk.tree.tree import Tree +from nltk.util import OrderedDict + +ANSICOLOR = { + "black": 30, + "red": 31, + "green": 32, + "yellow": 33, + "blue": 34, + "magenta": 35, + "cyan": 36, + "white": 37, +} + + +class TreePrettyPrinter: + """ + Pretty-print a tree in text format, either as ASCII or Unicode. + The tree can be a normal tree, or discontinuous. + + ``TreePrettyPrinter(tree, sentence=None, highlight=())`` + creates an object from which different visualizations can be created. + + :param tree: a Tree object. + :param sentence: a list of words (strings). If `sentence` is given, + `tree` must contain integers as leaves, which are taken as indices + in `sentence`. Using this you can display a discontinuous tree. + :param highlight: Optionally, a sequence of Tree objects in `tree` which + should be highlighted. Has the effect of only applying colors to nodes + in this sequence (nodes should be given as Tree objects, terminals as + indices). + + >>> from nltk.tree import Tree + >>> tree = Tree.fromstring('(S (NP Mary) (VP walks))') + >>> print(TreePrettyPrinter(tree).text()) + ... 
# doctest: +NORMALIZE_WHITESPACE + S + ____|____ + NP VP + | | + Mary walks + """ + + def __init__(self, tree, sentence=None, highlight=()): + if sentence is None: + leaves = tree.leaves() + if ( + leaves + and all(len(a) > 0 for a in tree.subtrees()) + and all(isinstance(a, int) for a in leaves) + ): + sentence = [str(a) for a in leaves] + else: + # this deals with empty nodes (frontier non-terminals) + # and multiple/mixed terminals under non-terminals. + tree = tree.copy(True) + sentence = [] + for a in tree.subtrees(): + if len(a) == 0: + a.append(len(sentence)) + sentence.append(None) + elif any(not isinstance(b, Tree) for b in a): + for n, b in enumerate(a): + if not isinstance(b, Tree): + a[n] = len(sentence) + if type(b) == tuple: + b = "/".join(b) + sentence.append("%s" % b) + self.nodes, self.coords, self.edges, self.highlight = self.nodecoords( + tree, sentence, highlight + ) + + def __str__(self): + return self.text() + + def __repr__(self): + return "" % len(self.nodes) + + @staticmethod + def nodecoords(tree, sentence, highlight): + """ + Produce coordinates of nodes on a grid. + + Objective: + + - Produce coordinates for a non-overlapping placement of nodes and + horizontal lines. + - Order edges so that crossing edges cross a minimal number of previous + horizontal lines (never vertical lines). + + Approach: + + - bottom up level order traversal (start at terminals) + - at each level, identify nodes which cannot be on the same row + - identify nodes which cannot be in the same column + - place nodes into a grid at (row, column) + - order child-parent edges with crossing edges last + + Coordinates are (row, column); the origin (0, 0) is at the top left; + the root node is on row 0. Coordinates do not consider the size of a + node (which depends on font, &c), so the width of a column of the grid + should be automatically determined by the element with the greatest + width in that column. Alternatively, the integer coordinates could be + converted to coordinates in which the distances between adjacent nodes + are non-uniform. + + Produces tuple (nodes, coords, edges, highlighted) where: + + - nodes[id]: Tree object for the node with this integer id + - coords[id]: (n, m) coordinate where to draw node with id in the grid + - edges[id]: parent id of node with this id (ordered dictionary) + - highlighted: set of ids that should be highlighted + """ + + def findcell(m, matrix, startoflevel, children): + """ + Find vacant row, column index for node ``m``. + Iterate over current rows for this level (try lowest first) + and look for cell between first and last child of this node, + add new row to level if no free row available. 
+ """ + candidates = [a for _, a in children[m]] + minidx, maxidx = min(candidates), max(candidates) + leaves = tree[m].leaves() + center = scale * sum(leaves) // len(leaves) # center of gravity + if minidx < maxidx and not minidx < center < maxidx: + center = sum(candidates) // len(candidates) + if max(candidates) - min(candidates) > 2 * scale: + center -= center % scale # round to unscaled coordinate + if minidx < maxidx and not minidx < center < maxidx: + center += scale + if ids[m] == 0: + startoflevel = len(matrix) + for rowidx in range(startoflevel, len(matrix) + 1): + if rowidx == len(matrix): # need to add a new row + matrix.append( + [ + vertline if a not in (corner, None) else None + for a in matrix[-1] + ] + ) + row = matrix[rowidx] + if len(children[m]) == 1: # place unaries directly above child + return rowidx, next(iter(children[m]))[1] + elif all( + a is None or a == vertline + for a in row[min(candidates) : max(candidates) + 1] + ): + # find free column + for n in range(scale): + i = j = center + n + while j > minidx or i < maxidx: + if i < maxidx and ( + matrix[rowidx][i] is None or i in candidates + ): + return rowidx, i + elif j > minidx and ( + matrix[rowidx][j] is None or j in candidates + ): + return rowidx, j + i += scale + j -= scale + raise ValueError( + "could not find a free cell for:\n%s\n%s" + "min=%d; max=%d" % (tree[m], minidx, maxidx, dumpmatrix()) + ) + + def dumpmatrix(): + """Dump matrix contents for debugging purposes.""" + return "\n".join( + "%2d: %s" % (n, " ".join(("%2r" % i)[:2] for i in row)) + for n, row in enumerate(matrix) + ) + + leaves = tree.leaves() + if not all(isinstance(n, int) for n in leaves): + raise ValueError("All leaves must be integer indices.") + if len(leaves) != len(set(leaves)): + raise ValueError("Indices must occur at most once.") + if not all(0 <= n < len(sentence) for n in leaves): + raise ValueError( + "All leaves must be in the interval 0..n " + "with n=len(sentence)\ntokens: %d indices: " + "%r\nsentence: %s" % (len(sentence), tree.leaves(), sentence) + ) + vertline, corner = -1, -2 # constants + tree = tree.copy(True) + for a in tree.subtrees(): + a.sort(key=lambda n: min(n.leaves()) if isinstance(n, Tree) else n) + scale = 2 + crossed = set() + # internal nodes and lexical nodes (no frontiers) + positions = tree.treepositions() + maxdepth = max(map(len, positions)) + 1 + childcols = defaultdict(set) + matrix = [[None] * (len(sentence) * scale)] + nodes = {} + ids = {a: n for n, a in enumerate(positions)} + highlighted_nodes = { + n for a, n in ids.items() if not highlight or tree[a] in highlight + } + levels = {n: [] for n in range(maxdepth - 1)} + terminals = [] + for a in positions: + node = tree[a] + if isinstance(node, Tree): + levels[maxdepth - node.height()].append(a) + else: + terminals.append(a) + + for n in levels: + levels[n].sort(key=lambda n: max(tree[n].leaves()) - min(tree[n].leaves())) + terminals.sort() + positions = set(positions) + + for m in terminals: + i = int(tree[m]) * scale + assert matrix[0][i] is None, (matrix[0][i], m, i) + matrix[0][i] = ids[m] + nodes[ids[m]] = sentence[tree[m]] + if nodes[ids[m]] is None: + nodes[ids[m]] = "..." + highlighted_nodes.discard(ids[m]) + positions.remove(m) + childcols[m[:-1]].add((0, i)) + + # add other nodes centered on their children, + # if the center is already taken, back off + # to the left and right alternately, until an empty cell is found. 
+ for n in sorted(levels, reverse=True): + nodesatdepth = levels[n] + startoflevel = len(matrix) + matrix.append( + [vertline if a not in (corner, None) else None for a in matrix[-1]] + ) + for m in nodesatdepth: # [::-1]: + if n < maxdepth - 1 and childcols[m]: + _, pivot = min(childcols[m], key=itemgetter(1)) + if { + a[:-1] + for row in matrix[:-1] + for a in row[:pivot] + if isinstance(a, tuple) + } & { + a[:-1] + for row in matrix[:-1] + for a in row[pivot:] + if isinstance(a, tuple) + }: + crossed.add(m) + + rowidx, i = findcell(m, matrix, startoflevel, childcols) + positions.remove(m) + + # block positions where children of this node branch out + for _, x in childcols[m]: + matrix[rowidx][x] = corner + # assert m == () or matrix[rowidx][i] in (None, corner), ( + # matrix[rowidx][i], m, str(tree), ' '.join(sentence)) + # node itself + matrix[rowidx][i] = ids[m] + nodes[ids[m]] = tree[m] + # add column to the set of children for its parent + if len(m) > 0: + childcols[m[:-1]].add((rowidx, i)) + assert len(positions) == 0 + + # remove unused columns, right to left + for m in range(scale * len(sentence) - 1, -1, -1): + if not any(isinstance(row[m], (Tree, int)) for row in matrix): + for row in matrix: + del row[m] + + # remove unused rows, reverse + matrix = [ + row + for row in reversed(matrix) + if not all(a is None or a == vertline for a in row) + ] + + # collect coordinates of nodes + coords = {} + for n, _ in enumerate(matrix): + for m, i in enumerate(matrix[n]): + if isinstance(i, int) and i >= 0: + coords[i] = n, m + + # move crossed edges last + positions = sorted( + (a for level in levels.values() for a in level), + key=lambda a: a[:-1] in crossed, + ) + + # collect edges from node to node + edges = OrderedDict() + for i in reversed(positions): + for j, _ in enumerate(tree[i]): + edges[ids[i + (j,)]] = ids[i] + + return nodes, coords, edges, highlighted_nodes + + def text( + self, + nodedist=1, + unicodelines=False, + html=False, + ansi=False, + nodecolor="blue", + leafcolor="red", + funccolor="green", + abbreviate=None, + maxwidth=16, + ): + """ + :return: ASCII art for a discontinuous tree. + + :param unicodelines: whether to use Unicode line drawing characters + instead of plain (7-bit) ASCII. + :param html: whether to wrap output in html code (default plain text). + :param ansi: whether to produce colors with ANSI escape sequences + (only effective when html==False). + :param leafcolor, nodecolor: specify colors of leaves and phrasal + nodes; effective when either html or ansi is True. + :param abbreviate: if True, abbreviate labels longer than 5 characters. + If integer, abbreviate labels longer than `abbr` characters. + :param maxwidth: maximum number of characters before a label starts to + wrap; pass None to disable. + """ + if abbreviate == True: + abbreviate = 5 + if unicodelines: + horzline = "\u2500" + leftcorner = "\u250c" + rightcorner = "\u2510" + vertline = " \u2502 " + tee = horzline + "\u252C" + horzline + bottom = horzline + "\u2534" + horzline + cross = horzline + "\u253c" + horzline + ellipsis = "\u2026" + else: + horzline = "_" + leftcorner = rightcorner = " " + vertline = " | " + tee = 3 * horzline + cross = bottom = "_|_" + ellipsis = "." 
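# Illustrative sketch (not part of the upstream patch): the main rendering
# options of TreePrettyPrinter.text(). The first tree is a made-up example;
# the discontinuous tree and sentence are the same ones used in this module's
# own test() function further below.
from nltk.tree import Tree
from nltk.tree.prettyprinter import TreePrettyPrinter

tree = Tree.fromstring("(S (NP Mary) (VP walks))")
print(TreePrettyPrinter(tree).text())                   # plain ASCII art
print(TreePrettyPrinter(tree).text(unicodelines=True))  # Unicode box drawing

# Discontinuous tree: integer leaves index into a separate word list.
dtree = Tree.fromstring(
    "(top (punct 8) (smain (noun 0) (verb 1) (inf (verb 5) (inf (verb 6) "
    "(conj (inf (pp (prep 2) (np (det 3) (noun 4))) (verb 7)) (inf (verb 9)) "
    "(vg 10) (inf (verb 11)))))) (punct 12))",
    read_leaf=int,
)
sentence = (
    "Ze had met haar moeder kunnen gaan winkelen , zwemmen of terrassen ."
).split()
print(TreePrettyPrinter(dtree, sentence).text(nodedist=2, unicodelines=True))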
+ + def crosscell(cur, x=vertline): + """Overwrite center of this cell with a vertical branch.""" + splitl = len(cur) - len(cur) // 2 - len(x) // 2 - 1 + lst = list(cur) + lst[splitl : splitl + len(x)] = list(x) + return "".join(lst) + + result = [] + matrix = defaultdict(dict) + maxnodewith = defaultdict(lambda: 3) + maxnodeheight = defaultdict(lambda: 1) + maxcol = 0 + minchildcol = {} + maxchildcol = {} + childcols = defaultdict(set) + labels = {} + wrapre = re.compile( + "(.{%d,%d}\\b\\W*|.{%d})" % (maxwidth - 4, maxwidth, maxwidth) + ) + # collect labels and coordinates + for a in self.nodes: + row, column = self.coords[a] + matrix[row][column] = a + maxcol = max(maxcol, column) + label = ( + self.nodes[a].label() + if isinstance(self.nodes[a], Tree) + else self.nodes[a] + ) + if abbreviate and len(label) > abbreviate: + label = label[:abbreviate] + ellipsis + if maxwidth and len(label) > maxwidth: + label = wrapre.sub(r"\1\n", label).strip() + label = label.split("\n") + maxnodeheight[row] = max(maxnodeheight[row], len(label)) + maxnodewith[column] = max(maxnodewith[column], max(map(len, label))) + labels[a] = label + if a not in self.edges: + continue # e.g., root + parent = self.edges[a] + childcols[parent].add((row, column)) + minchildcol[parent] = min(minchildcol.get(parent, column), column) + maxchildcol[parent] = max(maxchildcol.get(parent, column), column) + # bottom up level order traversal + for row in sorted(matrix, reverse=True): + noderows = [ + ["".center(maxnodewith[col]) for col in range(maxcol + 1)] + for _ in range(maxnodeheight[row]) + ] + branchrow = ["".center(maxnodewith[col]) for col in range(maxcol + 1)] + for col in matrix[row]: + n = matrix[row][col] + node = self.nodes[n] + text = labels[n] + if isinstance(node, Tree): + # draw horizontal branch towards children for this node + if n in minchildcol and minchildcol[n] < maxchildcol[n]: + i, j = minchildcol[n], maxchildcol[n] + a, b = (maxnodewith[i] + 1) // 2 - 1, maxnodewith[j] // 2 + branchrow[i] = ((" " * a) + leftcorner).ljust( + maxnodewith[i], horzline + ) + branchrow[j] = (rightcorner + (" " * b)).rjust( + maxnodewith[j], horzline + ) + for i in range(minchildcol[n] + 1, maxchildcol[n]): + if i == col and any(a == i for _, a in childcols[n]): + line = cross + elif i == col: + line = bottom + elif any(a == i for _, a in childcols[n]): + line = tee + else: + line = horzline + branchrow[i] = line.center(maxnodewith[i], horzline) + else: # if n and n in minchildcol: + branchrow[col] = crosscell(branchrow[col]) + text = [a.center(maxnodewith[col]) for a in text] + color = nodecolor if isinstance(node, Tree) else leafcolor + if isinstance(node, Tree) and node.label().startswith("-"): + color = funccolor + if html: + text = [escape(a, quote=False) for a in text] + if n in self.highlight: + text = [f"{a}" for a in text] + elif ansi and n in self.highlight: + text = ["\x1b[%d;1m%s\x1b[0m" % (ANSICOLOR[color], a) for a in text] + for x in range(maxnodeheight[row]): + # draw vertical lines in partially filled multiline node + # labels, but only if it's not a frontier node. + noderows[x][col] = ( + text[x] + if x < len(text) + else (vertline if childcols[n] else " ").center( + maxnodewith[col], " " + ) + ) + # for each column, if there is a node below us which has a parent + # above us, draw a vertical branch in that column. 
+ if row != max(matrix): + for n, (childrow, col) in self.coords.items(): + if n > 0 and self.coords[self.edges[n]][0] < row < childrow: + branchrow[col] = crosscell(branchrow[col]) + if col not in matrix[row]: + for noderow in noderows: + noderow[col] = crosscell(noderow[col]) + branchrow = [ + a + ((a[-1] if a[-1] != " " else b[0]) * nodedist) + for a, b in zip(branchrow, branchrow[1:] + [" "]) + ] + result.append("".join(branchrow)) + result.extend( + (" " * nodedist).join(noderow) for noderow in reversed(noderows) + ) + return "\n".join(reversed(result)) + "\n" + + def svg(self, nodecolor="blue", leafcolor="red", funccolor="green"): + """ + :return: SVG representation of a tree. + """ + fontsize = 12 + hscale = 40 + vscale = 25 + hstart = vstart = 20 + width = max(col for _, col in self.coords.values()) + height = max(row for row, _ in self.coords.values()) + result = [ + '' + % ( + width * 3, + height * 2.5, + -hstart, + -vstart, + width * hscale + 3 * hstart, + height * vscale + 3 * vstart, + ) + ] + + children = defaultdict(set) + for n in self.nodes: + if n: + children[self.edges[n]].add(n) + + # horizontal branches from nodes to children + for node in self.nodes: + if not children[node]: + continue + y, x = self.coords[node] + x *= hscale + y *= vscale + x += hstart + y += vstart + fontsize // 2 + childx = [self.coords[c][1] for c in children[node]] + xmin = hstart + hscale * min(childx) + xmax = hstart + hscale * max(childx) + result.append( + '\t' % (xmin, y, xmax, y) + ) + result.append( + '\t' % (x, y, x, y - fontsize // 3) + ) + + # vertical branches from children to parents + for child, parent in self.edges.items(): + y, _ = self.coords[parent] + y *= vscale + y += vstart + fontsize // 2 + childy, childx = self.coords[child] + childx *= hscale + childy *= vscale + childx += hstart + childy += vstart - fontsize + result += [ + '\t' % (childx, childy, childx, y + 5), + '\t' % (childx, childy, childx, y), + ] + + # write nodes with coordinates + for n, (row, column) in self.coords.items(): + node = self.nodes[n] + x = column * hscale + hstart + y = row * vscale + vstart + if n in self.highlight: + color = nodecolor if isinstance(node, Tree) else leafcolor + if isinstance(node, Tree) and node.label().startswith("-"): + color = funccolor + else: + color = "black" + result += [ + '\t%s' + % ( + color, + fontsize, + x, + y, + escape( + node.label() if isinstance(node, Tree) else node, quote=False + ), + ) + ] + + result += [""] + return "\n".join(result) + + +def test(): + """Do some tree drawing tests.""" + + def print_tree(n, tree, sentence=None, ansi=True, **xargs): + print() + print('{}: "{}"'.format(n, " ".join(sentence or tree.leaves()))) + print(tree) + print() + drawtree = TreePrettyPrinter(tree, sentence) + try: + print(drawtree.text(unicodelines=ansi, ansi=ansi, **xargs)) + except (UnicodeDecodeError, UnicodeEncodeError): + print(drawtree.text(unicodelines=False, ansi=False, **xargs)) + + from nltk.corpus import treebank + + for n in [0, 1440, 1591, 2771, 2170]: + tree = treebank.parsed_sents()[n] + print_tree(n, tree, nodedist=2, maxwidth=8) + print() + print("ASCII version:") + print(TreePrettyPrinter(tree).text(nodedist=2)) + + tree = Tree.fromstring( + "(top (punct 8) (smain (noun 0) (verb 1) (inf (verb 5) (inf (verb 6) " + "(conj (inf (pp (prep 2) (np (det 3) (noun 4))) (verb 7)) (inf (verb 9)) " + "(vg 10) (inf (verb 11)))))) (punct 12))", + read_leaf=int, + ) + sentence = ( + "Ze had met haar moeder kunnen gaan winkelen ," + " zwemmen of terrassen .".split() + ) + 
print_tree("Discontinuous tree", tree, sentence, nodedist=2) + + +__all__ = ["TreePrettyPrinter"] + +if __name__ == "__main__": + test() diff --git a/venv/lib/python3.10/site-packages/nltk/tree/probabilistic.py b/venv/lib/python3.10/site-packages/nltk/tree/probabilistic.py new file mode 100644 index 0000000000000000000000000000000000000000..79a4c798ad5f73b7c515e20456a7149232958f17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tree/probabilistic.py @@ -0,0 +1,74 @@ +# Natural Language Toolkit: Text Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + + +from nltk.internals import raise_unorderable_types +from nltk.probability import ProbabilisticMixIn +from nltk.tree.immutable import ImmutableProbabilisticTree +from nltk.tree.tree import Tree + +###################################################################### +## Probabilistic trees +###################################################################### + + +class ProbabilisticTree(Tree, ProbabilisticMixIn): + def __init__(self, node, children=None, **prob_kwargs): + Tree.__init__(self, node, children) + ProbabilisticMixIn.__init__(self, **prob_kwargs) + + # We have to patch up these methods to make them work right: + def _frozen_class(self): + return ImmutableProbabilisticTree + + def __repr__(self): + return f"{Tree.__repr__(self)} (p={self.prob()!r})" + + def __str__(self): + return f"{self.pformat(margin=60)} (p={self.prob():.6g})" + + def copy(self, deep=False): + if not deep: + return type(self)(self._label, self, prob=self.prob()) + else: + return type(self).convert(self) + + @classmethod + def convert(cls, val): + if isinstance(val, Tree): + children = [cls.convert(child) for child in val] + if isinstance(val, ProbabilisticMixIn): + return cls(val._label, children, prob=val.prob()) + else: + return cls(val._label, children, prob=1.0) + else: + return val + + def __eq__(self, other): + return self.__class__ is other.__class__ and ( + self._label, + list(self), + self.prob(), + ) == (other._label, list(other), other.prob()) + + def __lt__(self, other): + if not isinstance(other, Tree): + raise_unorderable_types("<", self, other) + if self.__class__ is other.__class__: + return (self._label, list(self), self.prob()) < ( + other._label, + list(other), + other.prob(), + ) + else: + return self.__class__.__name__ < other.__class__.__name__ + + +__all__ = ["ProbabilisticTree"] diff --git a/venv/lib/python3.10/site-packages/nltk/tree/tree.py b/venv/lib/python3.10/site-packages/nltk/tree/tree.py new file mode 100644 index 0000000000000000000000000000000000000000..d21be4091c5f6855a42a12dcddc53f21aafa16ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tree/tree.py @@ -0,0 +1,982 @@ +# Natural Language Toolkit: Text Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Peter Ljunglöf +# Nathan Bodenstab (tree transforms) +# Eric Kafe (Tree.fromlist()) +# Mohaned mashaly (Deprecating methods) +# URL: +# For license information, see LICENSE.TXT + +""" +Class for representing hierarchical language structures, such as +syntax trees and morphological trees. 
+""" + +import re + +from nltk.grammar import Nonterminal, Production +from nltk.internals import deprecated + +###################################################################### +## Trees +###################################################################### + + +class Tree(list): + r""" + A Tree represents a hierarchical grouping of leaves and subtrees. + For example, each constituent in a syntax tree is represented by a single Tree. + + A tree's children are encoded as a list of leaves and subtrees, + where a leaf is a basic (non-tree) value; and a subtree is a + nested Tree. + + >>> from nltk.tree import Tree + >>> print(Tree(1, [2, Tree(3, [4]), 5])) + (1 2 (3 4) 5) + >>> vp = Tree('VP', [Tree('V', ['saw']), + ... Tree('NP', ['him'])]) + >>> s = Tree('S', [Tree('NP', ['I']), vp]) + >>> print(s) + (S (NP I) (VP (V saw) (NP him))) + >>> print(s[1]) + (VP (V saw) (NP him)) + >>> print(s[1,1]) + (NP him) + >>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))") + >>> s == t + True + >>> t[1][1].set_label('X') + >>> t[1][1].label() + 'X' + >>> print(t) + (S (NP I) (VP (V saw) (X him))) + >>> t[0], t[1,1] = t[1,1], t[0] + >>> print(t) + (S (X him) (VP (V saw) (NP I))) + + The length of a tree is the number of children it has. + + >>> len(t) + 2 + + The set_label() and label() methods allow individual constituents + to be labeled. For example, syntax trees use this label to specify + phrase tags, such as "NP" and "VP". + + Several Tree methods use "tree positions" to specify + children or descendants of a tree. Tree positions are defined as + follows: + + - The tree position *i* specifies a Tree's *i*\ th child. + - The tree position ``()`` specifies the Tree itself. + - If *p* is the tree position of descendant *d*, then + *p+i* specifies the *i*\ th child of *d*. + + I.e., every tree position is either a single index *i*, + specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*, + specifying ``tree[i1][i2]...[iN]``. + + Construct a new tree. This constructor can be called in one + of two ways: + + - ``Tree(label, children)`` constructs a new tree with the + specified label and list of children. + + - ``Tree.fromstring(s)`` constructs a new tree by parsing the string ``s``. 
+ """ + + def __init__(self, node, children=None): + if children is None: + raise TypeError( + "%s: Expected a node value and child list " % type(self).__name__ + ) + elif isinstance(children, str): + raise TypeError( + "%s() argument 2 should be a list, not a " + "string" % type(self).__name__ + ) + else: + list.__init__(self, children) + self._label = node + + # //////////////////////////////////////////////////////////// + # Comparison operators + # //////////////////////////////////////////////////////////// + + def __eq__(self, other): + return self.__class__ is other.__class__ and (self._label, list(self)) == ( + other._label, + list(other), + ) + + def __lt__(self, other): + if not isinstance(other, Tree): + # raise_unorderable_types("<", self, other) + # Sometimes children can be pure strings, + # so we need to be able to compare with non-trees: + return self.__class__.__name__ < other.__class__.__name__ + elif self.__class__ is other.__class__: + return (self._label, list(self)) < (other._label, list(other)) + else: + return self.__class__.__name__ < other.__class__.__name__ + + # @total_ordering doesn't work here, since the class inherits from a builtin class + __ne__ = lambda self, other: not self == other + __gt__ = lambda self, other: not (self < other or self == other) + __le__ = lambda self, other: self < other or self == other + __ge__ = lambda self, other: not self < other + + # //////////////////////////////////////////////////////////// + # Disabled list operations + # //////////////////////////////////////////////////////////// + + def __mul__(self, v): + raise TypeError("Tree does not support multiplication") + + def __rmul__(self, v): + raise TypeError("Tree does not support multiplication") + + def __add__(self, v): + raise TypeError("Tree does not support addition") + + def __radd__(self, v): + raise TypeError("Tree does not support addition") + + # //////////////////////////////////////////////////////////// + # Indexing (with support for tree positions) + # //////////////////////////////////////////////////////////// + + def __getitem__(self, index): + if isinstance(index, (int, slice)): + return list.__getitem__(self, index) + elif isinstance(index, (list, tuple)): + if len(index) == 0: + return self + elif len(index) == 1: + return self[index[0]] + else: + return self[index[0]][index[1:]] + else: + raise TypeError( + "%s indices must be integers, not %s" + % (type(self).__name__, type(index).__name__) + ) + + def __setitem__(self, index, value): + if isinstance(index, (int, slice)): + return list.__setitem__(self, index, value) + elif isinstance(index, (list, tuple)): + if len(index) == 0: + raise IndexError("The tree position () may not be " "assigned to.") + elif len(index) == 1: + self[index[0]] = value + else: + self[index[0]][index[1:]] = value + else: + raise TypeError( + "%s indices must be integers, not %s" + % (type(self).__name__, type(index).__name__) + ) + + def __delitem__(self, index): + if isinstance(index, (int, slice)): + return list.__delitem__(self, index) + elif isinstance(index, (list, tuple)): + if len(index) == 0: + raise IndexError("The tree position () may not be deleted.") + elif len(index) == 1: + del self[index[0]] + else: + del self[index[0]][index[1:]] + else: + raise TypeError( + "%s indices must be integers, not %s" + % (type(self).__name__, type(index).__name__) + ) + + # //////////////////////////////////////////////////////////// + # Basic tree operations + # //////////////////////////////////////////////////////////// + 
@deprecated("Use label() instead") + def _get_node(self): + """Outdated method to access the node value; use the label() method instead.""" + + @deprecated("Use set_label() instead") + def _set_node(self, value): + """Outdated method to set the node value; use the set_label() method instead.""" + + node = property(_get_node, _set_node) + + def label(self): + """ + Return the node label of the tree. + + >>> t = Tree.fromstring('(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))') + >>> t.label() + 'S' + + :return: the node label (typically a string) + :rtype: any + """ + return self._label + + def set_label(self, label): + """ + Set the node label of the tree. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.set_label("T") + >>> print(t) + (T (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat)))) + + :param label: the node label (typically a string) + :type label: any + """ + self._label = label + + def leaves(self): + """ + Return the leaves of the tree. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.leaves() + ['the', 'dog', 'chased', 'the', 'cat'] + + :return: a list containing this tree's leaves. + The order reflects the order of the + leaves in the tree's hierarchical structure. + :rtype: list + """ + leaves = [] + for child in self: + if isinstance(child, Tree): + leaves.extend(child.leaves()) + else: + leaves.append(child) + return leaves + + def flatten(self): + """ + Return a flat version of the tree, with all non-root non-terminals removed. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> print(t.flatten()) + (S the dog chased the cat) + + :return: a tree consisting of this tree's root connected directly to + its leaves, omitting all intervening non-terminal nodes. + :rtype: Tree + """ + return Tree(self.label(), self.leaves()) + + def height(self): + """ + Return the height of the tree. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.height() + 5 + >>> print(t[0,0]) + (D the) + >>> t[0,0].height() + 2 + + :return: The height of this tree. The height of a tree + containing no children is 1; the height of a tree + containing only leaves is 2; and the height of any other + tree is one plus the maximum of its children's + heights. + :rtype: int + """ + max_child_height = 0 + for child in self: + if isinstance(child, Tree): + max_child_height = max(max_child_height, child.height()) + else: + max_child_height = max(max_child_height, 1) + return 1 + max_child_height + + def treepositions(self, order="preorder"): + """ + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.treepositions() # doctest: +ELLIPSIS + [(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...] + >>> for pos in t.treepositions('leaves'): + ... t[pos] = t[pos][::-1].upper() + >>> print(t) + (S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC)))) + + :param order: One of: ``preorder``, ``postorder``, ``bothorder``, + ``leaves``. 
+ """ + positions = [] + if order in ("preorder", "bothorder"): + positions.append(()) + for i, child in enumerate(self): + if isinstance(child, Tree): + childpos = child.treepositions(order) + positions.extend((i,) + p for p in childpos) + else: + positions.append((i,)) + if order in ("postorder", "bothorder"): + positions.append(()) + return positions + + def subtrees(self, filter=None): + """ + Generate all the subtrees of this tree, optionally restricted + to trees matching the filter function. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> for s in t.subtrees(lambda t: t.height() == 2): + ... print(s) + (D the) + (N dog) + (V chased) + (D the) + (N cat) + + :type filter: function + :param filter: the function to filter all local trees + """ + if not filter or filter(self): + yield self + for child in self: + if isinstance(child, Tree): + yield from child.subtrees(filter) + + def productions(self): + """ + Generate the productions that correspond to the non-terminal nodes of the tree. + For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the + form P -> C1 C2 ... Cn. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.productions() # doctest: +NORMALIZE_WHITESPACE + [S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased', + NP -> D N, D -> 'the', N -> 'cat'] + + :rtype: list(Production) + """ + + if not isinstance(self._label, str): + raise TypeError( + "Productions can only be generated from trees having node labels that are strings" + ) + + prods = [Production(Nonterminal(self._label), _child_names(self))] + for child in self: + if isinstance(child, Tree): + prods += child.productions() + return prods + + def pos(self): + """ + Return a sequence of pos-tagged words extracted from the tree. + + >>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))") + >>> t.pos() + [('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')] + + :return: a list of tuples containing leaves and pre-terminals (part-of-speech tags). + The order reflects the order of the leaves in the tree's hierarchical structure. + :rtype: list(tuple) + """ + pos = [] + for child in self: + if isinstance(child, Tree): + pos.extend(child.pos()) + else: + pos.append((child, self._label)) + return pos + + def leaf_treeposition(self, index): + """ + :return: The tree position of the ``index``-th leaf in this + tree. I.e., if ``tp=self.leaf_treeposition(i)``, then + ``self[tp]==self.leaves()[i]``. + + :raise IndexError: If this tree contains fewer than ``index+1`` + leaves, or if ``index<0``. + """ + if index < 0: + raise IndexError("index must be non-negative") + + stack = [(self, ())] + while stack: + value, treepos = stack.pop() + if not isinstance(value, Tree): + if index == 0: + return treepos + else: + index -= 1 + else: + for i in range(len(value) - 1, -1, -1): + stack.append((value[i], treepos + (i,))) + + raise IndexError("index must be less than or equal to len(self)") + + def treeposition_spanning_leaves(self, start, end): + """ + :return: The tree position of the lowest descendant of this + tree that dominates ``self.leaves()[start:end]``. + :raise ValueError: if ``end <= start`` + """ + if end <= start: + raise ValueError("end must be greater than start") + # Find the tree positions of the start & end leaves, and + # take the longest common subsequence. 
+ start_treepos = self.leaf_treeposition(start) + end_treepos = self.leaf_treeposition(end - 1) + # Find the first index where they mismatch: + for i in range(len(start_treepos)): + if i == len(end_treepos) or start_treepos[i] != end_treepos[i]: + return start_treepos[:i] + return start_treepos + + # //////////////////////////////////////////////////////////// + # Transforms + # //////////////////////////////////////////////////////////// + + def chomsky_normal_form( + self, + factor="right", + horzMarkov=None, + vertMarkov=0, + childChar="|", + parentChar="^", + ): + """ + This method can modify a tree in three ways: + + 1. Convert a tree into its Chomsky Normal Form (CNF) + equivalent -- Every subtree has either two non-terminals + or one terminal as its children. This process requires + the creation of more"artificial" non-terminal nodes. + 2. Markov (vertical) smoothing of children in new artificial + nodes + 3. Horizontal (parent) annotation of nodes + + :param factor: Right or left factoring method (default = "right") + :type factor: str = [left|right] + :param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings) + :type horzMarkov: int | None + :param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation) + :type vertMarkov: int | None + :param childChar: A string used in construction of the artificial nodes, separating the head of the + original subtree from the child nodes that have yet to be expanded (default = "|") + :type childChar: str + :param parentChar: A string used to separate the node representation from its vertical annotation + :type parentChar: str + """ + from nltk.tree.transforms import chomsky_normal_form + + chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar) + + def un_chomsky_normal_form( + self, expandUnary=True, childChar="|", parentChar="^", unaryChar="+" + ): + """ + This method modifies the tree in three ways: + + 1. Transforms a tree in Chomsky Normal Form back to its + original structure (branching greater than two) + 2. Removes any parent annotation (if it exists) + 3. (optional) expands unary subtrees (if previously + collapsed with collapseUnary(...) ) + + :param expandUnary: Flag to expand unary or not (default = True) + :type expandUnary: bool + :param childChar: A string separating the head node from its children in an artificial node (default = "|") + :type childChar: str + :param parentChar: A string separating the node label from its parent annotation (default = "^") + :type parentChar: str + :param unaryChar: A string joining two non-terminals in a unary production (default = "+") + :type unaryChar: str + """ + from nltk.tree.transforms import un_chomsky_normal_form + + un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar) + + def collapse_unary(self, collapsePOS=False, collapseRoot=False, joinChar="+"): + """ + Collapse subtrees with a single child (ie. unary productions) + into a new non-terminal (Tree node) joined by 'joinChar'. + This is useful when working with algorithms that do not allow + unary productions, and completely removing the unary productions + would require loss of useful information. The Tree is modified + directly (since it is passed by reference) and no value is returned. + + :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie. 
+ Part-of-Speech tags) since they are always unary productions + :type collapsePOS: bool + :param collapseRoot: 'False' (default) will not modify the root production + if it is unary. For the Penn WSJ treebank corpus, this corresponds + to the TOP -> productions. + :type collapseRoot: bool + :param joinChar: A string used to connect collapsed node values (default = "+") + :type joinChar: str + """ + from nltk.tree.transforms import collapse_unary + + collapse_unary(self, collapsePOS, collapseRoot, joinChar) + + # //////////////////////////////////////////////////////////// + # Convert, copy + # //////////////////////////////////////////////////////////// + + @classmethod + def convert(cls, tree): + """ + Convert a tree between different subtypes of Tree. ``cls`` determines + which class will be used to encode the new tree. + + :type tree: Tree + :param tree: The tree that should be converted. + :return: The new Tree. + """ + if isinstance(tree, Tree): + children = [cls.convert(child) for child in tree] + return cls(tree._label, children) + else: + return tree + + def __copy__(self): + return self.copy() + + def __deepcopy__(self, memo): + return self.copy(deep=True) + + def copy(self, deep=False): + if not deep: + return type(self)(self._label, self) + else: + return type(self).convert(self) + + def _frozen_class(self): + from nltk.tree.immutable import ImmutableTree + + return ImmutableTree + + def freeze(self, leaf_freezer=None): + frozen_class = self._frozen_class() + if leaf_freezer is None: + newcopy = frozen_class.convert(self) + else: + newcopy = self.copy(deep=True) + for pos in newcopy.treepositions("leaves"): + newcopy[pos] = leaf_freezer(newcopy[pos]) + newcopy = frozen_class.convert(newcopy) + hash(newcopy) # Make sure the leaves are hashable. + return newcopy + + # //////////////////////////////////////////////////////////// + # Parsing + # //////////////////////////////////////////////////////////// + + @classmethod + def fromstring( + cls, + s, + brackets="()", + read_node=None, + read_leaf=None, + node_pattern=None, + leaf_pattern=None, + remove_empty_top_bracketing=False, + ): + """ + Read a bracketed tree string and return the resulting tree. + Trees are represented as nested brackettings, such as:: + + (S (NP (NNP John)) (VP (V runs))) + + :type s: str + :param s: The string to read + + :type brackets: str (length=2) + :param brackets: The bracket characters used to mark the + beginning and end of trees and subtrees. + + :type read_node: function + :type read_leaf: function + :param read_node, read_leaf: If specified, these functions + are applied to the substrings of ``s`` corresponding to + nodes and leaves (respectively) to obtain the values for + those nodes and leaves. They should have the following + signature: + + read_node(str) -> value + + For example, these functions could be used to process nodes + and leaves whose values should be some type other than + string (such as ``FeatStruct``). + Note that by default, node strings and leaf strings are + delimited by whitespace and brackets; to override this + default, use the ``node_pattern`` and ``leaf_pattern`` + arguments. + + :type node_pattern: str + :type leaf_pattern: str + :param node_pattern, leaf_pattern: Regular expression patterns + used to find node and leaf substrings in ``s``. By + default, both nodes patterns are defined to match any + sequence of non-whitespace non-bracket characters. 
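# Illustrative sketch (not part of the upstream patch): round-tripping a tree
# through the in-place transforms documented above. The trees are made-up
# examples; the ternary VP gives the CNF factoring something to do.
from nltk.tree import Tree

t = Tree.fromstring("(S (NP I) (VP (V gave) (NP him) (NP it)))")
t.chomsky_normal_form(horzMarkov=2)
print(t)   # the ternary VP is binarised with an artificial "VP|<...>" node

t.un_chomsky_normal_form()
print(t)   # original branching restored

# Unary chains of non-terminals can be collapsed into single joined labels:
u = Tree.fromstring("(S (NP (NP (D the) (N dog))) (VP (V barks)))")
u.collapse_unary()
print(u)   # the NP over NP becomes a single "NP+NP" node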
+
+        :type remove_empty_top_bracketing: bool
+        :param remove_empty_top_bracketing: If the resulting tree has
+            an empty node label, and is length one, then return its
+            single child instead.  This is useful for treebank trees,
+            which sometimes contain an extra level of bracketing.
+
+        :return: A tree corresponding to the string representation ``s``.
+            If this class method is called using a subclass of Tree,
+            then it will return a tree of that type.
+        :rtype: Tree
+        """
+        if not isinstance(brackets, str) or len(brackets) != 2:
+            raise TypeError("brackets must be a length-2 string")
+        if re.search(r"\s", brackets):
+            raise TypeError("whitespace brackets not allowed")
+        # Construct a regexp that will tokenize the string.
+        open_b, close_b = brackets
+        open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b))
+        if node_pattern is None:
+            node_pattern = rf"[^\s{open_pattern}{close_pattern}]+"
+        if leaf_pattern is None:
+            leaf_pattern = rf"[^\s{open_pattern}{close_pattern}]+"
+        token_re = re.compile(
+            r"%s\s*(%s)?|%s|(%s)"
+            % (open_pattern, node_pattern, close_pattern, leaf_pattern)
+        )
+        # Walk through each token, updating a stack of trees.
+        stack = [(None, [])]  # list of (node, children) tuples
+        for match in token_re.finditer(s):
+            token = match.group()
+            # Beginning of a tree/subtree
+            if token[0] == open_b:
+                if len(stack) == 1 and len(stack[0][1]) > 0:
+                    cls._parse_error(s, match, "end-of-string")
+                label = token[1:].lstrip()
+                if read_node is not None:
+                    label = read_node(label)
+                stack.append((label, []))
+            # End of a tree/subtree
+            elif token == close_b:
+                if len(stack) == 1:
+                    if len(stack[0][1]) == 0:
+                        cls._parse_error(s, match, open_b)
+                    else:
+                        cls._parse_error(s, match, "end-of-string")
+                label, children = stack.pop()
+                stack[-1][1].append(cls(label, children))
+            # Leaf node
+            else:
+                if len(stack) == 1:
+                    cls._parse_error(s, match, open_b)
+                if read_leaf is not None:
+                    token = read_leaf(token)
+                stack[-1][1].append(token)
+
+        # check that we got exactly one complete tree.
+        if len(stack) > 1:
+            cls._parse_error(s, "end-of-string", close_b)
+        elif len(stack[0][1]) == 0:
+            cls._parse_error(s, "end-of-string", open_b)
+        else:
+            assert stack[0][0] is None
+            assert len(stack[0][1]) == 1
+        tree = stack[0][1][0]
+
+        # If the tree has an extra level with node='', then get rid of
+        # it.  E.g.: "((S (NP ...) (VP ...)))"
+        if remove_empty_top_bracketing and tree._label == "" and len(tree) == 1:
+            tree = tree[0]
+        # return the tree.
+        return tree
+
+    @classmethod
+    def _parse_error(cls, s, match, expecting):
+        """
+        Display a friendly error message when parsing a tree string fails.
+        :param s: The string we're parsing.
+        :param match: regexp match of the problem token.
+        :param expecting: what we expected to see instead.
+        """
+        # Construct a basic error message
+        if match == "end-of-string":
+            pos, token = len(s), "end-of-string"
+        else:
+            pos, token = match.start(), match.group()
+        msg = "%s.read(): expected %r but got %r\n%sat index %d." % (
+            cls.__name__,
+            expecting,
+            token,
+            " " * 12,
+            pos,
+        )
+        # Add a display showing the error token itself:
+        s = s.replace("\n", " ").replace("\t", " ")
+        offset = pos
+        if len(s) > pos + 10:
+            s = s[: pos + 10] + "..."
+        if pos > 10:
+            s = "..." + s[pos - 10 :]
+            offset = 13
+        msg += '\n{}"{}"\n{}^'.format(" " * 16, s, " " * (17 + offset))
+        raise ValueError(msg)
+
+    @classmethod
+    def fromlist(cls, l):
+        """
+        :type l: list
+        :param l: a tree represented as nested lists
+
+        :return: A tree corresponding to the list representation ``l``.
+        :rtype: Tree
+
+        Convert nested lists to an NLTK Tree
+        """
+        if type(l) == list and len(l) > 0:
+            label = repr(l[0])
+            if len(l) > 1:
+                return Tree(label, [cls.fromlist(child) for child in l[1:]])
+            else:
+                return label
+
+    # ////////////////////////////////////////////////////////////
+    # Visualization & String Representation
+    # ////////////////////////////////////////////////////////////
+
+    def draw(self):
+        """
+        Open a new window containing a graphical diagram of this tree.
+        """
+        from nltk.draw.tree import draw_trees
+
+        draw_trees(self)
+
+    def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs):
+        """
+        Pretty-print this tree as ASCII or Unicode art.
+        For explanation of the arguments, see the documentation for
+        `nltk.tree.prettyprinter.TreePrettyPrinter`.
+        """
+        from nltk.tree.prettyprinter import TreePrettyPrinter
+
+        print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs), file=stream)
+
+    def __repr__(self):
+        childstr = ", ".join(repr(c) for c in self)
+        return "{}({}, [{}])".format(
+            type(self).__name__,
+            repr(self._label),
+            childstr,
+        )
+
+    def _repr_svg_(self):
+        from svgling import draw_tree
+
+        return draw_tree(self)._repr_svg_()
+
+    def __str__(self):
+        return self.pformat()
+
+    def pprint(self, **kwargs):
+        """
+        Print a string representation of this Tree to 'stream'
+        """
+
+        if "stream" in kwargs:
+            stream = kwargs["stream"]
+            del kwargs["stream"]
+        else:
+            stream = None
+        print(self.pformat(**kwargs), file=stream)
+
+    def pformat(self, margin=70, indent=0, nodesep="", parens="()", quotes=False):
+        """
+        :return: A pretty-printed string representation of this tree.
+        :rtype: str
+        :param margin: The right margin at which to do line-wrapping.
+        :type margin: int
+        :param indent: The indentation level at which printing
+            begins.  This number is used to decide how far to indent
+            subsequent lines.
+        :type indent: int
+        :param nodesep: A string that is used to separate the node
+            from the children.  E.g., the value ``':'`` gives
+            trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``.
+        """
+
+        # Try writing it on one line.
+        s = self._pformat_flat(nodesep, parens, quotes)
+        if len(s) + indent < margin:
+            return s
+
+        # If it doesn't fit on one line, then write it on multi-lines.
+        if isinstance(self._label, str):
+            s = f"{parens[0]}{self._label}{nodesep}"
+        else:
+            s = f"{parens[0]}{repr(self._label)}{nodesep}"
+        for child in self:
+            if isinstance(child, Tree):
+                s += (
+                    "\n"
+                    + " " * (indent + 2)
+                    + child.pformat(margin, indent + 2, nodesep, parens, quotes)
+                )
+            elif isinstance(child, tuple):
+                s += "\n" + " " * (indent + 2) + "/".join(child)
+            elif isinstance(child, str) and not quotes:
+                s += "\n" + " " * (indent + 2) + "%s" % child
+            else:
+                s += "\n" + " " * (indent + 2) + repr(child)
+        return s + parens[1]
+
+    def pformat_latex_qtree(self):
+        r"""
+        Returns a representation of the tree compatible with the
+        LaTeX qtree package. This consists of the string ``\Tree``
+        followed by the tree represented in bracketed notation.
+
+        For example, the following result was generated from a parse tree of
+        the sentence ``The announcement astounded us``::
+
+          \Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ]
+              [.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ]
+
+        See https://www.ling.upenn.edu/advice/latex.html for the LaTeX
+        style file for the qtree package.
+
+        :return: A latex qtree representation of this tree.
+        :rtype: str
+        """
+        reserved_chars = re.compile(r"([#\$%&~_\{\}])")
+
+        pformat = self.pformat(indent=6, nodesep="", parens=("[.", " ]"))
+        return r"\Tree " + re.sub(reserved_chars, r"\\\1", pformat)
+
+    def _pformat_flat(self, nodesep, parens, quotes):
+        childstrs = []
+        for child in self:
+            if isinstance(child, Tree):
+                childstrs.append(child._pformat_flat(nodesep, parens, quotes))
+            elif isinstance(child, tuple):
+                childstrs.append("/".join(child))
+            elif isinstance(child, str) and not quotes:
+                childstrs.append("%s" % child)
+            else:
+                childstrs.append(repr(child))
+        if isinstance(self._label, str):
+            return "{}{}{} {}{}".format(
+                parens[0],
+                self._label,
+                nodesep,
+                " ".join(childstrs),
+                parens[1],
+            )
+        else:
+            return "{}{}{} {}{}".format(
+                parens[0],
+                repr(self._label),
+                nodesep,
+                " ".join(childstrs),
+                parens[1],
+            )
+
+
+def _child_names(tree):
+    names = []
+    for child in tree:
+        if isinstance(child, Tree):
+            names.append(Nonterminal(child._label))
+        else:
+            names.append(child)
+    return names
+
+
+######################################################################
+## Demonstration
+######################################################################
+
+
+def demo():
+    """
+    A demonstration showing how Trees and ProbabilisticTrees can be used.
+    This demonstration creates several Trees from bracketed strings and
+    shows the results of calling several of their methods.
+    """
+
+    from nltk import ProbabilisticTree, Tree
+
+    # Demonstrate tree parsing.
+    s = "(S (NP (DT the) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))"
+    t = Tree.fromstring(s)
+    print("Convert bracketed string into tree:")
+    print(t)
+    print(t.__repr__())
+
+    print("Display tree properties:")
+    print(t.label())  # tree's constituent type
+    print(t[0])  # tree's first child
+    print(t[1])  # tree's second child
+    print(t.height())
+    print(t.leaves())
+    print(t[1])
+    print(t[1, 1])
+    print(t[1, 1, 0])
+
+    # Demonstrate tree modification.
+    the_cat = t[0]
+    the_cat.insert(1, Tree.fromstring("(JJ big)"))
+    print("Tree modification:")
+    print(t)
+    t[1, 1, 1] = Tree.fromstring("(NN cake)")
+    print(t)
+    print()
+
+    # Tree transforms
+    print("Collapse unary:")
+    t.collapse_unary()
+    print(t)
+    print("Chomsky normal form:")
+    t.chomsky_normal_form()
+    print(t)
+    print()
+
+    # Demonstrate probabilistic trees.
+    pt = ProbabilisticTree("x", ["y", "z"], prob=0.5)
+    print("Probabilistic Tree:")
+    print(pt)
+    print()
+
+    # Demonstrate parsing of treebank output format.
+    t = Tree.fromstring(t.pformat())
+    print("Convert tree to bracketed string and back again:")
+    print(t)
+    print()
+
+    # Demonstrate LaTeX output
+    print("LaTeX output:")
+    print(t.pformat_latex_qtree())
+    print()
+
+    # Demonstrate Productions
+    print("Production output:")
+    print(t.productions())
+    print()
+
+    # Demonstrate tree nodes containing objects other than strings
+    t.set_label(("test", 3))
+    print(t)
+
+
+__all__ = [
+    "Tree",
+]
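# ----------------------------------------------------------------------
# Editor's illustrative sketch (not part of the nltk/tree/tree.py patch
# above): a minimal example of the parsing and printing API documented in
# Tree.fromstring(), pformat() and pformat_latex_qtree().  It assumes only
# that NLTK is installed and importable.

from nltk import Tree

t = Tree.fromstring("(S (NP (DT the) (NN cat)) (VP (VBD sat)))")
print(t.label())                 # 'S' -- label of the root node
print(t.leaves())                # ['the', 'cat', 'sat']
print(t[0, 1, 0])                # 'cat' -- a tuple index walks down the tree
print(t.pformat(margin=20))      # wraps onto multiple lines once the flat form exceeds the margin
print(t.pformat_latex_qtree())   # \Tree [.S [.NP [.DT the ] [.NN cat ] ] [.VP [.VBD sat ] ] ]

# Round trip: the output of pformat() can be re-read with fromstring().
assert Tree.fromstring(t.pformat()).pformat() == t.pformat()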
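# ----------------------------------------------------------------------
# Editor's illustrative sketch (not part of the patch above): the in-place
# transforms documented in chomsky_normal_form(), un_chomsky_normal_form()
# and collapse_unary().

from nltk import Tree

t = Tree.fromstring(
    "(S (NP (DT the) (JJ big) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))"
)
t.chomsky_normal_form(horzMarkov=2)  # binarize in place; adds artificial nodes such as NP|<JJ-NN>
print(t)
t.un_chomsky_normal_form()           # undo the binarization and remove the artificial nodes
print(t)

# collapse_unary() rewrites unary productions; with collapsePOS=True it
# also folds a part-of-speech tag into its unary parent, e.g. VP+VBD.
u = Tree.fromstring("(S (NP (DT the) (NN cat)) (VP (VBD sat)))")
u.collapse_unary(collapsePOS=True)
print(u)                             # (S (NP (DT the) (NN cat)) (VP+VBD sat))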