diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d86c7513193df263ab0e74875cf40ff5cf51cebb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12cccb05f7812415b6007241b7596abb98b5398c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4442ee6ff477acb86ebb5cb1bc0743e3957bbbf4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7109bd10f9096bf54210329c5258366a19507491 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..253759fa455ed15f1b32dc7b7386c48525f846c6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da6cda9e9e3bd4ee26d2b92ec8b90fac7d92fdf0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa763ba27ab771430943e332ec7aeb8c6410bf70 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e63940dea8d5ee750981227594da7e37984e8208 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4532e6de84744d6bbdf357d41f10bbbe2a0158c4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/chartparser_app.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/chartparser_app.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2478ed89bab82e32eefb73b55e6b09efe512b51 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/chartparser_app.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/chunkparser_app.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/chunkparser_app.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f3e6f174d3b9db9043ebd0b62ca64b0d246cbb0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/chunkparser_app.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/collocations_app.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/collocations_app.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d4b4a6e68cf3b95083918f2524ea70d1c3911f0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/collocations_app.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/concordance_app.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/concordance_app.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..471b8c74b82c249acfa3cfcf405434c8b7aee39d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/concordance_app.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/srparser_app.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/srparser_app.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e22b1a8600ec37725529ccd3992797ba8f6e446 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/srparser_app.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/wordfreq_app.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/wordfreq_app.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2f96613ad588f808420b3650667a62b4885f0ed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/wordfreq_app.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..67f565aaa85618c0268c75cd4b1524829712909c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__init__.py @@ -0,0 +1,529 @@ +# Natural Language Toolkit: Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +# TODO this docstring isn't up-to-date! +""" +NLTK corpus readers. The modules in this package provide functions +that can be used to read corpus files in a variety of formats. 
These +functions can be used to read both the corpus files that are +distributed in the NLTK corpus package, and corpus files that are part +of external corpora. + +Available Corpora +================= + +Please see https://www.nltk.org/nltk_data/ for a complete list. +Install corpora using nltk.download(). + +Corpus Reader Functions +======================= +Each corpus module defines one or more "corpus reader functions", +which can be used to read documents from that corpus. These functions +take an argument, ``item``, which is used to indicate which document +should be read from the corpus: + +- If ``item`` is one of the unique identifiers listed in the corpus + module's ``items`` variable, then the corresponding document will + be loaded from the NLTK corpus package. +- If ``item`` is a filename, then that file will be read. + +Additionally, corpus reader functions can be given lists of item +names; in which case, they will return a concatenation of the +corresponding documents. + +Corpus reader functions are named based on the type of information +they return. Some common examples, and their return types, are: + +- words(): list of str +- sents(): list of (list of str) +- paras(): list of (list of (list of str)) +- tagged_words(): list of (str,str) tuple +- tagged_sents(): list of (list of (str,str)) +- tagged_paras(): list of (list of (list of (str,str))) +- chunked_sents(): list of (Tree w/ (str,str) leaves) +- parsed_sents(): list of (Tree with str leaves) +- parsed_paras(): list of (list of (Tree with str leaves)) +- xml(): A single xml ElementTree +- raw(): unprocessed corpus contents + +For example, to read a list of the words in the Brown Corpus, use +``nltk.corpus.brown.words()``: + + >>> from nltk.corpus import brown + >>> print(", ".join(brown.words())) # doctest: +ELLIPSIS + The, Fulton, County, Grand, Jury, said, ... 
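A minimal usage sketch of the access methods listed above (assuming the Brown corpus data has already been fetched with nltk.download("brown"); the `brown` reader itself is declared further down in this file):

```python
# Minimal usage sketch of the corpus reader access methods described above.
# Assumes the Brown corpus data has been fetched with nltk.download("brown").
from nltk.corpus import brown

print(brown.words()[:6])           # words(): list of str
print(brown.sents()[0])            # sents(): list of (list of str)
print(brown.tagged_words()[:3])    # tagged_words(): list of (str, str) tuples
print(brown.categories()[:5])      # category labels read from cats.txt

# Passing an item (here a fileid) restricts the reader to one document.
print(brown.words(fileids="ca01")[:6])
```

Because the readers declared below are `LazyCorpusLoader` instances, no corpus data is actually read until the first such call.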
+ +""" + +import re + +from nltk.corpus.reader import * +from nltk.corpus.util import LazyCorpusLoader +from nltk.tokenize import RegexpTokenizer + +abc: PlaintextCorpusReader = LazyCorpusLoader( + "abc", + PlaintextCorpusReader, + r"(?!\.).*\.txt", + encoding=[("science", "latin_1"), ("rural", "utf8")], +) +alpino: AlpinoCorpusReader = LazyCorpusLoader( + "alpino", AlpinoCorpusReader, tagset="alpino" +) +bcp47: BCP47CorpusReader = LazyCorpusLoader( + "bcp47", BCP47CorpusReader, r"(cldr|iana)/*" +) +brown: CategorizedTaggedCorpusReader = LazyCorpusLoader( + "brown", + CategorizedTaggedCorpusReader, + r"c[a-z]\d\d", + cat_file="cats.txt", + tagset="brown", + encoding="ascii", +) +cess_cat: BracketParseCorpusReader = LazyCorpusLoader( + "cess_cat", + BracketParseCorpusReader, + r"(?!\.).*\.tbf", + tagset="unknown", + encoding="ISO-8859-15", +) +cess_esp: BracketParseCorpusReader = LazyCorpusLoader( + "cess_esp", + BracketParseCorpusReader, + r"(?!\.).*\.tbf", + tagset="unknown", + encoding="ISO-8859-15", +) +cmudict: CMUDictCorpusReader = LazyCorpusLoader( + "cmudict", CMUDictCorpusReader, ["cmudict"] +) +comtrans: AlignedCorpusReader = LazyCorpusLoader( + "comtrans", AlignedCorpusReader, r"(?!\.).*\.txt" +) +comparative_sentences: ComparativeSentencesCorpusReader = LazyCorpusLoader( + "comparative_sentences", + ComparativeSentencesCorpusReader, + r"labeledSentences\.txt", + encoding="latin-1", +) +conll2000: ConllChunkCorpusReader = LazyCorpusLoader( + "conll2000", + ConllChunkCorpusReader, + ["train.txt", "test.txt"], + ("NP", "VP", "PP"), + tagset="wsj", + encoding="ascii", +) +conll2002: ConllChunkCorpusReader = LazyCorpusLoader( + "conll2002", + ConllChunkCorpusReader, + r".*\.(test|train).*", + ("LOC", "PER", "ORG", "MISC"), + encoding="utf-8", +) +conll2007: DependencyCorpusReader = LazyCorpusLoader( + "conll2007", + DependencyCorpusReader, + r".*\.(test|train).*", + encoding=[("eus", "ISO-8859-2"), ("esp", "utf8")], +) +crubadan: CrubadanCorpusReader = LazyCorpusLoader( + "crubadan", CrubadanCorpusReader, r".*\.txt" +) +dependency_treebank: DependencyCorpusReader = LazyCorpusLoader( + "dependency_treebank", DependencyCorpusReader, r".*\.dp", encoding="ascii" +) +extended_omw: CorpusReader = LazyCorpusLoader( + "extended_omw", CorpusReader, r".*/wn-[a-z\-]*\.tab", encoding="utf8" +) +floresta: BracketParseCorpusReader = LazyCorpusLoader( + "floresta", + BracketParseCorpusReader, + r"(?!\.).*\.ptb", + "#", + tagset="unknown", + encoding="ISO-8859-15", +) +framenet15: FramenetCorpusReader = LazyCorpusLoader( + "framenet_v15", + FramenetCorpusReader, + [ + "frRelation.xml", + "frameIndex.xml", + "fulltextIndex.xml", + "luIndex.xml", + "semTypes.xml", + ], +) +framenet: FramenetCorpusReader = LazyCorpusLoader( + "framenet_v17", + FramenetCorpusReader, + [ + "frRelation.xml", + "frameIndex.xml", + "fulltextIndex.xml", + "luIndex.xml", + "semTypes.xml", + ], +) +gazetteers: WordListCorpusReader = LazyCorpusLoader( + "gazetteers", WordListCorpusReader, r"(?!LICENSE|\.).*\.txt", encoding="ISO-8859-2" +) +genesis: PlaintextCorpusReader = LazyCorpusLoader( + "genesis", + PlaintextCorpusReader, + r"(?!\.).*\.txt", + encoding=[ + ("finnish|french|german", "latin_1"), + ("swedish", "cp865"), + (".*", "utf_8"), + ], +) +gutenberg: PlaintextCorpusReader = LazyCorpusLoader( + "gutenberg", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1" +) +ieer: IEERCorpusReader = LazyCorpusLoader("ieer", IEERCorpusReader, r"(?!README|\.).*") +inaugural: PlaintextCorpusReader = LazyCorpusLoader( + 
"inaugural", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1" +) +# [XX] This should probably just use TaggedCorpusReader: +indian: IndianCorpusReader = LazyCorpusLoader( + "indian", IndianCorpusReader, r"(?!\.).*\.pos", tagset="unknown", encoding="utf8" +) + +jeita: ChasenCorpusReader = LazyCorpusLoader( + "jeita", ChasenCorpusReader, r".*\.chasen", encoding="utf-8" +) +knbc: KNBCorpusReader = LazyCorpusLoader( + "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp" +) +lin_thesaurus: LinThesaurusCorpusReader = LazyCorpusLoader( + "lin_thesaurus", LinThesaurusCorpusReader, r".*\.lsp" +) +mac_morpho: MacMorphoCorpusReader = LazyCorpusLoader( + "mac_morpho", + MacMorphoCorpusReader, + r"(?!\.).*\.txt", + tagset="unknown", + encoding="latin-1", +) +machado: PortugueseCategorizedPlaintextCorpusReader = LazyCorpusLoader( + "machado", + PortugueseCategorizedPlaintextCorpusReader, + r"(?!\.).*\.txt", + cat_pattern=r"([a-z]*)/.*", + encoding="latin-1", +) +masc_tagged: CategorizedTaggedCorpusReader = LazyCorpusLoader( + "masc_tagged", + CategorizedTaggedCorpusReader, + r"(spoken|written)/.*\.txt", + cat_file="categories.txt", + tagset="wsj", + encoding="utf-8", + sep="_", +) +movie_reviews: CategorizedPlaintextCorpusReader = LazyCorpusLoader( + "movie_reviews", + CategorizedPlaintextCorpusReader, + r"(?!\.).*\.txt", + cat_pattern=r"(neg|pos)/.*", + encoding="ascii", +) +multext_east: MTECorpusReader = LazyCorpusLoader( + "mte_teip5", MTECorpusReader, r"(oana).*\.xml", encoding="utf-8" +) +names: WordListCorpusReader = LazyCorpusLoader( + "names", WordListCorpusReader, r"(?!\.).*\.txt", encoding="ascii" +) +nps_chat: NPSChatCorpusReader = LazyCorpusLoader( + "nps_chat", NPSChatCorpusReader, r"(?!README|\.).*\.xml", tagset="wsj" +) +opinion_lexicon: OpinionLexiconCorpusReader = LazyCorpusLoader( + "opinion_lexicon", + OpinionLexiconCorpusReader, + r"(\w+)\-words\.txt", + encoding="ISO-8859-2", +) +ppattach: PPAttachmentCorpusReader = LazyCorpusLoader( + "ppattach", PPAttachmentCorpusReader, ["training", "test", "devset"] +) +product_reviews_1: ReviewsCorpusReader = LazyCorpusLoader( + "product_reviews_1", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8" +) +product_reviews_2: ReviewsCorpusReader = LazyCorpusLoader( + "product_reviews_2", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8" +) +pros_cons: ProsConsCorpusReader = LazyCorpusLoader( + "pros_cons", + ProsConsCorpusReader, + r"Integrated(Cons|Pros)\.txt", + cat_pattern=r"Integrated(Cons|Pros)\.txt", + encoding="ISO-8859-2", +) +ptb: CategorizedBracketParseCorpusReader = ( + LazyCorpusLoader( # Penn Treebank v3: WSJ and Brown portions + "ptb", + CategorizedBracketParseCorpusReader, + r"(WSJ/\d\d/WSJ_\d\d|BROWN/C[A-Z]/C[A-Z])\d\d.MRG", + cat_file="allcats.txt", + tagset="wsj", + ) +) +qc: StringCategoryCorpusReader = LazyCorpusLoader( + "qc", StringCategoryCorpusReader, ["train.txt", "test.txt"], encoding="ISO-8859-2" +) +reuters: CategorizedPlaintextCorpusReader = LazyCorpusLoader( + "reuters", + CategorizedPlaintextCorpusReader, + "(training|test).*", + cat_file="cats.txt", + encoding="ISO-8859-2", +) +rte: RTECorpusReader = LazyCorpusLoader("rte", RTECorpusReader, r"(?!\.).*\.xml") +senseval: SensevalCorpusReader = LazyCorpusLoader( + "senseval", SensevalCorpusReader, r"(?!\.).*\.pos" +) +sentence_polarity: CategorizedSentencesCorpusReader = LazyCorpusLoader( + "sentence_polarity", + CategorizedSentencesCorpusReader, + r"rt-polarity\.(neg|pos)", + cat_pattern=r"rt-polarity\.(neg|pos)", + 
encoding="utf-8", +) +sentiwordnet: SentiWordNetCorpusReader = LazyCorpusLoader( + "sentiwordnet", SentiWordNetCorpusReader, "SentiWordNet_3.0.0.txt", encoding="utf-8" +) +shakespeare: XMLCorpusReader = LazyCorpusLoader( + "shakespeare", XMLCorpusReader, r"(?!\.).*\.xml" +) +sinica_treebank: SinicaTreebankCorpusReader = LazyCorpusLoader( + "sinica_treebank", + SinicaTreebankCorpusReader, + ["parsed"], + tagset="unknown", + encoding="utf-8", +) +state_union: PlaintextCorpusReader = LazyCorpusLoader( + "state_union", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="ISO-8859-2" +) +stopwords: WordListCorpusReader = LazyCorpusLoader( + "stopwords", WordListCorpusReader, r"(?!README|\.).*", encoding="utf8" +) +subjectivity: CategorizedSentencesCorpusReader = LazyCorpusLoader( + "subjectivity", + CategorizedSentencesCorpusReader, + r"(quote.tok.gt9|plot.tok.gt9)\.5000", + cat_map={"quote.tok.gt9.5000": ["subj"], "plot.tok.gt9.5000": ["obj"]}, + encoding="latin-1", +) +swadesh: SwadeshCorpusReader = LazyCorpusLoader( + "swadesh", SwadeshCorpusReader, r"(?!README|\.).*", encoding="utf8" +) +swadesh110: PanlexSwadeshCorpusReader = LazyCorpusLoader( + "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh110/.*\.txt", encoding="utf8" +) +swadesh207: PanlexSwadeshCorpusReader = LazyCorpusLoader( + "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh207/.*\.txt", encoding="utf8" +) +switchboard: SwitchboardCorpusReader = LazyCorpusLoader( + "switchboard", SwitchboardCorpusReader, tagset="wsj" +) +timit: TimitCorpusReader = LazyCorpusLoader("timit", TimitCorpusReader) +timit_tagged: TimitTaggedCorpusReader = LazyCorpusLoader( + "timit", TimitTaggedCorpusReader, r".+\.tags", tagset="wsj", encoding="ascii" +) +toolbox: ToolboxCorpusReader = LazyCorpusLoader( + "toolbox", ToolboxCorpusReader, r"(?!.*(README|\.)).*\.(dic|txt)" +) +treebank: BracketParseCorpusReader = LazyCorpusLoader( + "treebank/combined", + BracketParseCorpusReader, + r"wsj_.*\.mrg", + tagset="wsj", + encoding="ascii", +) +treebank_chunk: ChunkedCorpusReader = LazyCorpusLoader( + "treebank/tagged", + ChunkedCorpusReader, + r"wsj_.*\.pos", + sent_tokenizer=RegexpTokenizer(r"(?<=/\.)\s*(?![^\[]*\])", gaps=True), + para_block_reader=tagged_treebank_para_block_reader, + tagset="wsj", + encoding="ascii", +) +treebank_raw: PlaintextCorpusReader = LazyCorpusLoader( + "treebank/raw", PlaintextCorpusReader, r"wsj_.*", encoding="ISO-8859-2" +) +twitter_samples: TwitterCorpusReader = LazyCorpusLoader( + "twitter_samples", TwitterCorpusReader, r".*\.json" +) +udhr: UdhrCorpusReader = LazyCorpusLoader("udhr", UdhrCorpusReader) +udhr2: PlaintextCorpusReader = LazyCorpusLoader( + "udhr2", PlaintextCorpusReader, r".*\.txt", encoding="utf8" +) +universal_treebanks: ConllCorpusReader = LazyCorpusLoader( + "universal_treebanks_v20", + ConllCorpusReader, + r".*\.conll", + columntypes=( + "ignore", + "words", + "ignore", + "ignore", + "pos", + "ignore", + "ignore", + "ignore", + "ignore", + "ignore", + ), +) +verbnet: VerbnetCorpusReader = LazyCorpusLoader( + "verbnet", VerbnetCorpusReader, r"(?!\.).*\.xml" +) +webtext: PlaintextCorpusReader = LazyCorpusLoader( + "webtext", PlaintextCorpusReader, r"(?!README|\.).*\.txt", encoding="ISO-8859-2" +) +wordnet: WordNetCorpusReader = LazyCorpusLoader( + "wordnet", + WordNetCorpusReader, + LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"), +) +wordnet31: WordNetCorpusReader = LazyCorpusLoader( + "wordnet31", + WordNetCorpusReader, + LazyCorpusLoader("omw-1.4", CorpusReader, 
r".*/wn-data-.*\.tab", encoding="utf8"), +) +wordnet2021: WordNetCorpusReader = LazyCorpusLoader( + "wordnet2021", + WordNetCorpusReader, + LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"), +) +wordnet_ic: WordNetICCorpusReader = LazyCorpusLoader( + "wordnet_ic", WordNetICCorpusReader, r".*\.dat" +) +words: WordListCorpusReader = LazyCorpusLoader( + "words", WordListCorpusReader, r"(?!README|\.).*", encoding="ascii" +) + +# defined after treebank +propbank: PropbankCorpusReader = LazyCorpusLoader( + "propbank", + PropbankCorpusReader, + "prop.txt", + r"frames/.*\.xml", + "verbs.txt", + lambda filename: re.sub(r"^wsj/\d\d/", "", filename), + treebank, +) # Must be defined *after* treebank corpus. +nombank: NombankCorpusReader = LazyCorpusLoader( + "nombank.1.0", + NombankCorpusReader, + "nombank.1.0", + r"frames/.*\.xml", + "nombank.1.0.words", + lambda filename: re.sub(r"^wsj/\d\d/", "", filename), + treebank, +) # Must be defined *after* treebank corpus. +propbank_ptb: PropbankCorpusReader = LazyCorpusLoader( + "propbank", + PropbankCorpusReader, + "prop.txt", + r"frames/.*\.xml", + "verbs.txt", + lambda filename: filename.upper(), + ptb, +) # Must be defined *after* ptb corpus. +nombank_ptb: NombankCorpusReader = LazyCorpusLoader( + "nombank.1.0", + NombankCorpusReader, + "nombank.1.0", + r"frames/.*\.xml", + "nombank.1.0.words", + lambda filename: filename.upper(), + ptb, +) # Must be defined *after* ptb corpus. +semcor: SemcorCorpusReader = LazyCorpusLoader( + "semcor", SemcorCorpusReader, r"brown./tagfiles/br-.*\.xml", wordnet +) # Must be defined *after* wordnet corpus. + +nonbreaking_prefixes: NonbreakingPrefixesCorpusReader = LazyCorpusLoader( + "nonbreaking_prefixes", + NonbreakingPrefixesCorpusReader, + r"(?!README|\.).*", + encoding="utf8", +) +perluniprops: UnicharsCorpusReader = LazyCorpusLoader( + "perluniprops", + UnicharsCorpusReader, + r"(?!README|\.).*", + nltk_data_subdir="misc", + encoding="utf8", +) + +# mwa_ppdb = LazyCorpusLoader( +# 'mwa_ppdb', MWAPPDBCorpusReader, r'(?!README|\.).*', nltk_data_subdir='misc', encoding='utf8') + +# See https://github.com/nltk/nltk/issues/1579 +# and https://github.com/nltk/nltk/issues/1716 +# +# pl196x = LazyCorpusLoader( +# 'pl196x', Pl196xCorpusReader, r'[a-z]-.*\.xml', +# cat_file='cats.txt', textid_file='textids.txt', encoding='utf8') +# +# ipipan = LazyCorpusLoader( +# 'ipipan', IPIPANCorpusReader, r'(?!\.).*morph\.xml') +# +# nkjp = LazyCorpusLoader( +# 'nkjp', NKJPCorpusReader, r'', encoding='utf8') +# +# panlex_lite = LazyCorpusLoader( +# 'panlex_lite', PanLexLiteCorpusReader) +# +# ycoe = LazyCorpusLoader( +# 'ycoe', YCOECorpusReader) +# +# corpus not available with NLTK; these lines caused help(nltk.corpus) to break +# hebrew_treebank = LazyCorpusLoader( +# 'hebrew_treebank', BracketParseCorpusReader, r'.*\.txt') + +# FIXME: override any imported demo from various corpora, see https://github.com/nltk/nltk/issues/2116 +def demo(): + # This is out-of-date: + abc.demo() + brown.demo() + # chat80.demo() + cmudict.demo() + conll2000.demo() + conll2002.demo() + genesis.demo() + gutenberg.demo() + ieer.demo() + inaugural.demo() + indian.demo() + names.demo() + ppattach.demo() + senseval.demo() + shakespeare.demo() + sinica_treebank.demo() + state_union.demo() + stopwords.demo() + timit.demo() + toolbox.demo() + treebank.demo() + udhr.demo() + webtext.demo() + words.demo() + + +# ycoe.demo() + +if __name__ == "__main__": + # demo() + pass diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py new file mode 100644 index 0000000000000000000000000000000000000000..2a32ecc86f7b7671445effc2801870c3fc10f295 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: Europarl Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nitin Madnani +# URL: +# For license information, see LICENSE.TXT + +import re + +from nltk.corpus.reader import * +from nltk.corpus.util import LazyCorpusLoader + +# Create a new corpus reader instance for each European language +danish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/danish", EuroparlCorpusReader, r"ep-.*\.da", encoding="utf-8" +) + +dutch: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/dutch", EuroparlCorpusReader, r"ep-.*\.nl", encoding="utf-8" +) + +english: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/english", EuroparlCorpusReader, r"ep-.*\.en", encoding="utf-8" +) + +finnish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/finnish", EuroparlCorpusReader, r"ep-.*\.fi", encoding="utf-8" +) + +french: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/french", EuroparlCorpusReader, r"ep-.*\.fr", encoding="utf-8" +) + +german: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/german", EuroparlCorpusReader, r"ep-.*\.de", encoding="utf-8" +) + +greek: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/greek", EuroparlCorpusReader, r"ep-.*\.el", encoding="utf-8" +) + +italian: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/italian", EuroparlCorpusReader, r"ep-.*\.it", encoding="utf-8" +) + +portuguese: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/portuguese", EuroparlCorpusReader, r"ep-.*\.pt", encoding="utf-8" +) + +spanish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/spanish", EuroparlCorpusReader, r"ep-.*\.es", encoding="utf-8" +) + +swedish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/swedish", EuroparlCorpusReader, r"ep-.*\.sv", encoding="utf-8" +) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a5274f09dde2db30aa213800647e19a7d8201981 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py @@ -0,0 +1,186 @@ +# Natural Language Toolkit: Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK corpus readers. The modules in this package provide functions +that can be used to read corpus fileids in a variety of formats. These +functions can be used to read both the corpus fileids that are +distributed in the NLTK corpus package, and corpus fileids that are part +of external corpora. + +Corpus Reader Functions +======================= +Each corpus module defines one or more "corpus reader functions", +which can be used to read documents from that corpus. These functions +take an argument, ``item``, which is used to indicate which document +should be read from the corpus: + +- If ``item`` is one of the unique identifiers listed in the corpus + module's ``items`` variable, then the corresponding document will + be loaded from the NLTK corpus package. 
+- If ``item`` is a fileid, then that file will be read. + +Additionally, corpus reader functions can be given lists of item +names; in which case, they will return a concatenation of the +corresponding documents. + +Corpus reader functions are named based on the type of information +they return. Some common examples, and their return types, are: + +- words(): list of str +- sents(): list of (list of str) +- paras(): list of (list of (list of str)) +- tagged_words(): list of (str,str) tuple +- tagged_sents(): list of (list of (str,str)) +- tagged_paras(): list of (list of (list of (str,str))) +- chunked_sents(): list of (Tree w/ (str,str) leaves) +- parsed_sents(): list of (Tree with str leaves) +- parsed_paras(): list of (list of (Tree with str leaves)) +- xml(): A single xml ElementTree +- raw(): unprocessed corpus contents + +For example, to read a list of the words in the Brown Corpus, use +``nltk.corpus.brown.words()``: + + >>> from nltk.corpus import brown + >>> print(", ".join(brown.words()[:6])) # only first 6 words + The, Fulton, County, Grand, Jury, said + +isort:skip_file +""" + +from nltk.corpus.reader.plaintext import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.api import * +from nltk.corpus.reader.tagged import * +from nltk.corpus.reader.cmudict import * +from nltk.corpus.reader.conll import * +from nltk.corpus.reader.chunked import * +from nltk.corpus.reader.wordlist import * +from nltk.corpus.reader.xmldocs import * +from nltk.corpus.reader.ppattach import * +from nltk.corpus.reader.senseval import * +from nltk.corpus.reader.ieer import * +from nltk.corpus.reader.sinica_treebank import * +from nltk.corpus.reader.bracket_parse import * +from nltk.corpus.reader.indian import * +from nltk.corpus.reader.toolbox import * +from nltk.corpus.reader.timit import * +from nltk.corpus.reader.ycoe import * +from nltk.corpus.reader.rte import * +from nltk.corpus.reader.string_category import * +from nltk.corpus.reader.propbank import * +from nltk.corpus.reader.verbnet import * +from nltk.corpus.reader.bnc import * +from nltk.corpus.reader.nps_chat import * +from nltk.corpus.reader.wordnet import * +from nltk.corpus.reader.switchboard import * +from nltk.corpus.reader.dependency import * +from nltk.corpus.reader.nombank import * +from nltk.corpus.reader.ipipan import * +from nltk.corpus.reader.pl196x import * +from nltk.corpus.reader.knbc import * +from nltk.corpus.reader.chasen import * +from nltk.corpus.reader.childes import * +from nltk.corpus.reader.aligned import * +from nltk.corpus.reader.lin import * +from nltk.corpus.reader.semcor import * +from nltk.corpus.reader.framenet import * +from nltk.corpus.reader.udhr import * +from nltk.corpus.reader.bnc import * +from nltk.corpus.reader.sentiwordnet import * +from nltk.corpus.reader.twitter import * +from nltk.corpus.reader.nkjp import * +from nltk.corpus.reader.crubadan import * +from nltk.corpus.reader.mte import * +from nltk.corpus.reader.reviews import * +from nltk.corpus.reader.opinion_lexicon import * +from nltk.corpus.reader.pros_cons import * +from nltk.corpus.reader.categorized_sents import * +from nltk.corpus.reader.comparative_sents import * +from nltk.corpus.reader.panlex_lite import * +from nltk.corpus.reader.panlex_swadesh import * +from nltk.corpus.reader.bcp47 import * + +# Make sure that nltk.corpus.reader.bracket_parse gives the module, not +# the function bracket_parse() defined in nltk.tree: +from nltk.corpus.reader import bracket_parse + +__all__ = [ + "CorpusReader", + 
"CategorizedCorpusReader", + "PlaintextCorpusReader", + "find_corpus_fileids", + "TaggedCorpusReader", + "CMUDictCorpusReader", + "ConllChunkCorpusReader", + "WordListCorpusReader", + "PPAttachmentCorpusReader", + "SensevalCorpusReader", + "IEERCorpusReader", + "ChunkedCorpusReader", + "SinicaTreebankCorpusReader", + "BracketParseCorpusReader", + "IndianCorpusReader", + "ToolboxCorpusReader", + "TimitCorpusReader", + "YCOECorpusReader", + "MacMorphoCorpusReader", + "SyntaxCorpusReader", + "AlpinoCorpusReader", + "RTECorpusReader", + "StringCategoryCorpusReader", + "EuroparlCorpusReader", + "CategorizedBracketParseCorpusReader", + "CategorizedTaggedCorpusReader", + "CategorizedPlaintextCorpusReader", + "PortugueseCategorizedPlaintextCorpusReader", + "tagged_treebank_para_block_reader", + "PropbankCorpusReader", + "VerbnetCorpusReader", + "BNCCorpusReader", + "ConllCorpusReader", + "XMLCorpusReader", + "NPSChatCorpusReader", + "SwadeshCorpusReader", + "WordNetCorpusReader", + "WordNetICCorpusReader", + "SwitchboardCorpusReader", + "DependencyCorpusReader", + "NombankCorpusReader", + "IPIPANCorpusReader", + "Pl196xCorpusReader", + "TEICorpusView", + "KNBCorpusReader", + "ChasenCorpusReader", + "CHILDESCorpusReader", + "AlignedCorpusReader", + "TimitTaggedCorpusReader", + "LinThesaurusCorpusReader", + "SemcorCorpusReader", + "FramenetCorpusReader", + "UdhrCorpusReader", + "BNCCorpusReader", + "SentiWordNetCorpusReader", + "SentiSynset", + "TwitterCorpusReader", + "NKJPCorpusReader", + "CrubadanCorpusReader", + "MTECorpusReader", + "ReviewsCorpusReader", + "OpinionLexiconCorpusReader", + "ProsConsCorpusReader", + "CategorizedSentencesCorpusReader", + "ComparativeSentencesCorpusReader", + "PanLexLiteCorpusReader", + "NonbreakingPrefixesCorpusReader", + "UnicharsCorpusReader", + "MWAPPDBCorpusReader", + "PanlexSwadeshCorpusReader", + "BCP47CorpusReader", +] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py new file mode 100644 index 0000000000000000000000000000000000000000..93caf6233b5d1ee4d66eff0009a0d73fceb67904 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py @@ -0,0 +1,154 @@ +# Natural Language Toolkit: Aligned Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# Author: Steven Bird +# For license information, see LICENSE.TXT + +from nltk.corpus.reader.api import CorpusReader +from nltk.corpus.reader.util import ( + StreamBackedCorpusView, + concat, + read_alignedsent_block, +) +from nltk.tokenize import RegexpTokenizer, WhitespaceTokenizer +from nltk.translate import AlignedSent, Alignment + + +class AlignedCorpusReader(CorpusReader): + """ + Reader for corpora of word-aligned sentences. Tokens are assumed + to be separated by whitespace. Sentences begin on separate lines. + """ + + def __init__( + self, + root, + fileids, + sep="/", + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + alignedsent_block_reader=read_alignedsent_block, + encoding="latin1", + ): + """ + Construct a new Aligned Corpus reader for a set of documents + located at the given root directory. Example usage: + + >>> root = '/...path to corpus.../' + >>> reader = AlignedCorpusReader(root, '.*', '.txt') # doctest: +SKIP + + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. 
+ """ + CorpusReader.__init__(self, root, fileids, encoding) + self._sep = sep + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._alignedsent_block_reader = alignedsent_block_reader + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + AlignedSentCorpusView( + fileid, + enc, + False, + False, + self._word_tokenizer, + self._sent_tokenizer, + self._alignedsent_block_reader, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + """ + return concat( + [ + AlignedSentCorpusView( + fileid, + enc, + False, + True, + self._word_tokenizer, + self._sent_tokenizer, + self._alignedsent_block_reader, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def aligned_sents(self, fileids=None): + """ + :return: the given file(s) as a list of AlignedSent objects. + :rtype: list(AlignedSent) + """ + return concat( + [ + AlignedSentCorpusView( + fileid, + enc, + True, + True, + self._word_tokenizer, + self._sent_tokenizer, + self._alignedsent_block_reader, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class AlignedSentCorpusView(StreamBackedCorpusView): + """ + A specialized corpus view for aligned sentences. + ``AlignedSentCorpusView`` objects are typically created by + ``AlignedCorpusReader`` (not directly by nltk users). + """ + + def __init__( + self, + corpus_file, + encoding, + aligned, + group_by_sent, + word_tokenizer, + sent_tokenizer, + alignedsent_block_reader, + ): + self._aligned = aligned + self._group_by_sent = group_by_sent + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._alignedsent_block_reader = alignedsent_block_reader + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + block = [ + self._word_tokenizer.tokenize(sent_str) + for alignedsent_str in self._alignedsent_block_reader(stream) + for sent_str in self._sent_tokenizer.tokenize(alignedsent_str) + ] + if self._aligned: + block[2] = Alignment.fromstring( + " ".join(block[2]) + ) # kludge; we shouldn't have tokenized the alignment string + block = [AlignedSent(*block)] + elif self._group_by_sent: + block = [block[0]] + else: + block = block[0] + + return block diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py new file mode 100644 index 0000000000000000000000000000000000000000..ef6ab8146619bbdc0f448f9771269ab7d3ee5451 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py @@ -0,0 +1,158 @@ +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Masato Hagiwara +# URL: +# For license information, see LICENSE.TXT + +import sys + +from nltk.corpus.reader import util +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * + + +class ChasenCorpusReader(CorpusReader): + def __init__(self, root, fileids, encoding="utf8", sent_splitter=None): + self._sent_splitter = sent_splitter + CorpusReader.__init__(self, root, fileids, encoding) + + def words(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, False, False, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def 
tagged_words(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, True, False, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, False, True, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, True, True, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, False, True, True, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_paras(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, True, True, True, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class ChasenCorpusView(StreamBackedCorpusView): + """ + A specialized corpus view for ChasenReader. Similar to ``TaggedCorpusView``, + but this'll use fixed sets of word and sentence tokenizer. + """ + + def __init__( + self, + corpus_file, + encoding, + tagged, + group_by_sent, + group_by_para, + sent_splitter=None, + ): + self._tagged = tagged + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + self._sent_splitter = sent_splitter + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + """Reads one paragraph at a time.""" + block = [] + for para_str in read_regexp_block(stream, r".", r"^EOS\n"): + + para = [] + + sent = [] + for line in para_str.splitlines(): + + _eos = line.strip() == "EOS" + _cells = line.split("\t") + w = (_cells[0], "\t".join(_cells[1:])) + if not _eos: + sent.append(w) + + if _eos or (self._sent_splitter and self._sent_splitter(w)): + if not self._tagged: + sent = [w for (w, t) in sent] + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + sent = [] + + if len(sent) > 0: + if not self._tagged: + sent = [w for (w, t) in sent] + + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + + if self._group_by_para: + block.append(para) + else: + block.extend(para) + + return block + + +def demo(): + + import nltk + from nltk.corpus.util import LazyCorpusLoader + + jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8") + print("/".join(jeita.words()[22100:22140])) + + print( + "\nEOS\n".join( + "\n".join("{}/{}".format(w[0], w[1].split("\t")[2]) for w in sent) + for sent in jeita.tagged_sents()[2170:2173] + ) + ) + + +def test(): + + from nltk.corpus.util import LazyCorpusLoader + + jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8") + + assert isinstance(jeita.tagged_words()[0][1], str) + + +if __name__ == "__main__": + demo() + test() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py new file mode 100644 index 0000000000000000000000000000000000000000..66b42e79ca134227357aba4cb493335196e05961 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py @@ -0,0 +1,273 @@ +# Natural Language Toolkit: Chunked Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A reader for corpora that contain chunked (and optionally 
tagged) +documents. +""" + +import codecs +import os.path + +import nltk +from nltk.chunk import tagstr2tree +from nltk.corpus.reader.api import * +from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader +from nltk.corpus.reader.util import * +from nltk.tokenize import * +from nltk.tree import Tree + + +class ChunkedCorpusReader(CorpusReader): + """ + Reader for chunked (and optionally tagged) corpora. Paragraphs + are split using a block reader. They are then tokenized into + sentences using a sentence tokenizer. Finally, these sentences + are parsed into chunk trees using a string-to-chunktree conversion + function. Each of these steps can be performed using a default + function or a custom function. By default, paragraphs are split + on blank lines; sentences are listed one per line; and sentences + are parsed into chunk trees using ``nltk.chunk.tagstr2tree``. + """ + + def __init__( + self, + root, + fileids, + extension="", + str2chunktree=tagstr2tree, + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + para_block_reader=read_blankline_block, + encoding="utf8", + tagset=None, + ): + """ + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader, tagset) + """Arguments for corpus views generated by this corpus: a tuple + (str2chunktree, sent_tokenizer, para_block_tokenizer)""" + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + """ + return concat( + [ + ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of word strings. + :rtype: list(list(list(str))) + """ + return concat( + [ + ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. + :rtype: list(tuple(str,str)) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 0, 0, 0, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. + + :rtype: list(list(tuple(str,str))) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 0, 0, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_paras(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of ``(word,tag)`` tuples. 
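The `treebank_chunk` loader declared in `nltk/corpus/__init__.py` is built on this reader; a short sketch of the plain, tagged, and chunked views, assuming the treebank data has been downloaded:

```python
# Short sketch (assumes nltk.download("treebank") has been run); treebank_chunk
# in nltk/corpus/__init__.py wraps ChunkedCorpusReader over treebank/tagged.
from nltk.corpus import treebank_chunk

print(treebank_chunk.words()[:8])            # plain tokens
print(treebank_chunk.tagged_sents()[0][:5])  # (word, tag) tuples
print(treebank_chunk.chunked_sents()[0])     # shallow Tree with NP chunks
```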
+ :rtype: list(list(list(tuple(str,str)))) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 1, 0, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def chunked_words(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of tagged + words and chunks. Words are encoded as ``(word, tag)`` + tuples (if the corpus has tags) or word strings (if the + corpus has no tags). Chunks are encoded as depth-one + trees over ``(word,tag)`` tuples or word strings. + :rtype: list(tuple(str,str) and Tree) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 0, 0, 1, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def chunked_sents(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + sentences, each encoded as a shallow Tree. The leaves + of these trees are encoded as ``(word, tag)`` tuples (if + the corpus has tags) or word strings (if the corpus has no + tags). + :rtype: list(Tree) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 0, 1, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def chunked_paras(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as a shallow Tree. The leaves of these + trees are encoded as ``(word, tag)`` tuples (if the corpus + has tags) or word strings (if the corpus has no tags). + :rtype: list(list(Tree)) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 1, 1, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_block(self, stream): + return [tagstr2tree(t) for t in read_blankline_block(stream)] + + +class ChunkedCorpusView(StreamBackedCorpusView): + def __init__( + self, + fileid, + encoding, + tagged, + group_by_sent, + group_by_para, + chunked, + str2chunktree, + sent_tokenizer, + para_block_reader, + source_tagset=None, + target_tagset=None, + ): + StreamBackedCorpusView.__init__(self, fileid, encoding=encoding) + self._tagged = tagged + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + self._chunked = chunked + self._str2chunktree = str2chunktree + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + self._source_tagset = source_tagset + self._target_tagset = target_tagset + + def read_block(self, stream): + block = [] + for para_str in self._para_block_reader(stream): + para = [] + for sent_str in self._sent_tokenizer.tokenize(para_str): + sent = self._str2chunktree( + sent_str, + source_tagset=self._source_tagset, + target_tagset=self._target_tagset, + ) + + # If requested, throw away the tags. + if not self._tagged: + sent = self._untag(sent) + + # If requested, throw away the chunks. + if not self._chunked: + sent = sent.leaves() + + # Add the sentence to `para`. + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + + # Add the paragraph to `block`. 
+ if self._group_by_para: + block.append(para) + else: + block.extend(para) + + # Return the block + return block + + def _untag(self, tree): + for i, child in enumerate(tree): + if isinstance(child, Tree): + self._untag(child) + elif isinstance(child, tuple): + tree[i] = child[0] + else: + raise ValueError("expected child to be Tree or tuple") + return tree diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py new file mode 100644 index 0000000000000000000000000000000000000000..7328ca3239c6e746d328d5706dc05a09af918c14 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py @@ -0,0 +1,88 @@ +# Natural Language Toolkit: Carnegie Mellon Pronouncing Dictionary Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +The Carnegie Mellon Pronouncing Dictionary [cmudict.0.6] +ftp://ftp.cs.cmu.edu/project/speech/dict/ +Copyright 1998 Carnegie Mellon University + +File Format: Each line consists of an uppercased word, a counter +(for alternative pronunciations), and a transcription. Vowels are +marked for stress (1=primary, 2=secondary, 0=no stress). E.g.: +NATURAL 1 N AE1 CH ER0 AH0 L + +The dictionary contains 127069 entries. Of these, 119400 words are assigned +a unique pronunciation, 6830 words have two pronunciations, and 839 words have +three or more pronunciations. Many of these are fast-speech variants. + +Phonemes: There are 39 phonemes, as shown below: + +Phoneme Example Translation Phoneme Example Translation +------- ------- ----------- ------- ------- ----------- +AA odd AA D AE at AE T +AH hut HH AH T AO ought AO T +AW cow K AW AY hide HH AY D +B be B IY CH cheese CH IY Z +D dee D IY DH thee DH IY +EH Ed EH D ER hurt HH ER T +EY ate EY T F fee F IY +G green G R IY N HH he HH IY +IH it IH T IY eat IY T +JH gee JH IY K key K IY +L lee L IY M me M IY +N knee N IY NG ping P IH NG +OW oat OW T OY toy T OY +P pee P IY R read R IY D +S sea S IY SH she SH IY +T tea T IY TH theta TH EY T AH +UH hood HH UH D UW two T UW +V vee V IY W we W IY +Y yield Y IY L D Z zee Z IY +ZH seizure S IY ZH ER +""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.util import Index + + +class CMUDictCorpusReader(CorpusReader): + def entries(self): + """ + :return: the cmudict lexicon as a list of entries + containing (word, transcriptions) tuples. + """ + return concat( + [ + StreamBackedCorpusView(fileid, read_cmudict_block, encoding=enc) + for fileid, enc in self.abspaths(None, True) + ] + ) + + def words(self): + """ + :return: a list of all words defined in the cmudict lexicon. + """ + return [word.lower() for (word, _) in self.entries()] + + def dict(self): + """ + :return: the cmudict lexicon as a dictionary, whose keys are + lowercase words and whose values are lists of pronunciations. + """ + return dict(Index(self.entries())) + + +def read_cmudict_block(stream): + entries = [] + while len(entries) < 100: # Read 100 at a time. + line = stream.readline() + if line == "": + return entries # end of file. 
+ pieces = line.split() + entries.append((pieces[0].lower(), pieces[2:])) + return entries diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/conll.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/conll.py new file mode 100644 index 0000000000000000000000000000000000000000..3c3b30db900ee4eb4648b74d5904af04b60e1692 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/conll.py @@ -0,0 +1,579 @@ +# Natural Language Toolkit: CONLL Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Read CoNLL-style chunk fileids. +""" + +import textwrap + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag +from nltk.tree import Tree +from nltk.util import LazyConcatenation, LazyMap + + +class ConllCorpusReader(CorpusReader): + """ + A corpus reader for CoNLL-style files. These files consist of a + series of sentences, separated by blank lines. Each sentence is + encoded using a table (or "grid") of values, where each line + corresponds to a single word, and each column corresponds to an + annotation type. The set of columns used by CoNLL-style files can + vary from corpus to corpus; the ``ConllCorpusReader`` constructor + therefore takes an argument, ``columntypes``, which is used to + specify the columns that are used by a given corpus. By default + columns are split by consecutive whitespaces, with the + ``separator`` argument you can set a string to split by (e.g. + ``\'\t\'``). + + + @todo: Add support for reading from corpora where different + parallel files contain different columns. + @todo: Possibly add caching of the grid corpus view? This would + allow the same grid view to be used by different data access + methods (eg words() and parsed_sents() could both share the + same grid corpus view object). + @todo: Better support for -DOCSTART-. Currently, we just ignore + it, but it could be used to define methods that retrieve a + document at a time (eg parsed_documents()). + """ + + # ///////////////////////////////////////////////////////////////// + # Column Types + # ///////////////////////////////////////////////////////////////// + + WORDS = "words" #: column type for words + POS = "pos" #: column type for part-of-speech tags + TREE = "tree" #: column type for parse trees + CHUNK = "chunk" #: column type for chunk structures + NE = "ne" #: column type for named entities + SRL = "srl" #: column type for semantic role labels + IGNORE = "ignore" #: column type for column that should be ignored + + #: A list of all column types supported by the conll corpus reader. 
+ COLUMN_TYPES = (WORDS, POS, TREE, CHUNK, NE, SRL, IGNORE) + + # ///////////////////////////////////////////////////////////////// + # Constructor + # ///////////////////////////////////////////////////////////////// + + def __init__( + self, + root, + fileids, + columntypes, + chunk_types=None, + root_label="S", + pos_in_tree=False, + srl_includes_roleset=True, + encoding="utf8", + tree_class=Tree, + tagset=None, + separator=None, + ): + for columntype in columntypes: + if columntype not in self.COLUMN_TYPES: + raise ValueError("Bad column type %r" % columntype) + if isinstance(chunk_types, str): + chunk_types = [chunk_types] + self._chunk_types = chunk_types + self._colmap = {c: i for (i, c) in enumerate(columntypes)} + self._pos_in_tree = pos_in_tree + self._root_label = root_label # for chunks + self._srl_includes_roleset = srl_includes_roleset + self._tree_class = tree_class + CorpusReader.__init__(self, root, fileids, encoding) + self._tagset = tagset + self.sep = separator + + # ///////////////////////////////////////////////////////////////// + # Data Access Methods + # ///////////////////////////////////////////////////////////////// + + def words(self, fileids=None): + self._require(self.WORDS) + return LazyConcatenation(LazyMap(self._get_words, self._grids(fileids))) + + def sents(self, fileids=None): + self._require(self.WORDS) + return LazyMap(self._get_words, self._grids(fileids)) + + def tagged_words(self, fileids=None, tagset=None): + self._require(self.WORDS, self.POS) + + def get_tagged_words(grid): + return self._get_tagged_words(grid, tagset) + + return LazyConcatenation(LazyMap(get_tagged_words, self._grids(fileids))) + + def tagged_sents(self, fileids=None, tagset=None): + self._require(self.WORDS, self.POS) + + def get_tagged_words(grid): + return self._get_tagged_words(grid, tagset) + + return LazyMap(get_tagged_words, self._grids(fileids)) + + def chunked_words(self, fileids=None, chunk_types=None, tagset=None): + self._require(self.WORDS, self.POS, self.CHUNK) + if chunk_types is None: + chunk_types = self._chunk_types + + def get_chunked_words(grid): # capture chunk_types as local var + return self._get_chunked_words(grid, chunk_types, tagset) + + return LazyConcatenation(LazyMap(get_chunked_words, self._grids(fileids))) + + def chunked_sents(self, fileids=None, chunk_types=None, tagset=None): + self._require(self.WORDS, self.POS, self.CHUNK) + if chunk_types is None: + chunk_types = self._chunk_types + + def get_chunked_words(grid): # capture chunk_types as local var + return self._get_chunked_words(grid, chunk_types, tagset) + + return LazyMap(get_chunked_words, self._grids(fileids)) + + def parsed_sents(self, fileids=None, pos_in_tree=None, tagset=None): + self._require(self.WORDS, self.POS, self.TREE) + if pos_in_tree is None: + pos_in_tree = self._pos_in_tree + + def get_parsed_sent(grid): # capture pos_in_tree as local var + return self._get_parsed_sent(grid, pos_in_tree, tagset) + + return LazyMap(get_parsed_sent, self._grids(fileids)) + + def srl_spans(self, fileids=None): + self._require(self.SRL) + return LazyMap(self._get_srl_spans, self._grids(fileids)) + + def srl_instances(self, fileids=None, pos_in_tree=None, flatten=True): + self._require(self.WORDS, self.POS, self.TREE, self.SRL) + if pos_in_tree is None: + pos_in_tree = self._pos_in_tree + + def get_srl_instances(grid): # capture pos_in_tree as local var + return self._get_srl_instances(grid, pos_in_tree) + + result = LazyMap(get_srl_instances, self._grids(fileids)) + if flatten: + result = 
LazyConcatenation(result) + return result + + def iob_words(self, fileids=None, tagset=None): + """ + :return: a list of word/tag/IOB tuples + :rtype: list(tuple) + :param fileids: the list of fileids that make up this corpus + :type fileids: None or str or list + """ + self._require(self.WORDS, self.POS, self.CHUNK) + + def get_iob_words(grid): + return self._get_iob_words(grid, tagset) + + return LazyConcatenation(LazyMap(get_iob_words, self._grids(fileids))) + + def iob_sents(self, fileids=None, tagset=None): + """ + :return: a list of lists of word/tag/IOB tuples + :rtype: list(list) + :param fileids: the list of fileids that make up this corpus + :type fileids: None or str or list + """ + self._require(self.WORDS, self.POS, self.CHUNK) + + def get_iob_words(grid): + return self._get_iob_words(grid, tagset) + + return LazyMap(get_iob_words, self._grids(fileids)) + + # ///////////////////////////////////////////////////////////////// + # Grid Reading + # ///////////////////////////////////////////////////////////////// + + def _grids(self, fileids=None): + # n.b.: we could cache the object returned here (keyed on + # fileids), which would let us reuse the same corpus view for + # different things (eg srl and parse trees). + return concat( + [ + StreamBackedCorpusView(fileid, self._read_grid_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_grid_block(self, stream): + grids = [] + for block in read_blankline_block(stream): + block = block.strip() + if not block: + continue + + grid = [line.split(self.sep) for line in block.split("\n")] + + # If there's a docstart row, then discard. ([xx] eventually it + # would be good to actually use it) + if grid[0][self._colmap.get("words", 0)] == "-DOCSTART-": + del grid[0] + + # Check that the grid is consistent. + for row in grid: + if len(row) != len(grid[0]): + raise ValueError("Inconsistent number of columns:\n%s" % block) + grids.append(grid) + return grids + + # ///////////////////////////////////////////////////////////////// + # Transforms + # ///////////////////////////////////////////////////////////////// + # given a grid, transform it into some representation (e.g., + # a list of words or a parse tree). + + def _get_words(self, grid): + return self._get_column(grid, self._colmap["words"]) + + def _get_tagged_words(self, grid, tagset=None): + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + return list(zip(self._get_column(grid, self._colmap["words"]), pos_tags)) + + def _get_iob_words(self, grid, tagset=None): + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + return list( + zip( + self._get_column(grid, self._colmap["words"]), + pos_tags, + self._get_column(grid, self._colmap["chunk"]), + ) + ) + + def _get_chunked_words(self, grid, chunk_types, tagset=None): + # n.b.: this method is very similar to conllstr2tree. 
+ words = self._get_column(grid, self._colmap["words"]) + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + chunk_tags = self._get_column(grid, self._colmap["chunk"]) + + stack = [Tree(self._root_label, [])] + + for (word, pos_tag, chunk_tag) in zip(words, pos_tags, chunk_tags): + if chunk_tag == "O": + state, chunk_type = "O", "" + else: + (state, chunk_type) = chunk_tag.split("-") + # If it's a chunk we don't care about, treat it as O. + if chunk_types is not None and chunk_type not in chunk_types: + state = "O" + # Treat a mismatching I like a B. + if state == "I" and chunk_type != stack[-1].label(): + state = "B" + # For B or I: close any open chunks + if state in "BO" and len(stack) == 2: + stack.pop() + # For B: start a new chunk. + if state == "B": + new_chunk = Tree(chunk_type, []) + stack[-1].append(new_chunk) + stack.append(new_chunk) + # Add the word token. + stack[-1].append((word, pos_tag)) + + return stack[0] + + def _get_parsed_sent(self, grid, pos_in_tree, tagset=None): + words = self._get_column(grid, self._colmap["words"]) + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + parse_tags = self._get_column(grid, self._colmap["tree"]) + + treestr = "" + for (word, pos_tag, parse_tag) in zip(words, pos_tags, parse_tags): + if word == "(": + word = "-LRB-" + if word == ")": + word = "-RRB-" + if pos_tag == "(": + pos_tag = "-LRB-" + if pos_tag == ")": + pos_tag = "-RRB-" + (left, right) = parse_tag.split("*") + right = right.count(")") * ")" # only keep ')'. + treestr += f"{left} ({pos_tag} {word}) {right}" + try: + tree = self._tree_class.fromstring(treestr) + except (ValueError, IndexError): + tree = self._tree_class.fromstring(f"({self._root_label} {treestr})") + + if not pos_in_tree: + for subtree in tree.subtrees(): + for i, child in enumerate(subtree): + if ( + isinstance(child, Tree) + and len(child) == 1 + and isinstance(child[0], str) + ): + subtree[i] = (child[0], child.label()) + + return tree + + def _get_srl_spans(self, grid): + """ + list of list of (start, end), tag) tuples + """ + if self._srl_includes_roleset: + predicates = self._get_column(grid, self._colmap["srl"] + 1) + start_col = self._colmap["srl"] + 2 + else: + predicates = self._get_column(grid, self._colmap["srl"]) + start_col = self._colmap["srl"] + 1 + + # Count how many predicates there are. This tells us how many + # columns to expect for SRL data. 
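+        # Descriptive note (added): each predicate gets its own SRL column.
+        # Within a column, argument spans use a bracket notation such as
+        # "(A0*", "*", "*)": the text before "*" opens tags, and every ")"
+        # after it closes the most recently opened tag.  The stack in the
+        # loop below tracks those open tags to recover (start, end) spans.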
+ num_preds = len([p for p in predicates if p != "-"]) + + spanlists = [] + for i in range(num_preds): + col = self._get_column(grid, start_col + i) + spanlist = [] + stack = [] + for wordnum, srl_tag in enumerate(col): + (left, right) = srl_tag.split("*") + for tag in left.split("("): + if tag: + stack.append((tag, wordnum)) + for i in range(right.count(")")): + (tag, start) = stack.pop() + spanlist.append(((start, wordnum + 1), tag)) + spanlists.append(spanlist) + + return spanlists + + def _get_srl_instances(self, grid, pos_in_tree): + tree = self._get_parsed_sent(grid, pos_in_tree) + spanlists = self._get_srl_spans(grid) + if self._srl_includes_roleset: + predicates = self._get_column(grid, self._colmap["srl"] + 1) + rolesets = self._get_column(grid, self._colmap["srl"]) + else: + predicates = self._get_column(grid, self._colmap["srl"]) + rolesets = [None] * len(predicates) + + instances = ConllSRLInstanceList(tree) + for wordnum, predicate in enumerate(predicates): + if predicate == "-": + continue + # Decide which spanlist to use. Don't assume that they're + # sorted in the same order as the predicates (even though + # they usually are). + for spanlist in spanlists: + for (start, end), tag in spanlist: + if wordnum in range(start, end) and tag in ("V", "C-V"): + break + else: + continue + break + else: + raise ValueError("No srl column found for %r" % predicate) + instances.append( + ConllSRLInstance(tree, wordnum, predicate, rolesets[wordnum], spanlist) + ) + + return instances + + # ///////////////////////////////////////////////////////////////// + # Helper Methods + # ///////////////////////////////////////////////////////////////// + + def _require(self, *columntypes): + for columntype in columntypes: + if columntype not in self._colmap: + raise ValueError( + "This corpus does not contain a %s " "column." % columntype + ) + + @staticmethod + def _get_column(grid, column_index): + return [grid[i][column_index] for i in range(len(grid))] + + +class ConllSRLInstance: + """ + An SRL instance from a CoNLL corpus, which identifies and + providing labels for the arguments of a single verb. + """ + + # [xx] add inst.core_arguments, inst.argm_arguments? + + def __init__(self, tree, verb_head, verb_stem, roleset, tagged_spans): + self.verb = [] + """A list of the word indices of the words that compose the + verb whose arguments are identified by this instance. + This will contain multiple word indices when multi-word + verbs are used (e.g. 'turn on').""" + + self.verb_head = verb_head + """The word index of the head word of the verb whose arguments + are identified by this instance. E.g., for a sentence that + uses the verb 'turn on,' ``verb_head`` will be the word index + of the word 'turn'.""" + + self.verb_stem = verb_stem + + self.roleset = roleset + + self.arguments = [] + """A list of ``(argspan, argid)`` tuples, specifying the location + and type for each of the arguments identified by this + instance. ``argspan`` is a tuple ``start, end``, indicating + that the argument consists of the ``words[start:end]``.""" + + self.tagged_spans = tagged_spans + """A list of ``(span, id)`` tuples, specifying the location and + type for each of the arguments, as well as the verb pieces, + that make up this instance.""" + + self.tree = tree + """The parse tree for the sentence containing this instance.""" + + self.words = tree.leaves() + """A list of the words in the sentence containing this + instance.""" + + # Fill in the self.verb and self.arguments values. 
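+        # Descriptive note (added): spans tagged "V" or "C-V" contribute their
+        # word indices to ``self.verb``; every other tagged span is recorded
+        # in ``self.arguments`` as an ((start, end), argid) pair.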
+ for (start, end), tag in tagged_spans: + if tag in ("V", "C-V"): + self.verb += list(range(start, end)) + else: + self.arguments.append(((start, end), tag)) + + def __repr__(self): + # Originally, its: + ##plural = 's' if len(self.arguments) != 1 else '' + plural = "s" if len(self.arguments) != 1 else "" + return "" % ( + (self.verb_stem, len(self.arguments), plural) + ) + + def pprint(self): + verbstr = " ".join(self.words[i][0] for i in self.verb) + hdr = f"SRL for {verbstr!r} (stem={self.verb_stem!r}):\n" + s = "" + for i, word in enumerate(self.words): + if isinstance(word, tuple): + word = word[0] + for (start, end), argid in self.arguments: + if i == start: + s += "[%s " % argid + if i == end: + s += "] " + if i in self.verb: + word = "<<%s>>" % word + s += word + " " + return hdr + textwrap.fill( + s.replace(" ]", "]"), initial_indent=" ", subsequent_indent=" " + ) + + +class ConllSRLInstanceList(list): + """ + Set of instances for a single sentence + """ + + def __init__(self, tree, instances=()): + self.tree = tree + list.__init__(self, instances) + + def __str__(self): + return self.pprint() + + def pprint(self, include_tree=False): + # Sanity check: trees should be the same + for inst in self: + if inst.tree != self.tree: + raise ValueError("Tree mismatch!") + + # If desired, add trees: + if include_tree: + words = self.tree.leaves() + pos = [None] * len(words) + synt = ["*"] * len(words) + self._tree2conll(self.tree, 0, words, pos, synt) + + s = "" + for i in range(len(words)): + # optional tree columns + if include_tree: + s += "%-20s " % words[i] + s += "%-8s " % pos[i] + s += "%15s*%-8s " % tuple(synt[i].split("*")) + + # verb head column + for inst in self: + if i == inst.verb_head: + s += "%-20s " % inst.verb_stem + break + else: + s += "%-20s " % "-" + # Remaining columns: self + for inst in self: + argstr = "*" + for (start, end), argid in inst.tagged_spans: + if i == start: + argstr = f"({argid}{argstr}" + if i == (end - 1): + argstr += ")" + s += "%-12s " % argstr + s += "\n" + return s + + def _tree2conll(self, tree, wordnum, words, pos, synt): + assert isinstance(tree, Tree) + if len(tree) == 1 and isinstance(tree[0], str): + pos[wordnum] = tree.label() + assert words[wordnum] == tree[0] + return wordnum + 1 + elif len(tree) == 1 and isinstance(tree[0], tuple): + assert len(tree[0]) == 2 + pos[wordnum], pos[wordnum] = tree[0] + return wordnum + 1 + else: + synt[wordnum] = f"({tree.label()}{synt[wordnum]}" + for child in tree: + wordnum = self._tree2conll(child, wordnum, words, pos, synt) + synt[wordnum - 1] += ")" + return wordnum + + +class ConllChunkCorpusReader(ConllCorpusReader): + """ + A ConllCorpusReader whose data file contains three columns: words, + pos, and chunk. 
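+
+    For example, a reader over CoNLL-2000-style chunking files could be
+    constructed (hypothetically, depending on the corpus layout) as
+    ``ConllChunkCorpusReader(root, r'.*\.txt', ('NP', 'VP', 'PP'))``.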
+ """ + + def __init__( + self, root, fileids, chunk_types, encoding="utf8", tagset=None, separator=None + ): + ConllCorpusReader.__init__( + self, + root, + fileids, + ("words", "pos", "chunk"), + chunk_types=chunk_types, + encoding=encoding, + tagset=tagset, + separator=separator, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/crubadan.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/crubadan.py new file mode 100644 index 0000000000000000000000000000000000000000..d7bcf8a05cf86123ce952e802a71bb5dd637bd42 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/crubadan.py @@ -0,0 +1,106 @@ +# Natural Language Toolkit: An Crubadan N-grams Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Avital Pekker +# +# URL: +# For license information, see LICENSE.TXT + +""" +An NLTK interface for the n-gram statistics gathered from +the corpora for each language using An Crubadan. + +There are multiple potential applications for the data but +this reader was created with the goal of using it in the +context of language identification. + +For details about An Crubadan, this data, and its potential uses, see: +http://borel.slu.edu/crubadan/index.html +""" + +import re +from os import path + +from nltk.corpus.reader import CorpusReader +from nltk.data import ZipFilePathPointer +from nltk.probability import FreqDist + + +class CrubadanCorpusReader(CorpusReader): + """ + A corpus reader used to access language An Crubadan n-gram files. + """ + + _LANG_MAPPER_FILE = "table.txt" + _all_lang_freq = {} + + def __init__(self, root, fileids, encoding="utf8", tagset=None): + super().__init__(root, fileids, encoding="utf8") + self._lang_mapping_data = [] + self._load_lang_mapping_data() + + def lang_freq(self, lang): + """Return n-gram FreqDist for a specific language + given ISO 639-3 language code""" + + if lang not in self._all_lang_freq: + self._all_lang_freq[lang] = self._load_lang_ngrams(lang) + + return self._all_lang_freq[lang] + + def langs(self): + """Return a list of supported languages as ISO 639-3 codes""" + return [row[1] for row in self._lang_mapping_data] + + def iso_to_crubadan(self, lang): + """Return internal Crubadan code based on ISO 639-3 code""" + for i in self._lang_mapping_data: + if i[1].lower() == lang.lower(): + return i[0] + + def crubadan_to_iso(self, lang): + """Return ISO 639-3 code given internal Crubadan code""" + for i in self._lang_mapping_data: + if i[0].lower() == lang.lower(): + return i[1] + + def _load_lang_mapping_data(self): + """Load language mappings between codes and description from table.txt""" + if isinstance(self.root, ZipFilePathPointer): + raise RuntimeError( + "Please install the 'crubadan' corpus first, use nltk.download()" + ) + + mapper_file = path.join(self.root, self._LANG_MAPPER_FILE) + if self._LANG_MAPPER_FILE not in self.fileids(): + raise RuntimeError("Could not find language mapper file: " + mapper_file) + + with open(mapper_file, encoding="utf-8") as raw: + strip_raw = raw.read().strip() + + self._lang_mapping_data = [row.split("\t") for row in strip_raw.split("\n")] + + def _load_lang_ngrams(self, lang): + """Load single n-gram language file given the ISO 639-3 language code + and return its FreqDist""" + + if lang not in self.langs(): + raise RuntimeError("Unsupported language.") + + crubadan_code = self.iso_to_crubadan(lang) + ngram_file = path.join(self.root, crubadan_code + "-3grams.txt") + + if not path.isfile(ngram_file): + raise RuntimeError("No N-gram file found 
for requested language.") + + counts = FreqDist() + with open(ngram_file, encoding="utf-8") as f: + for line in f: + data = line.split(" ") + + ngram = data[1].strip("\n") + freq = int(data[0]) + + counts[ngram] = freq + + return counts diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..87f56d4b5410a6dc419cd58538d3f4499478a205 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py @@ -0,0 +1,115 @@ +# Natural Language Toolkit: Dependency Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Kepa Sarasola +# Iker Manterola +# +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.parse import DependencyGraph +from nltk.tokenize import * + + +class DependencyCorpusReader(SyntaxCorpusReader): + def __init__( + self, + root, + fileids, + encoding="utf8", + word_tokenizer=TabTokenizer(), + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + para_block_reader=read_blankline_block, + ): + SyntaxCorpusReader.__init__(self, root, fileids, encoding) + + ######################################################### + + def words(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, False, False, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def tagged_words(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, True, False, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def sents(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, False, True, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def tagged_sents(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, True, True, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def parsed_sents(self, fileids=None): + sents = concat( + [ + DependencyCorpusView(fileid, False, True, True, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + return [DependencyGraph(sent) for sent in sents] + + +class DependencyCorpusView(StreamBackedCorpusView): + _DOCSTART = "-DOCSTART- -DOCSTART- O\n" # dokumentu hasiera definitzen da + + def __init__( + self, + corpus_file, + tagged, + group_by_sent, + dependencies, + chunk_types=None, + encoding="utf8", + ): + self._tagged = tagged + self._dependencies = dependencies + self._group_by_sent = group_by_sent + self._chunk_types = chunk_types + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + # Read the next sentence. + sent = read_blankline_block(stream)[0].strip() + # Strip off the docstart marker, if present. 
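+        # Descriptive note (added): some CoNLL files begin each document with
+        # a dummy "-DOCSTART-" row that carries no linguistic content, so it
+        # is discarded before the sentence is parsed.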
+ if sent.startswith(self._DOCSTART): + sent = sent[len(self._DOCSTART) :].lstrip() + + # extract word and tag from any of the formats + if not self._dependencies: + lines = [line.split("\t") for line in sent.split("\n")] + if len(lines[0]) == 3 or len(lines[0]) == 4: + sent = [(line[0], line[1]) for line in lines] + elif len(lines[0]) == 10: + sent = [(line[1], line[4]) for line in lines] + else: + raise ValueError("Unexpected number of fields in dependency tree file") + + # discard tags if they weren't requested + if not self._tagged: + sent = [word for (word, tag) in sent] + + # Return the result. + if self._group_by_sent: + return [sent] + else: + return list(sent) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/framenet.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/framenet.py new file mode 100644 index 0000000000000000000000000000000000000000..6eaa1ad8931ab407bac92d0ea3e6f2e60f74d0e1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/framenet.py @@ -0,0 +1,3442 @@ +# Natural Language Toolkit: Framenet Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Chuck Wooters , +# Nathan Schneider +# URL: +# For license information, see LICENSE.TXT + + +""" +Corpus reader for the FrameNet 1.7 lexicon and corpus. +""" + +import itertools +import os +import re +import sys +import textwrap +import types +from collections import OrderedDict, defaultdict +from itertools import zip_longest +from operator import itemgetter +from pprint import pprint + +from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView +from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap + +__docformat__ = "epytext en" + + +def mimic_wrap(lines, wrap_at=65, **kwargs): + """ + Wrap the first of 'lines' with textwrap and the remaining lines at exactly the same + positions as the first. + """ + l0 = textwrap.fill(lines[0], wrap_at, drop_whitespace=False).split("\n") + yield l0 + + def _(line): + il0 = 0 + while line and il0 < len(l0) - 1: + yield line[: len(l0[il0])] + line = line[len(l0[il0]) :] + il0 += 1 + if line: # Remaining stuff on this line past the end of the mimicked line. + # So just textwrap this line. + yield from textwrap.fill(line, wrap_at, drop_whitespace=False).split("\n") + + for l in lines[1:]: + yield list(_(l)) + + +def _pretty_longstring(defstr, prefix="", wrap_at=65): + + """ + Helper function for pretty-printing a long string. + + :param defstr: The string to be printed. + :type defstr: str + :return: A nicely formatted string representation of the long string. + :rtype: str + """ + + outstr = "" + for line in textwrap.fill(defstr, wrap_at).split("\n"): + outstr += prefix + line + "\n" + return outstr + + +def _pretty_any(obj): + + """ + Helper function for pretty-printing any AttrDict object. + + :param obj: The obj to be printed. + :type obj: AttrDict + :return: A nicely formatted string representation of the AttrDict object. + :rtype: str + """ + + outstr = "" + for k in obj: + if isinstance(obj[k], str) and len(obj[k]) > 65: + outstr += f"[{k}]\n" + outstr += "{}".format(_pretty_longstring(obj[k], prefix=" ")) + outstr += "\n" + else: + outstr += f"[{k}] {obj[k]}\n" + + return outstr + + +def _pretty_semtype(st): + + """ + Helper function for pretty-printing a semantic type. + + :param st: The semantic type to be printed. + :type st: AttrDict + :return: A nicely formatted string representation of the semantic type. 
+ :rtype: str + """ + + semkeys = st.keys() + if len(semkeys) == 1: + return "" + + outstr = "" + outstr += "semantic type ({0.ID}): {0.name}\n".format(st) + if "abbrev" in semkeys: + outstr += f"[abbrev] {st.abbrev}\n" + if "definition" in semkeys: + outstr += "[definition]\n" + outstr += _pretty_longstring(st.definition, " ") + outstr += f"[rootType] {st.rootType.name}({st.rootType.ID})\n" + if st.superType is None: + outstr += "[superType] \n" + else: + outstr += f"[superType] {st.superType.name}({st.superType.ID})\n" + outstr += f"[subTypes] {len(st.subTypes)} subtypes\n" + outstr += ( + " " + + ", ".join(f"{x.name}({x.ID})" for x in st.subTypes) + + "\n" * (len(st.subTypes) > 0) + ) + return outstr + + +def _pretty_frame_relation_type(freltyp): + + """ + Helper function for pretty-printing a frame relation type. + + :param freltyp: The frame relation type to be printed. + :type freltyp: AttrDict + :return: A nicely formatted string representation of the frame relation type. + :rtype: str + """ + outstr = " {0.subFrameName}>".format( + freltyp + ) + return outstr + + +def _pretty_frame_relation(frel): + + """ + Helper function for pretty-printing a frame relation. + + :param frel: The frame relation to be printed. + :type frel: AttrDict + :return: A nicely formatted string representation of the frame relation. + :rtype: str + """ + outstr = "<{0.type.superFrameName}={0.superFrameName} -- {0.type.name} -> {0.type.subFrameName}={0.subFrameName}>".format( + frel + ) + return outstr + + +def _pretty_fe_relation(ferel): + + """ + Helper function for pretty-printing an FE relation. + + :param ferel: The FE relation to be printed. + :type ferel: AttrDict + :return: A nicely formatted string representation of the FE relation. + :rtype: str + """ + outstr = "<{0.type.superFrameName}={0.frameRelation.superFrameName}.{0.superFEName} -- {0.type.name} -> {0.type.subFrameName}={0.frameRelation.subFrameName}.{0.subFEName}>".format( + ferel + ) + return outstr + + +def _pretty_lu(lu): + + """ + Helper function for pretty-printing a lexical unit. + + :param lu: The lu to be printed. + :type lu: AttrDict + :return: A nicely formatted string representation of the lexical unit. 
+ :rtype: str + """ + + lukeys = lu.keys() + outstr = "" + outstr += "lexical unit ({0.ID}): {0.name}\n\n".format(lu) + if "definition" in lukeys: + outstr += "[definition]\n" + outstr += _pretty_longstring(lu.definition, " ") + if "frame" in lukeys: + outstr += f"\n[frame] {lu.frame.name}({lu.frame.ID})\n" + if "incorporatedFE" in lukeys: + outstr += f"\n[incorporatedFE] {lu.incorporatedFE}\n" + if "POS" in lukeys: + outstr += f"\n[POS] {lu.POS}\n" + if "status" in lukeys: + outstr += f"\n[status] {lu.status}\n" + if "totalAnnotated" in lukeys: + outstr += f"\n[totalAnnotated] {lu.totalAnnotated} annotated examples\n" + if "lexemes" in lukeys: + outstr += "\n[lexemes] {}\n".format( + " ".join(f"{lex.name}/{lex.POS}" for lex in lu.lexemes) + ) + if "semTypes" in lukeys: + outstr += f"\n[semTypes] {len(lu.semTypes)} semantic types\n" + outstr += ( + " " * (len(lu.semTypes) > 0) + + ", ".join(f"{x.name}({x.ID})" for x in lu.semTypes) + + "\n" * (len(lu.semTypes) > 0) + ) + if "URL" in lukeys: + outstr += f"\n[URL] {lu.URL}\n" + if "subCorpus" in lukeys: + subc = [x.name for x in lu.subCorpus] + outstr += f"\n[subCorpus] {len(lu.subCorpus)} subcorpora\n" + for line in textwrap.fill(", ".join(sorted(subc)), 60).split("\n"): + outstr += f" {line}\n" + if "exemplars" in lukeys: + outstr += "\n[exemplars] {} sentences across all subcorpora\n".format( + len(lu.exemplars) + ) + + return outstr + + +def _pretty_exemplars(exemplars, lu): + """ + Helper function for pretty-printing a list of exemplar sentences for a lexical unit. + + :param sent: The list of exemplar sentences to be printed. + :type sent: list(AttrDict) + :return: An index of the text of the exemplar sentences. + :rtype: str + """ + + outstr = "" + outstr += "exemplar sentences for {0.name} in {0.frame.name}:\n\n".format(lu) + for i, sent in enumerate(exemplars): + outstr += f"[{i}] {sent.text}\n" + outstr += "\n" + return outstr + + +def _pretty_fulltext_sentences(sents): + """ + Helper function for pretty-printing a list of annotated sentences for a full-text document. + + :param sent: The list of sentences to be printed. + :type sent: list(AttrDict) + :return: An index of the text of the sentences. + :rtype: str + """ + + outstr = "" + outstr += "full-text document ({0.ID}) {0.name}:\n\n".format(sents) + outstr += "[corpid] {0.corpid}\n[corpname] {0.corpname}\n[description] {0.description}\n[URL] {0.URL}\n\n".format( + sents + ) + outstr += f"[sentence]\n" + for i, sent in enumerate(sents.sentence): + outstr += f"[{i}] {sent.text}\n" + outstr += "\n" + return outstr + + +def _pretty_fulltext_sentence(sent): + """ + Helper function for pretty-printing an annotated sentence from a full-text document. + + :param sent: The sentence to be printed. + :type sent: list(AttrDict) + :return: The text of the sentence with annotation set indices on frame targets. + :rtype: str + """ + + outstr = "" + outstr += "full-text sentence ({0.ID}) in {1}:\n\n".format( + sent, sent.doc.get("name", sent.doc.description) + ) + outstr += f"\n[POS] {len(sent.POS)} tags\n" + outstr += f"\n[POS_tagset] {sent.POS_tagset}\n\n" + outstr += "[text] + [annotationSet]\n\n" + outstr += sent._ascii() # -> _annotation_ascii() + outstr += "\n" + return outstr + + +def _pretty_pos(aset): + """ + Helper function for pretty-printing a sentence with its POS tags. + + :param aset: The POS annotation set of the sentence to be printed. + :type sent: list(AttrDict) + :return: The text of the sentence and its POS tags. 
+ :rtype: str + """ + + outstr = "" + outstr += "POS annotation set ({0.ID}) {0.POS_tagset} in sentence {0.sent.ID}:\n\n".format( + aset + ) + + # list the target spans and their associated aset index + overt = sorted(aset.POS) + + sent = aset.sent + s0 = sent.text + s1 = "" + s2 = "" + i = 0 + adjust = 0 + for j, k, lbl in overt: + assert j >= i, ("Overlapping targets?", (j, k, lbl)) + s1 += " " * (j - i) + "-" * (k - j) + if len(lbl) > (k - j): + # add space in the sentence to make room for the annotation index + amt = len(lbl) - (k - j) + s0 = ( + s0[: k + adjust] + "~" * amt + s0[k + adjust :] + ) # '~' to prevent line wrapping + s1 = s1[: k + adjust] + " " * amt + s1[k + adjust :] + adjust += amt + s2 += " " * (j - i) + lbl.ljust(k - j) + i = k + + long_lines = [s0, s1, s2] + + outstr += "\n\n".join( + map("\n".join, zip_longest(*mimic_wrap(long_lines), fillvalue=" ")) + ).replace("~", " ") + outstr += "\n" + return outstr + + +def _pretty_annotation(sent, aset_level=False): + """ + Helper function for pretty-printing an exemplar sentence for a lexical unit. + + :param sent: An annotation set or exemplar sentence to be printed. + :param aset_level: If True, 'sent' is actually an annotation set within a sentence. + :type sent: AttrDict + :return: A nicely formatted string representation of the exemplar sentence + with its target, frame, and FE annotations. + :rtype: str + """ + + sentkeys = sent.keys() + outstr = "annotation set" if aset_level else "exemplar sentence" + outstr += f" ({sent.ID}):\n" + if aset_level: # TODO: any UNANN exemplars? + outstr += f"\n[status] {sent.status}\n" + for k in ("corpID", "docID", "paragNo", "sentNo", "aPos"): + if k in sentkeys: + outstr += f"[{k}] {sent[k]}\n" + outstr += ( + "\n[LU] ({0.ID}) {0.name} in {0.frame.name}\n".format(sent.LU) + if sent.LU + else "\n[LU] Not found!" + ) + outstr += "\n[frame] ({0.ID}) {0.name}\n".format( + sent.frame + ) # redundant with above, but .frame is convenient + if not aset_level: + outstr += "\n[annotationSet] {} annotation sets\n".format( + len(sent.annotationSet) + ) + outstr += f"\n[POS] {len(sent.POS)} tags\n" + outstr += f"\n[POS_tagset] {sent.POS_tagset}\n" + outstr += "\n[GF] {} relation{}\n".format( + len(sent.GF), "s" if len(sent.GF) != 1 else "" + ) + outstr += "\n[PT] {} phrase{}\n".format( + len(sent.PT), "s" if len(sent.PT) != 1 else "" + ) + """ + Special Layers + -------------- + + The 'NER' layer contains, for some of the data, named entity labels. + + The 'WSL' (word status layer) contains, for some of the data, + spans which should not in principle be considered targets (NT). + + The 'Other' layer records relative clause constructions (Rel=relativizer, Ant=antecedent), + pleonastic 'it' (Null), and existential 'there' (Exist). + On occasion they are duplicated by accident (e.g., annotationSet 1467275 in lu6700.xml). + + The 'Sent' layer appears to contain labels that the annotator has flagged the + sentence with for their convenience: values include + 'sense1', 'sense2', 'sense3', etc.; + 'Blend', 'Canonical', 'Idiom', 'Metaphor', 'Special-Sent', + 'keepS', 'deleteS', 'reexamine' + (sometimes they are duplicated for no apparent reason). + + The POS-specific layers may contain the following kinds of spans: + Asp (aspectual particle), Non-Asp (non-aspectual particle), + Cop (copula), Supp (support), Ctrlr (controller), + Gov (governor), X. Gov and X always cooccur. + + >>> from nltk.corpus import framenet as fn + >>> def f(luRE, lyr, ignore=set()): + ... 
for i,ex in enumerate(fn.exemplars(luRE)): + ... if lyr in ex and ex[lyr] and set(zip(*ex[lyr])[2]) - ignore: + ... print(i,ex[lyr]) + + - Verb: Asp, Non-Asp + - Noun: Cop, Supp, Ctrlr, Gov, X + - Adj: Cop, Supp, Ctrlr, Gov, X + - Prep: Cop, Supp, Ctrlr + - Adv: Ctrlr + - Scon: (none) + - Art: (none) + """ + for lyr in ("NER", "WSL", "Other", "Sent"): + if lyr in sent and sent[lyr]: + outstr += "\n[{}] {} entr{}\n".format( + lyr, len(sent[lyr]), "ies" if len(sent[lyr]) != 1 else "y" + ) + outstr += "\n[text] + [Target] + [FE]" + # POS-specific layers: syntactically important words that are neither the target + # nor the FEs. Include these along with the first FE layer but with '^' underlining. + for lyr in ("Verb", "Noun", "Adj", "Adv", "Prep", "Scon", "Art"): + if lyr in sent and sent[lyr]: + outstr += f" + [{lyr}]" + if "FE2" in sentkeys: + outstr += " + [FE2]" + if "FE3" in sentkeys: + outstr += " + [FE3]" + outstr += "\n\n" + outstr += sent._ascii() # -> _annotation_ascii() + outstr += "\n" + + return outstr + + +def _annotation_ascii(sent): + """ + Given a sentence or FE annotation set, construct the width-limited string showing + an ASCII visualization of the sentence's annotations, calling either + _annotation_ascii_frames() or _annotation_ascii_FEs() as appropriate. + This will be attached as a method to appropriate AttrDict instances + and called in the full pretty-printing of the instance. + """ + if sent._type == "fulltext_sentence" or ( + "annotationSet" in sent and len(sent.annotationSet) > 2 + ): + # a full-text sentence OR sentence with multiple targets. + # (multiple targets = >2 annotation sets, because the first annotation set is POS.) + return _annotation_ascii_frames(sent) + else: # an FE annotation set, or an LU sentence with 1 target + return _annotation_ascii_FEs(sent) + + +def _annotation_ascii_frames(sent): + """ + ASCII string rendering of the sentence along with its targets and frame names. + Called for all full-text sentences, as well as the few LU sentences with multiple + targets (e.g., fn.lu(6412).exemplars[82] has two want.v targets). + Line-wrapped to limit the display width. + """ + # list the target spans and their associated aset index + overt = [] + for a, aset in enumerate(sent.annotationSet[1:]): + for j, k in aset.Target: + indexS = f"[{a + 1}]" + if aset.status == "UNANN" or aset.LU.status == "Problem": + indexS += " " + if aset.status == "UNANN": + indexS += "!" # warning indicator that there is a frame annotation but no FE annotation + if aset.LU.status == "Problem": + indexS += "?" # warning indicator that there is a missing LU definition (because the LU has Problem status) + overt.append((j, k, aset.LU.frame.name, indexS)) + overt = sorted(overt) + + duplicates = set() + for o, (j, k, fname, asetIndex) in enumerate(overt): + if o > 0 and j <= overt[o - 1][1]: + # multiple annotation sets on the same target + # (e.g. due to a coordination construction or multiple annotators) + if ( + overt[o - 1][:2] == (j, k) and overt[o - 1][2] == fname + ): # same target, same frame + # splice indices together + combinedIndex = ( + overt[o - 1][3] + asetIndex + ) # e.g., '[1][2]', '[1]! [2]' + combinedIndex = combinedIndex.replace(" !", "! ").replace(" ?", "? 
") + overt[o - 1] = overt[o - 1][:3] + (combinedIndex,) + duplicates.add(o) + else: # different frames, same or overlapping targets + s = sent.text + for j, k, fname, asetIndex in overt: + s += "\n" + asetIndex + " " + sent.text[j:k] + " :: " + fname + s += "\n(Unable to display sentence with targets marked inline due to overlap)" + return s + for o in reversed(sorted(duplicates)): + del overt[o] + + s0 = sent.text + s1 = "" + s11 = "" + s2 = "" + i = 0 + adjust = 0 + fAbbrevs = OrderedDict() + for j, k, fname, asetIndex in overt: + if not j >= i: + assert j >= i, ( + "Overlapping targets?" + + ( + " UNANN" + if any(aset.status == "UNANN" for aset in sent.annotationSet[1:]) + else "" + ), + (j, k, asetIndex), + ) + s1 += " " * (j - i) + "*" * (k - j) + short = fname[: k - j] + if (k - j) < len(fname): + r = 0 + while short in fAbbrevs: + if fAbbrevs[short] == fname: + break + r += 1 + short = fname[: k - j - 1] + str(r) + else: # short not in fAbbrevs + fAbbrevs[short] = fname + s11 += " " * (j - i) + short.ljust(k - j) + if len(asetIndex) > (k - j): + # add space in the sentence to make room for the annotation index + amt = len(asetIndex) - (k - j) + s0 = ( + s0[: k + adjust] + "~" * amt + s0[k + adjust :] + ) # '~' to prevent line wrapping + s1 = s1[: k + adjust] + " " * amt + s1[k + adjust :] + s11 = s11[: k + adjust] + " " * amt + s11[k + adjust :] + adjust += amt + s2 += " " * (j - i) + asetIndex.ljust(k - j) + i = k + + long_lines = [s0, s1, s11, s2] + + outstr = "\n\n".join( + map("\n".join, zip_longest(*mimic_wrap(long_lines), fillvalue=" ")) + ).replace("~", " ") + outstr += "\n" + if fAbbrevs: + outstr += " (" + ", ".join("=".join(pair) for pair in fAbbrevs.items()) + ")" + assert len(fAbbrevs) == len(dict(fAbbrevs)), "Abbreviation clash" + + return outstr + + +def _annotation_ascii_FE_layer(overt, ni, feAbbrevs): + """Helper for _annotation_ascii_FEs().""" + s1 = "" + s2 = "" + i = 0 + for j, k, fename in overt: + s1 += " " * (j - i) + ("^" if fename.islower() else "-") * (k - j) + short = fename[: k - j] + if len(fename) > len(short): + r = 0 + while short in feAbbrevs: + if feAbbrevs[short] == fename: + break + r += 1 + short = fename[: k - j - 1] + str(r) + else: # short not in feAbbrevs + feAbbrevs[short] = fename + s2 += " " * (j - i) + short.ljust(k - j) + i = k + + sNI = "" + if ni: + sNI += " [" + ", ".join(":".join(x) for x in sorted(ni.items())) + "]" + return [s1, s2, sNI] + + +def _annotation_ascii_FEs(sent): + """ + ASCII string rendering of the sentence along with a single target and its FEs. + Secondary and tertiary FE layers are included if present. + 'sent' can be an FE annotation set or an LU sentence with a single target. + Line-wrapped to limit the display width. + """ + feAbbrevs = OrderedDict() + posspec = [] # POS-specific layer spans (e.g., Supp[ort], Cop[ula]) + posspec_separate = False + for lyr in ("Verb", "Noun", "Adj", "Adv", "Prep", "Scon", "Art"): + if lyr in sent and sent[lyr]: + for a, b, lbl in sent[lyr]: + if ( + lbl == "X" + ): # skip this, which covers an entire phrase typically containing the target and all its FEs + # (but do display the Gov) + continue + if any(1 for x, y, felbl in sent.FE[0] if x <= a < y or a <= x < b): + # overlap between one of the POS-specific layers and first FE layer + posspec_separate = ( + True # show POS-specific layers on a separate line + ) + posspec.append( + (a, b, lbl.lower().replace("-", "")) + ) # lowercase Cop=>cop, Non-Asp=>nonasp, etc. 
to distinguish from FE names + if posspec_separate: + POSSPEC = _annotation_ascii_FE_layer(posspec, {}, feAbbrevs) + FE1 = _annotation_ascii_FE_layer( + sorted(sent.FE[0] + (posspec if not posspec_separate else [])), + sent.FE[1], + feAbbrevs, + ) + FE2 = FE3 = None + if "FE2" in sent: + FE2 = _annotation_ascii_FE_layer(sent.FE2[0], sent.FE2[1], feAbbrevs) + if "FE3" in sent: + FE3 = _annotation_ascii_FE_layer(sent.FE3[0], sent.FE3[1], feAbbrevs) + + for i, j in sent.Target: + FE1span, FE1name, FE1exp = FE1 + if len(FE1span) < j: + FE1span += " " * (j - len(FE1span)) + if len(FE1name) < j: + FE1name += " " * (j - len(FE1name)) + FE1[1] = FE1name + FE1[0] = ( + FE1span[:i] + FE1span[i:j].replace(" ", "*").replace("-", "=") + FE1span[j:] + ) + long_lines = [sent.text] + if posspec_separate: + long_lines.extend(POSSPEC[:2]) + long_lines.extend([FE1[0], FE1[1] + FE1[2]]) # lines with no length limit + if FE2: + long_lines.extend([FE2[0], FE2[1] + FE2[2]]) + if FE3: + long_lines.extend([FE3[0], FE3[1] + FE3[2]]) + long_lines.append("") + outstr = "\n".join( + map("\n".join, zip_longest(*mimic_wrap(long_lines), fillvalue=" ")) + ) + if feAbbrevs: + outstr += "(" + ", ".join("=".join(pair) for pair in feAbbrevs.items()) + ")" + assert len(feAbbrevs) == len(dict(feAbbrevs)), "Abbreviation clash" + outstr += "\n" + + return outstr + + +def _pretty_fe(fe): + + """ + Helper function for pretty-printing a frame element. + + :param fe: The frame element to be printed. + :type fe: AttrDict + :return: A nicely formatted string representation of the frame element. + :rtype: str + """ + fekeys = fe.keys() + outstr = "" + outstr += "frame element ({0.ID}): {0.name}\n of {1.name}({1.ID})\n".format( + fe, fe.frame + ) + if "definition" in fekeys: + outstr += "[definition]\n" + outstr += _pretty_longstring(fe.definition, " ") + if "abbrev" in fekeys: + outstr += f"[abbrev] {fe.abbrev}\n" + if "coreType" in fekeys: + outstr += f"[coreType] {fe.coreType}\n" + if "requiresFE" in fekeys: + outstr += "[requiresFE] " + if fe.requiresFE is None: + outstr += "\n" + else: + outstr += f"{fe.requiresFE.name}({fe.requiresFE.ID})\n" + if "excludesFE" in fekeys: + outstr += "[excludesFE] " + if fe.excludesFE is None: + outstr += "\n" + else: + outstr += f"{fe.excludesFE.name}({fe.excludesFE.ID})\n" + if "semType" in fekeys: + outstr += "[semType] " + if fe.semType is None: + outstr += "\n" + else: + outstr += "\n " + f"{fe.semType.name}({fe.semType.ID})" + "\n" + + return outstr + + +def _pretty_frame(frame): + + """ + Helper function for pretty-printing a frame. + + :param frame: The frame to be printed. + :type frame: AttrDict + :return: A nicely formatted string representation of the frame. 
+ :rtype: str + """ + + outstr = "" + outstr += "frame ({0.ID}): {0.name}\n\n".format(frame) + outstr += f"[URL] {frame.URL}\n\n" + outstr += "[definition]\n" + outstr += _pretty_longstring(frame.definition, " ") + "\n" + + outstr += f"[semTypes] {len(frame.semTypes)} semantic types\n" + outstr += ( + " " * (len(frame.semTypes) > 0) + + ", ".join(f"{x.name}({x.ID})" for x in frame.semTypes) + + "\n" * (len(frame.semTypes) > 0) + ) + + outstr += "\n[frameRelations] {} frame relations\n".format( + len(frame.frameRelations) + ) + outstr += " " + "\n ".join(repr(frel) for frel in frame.frameRelations) + "\n" + + outstr += f"\n[lexUnit] {len(frame.lexUnit)} lexical units\n" + lustrs = [] + for luName, lu in sorted(frame.lexUnit.items()): + tmpstr = f"{luName} ({lu.ID})" + lustrs.append(tmpstr) + outstr += "{}\n".format(_pretty_longstring(", ".join(lustrs), prefix=" ")) + + outstr += f"\n[FE] {len(frame.FE)} frame elements\n" + fes = {} + for feName, fe in sorted(frame.FE.items()): + try: + fes[fe.coreType].append(f"{feName} ({fe.ID})") + except KeyError: + fes[fe.coreType] = [] + fes[fe.coreType].append(f"{feName} ({fe.ID})") + for ct in sorted( + fes.keys(), + key=lambda ct2: [ + "Core", + "Core-Unexpressed", + "Peripheral", + "Extra-Thematic", + ].index(ct2), + ): + outstr += "{:>16}: {}\n".format(ct, ", ".join(sorted(fes[ct]))) + + outstr += "\n[FEcoreSets] {} frame element core sets\n".format( + len(frame.FEcoreSets) + ) + outstr += ( + " " + + "\n ".join( + ", ".join([x.name for x in coreSet]) for coreSet in frame.FEcoreSets + ) + + "\n" + ) + + return outstr + + +class FramenetError(Exception): + + """An exception class for framenet-related errors.""" + + +class AttrDict(dict): + + """A class that wraps a dict and allows accessing the keys of the + dict as if they were attributes. 
Taken from here: + https://stackoverflow.com/a/14620633/8879 + + >>> foo = {'a':1, 'b':2, 'c':3} + >>> bar = AttrDict(foo) + >>> pprint(dict(bar)) + {'a': 1, 'b': 2, 'c': 3} + >>> bar.b + 2 + >>> bar.d = 4 + >>> pprint(dict(bar)) + {'a': 1, 'b': 2, 'c': 3, 'd': 4} + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # self.__dict__ = self + + def __setattr__(self, name, value): + self[name] = value + + def __getattr__(self, name): + if name == "_short_repr": + return self._short_repr + return self[name] + + def __getitem__(self, name): + v = super().__getitem__(name) + if isinstance(v, Future): + return v._data() + return v + + def _short_repr(self): + if "_type" in self: + if self["_type"].endswith("relation"): + return self.__repr__() + try: + return "<{} ID={} name={}>".format( + self["_type"], self["ID"], self["name"] + ) + except KeyError: + try: # no ID--e.g., for _type=lusubcorpus + return "<{} name={}>".format(self["_type"], self["name"]) + except KeyError: # no name--e.g., for _type=lusentence + return "<{} ID={}>".format(self["_type"], self["ID"]) + else: + return self.__repr__() + + def _str(self): + outstr = "" + + if "_type" not in self: + outstr = _pretty_any(self) + elif self["_type"] == "frame": + outstr = _pretty_frame(self) + elif self["_type"] == "fe": + outstr = _pretty_fe(self) + elif self["_type"] == "lu": + outstr = _pretty_lu(self) + elif self["_type"] == "luexemplars": # list of ALL exemplars for LU + outstr = _pretty_exemplars(self, self[0].LU) + elif ( + self["_type"] == "fulltext_annotation" + ): # list of all sentences for full-text doc + outstr = _pretty_fulltext_sentences(self) + elif self["_type"] == "lusentence": + outstr = _pretty_annotation(self) + elif self["_type"] == "fulltext_sentence": + outstr = _pretty_fulltext_sentence(self) + elif self["_type"] in ("luannotationset", "fulltext_annotationset"): + outstr = _pretty_annotation(self, aset_level=True) + elif self["_type"] == "posannotationset": + outstr = _pretty_pos(self) + elif self["_type"] == "semtype": + outstr = _pretty_semtype(self) + elif self["_type"] == "framerelationtype": + outstr = _pretty_frame_relation_type(self) + elif self["_type"] == "framerelation": + outstr = _pretty_frame_relation(self) + elif self["_type"] == "ferelation": + outstr = _pretty_fe_relation(self) + else: + outstr = _pretty_any(self) + + # ensure result is unicode string prior to applying the + # decorator (because non-ASCII characters + # could in principle occur in the data and would trigger an encoding error when + # passed as arguments to str.format()). + # assert isinstance(outstr, unicode) # not in Python 3.2 + return outstr + + def __str__(self): + return self._str() + + def __repr__(self): + return self.__str__() + + +class SpecialList(list): + """ + A list subclass which adds a '_type' attribute for special printing + (similar to an AttrDict, though this is NOT an AttrDict subclass). + """ + + def __init__(self, typ, *args, **kwargs): + super().__init__(*args, **kwargs) + self._type = typ + + def _str(self): + outstr = "" + + assert self._type + if len(self) == 0: + outstr = "[]" + elif self._type == "luexemplars": # list of ALL exemplars for LU + outstr = _pretty_exemplars(self, self[0].LU) + else: + assert False, self._type + return outstr + + def __str__(self): + return self._str() + + def __repr__(self): + return self.__str__() + + +class Future: + """ + Wraps and acts as a proxy for a value to be loaded lazily (on demand). 
+ Adapted from https://gist.github.com/sergey-miryanov/2935416 + """ + + def __init__(self, loader, *args, **kwargs): + """ + :param loader: when called with no arguments, returns the value to be stored + :type loader: callable + """ + super().__init__(*args, **kwargs) + self._loader = loader + self._d = None + + def _data(self): + if callable(self._loader): + self._d = self._loader() + self._loader = None # the data is now cached + return self._d + + def __nonzero__(self): + return bool(self._data()) + + def __len__(self): + return len(self._data()) + + def __setitem__(self, key, value): + return self._data().__setitem__(key, value) + + def __getitem__(self, key): + return self._data().__getitem__(key) + + def __getattr__(self, key): + return self._data().__getattr__(key) + + def __str__(self): + return self._data().__str__() + + def __repr__(self): + return self._data().__repr__() + + +class PrettyDict(AttrDict): + """ + Displays an abbreviated repr of values where possible. + Inherits from AttrDict, so a callable value will + be lazily converted to an actual value. + """ + + def __init__(self, *args, **kwargs): + _BREAK_LINES = kwargs.pop("breakLines", False) + super().__init__(*args, **kwargs) + dict.__setattr__(self, "_BREAK_LINES", _BREAK_LINES) + + def __repr__(self): + parts = [] + for k, v in sorted(self.items()): + kv = repr(k) + ": " + try: + kv += v._short_repr() + except AttributeError: + kv += repr(v) + parts.append(kv) + return "{" + (",\n " if self._BREAK_LINES else ", ").join(parts) + "}" + + +class PrettyList(list): + """ + Displays an abbreviated repr of only the first several elements, not the whole list. + """ + + # from nltk.util + def __init__(self, *args, **kwargs): + self._MAX_REPR_SIZE = kwargs.pop("maxReprSize", 60) + self._BREAK_LINES = kwargs.pop("breakLines", False) + super().__init__(*args, **kwargs) + + def __repr__(self): + """ + Return a string representation for this corpus view that is + similar to a list's representation; but if it would be more + than 60 characters long, it is truncated. + """ + pieces = [] + length = 5 + + for elt in self: + pieces.append( + elt._short_repr() + ) # key difference from inherited version: call to _short_repr() + length += len(pieces[-1]) + 2 + if self._MAX_REPR_SIZE and length > self._MAX_REPR_SIZE and len(pieces) > 2: + return "[%s, ...]" % str(",\n " if self._BREAK_LINES else ", ").join( + pieces[:-1] + ) + return "[%s]" % str(",\n " if self._BREAK_LINES else ", ").join(pieces) + + +class PrettyLazyMap(LazyMap): + """ + Displays an abbreviated repr of only the first several elements, not the whole list. + """ + + # from nltk.util + _MAX_REPR_SIZE = 60 + + def __repr__(self): + """ + Return a string representation for this corpus view that is + similar to a list's representation; but if it would be more + than 60 characters long, it is truncated. + """ + pieces = [] + length = 5 + for elt in self: + pieces.append( + elt._short_repr() + ) # key difference from inherited version: call to _short_repr() + length += len(pieces[-1]) + 2 + if length > self._MAX_REPR_SIZE and len(pieces) > 2: + return "[%s, ...]" % ", ".join(pieces[:-1]) + return "[%s]" % ", ".join(pieces) + + +class PrettyLazyIteratorList(LazyIteratorList): + """ + Displays an abbreviated repr of only the first several elements, not the whole list. 
+ """ + + # from nltk.util + _MAX_REPR_SIZE = 60 + + def __repr__(self): + """ + Return a string representation for this corpus view that is + similar to a list's representation; but if it would be more + than 60 characters long, it is truncated. + """ + pieces = [] + length = 5 + for elt in self: + pieces.append( + elt._short_repr() + ) # key difference from inherited version: call to _short_repr() + length += len(pieces[-1]) + 2 + if length > self._MAX_REPR_SIZE and len(pieces) > 2: + return "[%s, ...]" % ", ".join(pieces[:-1]) + return "[%s]" % ", ".join(pieces) + + +class PrettyLazyConcatenation(LazyConcatenation): + """ + Displays an abbreviated repr of only the first several elements, not the whole list. + """ + + # from nltk.util + _MAX_REPR_SIZE = 60 + + def __repr__(self): + """ + Return a string representation for this corpus view that is + similar to a list's representation; but if it would be more + than 60 characters long, it is truncated. + """ + pieces = [] + length = 5 + for elt in self: + pieces.append( + elt._short_repr() + ) # key difference from inherited version: call to _short_repr() + length += len(pieces[-1]) + 2 + if length > self._MAX_REPR_SIZE and len(pieces) > 2: + return "[%s, ...]" % ", ".join(pieces[:-1]) + return "[%s]" % ", ".join(pieces) + + def __add__(self, other): + """Return a list concatenating self with other.""" + return PrettyLazyIteratorList(itertools.chain(self, other)) + + def __radd__(self, other): + """Return a list concatenating other with self.""" + return PrettyLazyIteratorList(itertools.chain(other, self)) + + +class FramenetCorpusReader(XMLCorpusReader): + """A corpus reader for the Framenet Corpus. + + >>> from nltk.corpus import framenet as fn + >>> fn.lu(3238).frame.lexUnit['glint.v'] is fn.lu(3238) + True + >>> fn.frame_by_name('Replacing') is fn.lus('replace.v')[0].frame + True + >>> fn.lus('prejudice.n')[0].frame.frameRelations == fn.frame_relations('Partiality') + True + """ + + _bad_statuses = ["Problem"] + """ + When loading LUs for a frame, those whose status is in this list will be ignored. + Due to caching, if user code modifies this, it should do so before loading any data. + 'Problem' should always be listed for FrameNet 1.5, as these LUs are not included + in the XML index. + """ + + _warnings = False + + def warnings(self, v): + """Enable or disable warnings of data integrity issues as they are encountered. + If v is truthy, warnings will be enabled. + + (This is a function rather than just an attribute/property to ensure that if + enabling warnings is the first action taken, the corpus reader is instantiated first.) + """ + self._warnings = v + + def __init__(self, root, fileids): + XMLCorpusReader.__init__(self, root, fileids) + + # framenet corpus sub dirs + # sub dir containing the xml files for frames + self._frame_dir = "frame" + # sub dir containing the xml files for lexical units + self._lu_dir = "lu" + # sub dir containing the xml files for fulltext annotation files + self._fulltext_dir = "fulltext" + + # location of latest development version of FrameNet + self._fnweb_url = "https://framenet2.icsi.berkeley.edu/fnReports/data" + + # Indexes used for faster look-ups + self._frame_idx = None + self._cached_frames = {} # name -> ID + self._lu_idx = None + self._fulltext_idx = None + self._semtypes = None + self._freltyp_idx = None # frame relation types (Inheritance, Using, etc.) 
+ self._frel_idx = None # frame-to-frame relation instances + self._ferel_idx = None # FE-to-FE relation instances + self._frel_f_idx = None # frame-to-frame relations associated with each frame + + self._readme = "README.txt" + + def help(self, attrname=None): + """Display help information summarizing the main methods.""" + + if attrname is not None: + return help(self.__getattribute__(attrname)) + + # No need to mention frame_by_name() or frame_by_id(), + # as it's easier to just call frame(). + # Also not mentioning lu_basic(). + + msg = """ +Citation: Nathan Schneider and Chuck Wooters (2017), +"The NLTK FrameNet API: Designing for Discoverability with a Rich Linguistic Resource". +Proceedings of EMNLP: System Demonstrations. https://arxiv.org/abs/1703.07438 + +Use the following methods to access data in FrameNet. +Provide a method name to `help()` for more information. + +FRAMES +====== + +frame() to look up a frame by its exact name or ID +frames() to get frames matching a name pattern +frames_by_lemma() to get frames containing an LU matching a name pattern +frame_ids_and_names() to get a mapping from frame IDs to names + +FRAME ELEMENTS +============== + +fes() to get frame elements (a.k.a. roles) matching a name pattern, optionally constrained + by a frame name pattern + +LEXICAL UNITS +============= + +lu() to look up an LU by its ID +lus() to get lexical units matching a name pattern, optionally constrained by frame +lu_ids_and_names() to get a mapping from LU IDs to names + +RELATIONS +========= + +frame_relation_types() to get the different kinds of frame-to-frame relations + (Inheritance, Subframe, Using, etc.). +frame_relations() to get the relation instances, optionally constrained by + frame(s) or relation type +fe_relations() to get the frame element pairs belonging to a frame-to-frame relation + +SEMANTIC TYPES +============== + +semtypes() to get the different kinds of semantic types that can be applied to + FEs, LUs, and entire frames +semtype() to look up a particular semtype by name, ID, or abbreviation +semtype_inherits() to check whether two semantic types have a subtype-supertype + relationship in the semtype hierarchy +propagate_semtypes() to apply inference rules that distribute semtypes over relations + between FEs + +ANNOTATIONS +=========== + +annotations() to get annotation sets, in which a token in a sentence is annotated + with a lexical unit in a frame, along with its frame elements and their syntactic properties; + can be constrained by LU name pattern and limited to lexicographic exemplars or full-text. + Sentences of full-text annotation can have multiple annotation sets. +sents() to get annotated sentences illustrating one or more lexical units +exemplars() to get sentences of lexicographic annotation, most of which have + just 1 annotation set; can be constrained by LU name pattern, frame, and overt FE(s) +doc() to look up a document of full-text annotation by its ID +docs() to get documents of full-text annotation that match a name pattern +docs_metadata() to get metadata about all full-text documents without loading them +ft_sents() to iterate over sentences of full-text annotation + +UTILITIES +========= + +buildindexes() loads metadata about all frames, LUs, etc. into memory to avoid + delay when one is accessed for the first time. It does not load annotations. 
+readme() gives the text of the FrameNet README file +warnings(True) to display corpus consistency warnings when loading data + """ + print(msg) + + def _buildframeindex(self): + # The total number of Frames in Framenet is fairly small (~1200) so + # this index should not be very large + if not self._frel_idx: + self._buildrelationindex() # always load frame relations before frames, + # otherwise weird ordering effects might result in incomplete information + self._frame_idx = {} + with XMLCorpusView( + self.abspath("frameIndex.xml"), "frameIndex/frame", self._handle_elt + ) as view: + for f in view: + self._frame_idx[f["ID"]] = f + + def _buildcorpusindex(self): + # The total number of fulltext annotated documents in Framenet + # is fairly small (~90) so this index should not be very large + self._fulltext_idx = {} + with XMLCorpusView( + self.abspath("fulltextIndex.xml"), + "fulltextIndex/corpus", + self._handle_fulltextindex_elt, + ) as view: + for doclist in view: + for doc in doclist: + self._fulltext_idx[doc.ID] = doc + + def _buildluindex(self): + # The number of LUs in Framenet is about 13,000 so this index + # should not be very large + self._lu_idx = {} + with XMLCorpusView( + self.abspath("luIndex.xml"), "luIndex/lu", self._handle_elt + ) as view: + for lu in view: + self._lu_idx[ + lu["ID"] + ] = lu # populate with LU index entries. if any of these + # are looked up they will be replaced by full LU objects. + + def _buildrelationindex(self): + # print('building relation index...', file=sys.stderr) + self._freltyp_idx = {} + self._frel_idx = {} + self._frel_f_idx = defaultdict(set) + self._ferel_idx = {} + + with XMLCorpusView( + self.abspath("frRelation.xml"), + "frameRelations/frameRelationType", + self._handle_framerelationtype_elt, + ) as view: + for freltyp in view: + self._freltyp_idx[freltyp.ID] = freltyp + for frel in freltyp.frameRelations: + supF = frel.superFrame = frel[freltyp.superFrameName] = Future( + (lambda fID: lambda: self.frame_by_id(fID))(frel.supID) + ) + subF = frel.subFrame = frel[freltyp.subFrameName] = Future( + (lambda fID: lambda: self.frame_by_id(fID))(frel.subID) + ) + self._frel_idx[frel.ID] = frel + self._frel_f_idx[frel.supID].add(frel.ID) + self._frel_f_idx[frel.subID].add(frel.ID) + for ferel in frel.feRelations: + ferel.superFrame = supF + ferel.subFrame = subF + ferel.superFE = Future( + (lambda fer: lambda: fer.superFrame.FE[fer.superFEName])( + ferel + ) + ) + ferel.subFE = Future( + (lambda fer: lambda: fer.subFrame.FE[fer.subFEName])(ferel) + ) + self._ferel_idx[ferel.ID] = ferel + # print('...done building relation index', file=sys.stderr) + + def _warn(self, *message, **kwargs): + if self._warnings: + kwargs.setdefault("file", sys.stderr) + print(*message, **kwargs) + + def buildindexes(self): + """ + Build the internal indexes to make look-ups faster. + """ + # Frames + self._buildframeindex() + # LUs + self._buildluindex() + # Fulltext annotation corpora index + self._buildcorpusindex() + # frame and FE relations + self._buildrelationindex() + + def doc(self, fn_docid): + """ + Returns the annotated document whose id number is + ``fn_docid``. This id number can be obtained by calling the + Documents() function. 
+ + The dict that is returned from this function will contain the + following keys: + + - '_type' : 'fulltextannotation' + - 'sentence' : a list of sentences in the document + - Each item in the list is a dict containing the following keys: + - 'ID' : the ID number of the sentence + - '_type' : 'sentence' + - 'text' : the text of the sentence + - 'paragNo' : the paragraph number + - 'sentNo' : the sentence number + - 'docID' : the document ID number + - 'corpID' : the corpus ID number + - 'aPos' : the annotation position + - 'annotationSet' : a list of annotation layers for the sentence + - Each item in the list is a dict containing the following keys: + - 'ID' : the ID number of the annotation set + - '_type' : 'annotationset' + - 'status' : either 'MANUAL' or 'UNANN' + - 'luName' : (only if status is 'MANUAL') + - 'luID' : (only if status is 'MANUAL') + - 'frameID' : (only if status is 'MANUAL') + - 'frameName': (only if status is 'MANUAL') + - 'layer' : a list of labels for the layer + - Each item in the layer is a dict containing the following keys: + - '_type': 'layer' + - 'rank' + - 'name' + - 'label' : a list of labels in the layer + - Each item is a dict containing the following keys: + - 'start' + - 'end' + - 'name' + - 'feID' (optional) + + :param fn_docid: The Framenet id number of the document + :type fn_docid: int + :return: Information about the annotated document + :rtype: dict + """ + try: + xmlfname = self._fulltext_idx[fn_docid].filename + except TypeError: # happens when self._fulltext_idx == None + # build the index + self._buildcorpusindex() + xmlfname = self._fulltext_idx[fn_docid].filename + except KeyError as e: # probably means that fn_docid was not in the index + raise FramenetError(f"Unknown document id: {fn_docid}") from e + + # construct the path name for the xml file containing the document info + locpath = os.path.join(f"{self._root}", self._fulltext_dir, xmlfname) + + # Grab the top-level xml element containing the fulltext annotation + with XMLCorpusView(locpath, "fullTextAnnotation") as view: + elt = view[0] + info = self._handle_fulltextannotation_elt(elt) + # add metadata + for k, v in self._fulltext_idx[fn_docid].items(): + info[k] = v + return info + + def frame_by_id(self, fn_fid, ignorekeys=[]): + """ + Get the details for the specified Frame using the frame's id + number. + + Usage examples: + + >>> from nltk.corpus import framenet as fn + >>> f = fn.frame_by_id(256) + >>> f.ID + 256 + >>> f.name + 'Medical_specialties' + >>> f.definition # doctest: +NORMALIZE_WHITESPACE + "This frame includes words that name medical specialties and is closely related to the + Medical_professionals frame. The FE Type characterizing a sub-are in a Specialty may also be + expressed. 'Ralph practices paediatric oncology.'" + + :param fn_fid: The Framenet id number of the frame + :type fn_fid: int + :param ignorekeys: The keys to ignore. These keys will not be + included in the output. (optional) + :type ignorekeys: list(str) + :return: Information about a frame + :rtype: dict + + Also see the ``frame()`` function for details about what is + contained in the dict that is returned. 
+ """ + + # get the name of the frame with this id number + try: + fentry = self._frame_idx[fn_fid] + if "_type" in fentry: + return fentry # full frame object is cached + name = fentry["name"] + except TypeError: + self._buildframeindex() + name = self._frame_idx[fn_fid]["name"] + except KeyError as e: + raise FramenetError(f"Unknown frame id: {fn_fid}") from e + + return self.frame_by_name(name, ignorekeys, check_cache=False) + + def frame_by_name(self, fn_fname, ignorekeys=[], check_cache=True): + """ + Get the details for the specified Frame using the frame's name. + + Usage examples: + + >>> from nltk.corpus import framenet as fn + >>> f = fn.frame_by_name('Medical_specialties') + >>> f.ID + 256 + >>> f.name + 'Medical_specialties' + >>> f.definition # doctest: +NORMALIZE_WHITESPACE + "This frame includes words that name medical specialties and is closely related to the + Medical_professionals frame. The FE Type characterizing a sub-are in a Specialty may also be + expressed. 'Ralph practices paediatric oncology.'" + + :param fn_fname: The name of the frame + :type fn_fname: str + :param ignorekeys: The keys to ignore. These keys will not be + included in the output. (optional) + :type ignorekeys: list(str) + :return: Information about a frame + :rtype: dict + + Also see the ``frame()`` function for details about what is + contained in the dict that is returned. + """ + + if check_cache and fn_fname in self._cached_frames: + return self._frame_idx[self._cached_frames[fn_fname]] + elif not self._frame_idx: + self._buildframeindex() + + # construct the path name for the xml file containing the Frame info + locpath = os.path.join(f"{self._root}", self._frame_dir, fn_fname + ".xml") + # print(locpath, file=sys.stderr) + # Grab the xml for the frame + try: + with XMLCorpusView(locpath, "frame") as view: + elt = view[0] + except OSError as e: + raise FramenetError(f"Unknown frame: {fn_fname}") from e + + fentry = self._handle_frame_elt(elt, ignorekeys) + assert fentry + + fentry.URL = self._fnweb_url + "/" + self._frame_dir + "/" + fn_fname + ".xml" + + # INFERENCE RULE: propagate lexical semtypes from the frame to all its LUs + for st in fentry.semTypes: + if st.rootType.name == "Lexical_type": + for lu in fentry.lexUnit.values(): + if not any( + x is st for x in lu.semTypes + ): # identity containment check + lu.semTypes.append(st) + + self._frame_idx[fentry.ID] = fentry + self._cached_frames[fentry.name] = fentry.ID + """ + # now set up callables to resolve the LU pointers lazily. + # (could also do this here--caching avoids infinite recursion.) + for luName,luinfo in fentry.lexUnit.items(): + fentry.lexUnit[luName] = (lambda luID: Future(lambda: self.lu(luID)))(luinfo.ID) + """ + return fentry + + def frame(self, fn_fid_or_fname, ignorekeys=[]): + """ + Get the details for the specified Frame using the frame's name + or id number. + + Usage examples: + + >>> from nltk.corpus import framenet as fn + >>> f = fn.frame(256) + >>> f.name + 'Medical_specialties' + >>> f = fn.frame('Medical_specialties') + >>> f.ID + 256 + >>> # ensure non-ASCII character in definition doesn't trigger an encoding error: + >>> fn.frame('Imposing_obligation') # doctest: +ELLIPSIS + frame (1494): Imposing_obligation... + + + The dict that is returned from this function will contain the + following information about the Frame: + + - 'name' : the name of the Frame (e.g. 'Birth', 'Apply_heat', etc.) 
+ - 'definition' : textual definition of the Frame + - 'ID' : the internal ID number of the Frame + - 'semTypes' : a list of semantic types for this frame + - Each item in the list is a dict containing the following keys: + - 'name' : can be used with the semtype() function + - 'ID' : can be used with the semtype() function + + - 'lexUnit' : a dict containing all of the LUs for this frame. + The keys in this dict are the names of the LUs and + the value for each key is itself a dict containing + info about the LU (see the lu() function for more info.) + + - 'FE' : a dict containing the Frame Elements that are part of this frame + The keys in this dict are the names of the FEs (e.g. 'Body_system') + and the values are dicts containing the following keys + + - 'definition' : The definition of the FE + - 'name' : The name of the FE e.g. 'Body_system' + - 'ID' : The id number + - '_type' : 'fe' + - 'abbrev' : Abbreviation e.g. 'bod' + - 'coreType' : one of "Core", "Peripheral", or "Extra-Thematic" + - 'semType' : if not None, a dict with the following two keys: + - 'name' : name of the semantic type. can be used with + the semtype() function + - 'ID' : id number of the semantic type. can be used with + the semtype() function + - 'requiresFE' : if not None, a dict with the following two keys: + - 'name' : the name of another FE in this frame + - 'ID' : the id of the other FE in this frame + - 'excludesFE' : if not None, a dict with the following two keys: + - 'name' : the name of another FE in this frame + - 'ID' : the id of the other FE in this frame + + - 'frameRelation' : a list of objects describing frame relations + - 'FEcoreSets' : a list of Frame Element core sets for this frame + - Each item in the list is a list of FE objects + + :param fn_fid_or_fname: The Framenet name or id number of the frame + :type fn_fid_or_fname: int or str + :param ignorekeys: The keys to ignore. These keys will not be + included in the output. (optional) + :type ignorekeys: list(str) + :return: Information about a frame + :rtype: dict + """ + + # get the frame info by name or id number + if isinstance(fn_fid_or_fname, str): + f = self.frame_by_name(fn_fid_or_fname, ignorekeys) + else: + f = self.frame_by_id(fn_fid_or_fname, ignorekeys) + + return f + + def frames_by_lemma(self, pat): + """ + Returns a list of all frames that contain LUs in which the + ``name`` attribute of the LU matches the given regular expression + ``pat``. Note that LU names are composed of "lemma.POS", where + the "lemma" part can be made up of either a single lexeme + (e.g. 'run') or multiple lexemes (e.g. 'a little'). + + Note: if you are going to be doing a lot of this type of + searching, you'd want to build an index that maps from lemmas to + frames because each time frames_by_lemma() is called, it has to + search through ALL of the frame XML files in the db. + + >>> from nltk.corpus import framenet as fn + >>> from nltk.corpus.reader.framenet import PrettyList + >>> PrettyList(sorted(fn.frames_by_lemma(r'(?i)a little'), key=itemgetter('ID'))) # doctest: +ELLIPSIS + [, ] + + :return: A list of frame objects. + :rtype: list(AttrDict) + """ + return PrettyList( + f + for f in self.frames() + if any(re.search(pat, luName) for luName in f.lexUnit) + ) + + def lu_basic(self, fn_luid): + """ + Returns basic information about the LU whose id is + ``fn_luid``. This is basically just a wrapper around the + ``lu()`` function with "subCorpus" info excluded. 
+ + >>> from nltk.corpus import framenet as fn + >>> lu = PrettyDict(fn.lu_basic(256), breakLines=True) + >>> # ellipses account for differences between FN 1.5 and 1.7 + >>> lu # doctest: +ELLIPSIS + {'ID': 256, + 'POS': 'V', + 'URL': 'https://framenet2.icsi.berkeley.edu/fnReports/data/lu/lu256.xml', + '_type': 'lu', + 'cBy': ..., + 'cDate': '02/08/2001 01:27:50 PST Thu', + 'definition': 'COD: be aware of beforehand; predict.', + 'definitionMarkup': 'COD: be aware of beforehand; predict.', + 'frame': , + 'lemmaID': 15082, + 'lexemes': [{'POS': 'V', 'breakBefore': 'false', 'headword': 'false', 'name': 'foresee', 'order': 1}], + 'name': 'foresee.v', + 'semTypes': [], + 'sentenceCount': {'annotated': ..., 'total': ...}, + 'status': 'FN1_Sent'} + + :param fn_luid: The id number of the desired LU + :type fn_luid: int + :return: Basic information about the lexical unit + :rtype: dict + """ + return self.lu(fn_luid, ignorekeys=["subCorpus", "exemplars"]) + + def lu(self, fn_luid, ignorekeys=[], luName=None, frameID=None, frameName=None): + """ + Access a lexical unit by its ID. luName, frameID, and frameName are used + only in the event that the LU does not have a file in the database + (which is the case for LUs with "Problem" status); in this case, + a placeholder LU is created which just contains its name, ID, and frame. + + + Usage examples: + + >>> from nltk.corpus import framenet as fn + >>> fn.lu(256).name + 'foresee.v' + >>> fn.lu(256).definition + 'COD: be aware of beforehand; predict.' + >>> fn.lu(256).frame.name + 'Expectation' + >>> list(map(PrettyDict, fn.lu(256).lexemes)) + [{'POS': 'V', 'breakBefore': 'false', 'headword': 'false', 'name': 'foresee', 'order': 1}] + + >>> fn.lu(227).exemplars[23] # doctest: +NORMALIZE_WHITESPACE + exemplar sentence (352962): + [sentNo] 0 + [aPos] 59699508 + + [LU] (227) guess.v in Coming_to_believe + + [frame] (23) Coming_to_believe + + [annotationSet] 2 annotation sets + + [POS] 18 tags + + [POS_tagset] BNC + + [GF] 3 relations + + [PT] 3 phrases + + [Other] 1 entry + + [text] + [Target] + [FE] + + When he was inside the house , Culley noticed the characteristic + ------------------ + Content + + he would n't have guessed at . + -- ******* -- + Co C1 [Evidence:INI] + (Co=Cognizer, C1=Content) + + + + The dict that is returned from this function will contain most of the + following information about the LU. Note that some LUs do not contain + all of these pieces of information - particularly 'totalAnnotated' and + 'incorporatedFE' may be missing in some LUs: + + - 'name' : the name of the LU (e.g. 'merger.n') + - 'definition' : textual definition of the LU + - 'ID' : the internal ID number of the LU + - '_type' : 'lu' + - 'status' : e.g. 'Created' + - 'frame' : Frame that this LU belongs to + - 'POS' : the part of speech of this LU (e.g. 'N') + - 'totalAnnotated' : total number of examples annotated with this LU + - 'incorporatedFE' : FE that incorporates this LU (e.g. 'Ailment') + - 'sentenceCount' : a dict with the following two keys: + - 'annotated': number of sentences annotated with this LU + - 'total' : total number of sentences with this LU + + - 'lexemes' : a list of dicts describing the lemma of this LU. + Each dict in the list contains these keys: + + - 'POS' : part of speech e.g. 'N' + - 'name' : either single-lexeme e.g. 'merger' or + multi-lexeme e.g. 
'a little' + - 'order': the order of the lexeme in the lemma (starting from 1) + - 'headword': a boolean ('true' or 'false') + - 'breakBefore': Can this lexeme be separated from the previous lexeme? + Consider: "take over.v" as in:: + + Germany took over the Netherlands in 2 days. + Germany took the Netherlands over in 2 days. + + In this case, 'breakBefore' would be "true" for the lexeme + "over". Contrast this with "take after.v" as in:: + + Mary takes after her grandmother. + *Mary takes her grandmother after. + + In this case, 'breakBefore' would be "false" for the lexeme "after" + + - 'lemmaID' : Can be used to connect lemmas in different LUs + - 'semTypes' : a list of semantic type objects for this LU + - 'subCorpus' : a list of subcorpora + - Each item in the list is a dict containing the following keys: + - 'name' : + - 'sentence' : a list of sentences in the subcorpus + - each item in the list is a dict with the following keys: + - 'ID': + - 'sentNo': + - 'text': the text of the sentence + - 'aPos': + - 'annotationSet': a list of annotation sets + - each item in the list is a dict with the following keys: + - 'ID': + - 'status': + - 'layer': a list of layers + - each layer is a dict containing the following keys: + - 'name': layer name (e.g. 'BNC') + - 'rank': + - 'label': a list of labels for the layer + - each label is a dict containing the following keys: + - 'start': start pos of label in sentence 'text' (0-based) + - 'end': end pos of label in sentence 'text' (0-based) + - 'name': name of label (e.g. 'NN1') + + Under the hood, this implementation looks up the lexical unit information + in the *frame* definition file. That file does not contain + corpus annotations, so the LU files will be accessed on demand if those are + needed. In principle, valence patterns could be loaded here too, + though these are not currently supported. + + :param fn_luid: The id number of the lexical unit + :type fn_luid: int + :param ignorekeys: The keys to ignore. These keys will not be + included in the output. (optional) + :type ignorekeys: list(str) + :return: All information about the lexical unit + :rtype: dict + """ + # look for this LU in cache + if not self._lu_idx: + self._buildluindex() + OOV = object() + luinfo = self._lu_idx.get(fn_luid, OOV) + if luinfo is OOV: + # LU not in the index. We create a placeholder by falling back to + # luName, frameID, and frameName. However, this will not be listed + # among the LUs for its frame. + self._warn( + "LU ID not found: {} ({}) in {} ({})".format( + luName, fn_luid, frameName, frameID + ) + ) + luinfo = AttrDict( + { + "_type": "lu", + "ID": fn_luid, + "name": luName, + "frameID": frameID, + "status": "Problem", + } + ) + f = self.frame_by_id(luinfo.frameID) + assert f.name == frameName, (f.name, frameName) + luinfo["frame"] = f + self._lu_idx[fn_luid] = luinfo + elif "_type" not in luinfo: + # we only have an index entry for the LU. loading the frame will replace this. + f = self.frame_by_id(luinfo.frameID) + luinfo = self._lu_idx[fn_luid] + if ignorekeys: + return AttrDict({k: v for k, v in luinfo.items() if k not in ignorekeys}) + + return luinfo + + def _lu_file(self, lu, ignorekeys=[]): + """ + Augment the LU information that was loaded from the frame file + with additional information from the LU file. 
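+
+        Editor's sketch of the lazy loading this method supports: the
+        ``exemplars`` attribute of an LU is a Future that ends up calling this
+        method the first time it is accessed. (Assumes the FrameNet data is
+        installed; LU 227 is ``guess.v``, whose 24th exemplar is shown in the
+        ``lu()`` docstring above, so at least 24 exemplars exist.)
+
+        >>> from nltk.corpus import framenet as fn
+        >>> exs = fn.lu(227).exemplars  # first access loads lu227.xml on demand
+        >>> len(exs) > 23
+        True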
+ """ + fn_luid = lu.ID + + fname = f"lu{fn_luid}.xml" + locpath = os.path.join(f"{self._root}", self._lu_dir, fname) + # print(locpath, file=sys.stderr) + if not self._lu_idx: + self._buildluindex() + + try: + with XMLCorpusView(locpath, "lexUnit") as view: + elt = view[0] + except OSError as e: + raise FramenetError(f"Unknown LU id: {fn_luid}") from e + + lu2 = self._handle_lexunit_elt(elt, ignorekeys) + lu.URL = self._fnweb_url + "/" + self._lu_dir + "/" + fname + lu.subCorpus = lu2.subCorpus + lu.exemplars = SpecialList( + "luexemplars", [sent for subc in lu.subCorpus for sent in subc.sentence] + ) + for sent in lu.exemplars: + sent["LU"] = lu + sent["frame"] = lu.frame + for aset in sent.annotationSet: + aset["LU"] = lu + aset["frame"] = lu.frame + + return lu + + def _loadsemtypes(self): + """Create the semantic types index.""" + self._semtypes = AttrDict() + with XMLCorpusView( + self.abspath("semTypes.xml"), + "semTypes/semType", + self._handle_semtype_elt, + ) as view: + for st in view: + n = st["name"] + a = st["abbrev"] + i = st["ID"] + # Both name and abbrev should be able to retrieve the + # ID. The ID will retrieve the semantic type dict itself. + self._semtypes[n] = i + self._semtypes[a] = i + self._semtypes[i] = st + # now that all individual semtype XML is loaded, we can link them together + roots = [] + for st in self.semtypes(): + if st.superType: + st.superType = self.semtype(st.superType.supID) + st.superType.subTypes.append(st) + else: + if st not in roots: + roots.append(st) + st.rootType = st + queue = list(roots) + assert queue + while queue: + st = queue.pop(0) + for child in st.subTypes: + child.rootType = st.rootType + queue.append(child) + # self.propagate_semtypes() # apply inferencing over FE relations + + def propagate_semtypes(self): + """ + Apply inference rules to distribute semtypes over relations between FEs. + For FrameNet 1.5, this results in 1011 semtypes being propagated. + (Not done by default because it requires loading all frame files, + which takes several seconds. If this needed to be fast, it could be rewritten + to traverse the neighboring relations on demand for each FE semtype.) + + >>> from nltk.corpus import framenet as fn + >>> x = sum(1 for f in fn.frames() for fe in f.FE.values() if fe.semType) + >>> fn.propagate_semtypes() + >>> y = sum(1 for f in fn.frames() for fe in f.FE.values() if fe.semType) + >>> y-x > 1000 + True + """ + if not self._semtypes: + self._loadsemtypes() + if not self._ferel_idx: + self._buildrelationindex() + changed = True + i = 0 + nPropagations = 0 + while changed: + # make a pass and see if anything needs to be propagated + i += 1 + changed = False + for ferel in self.fe_relations(): + superST = ferel.superFE.semType + subST = ferel.subFE.semType + try: + if superST and superST is not subST: + # propagate downward + assert subST is None or self.semtype_inherits(subST, superST), ( + superST.name, + ferel, + subST.name, + ) + if subST is None: + ferel.subFE.semType = subST = superST + changed = True + nPropagations += 1 + if ( + ferel.type.name in ["Perspective_on", "Subframe", "Precedes"] + and subST + and subST is not superST + ): + # propagate upward + assert superST is None, (superST.name, ferel, subST.name) + ferel.superFE.semType = superST = subST + changed = True + nPropagations += 1 + except AssertionError as ex: + # bug in the data! 
ignore + # print(ex, file=sys.stderr) + continue + # print(i, nPropagations, file=sys.stderr) + + def semtype(self, key): + """ + >>> from nltk.corpus import framenet as fn + >>> fn.semtype(233).name + 'Temperature' + >>> fn.semtype(233).abbrev + 'Temp' + >>> fn.semtype('Temperature').ID + 233 + + :param key: The name, abbreviation, or id number of the semantic type + :type key: string or int + :return: Information about a semantic type + :rtype: dict + """ + if isinstance(key, int): + stid = key + else: + try: + stid = self._semtypes[key] + except TypeError: + self._loadsemtypes() + stid = self._semtypes[key] + + try: + st = self._semtypes[stid] + except TypeError: + self._loadsemtypes() + st = self._semtypes[stid] + + return st + + def semtype_inherits(self, st, superST): + if not isinstance(st, dict): + st = self.semtype(st) + if not isinstance(superST, dict): + superST = self.semtype(superST) + par = st.superType + while par: + if par is superST: + return True + par = par.superType + return False + + def frames(self, name=None): + """ + Obtain details for a specific frame. + + >>> from nltk.corpus import framenet as fn + >>> len(fn.frames()) in (1019, 1221) # FN 1.5 and 1.7, resp. + True + >>> x = PrettyList(fn.frames(r'(?i)crim'), maxReprSize=0, breakLines=True) + >>> x.sort(key=itemgetter('ID')) + >>> x + [, + , + , + ] + + A brief intro to Frames (excerpted from "FrameNet II: Extended + Theory and Practice" by Ruppenhofer et. al., 2010): + + A Frame is a script-like conceptual structure that describes a + particular type of situation, object, or event along with the + participants and props that are needed for that Frame. For + example, the "Apply_heat" frame describes a common situation + involving a Cook, some Food, and a Heating_Instrument, and is + evoked by words such as bake, blanch, boil, broil, brown, + simmer, steam, etc. + + We call the roles of a Frame "frame elements" (FEs) and the + frame-evoking words are called "lexical units" (LUs). + + FrameNet includes relations between Frames. Several types of + relations are defined, of which the most important are: + + - Inheritance: An IS-A relation. The child frame is a subtype + of the parent frame, and each FE in the parent is bound to + a corresponding FE in the child. An example is the + "Revenge" frame which inherits from the + "Rewards_and_punishments" frame. + + - Using: The child frame presupposes the parent frame as + background, e.g the "Speed" frame "uses" (or presupposes) + the "Motion" frame; however, not all parent FEs need to be + bound to child FEs. + + - Subframe: The child frame is a subevent of a complex event + represented by the parent, e.g. the "Criminal_process" frame + has subframes of "Arrest", "Arraignment", "Trial", and + "Sentencing". + + - Perspective_on: The child frame provides a particular + perspective on an un-perspectivized parent frame. A pair of + examples consists of the "Hiring" and "Get_a_job" frames, + which perspectivize the "Employment_start" frame from the + Employer's and the Employee's point of view, respectively. + + :param name: A regular expression pattern used to match against + Frame names. If 'name' is None, then a list of all + Framenet Frames will be returned. + :type name: str + :return: A list of matching Frames (or all Frames). 
+ :rtype: list(AttrDict) + """ + try: + fIDs = list(self._frame_idx.keys()) + except AttributeError: + self._buildframeindex() + fIDs = list(self._frame_idx.keys()) + + if name is not None: + return PrettyList( + self.frame(fID) for fID, finfo in self.frame_ids_and_names(name).items() + ) + else: + return PrettyLazyMap(self.frame, fIDs) + + def frame_ids_and_names(self, name=None): + """ + Uses the frame index, which is much faster than looking up each frame definition + if only the names and IDs are needed. + """ + if not self._frame_idx: + self._buildframeindex() + return { + fID: finfo.name + for fID, finfo in self._frame_idx.items() + if name is None or re.search(name, finfo.name) is not None + } + + def fes(self, name=None, frame=None): + """ + Lists frame element objects. If 'name' is provided, this is treated as + a case-insensitive regular expression to filter by frame name. + (Case-insensitivity is because casing of frame element names is not always + consistent across frames.) Specify 'frame' to filter by a frame name pattern, + ID, or object. + + >>> from nltk.corpus import framenet as fn + >>> fn.fes('Noise_maker') + [] + >>> sorted([(fe.frame.name,fe.name) for fe in fn.fes('sound')]) # doctest: +NORMALIZE_WHITESPACE + [('Cause_to_make_noise', 'Sound_maker'), ('Make_noise', 'Sound'), + ('Make_noise', 'Sound_source'), ('Sound_movement', 'Location_of_sound_source'), + ('Sound_movement', 'Sound'), ('Sound_movement', 'Sound_source'), + ('Sounds', 'Component_sound'), ('Sounds', 'Location_of_sound_source'), + ('Sounds', 'Sound_source'), ('Vocalizations', 'Location_of_sound_source'), + ('Vocalizations', 'Sound_source')] + >>> sorted([(fe.frame.name,fe.name) for fe in fn.fes('sound',r'(?i)make_noise')]) # doctest: +NORMALIZE_WHITESPACE + [('Cause_to_make_noise', 'Sound_maker'), + ('Make_noise', 'Sound'), + ('Make_noise', 'Sound_source')] + >>> sorted(set(fe.name for fe in fn.fes('^sound'))) + ['Sound', 'Sound_maker', 'Sound_source'] + >>> len(fn.fes('^sound$')) + 2 + + :param name: A regular expression pattern used to match against + frame element names. If 'name' is None, then a list of all + frame elements will be returned. + :type name: str + :return: A list of matching frame elements + :rtype: list(AttrDict) + """ + # what frames are we searching in? + if frame is not None: + if isinstance(frame, int): + frames = [self.frame(frame)] + elif isinstance(frame, str): + frames = self.frames(frame) + else: + frames = [frame] + else: + frames = self.frames() + + return PrettyList( + fe + for f in frames + for fename, fe in f.FE.items() + if name is None or re.search(name, fename, re.I) + ) + + def lus(self, name=None, frame=None): + """ + Obtain details for lexical units. + Optionally restrict by lexical unit name pattern, and/or to a certain frame + or frames whose name matches a pattern. + + >>> from nltk.corpus import framenet as fn + >>> len(fn.lus()) in (11829, 13572) # FN 1.5 and 1.7, resp. + True + >>> PrettyList(sorted(fn.lus(r'(?i)a little'), key=itemgetter('ID')), maxReprSize=0, breakLines=True) + [, + , + ] + >>> PrettyList(sorted(fn.lus(r'interest', r'(?i)stimulus'), key=itemgetter('ID'))) + [, ] + + A brief intro to Lexical Units (excerpted from "FrameNet II: + Extended Theory and Practice" by Ruppenhofer et. al., 2010): + + A lexical unit (LU) is a pairing of a word with a meaning. 
For + example, the "Apply_heat" Frame describes a common situation + involving a Cook, some Food, and a Heating Instrument, and is + _evoked_ by words such as bake, blanch, boil, broil, brown, + simmer, steam, etc. These frame-evoking words are the LUs in the + Apply_heat frame. Each sense of a polysemous word is a different + LU. + + We have used the word "word" in talking about LUs. The reality + is actually rather complex. When we say that the word "bake" is + polysemous, we mean that the lemma "bake.v" (which has the + word-forms "bake", "bakes", "baked", and "baking") is linked to + three different frames: + + - Apply_heat: "Michelle baked the potatoes for 45 minutes." + + - Cooking_creation: "Michelle baked her mother a cake for her birthday." + + - Absorb_heat: "The potatoes have to bake for more than 30 minutes." + + These constitute three different LUs, with different + definitions. + + Multiword expressions such as "given name" and hyphenated words + like "shut-eye" can also be LUs. Idiomatic phrases such as + "middle of nowhere" and "give the slip (to)" are also defined as + LUs in the appropriate frames ("Isolated_places" and "Evading", + respectively), and their internal structure is not analyzed. + + Framenet provides multiple annotated examples of each sense of a + word (i.e. each LU). Moreover, the set of examples + (approximately 20 per LU) illustrates all of the combinatorial + possibilities of the lexical unit. + + Each LU is linked to a Frame, and hence to the other words which + evoke that Frame. This makes the FrameNet database similar to a + thesaurus, grouping together semantically similar words. + + In the simplest case, frame-evoking words are verbs such as + "fried" in: + + "Matilde fried the catfish in a heavy iron skillet." + + Sometimes event nouns may evoke a Frame. For example, + "reduction" evokes "Cause_change_of_scalar_position" in: + + "...the reduction of debt levels to $665 million from $2.6 billion." + + Adjectives may also evoke a Frame. For example, "asleep" may + evoke the "Sleep" frame as in: + + "They were asleep for hours." + + Many common nouns, such as artifacts like "hat" or "tower", + typically serve as dependents rather than clearly evoking their + own frames. + + :param name: A regular expression pattern used to search the LU + names. Note that LU names take the form of a dotted + string (e.g. "run.v" or "a little.adv") in which a + lemma precedes the "." and a POS follows the + dot. The lemma may be composed of a single lexeme + (e.g. "run") or of multiple lexemes (e.g. "a + little"). If 'name' is not given, then all LUs will + be returned. + + The valid POSes are: + + v - verb + n - noun + a - adjective + adv - adverb + prep - preposition + num - numbers + intj - interjection + art - article + c - conjunction + scon - subordinating conjunction + + :type name: str + :type frame: str or int or frame + :return: A list of selected (or all) lexical units + :rtype: list of LU objects (dicts). See the lu() function for info + about the specifics of LU objects. 
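+
+        One more sketch (editor's illustration): restricting by frame only,
+        without a name pattern. As described above, ``frame`` may be a name
+        pattern, a frame ID, or a frame object.
+
+        >>> from nltk.corpus import framenet as fn
+        >>> heat_lus = fn.lus(frame='Apply_heat')
+        >>> 'bake.v' in [lu.name for lu in heat_lus]
+        True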
+ + """ + if not self._lu_idx: + self._buildluindex() + + if name is not None: # match LUs, then restrict by frame + result = PrettyList( + self.lu(luID) for luID, luName in self.lu_ids_and_names(name).items() + ) + if frame is not None: + if isinstance(frame, int): + frameIDs = {frame} + elif isinstance(frame, str): + frameIDs = {f.ID for f in self.frames(frame)} + else: + frameIDs = {frame.ID} + result = PrettyList(lu for lu in result if lu.frame.ID in frameIDs) + elif frame is not None: # all LUs in matching frames + if isinstance(frame, int): + frames = [self.frame(frame)] + elif isinstance(frame, str): + frames = self.frames(frame) + else: + frames = [frame] + result = PrettyLazyIteratorList( + iter(LazyConcatenation(list(f.lexUnit.values()) for f in frames)) + ) + else: # all LUs + luIDs = [ + luID + for luID, lu in self._lu_idx.items() + if lu.status not in self._bad_statuses + ] + result = PrettyLazyMap(self.lu, luIDs) + return result + + def lu_ids_and_names(self, name=None): + """ + Uses the LU index, which is much faster than looking up each LU definition + if only the names and IDs are needed. + """ + if not self._lu_idx: + self._buildluindex() + return { + luID: luinfo.name + for luID, luinfo in self._lu_idx.items() + if luinfo.status not in self._bad_statuses + and (name is None or re.search(name, luinfo.name) is not None) + } + + def docs_metadata(self, name=None): + """ + Return an index of the annotated documents in Framenet. + + Details for a specific annotated document can be obtained using this + class's doc() function and pass it the value of the 'ID' field. + + >>> from nltk.corpus import framenet as fn + >>> len(fn.docs()) in (78, 107) # FN 1.5 and 1.7, resp. + True + >>> set([x.corpname for x in fn.docs_metadata()])>=set(['ANC', 'KBEval', \ + 'LUCorpus-v0.3', 'Miscellaneous', 'NTI', 'PropBank']) + True + + :param name: A regular expression pattern used to search the + file name of each annotated document. The document's + file name contains the name of the corpus that the + document is from, followed by two underscores "__" + followed by the document name. So, for example, the + file name "LUCorpus-v0.3__20000410_nyt-NEW.xml" is + from the corpus named "LUCorpus-v0.3" and the + document name is "20000410_nyt-NEW.xml". + :type name: str + :return: A list of selected (or all) annotated documents + :rtype: list of dicts, where each dict object contains the following + keys: + + - 'name' + - 'ID' + - 'corpid' + - 'corpname' + - 'description' + - 'filename' + """ + try: + ftlist = PrettyList(self._fulltext_idx.values()) + except AttributeError: + self._buildcorpusindex() + ftlist = PrettyList(self._fulltext_idx.values()) + + if name is None: + return ftlist + else: + return PrettyList( + x for x in ftlist if re.search(name, x["filename"]) is not None + ) + + def docs(self, name=None): + """ + Return a list of the annotated full-text documents in FrameNet, + optionally filtered by a regex to be matched against the document name. + """ + return PrettyLazyMap((lambda x: self.doc(x.ID)), self.docs_metadata(name)) + + def sents(self, exemplars=True, full_text=True): + """ + Annotated sentences matching the specified criteria. + """ + if exemplars: + if full_text: + return self.exemplars() + self.ft_sents() + else: + return self.exemplars() + elif full_text: + return self.ft_sents() + + def annotations(self, luNamePattern=None, exemplars=True, full_text=True): + """ + Frame annotation sets matching the specified criteria. 
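+
+        A small usage sketch (editor's illustration; it deliberately avoids
+        asserting exact counts, which differ between FrameNet releases):
+
+        >>> from nltk.corpus import framenet as fn
+        >>> from nltk.corpus.reader.framenet import PrettyLazyIteratorList
+        >>> anns = fn.annotations(r'(?i)foresee', full_text=False)
+        >>> isinstance(anns, PrettyLazyIteratorList)
+        True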
+ """ + + if exemplars: + epart = PrettyLazyIteratorList( + sent.frameAnnotation for sent in self.exemplars(luNamePattern) + ) + else: + epart = [] + + if full_text: + if luNamePattern is not None: + matchedLUIDs = set(self.lu_ids_and_names(luNamePattern).keys()) + ftpart = PrettyLazyIteratorList( + aset + for sent in self.ft_sents() + for aset in sent.annotationSet[1:] + if luNamePattern is None or aset.get("luID", "CXN_ASET") in matchedLUIDs + ) + else: + ftpart = [] + + if exemplars: + if full_text: + return epart + ftpart + else: + return epart + elif full_text: + return ftpart + + def exemplars(self, luNamePattern=None, frame=None, fe=None, fe2=None): + """ + Lexicographic exemplar sentences, optionally filtered by LU name and/or 1-2 FEs that + are realized overtly. 'frame' may be a name pattern, frame ID, or frame instance. + 'fe' may be a name pattern or FE instance; if specified, 'fe2' may also + be specified to retrieve sentences with both overt FEs (in either order). + """ + if fe is None and fe2 is not None: + raise FramenetError("exemplars(..., fe=None, fe2=) is not allowed") + elif fe is not None and fe2 is not None: + if not isinstance(fe2, str): + if isinstance(fe, str): + # fe2 is specific to a particular frame. swap fe and fe2 so fe is always used to determine the frame. + fe, fe2 = fe2, fe + elif fe.frame is not fe2.frame: # ensure frames match + raise FramenetError( + "exemplars() call with inconsistent `fe` and `fe2` specification (frames must match)" + ) + if frame is None and fe is not None and not isinstance(fe, str): + frame = fe.frame + + # narrow down to frames matching criteria + + lusByFrame = defaultdict( + list + ) # frame name -> matching LUs, if luNamePattern is specified + if frame is not None or luNamePattern is not None: + if frame is None or isinstance(frame, str): + if luNamePattern is not None: + frames = set() + for lu in self.lus(luNamePattern, frame=frame): + frames.add(lu.frame.ID) + lusByFrame[lu.frame.name].append(lu) + frames = LazyMap(self.frame, list(frames)) + else: + frames = self.frames(frame) + else: + if isinstance(frame, int): + frames = [self.frame(frame)] + else: # frame object + frames = [frame] + + if luNamePattern is not None: + lusByFrame = {frame.name: self.lus(luNamePattern, frame=frame)} + + if fe is not None: # narrow to frames that define this FE + if isinstance(fe, str): + frames = PrettyLazyIteratorList( + f + for f in frames + if fe in f.FE + or any(re.search(fe, ffe, re.I) for ffe in f.FE.keys()) + ) + else: + if fe.frame not in frames: + raise FramenetError( + "exemplars() call with inconsistent `frame` and `fe` specification" + ) + frames = [fe.frame] + + if fe2 is not None: # narrow to frames that ALSO define this FE + if isinstance(fe2, str): + frames = PrettyLazyIteratorList( + f + for f in frames + if fe2 in f.FE + or any(re.search(fe2, ffe, re.I) for ffe in f.FE.keys()) + ) + # else we already narrowed it to a single frame + else: # frame, luNamePattern are None. 
fe, fe2 are None or strings + if fe is not None: + frames = {ffe.frame.ID for ffe in self.fes(fe)} + if fe2 is not None: + frames2 = {ffe.frame.ID for ffe in self.fes(fe2)} + frames = frames & frames2 + frames = LazyMap(self.frame, list(frames)) + else: + frames = self.frames() + + # we've narrowed down 'frames' + # now get exemplars for relevant LUs in those frames + + def _matching_exs(): + for f in frames: + fes = fes2 = None # FEs of interest + if fe is not None: + fes = ( + {ffe for ffe in f.FE.keys() if re.search(fe, ffe, re.I)} + if isinstance(fe, str) + else {fe.name} + ) + if fe2 is not None: + fes2 = ( + {ffe for ffe in f.FE.keys() if re.search(fe2, ffe, re.I)} + if isinstance(fe2, str) + else {fe2.name} + ) + + for lu in ( + lusByFrame[f.name] + if luNamePattern is not None + else f.lexUnit.values() + ): + for ex in lu.exemplars: + if (fes is None or self._exemplar_of_fes(ex, fes)) and ( + fes2 is None or self._exemplar_of_fes(ex, fes2) + ): + yield ex + + return PrettyLazyIteratorList(_matching_exs()) + + def _exemplar_of_fes(self, ex, fes=None): + """ + Given an exemplar sentence and a set of FE names, return the subset of FE names + that are realized overtly in the sentence on the FE, FE2, or FE3 layer. + + If 'fes' is None, returns all overt FE names. + """ + overtNames = set(list(zip(*ex.FE[0]))[2]) if ex.FE[0] else set() + if "FE2" in ex: + overtNames |= set(list(zip(*ex.FE2[0]))[2]) if ex.FE2[0] else set() + if "FE3" in ex: + overtNames |= set(list(zip(*ex.FE3[0]))[2]) if ex.FE3[0] else set() + return overtNames & fes if fes is not None else overtNames + + def ft_sents(self, docNamePattern=None): + """ + Full-text annotation sentences, optionally filtered by document name. + """ + return PrettyLazyIteratorList( + sent for d in self.docs(docNamePattern) for sent in d.sentence + ) + + def frame_relation_types(self): + """ + Obtain a list of frame relation types. + + >>> from nltk.corpus import framenet as fn + >>> frts = sorted(fn.frame_relation_types(), key=itemgetter('ID')) + >>> isinstance(frts, list) + True + >>> len(frts) in (9, 10) # FN 1.5 and 1.7, resp. + True + >>> PrettyDict(frts[0], breakLines=True) + {'ID': 1, + '_type': 'framerelationtype', + 'frameRelations': [ Child=Change_of_consistency>, Child=Rotting>, ...], + 'name': 'Inheritance', + 'subFrameName': 'Child', + 'superFrameName': 'Parent'} + + :return: A list of all of the frame relation types in framenet + :rtype: list(dict) + """ + if not self._freltyp_idx: + self._buildrelationindex() + return self._freltyp_idx.values() + + def frame_relations(self, frame=None, frame2=None, type=None): + """ + :param frame: (optional) frame object, name, or ID; only relations involving + this frame will be returned + :param frame2: (optional; 'frame' must be a different frame) only show relations + between the two specified frames, in either direction + :param type: (optional) frame relation type (name or object); show only relations + of this type + :type frame: int or str or AttrDict + :return: A list of all of the frame relations in framenet + :rtype: list(dict) + + >>> from nltk.corpus import framenet as fn + >>> frels = fn.frame_relations() + >>> isinstance(frels, list) + True + >>> len(frels) in (1676, 2070) # FN 1.5 and 1.7, resp. 
+ True + >>> PrettyList(fn.frame_relations('Cooking_creation'), maxReprSize=0, breakLines=True) + [ Child=Cooking_creation>, + Child=Cooking_creation>, + ReferringEntry=Cooking_creation>] + >>> PrettyList(fn.frame_relations(274), breakLines=True) + [ Child=Dodging>, + Child=Evading>, ...] + >>> PrettyList(fn.frame_relations(fn.frame('Cooking_creation')), breakLines=True) + [ Child=Cooking_creation>, + Child=Cooking_creation>, ...] + >>> PrettyList(fn.frame_relations('Cooking_creation', type='Inheritance')) + [ Child=Cooking_creation>] + >>> PrettyList(fn.frame_relations('Cooking_creation', 'Apply_heat'), breakLines=True) # doctest: +NORMALIZE_WHITESPACE + [ Child=Cooking_creation>, + ReferringEntry=Cooking_creation>] + """ + relation_type = type + + if not self._frel_idx: + self._buildrelationindex() + + rels = None + + if relation_type is not None: + if not isinstance(relation_type, dict): + type = [rt for rt in self.frame_relation_types() if rt.name == type][0] + assert isinstance(type, dict) + + # lookup by 'frame' + if frame is not None: + if isinstance(frame, dict) and "frameRelations" in frame: + rels = PrettyList(frame.frameRelations) + else: + if not isinstance(frame, int): + if isinstance(frame, dict): + frame = frame.ID + else: + frame = self.frame_by_name(frame).ID + rels = [self._frel_idx[frelID] for frelID in self._frel_f_idx[frame]] + + # filter by 'type' + if type is not None: + rels = [rel for rel in rels if rel.type is type] + elif type is not None: + # lookup by 'type' + rels = type.frameRelations + else: + rels = self._frel_idx.values() + + # filter by 'frame2' + if frame2 is not None: + if frame is None: + raise FramenetError( + "frame_relations(frame=None, frame2=) is not allowed" + ) + if not isinstance(frame2, int): + if isinstance(frame2, dict): + frame2 = frame2.ID + else: + frame2 = self.frame_by_name(frame2).ID + if frame == frame2: + raise FramenetError( + "The two frame arguments to frame_relations() must be different frames" + ) + rels = [ + rel + for rel in rels + if rel.superFrame.ID == frame2 or rel.subFrame.ID == frame2 + ] + + return PrettyList( + sorted( + rels, + key=lambda frel: (frel.type.ID, frel.superFrameName, frel.subFrameName), + ) + ) + + def fe_relations(self): + """ + Obtain a list of frame element relations. + + >>> from nltk.corpus import framenet as fn + >>> ferels = fn.fe_relations() + >>> isinstance(ferels, list) + True + >>> len(ferels) in (10020, 12393) # FN 1.5 and 1.7, resp. + True + >>> PrettyDict(ferels[0], breakLines=True) # doctest: +NORMALIZE_WHITESPACE + {'ID': 14642, + '_type': 'ferelation', + 'frameRelation': Child=Lively_place>, + 'subFE': , + 'subFEName': 'Degree', + 'subFrame': , + 'subID': 11370, + 'supID': 2271, + 'superFE': , + 'superFEName': 'Degree', + 'superFrame': , + 'type': } + + :return: A list of all of the frame element relations in framenet + :rtype: list(dict) + """ + if not self._ferel_idx: + self._buildrelationindex() + return PrettyList( + sorted( + self._ferel_idx.values(), + key=lambda ferel: ( + ferel.type.ID, + ferel.frameRelation.superFrameName, + ferel.superFEName, + ferel.frameRelation.subFrameName, + ferel.subFEName, + ), + ) + ) + + def semtypes(self): + """ + Obtain a list of semantic types. + + >>> from nltk.corpus import framenet as fn + >>> stypes = fn.semtypes() + >>> len(stypes) in (73, 109) # FN 1.5 and 1.7, resp. 
+ True + >>> sorted(stypes[0].keys()) + ['ID', '_type', 'abbrev', 'definition', 'definitionMarkup', 'name', 'rootType', 'subTypes', 'superType'] + + :return: A list of all of the semantic types in framenet + :rtype: list(dict) + """ + if not self._semtypes: + self._loadsemtypes() + return PrettyList( + self._semtypes[i] for i in self._semtypes if isinstance(i, int) + ) + + def _load_xml_attributes(self, d, elt): + """ + Extracts a subset of the attributes from the given element and + returns them in a dictionary. + + :param d: A dictionary in which to store the attributes. + :type d: dict + :param elt: An ElementTree Element + :type elt: Element + :return: Returns the input dict ``d`` possibly including attributes from ``elt`` + :rtype: dict + """ + + d = type(d)(d) + + try: + attr_dict = elt.attrib + except AttributeError: + return d + + if attr_dict is None: + return d + + # Ignore these attributes when loading attributes from an xml node + ignore_attrs = [ #'cBy', 'cDate', 'mDate', # <-- annotation metadata that could be of interest + "xsi", + "schemaLocation", + "xmlns", + "bgColor", + "fgColor", + ] + + for attr in attr_dict: + + if any(attr.endswith(x) for x in ignore_attrs): + continue + + val = attr_dict[attr] + if val.isdigit(): + d[attr] = int(val) + else: + d[attr] = val + + return d + + def _strip_tags(self, data): + """ + Gets rid of all tags and newline characters from the given input + + :return: A cleaned-up version of the input string + :rtype: str + """ + + try: + r""" + # Look for boundary issues in markup. (Sometimes FEs are pluralized in definitions.) + m = re.search(r'\w[<][^/]|[<][/][^>]+[>](s\w|[a-rt-z0-9])', data) + if m: + print('Markup boundary:', data[max(0,m.start(0)-10):m.end(0)+10].replace('\n',' '), file=sys.stderr) + """ + + data = data.replace("", "") + data = data.replace("", "") + data = re.sub('', "", data) + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "'") + data = data.replace("", "'") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + + # Get rid of and tags + data = data.replace("", "") + data = data.replace("", "") + + data = data.replace("\n", " ") + except AttributeError: + pass + + return data + + def _handle_elt(self, elt, tagspec=None): + """Extracts and returns the attributes of the given element""" + return self._load_xml_attributes(AttrDict(), elt) + + def _handle_fulltextindex_elt(self, elt, tagspec=None): + """ + Extracts corpus/document info from the fulltextIndex.xml file. + + Note that this function "flattens" the information contained + in each of the "corpus" elements, so that each "document" + element will contain attributes for the corpus and + corpusid. Also, each of the "document" items will contain a + new attribute called "filename" that is the base file name of + the xml file for the document in the "fulltext" subdir of the + Framenet corpus. 
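+
+        The flattening is visible from the public API (editor's sketch):
+        every document record carries its corpus name, and its filename is
+        built as "<corpname>__<docname>.xml".
+
+        >>> from nltk.corpus import framenet as fn
+        >>> meta = fn.docs_metadata()[0]
+        >>> meta.filename.startswith(meta.corpname + '__')
+        True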
+ """ + ftinfo = self._load_xml_attributes(AttrDict(), elt) + corpname = ftinfo.name + corpid = ftinfo.ID + retlist = [] + for sub in elt: + if sub.tag.endswith("document"): + doc = self._load_xml_attributes(AttrDict(), sub) + if "name" in doc: + docname = doc.name + else: + docname = doc.description + doc.filename = f"{corpname}__{docname}.xml" + doc.URL = ( + self._fnweb_url + "/" + self._fulltext_dir + "/" + doc.filename + ) + doc.corpname = corpname + doc.corpid = corpid + retlist.append(doc) + + return retlist + + def _handle_frame_elt(self, elt, ignorekeys=[]): + """Load the info for a Frame from a frame xml file""" + frinfo = self._load_xml_attributes(AttrDict(), elt) + + frinfo["_type"] = "frame" + frinfo["definition"] = "" + frinfo["definitionMarkup"] = "" + frinfo["FE"] = PrettyDict() + frinfo["FEcoreSets"] = [] + frinfo["lexUnit"] = PrettyDict() + frinfo["semTypes"] = [] + for k in ignorekeys: + if k in frinfo: + del frinfo[k] + + for sub in elt: + if sub.tag.endswith("definition") and "definition" not in ignorekeys: + frinfo["definitionMarkup"] = sub.text + frinfo["definition"] = self._strip_tags(sub.text) + elif sub.tag.endswith("FE") and "FE" not in ignorekeys: + feinfo = self._handle_fe_elt(sub) + frinfo["FE"][feinfo.name] = feinfo + feinfo["frame"] = frinfo # backpointer + elif sub.tag.endswith("FEcoreSet") and "FEcoreSet" not in ignorekeys: + coreset = self._handle_fecoreset_elt(sub) + # assumes all FEs have been loaded before coresets + frinfo["FEcoreSets"].append( + PrettyList(frinfo["FE"][fe.name] for fe in coreset) + ) + elif sub.tag.endswith("lexUnit") and "lexUnit" not in ignorekeys: + luentry = self._handle_framelexunit_elt(sub) + if luentry["status"] in self._bad_statuses: + # problematic LU entry; ignore it + continue + luentry["frame"] = frinfo + luentry["URL"] = ( + self._fnweb_url + + "/" + + self._lu_dir + + "/" + + "lu{}.xml".format(luentry["ID"]) + ) + luentry["subCorpus"] = Future( + (lambda lu: lambda: self._lu_file(lu).subCorpus)(luentry) + ) + luentry["exemplars"] = Future( + (lambda lu: lambda: self._lu_file(lu).exemplars)(luentry) + ) + frinfo["lexUnit"][luentry.name] = luentry + if not self._lu_idx: + self._buildluindex() + self._lu_idx[luentry.ID] = luentry + elif sub.tag.endswith("semType") and "semTypes" not in ignorekeys: + semtypeinfo = self._load_xml_attributes(AttrDict(), sub) + frinfo["semTypes"].append(self.semtype(semtypeinfo.ID)) + + frinfo["frameRelations"] = self.frame_relations(frame=frinfo) + + # resolve 'requires' and 'excludes' links between FEs of this frame + for fe in frinfo.FE.values(): + if fe.requiresFE: + name, ID = fe.requiresFE.name, fe.requiresFE.ID + fe.requiresFE = frinfo.FE[name] + assert fe.requiresFE.ID == ID + if fe.excludesFE: + name, ID = fe.excludesFE.name, fe.excludesFE.ID + fe.excludesFE = frinfo.FE[name] + assert fe.excludesFE.ID == ID + + return frinfo + + def _handle_fecoreset_elt(self, elt): + """Load fe coreset info from xml.""" + info = self._load_xml_attributes(AttrDict(), elt) + tmp = [] + for sub in elt: + tmp.append(self._load_xml_attributes(AttrDict(), sub)) + + return tmp + + def _handle_framerelationtype_elt(self, elt, *args): + """Load frame-relation element and its child fe-relation elements from frRelation.xml.""" + info = self._load_xml_attributes(AttrDict(), elt) + info["_type"] = "framerelationtype" + info["frameRelations"] = PrettyList() + + for sub in elt: + if sub.tag.endswith("frameRelation"): + frel = self._handle_framerelation_elt(sub) + frel["type"] = info # backpointer + for ferel in 
frel.feRelations: + ferel["type"] = info + info["frameRelations"].append(frel) + + return info + + def _handle_framerelation_elt(self, elt): + """Load frame-relation element and its child fe-relation elements from frRelation.xml.""" + info = self._load_xml_attributes(AttrDict(), elt) + assert info["superFrameName"] != info["subFrameName"], (elt, info) + info["_type"] = "framerelation" + info["feRelations"] = PrettyList() + + for sub in elt: + if sub.tag.endswith("FERelation"): + ferel = self._handle_elt(sub) + ferel["_type"] = "ferelation" + ferel["frameRelation"] = info # backpointer + info["feRelations"].append(ferel) + + return info + + def _handle_fulltextannotation_elt(self, elt): + """Load full annotation info for a document from its xml + file. The main element (fullTextAnnotation) contains a 'header' + element (which we ignore here) and a bunch of 'sentence' + elements.""" + info = AttrDict() + info["_type"] = "fulltext_annotation" + info["sentence"] = [] + + for sub in elt: + if sub.tag.endswith("header"): + continue # not used + elif sub.tag.endswith("sentence"): + s = self._handle_fulltext_sentence_elt(sub) + s.doc = info + info["sentence"].append(s) + + return info + + def _handle_fulltext_sentence_elt(self, elt): + """Load information from the given 'sentence' element. Each + 'sentence' element contains a "text" and "annotationSet" sub + elements.""" + info = self._load_xml_attributes(AttrDict(), elt) + info["_type"] = "fulltext_sentence" + info["annotationSet"] = [] + info["targets"] = [] + target_spans = set() + info["_ascii"] = types.MethodType( + _annotation_ascii, info + ) # attach a method for this instance + info["text"] = "" + + for sub in elt: + if sub.tag.endswith("text"): + info["text"] = self._strip_tags(sub.text) + elif sub.tag.endswith("annotationSet"): + a = self._handle_fulltextannotationset_elt( + sub, is_pos=(len(info["annotationSet"]) == 0) + ) + if "cxnID" in a: # ignoring construction annotations for now + continue + a.sent = info + a.text = info.text + info["annotationSet"].append(a) + if "Target" in a: + for tspan in a.Target: + if tspan in target_spans: + self._warn( + 'Duplicate target span "{}"'.format( + info.text[slice(*tspan)] + ), + tspan, + "in sentence", + info["ID"], + info.text, + ) + # this can happen in cases like "chemical and biological weapons" + # being annotated as "chemical weapons" and "biological weapons" + else: + target_spans.add(tspan) + info["targets"].append((a.Target, a.luName, a.frameName)) + + assert info["annotationSet"][0].status == "UNANN" + info["POS"] = info["annotationSet"][0].POS + info["POS_tagset"] = info["annotationSet"][0].POS_tagset + return info + + def _handle_fulltextannotationset_elt(self, elt, is_pos=False): + """Load information from the given 'annotationSet' element. Each + 'annotationSet' contains several "layer" elements.""" + + info = self._handle_luannotationset_elt(elt, is_pos=is_pos) + if not is_pos: + info["_type"] = "fulltext_annotationset" + if "cxnID" not in info: # ignoring construction annotations for now + info["LU"] = self.lu( + info.luID, + luName=info.luName, + frameID=info.frameID, + frameName=info.frameName, + ) + info["frame"] = info.LU.frame + return info + + def _handle_fulltextlayer_elt(self, elt): + """Load information from the given 'layer' element. 
Each + 'layer' contains several "label" elements.""" + info = self._load_xml_attributes(AttrDict(), elt) + info["_type"] = "layer" + info["label"] = [] + + for sub in elt: + if sub.tag.endswith("label"): + l = self._load_xml_attributes(AttrDict(), sub) + info["label"].append(l) + + return info + + def _handle_framelexunit_elt(self, elt): + """Load the lexical unit info from an xml element in a frame's xml file.""" + luinfo = AttrDict() + luinfo["_type"] = "lu" + luinfo = self._load_xml_attributes(luinfo, elt) + luinfo["definition"] = "" + luinfo["definitionMarkup"] = "" + luinfo["sentenceCount"] = PrettyDict() + luinfo["lexemes"] = PrettyList() # multiword LUs have multiple lexemes + luinfo["semTypes"] = PrettyList() # an LU can have multiple semtypes + + for sub in elt: + if sub.tag.endswith("definition"): + luinfo["definitionMarkup"] = sub.text + luinfo["definition"] = self._strip_tags(sub.text) + elif sub.tag.endswith("sentenceCount"): + luinfo["sentenceCount"] = self._load_xml_attributes(PrettyDict(), sub) + elif sub.tag.endswith("lexeme"): + lexemeinfo = self._load_xml_attributes(PrettyDict(), sub) + if not isinstance(lexemeinfo.name, str): + # some lexeme names are ints by default: e.g., + # thousand.num has lexeme with name="1000" + lexemeinfo.name = str(lexemeinfo.name) + luinfo["lexemes"].append(lexemeinfo) + elif sub.tag.endswith("semType"): + semtypeinfo = self._load_xml_attributes(PrettyDict(), sub) + luinfo["semTypes"].append(self.semtype(semtypeinfo.ID)) + + # sort lexemes by 'order' attribute + # otherwise, e.g., 'write down.v' may have lexemes in wrong order + luinfo["lexemes"].sort(key=lambda x: x.order) + + return luinfo + + def _handle_lexunit_elt(self, elt, ignorekeys): + """ + Load full info for a lexical unit from its xml file. + This should only be called when accessing corpus annotations + (which are not included in frame files). 
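+
+        The practical consequence (editor's sketch): the only keys present on
+        a full LU but absent from ``lu_basic()`` output are the two whose
+        values come from the LU's own XML file.
+
+        >>> from nltk.corpus import framenet as fn
+        >>> sorted(set(fn.lu(256).keys()) - set(fn.lu_basic(256).keys()))
+        ['exemplars', 'subCorpus']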
+ """ + luinfo = self._load_xml_attributes(AttrDict(), elt) + luinfo["_type"] = "lu" + luinfo["definition"] = "" + luinfo["definitionMarkup"] = "" + luinfo["subCorpus"] = PrettyList() + luinfo["lexemes"] = PrettyList() # multiword LUs have multiple lexemes + luinfo["semTypes"] = PrettyList() # an LU can have multiple semtypes + for k in ignorekeys: + if k in luinfo: + del luinfo[k] + + for sub in elt: + if sub.tag.endswith("header"): + continue # not used + elif sub.tag.endswith("valences"): + continue # not used + elif sub.tag.endswith("definition") and "definition" not in ignorekeys: + luinfo["definitionMarkup"] = sub.text + luinfo["definition"] = self._strip_tags(sub.text) + elif sub.tag.endswith("subCorpus") and "subCorpus" not in ignorekeys: + sc = self._handle_lusubcorpus_elt(sub) + if sc is not None: + luinfo["subCorpus"].append(sc) + elif sub.tag.endswith("lexeme") and "lexeme" not in ignorekeys: + luinfo["lexemes"].append(self._load_xml_attributes(PrettyDict(), sub)) + elif sub.tag.endswith("semType") and "semType" not in ignorekeys: + semtypeinfo = self._load_xml_attributes(AttrDict(), sub) + luinfo["semTypes"].append(self.semtype(semtypeinfo.ID)) + + return luinfo + + def _handle_lusubcorpus_elt(self, elt): + """Load a subcorpus of a lexical unit from the given xml.""" + sc = AttrDict() + try: + sc["name"] = elt.get("name") + except AttributeError: + return None + sc["_type"] = "lusubcorpus" + sc["sentence"] = [] + + for sub in elt: + if sub.tag.endswith("sentence"): + s = self._handle_lusentence_elt(sub) + if s is not None: + sc["sentence"].append(s) + + return sc + + def _handle_lusentence_elt(self, elt): + """Load a sentence from a subcorpus of an LU from xml.""" + info = self._load_xml_attributes(AttrDict(), elt) + info["_type"] = "lusentence" + info["annotationSet"] = [] + info["_ascii"] = types.MethodType( + _annotation_ascii, info + ) # attach a method for this instance + for sub in elt: + if sub.tag.endswith("text"): + info["text"] = self._strip_tags(sub.text) + elif sub.tag.endswith("annotationSet"): + annset = self._handle_luannotationset_elt( + sub, is_pos=(len(info["annotationSet"]) == 0) + ) + if annset is not None: + assert annset.status == "UNANN" or "FE" in annset, annset + if annset.status != "UNANN": + info["frameAnnotation"] = annset + # copy layer info up to current level + for k in ( + "Target", + "FE", + "FE2", + "FE3", + "GF", + "PT", + "POS", + "POS_tagset", + "Other", + "Sent", + "Verb", + "Noun", + "Adj", + "Adv", + "Prep", + "Scon", + "Art", + ): + if k in annset: + info[k] = annset[k] + info["annotationSet"].append(annset) + annset["sent"] = info + annset["text"] = info.text + return info + + def _handle_luannotationset_elt(self, elt, is_pos=False): + """Load an annotation set from a sentence in an subcorpus of an LU""" + info = self._load_xml_attributes(AttrDict(), elt) + info["_type"] = "posannotationset" if is_pos else "luannotationset" + info["layer"] = [] + info["_ascii"] = types.MethodType( + _annotation_ascii, info + ) # attach a method for this instance + + if "cxnID" in info: # ignoring construction annotations for now. 
+ return info + + for sub in elt: + if sub.tag.endswith("layer"): + l = self._handle_lulayer_elt(sub) + if l is not None: + overt = [] + ni = {} # null instantiations + + info["layer"].append(l) + for lbl in l.label: + if "start" in lbl: + thespan = (lbl.start, lbl.end + 1, lbl.name) + if l.name not in ( + "Sent", + "Other", + ): # 'Sent' and 'Other' layers sometimes contain accidental duplicate spans + assert thespan not in overt, (info.ID, l.name, thespan) + overt.append(thespan) + else: # null instantiation + if lbl.name in ni: + self._warn( + "FE with multiple NI entries:", + lbl.name, + ni[lbl.name], + lbl.itype, + ) + else: + ni[lbl.name] = lbl.itype + overt = sorted(overt) + + if l.name == "Target": + if not overt: + self._warn( + "Skipping empty Target layer in annotation set ID={}".format( + info.ID + ) + ) + continue + assert all(lblname == "Target" for i, j, lblname in overt) + if "Target" in info: + self._warn( + "Annotation set {} has multiple Target layers".format( + info.ID + ) + ) + else: + info["Target"] = [(i, j) for (i, j, _) in overt] + elif l.name == "FE": + if l.rank == 1: + assert "FE" not in info + info["FE"] = (overt, ni) + # assert False,info + else: + # sometimes there are 3 FE layers! e.g. Change_position_on_a_scale.fall.v + assert 2 <= l.rank <= 3, l.rank + k = "FE" + str(l.rank) + assert k not in info + info[k] = (overt, ni) + elif l.name in ("GF", "PT"): + assert l.rank == 1 + info[l.name] = overt + elif l.name in ("BNC", "PENN"): + assert l.rank == 1 + info["POS"] = overt + info["POS_tagset"] = l.name + else: + if is_pos: + if l.name not in ("NER", "WSL"): + self._warn( + "Unexpected layer in sentence annotationset:", + l.name, + ) + else: + if l.name not in ( + "Sent", + "Verb", + "Noun", + "Adj", + "Adv", + "Prep", + "Scon", + "Art", + "Other", + ): + self._warn( + "Unexpected layer in frame annotationset:", l.name + ) + info[l.name] = overt + if not is_pos and "cxnID" not in info: + if "Target" not in info: + self._warn(f"Missing target in annotation set ID={info.ID}") + assert "FE" in info + if "FE3" in info: + assert "FE2" in info + + return info + + def _handle_lulayer_elt(self, elt): + """Load a layer from an annotation set""" + layer = self._load_xml_attributes(AttrDict(), elt) + layer["_type"] = "lulayer" + layer["label"] = [] + + for sub in elt: + if sub.tag.endswith("label"): + l = self._load_xml_attributes(AttrDict(), sub) + if l is not None: + layer["label"].append(l) + return layer + + def _handle_fe_elt(self, elt): + feinfo = self._load_xml_attributes(AttrDict(), elt) + feinfo["_type"] = "fe" + feinfo["definition"] = "" + feinfo["definitionMarkup"] = "" + feinfo["semType"] = None + feinfo["requiresFE"] = None + feinfo["excludesFE"] = None + for sub in elt: + if sub.tag.endswith("definition"): + feinfo["definitionMarkup"] = sub.text + feinfo["definition"] = self._strip_tags(sub.text) + elif sub.tag.endswith("semType"): + stinfo = self._load_xml_attributes(AttrDict(), sub) + feinfo["semType"] = self.semtype(stinfo.ID) + elif sub.tag.endswith("requiresFE"): + feinfo["requiresFE"] = self._load_xml_attributes(AttrDict(), sub) + elif sub.tag.endswith("excludesFE"): + feinfo["excludesFE"] = self._load_xml_attributes(AttrDict(), sub) + + return feinfo + + def _handle_semtype_elt(self, elt, tagspec=None): + semt = self._load_xml_attributes(AttrDict(), elt) + semt["_type"] = "semtype" + semt["superType"] = None + semt["subTypes"] = PrettyList() + for sub in elt: + if sub.text is not None: + semt["definitionMarkup"] = sub.text + semt["definition"] = 
self._strip_tags(sub.text) + else: + supertypeinfo = self._load_xml_attributes(AttrDict(), sub) + semt["superType"] = supertypeinfo + # the supertype may not have been loaded yet + + return semt + + +# +# Demo +# +def demo(): + from nltk.corpus import framenet as fn + + # + # It is not necessary to explicitly build the indexes by calling + # buildindexes(). We do this here just for demo purposes. If the + # indexes are not built explicitly, they will be built as needed. + # + print("Building the indexes...") + fn.buildindexes() + + # + # Get some statistics about the corpus + # + print("Number of Frames:", len(fn.frames())) + print("Number of Lexical Units:", len(fn.lus())) + print("Number of annotated documents:", len(fn.docs())) + print() + + # + # Frames + # + print( + 'getting frames whose name matches the (case insensitive) regex: "(?i)medical"' + ) + medframes = fn.frames(r"(?i)medical") + print(f'Found {len(medframes)} Frames whose name matches "(?i)medical":') + print([(f.name, f.ID) for f in medframes]) + + # + # store the first frame in the list of frames + # + tmp_id = medframes[0].ID + m_frame = fn.frame(tmp_id) # reads all info for the frame + + # + # get the frame relations + # + print( + '\nNumber of frame relations for the "{}" ({}) frame:'.format( + m_frame.name, m_frame.ID + ), + len(m_frame.frameRelations), + ) + for fr in m_frame.frameRelations: + print(" ", fr) + + # + # get the names of the Frame Elements + # + print( + f'\nNumber of Frame Elements in the "{m_frame.name}" frame:', + len(m_frame.FE), + ) + print(" ", [x for x in m_frame.FE]) + + # + # get the names of the "Core" Frame Elements + # + print(f'\nThe "core" Frame Elements in the "{m_frame.name}" frame:') + print(" ", [x.name for x in m_frame.FE.values() if x.coreType == "Core"]) + + # + # get all of the Lexical Units that are incorporated in the + # 'Ailment' FE of the 'Medical_conditions' frame (id=239) + # + print('\nAll Lexical Units that are incorporated in the "Ailment" FE:') + m_frame = fn.frame(239) + ailment_lus = [ + x + for x in m_frame.lexUnit.values() + if "incorporatedFE" in x and x.incorporatedFE == "Ailment" + ] + print(" ", [x.name for x in ailment_lus]) + + # + # get all of the Lexical Units for the frame + # + print( + f'\nNumber of Lexical Units in the "{m_frame.name}" frame:', + len(m_frame.lexUnit), + ) + print(" ", [x.name for x in m_frame.lexUnit.values()][:5], "...") + + # + # get basic info on the second LU in the frame + # + tmp_id = m_frame.lexUnit["ailment.n"].ID # grab the id of the specified LU + luinfo = fn.lu_basic(tmp_id) # get basic info on the LU + print(f"\nInformation on the LU: {luinfo.name}") + pprint(luinfo) + + # + # Get a list of all of the corpora used for fulltext annotation + # + print("\nNames of all of the corpora used for fulltext annotation:") + allcorpora = {x.corpname for x in fn.docs_metadata()} + pprint(list(allcorpora)) + + # + # Get the names of the annotated documents in the first corpus + # + firstcorp = list(allcorpora)[0] + firstcorp_docs = fn.docs(firstcorp) + print(f'\nNames of the annotated documents in the "{firstcorp}" corpus:') + pprint([x.filename for x in firstcorp_docs]) + + # + # Search for frames containing LUs whose name attribute matches a + # regexp pattern. + # + # Note: if you were going to be doing a lot of this type of + # searching, you'd want to build an index that maps from + # lemmas to frames because each time frames_by_lemma() is + # called, it has to search through ALL of the frame XML files + # in the db. 
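+ # A rough, hypothetical sketch of such an index, built once up front:
+ #
+ #     lemma_to_frames = {}
+ #     for f in fn.frames():
+ #         for lu_name in f.lexUnit.keys():
+ #             lemma_to_frames.setdefault(lu_name, []).append(f.name)
+ #
+ # Looking lemmas up in a dict like this avoids re-reading every frame XML
+ # file on each query.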
+ print( + '\nSearching for all Frames that have a lemma that matches the regexp: "^run.v$":' + ) + pprint(fn.frames_by_lemma(r"^run.v$")) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py new file mode 100644 index 0000000000000000000000000000000000000000..24f83cfaebcf9a583a33806136f8788b112aaf95 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py @@ -0,0 +1,116 @@ +# Natural Language Toolkit: IEER Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the Information Extraction and Entity Recognition Corpus. + +NIST 1999 Information Extraction: Entity Recognition Evaluation +https://www.itl.nist.gov/iad/894.01/tests/ie-er/er_99/er_99.htm + +This corpus contains the NEWSWIRE development test data for the +NIST 1999 IE-ER Evaluation. The files were taken from the +subdirectory: ``/ie_er_99/english/devtest/newswire/*.ref.nwt`` +and filenames were shortened. + +The corpus contains the following files: APW_19980314, APW_19980424, +APW_19980429, NYT_19980315, NYT_19980403, and NYT_19980407. +""" + +import nltk +from nltk.corpus.reader.api import * + +#: A dictionary whose keys are the names of documents in this corpus; +#: and whose values are descriptions of those documents' contents. +titles = { + "APW_19980314": "Associated Press Weekly, 14 March 1998", + "APW_19980424": "Associated Press Weekly, 24 April 1998", + "APW_19980429": "Associated Press Weekly, 29 April 1998", + "NYT_19980315": "New York Times, 15 March 1998", + "NYT_19980403": "New York Times, 3 April 1998", + "NYT_19980407": "New York Times, 7 April 1998", +} + +#: A list of all documents in this corpus. +documents = sorted(titles) + + +class IEERDocument: + def __init__(self, text, docno=None, doctype=None, date_time=None, headline=""): + self.text = text + self.docno = docno + self.doctype = doctype + self.date_time = date_time + self.headline = headline + + def __repr__(self): + if self.headline: + headline = " ".join(self.headline.leaves()) + else: + headline = ( + " ".join([w for w in self.text.leaves() if w[:1] != "<"][:12]) + "..." + ) + if self.docno is not None: + return f"" + else: + return "" % headline + + +class IEERCorpusReader(CorpusReader): + """ """ + + def docs(self, fileids=None): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def parsed_docs(self, fileids=None): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_parsed_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_parsed_block(self, stream): + # TODO: figure out while empty documents are being returned + return [ + self._parse(doc) + for doc in self._read_block(stream) + if self._parse(doc).docno is not None + ] + + def _parse(self, doc): + val = nltk.chunk.ieerstr2tree(doc, root_label="DOCUMENT") + if isinstance(val, dict): + return IEERDocument(**val) + else: + return IEERDocument(val) + + def _read_block(self, stream): + out = [] + # Skip any preamble. 
+ while True: + line = stream.readline() + if not line: + break + if line.strip() == "": + break + out.append(line) + # Read the document + while True: + line = stream.readline() + if not line: + break + out.append(line) + if line.strip() == "": + break + # Return the document + return ["\n".join(out)] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py new file mode 100644 index 0000000000000000000000000000000000000000..d2d16c90f4edf380658af969a0488c28d5f1b24a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py @@ -0,0 +1,356 @@ +# Natural Language Toolkit: IPI PAN Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Konrad Goluchowski +# URL: +# For license information, see LICENSE.TXT + +import functools + +from nltk.corpus.reader.api import CorpusReader +from nltk.corpus.reader.util import StreamBackedCorpusView, concat + + +def _parse_args(fun): + @functools.wraps(fun) + def decorator(self, fileids=None, **kwargs): + kwargs.pop("tags", None) + if not fileids: + fileids = self.fileids() + return fun(self, fileids, **kwargs) + + return decorator + + +class IPIPANCorpusReader(CorpusReader): + """ + Corpus reader designed to work with corpus created by IPI PAN. + See http://korpus.pl/en/ for more details about IPI PAN corpus. + + The corpus includes information about text domain, channel and categories. + You can access possible values using ``domains()``, ``channels()`` and + ``categories()``. You can use also this metadata to filter files, e.g.: + ``fileids(channel='prasa')``, ``fileids(categories='publicystyczny')``. + + The reader supports methods: words, sents, paras and their tagged versions. + You can get part of speech instead of full tag by giving "simplify_tags=True" + parameter, e.g.: ``tagged_sents(simplify_tags=True)``. + + Also you can get all tags disambiguated tags specifying parameter + "one_tag=False", e.g.: ``tagged_paras(one_tag=False)``. + + You can get all tags that were assigned by a morphological analyzer specifying + parameter "disamb_only=False", e.g. ``tagged_words(disamb_only=False)``. + + The IPIPAN Corpus contains tags indicating if there is a space between two + tokens. To add special "no space" markers, you should specify parameter + "append_no_space=True", e.g. ``tagged_words(append_no_space=True)``. + As a result in place where there should be no space between two tokens new + pair ('', 'no-space') will be inserted (for tagged data) and just '' for + methods without tags. + + The corpus reader can also try to append spaces between words. To enable this + option, specify parameter "append_space=True", e.g. ``words(append_space=True)``. + As a result either ' ' or (' ', 'space') will be inserted between tokens. + + By default, xml entities like " and & are replaced by corresponding + characters. You can turn off this feature, specifying parameter + "replace_xmlentities=False", e.g. ``words(replace_xmlentities=False)``. 
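+
+ A minimal, illustrative session (the root path and file pattern below are
+ assumptions, not part of the corpus distribution)::
+
+     from nltk.corpus.reader import IPIPANCorpusReader
+     reader = IPIPANCorpusReader('/path/to/ipipan', r'.*morph\.xml')
+     prasa_files = reader.fileids(channels='prasa')
+     tagged = reader.tagged_words(fileids=prasa_files, simplify_tags=True)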
+ """ + + def __init__(self, root, fileids): + CorpusReader.__init__(self, root, fileids, None, None) + + def channels(self, fileids=None): + if not fileids: + fileids = self.fileids() + return self._parse_header(fileids, "channel") + + def domains(self, fileids=None): + if not fileids: + fileids = self.fileids() + return self._parse_header(fileids, "domain") + + def categories(self, fileids=None): + if not fileids: + fileids = self.fileids() + return [ + self._map_category(cat) for cat in self._parse_header(fileids, "keyTerm") + ] + + def fileids(self, channels=None, domains=None, categories=None): + if channels is not None and domains is not None and categories is not None: + raise ValueError( + "You can specify only one of channels, domains " + "and categories parameter at once" + ) + if channels is None and domains is None and categories is None: + return CorpusReader.fileids(self) + if isinstance(channels, str): + channels = [channels] + if isinstance(domains, str): + domains = [domains] + if isinstance(categories, str): + categories = [categories] + if channels: + return self._list_morph_files_by("channel", channels) + elif domains: + return self._list_morph_files_by("domain", domains) + else: + return self._list_morph_files_by( + "keyTerm", categories, map=self._map_category + ) + + @_parse_args + def sents(self, fileids=None, **kwargs): + return concat( + [ + self._view( + fileid, mode=IPIPANCorpusView.SENTS_MODE, tags=False, **kwargs + ) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def paras(self, fileids=None, **kwargs): + return concat( + [ + self._view( + fileid, mode=IPIPANCorpusView.PARAS_MODE, tags=False, **kwargs + ) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def words(self, fileids=None, **kwargs): + return concat( + [ + self._view(fileid, tags=False, **kwargs) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def tagged_sents(self, fileids=None, **kwargs): + return concat( + [ + self._view(fileid, mode=IPIPANCorpusView.SENTS_MODE, **kwargs) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def tagged_paras(self, fileids=None, **kwargs): + return concat( + [ + self._view(fileid, mode=IPIPANCorpusView.PARAS_MODE, **kwargs) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def tagged_words(self, fileids=None, **kwargs): + return concat( + [self._view(fileid, **kwargs) for fileid in self._list_morph_files(fileids)] + ) + + def _list_morph_files(self, fileids): + return [f for f in self.abspaths(fileids)] + + def _list_header_files(self, fileids): + return [ + f.replace("morph.xml", "header.xml") + for f in self._list_morph_files(fileids) + ] + + def _parse_header(self, fileids, tag): + values = set() + for f in self._list_header_files(fileids): + values_list = self._get_tag(f, tag) + for v in values_list: + values.add(v) + return list(values) + + def _list_morph_files_by(self, tag, values, map=None): + fileids = self.fileids() + ret_fileids = set() + for f in fileids: + fp = self.abspath(f).replace("morph.xml", "header.xml") + values_list = self._get_tag(fp, tag) + for value in values_list: + if map is not None: + value = map(value) + if value in values: + ret_fileids.add(f) + return list(ret_fileids) + + def _get_tag(self, f, tag): + tags = [] + with open(f) as infile: + header = infile.read() + tag_end = 0 + while True: + tag_pos = header.find("<" + tag, tag_end) + if tag_pos < 0: + return tags + tag_end = header.find("", tag_pos) + 
tags.append(header[tag_pos + len(tag) + 2 : tag_end]) + + def _map_category(self, cat): + pos = cat.find(">") + if pos == -1: + return cat + else: + return cat[pos + 1 :] + + def _view(self, filename, **kwargs): + tags = kwargs.pop("tags", True) + mode = kwargs.pop("mode", 0) + simplify_tags = kwargs.pop("simplify_tags", False) + one_tag = kwargs.pop("one_tag", True) + disamb_only = kwargs.pop("disamb_only", True) + append_no_space = kwargs.pop("append_no_space", False) + append_space = kwargs.pop("append_space", False) + replace_xmlentities = kwargs.pop("replace_xmlentities", True) + + if len(kwargs) > 0: + raise ValueError("Unexpected arguments: %s" % kwargs.keys()) + if not one_tag and not disamb_only: + raise ValueError( + "You cannot specify both one_tag=False and " "disamb_only=False" + ) + if not tags and (simplify_tags or not one_tag or not disamb_only): + raise ValueError( + "You cannot specify simplify_tags, one_tag or " + "disamb_only with functions other than tagged_*" + ) + + return IPIPANCorpusView( + filename, + tags=tags, + mode=mode, + simplify_tags=simplify_tags, + one_tag=one_tag, + disamb_only=disamb_only, + append_no_space=append_no_space, + append_space=append_space, + replace_xmlentities=replace_xmlentities, + ) + + +class IPIPANCorpusView(StreamBackedCorpusView): + + WORDS_MODE = 0 + SENTS_MODE = 1 + PARAS_MODE = 2 + + def __init__(self, filename, startpos=0, **kwargs): + StreamBackedCorpusView.__init__(self, filename, None, startpos, None) + self.in_sentence = False + self.position = 0 + + self.show_tags = kwargs.pop("tags", True) + self.disamb_only = kwargs.pop("disamb_only", True) + self.mode = kwargs.pop("mode", IPIPANCorpusView.WORDS_MODE) + self.simplify_tags = kwargs.pop("simplify_tags", False) + self.one_tag = kwargs.pop("one_tag", True) + self.append_no_space = kwargs.pop("append_no_space", False) + self.append_space = kwargs.pop("append_space", False) + self.replace_xmlentities = kwargs.pop("replace_xmlentities", True) + + def read_block(self, stream): + sentence = [] + sentences = [] + space = False + no_space = False + + tags = set() + + lines = self._read_data(stream) + + while True: + + # we may have only part of last line + if len(lines) <= 1: + self._seek(stream) + lines = self._read_data(stream) + + if lines == [""]: + assert not sentences + return [] + + line = lines.pop() + self.position += len(line) + 1 + + if line.startswith('"): + if self.append_space: + no_space = True + if self.append_no_space: + if self.show_tags: + sentence.append(("", "no-space")) + else: + sentence.append("") + elif line.startswith(" +# URL: +# For license information, see LICENSE.TXT + +# For more information, see http://lilyx.net/pages/nltkjapanesecorpus.html + +import re + +from nltk.corpus.reader.api import CorpusReader, SyntaxCorpusReader +from nltk.corpus.reader.util import ( + FileSystemPathPointer, + find_corpus_fileids, + read_blankline_block, +) +from nltk.parse import DependencyGraph + +# default function to convert morphlist to str for tree representation +_morphs2str_default = lambda morphs: "/".join(m[0] for m in morphs if m[0] != "EOS") + + +class KNBCorpusReader(SyntaxCorpusReader): + """ + This class implements: + - ``__init__``, which specifies the location of the corpus + and a method for detecting the sentence blocks in corpus files. + - ``_read_block``, which reads a block from the input stream. + - ``_word``, which takes a block and returns a list of list of words. + - ``_tag``, which takes a block and returns a list of list of tagged + words. 
+ - ``_parse``, which takes a block and returns a list of parsed + sentences. + + The structure of tagged words: + tagged_word = (word(str), tags(tuple)) + tags = (surface, reading, lemma, pos1, posid1, pos2, posid2, pos3, posid3, others ...) + + Usage example + + >>> from nltk.corpus.util import LazyCorpusLoader + >>> knbc = LazyCorpusLoader( + ... 'knbc/corpus1', + ... KNBCorpusReader, + ... r'.*/KN.*', + ... encoding='euc-jp', + ... ) + + >>> len(knbc.sents()[0]) + 9 + + """ + + def __init__(self, root, fileids, encoding="utf8", morphs2str=_morphs2str_default): + """ + Initialize KNBCorpusReader + morphs2str is a function to convert morphlist to str for tree representation + for _parse() + """ + SyntaxCorpusReader.__init__(self, root, fileids, encoding) + self.morphs2str = morphs2str + + def _read_block(self, stream): + # blocks are split by blankline (or EOF) - default + return read_blankline_block(stream) + + def _word(self, t): + res = [] + for line in t.splitlines(): + # ignore the Bunsets headers + if not re.match(r"EOS|\*|\#|\+", line): + cells = line.strip().split(" ") + res.append(cells[0]) + + return res + + # ignores tagset argument + def _tag(self, t, tagset=None): + res = [] + for line in t.splitlines(): + # ignore the Bunsets headers + if not re.match(r"EOS|\*|\#|\+", line): + cells = line.strip().split(" ") + # convert cells to morph tuples + res.append((cells[0], " ".join(cells[1:]))) + + return res + + def _parse(self, t): + dg = DependencyGraph() + i = 0 + for line in t.splitlines(): + if line[0] in "*+": + # start of bunsetsu or tag + + cells = line.strip().split(" ", 3) + m = re.match(r"([\-0-9]*)([ADIP])", cells[1]) + + assert m is not None + + node = dg.nodes[i] + node.update({"address": i, "rel": m.group(2), "word": []}) + + dep_parent = int(m.group(1)) + + if dep_parent == -1: + dg.root = node + else: + dg.nodes[dep_parent]["deps"].append(i) + + i += 1 + elif line[0] != "#": + # normal morph + cells = line.strip().split(" ") + # convert cells to morph tuples + morph = cells[0], " ".join(cells[1:]) + dg.nodes[i - 1]["word"].append(morph) + + if self.morphs2str: + for node in dg.nodes.values(): + node["word"] = self.morphs2str(node["word"]) + + return dg.tree() + + +###################################################################### +# Demo +###################################################################### + + +def demo(): + + import nltk + from nltk.corpus.util import LazyCorpusLoader + + root = nltk.data.find("corpora/knbc/corpus1") + fileids = [ + f + for f in find_corpus_fileids(FileSystemPathPointer(root), ".*") + if re.search(r"\d\-\d\-[\d]+\-[\d]+", f) + ] + + def _knbc_fileids_sort(x): + cells = x.split("-") + return (cells[0], int(cells[1]), int(cells[2]), int(cells[3])) + + knbc = LazyCorpusLoader( + "knbc/corpus1", + KNBCorpusReader, + sorted(fileids, key=_knbc_fileids_sort), + encoding="euc-jp", + ) + + print(knbc.fileids()[:10]) + print("".join(knbc.words()[:100])) + + print("\n\n".join(str(tree) for tree in knbc.parsed_sents()[:2])) + + knbc.morphs2str = lambda morphs: "/".join( + "{}({})".format(m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS" + ).encode("utf-8") + + print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2])) + + print( + "\n".join( + " ".join("{}/{}".format(w[0], w[1].split(" ")[2]) for w in sent) + for sent in knbc.tagged_sents()[0:2] + ) + ) + + +def test(): + + from nltk.corpus.util import LazyCorpusLoader + + knbc = LazyCorpusLoader( + "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp" + ) + 
assert isinstance(knbc.words()[0], str) + assert isinstance(knbc.sents()[0][0], str) + assert isinstance(knbc.tagged_words()[0], tuple) + assert isinstance(knbc.tagged_sents()[0][0], tuple) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/lin.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/lin.py new file mode 100644 index 0000000000000000000000000000000000000000..15c20a6803c0c83557cd2f4689cddecfdd2d83da --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/lin.py @@ -0,0 +1,183 @@ +# Natural Language Toolkit: Lin's Thesaurus +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dan Blanchard +# URL: +# For license information, see LICENSE.txt + +import re +from collections import defaultdict +from functools import reduce + +from nltk.corpus.reader import CorpusReader + + +class LinThesaurusCorpusReader(CorpusReader): + """Wrapper for the LISP-formatted thesauruses distributed by Dekang Lin.""" + + # Compiled regular expression for extracting the key from the first line of each + # thesaurus entry + _key_re = re.compile(r'\("?([^"]+)"? \(desc [0-9.]+\).+') + + @staticmethod + def __defaultdict_factory(): + """Factory for creating defaultdict of defaultdict(dict)s""" + return defaultdict(dict) + + def __init__(self, root, badscore=0.0): + """ + Initialize the thesaurus. + + :param root: root directory containing thesaurus LISP files + :type root: C{string} + :param badscore: the score to give to words which do not appear in each other's sets of synonyms + :type badscore: C{float} + """ + + super().__init__(root, r"sim[A-Z]\.lsp") + self._thesaurus = defaultdict(LinThesaurusCorpusReader.__defaultdict_factory) + self._badscore = badscore + for path, encoding, fileid in self.abspaths( + include_encoding=True, include_fileid=True + ): + with open(path) as lin_file: + first = True + for line in lin_file: + line = line.strip() + # Start of entry + if first: + key = LinThesaurusCorpusReader._key_re.sub(r"\1", line) + first = False + # End of entry + elif line == "))": + first = True + # Lines with pairs of ngrams and scores + else: + split_line = line.split("\t") + if len(split_line) == 2: + ngram, score = split_line + self._thesaurus[fileid][key][ngram.strip('"')] = float( + score + ) + + def similarity(self, ngram1, ngram2, fileid=None): + """ + Returns the similarity score for two ngrams. + + :param ngram1: first ngram to compare + :type ngram1: C{string} + :param ngram2: second ngram to compare + :type ngram2: C{string} + :param fileid: thesaurus fileid to search in. If None, search all fileids. + :type fileid: C{string} + :return: If fileid is specified, just the score for the two ngrams; otherwise, + list of tuples of fileids and scores. 
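+
+ Illustrative use (assumes the ``lin_thesaurus`` corpus data is installed)::
+
+     from nltk.corpus import lin_thesaurus as thes
+     thes.similarity("business", "enterprise", fileid="simN.lsp")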
+ """ + # Entries don't contain themselves, so make sure similarity between item and itself is 1.0 + if ngram1 == ngram2: + if fileid: + return 1.0 + else: + return [(fid, 1.0) for fid in self._fileids] + else: + if fileid: + return ( + self._thesaurus[fileid][ngram1][ngram2] + if ngram2 in self._thesaurus[fileid][ngram1] + else self._badscore + ) + else: + return [ + ( + fid, + ( + self._thesaurus[fid][ngram1][ngram2] + if ngram2 in self._thesaurus[fid][ngram1] + else self._badscore + ), + ) + for fid in self._fileids + ] + + def scored_synonyms(self, ngram, fileid=None): + """ + Returns a list of scored synonyms (tuples of synonyms and scores) for the current ngram + + :param ngram: ngram to lookup + :type ngram: C{string} + :param fileid: thesaurus fileid to search in. If None, search all fileids. + :type fileid: C{string} + :return: If fileid is specified, list of tuples of scores and synonyms; otherwise, + list of tuples of fileids and lists, where inner lists consist of tuples of + scores and synonyms. + """ + if fileid: + return self._thesaurus[fileid][ngram].items() + else: + return [ + (fileid, self._thesaurus[fileid][ngram].items()) + for fileid in self._fileids + ] + + def synonyms(self, ngram, fileid=None): + """ + Returns a list of synonyms for the current ngram. + + :param ngram: ngram to lookup + :type ngram: C{string} + :param fileid: thesaurus fileid to search in. If None, search all fileids. + :type fileid: C{string} + :return: If fileid is specified, list of synonyms; otherwise, list of tuples of fileids and + lists, where inner lists contain synonyms. + """ + if fileid: + return self._thesaurus[fileid][ngram].keys() + else: + return [ + (fileid, self._thesaurus[fileid][ngram].keys()) + for fileid in self._fileids + ] + + def __contains__(self, ngram): + """ + Determines whether or not the given ngram is in the thesaurus. + + :param ngram: ngram to lookup + :type ngram: C{string} + :return: whether the given ngram is in the thesaurus. 
+ """ + return reduce( + lambda accum, fileid: accum or (ngram in self._thesaurus[fileid]), + self._fileids, + False, + ) + + +###################################################################### +# Demo +###################################################################### + + +def demo(): + from nltk.corpus import lin_thesaurus as thes + + word1 = "business" + word2 = "enterprise" + print("Getting synonyms for " + word1) + print(thes.synonyms(word1)) + + print("Getting scored synonyms for " + word1) + print(thes.scored_synonyms(word1)) + + print("Getting synonyms from simN.lsp (noun subsection) for " + word1) + print(thes.synonyms(word1, fileid="simN.lsp")) + + print("Getting synonyms from simN.lsp (noun subsection) for " + word1) + print(thes.synonyms(word1, fileid="simN.lsp")) + + print(f"Similarity score for {word1} and {word2}:") + print(thes.similarity(word1, word2)) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/markdown.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/markdown.py new file mode 100644 index 0000000000000000000000000000000000000000..8df4f924e25426dbe30ef2484f3a0cb4cb1a1740 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/markdown.py @@ -0,0 +1,342 @@ +from collections import namedtuple +from functools import partial, wraps + +from nltk.corpus.reader.api import CategorizedCorpusReader +from nltk.corpus.reader.plaintext import PlaintextCorpusReader +from nltk.corpus.reader.util import concat, read_blankline_block +from nltk.tokenize import blankline_tokenize, sent_tokenize, word_tokenize + + +def comma_separated_string_args(func): + """ + A decorator that allows a function to be called with + a single string of comma-separated values which become + individual function arguments. + """ + + @wraps(func) + def wrapper(*args, **kwargs): + _args = list() + for arg in args: + if isinstance(arg, str): + _args.append({part.strip() for part in arg.split(",")}) + elif isinstance(arg, list): + _args.append(set(arg)) + else: + _args.append(arg) + for name, value in kwargs.items(): + if isinstance(value, str): + kwargs[name] = {part.strip() for part in value.split(",")} + return func(*_args, **kwargs) + + return wrapper + + +def read_parse_blankline_block(stream, parser): + block = read_blankline_block(stream) + if block: + return [parser.render(block[0])] + return block + + +class MarkdownBlock: + def __init__(self, content): + self.content = content + self.truncate_at = 16 + + def __repr__(self): + return f"{self.__class__.__name__}(content={repr(str(self))})" + + def __str__(self): + return ( + f"{self.content[:self.truncate_at]}" + f"{'...' 
if len(self.content) > self.truncate_at else ''}" + ) + + @property + def raw(self): + return self.content + + @property + def words(self): + return word_tokenize(self.content) + + @property + def sents(self): + return [word_tokenize(sent) for sent in sent_tokenize(self.content)] + + @property + def paras(self): + return [ + [word_tokenize(sent) for sent in sent_tokenize(para)] + for para in blankline_tokenize(self.content) + ] + + +class CodeBlock(MarkdownBlock): + def __init__(self, language, *args): + self.language = language + super().__init__(*args) + + @property + def sents(self): + return [word_tokenize(line) for line in self.content.splitlines()] + + @property + def lines(self): + return self.content.splitlines() + + @property + def paras(self): + return [ + [word_tokenize(line) for line in para.splitlines()] + for para in blankline_tokenize(self.content) + ] + + +class MarkdownSection(MarkdownBlock): + def __init__(self, heading, level, *args): + self.heading = heading + self.level = level + super().__init__(*args) + + +Image = namedtuple("Image", "label, src, title") +Link = namedtuple("Link", "label, href, title") +List = namedtuple("List", "is_ordered, items") + + +class MarkdownCorpusReader(PlaintextCorpusReader): + def __init__(self, *args, parser=None, **kwargs): + from markdown_it import MarkdownIt + from mdit_plain.renderer import RendererPlain + from mdit_py_plugins.front_matter import front_matter_plugin + + self.parser = parser + if self.parser is None: + self.parser = MarkdownIt("commonmark", renderer_cls=RendererPlain) + self.parser.use(front_matter_plugin) + + kwargs.setdefault( + "para_block_reader", partial(read_parse_blankline_block, parser=self.parser) + ) + super().__init__(*args, **kwargs) + + # This override takes care of removing markup. + def _read_word_block(self, stream): + words = list() + for para in self._para_block_reader(stream): + words.extend(self._word_tokenizer.tokenize(para)) + return words + + +class CategorizedMarkdownCorpusReader(CategorizedCorpusReader, MarkdownCorpusReader): + """ + A reader for markdown corpora whose documents are divided into + categories based on their file identifiers. + + Based on nltk.corpus.reader.plaintext.CategorizedPlaintextCorpusReader: + https://www.nltk.org/_modules/nltk/corpus/reader/api.html#CategorizedCorpusReader + """ + + def __init__(self, *args, cat_field="tags", **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to + the ``CategorizedCorpusReader`` constructor. The remaining arguments + are passed to the ``MarkdownCorpusReader`` constructor. + """ + cat_args = ["cat_pattern", "cat_map", "cat_file"] + if not any(arg in kwargs for arg in cat_args): + # Initialize with a blank map now, + # and try to build categories from document metadata later. 
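+ # For instance, a document whose YAML front matter contains
+ #     tags: [tutorial, nlp]
+ # would, with the default cat_field="tags", be mapped to the (hypothetical)
+ # categories ['tutorial', 'nlp'] by the loop below.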
+ kwargs["cat_map"] = dict() + CategorizedCorpusReader.__init__(self, kwargs) + MarkdownCorpusReader.__init__(self, *args, **kwargs) + + # Map file IDs to categories if self._map exists but is still empty: + if self._map is not None and not self._map: + for file_id in self._fileids: + metadata = self.metadata(file_id) + if metadata: + self._map[file_id] = metadata[0].get(cat_field, []) + + ### Begin CategorizedCorpusReader Overrides + @comma_separated_string_args + def categories(self, fileids=None): + return super().categories(fileids) + + @comma_separated_string_args + def fileids(self, categories=None): + if categories is None: + return self._fileids + return super().fileids(categories) + + ### End CategorizedCorpusReader Overrides + + ### Begin MarkdownCorpusReader Overrides + @comma_separated_string_args + def raw(self, fileids=None, categories=None): + return super().raw(self._resolve(fileids, categories)) + + @comma_separated_string_args + def words(self, fileids=None, categories=None): + return super().words(self._resolve(fileids, categories)) + + @comma_separated_string_args + def sents(self, fileids=None, categories=None): + return super().sents(self._resolve(fileids, categories)) + + @comma_separated_string_args + def paras(self, fileids=None, categories=None): + return super().paras(self._resolve(fileids, categories)) + + ### End MarkdownCorpusReader Overrides + + def concatenated_view(self, reader, fileids, categories): + return concat( + [ + self.CorpusView(path, reader, encoding=enc) + for (path, enc) in self.abspaths( + self._resolve(fileids, categories), include_encoding=True + ) + ] + ) + + def metadata_reader(self, stream): + from yaml import safe_load + + return [ + safe_load(t.content) + for t in self.parser.parse(stream.read()) + if t.type == "front_matter" + ] + + @comma_separated_string_args + def metadata(self, fileids=None, categories=None): + return self.concatenated_view(self.metadata_reader, fileids, categories) + + def blockquote_reader(self, stream): + tokens = self.parser.parse(stream.read()) + opening_tokens = filter( + lambda t: t.level == 0 and t.type == "blockquote_open", tokens + ) + closing_tokens = filter( + lambda t: t.level == 0 and t.type == "blockquote_close", tokens + ) + blockquotes = list() + for o, c in zip(opening_tokens, closing_tokens): + opening_index = tokens.index(o) + closing_index = tokens.index(c, opening_index) + blockquotes.append(tokens[opening_index : closing_index + 1]) + return [ + MarkdownBlock( + self.parser.renderer.render(block, self.parser.options, env=None) + ) + for block in blockquotes + ] + + @comma_separated_string_args + def blockquotes(self, fileids=None, categories=None): + return self.concatenated_view(self.blockquote_reader, fileids, categories) + + def code_block_reader(self, stream): + return [ + CodeBlock( + t.info, + t.content, + ) + for t in self.parser.parse(stream.read()) + if t.level == 0 and t.type in ("fence", "code_block") + ] + + @comma_separated_string_args + def code_blocks(self, fileids=None, categories=None): + return self.concatenated_view(self.code_block_reader, fileids, categories) + + def image_reader(self, stream): + return [ + Image( + child_token.content, + child_token.attrGet("src"), + child_token.attrGet("title"), + ) + for inline_token in filter( + lambda t: t.type == "inline", self.parser.parse(stream.read()) + ) + for child_token in inline_token.children + if child_token.type == "image" + ] + + @comma_separated_string_args + def images(self, fileids=None, categories=None): + return 
self.concatenated_view(self.image_reader, fileids, categories) + + def link_reader(self, stream): + return [ + Link( + inline_token.children[i + 1].content, + child_token.attrGet("href"), + child_token.attrGet("title"), + ) + for inline_token in filter( + lambda t: t.type == "inline", self.parser.parse(stream.read()) + ) + for i, child_token in enumerate(inline_token.children) + if child_token.type == "link_open" + ] + + @comma_separated_string_args + def links(self, fileids=None, categories=None): + return self.concatenated_view(self.link_reader, fileids, categories) + + def list_reader(self, stream): + tokens = self.parser.parse(stream.read()) + opening_types = ("bullet_list_open", "ordered_list_open") + opening_tokens = filter( + lambda t: t.level == 0 and t.type in opening_types, tokens + ) + closing_types = ("bullet_list_close", "ordered_list_close") + closing_tokens = filter( + lambda t: t.level == 0 and t.type in closing_types, tokens + ) + list_blocks = list() + for o, c in zip(opening_tokens, closing_tokens): + opening_index = tokens.index(o) + closing_index = tokens.index(c, opening_index) + list_blocks.append(tokens[opening_index : closing_index + 1]) + return [ + List( + tokens[0].type == "ordered_list_open", + [t.content for t in tokens if t.content], + ) + for tokens in list_blocks + ] + + @comma_separated_string_args + def lists(self, fileids=None, categories=None): + return self.concatenated_view(self.list_reader, fileids, categories) + + def section_reader(self, stream): + section_blocks, block = list(), list() + in_heading = False + for t in self.parser.parse(stream.read()): + if t.level == 0 and t.type == "heading_open": + if block: + section_blocks.append(block) + block = list() + in_heading = True + if in_heading: + block.append(t) + return [ + MarkdownSection( + block[1].content, + block[0].markup.count("#"), + self.parser.renderer.render(block, self.parser.options, env=None), + ) + for block in section_blocks + ] + + @comma_separated_string_args + def sections(self, fileids=None, categories=None): + return self.concatenated_view(self.section_reader, fileids, categories) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/mte.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/mte.py new file mode 100644 index 0000000000000000000000000000000000000000..99190bed452095dc948e324ce5cc0f3c94c46505 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/mte.py @@ -0,0 +1,397 @@ +""" +A reader for corpora whose documents are in MTE format. +""" +import os +import re +from functools import reduce + +from nltk.corpus.reader import TaggedCorpusReader, concat +from nltk.corpus.reader.xmldocs import XMLCorpusView + + +def xpath(root, path, ns): + return root.findall(path, ns) + + +class MTECorpusView(XMLCorpusView): + """ + Class for lazy viewing the MTE Corpus. + """ + + def __init__(self, fileid, tagspec, elt_handler=None): + XMLCorpusView.__init__(self, fileid, tagspec, elt_handler) + + def read_block(self, stream, tagspec=None, elt_handler=None): + return list( + filter( + lambda x: x is not None, + XMLCorpusView.read_block(self, stream, tagspec, elt_handler), + ) + ) + + +class MTEFileReader: + """ + Class for loading the content of the multext-east corpus. It + parses the xml files and does some tag-filtering depending on the + given method parameters. 
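+
+ It is normally used indirectly through ``MTECorpusReader``; a direct call
+ would look like ``MTEFileReader(path).words()``, where ``path`` points at a
+ single TEI-p5 xml file (the exact filename is corpus-specific).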
+ """ + + ns = { + "tei": "https://www.tei-c.org/ns/1.0", + "xml": "https://www.w3.org/XML/1998/namespace", + } + tag_ns = "{https://www.tei-c.org/ns/1.0}" + xml_ns = "{https://www.w3.org/XML/1998/namespace}" + word_path = "TEI/text/body/div/div/p/s/(w|c)" + sent_path = "TEI/text/body/div/div/p/s" + para_path = "TEI/text/body/div/div/p" + + def __init__(self, file_path): + self.__file_path = file_path + + @classmethod + def _word_elt(cls, elt, context): + return elt.text + + @classmethod + def _sent_elt(cls, elt, context): + return [cls._word_elt(w, None) for w in xpath(elt, "*", cls.ns)] + + @classmethod + def _para_elt(cls, elt, context): + return [cls._sent_elt(s, None) for s in xpath(elt, "*", cls.ns)] + + @classmethod + def _tagged_word_elt(cls, elt, context): + if "ana" not in elt.attrib: + return (elt.text, "") + + if cls.__tags == "" and cls.__tagset == "msd": + return (elt.text, elt.attrib["ana"]) + elif cls.__tags == "" and cls.__tagset == "universal": + return (elt.text, MTETagConverter.msd_to_universal(elt.attrib["ana"])) + else: + tags = re.compile("^" + re.sub("-", ".", cls.__tags) + ".*$") + if tags.match(elt.attrib["ana"]): + if cls.__tagset == "msd": + return (elt.text, elt.attrib["ana"]) + else: + return ( + elt.text, + MTETagConverter.msd_to_universal(elt.attrib["ana"]), + ) + else: + return None + + @classmethod + def _tagged_sent_elt(cls, elt, context): + return list( + filter( + lambda x: x is not None, + [cls._tagged_word_elt(w, None) for w in xpath(elt, "*", cls.ns)], + ) + ) + + @classmethod + def _tagged_para_elt(cls, elt, context): + return list( + filter( + lambda x: x is not None, + [cls._tagged_sent_elt(s, None) for s in xpath(elt, "*", cls.ns)], + ) + ) + + @classmethod + def _lemma_word_elt(cls, elt, context): + if "lemma" not in elt.attrib: + return (elt.text, "") + else: + return (elt.text, elt.attrib["lemma"]) + + @classmethod + def _lemma_sent_elt(cls, elt, context): + return [cls._lemma_word_elt(w, None) for w in xpath(elt, "*", cls.ns)] + + @classmethod + def _lemma_para_elt(cls, elt, context): + return [cls._lemma_sent_elt(s, None) for s in xpath(elt, "*", cls.ns)] + + def words(self): + return MTECorpusView( + self.__file_path, MTEFileReader.word_path, MTEFileReader._word_elt + ) + + def sents(self): + return MTECorpusView( + self.__file_path, MTEFileReader.sent_path, MTEFileReader._sent_elt + ) + + def paras(self): + return MTECorpusView( + self.__file_path, MTEFileReader.para_path, MTEFileReader._para_elt + ) + + def lemma_words(self): + return MTECorpusView( + self.__file_path, MTEFileReader.word_path, MTEFileReader._lemma_word_elt + ) + + def tagged_words(self, tagset, tags): + MTEFileReader.__tagset = tagset + MTEFileReader.__tags = tags + return MTECorpusView( + self.__file_path, MTEFileReader.word_path, MTEFileReader._tagged_word_elt + ) + + def lemma_sents(self): + return MTECorpusView( + self.__file_path, MTEFileReader.sent_path, MTEFileReader._lemma_sent_elt + ) + + def tagged_sents(self, tagset, tags): + MTEFileReader.__tagset = tagset + MTEFileReader.__tags = tags + return MTECorpusView( + self.__file_path, MTEFileReader.sent_path, MTEFileReader._tagged_sent_elt + ) + + def lemma_paras(self): + return MTECorpusView( + self.__file_path, MTEFileReader.para_path, MTEFileReader._lemma_para_elt + ) + + def tagged_paras(self, tagset, tags): + MTEFileReader.__tagset = tagset + MTEFileReader.__tags = tags + return MTECorpusView( + self.__file_path, MTEFileReader.para_path, MTEFileReader._tagged_para_elt + ) + + +class MTETagConverter: + """ + 
Class for converting msd tags to universal tags, more conversion + options are currently not implemented. + """ + + mapping_msd_universal = { + "A": "ADJ", + "S": "ADP", + "R": "ADV", + "C": "CONJ", + "D": "DET", + "N": "NOUN", + "M": "NUM", + "Q": "PRT", + "P": "PRON", + "V": "VERB", + ".": ".", + "-": "X", + } + + @staticmethod + def msd_to_universal(tag): + """ + This function converts the annotation from the Multex-East to the universal tagset + as described in Chapter 5 of the NLTK-Book + + Unknown Tags will be mapped to X. Punctuation marks are not supported in MSD tags, so + """ + indicator = tag[0] if not tag[0] == "#" else tag[1] + + if not indicator in MTETagConverter.mapping_msd_universal: + indicator = "-" + + return MTETagConverter.mapping_msd_universal[indicator] + + +class MTECorpusReader(TaggedCorpusReader): + """ + Reader for corpora following the TEI-p5 xml scheme, such as MULTEXT-East. + MULTEXT-East contains part-of-speech-tagged words with a quite precise tagging + scheme. These tags can be converted to the Universal tagset + """ + + def __init__(self, root=None, fileids=None, encoding="utf8"): + """ + Construct a new MTECorpusreader for a set of documents + located at the given root directory. Example usage: + + >>> root = '/...path to corpus.../' + >>> reader = MTECorpusReader(root, 'oana-*.xml', 'utf8') # doctest: +SKIP + + :param root: The root directory for this corpus. (default points to location in multext config file) + :param fileids: A list or regexp specifying the fileids in this corpus. (default is oana-en.xml) + :param encoding: The encoding of the given files (default is utf8) + """ + TaggedCorpusReader.__init__(self, root, fileids, encoding) + self._readme = "00README.txt" + + def __fileids(self, fileids): + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + # filter wrong userinput + fileids = filter(lambda x: x in self._fileids, fileids) + # filter multext-east sourcefiles that are not compatible to the teip5 specification + fileids = filter(lambda x: x not in ["oana-bg.xml", "oana-mk.xml"], fileids) + if not fileids: + print("No valid multext-east file specified") + return fileids + + def words(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).words() + for f in self.__fileids(fileids) + ] + ) + + def sents(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a list of sentences or utterances, + each encoded as a list of word strings + :rtype: list(list(str)) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).sents() + for f in self.__fileids(fileids) + ] + ) + + def paras(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a list of paragraphs, each encoded as a list + of sentences, which are in turn encoded as lists of word string + :rtype: list(list(list(str))) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).paras() + for f in self.__fileids(fileids) + ] + ) + + def lemma_words(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. 
+ :return: the given file(s) as a list of words, the corresponding lemmas + and punctuation symbols, encoded as tuples (word, lemma) + :rtype: list(tuple(str,str)) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).lemma_words() + for f in self.__fileids(fileids) + ] + ) + + def tagged_words(self, fileids=None, tagset="msd", tags=""): + """ + :param fileids: A list specifying the fileids that should be used. + :param tagset: The tagset that should be used in the returned object, + either "universal" or "msd", "msd" is the default + :param tags: An MSD Tag that is used to filter all parts of the used corpus + that are not more precise or at least equal to the given tag + :return: the given file(s) as a list of tagged words and punctuation symbols + encoded as tuples (word, tag) + :rtype: list(tuple(str, str)) + """ + if tagset == "universal" or tagset == "msd": + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).tagged_words( + tagset, tags + ) + for f in self.__fileids(fileids) + ] + ) + else: + print("Unknown tagset specified.") + + def lemma_sents(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a list of sentences or utterances, each + encoded as a list of tuples of the word and the corresponding + lemma (word, lemma) + :rtype: list(list(tuple(str, str))) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).lemma_sents() + for f in self.__fileids(fileids) + ] + ) + + def tagged_sents(self, fileids=None, tagset="msd", tags=""): + """ + :param fileids: A list specifying the fileids that should be used. + :param tagset: The tagset that should be used in the returned object, + either "universal" or "msd", "msd" is the default + :param tags: An MSD Tag that is used to filter all parts of the used corpus + that are not more precise or at least equal to the given tag + :return: the given file(s) as a list of sentences or utterances, each + each encoded as a list of (word,tag) tuples + :rtype: list(list(tuple(str, str))) + """ + if tagset == "universal" or tagset == "msd": + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).tagged_sents( + tagset, tags + ) + for f in self.__fileids(fileids) + ] + ) + else: + print("Unknown tagset specified.") + + def lemma_paras(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a list of paragraphs, each encoded as a + list of sentences, which are in turn encoded as a list of + tuples of the word and the corresponding lemma (word, lemma) + :rtype: list(List(List(tuple(str, str)))) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).lemma_paras() + for f in self.__fileids(fileids) + ] + ) + + def tagged_paras(self, fileids=None, tagset="msd", tags=""): + """ + :param fileids: A list specifying the fileids that should be used. 
+ :param tagset: The tagset that should be used in the returned object, + either "universal" or "msd", "msd" is the default + :param tags: An MSD Tag that is used to filter all parts of the used corpus + that are not more precise or at least equal to the given tag + :return: the given file(s) as a list of paragraphs, each encoded as a + list of sentences, which are in turn encoded as a list + of (word,tag) tuples + :rtype: list(list(list(tuple(str, str)))) + """ + if tagset == "universal" or tagset == "msd": + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).tagged_paras( + tagset, tags + ) + for f in self.__fileids(fileids) + ] + ) + else: + print("Unknown tagset specified.") diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nkjp.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nkjp.py new file mode 100644 index 0000000000000000000000000000000000000000..685485590727fb8231062eedba6727cf3dc45d81 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nkjp.py @@ -0,0 +1,487 @@ +# Natural Language Toolkit: NKJP Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Gabriela Kaczka +# URL: +# For license information, see LICENSE.TXT + +import functools +import os +import re +import tempfile + +from nltk.corpus.reader.util import concat +from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView + + +def _parse_args(fun): + """ + Wraps function arguments: + if fileids not specified then function set NKJPCorpusReader paths. + """ + + @functools.wraps(fun) + def decorator(self, fileids=None, **kwargs): + if not fileids: + fileids = self._paths + return fun(self, fileids, **kwargs) + + return decorator + + +class NKJPCorpusReader(XMLCorpusReader): + WORDS_MODE = 0 + SENTS_MODE = 1 + HEADER_MODE = 2 + RAW_MODE = 3 + + def __init__(self, root, fileids=".*"): + """ + Corpus reader designed to work with National Corpus of Polish. + See http://nkjp.pl/ for more details about NKJP. + use example: + import nltk + import nkjp + from nkjp import NKJPCorpusReader + x = NKJPCorpusReader(root='/home/USER/nltk_data/corpora/nkjp/', fileids='') # obtain the whole corpus + x.header() + x.raw() + x.words() + x.tagged_words(tags=['subst', 'comp']) #Link to find more tags: nkjp.pl/poliqarp/help/ense2.html + x.sents() + x = NKJPCorpusReader(root='/home/USER/nltk_data/corpora/nkjp/', fileids='Wilk*') # obtain particular file(s) + x.header(fileids=['WilkDom', '/home/USER/nltk_data/corpora/nkjp/WilkWilczy']) + x.tagged_words(fileids=['WilkDom', '/home/USER/nltk_data/corpora/nkjp/WilkWilczy'], tags=['subst', 'comp']) + """ + if isinstance(fileids, str): + XMLCorpusReader.__init__(self, root, fileids + ".*/header.xml") + else: + XMLCorpusReader.__init__( + self, root, [fileid + "/header.xml" for fileid in fileids] + ) + self._paths = self.get_paths() + + def get_paths(self): + return [ + os.path.join(str(self._root), f.split("header.xml")[0]) + for f in self._fileids + ] + + def fileids(self): + """ + Returns a list of file identifiers for the fileids that make up + this corpus. + """ + return [f.split("header.xml")[0] for f in self._fileids] + + def _view(self, filename, tags=None, **kwargs): + """ + Returns a view specialised for use with particular corpus file. 
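+
+ The ``mode`` keyword selects between ``WORDS_MODE`` (the default),
+ ``SENTS_MODE``, ``HEADER_MODE`` and ``RAW_MODE``; any other value raises
+ ``NameError``.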
+ """ + mode = kwargs.pop("mode", NKJPCorpusReader.WORDS_MODE) + if mode is NKJPCorpusReader.WORDS_MODE: + return NKJPCorpus_Morph_View(filename, tags=tags) + elif mode is NKJPCorpusReader.SENTS_MODE: + return NKJPCorpus_Segmentation_View(filename, tags=tags) + elif mode is NKJPCorpusReader.HEADER_MODE: + return NKJPCorpus_Header_View(filename, tags=tags) + elif mode is NKJPCorpusReader.RAW_MODE: + return NKJPCorpus_Text_View( + filename, tags=tags, mode=NKJPCorpus_Text_View.RAW_MODE + ) + + else: + raise NameError("No such mode!") + + def add_root(self, fileid): + """ + Add root if necessary to specified fileid. + """ + if self.root in fileid: + return fileid + return self.root + fileid + + @_parse_args + def header(self, fileids=None, **kwargs): + """ + Returns header(s) of specified fileids. + """ + return concat( + [ + self._view( + self.add_root(fileid), mode=NKJPCorpusReader.HEADER_MODE, **kwargs + ).handle_query() + for fileid in fileids + ] + ) + + @_parse_args + def sents(self, fileids=None, **kwargs): + """ + Returns sentences in specified fileids. + """ + return concat( + [ + self._view( + self.add_root(fileid), mode=NKJPCorpusReader.SENTS_MODE, **kwargs + ).handle_query() + for fileid in fileids + ] + ) + + @_parse_args + def words(self, fileids=None, **kwargs): + """ + Returns words in specified fileids. + """ + + return concat( + [ + self._view( + self.add_root(fileid), mode=NKJPCorpusReader.WORDS_MODE, **kwargs + ).handle_query() + for fileid in fileids + ] + ) + + @_parse_args + def tagged_words(self, fileids=None, **kwargs): + """ + Call with specified tags as a list, e.g. tags=['subst', 'comp']. + Returns tagged words in specified fileids. + """ + tags = kwargs.pop("tags", []) + return concat( + [ + self._view( + self.add_root(fileid), + mode=NKJPCorpusReader.WORDS_MODE, + tags=tags, + **kwargs + ).handle_query() + for fileid in fileids + ] + ) + + @_parse_args + def raw(self, fileids=None, **kwargs): + """ + Returns words in specified fileids. + """ + return concat( + [ + self._view( + self.add_root(fileid), mode=NKJPCorpusReader.RAW_MODE, **kwargs + ).handle_query() + for fileid in fileids + ] + ) + + +class NKJPCorpus_Header_View(XMLCorpusView): + def __init__(self, filename, **kwargs): + """ + HEADER_MODE + A stream backed corpus view specialized for use with + header.xml files in NKJP corpus. 
+ """ + self.tagspec = ".*/sourceDesc$" + XMLCorpusView.__init__(self, filename + "header.xml", self.tagspec) + + def handle_query(self): + self._open() + header = [] + while True: + segm = XMLCorpusView.read_block(self, self._stream) + if len(segm) == 0: + break + header.extend(segm) + self.close() + return header + + def handle_elt(self, elt, context): + titles = elt.findall("bibl/title") + title = [] + if titles: + title = "\n".join(title.text.strip() for title in titles) + + authors = elt.findall("bibl/author") + author = [] + if authors: + author = "\n".join(author.text.strip() for author in authors) + + dates = elt.findall("bibl/date") + date = [] + if dates: + date = "\n".join(date.text.strip() for date in dates) + + publishers = elt.findall("bibl/publisher") + publisher = [] + if publishers: + publisher = "\n".join(publisher.text.strip() for publisher in publishers) + + idnos = elt.findall("bibl/idno") + idno = [] + if idnos: + idno = "\n".join(idno.text.strip() for idno in idnos) + + notes = elt.findall("bibl/note") + note = [] + if notes: + note = "\n".join(note.text.strip() for note in notes) + + return { + "title": title, + "author": author, + "date": date, + "publisher": publisher, + "idno": idno, + "note": note, + } + + +class XML_Tool: + """ + Helper class creating xml file to one without references to nkjp: namespace. + That's needed because the XMLCorpusView assumes that one can find short substrings + of XML that are valid XML, which is not true if a namespace is declared at top level + """ + + def __init__(self, root, filename): + self.read_file = os.path.join(root, filename) + self.write_file = tempfile.NamedTemporaryFile(delete=False) + + def build_preprocessed_file(self): + try: + fr = open(self.read_file) + fw = self.write_file + line = " " + while len(line): + line = fr.readline() + x = re.split(r"nkjp:[^ ]* ", line) # in all files + ret = " ".join(x) + x = re.split("", ret) # in ann_segmentation.xml + ret = " ".join(x) + x = re.split("", ret) # in ann_segmentation.xml + ret = " ".join(x) + x = re.split("", ret) # in ann_segmentation.xml + ret = " ".join(x) + x = re.split("", ret) # in ann_segmentation.xml + ret = " ".join(x) + fw.write(ret) + fr.close() + fw.close() + return self.write_file.name + except Exception as e: + self.remove_preprocessed_file() + raise Exception from e + + def remove_preprocessed_file(self): + os.remove(self.write_file.name) + + +class NKJPCorpus_Segmentation_View(XMLCorpusView): + """ + A stream backed corpus view specialized for use with + ann_segmentation.xml files in NKJP corpus. 
+ """ + + def __init__(self, filename, **kwargs): + self.tagspec = ".*p/.*s" + # intersperse NKJPCorpus_Text_View + self.text_view = NKJPCorpus_Text_View( + filename, mode=NKJPCorpus_Text_View.SENTS_MODE + ) + self.text_view.handle_query() + # xml preprocessing + self.xml_tool = XML_Tool(filename, "ann_segmentation.xml") + # base class init + XMLCorpusView.__init__( + self, self.xml_tool.build_preprocessed_file(), self.tagspec + ) + + def get_segm_id(self, example_word): + return example_word.split("(")[1].split(",")[0] + + def get_sent_beg(self, beg_word): + # returns index of beginning letter in sentence + return int(beg_word.split(",")[1]) + + def get_sent_end(self, end_word): + # returns index of end letter in sentence + splitted = end_word.split(")")[0].split(",") + return int(splitted[1]) + int(splitted[2]) + + def get_sentences(self, sent_segm): + # returns one sentence + id = self.get_segm_id(sent_segm[0]) + segm = self.text_view.segm_dict[id] # text segment + beg = self.get_sent_beg(sent_segm[0]) + end = self.get_sent_end(sent_segm[len(sent_segm) - 1]) + return segm[beg:end] + + def remove_choice(self, segm): + ret = [] + prev_txt_end = -1 + prev_txt_nr = -1 + for word in segm: + txt_nr = self.get_segm_id(word) + # get increasing sequence of ids: in case of choice get first possibility + if self.get_sent_beg(word) > prev_txt_end - 1 or prev_txt_nr != txt_nr: + ret.append(word) + prev_txt_end = self.get_sent_end(word) + prev_txt_nr = txt_nr + + return ret + + def handle_query(self): + try: + self._open() + sentences = [] + while True: + sent_segm = XMLCorpusView.read_block(self, self._stream) + if len(sent_segm) == 0: + break + for segm in sent_segm: + segm = self.remove_choice(segm) + sentences.append(self.get_sentences(segm)) + self.close() + self.xml_tool.remove_preprocessed_file() + return sentences + except Exception as e: + self.xml_tool.remove_preprocessed_file() + raise Exception from e + + def handle_elt(self, elt, context): + ret = [] + for seg in elt: + ret.append(seg.get("corresp")) + return ret + + +class NKJPCorpus_Text_View(XMLCorpusView): + """ + A stream backed corpus view specialized for use with + text.xml files in NKJP corpus. + """ + + SENTS_MODE = 0 + RAW_MODE = 1 + + def __init__(self, filename, **kwargs): + self.mode = kwargs.pop("mode", 0) + self.tagspec = ".*/div/ab" + self.segm_dict = dict() + # xml preprocessing + self.xml_tool = XML_Tool(filename, "text.xml") + # base class init + XMLCorpusView.__init__( + self, self.xml_tool.build_preprocessed_file(), self.tagspec + ) + + def handle_query(self): + try: + self._open() + x = self.read_block(self._stream) + self.close() + self.xml_tool.remove_preprocessed_file() + return x + except Exception as e: + self.xml_tool.remove_preprocessed_file() + raise Exception from e + + def read_block(self, stream, tagspec=None, elt_handler=None): + """ + Returns text as a list of sentences. 
+ """ + txt = [] + while True: + segm = XMLCorpusView.read_block(self, stream) + if len(segm) == 0: + break + for part in segm: + txt.append(part) + + return [" ".join([segm for segm in txt])] + + def get_segm_id(self, elt): + for attr in elt.attrib: + if attr.endswith("id"): + return elt.get(attr) + + def handle_elt(self, elt, context): + # fill dictionary to use later in sents mode + if self.mode is NKJPCorpus_Text_View.SENTS_MODE: + self.segm_dict[self.get_segm_id(elt)] = elt.text + return elt.text + + +class NKJPCorpus_Morph_View(XMLCorpusView): + """ + A stream backed corpus view specialized for use with + ann_morphosyntax.xml files in NKJP corpus. + """ + + def __init__(self, filename, **kwargs): + self.tags = kwargs.pop("tags", None) + self.tagspec = ".*/seg/fs" + self.xml_tool = XML_Tool(filename, "ann_morphosyntax.xml") + XMLCorpusView.__init__( + self, self.xml_tool.build_preprocessed_file(), self.tagspec + ) + + def handle_query(self): + try: + self._open() + words = [] + while True: + segm = XMLCorpusView.read_block(self, self._stream) + if len(segm) == 0: + break + for part in segm: + if part is not None: + words.append(part) + self.close() + self.xml_tool.remove_preprocessed_file() + return words + except Exception as e: + self.xml_tool.remove_preprocessed_file() + raise Exception from e + + def handle_elt(self, elt, context): + word = "" + flag = False + is_not_interp = True + # if tags not specified, then always return word + if self.tags is None: + flag = True + + for child in elt: + + # get word + if "name" in child.keys() and child.attrib["name"] == "orth": + for symbol in child: + if symbol.tag == "string": + word = symbol.text + elif "name" in child.keys() and child.attrib["name"] == "interps": + for symbol in child: + if "type" in symbol.keys() and symbol.attrib["type"] == "lex": + for symbol2 in symbol: + if ( + "name" in symbol2.keys() + and symbol2.attrib["name"] == "ctag" + ): + for symbol3 in symbol2: + if ( + "value" in symbol3.keys() + and self.tags is not None + and symbol3.attrib["value"] in self.tags + ): + flag = True + elif ( + "value" in symbol3.keys() + and symbol3.attrib["value"] == "interp" + ): + is_not_interp = False + if flag and is_not_interp: + return word diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py new file mode 100644 index 0000000000000000000000000000000000000000..59492992353ca876eea00f63e3759f14ec5b0e02 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py @@ -0,0 +1,174 @@ +# Natural Language Toolkit: PanLex Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: David Kamholz +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for PanLex Lite, a stripped down version of PanLex distributed +as an SQLite database. See the README.txt in the panlex_lite corpus directory +for more information on PanLex Lite. +""" + +import os +import sqlite3 + +from nltk.corpus.reader.api import CorpusReader + + +class PanLexLiteCorpusReader(CorpusReader): + MEANING_Q = """ + SELECT dnx2.mn, dnx2.uq, dnx2.ap, dnx2.ui, ex2.tt, ex2.lv + FROM dnx + JOIN ex ON (ex.ex = dnx.ex) + JOIN dnx dnx2 ON (dnx2.mn = dnx.mn) + JOIN ex ex2 ON (ex2.ex = dnx2.ex) + WHERE dnx.ex != dnx2.ex AND ex.tt = ? AND ex.lv = ? 
+ ORDER BY dnx2.uq DESC + """ + + TRANSLATION_Q = """ + SELECT s.tt, sum(s.uq) AS trq FROM ( + SELECT ex2.tt, max(dnx.uq) AS uq + FROM dnx + JOIN ex ON (ex.ex = dnx.ex) + JOIN dnx dnx2 ON (dnx2.mn = dnx.mn) + JOIN ex ex2 ON (ex2.ex = dnx2.ex) + WHERE dnx.ex != dnx2.ex AND ex.lv = ? AND ex.tt = ? AND ex2.lv = ? + GROUP BY ex2.tt, dnx.ui + ) s + GROUP BY s.tt + ORDER BY trq DESC, s.tt + """ + + def __init__(self, root): + self._c = sqlite3.connect(os.path.join(root, "db.sqlite")).cursor() + + self._uid_lv = {} + self._lv_uid = {} + + for row in self._c.execute("SELECT uid, lv FROM lv"): + self._uid_lv[row[0]] = row[1] + self._lv_uid[row[1]] = row[0] + + def language_varieties(self, lc=None): + """ + Return a list of PanLex language varieties. + + :param lc: ISO 639 alpha-3 code. If specified, filters returned varieties + by this code. If unspecified, all varieties are returned. + :return: the specified language varieties as a list of tuples. The first + element is the language variety's seven-character uniform identifier, + and the second element is its default name. + :rtype: list(tuple) + """ + + if lc is None: + return self._c.execute("SELECT uid, tt FROM lv ORDER BY uid").fetchall() + else: + return self._c.execute( + "SELECT uid, tt FROM lv WHERE lc = ? ORDER BY uid", (lc,) + ).fetchall() + + def meanings(self, expr_uid, expr_tt): + """ + Return a list of meanings for an expression. + + :param expr_uid: the expression's language variety, as a seven-character + uniform identifier. + :param expr_tt: the expression's text. + :return: a list of Meaning objects. + :rtype: list(Meaning) + """ + + expr_lv = self._uid_lv[expr_uid] + + mn_info = {} + + for i in self._c.execute(self.MEANING_Q, (expr_tt, expr_lv)): + mn = i[0] + uid = self._lv_uid[i[5]] + + if not mn in mn_info: + mn_info[mn] = { + "uq": i[1], + "ap": i[2], + "ui": i[3], + "ex": {expr_uid: [expr_tt]}, + } + + if not uid in mn_info[mn]["ex"]: + mn_info[mn]["ex"][uid] = [] + + mn_info[mn]["ex"][uid].append(i[4]) + + return [Meaning(mn, mn_info[mn]) for mn in mn_info] + + def translations(self, from_uid, from_tt, to_uid): + """ + Return a list of translations for an expression into a single language + variety. + + :param from_uid: the source expression's language variety, as a + seven-character uniform identifier. + :param from_tt: the source expression's text. + :param to_uid: the target language variety, as a seven-character + uniform identifier. + :return: a list of translation tuples. The first element is the expression + text and the second element is the translation quality. + :rtype: list(tuple) + """ + + from_lv = self._uid_lv[from_uid] + to_lv = self._uid_lv[to_uid] + + return self._c.execute(self.TRANSLATION_Q, (from_lv, from_tt, to_lv)).fetchall() + + +class Meaning(dict): + """ + Represents a single PanLex meaning. A meaning is a translation set derived + from a single source. + """ + + def __init__(self, mn, attr): + super().__init__(**attr) + self["mn"] = mn + + def id(self): + """ + :return: the meaning's id. + :rtype: int + """ + return self["mn"] + + def quality(self): + """ + :return: the meaning's source's quality (0=worst, 9=best). + :rtype: int + """ + return self["uq"] + + def source(self): + """ + :return: the meaning's source id. + :rtype: int + """ + return self["ap"] + + def source_group(self): + """ + :return: the meaning's source group id. 
+ :rtype: int + """ + return self["ui"] + + def expressions(self): + """ + :return: the meaning's expressions as a dictionary whose keys are language + variety uniform identifiers and whose values are lists of expression + texts. + :rtype: dict + """ + return self["ex"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py new file mode 100644 index 0000000000000000000000000000000000000000..182960f2ebc4b3e2411e3980ce4e445412af9bcc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py @@ -0,0 +1,95 @@ +# Natural Language Toolkit: Word List Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + + +import re +from collections import defaultdict, namedtuple + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.wordlist import WordListCorpusReader +from nltk.tokenize import line_tokenize + +PanlexLanguage = namedtuple( + "PanlexLanguage", + [ + "panlex_uid", # (1) PanLex UID + "iso639", # (2) ISO 639 language code + "iso639_type", # (3) ISO 639 language type, see README + "script", # (4) normal scripts of expressions + "name", # (5) PanLex default name + "langvar_uid", # (6) UID of the language variety in which the default name is an expression + ], +) + + +class PanlexSwadeshCorpusReader(WordListCorpusReader): + """ + This is a class to read the PanLex Swadesh list from + + David Kamholz, Jonathan Pool, and Susan M. Colowick (2014). + PanLex: Building a Resource for Panlingual Lexical Translation. + In LREC. http://www.lrec-conf.org/proceedings/lrec2014/pdf/1029_Paper.pdf + + License: CC0 1.0 Universal + https://creativecommons.org/publicdomain/zero/1.0/legalcode + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Find the swadesh size using the fileids' path. + self.swadesh_size = re.match(r"swadesh([0-9].*)\/", self.fileids()[0]).group(1) + self._languages = {lang.panlex_uid: lang for lang in self.get_languages()} + self._macro_langauges = self.get_macrolanguages() + + def license(self): + return "CC0 1.0 Universal" + + def language_codes(self): + return self._languages.keys() + + def get_languages(self): + for line in self.raw(f"langs{self.swadesh_size}.txt").split("\n"): + if not line.strip(): # Skip empty lines. + continue + yield PanlexLanguage(*line.strip().split("\t")) + + def get_macrolanguages(self): + macro_langauges = defaultdict(list) + for lang in self._languages.values(): + macro_langauges[lang.iso639].append(lang.panlex_uid) + return macro_langauges + + def words_by_lang(self, lang_code): + """ + :return: a list of list(str) + """ + fileid = f"swadesh{self.swadesh_size}/{lang_code}.txt" + return [concept.split("\t") for concept in self.words(fileid)] + + def words_by_iso639(self, iso63_code): + """ + :return: a list of list(str) + """ + fileids = [ + f"swadesh{self.swadesh_size}/{lang_code}.txt" + for lang_code in self._macro_langauges[iso63_code] + ] + return [ + concept.split("\t") for fileid in fileids for concept in self.words(fileid) + ] + + def entries(self, fileids=None): + """ + :return: a tuple of words for the specified fileids. 
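# A minimal sketch for the two PanLex readers above. It assumes the
# panlex_swadesh word lists have been installed (nltk.download('panlex_swadesh'));
# PanLex Lite is a separate, much larger download. The variety codes 'eng-000'
# and 'spa-000' and the word 'book' are example inputs only.
from nltk.corpus import panlex_swadesh

print(panlex_swadesh.words_by_lang("eng-000")[:5])   # Swadesh entries for one variety
print(panlex_swadesh.words_by_iso639("eng")[:5])     # entries grouped by macrolanguage

# With panlex_lite installed, translations are queried by variety UID and text:
#     from nltk.corpus import panlex_lite
#     for text, quality in panlex_lite.translations("eng-000", "book", "spa-000")[:5]:
#         print(text, quality)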
+ """ + if not fileids: + fileids = self.fileids() + + wordlists = [self.words(f) for f in fileids] + return list(zip(*wordlists)) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/plaintext.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/plaintext.py new file mode 100644 index 0000000000000000000000000000000000000000..f096f3ecb0ef7196950071723393656ec91aa363 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/plaintext.py @@ -0,0 +1,227 @@ +# Natural Language Toolkit: Plaintext Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# Nitin Madnani +# URL: +# For license information, see LICENSE.TXT + +""" +A reader for corpora that consist of plaintext documents. +""" + +import nltk.data +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tokenize import * + + +class PlaintextCorpusReader(CorpusReader): + """ + Reader for corpora that consist of plaintext documents. Paragraphs + are assumed to be split using blank lines. Sentences and words can + be tokenized using the default tokenizers, or by custom tokenizers + specified as parameters to the constructor. + + This corpus reader can be customized (e.g., to skip preface + sections of specific document formats) by creating a subclass and + overriding the ``CorpusView`` class variable. + """ + + CorpusView = StreamBackedCorpusView + """The corpus view class used by this reader. Subclasses of + ``PlaintextCorpusReader`` may specify alternative corpus view + classes (e.g., to skip the preface sections of documents.)""" + + def __init__( + self, + root, + fileids, + word_tokenizer=WordPunctTokenizer(), + sent_tokenizer=nltk.data.LazyLoader("tokenizers/punkt/english.pickle"), + para_block_reader=read_blankline_block, + encoding="utf8", + ): + r""" + Construct a new plaintext corpus reader for a set of documents + located at the given root directory. Example usage: + + >>> root = '/usr/local/share/nltk_data/corpora/webtext/' + >>> reader = PlaintextCorpusReader(root, '.*\.txt') # doctest: +SKIP + + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + :param word_tokenizer: Tokenizer for breaking sentences or + paragraphs into words. + :param sent_tokenizer: Tokenizer for breaking paragraphs + into words. + :param para_block_reader: The block reader used to divide the + corpus into paragraph blocks. + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. 
+ :rtype: list(list(str)) + """ + if self._sent_tokenizer is None: + raise ValueError("No sentence tokenizer for this corpus") + + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def paras(self, fileids=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of word strings. + :rtype: list(list(list(str))) + """ + if self._sent_tokenizer is None: + raise ValueError("No sentence tokenizer for this corpus") + + return concat( + [ + self.CorpusView(path, self._read_para_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_word_block(self, stream): + words = [] + for i in range(20): # Read 20 lines at a time. + words.extend(self._word_tokenizer.tokenize(stream.readline())) + return words + + def _read_sent_block(self, stream): + sents = [] + for para in self._para_block_reader(stream): + sents.extend( + [ + self._word_tokenizer.tokenize(sent) + for sent in self._sent_tokenizer.tokenize(para) + ] + ) + return sents + + def _read_para_block(self, stream): + paras = [] + for para in self._para_block_reader(stream): + paras.append( + [ + self._word_tokenizer.tokenize(sent) + for sent in self._sent_tokenizer.tokenize(para) + ] + ) + return paras + + +class CategorizedPlaintextCorpusReader(CategorizedCorpusReader, PlaintextCorpusReader): + """ + A reader for plaintext corpora whose documents are divided into + categories based on their file identifiers. + """ + + def __init__(self, *args, **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to + the ``CategorizedCorpusReader`` constructor. The remaining arguments + are passed to the ``PlaintextCorpusReader`` constructor. + """ + CategorizedCorpusReader.__init__(self, kwargs) + PlaintextCorpusReader.__init__(self, *args, **kwargs) + + +# FIXME: Is there a better way? How to not hardcode this? +# Possibly, add a language kwargs to CategorizedPlaintextCorpusReader to +# override the `sent_tokenizer`. +class PortugueseCategorizedPlaintextCorpusReader(CategorizedPlaintextCorpusReader): + def __init__(self, *args, **kwargs): + CategorizedCorpusReader.__init__(self, kwargs) + kwargs["sent_tokenizer"] = nltk.data.LazyLoader( + "tokenizers/punkt/portuguese.pickle" + ) + PlaintextCorpusReader.__init__(self, *args, **kwargs) + + +class EuroparlCorpusReader(PlaintextCorpusReader): + + """ + Reader for Europarl corpora that consist of plaintext documents. + Documents are divided into chapters instead of paragraphs as + for regular plaintext documents. Chapters are separated using blank + lines. Everything is inherited from ``PlaintextCorpusReader`` except + that: + + - Since the corpus is pre-processed and pre-tokenized, the + word tokenizer should just split the line at whitespaces. + - For the same reason, the sentence tokenizer should just + split the paragraph at line breaks. + - There is a new 'chapters()' method that returns chapters instead + instead of paragraphs. + - The 'paras()' method inherited from PlaintextCorpusReader is + made non-functional to remove any confusion between chapters + and paragraphs for Europarl. + """ + + def _read_word_block(self, stream): + words = [] + for i in range(20): # Read 20 lines at a time. 
+ words.extend(stream.readline().split()) + return words + + def _read_sent_block(self, stream): + sents = [] + for para in self._para_block_reader(stream): + sents.extend([sent.split() for sent in para.splitlines()]) + return sents + + def _read_para_block(self, stream): + paras = [] + for para in self._para_block_reader(stream): + paras.append([sent.split() for sent in para.splitlines()]) + return paras + + def chapters(self, fileids=None): + """ + :return: the given file(s) as a list of + chapters, each encoded as a list of sentences, which are + in turn encoded as lists of word strings. + :rtype: list(list(list(str))) + """ + return concat( + [ + self.CorpusView(fileid, self._read_para_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + raise NotImplementedError( + "The Europarl corpus reader does not support paragraphs. Please use chapters() instead." + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ppattach.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ppattach.py new file mode 100644 index 0000000000000000000000000000000000000000..0006e640e9ef30cb50fbdee621b13f2f78b484dd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ppattach.py @@ -0,0 +1,95 @@ +# Natural Language Toolkit: PP Attachment Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Read lines from the Prepositional Phrase Attachment Corpus. + +The PP Attachment Corpus contains several files having the format: + +sentence_id verb noun1 preposition noun2 attachment + +For example: + +42960 gives authority to administration V +46742 gives inventors of microchip N + +The PP attachment is to the verb phrase (V) or noun phrase (N), i.e.: + +(VP gives (NP authority) (PP to administration)) +(VP gives (NP inventors (PP of microchip))) + +The corpus contains the following files: + +training: training set +devset: development test set, used for algorithm development. +test: test set, used to report results +bitstrings: word classes derived from Mutual Information Clustering for the Wall Street Journal. + +Ratnaparkhi, Adwait (1994). A Maximum Entropy Model for Prepositional +Phrase Attachment. Proceedings of the ARPA Human Language Technology +Conference. [http://www.cis.upenn.edu/~adwait/papers/hlt94.ps] + +The PP Attachment Corpus is distributed with NLTK with the permission +of the author. 
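# A short usage sketch for the PP Attachment reader, assuming the corpus has
# been fetched with nltk.download('ppattach'):
from nltk.corpus import ppattach

inst = ppattach.attachments("training")[0]
print(inst.sent, inst.verb, inst.noun1, inst.prep, inst.noun2, inst.attachment)
print(ppattach.tuples("training")[0])   # the same record as a plain tuple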
+""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * + + +class PPAttachment: + def __init__(self, sent, verb, noun1, prep, noun2, attachment): + self.sent = sent + self.verb = verb + self.noun1 = noun1 + self.prep = prep + self.noun2 = noun2 + self.attachment = attachment + + def __repr__(self): + return ( + "PPAttachment(sent=%r, verb=%r, noun1=%r, prep=%r, " + "noun2=%r, attachment=%r)" + % (self.sent, self.verb, self.noun1, self.prep, self.noun2, self.attachment) + ) + + +class PPAttachmentCorpusReader(CorpusReader): + """ + sentence_id verb noun1 preposition noun2 attachment + """ + + def attachments(self, fileids): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_obj_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tuples(self, fileids): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_tuple_block(self, stream): + line = stream.readline() + if line: + return [tuple(line.split())] + else: + return [] + + def _read_obj_block(self, stream): + line = stream.readline() + if line: + return [PPAttachment(*line.split())] + else: + return [] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py new file mode 100644 index 0000000000000000000000000000000000000000..31f1b02f701bc68a652af9617751d78b1c04d56d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py @@ -0,0 +1,133 @@ +# Natural Language Toolkit: Pros and Cons Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for the Pros and Cons dataset. + +- Pros and Cons dataset information - + +Contact: Bing Liu, liub@cs.uic.edu + https://www.cs.uic.edu/~liub + +Distributed with permission. + +Related papers: + +- Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences". + Proceedings of the 22nd International Conference on Computational Linguistics + (Coling-2008), Manchester, 18-22 August, 2008. + +- Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and Comparing + Opinions on the Web". Proceedings of the 14th international World Wide Web + conference (WWW-2005), May 10-14, 2005, in Chiba, Japan. +""" +import re + +from nltk.corpus.reader.api import * +from nltk.tokenize import * + + +class ProsConsCorpusReader(CategorizedCorpusReader, CorpusReader): + """ + Reader for the Pros and Cons sentence dataset. + + >>> from nltk.corpus import pros_cons + >>> pros_cons.sents(categories='Cons') # doctest: +NORMALIZE_WHITESPACE + [['East', 'batteries', '!', 'On', '-', 'off', 'switch', 'too', 'easy', + 'to', 'maneuver', '.'], ['Eats', '...', 'no', ',', 'GULPS', 'batteries'], + ...] + >>> pros_cons.words('IntegratedPros.txt') + ['Easy', 'to', 'use', ',', 'economical', '!', ...] + """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, + root, + fileids, + word_tokenizer=WordPunctTokenizer(), + encoding="utf8", + **kwargs + ): + """ + :param root: The root directory for the corpus. + :param fileids: a list or regexp specifying the fileids in the corpus. + :param word_tokenizer: a tokenizer for breaking sentences or paragraphs + into words. 
Default: `WhitespaceTokenizer` + :param encoding: the encoding that should be used to read the corpus. + :param kwargs: additional parameters passed to CategorizedCorpusReader. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + CategorizedCorpusReader.__init__(self, kwargs) + self._word_tokenizer = word_tokenizer + + def sents(self, fileids=None, categories=None): + """ + Return all sentences in the corpus or in the specified files/categories. + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. + :param categories: a list specifying the categories whose sentences + have to be returned. + :return: the given file(s) as a list of sentences. Each sentence is + tokenized using the specified word_tokenizer. + :rtype: list(list(str)) + """ + fileids = self._resolve(fileids, categories) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def words(self, fileids=None, categories=None): + """ + Return all words and punctuation symbols in the corpus or in the specified + files/categories. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :param categories: a list specifying the categories whose words have + to be returned. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + fileids = self._resolve(fileids, categories) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_sent_block(self, stream): + sents = [] + for i in range(20): # Read 20 lines at a time. + line = stream.readline() + if not line: + continue + sent = re.match(r"^(?!\n)\s*<(Pros|Cons)>(.*)", line) + if sent: + sents.append(self._word_tokenizer.tokenize(sent.group(2).strip())) + return sents + + def _read_word_block(self, stream): + words = [] + for sent in self._read_sent_block(stream): + words.extend(sent) + return words diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py new file mode 100644 index 0000000000000000000000000000000000000000..5f52425c0f7c260f62d7d953b90d241a6c00a2b8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py @@ -0,0 +1,331 @@ +# Natural Language Toolkit: Product Reviews Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for reviews corpora (syntax based on Customer Review Corpus). + +Customer Review Corpus information +================================== + +Annotated by: Minqing Hu and Bing Liu, 2004. + Department of Computer Science + University of Illinois at Chicago + +Contact: Bing Liu, liub@cs.uic.edu + https://www.cs.uic.edu/~liub + +Distributed with permission. + +The "product_reviews_1" and "product_reviews_2" datasets respectively contain +annotated customer reviews of 5 and 9 products from amazon.com. + +Related papers: + +- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews". 
+ Proceedings of the ACM SIGKDD International Conference on Knowledge + Discovery & Data Mining (KDD-04), 2004. + +- Minqing Hu and Bing Liu. "Mining Opinion Features in Customer Reviews". + Proceedings of Nineteeth National Conference on Artificial Intelligence + (AAAI-2004), 2004. + +- Xiaowen Ding, Bing Liu and Philip S. Yu. "A Holistic Lexicon-Based Appraoch to + Opinion Mining." Proceedings of First ACM International Conference on Web + Search and Data Mining (WSDM-2008), Feb 11-12, 2008, Stanford University, + Stanford, California, USA. + +Symbols used in the annotated reviews: + + :[t]: the title of the review: Each [t] tag starts a review. + :xxxx[+|-n]: xxxx is a product feature. + :[+n]: Positive opinion, n is the opinion strength: 3 strongest, and 1 weakest. + Note that the strength is quite subjective. + You may want ignore it, but only considering + and - + :[-n]: Negative opinion + :##: start of each sentence. Each line is a sentence. + :[u]: feature not appeared in the sentence. + :[p]: feature not appeared in the sentence. Pronoun resolution is needed. + :[s]: suggestion or recommendation. + :[cc]: comparison with a competing product from a different brand. + :[cs]: comparison with a competing product from the same brand. + +Note: Some of the files (e.g. "ipod.txt", "Canon PowerShot SD500.txt") do not + provide separation between different reviews. This is due to the fact that + the dataset was specifically designed for aspect/feature-based sentiment + analysis, for which sentence-level annotation is sufficient. For document- + level classification and analysis, this peculiarity should be taken into + consideration. +""" + +import re + +from nltk.corpus.reader.api import * +from nltk.tokenize import * + +TITLE = re.compile(r"^\[t\](.*)$") # [t] Title +FEATURES = re.compile( + r"((?:(?:\w+\s)+)?\w+)\[((?:\+|\-)\d)\]" +) # find 'feature' in feature[+3] +NOTES = re.compile(r"\[(?!t)(p|u|s|cc|cs)\]") # find 'p' in camera[+2][p] +SENT = re.compile(r"##(.*)$") # find tokenized sentence + + +class Review: + """ + A Review is the main block of a ReviewsCorpusReader. + """ + + def __init__(self, title=None, review_lines=None): + """ + :param title: the title of the review. + :param review_lines: the list of the ReviewLines that belong to the Review. + """ + self.title = title + if review_lines is None: + self.review_lines = [] + else: + self.review_lines = review_lines + + def add_line(self, review_line): + """ + Add a line (ReviewLine) to the review. + + :param review_line: a ReviewLine instance that belongs to the Review. + """ + assert isinstance(review_line, ReviewLine) + self.review_lines.append(review_line) + + def features(self): + """ + Return a list of features in the review. Each feature is a tuple made of + the specific item feature and the opinion strength about that feature. + + :return: all features of the review as a list of tuples (feat, score). + :rtype: list(tuple) + """ + features = [] + for review_line in self.review_lines: + features.extend(review_line.features) + return features + + def sents(self): + """ + Return all tokenized sentences in the review. + + :return: all sentences of the review as lists of tokens. 
+ :rtype: list(list(str)) + """ + return [review_line.sent for review_line in self.review_lines] + + def __repr__(self): + return 'Review(title="{}", review_lines={})'.format( + self.title, self.review_lines + ) + + +class ReviewLine: + """ + A ReviewLine represents a sentence of the review, together with (optional) + annotations of its features and notes about the reviewed item. + """ + + def __init__(self, sent, features=None, notes=None): + self.sent = sent + if features is None: + self.features = [] + else: + self.features = features + + if notes is None: + self.notes = [] + else: + self.notes = notes + + def __repr__(self): + return "ReviewLine(features={}, notes={}, sent={})".format( + self.features, self.notes, self.sent + ) + + +class ReviewsCorpusReader(CorpusReader): + """ + Reader for the Customer Review Data dataset by Hu, Liu (2004). + Note: we are not applying any sentence tokenization at the moment, just word + tokenization. + + >>> from nltk.corpus import product_reviews_1 + >>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt') + >>> review = camera_reviews[0] + >>> review.sents()[0] # doctest: +NORMALIZE_WHITESPACE + ['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am', + 'extremely', 'satisfied', 'with', 'the', 'purchase', '.'] + >>> review.features() # doctest: +NORMALIZE_WHITESPACE + [('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'), + ('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'), + ('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'), + ('option', '+1')] + + We can also reach the same information directly from the stream: + + >>> product_reviews_1.features('Canon_G3.txt') + [('canon powershot g3', '+3'), ('use', '+2'), ...] + + We can compute stats for specific product features: + + >>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture']) + >>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture']) + >>> mean = tot / n_reviews + >>> print(n_reviews, tot, mean) + 15 24 1.6 + """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, root, fileids, word_tokenizer=WordPunctTokenizer(), encoding="utf8" + ): + """ + :param root: The root directory for the corpus. + :param fileids: a list or regexp specifying the fileids in the corpus. + :param word_tokenizer: a tokenizer for breaking sentences or paragraphs + into words. Default: `WordPunctTokenizer` + :param encoding: the encoding that should be used to read the corpus. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + self._word_tokenizer = word_tokenizer + self._readme = "README.txt" + + def features(self, fileids=None): + """ + Return a list of features. Each feature is a tuple made of the specific + item feature and the opinion strength about that feature. + + :param fileids: a list or regexp specifying the ids of the files whose + features have to be returned. + :return: all features for the item(s) in the given file(s). + :rtype: list(tuple) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(fileid, self._read_features, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def reviews(self, fileids=None): + """ + Return all the reviews as a list of Review objects. If `fileids` is + specified, return all the reviews from each of the specified files. 
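# A brief sketch of walking Review and ReviewLine objects directly, assuming
# nltk.download('product_reviews_1') has been run:
from nltk.corpus import product_reviews_1

review = product_reviews_1.reviews("Canon_G3.txt")[0]
print(review.title)
for line in review.review_lines[:3]:
    print(line.features, line.notes, " ".join(line.sent))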
+ + :param fileids: a list or regexp specifying the ids of the files whose + reviews have to be returned. + :return: the given file(s) as a list of reviews. + """ + if fileids is None: + fileids = self._fileids + return concat( + [ + self.CorpusView(fileid, self._read_review_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + Return all sentences in the corpus or in the specified files. + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. + :return: the given file(s) as a list of sentences, each encoded as a + list of word strings. + :rtype: list(list(str)) + """ + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def words(self, fileids=None): + """ + Return all words and punctuation symbols in the corpus or in the specified + files. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_features(self, stream): + features = [] + for i in range(20): + line = stream.readline() + if not line: + return features + features.extend(re.findall(FEATURES, line)) + return features + + def _read_review_block(self, stream): + while True: + line = stream.readline() + if not line: + return [] # end of file. + title_match = re.match(TITLE, line) + if title_match: + review = Review( + title=title_match.group(1).strip() + ) # We create a new review + break + + # Scan until we find another line matching the regexp, or EOF. + while True: + oldpos = stream.tell() + line = stream.readline() + # End of file: + if not line: + return [review] + # Start of a new review: backup to just before it starts, and + # return the review we've already collected. + if re.match(TITLE, line): + stream.seek(oldpos) + return [review] + # Anything else is part of the review line. + feats = re.findall(FEATURES, line) + notes = re.findall(NOTES, line) + sent = re.findall(SENT, line) + if sent: + sent = self._word_tokenizer.tokenize(sent[0]) + review_line = ReviewLine(sent=sent, features=feats, notes=notes) + review.add_line(review_line) + + def _read_sent_block(self, stream): + sents = [] + for review in self._read_review_block(stream): + sents.extend([sent for sent in review.sents()]) + return sents + + def _read_word_block(self, stream): + words = [] + for i in range(20): # Read 20 lines at a time. + line = stream.readline() + sent = re.findall(SENT, line) + if sent: + words.extend(self._word_tokenizer.tokenize(sent[0])) + return words diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/rte.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/rte.py new file mode 100644 index 0000000000000000000000000000000000000000..98261fae9adf04ecf6938c966ec3cae4fcc775a2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/rte.py @@ -0,0 +1,146 @@ +# Natural Language Toolkit: RTE Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora. 
+ +The files were taken from the RTE1, RTE2 and RTE3 datasets and the files +were regularized. + +Filenames are of the form rte*_dev.xml and rte*_test.xml. The latter are the +gold standard annotated files. + +Each entailment corpus is a list of 'text'/'hypothesis' pairs. The following +example is taken from RTE3:: + + + + The sale was made to pay Yukos' US$ 27.5 billion tax bill, + Yuganskneftegaz was originally sold for US$ 9.4 billion to a little known + company Baikalfinansgroup which was later bought by the Russian + state-owned oil company Rosneft . + + Baikalfinansgroup was sold to Rosneft. + + +In order to provide globally unique IDs for each pair, a new attribute +``challenge`` has been added to the root element ``entailment-corpus`` of each +file, taking values 1, 2 or 3. The GID is formatted 'm-n', where 'm' is the +challenge number and 'n' is the pair ID. +""" +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.xmldocs import * + + +def norm(value_string): + """ + Normalize the string value in an RTE pair's ``value`` or ``entailment`` + attribute as an integer (1, 0). + + :param value_string: the label used to classify a text/hypothesis pair + :type value_string: str + :rtype: int + """ + + valdict = {"TRUE": 1, "FALSE": 0, "YES": 1, "NO": 0} + return valdict[value_string.upper()] + + +class RTEPair: + """ + Container for RTE text-hypothesis pairs. + + The entailment relation is signalled by the ``value`` attribute in RTE1, and by + ``entailment`` in RTE2 and RTE3. These both get mapped on to the ``entailment`` + attribute of this class. + """ + + def __init__( + self, + pair, + challenge=None, + id=None, + text=None, + hyp=None, + value=None, + task=None, + length=None, + ): + """ + :param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3) + :param id: identifier for the pair + :param text: the text component of the pair + :param hyp: the hypothesis component of the pair + :param value: classification label for the pair + :param task: attribute for the particular NLP task that the data was drawn from + :param length: attribute for the length of the text of the pair + """ + self.challenge = challenge + self.id = pair.attrib["id"] + self.gid = f"{self.challenge}-{self.id}" + self.text = pair[0].text + self.hyp = pair[1].text + + if "value" in pair.attrib: + self.value = norm(pair.attrib["value"]) + elif "entailment" in pair.attrib: + self.value = norm(pair.attrib["entailment"]) + else: + self.value = value + if "task" in pair.attrib: + self.task = pair.attrib["task"] + else: + self.task = task + if "length" in pair.attrib: + self.length = pair.attrib["length"] + else: + self.length = length + + def __repr__(self): + if self.challenge: + return f"" + else: + return "" % self.id + + +class RTECorpusReader(XMLCorpusReader): + """ + Corpus reader for corpora in RTE challenges. + + This is just a wrapper around the XMLCorpusReader. See module docstring above for the expected + structure of input documents. + """ + + def _read_etree(self, doc): + """ + Map the XML input into an RTEPair. + + This uses the ``getiterator()`` method from the ElementTree package to + find all the ```` elements. + + :param doc: a parsed XML document + :rtype: list(RTEPair) + """ + try: + challenge = doc.attrib["challenge"] + except KeyError: + challenge = None + pairiter = doc.iter("pair") + return [RTEPair(pair, challenge=challenge) for pair in pairiter] + + def pairs(self, fileids): + """ + Build a list of RTEPairs from a RTE corpus. 
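# A minimal sketch of reading RTE text/hypothesis pairs, assuming
# nltk.download('rte') has been run and the usual fileids
# (rte1_dev.xml ... rte3_test.xml) are present:
from nltk.corpus import rte

pair = rte.pairs(["rte3_dev.xml"])[0]
print(pair.gid, pair.value)   # globally unique id and 1/0 entailment label
print(pair.text)
print(pair.hyp)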
+ + :param fileids: a list of RTE corpus fileids + :type: list + :rtype: list(RTEPair) + """ + if isinstance(fileids, str): + fileids = [fileids] + return concat([self._read_etree(self.xml(fileid)) for fileid in fileids]) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/semcor.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/semcor.py new file mode 100644 index 0000000000000000000000000000000000000000..c44474280deda5087069e7c398eaab79656f97b3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/semcor.py @@ -0,0 +1,296 @@ +# Natural Language Toolkit: SemCor Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nathan Schneider +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the SemCor Corpus. +""" + +__docformat__ = "epytext en" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView +from nltk.tree import Tree + + +class SemcorCorpusReader(XMLCorpusReader): + """ + Corpus reader for the SemCor Corpus. + For access to the complete XML data structure, use the ``xml()`` + method. For access to simple word lists and tagged word lists, use + ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``. + """ + + def __init__(self, root, fileids, wordnet, lazy=True): + XMLCorpusReader.__init__(self, root, fileids) + self._lazy = lazy + self._wordnet = wordnet + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + return self._items(fileids, "word", False, False, False) + + def chunks(self, fileids=None): + """ + :return: the given file(s) as a list of chunks, + each of which is a list of words and punctuation symbols + that form a unit. + :rtype: list(list(str)) + """ + return self._items(fileids, "chunk", False, False, False) + + def tagged_chunks(self, fileids=None, tag=("pos" or "sem" or "both")): + """ + :return: the given file(s) as a list of tagged chunks, represented + in tree form. + :rtype: list(Tree) + + :param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'` + to indicate the kind of tags to include. Semantic tags consist of + WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity + without a specific entry in WordNet. (Named entities of type 'other' + have no lemma. Other chunks not in WordNet have no semantic tag. + Punctuation tokens have `None` for their part of speech tag.) + """ + return self._items(fileids, "chunk", False, tag != "sem", tag != "pos") + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of sentences, each encoded + as a list of word strings. + :rtype: list(list(str)) + """ + return self._items(fileids, "word", True, False, False) + + def chunk_sents(self, fileids=None): + """ + :return: the given file(s) as a list of sentences, each encoded + as a list of chunks. + :rtype: list(list(list(str))) + """ + return self._items(fileids, "chunk", True, False, False) + + def tagged_sents(self, fileids=None, tag=("pos" or "sem" or "both")): + """ + :return: the given file(s) as a list of sentences. Each sentence + is represented as a list of tagged chunks (in tree form). + :rtype: list(list(Tree)) + + :param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'` + to indicate the kind of tags to include. Semantic tags consist of + WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity + without a specific entry in WordNet. 
(Named entities of type 'other' + have no lemma. Other chunks not in WordNet have no semantic tag. + Punctuation tokens have `None` for their part of speech tag.) + """ + return self._items(fileids, "chunk", True, tag != "sem", tag != "pos") + + def _items(self, fileids, unit, bracket_sent, pos_tag, sem_tag): + if unit == "word" and not bracket_sent: + # the result of the SemcorWordView may be a multiword unit, so the + # LazyConcatenation will make sure the sentence is flattened + _ = lambda *args: LazyConcatenation( + (SemcorWordView if self._lazy else self._words)(*args) + ) + else: + _ = SemcorWordView if self._lazy else self._words + return concat( + [ + _(fileid, unit, bracket_sent, pos_tag, sem_tag, self._wordnet) + for fileid in self.abspaths(fileids) + ] + ) + + def _words(self, fileid, unit, bracket_sent, pos_tag, sem_tag): + """ + Helper used to implement the view methods -- returns a list of + tokens, (segmented) words, chunks, or sentences. The tokens + and chunks may optionally be tagged (with POS and sense + information). + + :param fileid: The name of the underlying file. + :param unit: One of `'token'`, `'word'`, or `'chunk'`. + :param bracket_sent: If true, include sentence bracketing. + :param pos_tag: Whether to include part-of-speech tags. + :param sem_tag: Whether to include semantic tags, namely WordNet lemma + and OOV named entity status. + """ + assert unit in ("token", "word", "chunk") + result = [] + + xmldoc = ElementTree.parse(fileid).getroot() + for xmlsent in xmldoc.findall(".//s"): + sent = [] + for xmlword in _all_xmlwords_in(xmlsent): + itm = SemcorCorpusReader._word( + xmlword, unit, pos_tag, sem_tag, self._wordnet + ) + if unit == "word": + sent.extend(itm) + else: + sent.append(itm) + + if bracket_sent: + result.append(SemcorSentence(xmlsent.attrib["snum"], sent)) + else: + result.extend(sent) + + assert None not in result + return result + + @staticmethod + def _word(xmlword, unit, pos_tag, sem_tag, wordnet): + tkn = xmlword.text + if not tkn: + tkn = "" # fixes issue 337? + + lemma = xmlword.get("lemma", tkn) # lemma or NE class + lexsn = xmlword.get("lexsn") # lex_sense (locator for the lemma's sense) + if lexsn is not None: + sense_key = lemma + "%" + lexsn + wnpos = ("n", "v", "a", "r", "s")[ + int(lexsn.split(":")[0]) - 1 + ] # see http://wordnet.princeton.edu/man/senseidx.5WN.html + else: + sense_key = wnpos = None + redef = xmlword.get( + "rdf", tkn + ) # redefinition--this indicates the lookup string + # does not exactly match the enclosed string, e.g. due to typographical adjustments + # or discontinuity of a multiword expression. If a redefinition has occurred, + # the "rdf" attribute holds its inflected form and "lemma" holds its lemma. + # For NEs, "rdf", "lemma", and "pn" all hold the same value (the NE class). + sensenum = xmlword.get("wnsn") # WordNet sense number + isOOVEntity = "pn" in xmlword.keys() # a "personal name" (NE) not in WordNet + pos = xmlword.get( + "pos" + ) # part of speech for the whole chunk (None for punctuation) + + if unit == "token": + if not pos_tag and not sem_tag: + itm = tkn + else: + itm = ( + (tkn,) + + ((pos,) if pos_tag else ()) + + ((lemma, wnpos, sensenum, isOOVEntity) if sem_tag else ()) + ) + return itm + else: + ww = tkn.split("_") # TODO: case where punctuation intervenes in MWE + if unit == "word": + return ww + else: + if sensenum is not None: + try: + sense = wordnet.lemma_from_key(sense_key) # Lemma object + except Exception: + # cannot retrieve the wordnet.Lemma object. 
possible reasons: + # (a) the wordnet corpus is not downloaded; + # (b) a nonexistent sense is annotated: e.g., such.s.00 triggers: + # nltk.corpus.reader.wordnet.WordNetError: No synset found for key u'such%5:00:01:specified:00' + # solution: just use the lemma name as a string + try: + sense = "%s.%s.%02d" % ( + lemma, + wnpos, + int(sensenum), + ) # e.g.: reach.v.02 + except ValueError: + sense = ( + lemma + "." + wnpos + "." + sensenum + ) # e.g. the sense number may be "2;1" + + bottom = [Tree(pos, ww)] if pos_tag else ww + + if sem_tag and isOOVEntity: + if sensenum is not None: + return Tree(sense, [Tree("NE", bottom)]) + else: # 'other' NE + return Tree("NE", bottom) + elif sem_tag and sensenum is not None: + return Tree(sense, bottom) + elif pos_tag: + return bottom[0] + else: + return bottom # chunk as a list + + +def _all_xmlwords_in(elt, result=None): + if result is None: + result = [] + for child in elt: + if child.tag in ("wf", "punc"): + result.append(child) + else: + _all_xmlwords_in(child, result) + return result + + +class SemcorSentence(list): + """ + A list of words, augmented by an attribute ``num`` used to record + the sentence identifier (the ``n`` attribute from the XML). + """ + + def __init__(self, num, items): + self.num = num + list.__init__(self, items) + + +class SemcorWordView(XMLCorpusView): + """ + A stream backed corpus view specialized for use with the BNC corpus. + """ + + def __init__(self, fileid, unit, bracket_sent, pos_tag, sem_tag, wordnet): + """ + :param fileid: The name of the underlying file. + :param unit: One of `'token'`, `'word'`, or `'chunk'`. + :param bracket_sent: If true, include sentence bracketing. + :param pos_tag: Whether to include part-of-speech tags. + :param sem_tag: Whether to include semantic tags, namely WordNet lemma + and OOV named entity status. + """ + if bracket_sent: + tagspec = ".*/s" + else: + tagspec = ".*/s/(punc|wf)" + + self._unit = unit + self._sent = bracket_sent + self._pos_tag = pos_tag + self._sem_tag = sem_tag + self._wordnet = wordnet + + XMLCorpusView.__init__(self, fileid, tagspec) + + def handle_elt(self, elt, context): + if self._sent: + return self.handle_sent(elt) + else: + return self.handle_word(elt) + + def handle_word(self, elt): + return SemcorCorpusReader._word( + elt, self._unit, self._pos_tag, self._sem_tag, self._wordnet + ) + + def handle_sent(self, elt): + sent = [] + for child in elt: + if child.tag in ("wf", "punc"): + itm = self.handle_word(child) + if self._unit == "word": + sent.extend(itm) + else: + sent.append(itm) + else: + raise ValueError("Unexpected element %s" % child.tag) + return SemcorSentence(elt.attrib["snum"], sent) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/sentiwordnet.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/sentiwordnet.py new file mode 100644 index 0000000000000000000000000000000000000000..42426100da71cf1d6b23353a22ce2e074837424d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/sentiwordnet.py @@ -0,0 +1,136 @@ +# Natural Language Toolkit: SentiWordNet +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Potts +# URL: +# For license information, see LICENSE.TXT + +""" +An NLTK interface for SentiWordNet + +SentiWordNet is a lexical resource for opinion mining. +SentiWordNet assigns to each synset of WordNet three +sentiment scores: positivity, negativity, and objectivity. 
+ +For details about SentiWordNet see: +http://sentiwordnet.isti.cnr.it/ + + >>> from nltk.corpus import sentiwordnet as swn + >>> print(swn.senti_synset('breakdown.n.03')) + + >>> list(swn.senti_synsets('slow')) + [SentiSynset('decelerate.v.01'), SentiSynset('slow.v.02'),\ + SentiSynset('slow.v.03'), SentiSynset('slow.a.01'),\ + SentiSynset('slow.a.02'), SentiSynset('dense.s.04'),\ + SentiSynset('slow.a.04'), SentiSynset('boring.s.01'),\ + SentiSynset('dull.s.08'), SentiSynset('slowly.r.01'),\ + SentiSynset('behind.r.03')] + >>> happy = swn.senti_synsets('happy', 'a') + >>> happy0 = list(happy)[0] + >>> happy0.pos_score() + 0.875 + >>> happy0.neg_score() + 0.0 + >>> happy0.obj_score() + 0.125 +""" + +import re + +from nltk.corpus.reader import CorpusReader + + +class SentiWordNetCorpusReader(CorpusReader): + def __init__(self, root, fileids, encoding="utf-8"): + """ + Construct a new SentiWordNet Corpus Reader, using data from + the specified file. + """ + super().__init__(root, fileids, encoding=encoding) + if len(self._fileids) != 1: + raise ValueError("Exactly one file must be specified") + self._db = {} + self._parse_src_file() + + def _parse_src_file(self): + lines = self.open(self._fileids[0]).read().splitlines() + lines = filter((lambda x: not re.search(r"^\s*#", x)), lines) + for i, line in enumerate(lines): + fields = [field.strip() for field in re.split(r"\t+", line)] + try: + pos, offset, pos_score, neg_score, synset_terms, gloss = fields + except BaseException as e: + raise ValueError(f"Line {i} formatted incorrectly: {line}\n") from e + if pos and offset: + offset = int(offset) + self._db[(pos, offset)] = (float(pos_score), float(neg_score)) + + def senti_synset(self, *vals): + from nltk.corpus import wordnet as wn + + if tuple(vals) in self._db: + pos_score, neg_score = self._db[tuple(vals)] + pos, offset = vals + if pos == "s": + pos = "a" + synset = wn.synset_from_pos_and_offset(pos, offset) + return SentiSynset(pos_score, neg_score, synset) + else: + synset = wn.synset(vals[0]) + pos = synset.pos() + if pos == "s": + pos = "a" + offset = synset.offset() + if (pos, offset) in self._db: + pos_score, neg_score = self._db[(pos, offset)] + return SentiSynset(pos_score, neg_score, synset) + else: + return None + + def senti_synsets(self, string, pos=None): + from nltk.corpus import wordnet as wn + + sentis = [] + synset_list = wn.synsets(string, pos) + for synset in synset_list: + sentis.append(self.senti_synset(synset.name())) + sentis = filter(lambda x: x, sentis) + return sentis + + def all_senti_synsets(self): + from nltk.corpus import wordnet as wn + + for key, fields in self._db.items(): + pos, offset = key + pos_score, neg_score = fields + synset = wn.synset_from_pos_and_offset(pos, offset) + yield SentiSynset(pos_score, neg_score, synset) + + +class SentiSynset: + def __init__(self, pos_score, neg_score, synset): + self._pos_score = pos_score + self._neg_score = neg_score + self._obj_score = 1.0 - (self._pos_score + self._neg_score) + self.synset = synset + + def pos_score(self): + return self._pos_score + + def neg_score(self): + return self._neg_score + + def obj_score(self): + return self._obj_score + + def __str__(self): + """Prints just the Pos/Neg scores for now.""" + s = "<" + s += self.synset.name() + ": " + s += "PosScore=%s " % self._pos_score + s += "NegScore=%s" % self._neg_score + s += ">" + return s + + def __repr__(self): + return "Senti" + repr(self.synset) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/sinica_treebank.py 
b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/sinica_treebank.py new file mode 100644 index 0000000000000000000000000000000000000000..6aa7f5ec9f34114c499721650bbb307413dd7804 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/sinica_treebank.py @@ -0,0 +1,75 @@ +# Natural Language Toolkit: Sinica Treebank Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Sinica Treebank Corpus Sample + +http://rocling.iis.sinica.edu.tw/CKIP/engversion/treebank.htm + +10,000 parsed sentences, drawn from the Academia Sinica Balanced +Corpus of Modern Chinese. Parse tree notation is based on +Information-based Case Grammar. Tagset documentation is available +at https://www.sinica.edu.tw/SinicaCorpus/modern_e_wordtype.html + +Language and Knowledge Processing Group, Institute of Information +Science, Academia Sinica + +The data is distributed with the Natural Language Toolkit under the terms of +the Creative Commons Attribution-NonCommercial-ShareAlike License +[https://creativecommons.org/licenses/by-nc-sa/2.5/]. + +References: + +Feng-Yi Chen, Pi-Fang Tsai, Keh-Jiann Chen, and Chu-Ren Huang (1999) +The Construction of Sinica Treebank. Computational Linguistics and +Chinese Language Processing, 4, pp 87-104. + +Huang Chu-Ren, Keh-Jiann Chen, Feng-Yi Chen, Keh-Jiann Chen, Zhao-Ming +Gao, and Kuang-Yu Chen. 2000. Sinica Treebank: Design Criteria, +Annotation Guidelines, and On-line Interface. Proceedings of 2nd +Chinese Language Processing Workshop, Association for Computational +Linguistics. + +Chen Keh-Jiann and Yu-Ming Hsieh (2004) Chinese Treebanks and Grammar +Extraction, Proceedings of IJCNLP-04, pp560-565. +""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag +from nltk.tree import sinica_parse + +IDENTIFIER = re.compile(r"^#\S+\s") +APPENDIX = re.compile(r"(?<=\))#.*$") +TAGWORD = re.compile(r":([^:()|]+):([^:()|]+)") +WORD = re.compile(r":[^:()|]+:([^:()|]+)") + + +class SinicaTreebankCorpusReader(SyntaxCorpusReader): + """ + Reader for the sinica treebank. + """ + + def _read_block(self, stream): + sent = stream.readline() + sent = IDENTIFIER.sub("", sent) + sent = APPENDIX.sub("", sent) + return [sent] + + def _parse(self, sent): + return sinica_parse(sent) + + def _tag(self, sent, tagset=None): + tagged_sent = [(w, t) for (t, w) in TAGWORD.findall(sent)] + if tagset and tagset != self._tagset: + tagged_sent = [ + (w, map_tag(self._tagset, tagset, t)) for (w, t) in tagged_sent + ] + return tagged_sent + + def _word(self, sent): + return WORD.findall(sent) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py new file mode 100644 index 0000000000000000000000000000000000000000..b4ae423eb920d6d86c0fce8a43881f7bdeaf5b35 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: String Category Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Read tuples from a corpus consisting of categorized strings. +For example, from the question classification corpus: + +NUM:dist How far is it from Denver to Aspen ? +LOC:city What county is Modesto , California in ? +HUM:desc Who was Galileo ? +DESC:def What is an atom ? 
+NUM:date When did Hawaii become a state ? +""" + +from nltk.corpus.reader.api import * + +# based on PPAttachmentCorpusReader +from nltk.corpus.reader.util import * + + +# [xx] Should the order of the tuple be reversed -- in most other places +# in nltk, we use the form (data, tag) -- e.g., tagged words and +# labeled texts for classifiers. +class StringCategoryCorpusReader(CorpusReader): + def __init__(self, root, fileids, delimiter=" ", encoding="utf8"): + """ + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + :param delimiter: Field delimiter + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._delimiter = delimiter + + def tuples(self, fileids=None): + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_tuple_block(self, stream): + line = stream.readline().strip() + if line: + return [tuple(line.split(self._delimiter, 1))] + else: + return [] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py new file mode 100644 index 0000000000000000000000000000000000000000..f6a396fb137ccf17c990f41268f77e176380acb1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py @@ -0,0 +1,125 @@ +# Natural Language Toolkit: Switchboard Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT +import re + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag, str2tuple + + +class SwitchboardTurn(list): + """ + A specialized list object used to encode switchboard utterances. + The elements of the list are the words in the utterance; and two + attributes, ``speaker`` and ``id``, are provided to retrieve the + spearker identifier and utterance id. Note that utterance ids + are only unique within a given discourse. + """ + + def __init__(self, words, speaker, id): + list.__init__(self, words) + self.speaker = speaker + self.id = int(id) + + def __repr__(self): + if len(self) == 0: + text = "" + elif isinstance(self[0], tuple): + text = " ".join("%s/%s" % w for w in self) + else: + text = " ".join(self) + return f"<{self.speaker}.{self.id}: {text!r}>" + + +class SwitchboardCorpusReader(CorpusReader): + _FILES = ["tagged"] + # Use the "tagged" file even for non-tagged data methods, since + # it's tokenized. 
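    # ------------------------------------------------------------------
    # [Editor's sketch -- illustrative only, not part of the patched
    # file.]  The reader splits each "word/TAG" token with
    # nltk.tag.str2tuple and yields SwitchboardTurn objects carrying a
    # speaker label and an utterance id.  A minimal usage sketch,
    # assuming the sample corpus has been fetched with
    # nltk.download('switchboard'):
    #
    #   >>> from nltk.tag import str2tuple
    #   >>> str2tuple('okay/UH')
    #   ('okay', 'UH')
    #   >>> from nltk.corpus import switchboard
    #   >>> turn = switchboard.tagged_turns()[0]    # doctest: +SKIP
    #   >>> turn.speaker, turn.id                   # doctest: +SKIP
    # ------------------------------------------------------------------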
+ + def __init__(self, root, tagset=None): + CorpusReader.__init__(self, root, self._FILES) + self._tagset = tagset + + def words(self): + return StreamBackedCorpusView(self.abspath("tagged"), self._words_block_reader) + + def tagged_words(self, tagset=None): + def tagged_words_block_reader(stream): + return self._tagged_words_block_reader(stream, tagset) + + return StreamBackedCorpusView(self.abspath("tagged"), tagged_words_block_reader) + + def turns(self): + return StreamBackedCorpusView(self.abspath("tagged"), self._turns_block_reader) + + def tagged_turns(self, tagset=None): + def tagged_turns_block_reader(stream): + return self._tagged_turns_block_reader(stream, tagset) + + return StreamBackedCorpusView(self.abspath("tagged"), tagged_turns_block_reader) + + def discourses(self): + return StreamBackedCorpusView( + self.abspath("tagged"), self._discourses_block_reader + ) + + def tagged_discourses(self, tagset=False): + def tagged_discourses_block_reader(stream): + return self._tagged_discourses_block_reader(stream, tagset) + + return StreamBackedCorpusView( + self.abspath("tagged"), tagged_discourses_block_reader + ) + + def _discourses_block_reader(self, stream): + # returns at most 1 discourse. (The other methods depend on this.) + return [ + [ + self._parse_utterance(u, include_tag=False) + for b in read_blankline_block(stream) + for u in b.split("\n") + if u.strip() + ] + ] + + def _tagged_discourses_block_reader(self, stream, tagset=None): + # returns at most 1 discourse. (The other methods depend on this.) + return [ + [ + self._parse_utterance(u, include_tag=True, tagset=tagset) + for b in read_blankline_block(stream) + for u in b.split("\n") + if u.strip() + ] + ] + + def _turns_block_reader(self, stream): + return self._discourses_block_reader(stream)[0] + + def _tagged_turns_block_reader(self, stream, tagset=None): + return self._tagged_discourses_block_reader(stream, tagset)[0] + + def _words_block_reader(self, stream): + return sum(self._discourses_block_reader(stream)[0], []) + + def _tagged_words_block_reader(self, stream, tagset=None): + return sum(self._tagged_discourses_block_reader(stream, tagset)[0], []) + + _UTTERANCE_RE = re.compile(r"(\w+)\.(\d+)\:\s*(.*)") + _SEP = "/" + + def _parse_utterance(self, utterance, include_tag, tagset=None): + m = self._UTTERANCE_RE.match(utterance) + if m is None: + raise ValueError("Bad utterance %r" % utterance) + speaker, id, text = m.groups() + words = [str2tuple(s, self._SEP) for s in text.split()] + if not include_tag: + words = [w for (w, t) in words] + elif tagset and tagset != self._tagset: + words = [(w, map_tag(self._tagset, tagset, t)) for (w, t) in words] + return SwitchboardTurn(words, speaker, id) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py new file mode 100644 index 0000000000000000000000000000000000000000..2dcfe1b6ff5487a57da8b5e8a9b919eba8b3b6e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py @@ -0,0 +1,354 @@ +# Natural Language Toolkit: Tagged Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Jacob Perkins +# URL: +# For license information, see LICENSE.TXT + +""" +A reader for corpora whose documents contain part-of-speech-tagged words. 
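
(Editor's illustrative example -- the root path, fileid pattern and the
output below are made up; this assumes a directory of plain-text files
in the "word/tag" format handled by ``TaggedCorpusReader``:)

    >>> from nltk.corpus.reader import TaggedCorpusReader
    >>> reader = TaggedCorpusReader('/path/to/corpus', r'.*\.pos')  # doctest: +SKIP
    >>> reader.tagged_words()[:3]  # doctest: +SKIP
    [('The', 'DT'), ('quick', 'JJ'), ('fox', 'NN')]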
+""" + +import os + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.timit import read_timit_block +from nltk.corpus.reader.util import * +from nltk.tag import map_tag, str2tuple +from nltk.tokenize import * + + +class TaggedCorpusReader(CorpusReader): + """ + Reader for simple part-of-speech tagged corpora. Paragraphs are + assumed to be split using blank lines. Sentences and words can be + tokenized using the default tokenizers, or by custom tokenizers + specified as parameters to the constructor. Words are parsed + using ``nltk.tag.str2tuple``. By default, ``'/'`` is used as the + separator. I.e., words should have the form:: + + word1/tag1 word2/tag2 word3/tag3 ... + + But custom separators may be specified as parameters to the + constructor. Part of speech tags are case-normalized to upper + case. + """ + + def __init__( + self, + root, + fileids, + sep="/", + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + para_block_reader=read_blankline_block, + encoding="utf8", + tagset=None, + ): + """ + Construct a new Tagged Corpus reader for a set of documents + located at the given root directory. Example usage: + + >>> root = '/...path to corpus.../' + >>> reader = TaggedCorpusReader(root, '.*', '.txt') # doctest: +SKIP + + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._sep = sep + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + self._tagset = tagset + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + TaggedCorpusView( + fileid, + enc, + False, + False, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + None, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + """ + return concat( + [ + TaggedCorpusView( + fileid, + enc, + False, + True, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + None, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of word strings. + :rtype: list(list(list(str))) + """ + return concat( + [ + TaggedCorpusView( + fileid, + enc, + False, + True, + True, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + None, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. 
+ :rtype: list(tuple(str,str)) + """ + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + TaggedCorpusView( + fileid, + enc, + True, + False, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + tag_mapping_function, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. + + :rtype: list(list(tuple(str,str))) + """ + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + TaggedCorpusView( + fileid, + enc, + True, + True, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + tag_mapping_function, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_paras(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of ``(word,tag)`` tuples. + :rtype: list(list(list(tuple(str,str)))) + """ + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + TaggedCorpusView( + fileid, + enc, + True, + True, + True, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + tag_mapping_function, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class CategorizedTaggedCorpusReader(CategorizedCorpusReader, TaggedCorpusReader): + """ + A reader for part-of-speech tagged corpora whose documents are + divided into categories based on their file identifiers. + """ + + def __init__(self, *args, **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to + the ``CategorizedCorpusReader`` constructor. The remaining arguments + are passed to the ``TaggedCorpusReader``. + """ + CategorizedCorpusReader.__init__(self, kwargs) + TaggedCorpusReader.__init__(self, *args, **kwargs) + + def tagged_words(self, fileids=None, categories=None, tagset=None): + return super().tagged_words(self._resolve(fileids, categories), tagset) + + def tagged_sents(self, fileids=None, categories=None, tagset=None): + return super().tagged_sents(self._resolve(fileids, categories), tagset) + + def tagged_paras(self, fileids=None, categories=None, tagset=None): + return super().tagged_paras(self._resolve(fileids, categories), tagset) + + +class TaggedCorpusView(StreamBackedCorpusView): + """ + A specialized corpus view for tagged documents. It can be + customized via flags to divide the tagged corpus documents up by + sentence or paragraph, and to include or omit part of speech tags. + ``TaggedCorpusView`` objects are typically created by + ``TaggedCorpusReader`` (not directly by nltk users). 
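
    (Editor's illustrative example -- the Brown reader bundled with NLTK
    is a ``CategorizedTaggedCorpusReader``, so reading it exercises this
    view; assumes the data has been fetched with nltk.download('brown'):)

    >>> from nltk.corpus import brown
    >>> brown.tagged_words(tagset='universal')[:2]  # doctest: +SKIP
    [('The', 'DET'), ('Fulton', 'NOUN')]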
+ """ + + def __init__( + self, + corpus_file, + encoding, + tagged, + group_by_sent, + group_by_para, + sep, + word_tokenizer, + sent_tokenizer, + para_block_reader, + tag_mapping_function=None, + ): + self._tagged = tagged + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + self._sep = sep + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + self._tag_mapping_function = tag_mapping_function + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + """Reads one paragraph at a time.""" + block = [] + for para_str in self._para_block_reader(stream): + para = [] + for sent_str in self._sent_tokenizer.tokenize(para_str): + sent = [ + str2tuple(s, self._sep) + for s in self._word_tokenizer.tokenize(sent_str) + ] + if self._tag_mapping_function: + sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent] + if not self._tagged: + sent = [w for (w, t) in sent] + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + if self._group_by_para: + block.append(para) + else: + block.extend(para) + return block + + +# needs to implement simplified tags +class MacMorphoCorpusReader(TaggedCorpusReader): + """ + A corpus reader for the MAC_MORPHO corpus. Each line contains a + single tagged word, using '_' as a separator. Sentence boundaries + are based on the end-sentence tag ('_.'). Paragraph information + is not included in the corpus, so each paragraph returned by + ``self.paras()`` and ``self.tagged_paras()`` contains a single + sentence. + """ + + def __init__(self, root, fileids, encoding="utf8", tagset=None): + TaggedCorpusReader.__init__( + self, + root, + fileids, + sep="_", + word_tokenizer=LineTokenizer(), + sent_tokenizer=RegexpTokenizer(".*\n"), + para_block_reader=self._read_block, + encoding=encoding, + tagset=tagset, + ) + + def _read_block(self, stream): + return read_regexp_block(stream, r".*", r".*_\.") + + +class TimitTaggedCorpusReader(TaggedCorpusReader): + """ + A corpus reader for tagged sentences that are included in the TIMIT corpus. + """ + + def __init__(self, *args, **kwargs): + TaggedCorpusReader.__init__( + self, para_block_reader=read_timit_block, *args, **kwargs + ) + + def paras(self): + raise NotImplementedError("use sents() instead") + + def tagged_paras(self): + raise NotImplementedError("use tagged_sents() instead") diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/timit.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/timit.py new file mode 100644 index 0000000000000000000000000000000000000000..e399ac2ff31fd39c5dfc9ac9e9de0bc29d1f1842 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/timit.py @@ -0,0 +1,510 @@ +# Natural Language Toolkit: TIMIT Corpus Reader +# +# Copyright (C) 2001-2007 NLTK Project +# Author: Haejoong Lee +# Steven Bird +# Jacob Perkins +# URL: +# For license information, see LICENSE.TXT + +# [xx] this docstring is out-of-date: +""" +Read tokens, phonemes and audio data from the NLTK TIMIT Corpus. + +This corpus contains selected portion of the TIMIT corpus. + + - 16 speakers from 8 dialect regions + - 1 male and 1 female from each dialect region + - total 130 sentences (10 sentences per speaker. Note that some + sentences are shared among other speakers, especially sa1 and sa2 + are spoken by all speakers.) 
+ - total 160 recording of sentences (10 recordings per speaker) + - audio format: NIST Sphere, single channel, 16kHz sampling, + 16 bit sample, PCM encoding + + +Module contents +=============== + +The timit corpus reader provides 4 functions and 4 data items. + + - utterances + + List of utterances in the corpus. There are total 160 utterances, + each of which corresponds to a unique utterance of a speaker. + Here's an example of an utterance identifier in the list:: + + dr1-fvmh0/sx206 + - _---- _--- + | | | | | + | | | | | + | | | | `--- sentence number + | | | `----- sentence type (a:all, i:shared, x:exclusive) + | | `--------- speaker ID + | `------------ sex (m:male, f:female) + `-------------- dialect region (1..8) + + - speakers + + List of speaker IDs. An example of speaker ID:: + + dr1-fvmh0 + + Note that if you split an item ID with colon and take the first element of + the result, you will get a speaker ID. + + >>> itemid = 'dr1-fvmh0/sx206' + >>> spkrid , sentid = itemid.split('/') + >>> spkrid + 'dr1-fvmh0' + + The second element of the result is a sentence ID. + + - dictionary() + + Phonetic dictionary of words contained in this corpus. This is a Python + dictionary from words to phoneme lists. + + - spkrinfo() + + Speaker information table. It's a Python dictionary from speaker IDs to + records of 10 fields. Speaker IDs the same as the ones in timie.speakers. + Each record is a dictionary from field names to values, and the fields are + as follows:: + + id speaker ID as defined in the original TIMIT speaker info table + sex speaker gender (M:male, F:female) + dr speaker dialect region (1:new england, 2:northern, + 3:north midland, 4:south midland, 5:southern, 6:new york city, + 7:western, 8:army brat (moved around)) + use corpus type (TRN:training, TST:test) + in this sample corpus only TRN is available + recdate recording date + birthdate speaker birth date + ht speaker height + race speaker race (WHT:white, BLK:black, AMR:american indian, + SPN:spanish-american, ORN:oriental,???:unknown) + edu speaker education level (HS:high school, AS:associate degree, + BS:bachelor's degree (BS or BA), MS:master's degree (MS or MA), + PHD:doctorate degree (PhD,JD,MD), ??:unknown) + comments comments by the recorder + +The 4 functions are as follows. + + - tokenized(sentences=items, offset=False) + + Given a list of items, returns an iterator of a list of word lists, + each of which corresponds to an item (sentence). If offset is set to True, + each element of the word list is a tuple of word(string), start offset and + end offset, where offset is represented as a number of 16kHz samples. + + - phonetic(sentences=items, offset=False) + + Given a list of items, returns an iterator of a list of phoneme lists, + each of which corresponds to an item (sentence). If offset is set to True, + each element of the phoneme list is a tuple of word(string), start offset + and end offset, where offset is represented as a number of 16kHz samples. + + - audiodata(item, start=0, end=None) + + Given an item, returns a chunk of audio samples formatted into a string. + When the function is called, if start and end are omitted, the entire + samples of the recording will be returned. If only end is omitted, + samples from the start offset to the end of the recording will be returned. + + - play(data) + + Play the given audio samples. The audio samples can be obtained from the + timit.audiodata function. 
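
Example usage (editor's illustrative sketch -- assumes the sample corpus
has been fetched with nltk.download('timit'); the outputs shown are
indicative only):

    >>> from nltk.corpus import timit
    >>> itemid = timit.utteranceids()[0]             # doctest: +SKIP
    >>> timit.spkrid(itemid), timit.sentid(itemid)   # doctest: +SKIP
    ('dr1-fvmh0', 'sa1')
    >>> timit.words(itemid)[:5]                      # doctest: +SKIP
    ['she', 'had', 'your', 'dark', 'suit']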
+ +""" +import sys +import time + +from nltk.corpus.reader.api import * +from nltk.internals import import_from_stdlib +from nltk.tree import Tree + + +class TimitCorpusReader(CorpusReader): + """ + Reader for the TIMIT corpus (or any other corpus with the same + file layout and use of file formats). The corpus root directory + should contain the following files: + + - timitdic.txt: dictionary of standard transcriptions + - spkrinfo.txt: table of speaker information + + In addition, the root directory should contain one subdirectory + for each speaker, containing three files for each utterance: + + - .txt: text content of utterances + - .wrd: tokenized text content of utterances + - .phn: phonetic transcription of utterances + - .wav: utterance sound file + """ + + _FILE_RE = r"(\w+-\w+/\w+\.(phn|txt|wav|wrd))|" + r"timitdic\.txt|spkrinfo\.txt" + """A regexp matching fileids that are used by this corpus reader.""" + _UTTERANCE_RE = r"\w+-\w+/\w+\.txt" + + def __init__(self, root, encoding="utf8"): + """ + Construct a new TIMIT corpus reader in the given directory. + :param root: The root directory for this corpus. + """ + # Ensure that wave files don't get treated as unicode data: + if isinstance(encoding, str): + encoding = [(r".*\.wav", None), (".*", encoding)] + + CorpusReader.__init__( + self, root, find_corpus_fileids(root, self._FILE_RE), encoding=encoding + ) + + self._utterances = [ + name[:-4] for name in find_corpus_fileids(root, self._UTTERANCE_RE) + ] + """A list of the utterance identifiers for all utterances in + this corpus.""" + + self._speakerinfo = None + self._root = root + self.speakers = sorted({u.split("/")[0] for u in self._utterances}) + + def fileids(self, filetype=None): + """ + Return a list of file identifiers for the files that make up + this corpus. + + :param filetype: If specified, then ``filetype`` indicates that + only the files that have the given type should be + returned. Accepted values are: ``txt``, ``wrd``, ``phn``, + ``wav``, or ``metadata``, + """ + if filetype is None: + return CorpusReader.fileids(self) + elif filetype in ("txt", "wrd", "phn", "wav"): + return [f"{u}.{filetype}" for u in self._utterances] + elif filetype == "metadata": + return ["timitdic.txt", "spkrinfo.txt"] + else: + raise ValueError("Bad value for filetype: %r" % filetype) + + def utteranceids( + self, dialect=None, sex=None, spkrid=None, sent_type=None, sentid=None + ): + """ + :return: A list of the utterance identifiers for all + utterances in this corpus, or for the given speaker, dialect + region, gender, sentence type, or sentence number, if + specified. + """ + if isinstance(dialect, str): + dialect = [dialect] + if isinstance(sex, str): + sex = [sex] + if isinstance(spkrid, str): + spkrid = [spkrid] + if isinstance(sent_type, str): + sent_type = [sent_type] + if isinstance(sentid, str): + sentid = [sentid] + + utterances = self._utterances[:] + if dialect is not None: + utterances = [u for u in utterances if u[2] in dialect] + if sex is not None: + utterances = [u for u in utterances if u[4] in sex] + if spkrid is not None: + utterances = [u for u in utterances if u[:9] in spkrid] + if sent_type is not None: + utterances = [u for u in utterances if u[11] in sent_type] + if sentid is not None: + utterances = [u for u in utterances if u[10:] in spkrid] + return utterances + + def transcription_dict(self): + """ + :return: A dictionary giving the 'standard' transcription for + each word. 
+ """ + _transcriptions = {} + with self.open("timitdic.txt") as fp: + for line in fp: + if not line.strip() or line[0] == ";": + continue + m = re.match(r"\s*(\S+)\s+/(.*)/\s*$", line) + if not m: + raise ValueError("Bad line: %r" % line) + _transcriptions[m.group(1)] = m.group(2).split() + return _transcriptions + + def spkrid(self, utterance): + return utterance.split("/")[0] + + def sentid(self, utterance): + return utterance.split("/")[1] + + def utterance(self, spkrid, sentid): + return f"{spkrid}/{sentid}" + + def spkrutteranceids(self, speaker): + """ + :return: A list of all utterances associated with a given + speaker. + """ + return [ + utterance + for utterance in self._utterances + if utterance.startswith(speaker + "/") + ] + + def spkrinfo(self, speaker): + """ + :return: A dictionary mapping .. something. + """ + if speaker in self._utterances: + speaker = self.spkrid(speaker) + + if self._speakerinfo is None: + self._speakerinfo = {} + with self.open("spkrinfo.txt") as fp: + for line in fp: + if not line.strip() or line[0] == ";": + continue + rec = line.strip().split(None, 9) + key = f"dr{rec[2]}-{rec[1].lower()}{rec[0].lower()}" + self._speakerinfo[key] = SpeakerInfo(*rec) + + return self._speakerinfo[speaker] + + def phones(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".phn"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append(line.split()[-1]) + return results + + def phone_times(self, utterances=None): + """ + offset is represented as a number of 16kHz samples! + """ + results = [] + for fileid in self._utterance_fileids(utterances, ".phn"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append( + ( + line.split()[2], + int(line.split()[0]), + int(line.split()[1]), + ) + ) + return results + + def words(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".wrd"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append(line.split()[-1]) + return results + + def word_times(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".wrd"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append( + ( + line.split()[2], + int(line.split()[0]), + int(line.split()[1]), + ) + ) + return results + + def sents(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".wrd"): + with self.open(fileid) as fp: + results.append([line.split()[-1] for line in fp if line.strip()]) + return results + + def sent_times(self, utterances=None): + # TODO: Check this + return [ + ( + line.split(None, 2)[-1].strip(), + int(line.split()[0]), + int(line.split()[1]), + ) + for fileid in self._utterance_fileids(utterances, ".txt") + for line in self.open(fileid) + if line.strip() + ] + + def phone_trees(self, utterances=None): + if utterances is None: + utterances = self._utterances + if isinstance(utterances, str): + utterances = [utterances] + + trees = [] + for utterance in utterances: + word_times = self.word_times(utterance) + phone_times = self.phone_times(utterance) + sent_times = self.sent_times(utterance) + + while sent_times: + (sent, sent_start, sent_end) = sent_times.pop(0) + trees.append(Tree("S", [])) + while ( + word_times and phone_times and phone_times[0][2] <= word_times[0][1] + ): + trees[-1].append(phone_times.pop(0)[0]) + while word_times and word_times[0][2] <= sent_end: + (word, word_start, word_end) = 
word_times.pop(0) + trees[-1].append(Tree(word, [])) + while phone_times and phone_times[0][2] <= word_end: + trees[-1][-1].append(phone_times.pop(0)[0]) + while phone_times and phone_times[0][2] <= sent_end: + trees[-1].append(phone_times.pop(0)[0]) + return trees + + # [xx] NOTE: This is currently broken -- we're assuming that the + # fileids are WAV fileids (aka RIFF), but they're actually NIST SPHERE + # fileids. + def wav(self, utterance, start=0, end=None): + # nltk.chunk conflicts with the stdlib module 'chunk' + wave = import_from_stdlib("wave") + + w = wave.open(self.open(utterance + ".wav"), "rb") + + if end is None: + end = w.getnframes() + + # Skip past frames before start, then read the frames we want + w.readframes(start) + frames = w.readframes(end - start) + + # Open a new temporary file -- the wave module requires + # an actual file, and won't work w/ stringio. :( + tf = tempfile.TemporaryFile() + out = wave.open(tf, "w") + + # Write the parameters & data to the new file. + out.setparams(w.getparams()) + out.writeframes(frames) + out.close() + + # Read the data back from the file, and return it. The + # file will automatically be deleted when we return. + tf.seek(0) + return tf.read() + + def audiodata(self, utterance, start=0, end=None): + assert end is None or end > start + headersize = 44 + with self.open(utterance + ".wav") as fp: + if end is None: + data = fp.read() + else: + data = fp.read(headersize + end * 2) + return data[headersize + start * 2 :] + + def _utterance_fileids(self, utterances, extension): + if utterances is None: + utterances = self._utterances + if isinstance(utterances, str): + utterances = [utterances] + return [f"{u}{extension}" for u in utterances] + + def play(self, utterance, start=0, end=None): + """ + Play the given audio sample. + + :param utterance: The utterance id of the sample to play + """ + # Method 1: os audio dev. + try: + import ossaudiodev + + try: + dsp = ossaudiodev.open("w") + dsp.setfmt(ossaudiodev.AFMT_S16_LE) + dsp.channels(1) + dsp.speed(16000) + dsp.write(self.audiodata(utterance, start, end)) + dsp.close() + except OSError as e: + print( + ( + "can't acquire the audio device; please " + "activate your audio device." + ), + file=sys.stderr, + ) + print("system error message:", str(e), file=sys.stderr) + return + except ImportError: + pass + + # Method 2: pygame + try: + # FIXME: this won't work under python 3 + import pygame.mixer + import StringIO + + pygame.mixer.init(16000) + f = StringIO.StringIO(self.wav(utterance, start, end)) + pygame.mixer.Sound(f).play() + while pygame.mixer.get_busy(): + time.sleep(0.01) + return + except ImportError: + pass + + # Method 3: complain. :) + print( + ("you must install pygame or ossaudiodev " "for audio playback."), + file=sys.stderr, + ) + + +class SpeakerInfo: + def __init__( + self, id, sex, dr, use, recdate, birthdate, ht, race, edu, comments=None + ): + self.id = id + self.sex = sex + self.dr = dr + self.use = use + self.recdate = recdate + self.birthdate = birthdate + self.ht = ht + self.race = race + self.edu = edu + self.comments = comments + + def __repr__(self): + attribs = "id sex dr use recdate birthdate ht race edu comments" + args = [f"{attr}={getattr(self, attr)!r}" for attr in attribs.split()] + return "SpeakerInfo(%s)" % (", ".join(args)) + + +def read_timit_block(stream): + """ + Block reader for timit tagged sentences, which are preceded by a sentence + number that will be ignored. 
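
    (Editor's illustrative example -- the leading sentence number is
    split off and discarded:)

    >>> from io import StringIO
    >>> read_timit_block(StringIO("0 she had your dark suit\n"))
    ['she had your dark suit\n']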
+ """ + line = stream.readline() + if not line: + return [] + n, sent = line.split(" ", 1) + return [sent] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/util.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/util.py new file mode 100644 index 0000000000000000000000000000000000000000..0934f1705952b4c00d8884da76c8e052c5a23d58 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/util.py @@ -0,0 +1,867 @@ +# Natural Language Toolkit: Corpus Reader Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +import bisect +import os +import pickle +import re +import tempfile +from functools import reduce +from xml.etree import ElementTree + +from nltk.data import ( + FileSystemPathPointer, + PathPointer, + SeekableUnicodeStreamReader, + ZipFilePathPointer, +) +from nltk.internals import slice_bounds +from nltk.tokenize import wordpunct_tokenize +from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence + +###################################################################### +# { Corpus View +###################################################################### + + +class StreamBackedCorpusView(AbstractLazySequence): + """ + A 'view' of a corpus file, which acts like a sequence of tokens: + it can be accessed by index, iterated over, etc. However, the + tokens are only constructed as-needed -- the entire corpus is + never stored in memory at once. + + The constructor to ``StreamBackedCorpusView`` takes two arguments: + a corpus fileid (specified as a string or as a ``PathPointer``); + and a block reader. A "block reader" is a function that reads + zero or more tokens from a stream, and returns them as a list. A + very simple example of a block reader is: + + >>> def simple_block_reader(stream): + ... return stream.readline().split() + + This simple block reader reads a single line at a time, and + returns a single token (consisting of a string) for each + whitespace-separated substring on the line. + + When deciding how to define the block reader for a given + corpus, careful consideration should be given to the size of + blocks handled by the block reader. Smaller block sizes will + increase the memory requirements of the corpus view's internal + data structures (by 2 integers per block). On the other hand, + larger block sizes may decrease performance for random access to + the corpus. (But note that larger block sizes will *not* + decrease performance for iteration.) + + Internally, ``CorpusView`` maintains a partial mapping from token + index to file position, with one entry per block. When a token + with a given index *i* is requested, the ``CorpusView`` constructs + it as follows: + + 1. First, it searches the toknum/filepos mapping for the token + index closest to (but less than or equal to) *i*. + + 2. Then, starting at the file position corresponding to that + index, it reads one block at a time using the block reader + until it reaches the requested token. + + The toknum/filepos mapping is created lazily: it is initially + empty, but every time a new block is read, the block's + initial token is added to the mapping. (Thus, the toknum/filepos + map has one entry per block.) + + In order to increase efficiency for random access patterns that + have high degrees of locality, the corpus view may cache one or + more blocks. 
+ + :note: Each ``CorpusView`` object internally maintains an open file + object for its underlying corpus file. This file should be + automatically closed when the ``CorpusView`` is garbage collected, + but if you wish to close it manually, use the ``close()`` + method. If you access a ``CorpusView``'s items after it has been + closed, the file object will be automatically re-opened. + + :warning: If the contents of the file are modified during the + lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior + is undefined. + + :warning: If a unicode encoding is specified when constructing a + ``CorpusView``, then the block reader may only call + ``stream.seek()`` with offsets that have been returned by + ``stream.tell()``; in particular, calling ``stream.seek()`` with + relative offsets, or with offsets based on string lengths, may + lead to incorrect behavior. + + :ivar _block_reader: The function used to read + a single block from the underlying file stream. + :ivar _toknum: A list containing the token index of each block + that has been processed. In particular, ``_toknum[i]`` is the + token index of the first token in block ``i``. Together + with ``_filepos``, this forms a partial mapping between token + indices and file positions. + :ivar _filepos: A list containing the file position of each block + that has been processed. In particular, ``_toknum[i]`` is the + file position of the first character in block ``i``. Together + with ``_toknum``, this forms a partial mapping between token + indices and file positions. + :ivar _stream: The stream used to access the underlying corpus file. + :ivar _len: The total number of tokens in the corpus, if known; + or None, if the number of tokens is not yet known. + :ivar _eofpos: The character position of the last character in the + file. This is calculated when the corpus view is initialized, + and is used to decide when the end of file has been reached. + :ivar _cache: A cache of the most recently read block. It + is encoded as a tuple (start_toknum, end_toknum, tokens), where + start_toknum is the token index of the first token in the block; + end_toknum is the token index of the first token not in the + block; and tokens is a list of the tokens in the block. + """ + + def __init__(self, fileid, block_reader=None, startpos=0, encoding="utf8"): + """ + Create a new corpus view, based on the file ``fileid``, and + read with ``block_reader``. See the class documentation + for more information. + + :param fileid: The path to the file that is read by this + corpus view. ``fileid`` can either be a string or a + ``PathPointer``. + + :param startpos: The file position at which the view will + start reading. This can be used to skip over preface + sections. + + :param encoding: The unicode encoding that should be used to + read the file's contents. If no encoding is specified, + then the file's contents will be read as a non-unicode + string (i.e., a str). + """ + if block_reader: + self.read_block = block_reader + # Initialize our toknum/filepos mapping. + self._toknum = [0] + self._filepos = [startpos] + self._encoding = encoding + # We don't know our length (number of tokens) yet. + self._len = None + + self._fileid = fileid + self._stream = None + + self._current_toknum = None + """This variable is set to the index of the next token that + will be read, immediately before ``self.read_block()`` is + called. 
This is provided for the benefit of the block + reader, which under rare circumstances may need to know + the current token number.""" + + self._current_blocknum = None + """This variable is set to the index of the next block that + will be read, immediately before ``self.read_block()`` is + called. This is provided for the benefit of the block + reader, which under rare circumstances may need to know + the current block number.""" + + # Find the length of the file. + try: + if isinstance(self._fileid, PathPointer): + self._eofpos = self._fileid.file_size() + else: + self._eofpos = os.stat(self._fileid).st_size + except Exception as exc: + raise ValueError(f"Unable to open or access {fileid!r} -- {exc}") from exc + + # Maintain a cache of the most recently read block, to + # increase efficiency of random access. + self._cache = (-1, -1, None) + + fileid = property( + lambda self: self._fileid, + doc=""" + The fileid of the file that is accessed by this view. + + :type: str or PathPointer""", + ) + + def read_block(self, stream): + """ + Read a block from the input stream. + + :return: a block of tokens from the input stream + :rtype: list(any) + :param stream: an input stream + :type stream: stream + """ + raise NotImplementedError("Abstract Method") + + def _open(self): + """ + Open the file stream associated with this corpus view. This + will be called performed if any value is read from the view + while its file stream is closed. + """ + if isinstance(self._fileid, PathPointer): + self._stream = self._fileid.open(self._encoding) + elif self._encoding: + self._stream = SeekableUnicodeStreamReader( + open(self._fileid, "rb"), self._encoding + ) + else: + self._stream = open(self._fileid, "rb") + + def close(self): + """ + Close the file stream associated with this corpus view. This + can be useful if you are worried about running out of file + handles (although the stream should automatically be closed + upon garbage collection of the corpus view). If the corpus + view is accessed after it is closed, it will be automatically + re-opened. + """ + if self._stream is not None: + self._stream.close() + self._stream = None + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __len__(self): + if self._len is None: + # iterate_from() sets self._len when it reaches the end + # of the file: + for tok in self.iterate_from(self._toknum[-1]): + pass + return self._len + + def __getitem__(self, i): + if isinstance(i, slice): + start, stop = slice_bounds(self, i) + # Check if it's in the cache. + offset = self._cache[0] + if offset <= start and stop <= self._cache[1]: + return self._cache[2][start - offset : stop - offset] + # Construct & return the result. + return LazySubsequence(self, start, stop) + else: + # Handle negative indices + if i < 0: + i += len(self) + if i < 0: + raise IndexError("index out of range") + # Check if it's in the cache. + offset = self._cache[0] + if offset <= i < self._cache[1]: + return self._cache[2][i - offset] + # Use iterate_from to extract it. + try: + return next(self.iterate_from(i)) + except StopIteration as e: + raise IndexError("index out of range") from e + + # If we wanted to be thread-safe, then this method would need to + # do some locking. + def iterate_from(self, start_tok): + # Start by feeding from the cache, if possible. 
+ if self._cache[0] <= start_tok < self._cache[1]: + for tok in self._cache[2][start_tok - self._cache[0] :]: + yield tok + start_tok += 1 + + # Decide where in the file we should start. If `start` is in + # our mapping, then we can jump straight to the correct block; + # otherwise, start at the last block we've processed. + if start_tok < self._toknum[-1]: + block_index = bisect.bisect_right(self._toknum, start_tok) - 1 + toknum = self._toknum[block_index] + filepos = self._filepos[block_index] + else: + block_index = len(self._toknum) - 1 + toknum = self._toknum[-1] + filepos = self._filepos[-1] + + # Open the stream, if it's not open already. + if self._stream is None: + self._open() + + # If the file is empty, the while loop will never run. + # This *seems* to be all the state we need to set: + if self._eofpos == 0: + self._len = 0 + + # Each iteration through this loop, we read a single block + # from the stream. + while filepos < self._eofpos: + # Read the next block. + self._stream.seek(filepos) + self._current_toknum = toknum + self._current_blocknum = block_index + tokens = self.read_block(self._stream) + assert isinstance(tokens, (tuple, list, AbstractLazySequence)), ( + "block reader %s() should return list or tuple." + % self.read_block.__name__ + ) + num_toks = len(tokens) + new_filepos = self._stream.tell() + assert ( + new_filepos > filepos + ), "block reader %s() should consume at least 1 byte (filepos=%d)" % ( + self.read_block.__name__, + filepos, + ) + + # Update our cache. + self._cache = (toknum, toknum + num_toks, list(tokens)) + + # Update our mapping. + assert toknum <= self._toknum[-1] + if num_toks > 0: + block_index += 1 + if toknum == self._toknum[-1]: + assert new_filepos > self._filepos[-1] # monotonic! + self._filepos.append(new_filepos) + self._toknum.append(toknum + num_toks) + else: + # Check for consistency: + assert ( + new_filepos == self._filepos[block_index] + ), "inconsistent block reader (num chars read)" + assert ( + toknum + num_toks == self._toknum[block_index] + ), "inconsistent block reader (num tokens returned)" + + # If we reached the end of the file, then update self._len + if new_filepos == self._eofpos: + self._len = toknum + num_toks + # Generate the tokens in this block (but skip any tokens + # before start_tok). Note that between yields, our state + # may be modified. + for tok in tokens[max(0, start_tok - toknum) :]: + yield tok + # If we're at the end of the file, then we're done. + assert new_filepos <= self._eofpos + if new_filepos == self._eofpos: + break + # Update our indices + toknum += num_toks + filepos = new_filepos + + # If we reach this point, then we should know our length. + assert self._len is not None + # Enforce closing of stream once we reached end of file + # We should have reached EOF once we're out of the while loop. + self.close() + + # Use concat for these, so we can use a ConcatenatedCorpusView + # when possible. + def __add__(self, other): + return concat([self, other]) + + def __radd__(self, other): + return concat([other, self]) + + def __mul__(self, count): + return concat([self] * count) + + def __rmul__(self, count): + return concat([self] * count) + + +class ConcatenatedCorpusView(AbstractLazySequence): + """ + A 'view' of a corpus file that joins together one or more + ``StreamBackedCorpusViews``. At most + one file handle is left open at any time. 
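
    (Editor's illustrative example -- asking a reader for several fileids
    at once typically yields one of these; assumes nltk.download('brown'):)

    >>> from nltk.corpus import brown
    >>> view = brown.words(['ca01', 'ca02'])  # doctest: +SKIP
    >>> type(view).__name__  # doctest: +SKIP
    'ConcatenatedCorpusView'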
+ """ + + def __init__(self, corpus_views): + self._pieces = corpus_views + """A list of the corpus subviews that make up this + concatenation.""" + + self._offsets = [0] + """A list of offsets, indicating the index at which each + subview begins. In particular:: + offsets[i] = sum([len(p) for p in pieces[:i]])""" + + self._open_piece = None + """The most recently accessed corpus subview (or None). + Before a new subview is accessed, this subview will be closed.""" + + def __len__(self): + if len(self._offsets) <= len(self._pieces): + # Iterate to the end of the corpus. + for tok in self.iterate_from(self._offsets[-1]): + pass + + return self._offsets[-1] + + def close(self): + for piece in self._pieces: + piece.close() + + def iterate_from(self, start_tok): + piecenum = bisect.bisect_right(self._offsets, start_tok) - 1 + + while piecenum < len(self._pieces): + offset = self._offsets[piecenum] + piece = self._pieces[piecenum] + + # If we've got another piece open, close it first. + if self._open_piece is not piece: + if self._open_piece is not None: + self._open_piece.close() + self._open_piece = piece + + # Get everything we can from this piece. + yield from piece.iterate_from(max(0, start_tok - offset)) + + # Update the offset table. + if piecenum + 1 == len(self._offsets): + self._offsets.append(self._offsets[-1] + len(piece)) + + # Move on to the next piece. + piecenum += 1 + + +def concat(docs): + """ + Concatenate together the contents of multiple documents from a + single corpus, using an appropriate concatenation function. This + utility function is used by corpus readers when the user requests + more than one document at a time. + """ + if len(docs) == 1: + return docs[0] + if len(docs) == 0: + raise ValueError("concat() expects at least one object!") + + types = {d.__class__ for d in docs} + + # If they're all strings, use string concatenation. + if all(isinstance(doc, str) for doc in docs): + return "".join(docs) + + # If they're all corpus views, then use ConcatenatedCorpusView. + for typ in types: + if not issubclass(typ, (StreamBackedCorpusView, ConcatenatedCorpusView)): + break + else: + return ConcatenatedCorpusView(docs) + + # If they're all lazy sequences, use a lazy concatenation + for typ in types: + if not issubclass(typ, AbstractLazySequence): + break + else: + return LazyConcatenation(docs) + + # Otherwise, see what we can do: + if len(types) == 1: + typ = list(types)[0] + + if issubclass(typ, list): + return reduce((lambda a, b: a + b), docs, []) + + if issubclass(typ, tuple): + return reduce((lambda a, b: a + b), docs, ()) + + if ElementTree.iselement(typ): + xmltree = ElementTree.Element("documents") + for doc in docs: + xmltree.append(doc) + return xmltree + + # No method found! + raise ValueError("Don't know how to concatenate types: %r" % types) + + +###################################################################### +# { Corpus View for Pickled Sequences +###################################################################### + + +class PickleCorpusView(StreamBackedCorpusView): + """ + A stream backed corpus view for corpus files that consist of + sequences of serialized Python objects (serialized using + ``pickle.dump``). One use case for this class is to store the + result of running feature detection on a corpus to disk. This can + be useful when performing feature detection is expensive (so we + don't want to repeat it); but the corpus is too large to store in + memory. 
The following example illustrates this technique: + + >>> from nltk.corpus.reader.util import PickleCorpusView + >>> from nltk.util import LazyMap + >>> feature_corpus = LazyMap(detect_features, corpus) # doctest: +SKIP + >>> PickleCorpusView.write(feature_corpus, some_fileid) # doctest: +SKIP + >>> pcv = PickleCorpusView(some_fileid) # doctest: +SKIP + """ + + BLOCK_SIZE = 100 + PROTOCOL = -1 + + def __init__(self, fileid, delete_on_gc=False): + """ + Create a new corpus view that reads the pickle corpus + ``fileid``. + + :param delete_on_gc: If true, then ``fileid`` will be deleted + whenever this object gets garbage-collected. + """ + self._delete_on_gc = delete_on_gc + StreamBackedCorpusView.__init__(self, fileid) + + def read_block(self, stream): + result = [] + for i in range(self.BLOCK_SIZE): + try: + result.append(pickle.load(stream)) + except EOFError: + break + return result + + def __del__(self): + """ + If ``delete_on_gc`` was set to true when this + ``PickleCorpusView`` was created, then delete the corpus view's + fileid. (This method is called whenever a + ``PickledCorpusView`` is garbage-collected. + """ + if getattr(self, "_delete_on_gc"): + if os.path.exists(self._fileid): + try: + os.remove(self._fileid) + except OSError: + pass + self.__dict__.clear() # make the garbage collector's job easier + + @classmethod + def write(cls, sequence, output_file): + if isinstance(output_file, str): + output_file = open(output_file, "wb") + for item in sequence: + pickle.dump(item, output_file, cls.PROTOCOL) + + @classmethod + def cache_to_tempfile(cls, sequence, delete_on_gc=True): + """ + Write the given sequence to a temporary file as a pickle + corpus; and then return a ``PickleCorpusView`` view for that + temporary corpus file. + + :param delete_on_gc: If true, then the temporary file will be + deleted whenever this object gets garbage-collected. + """ + try: + fd, output_file_name = tempfile.mkstemp(".pcv", "nltk-") + output_file = os.fdopen(fd, "wb") + cls.write(sequence, output_file) + output_file.close() + return PickleCorpusView(output_file_name, delete_on_gc) + except OSError as e: + raise ValueError("Error while creating temp file: %s" % e) from e + + +###################################################################### +# { Block Readers +###################################################################### + + +def read_whitespace_block(stream): + toks = [] + for i in range(20): # Read 20 lines at a time. + toks.extend(stream.readline().split()) + return toks + + +def read_wordpunct_block(stream): + toks = [] + for i in range(20): # Read 20 lines at a time. 
+ toks.extend(wordpunct_tokenize(stream.readline())) + return toks + + +def read_line_block(stream): + toks = [] + for i in range(20): + line = stream.readline() + if not line: + return toks + toks.append(line.rstrip("\n")) + return toks + + +def read_blankline_block(stream): + s = "" + while True: + line = stream.readline() + # End of file: + if not line: + if s: + return [s] + else: + return [] + # Blank line: + elif line and not line.strip(): + if s: + return [s] + # Other line: + else: + s += line + + +def read_alignedsent_block(stream): + s = "" + while True: + line = stream.readline() + if line[0] == "=" or line[0] == "\n" or line[:2] == "\r\n": + continue + # End of file: + if not line: + if s: + return [s] + else: + return [] + # Other line: + else: + s += line + if re.match(r"^\d+-\d+", line) is not None: + return [s] + + +def read_regexp_block(stream, start_re, end_re=None): + """ + Read a sequence of tokens from a stream, where tokens begin with + lines that match ``start_re``. If ``end_re`` is specified, then + tokens end with lines that match ``end_re``; otherwise, tokens end + whenever the next line matching ``start_re`` or EOF is found. + """ + # Scan until we find a line matching the start regexp. + while True: + line = stream.readline() + if not line: + return [] # end of file. + if re.match(start_re, line): + break + + # Scan until we find another line matching the regexp, or EOF. + lines = [line] + while True: + oldpos = stream.tell() + line = stream.readline() + # End of file: + if not line: + return ["".join(lines)] + # End of token: + if end_re is not None and re.match(end_re, line): + return ["".join(lines)] + # Start of new token: backup to just before it starts, and + # return the token we've already collected. + if end_re is None and re.match(start_re, line): + stream.seek(oldpos) + return ["".join(lines)] + # Anything else is part of the token. + lines.append(line) + + +def read_sexpr_block(stream, block_size=16384, comment_char=None): + """ + Read a sequence of s-expressions from the stream, and leave the + stream's file position at the end the last complete s-expression + read. This function will always return at least one s-expression, + unless there are no more s-expressions in the file. + + If the file ends in in the middle of an s-expression, then that + incomplete s-expression is returned when the end of the file is + reached. + + :param block_size: The default block size for reading. If an + s-expression is longer than one block, then more than one + block will be read. + :param comment_char: A character that marks comments. Any lines + that begin with this character will be stripped out. + (If spaces or tabs precede the comment character, then the + line will not be stripped.) + """ + start = stream.tell() + block = stream.read(block_size) + encoding = getattr(stream, "encoding", None) + assert encoding is not None or isinstance(block, str) + if encoding not in (None, "utf-8"): + import warnings + + warnings.warn( + "Parsing may fail, depending on the properties " + "of the %s encoding!" % encoding + ) + # (e.g., the utf-16 encoding does not work because it insists + # on adding BOMs to the beginning of encoded strings.) + + if comment_char: + COMMENT = re.compile("(?m)^%s.*$" % re.escape(comment_char)) + while True: + try: + # If we're stripping comments, then make sure our block ends + # on a line boundary; and then replace any comments with + # space characters. (We can't just strip them out -- that + # would make our offset wrong.) 
+ if comment_char: + block += stream.readline() + block = re.sub(COMMENT, _sub_space, block) + # Read the block. + tokens, offset = _parse_sexpr_block(block) + # Skip whitespace + offset = re.compile(r"\s*").search(block, offset).end() + + # Move to the end position. + if encoding is None: + stream.seek(start + offset) + else: + stream.seek(start + len(block[:offset].encode(encoding))) + + # Return the list of tokens we processed + return tokens + except ValueError as e: + if e.args[0] == "Block too small": + next_block = stream.read(block_size) + if next_block: + block += next_block + continue + else: + # The file ended mid-sexpr -- return what we got. + return [block.strip()] + else: + raise + + +def _sub_space(m): + """Helper function: given a regexp match, return a string of + spaces that's the same length as the matched string.""" + return " " * (m.end() - m.start()) + + +def _parse_sexpr_block(block): + tokens = [] + start = end = 0 + + while end < len(block): + m = re.compile(r"\S").search(block, end) + if not m: + return tokens, end + + start = m.start() + + # Case 1: sexpr is not parenthesized. + if m.group() != "(": + m2 = re.compile(r"[\s(]").search(block, start) + if m2: + end = m2.start() + else: + if tokens: + return tokens, end + raise ValueError("Block too small") + + # Case 2: parenthesized sexpr. + else: + nesting = 0 + for m in re.compile(r"[()]").finditer(block, start): + if m.group() == "(": + nesting += 1 + else: + nesting -= 1 + if nesting == 0: + end = m.end() + break + else: + if tokens: + return tokens, end + raise ValueError("Block too small") + + tokens.append(block[start:end]) + + return tokens, end + + +###################################################################### +# { Finding Corpus Items +###################################################################### + + +def find_corpus_fileids(root, regexp): + if not isinstance(root, PathPointer): + raise TypeError("find_corpus_fileids: expected a PathPointer") + regexp += "$" + + # Find fileids in a zipfile: scan the zipfile's namelist. Filter + # out entries that end in '/' -- they're directories. + if isinstance(root, ZipFilePathPointer): + fileids = [ + name[len(root.entry) :] + for name in root.zipfile.namelist() + if not name.endswith("/") + ] + items = [name for name in fileids if re.match(regexp, name)] + return sorted(items) + + # Find fileids in a directory: use os.walk to search all (proper + # or symlinked) subdirectories, and match paths against the regexp. + elif isinstance(root, FileSystemPathPointer): + items = [] + for dirname, subdirs, fileids in os.walk(root.path): + prefix = "".join("%s/" % p for p in _path_from(root.path, dirname)) + items += [ + prefix + fileid + for fileid in fileids + if re.match(regexp, prefix + fileid) + ] + # Don't visit svn directories: + if ".svn" in subdirs: + subdirs.remove(".svn") + return sorted(items) + + else: + raise AssertionError("Don't know how to handle %r" % root) + + +def _path_from(parent, child): + if os.path.split(parent)[1] == "": + parent = os.path.split(parent)[0] + path = [] + while parent != child: + child, dirname = os.path.split(child) + path.insert(0, dirname) + assert os.path.split(child)[0] != child + return path + + +###################################################################### +# { Paragraph structure in Treebank files +###################################################################### + + +def tagged_treebank_para_block_reader(stream): + # Read the next paragraph. 
+ para = "" + while True: + line = stream.readline() + # End of paragraph: + if re.match(r"======+\s*$", line): + if para.strip(): + return [para] + # End of file: + elif line == "": + if para.strip(): + return [para] + else: + return [] + # Content line: + else: + para += line diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/verbnet.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/verbnet.py new file mode 100644 index 0000000000000000000000000000000000000000..6056574bb03a0797d47c68b2de333b8337b08a46 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/verbnet.py @@ -0,0 +1,629 @@ +# Natural Language Toolkit: Verbnet Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +An NLTK interface to the VerbNet verb lexicon + +For details about VerbNet see: +https://verbs.colorado.edu/~mpalmer/projects/verbnet.html +""" + +import re +import textwrap +from collections import defaultdict + +from nltk.corpus.reader.xmldocs import XMLCorpusReader + + +class VerbnetCorpusReader(XMLCorpusReader): + """ + An NLTK interface to the VerbNet verb lexicon. + + From the VerbNet site: "VerbNet (VN) (Kipper-Schuler 2006) is the largest + on-line verb lexicon currently available for English. It is a hierarchical + domain-independent, broad-coverage verb lexicon with mappings to other + lexical resources such as WordNet (Miller, 1990; Fellbaum, 1998), XTAG + (XTAG Research Group, 2001), and FrameNet (Baker et al., 1998)." + + For details about VerbNet see: + https://verbs.colorado.edu/~mpalmer/projects/verbnet.html + """ + + # No unicode encoding param, since the data files are all XML. + def __init__(self, root, fileids, wrap_etree=False): + XMLCorpusReader.__init__(self, root, fileids, wrap_etree) + + self._lemma_to_class = defaultdict(list) + """A dictionary mapping from verb lemma strings to lists of + VerbNet class identifiers.""" + + self._wordnet_to_class = defaultdict(list) + """A dictionary mapping from wordnet identifier strings to + lists of VerbNet class identifiers.""" + + self._class_to_fileid = {} + """A dictionary mapping from class identifiers to + corresponding file identifiers. The keys of this dictionary + provide a complete list of all classes and subclasses.""" + + self._shortid_to_longid = {} + + # Initialize the dictionaries. Use the quick (regexp-based) + # method instead of the slow (xml-based) method, because it + # runs 2-30 times faster. + self._quick_index() + + _LONGID_RE = re.compile(r"([^\-\.]*)-([\d+.\-]+)$") + """Regular expression that matches (and decomposes) longids""" + + _SHORTID_RE = re.compile(r"[\d+.\-]+$") + """Regular expression that matches shortids""" + + _INDEX_RE = re.compile( + r']+>|' r'' + ) + """Regular expression used by ``_index()`` to quickly scan the corpus + for basic information.""" + + def lemmas(self, vnclass=None): + """ + Return a list of all verb lemmas that appear in any class, or + in the ``classid`` if specified. + """ + if vnclass is None: + return sorted(self._lemma_to_class.keys()) + else: + # [xx] should this include subclass members? + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + return [member.get("name") for member in vnclass.findall("MEMBERS/MEMBER")] + + def wordnetids(self, vnclass=None): + """ + Return a list of all wordnet identifiers that appear in any + class, or in ``classid`` if specified. 
+ """ + if vnclass is None: + return sorted(self._wordnet_to_class.keys()) + else: + # [xx] should this include subclass members? + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + return sum( + ( + member.get("wn", "").split() + for member in vnclass.findall("MEMBERS/MEMBER") + ), + [], + ) + + def classids(self, lemma=None, wordnetid=None, fileid=None, classid=None): + """ + Return a list of the VerbNet class identifiers. If a file + identifier is specified, then return only the VerbNet class + identifiers for classes (and subclasses) defined by that file. + If a lemma is specified, then return only VerbNet class + identifiers for classes that contain that lemma as a member. + If a wordnetid is specified, then return only identifiers for + classes that contain that wordnetid as a member. If a classid + is specified, then return only identifiers for subclasses of + the specified VerbNet class. + If nothing is specified, return all classids within VerbNet + """ + if fileid is not None: + return [c for (c, f) in self._class_to_fileid.items() if f == fileid] + elif lemma is not None: + return self._lemma_to_class[lemma] + elif wordnetid is not None: + return self._wordnet_to_class[wordnetid] + elif classid is not None: + xmltree = self.vnclass(classid) + return [ + subclass.get("ID") + for subclass in xmltree.findall("SUBCLASSES/VNSUBCLASS") + ] + else: + return sorted(self._class_to_fileid.keys()) + + def vnclass(self, fileid_or_classid): + """Returns VerbNet class ElementTree + + Return an ElementTree containing the xml for the specified + VerbNet class. + + :param fileid_or_classid: An identifier specifying which class + should be returned. Can be a file identifier (such as + ``'put-9.1.xml'``), or a VerbNet class identifier (such as + ``'put-9.1'``) or a short VerbNet class identifier (such as + ``'9.1'``). + """ + # File identifier: just return the xml. + if fileid_or_classid in self._fileids: + return self.xml(fileid_or_classid) + + # Class identifier: get the xml, and find the right elt. + classid = self.longid(fileid_or_classid) + if classid in self._class_to_fileid: + fileid = self._class_to_fileid[self.longid(classid)] + tree = self.xml(fileid) + if classid == tree.get("ID"): + return tree + else: + for subclass in tree.findall(".//VNSUBCLASS"): + if classid == subclass.get("ID"): + return subclass + else: + assert False # we saw it during _index()! + + else: + raise ValueError(f"Unknown identifier {fileid_or_classid}") + + def fileids(self, vnclass_ids=None): + """ + Return a list of fileids that make up this corpus. If + ``vnclass_ids`` is specified, then return the fileids that make + up the specified VerbNet class(es). + """ + if vnclass_ids is None: + return self._fileids + elif isinstance(vnclass_ids, str): + return [self._class_to_fileid[self.longid(vnclass_ids)]] + else: + return [ + self._class_to_fileid[self.longid(vnclass_id)] + for vnclass_id in vnclass_ids + ] + + def frames(self, vnclass): + """Given a VerbNet class, this method returns VerbNet frames + + The members returned are: + 1) Example + 2) Description + 3) Syntax + 4) Semantics + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. 
+ :return: frames - a list of frame dictionaries + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + frames = [] + vnframes = vnclass.findall("FRAMES/FRAME") + for vnframe in vnframes: + frames.append( + { + "example": self._get_example_within_frame(vnframe), + "description": self._get_description_within_frame(vnframe), + "syntax": self._get_syntactic_list_within_frame(vnframe), + "semantics": self._get_semantics_within_frame(vnframe), + } + ) + return frames + + def subclasses(self, vnclass): + """Returns subclass ids, if any exist + + Given a VerbNet class, this method returns subclass ids (if they exist) + in a list of strings. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + :return: list of subclasses + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + subclasses = [ + subclass.get("ID") for subclass in vnclass.findall("SUBCLASSES/VNSUBCLASS") + ] + return subclasses + + def themroles(self, vnclass): + """Returns thematic roles participating in a VerbNet class + + Members returned as part of roles are- + 1) Type + 2) Modifiers + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + :return: themroles: A list of thematic roles in the VerbNet class + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + themroles = [] + for trole in vnclass.findall("THEMROLES/THEMROLE"): + themroles.append( + { + "type": trole.get("type"), + "modifiers": [ + {"value": restr.get("Value"), "type": restr.get("type")} + for restr in trole.findall("SELRESTRS/SELRESTR") + ], + } + ) + return themroles + + ###################################################################### + # { Index Initialization + ###################################################################### + + def _index(self): + """ + Initialize the indexes ``_lemma_to_class``, + ``_wordnet_to_class``, and ``_class_to_fileid`` by scanning + through the corpus fileids. This is fast if ElementTree + uses the C implementation (<0.1 secs), but quite slow (>10 secs) + if only the python implementation is available. + """ + for fileid in self._fileids: + self._index_helper(self.xml(fileid), fileid) + + def _index_helper(self, xmltree, fileid): + """Helper for ``_index()``""" + vnclass = xmltree.get("ID") + self._class_to_fileid[vnclass] = fileid + self._shortid_to_longid[self.shortid(vnclass)] = vnclass + for member in xmltree.findall("MEMBERS/MEMBER"): + self._lemma_to_class[member.get("name")].append(vnclass) + for wn in member.get("wn", "").split(): + self._wordnet_to_class[wn].append(vnclass) + for subclass in xmltree.findall("SUBCLASSES/VNSUBCLASS"): + self._index_helper(subclass, fileid) + + def _quick_index(self): + """ + Initialize the indexes ``_lemma_to_class``, + ``_wordnet_to_class``, and ``_class_to_fileid`` by scanning + through the corpus fileids. This doesn't do proper xml parsing, + but is good enough to find everything in the standard VerbNet + corpus -- and it runs about 30 times faster than xml parsing + (with the python ElementTree; only 2-3 times faster + if ElementTree uses the C implementation). + """ + # nb: if we got rid of wordnet_to_class, this would run 2-3 + # times faster. 
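As an illustration of the frame dictionaries built by frames() above (a sketch that assumes the VerbNet corpus has been downloaded, e.g. via nltk.download('verbnet')), each frame exposes exactly the four members listed in the docstring:

    >>> from nltk.corpus import verbnet
    >>> frame = verbnet.frames('put-9.1')[0]
    >>> sorted(frame.keys())
    ['description', 'example', 'semantics', 'syntax']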
+ for fileid in self._fileids: + vnclass = fileid[:-4] # strip the '.xml' + self._class_to_fileid[vnclass] = fileid + self._shortid_to_longid[self.shortid(vnclass)] = vnclass + with self.open(fileid) as fp: + for m in self._INDEX_RE.finditer(fp.read()): + groups = m.groups() + if groups[0] is not None: + self._lemma_to_class[groups[0]].append(vnclass) + for wn in groups[1].split(): + self._wordnet_to_class[wn].append(vnclass) + elif groups[2] is not None: + self._class_to_fileid[groups[2]] = fileid + vnclass = groups[2] # for elts. + self._shortid_to_longid[self.shortid(vnclass)] = vnclass + else: + assert False, "unexpected match condition" + + ###################################################################### + # { Identifier conversion + ###################################################################### + + def longid(self, shortid): + """Returns longid of a VerbNet class + + Given a short VerbNet class identifier (eg '37.10'), map it + to a long id (eg 'confess-37.10'). If ``shortid`` is already a + long id, then return it as-is""" + if self._LONGID_RE.match(shortid): + return shortid # it's already a longid. + elif not self._SHORTID_RE.match(shortid): + raise ValueError("vnclass identifier %r not found" % shortid) + try: + return self._shortid_to_longid[shortid] + except KeyError as e: + raise ValueError("vnclass identifier %r not found" % shortid) from e + + def shortid(self, longid): + """Returns shortid of a VerbNet class + + Given a long VerbNet class identifier (eg 'confess-37.10'), + map it to a short id (eg '37.10'). If ``longid`` is already a + short id, then return it as-is.""" + if self._SHORTID_RE.match(longid): + return longid # it's already a shortid. + m = self._LONGID_RE.match(longid) + if m: + return m.group(2) + else: + raise ValueError("vnclass identifier %r not found" % longid) + + ###################################################################### + # { Frame access utility functions + ###################################################################### + + def _get_semantics_within_frame(self, vnframe): + """Returns semantics within a single frame + + A utility function to retrieve semantics within a frame in VerbNet + Members of the semantics dictionary: + 1) Predicate value + 2) Arguments + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + :return: semantics: semantics dictionary + """ + semantics_within_single_frame = [] + for pred in vnframe.findall("SEMANTICS/PRED"): + arguments = [ + {"type": arg.get("type"), "value": arg.get("value")} + for arg in pred.findall("ARGS/ARG") + ] + semantics_within_single_frame.append( + { + "predicate_value": pred.get("value"), + "arguments": arguments, + "negated": pred.get("bool") == "!", + } + ) + return semantics_within_single_frame + + def _get_example_within_frame(self, vnframe): + """Returns example within a frame + + A utility function to retrieve an example within a frame in VerbNet. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + :return: example_text: The example sentence for this particular frame + """ + example_element = vnframe.find("EXAMPLES/EXAMPLE") + if example_element is not None: + example_text = example_element.text + else: + example_text = "" + return example_text + + def _get_description_within_frame(self, vnframe): + """Returns member description within frame + + A utility function to retrieve a description of participating members + within a frame in VerbNet. 
+ + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + :return: description: a description dictionary with members - primary and secondary + """ + description_element = vnframe.find("DESCRIPTION") + return { + "primary": description_element.attrib["primary"], + "secondary": description_element.get("secondary", ""), + } + + def _get_syntactic_list_within_frame(self, vnframe): + """Returns semantics within a frame + + A utility function to retrieve semantics within a frame in VerbNet. + Members of the syntactic dictionary: + 1) POS Tag + 2) Modifiers + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + :return: syntax_within_single_frame + """ + syntax_within_single_frame = [] + for elt in vnframe.find("SYNTAX"): + pos_tag = elt.tag + modifiers = dict() + modifiers["value"] = elt.get("value") if "value" in elt.attrib else "" + modifiers["selrestrs"] = [ + {"value": restr.get("Value"), "type": restr.get("type")} + for restr in elt.findall("SELRESTRS/SELRESTR") + ] + modifiers["synrestrs"] = [ + {"value": restr.get("Value"), "type": restr.get("type")} + for restr in elt.findall("SYNRESTRS/SYNRESTR") + ] + syntax_within_single_frame.append( + {"pos_tag": pos_tag, "modifiers": modifiers} + ) + return syntax_within_single_frame + + ###################################################################### + # { Pretty Printing + ###################################################################### + + def pprint(self, vnclass): + """Returns pretty printed version of a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet class. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + s = vnclass.get("ID") + "\n" + s += self.pprint_subclasses(vnclass, indent=" ") + "\n" + s += self.pprint_members(vnclass, indent=" ") + "\n" + s += " Thematic roles:\n" + s += self.pprint_themroles(vnclass, indent=" ") + "\n" + s += " Frames:\n" + s += self.pprint_frames(vnclass, indent=" ") + return s + + def pprint_subclasses(self, vnclass, indent=""): + """Returns pretty printed version of subclasses of VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet class's subclasses. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + subclasses = self.subclasses(vnclass) + if not subclasses: + subclasses = ["(none)"] + s = "Subclasses: " + " ".join(subclasses) + return textwrap.fill( + s, 70, initial_indent=indent, subsequent_indent=indent + " " + ) + + def pprint_members(self, vnclass, indent=""): + """Returns pretty printed version of members in a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet class's member verbs. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. 
+ """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + members = self.lemmas(vnclass) + if not members: + members = ["(none)"] + s = "Members: " + " ".join(members) + return textwrap.fill( + s, 70, initial_indent=indent, subsequent_indent=indent + " " + ) + + def pprint_themroles(self, vnclass, indent=""): + """Returns pretty printed version of thematic roles in a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet class's thematic roles. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + pieces = [] + for themrole in self.themroles(vnclass): + piece = indent + "* " + themrole.get("type") + modifiers = [ + modifier["value"] + modifier["type"] + for modifier in themrole["modifiers"] + ] + if modifiers: + piece += "[{}]".format(" ".join(modifiers)) + pieces.append(piece) + return "\n".join(pieces) + + def pprint_frames(self, vnclass, indent=""): + """Returns pretty version of all frames in a VerbNet class + + Return a string containing a pretty-printed representation of + the list of frames within the VerbNet class. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + pieces = [] + for vnframe in self.frames(vnclass): + pieces.append(self._pprint_single_frame(vnframe, indent)) + return "\n".join(pieces) + + def _pprint_single_frame(self, vnframe, indent=""): + """Returns pretty printed version of a single frame in a VerbNet class + + Returns a string containing a pretty-printed representation of + the given frame. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + """ + frame_string = self._pprint_description_within_frame(vnframe, indent) + "\n" + frame_string += self._pprint_example_within_frame(vnframe, indent + " ") + "\n" + frame_string += ( + self._pprint_syntax_within_frame(vnframe, indent + " Syntax: ") + "\n" + ) + frame_string += indent + " Semantics:\n" + frame_string += self._pprint_semantics_within_frame(vnframe, indent + " ") + return frame_string + + def _pprint_example_within_frame(self, vnframe, indent=""): + """Returns pretty printed version of example within frame in a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet frame example. + + :param vnframe: An ElementTree containing the xml contents of + a Verbnet frame. + """ + if vnframe["example"]: + return indent + " Example: " + vnframe["example"] + + def _pprint_description_within_frame(self, vnframe, indent=""): + """Returns pretty printed version of a VerbNet frame description + + Return a string containing a pretty-printed representation of + the given VerbNet frame description. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + """ + description = indent + vnframe["description"]["primary"] + if vnframe["description"]["secondary"]: + description += " ({})".format(vnframe["description"]["secondary"]) + return description + + def _pprint_syntax_within_frame(self, vnframe, indent=""): + """Returns pretty printed version of syntax within a frame in a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet frame syntax. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. 
+ """ + pieces = [] + for element in vnframe["syntax"]: + piece = element["pos_tag"] + modifier_list = [] + if "value" in element["modifiers"] and element["modifiers"]["value"]: + modifier_list.append(element["modifiers"]["value"]) + modifier_list += [ + "{}{}".format(restr["value"], restr["type"]) + for restr in ( + element["modifiers"]["selrestrs"] + + element["modifiers"]["synrestrs"] + ) + ] + if modifier_list: + piece += "[{}]".format(" ".join(modifier_list)) + pieces.append(piece) + + return indent + " ".join(pieces) + + def _pprint_semantics_within_frame(self, vnframe, indent=""): + """Returns a pretty printed version of semantics within frame in a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet frame semantics. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + """ + pieces = [] + for predicate in vnframe["semantics"]: + arguments = [argument["value"] for argument in predicate["arguments"]] + pieces.append( + f"{'¬' if predicate['negated'] else ''}{predicate['predicate_value']}({', '.join(arguments)})" + ) + return "\n".join(f"{indent}* {piece}" for piece in pieces) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f10c3436dde87850528529b4ab1b4cf6413a1bce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py @@ -0,0 +1,2489 @@ +# Natural Language Toolkit: WordNet +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bethard +# Steven Bird +# Edward Loper +# Nitin Madnani +# Nasruddin A’aidil Shari +# Sim Wei Ying Geraldine +# Soe Lynn +# Francis Bond +# Eric Kafe + +# URL: +# For license information, see LICENSE.TXT + +""" +An NLTK interface for WordNet + +WordNet is a lexical database of English. +Using synsets, helps find conceptual relationships between words +such as hypernyms, hyponyms, synonyms, antonyms etc. + +For details about WordNet see: +https://wordnet.princeton.edu/ + +This module also allows you to find lemmas in languages +other than English from the Open Multilingual Wordnet +https://omwn.org/ + +""" + +import math +import os +import re +import warnings +from collections import defaultdict, deque +from functools import total_ordering +from itertools import chain, islice +from operator import itemgetter + +from nltk.corpus.reader import CorpusReader +from nltk.internals import deprecated +from nltk.probability import FreqDist +from nltk.util import binary_search_file as _binary_search_file + +###################################################################### +# Table of Contents +###################################################################### +# - Constants +# - Data Classes +# - WordNetError +# - Lemma +# - Synset +# - WordNet Corpus Reader +# - WordNet Information Content Corpus Reader +# - Similarity Metrics +# - Demo + +###################################################################### +# Constants +###################################################################### + +#: Positive infinity (for similarity functions) +_INF = 1e300 + +# { Part-of-speech constants +ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v" +# } + +POS_LIST = [NOUN, VERB, ADJ, ADV] + +# A table of strings that are used to express verb frames. 
+VERB_FRAME_STRINGS = ( + None, + "Something %s", + "Somebody %s", + "It is %sing", + "Something is %sing PP", + "Something %s something Adjective/Noun", + "Something %s Adjective/Noun", + "Somebody %s Adjective", + "Somebody %s something", + "Somebody %s somebody", + "Something %s somebody", + "Something %s something", + "Something %s to somebody", + "Somebody %s on something", + "Somebody %s somebody something", + "Somebody %s something to somebody", + "Somebody %s something from somebody", + "Somebody %s somebody with something", + "Somebody %s somebody of something", + "Somebody %s something on somebody", + "Somebody %s somebody PP", + "Somebody %s something PP", + "Somebody %s PP", + "Somebody's (body part) %s", + "Somebody %s somebody to INFINITIVE", + "Somebody %s somebody INFINITIVE", + "Somebody %s that CLAUSE", + "Somebody %s to somebody", + "Somebody %s to INFINITIVE", + "Somebody %s whether INFINITIVE", + "Somebody %s somebody into V-ing something", + "Somebody %s something with something", + "Somebody %s INFINITIVE", + "Somebody %s VERB-ing", + "It %s that CLAUSE", + "Something %s INFINITIVE", + # OEWN additions: + "Somebody %s at something", + "Somebody %s for something", + "Somebody %s on somebody", + "Somebody %s out of somebody", +) + +SENSENUM_RE = re.compile(r"\.[\d]+\.") + + +###################################################################### +# Data Classes +###################################################################### + + +class WordNetError(Exception): + """An exception class for wordnet-related errors.""" + + +@total_ordering +class _WordNetObject: + """A common base class for lemmas and synsets.""" + + def hypernyms(self): + return self._related("@") + + def _hypernyms(self): + return self._related("@") + + def instance_hypernyms(self): + return self._related("@i") + + def _instance_hypernyms(self): + return self._related("@i") + + def hyponyms(self): + return self._related("~") + + def instance_hyponyms(self): + return self._related("~i") + + def member_holonyms(self): + return self._related("#m") + + def substance_holonyms(self): + return self._related("#s") + + def part_holonyms(self): + return self._related("#p") + + def member_meronyms(self): + return self._related("%m") + + def substance_meronyms(self): + return self._related("%s") + + def part_meronyms(self): + return self._related("%p") + + def topic_domains(self): + return self._related(";c") + + def in_topic_domains(self): + return self._related("-c") + + def region_domains(self): + return self._related(";r") + + def in_region_domains(self): + return self._related("-r") + + def usage_domains(self): + return self._related(";u") + + def in_usage_domains(self): + return self._related("-u") + + def attributes(self): + return self._related("=") + + def entailments(self): + return self._related("*") + + def causes(self): + return self._related(">") + + def also_sees(self): + return self._related("^") + + def verb_groups(self): + return self._related("$") + + def similar_tos(self): + return self._related("&") + + def __hash__(self): + return hash(self._name) + + def __eq__(self, other): + return self._name == other._name + + def __ne__(self, other): + return self._name != other._name + + def __lt__(self, other): + return self._name < other._name + + +class Lemma(_WordNetObject): + """ + The lexical entry for a single morphological form of a + sense-disambiguated word. + + Create a Lemma from a "..." 
string where: + is the morphological stem identifying the synset + is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB + is the sense number, counting from 0. + is the morphological form of interest + + Note that and can be different, e.g. the Synset + 'salt.n.03' has the Lemmas 'salt.n.03.salt', 'salt.n.03.saltiness' and + 'salt.n.03.salinity'. + + Lemma attributes, accessible via methods with the same name: + + - name: The canonical name of this lemma. + - synset: The synset that this lemma belongs to. + - syntactic_marker: For adjectives, the WordNet string identifying the + syntactic position relative modified noun. See: + https://wordnet.princeton.edu/documentation/wninput5wn + For all other parts of speech, this attribute is None. + - count: The frequency of this lemma in wordnet. + + Lemma methods: + + Lemmas have the following methods for retrieving related Lemmas. They + correspond to the names for the pointer symbols defined here: + https://wordnet.princeton.edu/documentation/wninput5wn + These methods all return lists of Lemmas: + + - antonyms + - hypernyms, instance_hypernyms + - hyponyms, instance_hyponyms + - member_holonyms, substance_holonyms, part_holonyms + - member_meronyms, substance_meronyms, part_meronyms + - topic_domains, region_domains, usage_domains + - attributes + - derivationally_related_forms + - entailments + - causes + - also_sees + - verb_groups + - similar_tos + - pertainyms + """ + + __slots__ = [ + "_wordnet_corpus_reader", + "_name", + "_syntactic_marker", + "_synset", + "_frame_strings", + "_frame_ids", + "_lexname_index", + "_lex_id", + "_lang", + "_key", + ] + + def __init__( + self, + wordnet_corpus_reader, + synset, + name, + lexname_index, + lex_id, + syntactic_marker, + ): + self._wordnet_corpus_reader = wordnet_corpus_reader + self._name = name + self._syntactic_marker = syntactic_marker + self._synset = synset + self._frame_strings = [] + self._frame_ids = [] + self._lexname_index = lexname_index + self._lex_id = lex_id + self._lang = "eng" + + self._key = None # gets set later. + + def name(self): + return self._name + + def syntactic_marker(self): + return self._syntactic_marker + + def synset(self): + return self._synset + + def frame_strings(self): + return self._frame_strings + + def frame_ids(self): + return self._frame_ids + + def lang(self): + return self._lang + + def key(self): + return self._key + + def __repr__(self): + tup = type(self).__name__, self._synset._name, self._name + return "%s('%s.%s')" % tup + + def _related(self, relation_symbol): + get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset + if (self._name, relation_symbol) not in self._synset._lemma_pointers: + return [] + return [ + get_synset(pos, offset)._lemmas[lemma_index] + for pos, offset, lemma_index in self._synset._lemma_pointers[ + self._name, relation_symbol + ] + ] + + def count(self): + """Return the frequency count for this Lemma""" + return self._wordnet_corpus_reader.lemma_count(self) + + def antonyms(self): + return self._related("!") + + def derivationally_related_forms(self): + return self._related("+") + + def pertainyms(self): + return self._related("\\") + + +class Synset(_WordNetObject): + """Create a Synset from a ".." string where: + is the word's morphological stem + is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB + is the sense number, counting from 0. 
+ + Synset attributes, accessible via methods with the same name: + + - name: The canonical name of this synset, formed using the first lemma + of this synset. Note that this may be different from the name + passed to the constructor if that string used a different lemma to + identify the synset. + - pos: The synset's part of speech, matching one of the module level + attributes ADJ, ADJ_SAT, ADV, NOUN or VERB. + - lemmas: A list of the Lemma objects for this synset. + - definition: The definition for this synset. + - examples: A list of example strings for this synset. + - offset: The offset in the WordNet dict file of this synset. + - lexname: The name of the lexicographer file containing this synset. + + Synset methods: + + Synsets have the following methods for retrieving related Synsets. + They correspond to the names for the pointer symbols defined here: + https://wordnet.princeton.edu/documentation/wninput5wn + These methods all return lists of Synsets. + + - hypernyms, instance_hypernyms + - hyponyms, instance_hyponyms + - member_holonyms, substance_holonyms, part_holonyms + - member_meronyms, substance_meronyms, part_meronyms + - attributes + - entailments + - causes + - also_sees + - verb_groups + - similar_tos + + Additionally, Synsets support the following methods specific to the + hypernym relation: + + - root_hypernyms + - common_hypernyms + - lowest_common_hypernyms + + Note that Synsets do not support the following relations because + these are defined by WordNet as lexical relations: + + - antonyms + - derivationally_related_forms + - pertainyms + """ + + __slots__ = [ + "_pos", + "_offset", + "_name", + "_frame_ids", + "_lemmas", + "_lemma_names", + "_definition", + "_examples", + "_lexname", + "_pointers", + "_lemma_pointers", + "_max_depth", + "_min_depth", + ] + + def __init__(self, wordnet_corpus_reader): + self._wordnet_corpus_reader = wordnet_corpus_reader + # All of these attributes get initialized by + # WordNetCorpusReader._synset_from_pos_and_line() + + self._pos = None + self._offset = None + self._name = None + self._frame_ids = [] + self._lemmas = [] + self._lemma_names = [] + self._definition = None + self._examples = [] + self._lexname = None # lexicographer name + self._all_hypernyms = None + + self._pointers = defaultdict(set) + self._lemma_pointers = defaultdict(list) + + def pos(self): + return self._pos + + def offset(self): + return self._offset + + def name(self): + return self._name + + def frame_ids(self): + return self._frame_ids + + def _doc(self, doc_type, default, lang="eng"): + """Helper method for Synset.definition and Synset.examples""" + corpus = self._wordnet_corpus_reader + if lang not in corpus.langs(): + return None + elif lang == "eng": + return default + else: + corpus._load_lang_data(lang) + of = corpus.ss2of(self) + i = corpus.lg_attrs.index(doc_type) + if of in corpus._lang_data[lang][i]: + return corpus._lang_data[lang][i][of] + else: + return None + + def definition(self, lang="eng"): + """Return definition in specified language""" + return self._doc("def", self._definition, lang=lang) + + def examples(self, lang="eng"): + """Return examples in specified language""" + return self._doc("exe", self._examples, lang=lang) + + def lexname(self): + return self._lexname + + def _needs_root(self): + if self._pos == NOUN and self._wordnet_corpus_reader.get_version() != "1.6": + return False + else: + return True + + def lemma_names(self, lang="eng"): + """Return all the lemma_names associated with the synset""" + if lang == "eng": + 
return self._lemma_names + else: + reader = self._wordnet_corpus_reader + reader._load_lang_data(lang) + i = reader.ss2of(self) + if i in reader._lang_data[lang][0]: + return reader._lang_data[lang][0][i] + else: + return [] + + def lemmas(self, lang="eng"): + """Return all the lemma objects associated with the synset""" + if lang == "eng": + return self._lemmas + elif self._name: + self._wordnet_corpus_reader._load_lang_data(lang) + lemmark = [] + lemmy = self.lemma_names(lang) + for lem in lemmy: + temp = Lemma( + self._wordnet_corpus_reader, + self, + lem, + self._wordnet_corpus_reader._lexnames.index(self.lexname()), + 0, + None, + ) + temp._lang = lang + lemmark.append(temp) + return lemmark + + def root_hypernyms(self): + """Get the topmost hypernyms of this synset in WordNet.""" + + result = [] + seen = set() + todo = [self] + while todo: + next_synset = todo.pop() + if next_synset not in seen: + seen.add(next_synset) + next_hypernyms = ( + next_synset.hypernyms() + next_synset.instance_hypernyms() + ) + if not next_hypernyms: + result.append(next_synset) + else: + todo.extend(next_hypernyms) + return result + + # Simpler implementation which makes incorrect assumption that + # hypernym hierarchy is acyclic: + # + # if not self.hypernyms(): + # return [self] + # else: + # return list(set(root for h in self.hypernyms() + # for root in h.root_hypernyms())) + def max_depth(self): + """ + :return: The length of the longest hypernym path from this + synset to the root. + """ + + if "_max_depth" not in self.__dict__: + hypernyms = self.hypernyms() + self.instance_hypernyms() + if not hypernyms: + self._max_depth = 0 + else: + self._max_depth = 1 + max(h.max_depth() for h in hypernyms) + return self._max_depth + + def min_depth(self): + """ + :return: The length of the shortest hypernym path from this + synset to the root. + """ + + if "_min_depth" not in self.__dict__: + hypernyms = self.hypernyms() + self.instance_hypernyms() + if not hypernyms: + self._min_depth = 0 + else: + self._min_depth = 1 + min(h.min_depth() for h in hypernyms) + return self._min_depth + + def closure(self, rel, depth=-1): + """ + Return the transitive closure of source under the rel + relationship, breadth-first, discarding cycles: + + >>> from nltk.corpus import wordnet as wn + >>> computer = wn.synset('computer.n.01') + >>> topic = lambda s:s.topic_domains() + >>> print(list(computer.closure(topic))) + [Synset('computer_science.n.01')] + + UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2 + + + Include redundant paths (but only once), avoiding duplicate searches + (from 'animal.n.01' to 'entity.n.01'): + + >>> dog = wn.synset('dog.n.01') + >>> hyp = lambda s:s.hypernyms() + >>> print(list(dog.closure(hyp))) + [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\ + Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\ + Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\ + Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\ + Synset('physical_entity.n.01'), Synset('entity.n.01')] + + UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7 + """ + + from nltk.util import acyclic_breadth_first + + for synset in acyclic_breadth_first(self, rel, depth): + if synset != self: + yield synset + + from nltk.util import acyclic_depth_first as acyclic_tree + from nltk.util import unweighted_minimum_spanning_tree as mst + + # Also add this shortcut? 
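A short sketch of the min_depth() and max_depth() methods defined above; the exact numbers assume the standard WordNet 3.0 data distributed with NLTK, and they correspond to the short domestic_animal path and the long canine path visible in the tree() doctest further below:

    >>> from nltk.corpus import wordnet as wn
    >>> dog = wn.synset('dog.n.01')
    >>> dog.min_depth(), dog.max_depth()
    (8, 13)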
+ # from nltk.util import unweighted_minimum_spanning_digraph as umsd + + def tree(self, rel, depth=-1, cut_mark=None): + """ + Return the full relation tree, including self, + discarding cycles: + + >>> from nltk.corpus import wordnet as wn + >>> from pprint import pprint + >>> computer = wn.synset('computer.n.01') + >>> topic = lambda s:s.topic_domains() + >>> pprint(computer.tree(topic)) + [Synset('computer.n.01'), [Synset('computer_science.n.01')]] + + UserWarning: Discarded redundant search for Synset('computer.n.01') at depth -3 + + + But keep duplicate branches (from 'animal.n.01' to 'entity.n.01'): + + >>> dog = wn.synset('dog.n.01') + >>> hyp = lambda s:s.hypernyms() + >>> pprint(dog.tree(hyp)) + [Synset('dog.n.01'), + [Synset('canine.n.02'), + [Synset('carnivore.n.01'), + [Synset('placental.n.01'), + [Synset('mammal.n.01'), + [Synset('vertebrate.n.01'), + [Synset('chordate.n.01'), + [Synset('animal.n.01'), + [Synset('organism.n.01'), + [Synset('living_thing.n.01'), + [Synset('whole.n.02'), + [Synset('object.n.01'), + [Synset('physical_entity.n.01'), + [Synset('entity.n.01')]]]]]]]]]]]]], + [Synset('domestic_animal.n.01'), + [Synset('animal.n.01'), + [Synset('organism.n.01'), + [Synset('living_thing.n.01'), + [Synset('whole.n.02'), + [Synset('object.n.01'), + [Synset('physical_entity.n.01'), [Synset('entity.n.01')]]]]]]]]] + """ + + from nltk.util import acyclic_branches_depth_first + + return acyclic_branches_depth_first(self, rel, depth, cut_mark) + + def hypernym_paths(self): + """ + Get the path(s) from this synset to the root, where each path is a + list of the synset nodes traversed on the way to the root. + + :return: A list of lists, where each list gives the node sequence + connecting the initial ``Synset`` node and a root node. + """ + paths = [] + + hypernyms = self.hypernyms() + self.instance_hypernyms() + if len(hypernyms) == 0: + paths = [[self]] + + for hypernym in hypernyms: + for ancestor_list in hypernym.hypernym_paths(): + ancestor_list.append(self) + paths.append(ancestor_list) + return paths + + def common_hypernyms(self, other): + """ + Find all synsets that are hypernyms of this synset and the + other synset. + + :type other: Synset + :param other: other input synset. + :return: The synsets that are hypernyms of both synsets. + """ + if not self._all_hypernyms: + self._all_hypernyms = { + self_synset + for self_synsets in self._iter_hypernym_lists() + for self_synset in self_synsets + } + if not other._all_hypernyms: + other._all_hypernyms = { + other_synset + for other_synsets in other._iter_hypernym_lists() + for other_synset in other_synsets + } + return list(self._all_hypernyms.intersection(other._all_hypernyms)) + + def lowest_common_hypernyms(self, other, simulate_root=False, use_min_depth=False): + """ + Get a list of lowest synset(s) that both synsets have as a hypernym. + When `use_min_depth == False` this means that the synset which appears + as a hypernym of both `self` and `other` with the lowest maximum depth + is returned or if there are multiple such synsets at the same depth + they are all returned + + However, if `use_min_depth == True` then the synset(s) which has/have + the lowest minimum depth and appear(s) in both paths is/are returned. + + By setting the use_min_depth flag to True, the behavior of NLTK2 can be + preserved. This was changed in NLTK3 to give more accurate results in a + small set of cases, generally with synsets concerning people. (eg: + 'chef.n.01', 'fireman.n.01', etc.) 
+ + This method is an implementation of Ted Pedersen's "Lowest Common + Subsumer" method from the Perl Wordnet module. It can return either + "self" or "other" if they are a hypernym of the other. + + :type other: Synset + :param other: other input synset + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (False by default) + creates a fake root that connects all the taxonomies. Set it + to True to enable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will need to be added + for nouns as well. + :type use_min_depth: bool + :param use_min_depth: This setting mimics older (v2) behavior of NLTK + wordnet If True, will use the min_depth function to calculate the + lowest common hypernyms. This is known to give strange results for + some synset pairs (eg: 'chef.n.01', 'fireman.n.01') but is retained + for backwards compatibility + :return: The synsets that are the lowest common hypernyms of both + synsets + """ + synsets = self.common_hypernyms(other) + if simulate_root: + fake_synset = Synset(None) + fake_synset._name = "*ROOT*" + fake_synset.hypernyms = lambda: [] + fake_synset.instance_hypernyms = lambda: [] + synsets.append(fake_synset) + + try: + if use_min_depth: + max_depth = max(s.min_depth() for s in synsets) + unsorted_lch = [s for s in synsets if s.min_depth() == max_depth] + else: + max_depth = max(s.max_depth() for s in synsets) + unsorted_lch = [s for s in synsets if s.max_depth() == max_depth] + return sorted(unsorted_lch) + except ValueError: + return [] + + def hypernym_distances(self, distance=0, simulate_root=False): + """ + Get the path(s) from this synset to the root, counting the distance + of each node from the initial node on the way. A set of + (synset, distance) tuples is returned. + + :type distance: int + :param distance: the distance (number of edges) from this hypernym to + the original hypernym ``Synset`` on which this method was called. + :return: A set of ``(Synset, int)`` tuples where each ``Synset`` is + a hypernym of the first ``Synset``. + """ + distances = {(self, distance)} + for hypernym in self._hypernyms() + self._instance_hypernyms(): + distances |= hypernym.hypernym_distances(distance + 1, simulate_root=False) + if simulate_root: + fake_synset = Synset(None) + fake_synset._name = "*ROOT*" + fake_synset_distance = max(distances, key=itemgetter(1))[1] + distances.add((fake_synset, fake_synset_distance + 1)) + return distances + + def _shortest_hypernym_paths(self, simulate_root): + if self._name == "*ROOT*": + return {self: 0} + + queue = deque([(self, 0)]) + path = {} + + while queue: + s, depth = queue.popleft() + if s in path: + continue + path[s] = depth + + depth += 1 + queue.extend((hyp, depth) for hyp in s._hypernyms()) + queue.extend((hyp, depth) for hyp in s._instance_hypernyms()) + + if simulate_root: + fake_synset = Synset(None) + fake_synset._name = "*ROOT*" + path[fake_synset] = max(path.values()) + 1 + + return path + + def shortest_path_distance(self, other, simulate_root=False): + """ + Returns the distance of the shortest path linking the two synsets (if + one exists). For each synset, all the ancestor nodes and their + distances are recorded and compared. The ancestor node common to both + synsets that can be reached with the minimum number of traversals is + used. 
If no ancestor nodes are common, None is returned. If a node is + compared with itself 0 is returned. + + :type other: Synset + :param other: The Synset to which the shortest path will be found. + :return: The number of edges in the shortest path connecting the two + nodes, or None if no path exists. + """ + + if self == other: + return 0 + + dist_dict1 = self._shortest_hypernym_paths(simulate_root) + dist_dict2 = other._shortest_hypernym_paths(simulate_root) + + # For each ancestor synset common to both subject synsets, find the + # connecting path length. Return the shortest of these. + + inf = float("inf") + path_distance = inf + for synset, d1 in dist_dict1.items(): + d2 = dist_dict2.get(synset, inf) + path_distance = min(path_distance, d1 + d2) + + return None if math.isinf(path_distance) else path_distance + + # interface to similarity methods + def path_similarity(self, other, verbose=False, simulate_root=True): + """ + Path Distance Similarity: + Return a score denoting how similar two word senses are, based on the + shortest path that connects the senses in the is-a (hypernym/hypnoym) + taxonomy. The score is in the range 0 to 1, except in those cases where + a path cannot be found (will only be true for verbs as there are many + distinct verb taxonomies), in which case None is returned. A score of + 1 represents identity i.e. comparing a sense with itself will return 1. + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (True by default) + creates a fake root that connects all the taxonomies. Set it + to false to disable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will be added for nouns + as well. + :return: A score denoting the similarity of the two ``Synset`` objects, + normally between 0 and 1. None is returned if no connecting path + could be found. 1 is returned if a ``Synset`` is compared with + itself. + """ + + distance = self.shortest_path_distance( + other, + simulate_root=simulate_root and (self._needs_root() or other._needs_root()), + ) + if distance is None or distance < 0: + return None + return 1.0 / (distance + 1) + + def lch_similarity(self, other, verbose=False, simulate_root=True): + """ + Leacock Chodorow Similarity: + Return a score denoting how similar two word senses are, based on the + shortest path that connects the senses (as above) and the maximum depth + of the taxonomy in which the senses occur. The relationship is given as + -log(p/2d) where p is the shortest path length and d is the taxonomy + depth. + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (True by default) + creates a fake root that connects all the taxonomies. Set it + to false to disable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will be added for nouns + as well. + :return: A score denoting the similarity of the two ``Synset`` objects, + normally greater than 0. 
None is returned if no connecting path + could be found. If a ``Synset`` is compared with itself, the + maximum score is returned, which varies depending on the taxonomy + depth. + """ + + if self._pos != other._pos: + raise WordNetError( + "Computing the lch similarity requires " + "%s and %s to have the same part of speech." % (self, other) + ) + + need_root = self._needs_root() + + if self._pos not in self._wordnet_corpus_reader._max_depth: + self._wordnet_corpus_reader._compute_max_depth(self._pos, need_root) + + depth = self._wordnet_corpus_reader._max_depth[self._pos] + + distance = self.shortest_path_distance( + other, simulate_root=simulate_root and need_root + ) + + if distance is None or distance < 0 or depth == 0: + return None + return -math.log((distance + 1) / (2.0 * depth)) + + def wup_similarity(self, other, verbose=False, simulate_root=True): + """ + Wu-Palmer Similarity: + Return a score denoting how similar two word senses are, based on the + depth of the two senses in the taxonomy and that of their Least Common + Subsumer (most specific ancestor node). Previously, the scores computed + by this implementation did _not_ always agree with those given by + Pedersen's Perl implementation of WordNet Similarity. However, with + the addition of the simulate_root flag (see below), the score for + verbs now almost always agree but not always for nouns. + + The LCS does not necessarily feature in the shortest path connecting + the two senses, as it is by definition the common ancestor deepest in + the taxonomy, not closest to the two senses. Typically, however, it + will so feature. Where multiple candidates for the LCS exist, that + whose shortest path to the root node is the longest will be selected. + Where the LCS has multiple paths to the root, the longer path is used + for the purposes of the calculation. + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (True by default) + creates a fake root that connects all the taxonomies. Set it + to false to disable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will be added for nouns + as well. + :return: A float score denoting the similarity of the two ``Synset`` + objects, normally greater than zero. If no connecting path between + the two senses can be found, None is returned. 
+ + """ + need_root = self._needs_root() or other._needs_root() + + # Note that to preserve behavior from NLTK2 we set use_min_depth=True + # It is possible that more accurate results could be obtained by + # removing this setting and it should be tested later on + subsumers = self.lowest_common_hypernyms( + other, simulate_root=simulate_root and need_root, use_min_depth=True + ) + + # If no LCS was found return None + if len(subsumers) == 0: + return None + + subsumer = self if self in subsumers else subsumers[0] + + # Get the longest path from the LCS to the root, + # including a correction: + # - add one because the calculations include both the start and end + # nodes + depth = subsumer.max_depth() + 1 + + # Note: No need for an additional add-one correction for non-nouns + # to account for an imaginary root node because that is now + # automatically handled by simulate_root + # if subsumer._pos != NOUN: + # depth += 1 + + # Get the shortest path from the LCS to each of the synsets it is + # subsuming. Add this to the LCS path length to get the path + # length from each synset to the root. + len1 = self.shortest_path_distance( + subsumer, simulate_root=simulate_root and need_root + ) + len2 = other.shortest_path_distance( + subsumer, simulate_root=simulate_root and need_root + ) + if len1 is None or len2 is None: + return None + len1 += depth + len2 += depth + return (2.0 * depth) / (len1 + len2) + + def res_similarity(self, other, ic, verbose=False): + """ + Resnik Similarity: + Return a score denoting how similar two word senses are, based on the + Information Content (IC) of the Least Common Subsumer (most specific + ancestor node). + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type ic: dict + :param ic: an information content object (as returned by + ``nltk.corpus.wordnet_ic.ic()``). + :return: A float score denoting the similarity of the two ``Synset`` + objects. Synsets whose LCS is the root node of the taxonomy will + have a score of 0 (e.g. N['dog'][0] and N['table'][0]). + """ + + ic1, ic2, lcs_ic = _lcs_ic(self, other, ic) + return lcs_ic + + def jcn_similarity(self, other, ic, verbose=False): + """ + Jiang-Conrath Similarity: + Return a score denoting how similar two word senses are, based on the + Information Content (IC) of the Least Common Subsumer (most specific + ancestor node) and that of the two input Synsets. The relationship is + given by the equation 1 / (IC(s1) + IC(s2) - 2 * IC(lcs)). + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type ic: dict + :param ic: an information content object (as returned by + ``nltk.corpus.wordnet_ic.ic()``). + :return: A float score denoting the similarity of the two ``Synset`` + objects. + """ + + if self == other: + return _INF + + ic1, ic2, lcs_ic = _lcs_ic(self, other, ic) + + # If either of the input synsets are the root synset, or have a + # frequency of 0 (sparse data problem), return 0. + if ic1 == 0 or ic2 == 0: + return 0 + + ic_difference = ic1 + ic2 - 2 * lcs_ic + + if ic_difference == 0: + return _INF + + return 1 / ic_difference + + def lin_similarity(self, other, ic, verbose=False): + """ + Lin Similarity: + Return a score denoting how similar two word senses are, based on the + Information Content (IC) of the Least Common Subsumer (most specific + ancestor node) and that of the two input Synsets. The relationship is + given by the equation 2 * IC(lcs) / (IC(s1) + IC(s2)). 
+ + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type ic: dict + :param ic: an information content object (as returned by + ``nltk.corpus.wordnet_ic.ic()``). + :return: A float score denoting the similarity of the two ``Synset`` + objects, in the range 0 to 1. + """ + + ic1, ic2, lcs_ic = _lcs_ic(self, other, ic) + return (2.0 * lcs_ic) / (ic1 + ic2) + + def _iter_hypernym_lists(self): + """ + :return: An iterator over ``Synset`` objects that are either proper + hypernyms or instance of hypernyms of the synset. + """ + todo = [self] + seen = set() + while todo: + for synset in todo: + seen.add(synset) + yield todo + todo = [ + hypernym + for synset in todo + for hypernym in (synset.hypernyms() + synset.instance_hypernyms()) + if hypernym not in seen + ] + + def __repr__(self): + return f"{type(self).__name__}('{self._name}')" + + def _related(self, relation_symbol, sort=True): + get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset + if relation_symbol not in self._pointers: + return [] + pointer_tuples = self._pointers[relation_symbol] + r = [get_synset(pos, offset) for pos, offset in pointer_tuples] + if sort: + r.sort() + return r + + +###################################################################### +# WordNet Corpus Reader +###################################################################### + + +class WordNetCorpusReader(CorpusReader): + """ + A corpus reader used to access wordnet or its variants. + """ + + _ENCODING = "utf8" + + # { Part-of-speech constants + ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v" + # } + + # { Filename constants + _FILEMAP = {ADJ: "adj", ADV: "adv", NOUN: "noun", VERB: "verb"} + # } + + # { Part of speech constants + _pos_numbers = {NOUN: 1, VERB: 2, ADJ: 3, ADV: 4, ADJ_SAT: 5} + _pos_names = dict(tup[::-1] for tup in _pos_numbers.items()) + # } + + #: A list of file identifiers for all the fileids used by this + #: corpus reader. + _FILES = ( + "cntlist.rev", + "lexnames", + "index.sense", + "index.adj", + "index.adv", + "index.noun", + "index.verb", + "data.adj", + "data.adv", + "data.noun", + "data.verb", + "adj.exc", + "adv.exc", + "noun.exc", + "verb.exc", + ) + + def __init__(self, root, omw_reader): + """ + Construct a new wordnet corpus reader, with the given root + directory. + """ + + super().__init__(root, self._FILES, encoding=self._ENCODING) + + # A index that provides the file offset + # Map from lemma -> pos -> synset_index -> offset + self._lemma_pos_offset_map = defaultdict(dict) + + # A cache so we don't have to reconstruct synsets + # Map from pos -> offset -> synset + self._synset_offset_cache = defaultdict(dict) + + # A lookup for the maximum depth of each part of speech. Useful for + # the lch similarity metric. + self._max_depth = defaultdict(dict) + + # Corpus reader containing omw data. + self._omw_reader = omw_reader + + # Corpus reader containing extended_omw data. 
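For orientation, a brief sketch of the similarity interface defined above (values assume the WordNet 3.0 data shipped with NLTK): path_similarity is 1/(shortest_path_distance + 1), and wup_similarity follows from the LCS depth computation shown earlier.

    >>> from nltk.corpus import wordnet as wn
    >>> dog, cat = wn.synset('dog.n.01'), wn.synset('cat.n.01')
    >>> dog.path_similarity(cat)
    0.2
    >>> round(dog.wup_similarity(cat), 3)
    0.857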
+ self._exomw_reader = None + + self.provenances = defaultdict(str) + self.provenances["eng"] = "" + + if self._omw_reader is None: + warnings.warn( + "The multilingual functions are not available with this Wordnet version" + ) + + self.omw_langs = set() + + # A cache to store the wordnet data of multiple languages + self._lang_data = defaultdict(list) + + self._data_file_map = {} + self._exception_map = {} + self._lexnames = [] + self._key_count_file = None + self._key_synset_file = None + + # Load the lexnames + with self.open("lexnames") as fp: + for i, line in enumerate(fp): + index, lexname, _ = line.split() + assert int(index) == i + self._lexnames.append(lexname) + + # Load the indices for lemmas and synset offsets + self._load_lemma_pos_offset_map() + + # load the exception file data into memory + self._load_exception_map() + + self.nomap = [] + self.splits = {} + + # map from WordNet 3.0 for OMW data + self.map30 = self.map_wn30() + + # Language data attributes + self.lg_attrs = ["lemma", "none", "def", "exe"] + + def index_sense(self, version=None): + """Read sense key to synset id mapping from index.sense file in corpus directory""" + fn = "index.sense" + if version: + from nltk.corpus import CorpusReader, LazyCorpusLoader + + ixreader = LazyCorpusLoader(version, CorpusReader, r".*/" + fn) + else: + ixreader = self + with ixreader.open(fn) as fp: + sensekey_map = {} + for line in fp: + fields = line.strip().split() + sensekey = fields[0] + pos = self._pos_names[int(sensekey.split("%")[1].split(":")[0])] + sensekey_map[sensekey] = f"{fields[1]}-{pos}" + return sensekey_map + + def map_to_many(self): + sensekey_map1 = self.index_sense("wordnet") + sensekey_map2 = self.index_sense() + synset_to_many = {} + for synsetid in set(sensekey_map1.values()): + synset_to_many[synsetid] = [] + for sensekey in set(sensekey_map1.keys()).intersection( + set(sensekey_map2.keys()) + ): + source = sensekey_map1[sensekey] + target = sensekey_map2[sensekey] + synset_to_many[source].append(target) + return synset_to_many + + def map_to_one(self): + synset_to_many = self.map_to_many() + synset_to_one = {} + for source in synset_to_many: + candidates_bag = synset_to_many[source] + if candidates_bag: + candidates_set = set(candidates_bag) + if len(candidates_set) == 1: + target = candidates_bag[0] + else: + counts = [] + for candidate in candidates_set: + counts.append((candidates_bag.count(candidate), candidate)) + self.splits[source] = counts + target = max(counts)[1] + synset_to_one[source] = target + if source[-1] == "s": + # Add a mapping from "a" to target for applications like omw, + # where only Lithuanian and Slovak use the "s" ss_type. 
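A small usage sketch of index_sense() above, which loads the whole index.sense file into an in-memory dict keyed by sense key; the offset shown assumes WordNet 3.0 data:

    >>> from nltk.corpus import wordnet as wn
    >>> wn.index_sense()['dog%1:05:00::']
    '02084071-n'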
+ synset_to_one[f"{source[:-1]}a"] = target + else: + self.nomap.append(source) + return synset_to_one + + def map_wn30(self): + """Mapping from Wordnet 3.0 to currently loaded Wordnet version""" + if self.get_version() == "3.0": + return None + else: + return self.map_to_one() + + # Open Multilingual WordNet functions, contributed by + # Nasruddin A’aidil Shari, Sim Wei Ying Geraldine, and Soe Lynn + + def of2ss(self, of): + """take an id and return the synsets""" + return self.synset_from_pos_and_offset(of[-1], int(of[:8])) + + def ss2of(self, ss): + """return the ID of the synset""" + if ss: + return f"{ss.offset():08d}-{ss.pos()}" + + def _load_lang_data(self, lang): + """load the wordnet data of the requested language from the file to + the cache, _lang_data""" + + if lang in self._lang_data: + return + + if self._omw_reader and not self.omw_langs: + self.add_omw() + + if lang not in self.langs(): + raise WordNetError("Language is not supported.") + + if self._exomw_reader and lang not in self.omw_langs: + reader = self._exomw_reader + else: + reader = self._omw_reader + + prov = self.provenances[lang] + if prov in ["cldr", "wikt"]: + prov2 = prov + else: + prov2 = "data" + + with reader.open(f"{prov}/wn-{prov2}-{lang.split('_')[0]}.tab") as fp: + self.custom_lemmas(fp, lang) + self.disable_custom_lemmas(lang) + + def add_provs(self, reader): + """Add languages from Multilingual Wordnet to the provenance dictionary""" + fileids = reader.fileids() + for fileid in fileids: + prov, langfile = os.path.split(fileid) + file_name, file_extension = os.path.splitext(langfile) + if file_extension == ".tab": + lang = file_name.split("-")[-1] + if lang in self.provenances or prov in ["cldr", "wikt"]: + # We already have another resource for this lang, + # so we need to further specify the lang id: + lang = f"{lang}_{prov}" + self.provenances[lang] = prov + + def add_omw(self): + self.add_provs(self._omw_reader) + self.omw_langs = set(self.provenances.keys()) + + def add_exomw(self): + """ + Add languages from Extended OMW + + >>> import nltk + >>> from nltk.corpus import wordnet as wn + >>> wn.add_exomw() + >>> print(wn.synset('intrinsically.r.01').lemmas(lang="eng_wikt")) + [Lemma('intrinsically.r.01.per_se'), Lemma('intrinsically.r.01.as_such')] + """ + from nltk.corpus import extended_omw + + self.add_omw() + self._exomw_reader = extended_omw + self.add_provs(self._exomw_reader) + + def langs(self): + """return a list of languages supported by Multilingual Wordnet""" + return list(self.provenances.keys()) + + def _load_lemma_pos_offset_map(self): + for suffix in self._FILEMAP.values(): + + # parse each line of the file (ignoring comment lines) + with self.open("index.%s" % suffix) as fp: + for i, line in enumerate(fp): + if line.startswith(" "): + continue + + _iter = iter(line.split()) + + def _next_token(): + return next(_iter) + + try: + + # get the lemma and part-of-speech + lemma = _next_token() + pos = _next_token() + + # get the number of synsets for this lemma + n_synsets = int(_next_token()) + assert n_synsets > 0 + + # get and ignore the pointer symbols for all synsets of + # this lemma + n_pointers = int(_next_token()) + [_next_token() for _ in range(n_pointers)] + + # same as number of synsets + n_senses = int(_next_token()) + assert n_synsets == n_senses + + # get and ignore number of senses ranked according to + # frequency + _next_token() + + # get synset offsets + synset_offsets = [int(_next_token()) for _ in range(n_synsets)] + + # raise more informative error with file name 
and line number + except (AssertionError, ValueError) as e: + tup = ("index.%s" % suffix), (i + 1), e + raise WordNetError("file %s, line %i: %s" % tup) from e + + # map lemmas and parts of speech to synsets + self._lemma_pos_offset_map[lemma][pos] = synset_offsets + if pos == ADJ: + self._lemma_pos_offset_map[lemma][ADJ_SAT] = synset_offsets + + def _load_exception_map(self): + # load the exception file data into memory + for pos, suffix in self._FILEMAP.items(): + self._exception_map[pos] = {} + with self.open("%s.exc" % suffix) as fp: + for line in fp: + terms = line.split() + self._exception_map[pos][terms[0]] = terms[1:] + self._exception_map[ADJ_SAT] = self._exception_map[ADJ] + + def _compute_max_depth(self, pos, simulate_root): + """ + Compute the max depth for the given part of speech. This is + used by the lch similarity metric. + """ + depth = 0 + for ii in self.all_synsets(pos): + try: + depth = max(depth, ii.max_depth()) + except RuntimeError: + print(ii) + if simulate_root: + depth += 1 + self._max_depth[pos] = depth + + def get_version(self): + fh = self._data_file(ADJ) + fh.seek(0) + for line in fh: + match = re.search(r"Word[nN]et (\d+|\d+\.\d+) Copyright", line) + if match is not None: + version = match.group(1) + fh.seek(0) + return version + + ############################################################# + # Loading Lemmas + ############################################################# + + def lemma(self, name, lang="eng"): + """Return lemma object that matches the name""" + # cannot simply split on first '.', + # e.g.: '.45_caliber.a.01..45_caliber' + separator = SENSENUM_RE.search(name).end() + + synset_name, lemma_name = name[: separator - 1], name[separator:] + + synset = self.synset(synset_name) + for lemma in synset.lemmas(lang): + if lemma._name == lemma_name: + return lemma + raise WordNetError(f"No lemma {lemma_name!r} in {synset_name!r}") + + def lemma_from_key(self, key): + # Keys are case sensitive and always lower-case + key = key.lower() + + lemma_name, lex_sense = key.split("%") + pos_number, lexname_index, lex_id, _, _ = lex_sense.split(":") + pos = self._pos_names[int(pos_number)] + + # open the key -> synset file if necessary + if self._key_synset_file is None: + self._key_synset_file = self.open("index.sense") + + # Find the synset for the lemma. 
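+ # index.sense is sorted by sense key, so a binary search over the open file
+ # suffices; each line has the form "sense_key synset_offset sense_number tag_count",
+ # and the synset offset is the second field.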
+ synset_line = _binary_search_file(self._key_synset_file, key) + if not synset_line: + raise WordNetError("No synset found for key %r" % key) + offset = int(synset_line.split()[1]) + synset = self.synset_from_pos_and_offset(pos, offset) + # return the corresponding lemma + for lemma in synset._lemmas: + if lemma._key == key: + return lemma + raise WordNetError("No lemma found for for key %r" % key) + + ############################################################# + # Loading Synsets + ############################################################# + def synset(self, name): + # split name into lemma, part of speech and synset number + lemma, pos, synset_index_str = name.lower().rsplit(".", 2) + synset_index = int(synset_index_str) - 1 + + # get the offset for this synset + try: + offset = self._lemma_pos_offset_map[lemma][pos][synset_index] + except KeyError as e: + raise WordNetError(f"No lemma {lemma!r} with part of speech {pos!r}") from e + except IndexError as e: + n_senses = len(self._lemma_pos_offset_map[lemma][pos]) + raise WordNetError( + f"Lemma {lemma!r} with part of speech {pos!r} only " + f"has {n_senses} {'sense' if n_senses == 1 else 'senses'}" + ) from e + + # load synset information from the appropriate file + synset = self.synset_from_pos_and_offset(pos, offset) + + # some basic sanity checks on loaded attributes + if pos == "s" and synset._pos == "a": + message = ( + "Adjective satellite requested but only plain " + "adjective found for lemma %r" + ) + raise WordNetError(message % lemma) + assert synset._pos == pos or (pos == "a" and synset._pos == "s") + + # Return the synset object. + return synset + + def _data_file(self, pos): + """ + Return an open file pointer for the data file for the given + part of speech. + """ + if pos == ADJ_SAT: + pos = ADJ + if self._data_file_map.get(pos) is None: + fileid = "data.%s" % self._FILEMAP[pos] + self._data_file_map[pos] = self.open(fileid) + return self._data_file_map[pos] + + def synset_from_pos_and_offset(self, pos, offset): + """ + - pos: The synset's part of speech, matching one of the module level + attributes ADJ, ADJ_SAT, ADV, NOUN or VERB ('a', 's', 'r', 'n', or 'v'). + - offset: The byte offset of this synset in the WordNet dict file + for this pos. 
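+ Offsets are typically taken from an existing ``Synset`` (via ``offset()``) or
+ from the 8-digit part of an Open Multilingual Wordnet id such as
+ ``'00001740-n'`` (see ``of2ss``/``ss2of``).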
+ + >>> from nltk.corpus import wordnet as wn + >>> print(wn.synset_from_pos_and_offset('n', 1740)) + Synset('entity.n.01') + """ + # Check to see if the synset is in the cache + if offset in self._synset_offset_cache[pos]: + return self._synset_offset_cache[pos][offset] + + data_file = self._data_file(pos) + data_file.seek(offset) + data_file_line = data_file.readline() + # If valid, the offset equals the 8-digit 0-padded integer found at the start of the line: + line_offset = data_file_line[:8] + if ( + line_offset.isalnum() + and line_offset == f"{'0'*(8-len(str(offset)))}{str(offset)}" + ): + synset = self._synset_from_pos_and_line(pos, data_file_line) + assert synset._offset == offset + self._synset_offset_cache[pos][offset] = synset + else: + synset = None + warnings.warn(f"No WordNet synset found for pos={pos} at offset={offset}.") + data_file.seek(0) + return synset + + @deprecated("Use public method synset_from_pos_and_offset() instead") + def _synset_from_pos_and_offset(self, *args, **kwargs): + """ + Hack to help people like the readers of + https://stackoverflow.com/a/27145655/1709587 + who were using this function before it was officially a public method + """ + return self.synset_from_pos_and_offset(*args, **kwargs) + + def _synset_from_pos_and_line(self, pos, data_file_line): + # Construct a new (empty) synset. + synset = Synset(self) + + # parse the entry for this synset + try: + + # parse out the definitions and examples from the gloss + columns_str, gloss = data_file_line.strip().split("|") + definition = re.sub(r"[\"].*?[\"]", "", gloss).strip() + examples = re.findall(r'"([^"]*)"', gloss) + for example in examples: + synset._examples.append(example) + + synset._definition = definition.strip("; ") + + # split the other info into fields + _iter = iter(columns_str.split()) + + def _next_token(): + return next(_iter) + + # get the offset + synset._offset = int(_next_token()) + + # determine the lexicographer file name + lexname_index = int(_next_token()) + synset._lexname = self._lexnames[lexname_index] + + # get the part of speech + synset._pos = _next_token() + + # create Lemma objects for each lemma + n_lemmas = int(_next_token(), 16) + for _ in range(n_lemmas): + # get the lemma name + lemma_name = _next_token() + # get the lex_id (used for sense_keys) + lex_id = int(_next_token(), 16) + # If the lemma has a syntactic marker, extract it. 
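+ # Illustrative (entry shape only): a data-file lemma like "galore(ip)" would
+ # split into the name "galore" and the marker "(ip)"; lemmas without a marker
+ # leave syn_mark as None.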
+ m = re.match(r"(.*?)(\(.*\))?$", lemma_name) + lemma_name, syn_mark = m.groups() + # create the lemma object + lemma = Lemma(self, synset, lemma_name, lexname_index, lex_id, syn_mark) + synset._lemmas.append(lemma) + synset._lemma_names.append(lemma._name) + + # collect the pointer tuples + n_pointers = int(_next_token()) + for _ in range(n_pointers): + symbol = _next_token() + offset = int(_next_token()) + pos = _next_token() + lemma_ids_str = _next_token() + if lemma_ids_str == "0000": + synset._pointers[symbol].add((pos, offset)) + else: + source_index = int(lemma_ids_str[:2], 16) - 1 + target_index = int(lemma_ids_str[2:], 16) - 1 + source_lemma_name = synset._lemmas[source_index]._name + lemma_pointers = synset._lemma_pointers + tups = lemma_pointers[source_lemma_name, symbol] + tups.append((pos, offset, target_index)) + + # read the verb frames + try: + frame_count = int(_next_token()) + except StopIteration: + pass + else: + for _ in range(frame_count): + # read the plus sign + plus = _next_token() + assert plus == "+" + # read the frame and lemma number + frame_number = int(_next_token()) + frame_string_fmt = VERB_FRAME_STRINGS[frame_number] + lemma_number = int(_next_token(), 16) + # lemma number of 00 means all words in the synset + if lemma_number == 0: + synset._frame_ids.append(frame_number) + for lemma in synset._lemmas: + lemma._frame_ids.append(frame_number) + lemma._frame_strings.append(frame_string_fmt % lemma._name) + # only a specific word in the synset + else: + lemma = synset._lemmas[lemma_number - 1] + lemma._frame_ids.append(frame_number) + lemma._frame_strings.append(frame_string_fmt % lemma._name) + + # raise a more informative error with line text + except ValueError as e: + raise WordNetError(f"line {data_file_line!r}: {e}") from e + + # set sense keys for Lemma objects - note that this has to be + # done afterwards so that the relations are available + for lemma in synset._lemmas: + if synset._pos == ADJ_SAT: + head_lemma = synset.similar_tos()[0]._lemmas[0] + head_name = head_lemma._name + head_id = "%02d" % head_lemma._lex_id + else: + head_name = head_id = "" + tup = ( + lemma._name, + WordNetCorpusReader._pos_numbers[synset._pos], + lemma._lexname_index, + lemma._lex_id, + head_name, + head_id, + ) + lemma._key = ("%s%%%d:%02d:%02d:%s:%s" % tup).lower() + + # the canonical name is based on the first lemma + lemma_name = synset._lemmas[0]._name.lower() + offsets = self._lemma_pos_offset_map[lemma_name][synset._pos] + sense_index = offsets.index(synset._offset) + tup = lemma_name, synset._pos, sense_index + 1 + synset._name = "%s.%s.%02i" % tup + + return synset + + def synset_from_sense_key(self, sense_key): + """ + Retrieves synset based on a given sense_key. Sense keys can be + obtained from lemma.key() + + From https://wordnet.princeton.edu/documentation/senseidx5wn: + A sense_key is represented as:: + + lemma % lex_sense (e.g. 
'dog%1:18:01::') + + where lex_sense is encoded as:: + + ss_type:lex_filenum:lex_id:head_word:head_id + + :lemma: ASCII text of word/collocation, in lower case + :ss_type: synset type for the sense (1 digit int) + The synset type is encoded as follows:: + + 1 NOUN + 2 VERB + 3 ADJECTIVE + 4 ADVERB + 5 ADJECTIVE SATELLITE + :lex_filenum: name of lexicographer file containing the synset for the sense (2 digit int) + :lex_id: when paired with lemma, uniquely identifies a sense in the lexicographer file (2 digit int) + :head_word: lemma of the first word in satellite's head synset + Only used if sense is in an adjective satellite synset + :head_id: uniquely identifies sense in a lexicographer file when paired with head_word + Only used if head_word is present (2 digit int) + + >>> import nltk + >>> from nltk.corpus import wordnet as wn + >>> print(wn.synset_from_sense_key("drive%1:04:03::")) + Synset('drive.n.06') + + >>> print(wn.synset_from_sense_key("driving%1:04:03::")) + Synset('drive.n.06') + """ + return self.lemma_from_key(sense_key).synset() + + ############################################################# + # Retrieve synsets and lemmas. + ############################################################# + + def synsets(self, lemma, pos=None, lang="eng", check_exceptions=True): + """Load all synsets with a given lemma and part of speech tag. + If no pos is specified, all synsets for all parts of speech + will be loaded. + If lang is specified, all the synsets associated with the lemma name + of that language will be returned. + """ + lemma = lemma.lower() + + if lang == "eng": + get_synset = self.synset_from_pos_and_offset + index = self._lemma_pos_offset_map + if pos is None: + pos = POS_LIST + return [ + get_synset(p, offset) + for p in pos + for form in self._morphy(lemma, p, check_exceptions) + for offset in index[form].get(p, []) + ] + + else: + self._load_lang_data(lang) + synset_list = [] + if lemma in self._lang_data[lang][1]: + for l in self._lang_data[lang][1][lemma]: + if pos is not None and l[-1] != pos: + continue + synset_list.append(self.of2ss(l)) + return synset_list + + def lemmas(self, lemma, pos=None, lang="eng"): + """Return all Lemma objects with a name matching the specified lemma + name and part of speech tag. Matches any part of speech tag if none is + specified.""" + + lemma = lemma.lower() + if lang == "eng": + return [ + lemma_obj + for synset in self.synsets(lemma, pos) + for lemma_obj in synset.lemmas() + if lemma_obj.name().lower() == lemma + ] + + else: + self._load_lang_data(lang) + lemmas = [] + syn = self.synsets(lemma, lang=lang) + for s in syn: + if pos is not None and s.pos() != pos: + continue + for lemma_obj in s.lemmas(lang=lang): + if lemma_obj.name().lower() == lemma: + lemmas.append(lemma_obj) + return lemmas + + def all_lemma_names(self, pos=None, lang="eng"): + """Return all lemma names for all synsets for the given + part of speech tag and language or languages. 
If pos is + not specified, all synsets for all parts of speech will + be used.""" + + if lang == "eng": + if pos is None: + return iter(self._lemma_pos_offset_map) + else: + return ( + lemma + for lemma in self._lemma_pos_offset_map + if pos in self._lemma_pos_offset_map[lemma] + ) + else: + self._load_lang_data(lang) + lemma = [] + for i in self._lang_data[lang][0]: + if pos is not None and i[-1] != pos: + continue + lemma.extend(self._lang_data[lang][0][i]) + + lemma = iter(set(lemma)) + return lemma + + def all_omw_synsets(self, pos=None, lang=None): + if lang not in self.langs(): + return None + self._load_lang_data(lang) + for of in self._lang_data[lang][0]: + if not pos or of[-1] == pos: + ss = self.of2ss(of) + if ss: + yield ss + + # else: + # A few OMW offsets don't exist in Wordnet 3.0. + # warnings.warn(f"Language {lang}: no synset found for {of}") + + def all_synsets(self, pos=None, lang="eng"): + """Iterate over all synsets with a given part of speech tag. + If no pos is specified, all synsets for all parts of speech + will be loaded. + """ + if lang == "eng": + return self.all_eng_synsets(pos=pos) + else: + return self.all_omw_synsets(pos=pos, lang=lang) + + def all_eng_synsets(self, pos=None): + if pos is None: + pos_tags = self._FILEMAP.keys() + else: + pos_tags = [pos] + + cache = self._synset_offset_cache + from_pos_and_line = self._synset_from_pos_and_line + + # generate all synsets for each part of speech + for pos_tag in pos_tags: + # Open the file for reading. Note that we can not re-use + # the file pointers from self._data_file_map here, because + # we're defining an iterator, and those file pointers might + # be moved while we're not looking. + if pos_tag == ADJ_SAT: + pos_file = ADJ + else: + pos_file = pos_tag + fileid = "data.%s" % self._FILEMAP[pos_file] + data_file = self.open(fileid) + + try: + # generate synsets for each line in the POS file + offset = data_file.tell() + line = data_file.readline() + while line: + if not line[0].isspace(): + if offset in cache[pos_tag]: + # See if the synset is cached + synset = cache[pos_tag][offset] + else: + # Otherwise, parse the line + synset = from_pos_and_line(pos_tag, line) + cache[pos_tag][offset] = synset + + # adjective satellites are in the same file as + # adjectives so only yield the synset if it's actually + # a satellite + if pos_tag == ADJ_SAT and synset._pos == ADJ_SAT: + yield synset + # for all other POS tags, yield all synsets (this means + # that adjectives also include adjective satellites) + elif pos_tag != ADJ_SAT: + yield synset + offset = data_file.tell() + line = data_file.readline() + + # close the extra file handle we opened + except: + data_file.close() + raise + else: + data_file.close() + + def words(self, lang="eng"): + """return lemmas of the given language as list of words""" + return self.all_lemma_names(lang=lang) + + def synonyms(self, word, lang="eng"): + """return nested list with the synonyms of the different senses of word in the given language""" + return [ + sorted(list(set(ss.lemma_names(lang=lang)) - {word})) + for ss in self.synsets(word, lang=lang) + ] + + def doc(self, file="README", lang="eng"): + """Return the contents of readme, license or citation file + use lang=lang to get the file for an individual language""" + if lang == "eng": + reader = self + else: + reader = self._omw_reader + if lang in self.langs(): + file = f"{os.path.join(self.provenances[lang],file)}" + try: + with reader.open(file) as fp: + return fp.read() + except: + if lang in self._lang_data: + return 
f"Cannot determine {file} for {lang}" + else: + return f"Language {lang} is not supported." + + def license(self, lang="eng"): + """Return the contents of LICENSE (for omw) + use lang=lang to get the license for an individual language""" + return self.doc(file="LICENSE", lang=lang) + + def readme(self, lang="eng"): + """Return the contents of README (for omw) + use lang=lang to get the readme for an individual language""" + return self.doc(file="README", lang=lang) + + def citation(self, lang="eng"): + """Return the contents of citation.bib file (for omw) + use lang=lang to get the citation for an individual language""" + return self.doc(file="citation.bib", lang=lang) + + ############################################################# + # Misc + ############################################################# + def lemma_count(self, lemma): + """Return the frequency count for this Lemma""" + # Currently, count is only work for English + if lemma._lang != "eng": + return 0 + # open the count file if we haven't already + if self._key_count_file is None: + self._key_count_file = self.open("cntlist.rev") + # find the key in the counts file and return the count + line = _binary_search_file(self._key_count_file, lemma._key) + if line: + return int(line.rsplit(" ", 1)[-1]) + else: + return 0 + + def path_similarity(self, synset1, synset2, verbose=False, simulate_root=True): + return synset1.path_similarity(synset2, verbose, simulate_root) + + path_similarity.__doc__ = Synset.path_similarity.__doc__ + + def lch_similarity(self, synset1, synset2, verbose=False, simulate_root=True): + return synset1.lch_similarity(synset2, verbose, simulate_root) + + lch_similarity.__doc__ = Synset.lch_similarity.__doc__ + + def wup_similarity(self, synset1, synset2, verbose=False, simulate_root=True): + return synset1.wup_similarity(synset2, verbose, simulate_root) + + wup_similarity.__doc__ = Synset.wup_similarity.__doc__ + + def res_similarity(self, synset1, synset2, ic, verbose=False): + return synset1.res_similarity(synset2, ic, verbose) + + res_similarity.__doc__ = Synset.res_similarity.__doc__ + + def jcn_similarity(self, synset1, synset2, ic, verbose=False): + return synset1.jcn_similarity(synset2, ic, verbose) + + jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__ + + def lin_similarity(self, synset1, synset2, ic, verbose=False): + return synset1.lin_similarity(synset2, ic, verbose) + + lin_similarity.__doc__ = Synset.lin_similarity.__doc__ + + ############################################################# + # Morphy + ############################################################# + # Morphy, adapted from Oliver Steele's pywordnet + def morphy(self, form, pos=None, check_exceptions=True): + """ + Find a possible base form for the given form, with the given + part of speech, by checking WordNet's list of exceptional + forms, and by recursively stripping affixes for this part of + speech until a form in WordNet is found. 
+ + >>> from nltk.corpus import wordnet as wn + >>> print(wn.morphy('dogs')) + dog + >>> print(wn.morphy('churches')) + church + >>> print(wn.morphy('aardwolves')) + aardwolf + >>> print(wn.morphy('abaci')) + abacus + >>> wn.morphy('hardrock', wn.ADV) + >>> print(wn.morphy('book', wn.NOUN)) + book + >>> wn.morphy('book', wn.ADJ) + """ + + if pos is None: + morphy = self._morphy + analyses = chain(a for p in POS_LIST for a in morphy(form, p)) + else: + analyses = self._morphy(form, pos, check_exceptions) + + # get the first one we find + first = list(islice(analyses, 1)) + if len(first) == 1: + return first[0] + else: + return None + + MORPHOLOGICAL_SUBSTITUTIONS = { + NOUN: [ + ("s", ""), + ("ses", "s"), + ("ves", "f"), + ("xes", "x"), + ("zes", "z"), + ("ches", "ch"), + ("shes", "sh"), + ("men", "man"), + ("ies", "y"), + ], + VERB: [ + ("s", ""), + ("ies", "y"), + ("es", "e"), + ("es", ""), + ("ed", "e"), + ("ed", ""), + ("ing", "e"), + ("ing", ""), + ], + ADJ: [("er", ""), ("est", ""), ("er", "e"), ("est", "e")], + ADV: [], + } + + MORPHOLOGICAL_SUBSTITUTIONS[ADJ_SAT] = MORPHOLOGICAL_SUBSTITUTIONS[ADJ] + + def _morphy(self, form, pos, check_exceptions=True): + # from jordanbg: + # Given an original string x + # 1. Apply rules once to the input to get y1, y2, y3, etc. + # 2. Return all that are in the database + # 3. If there are no matches, keep applying rules until you either + # find a match or you can't go any further + + exceptions = self._exception_map[pos] + substitutions = self.MORPHOLOGICAL_SUBSTITUTIONS[pos] + + def apply_rules(forms): + return [ + form[: -len(old)] + new + for form in forms + for old, new in substitutions + if form.endswith(old) + ] + + def filter_forms(forms): + result = [] + seen = set() + for form in forms: + if form in self._lemma_pos_offset_map: + if pos in self._lemma_pos_offset_map[form]: + if form not in seen: + result.append(form) + seen.add(form) + return result + + # 0. Check the exception lists + if check_exceptions: + if form in exceptions: + return filter_forms([form] + exceptions[form]) + + # 1. Apply rules once to the input to get y1, y2, y3, etc. + forms = apply_rules([form]) + + # 2. Return all that are in the database (and check the original too) + results = filter_forms([form] + forms) + if results: + return results + + # 3. If there are no matches, keep applying rules until we find a match + while forms: + forms = apply_rules(forms) + results = filter_forms(forms) + if results: + return results + + # Return an empty list if we can't find anything + return [] + + ############################################################# + # Create information content from corpus + ############################################################# + def ic(self, corpus, weight_senses_equally=False, smoothing=1.0): + """ + Creates an information content lookup dictionary from a corpus. + + :type corpus: CorpusReader + :param corpus: The corpus from which we create an information + content dictionary. + :type weight_senses_equally: bool + :param weight_senses_equally: If this is True, gives all + possible senses equal weight rather than dividing by the + number of possible senses. (If a word has 3 synses, each + sense gets 0.3333 per appearance when this is False, 1.0 when + it is true.) 
+ :param smoothing: How much do we smooth synset counts (default is 1.0) + :type smoothing: float + :return: An information content dictionary + """ + counts = FreqDist() + for ww in corpus.words(): + counts[ww] += 1 + + ic = {} + for pp in POS_LIST: + ic[pp] = defaultdict(float) + + # Initialize the counts with the smoothing value + if smoothing > 0.0: + for pp in POS_LIST: + ic[pp][0] = smoothing + for ss in self.all_synsets(): + pos = ss._pos + if pos == ADJ_SAT: + pos = ADJ + ic[pos][ss._offset] = smoothing + + for ww in counts: + possible_synsets = self.synsets(ww) + if len(possible_synsets) == 0: + continue + + # Distribute weight among possible synsets + weight = float(counts[ww]) + if not weight_senses_equally: + weight /= float(len(possible_synsets)) + + for ss in possible_synsets: + pos = ss._pos + if pos == ADJ_SAT: + pos = ADJ + for level in ss._iter_hypernym_lists(): + for hh in level: + ic[pos][hh._offset] += weight + # Add the weight to the root + ic[pos][0] += weight + return ic + + def custom_lemmas(self, tab_file, lang): + """ + Reads a custom tab file containing mappings of lemmas in the given + language to Princeton WordNet 3.0 synset offsets, allowing NLTK's + WordNet functions to then be used with that language. + + See the "Tab files" section at https://omwn.org/omw1.html for + documentation on the Multilingual WordNet tab file format. + + :param tab_file: Tab file as a file or file-like object + :type: lang str + :param: lang ISO 639-3 code of the language of the tab file + """ + lg = lang.split("_")[0] + if len(lg) != 3: + raise ValueError("lang should be a (3 character) ISO 639-3 code") + self._lang_data[lang] = [ + defaultdict(list), + defaultdict(list), + defaultdict(list), + defaultdict(list), + ] + for line in tab_file.readlines(): + if isinstance(line, bytes): + # Support byte-stream files (e.g. 
as returned by Python 2's + # open() function) as well as text-stream ones + line = line.decode("utf-8") + if not line.startswith("#"): + triple = line.strip().split("\t") + if len(triple) < 3: + continue + offset_pos, label = triple[:2] + val = triple[-1] + if self.map30: + if offset_pos in self.map30: + # Map offset_pos to current Wordnet version: + offset_pos = self.map30[offset_pos] + else: + # Some OMW offsets were never in Wordnet: + if ( + offset_pos not in self.nomap + and offset_pos.replace("a", "s") not in self.nomap + ): + warnings.warn( + f"{lang}: invalid offset {offset_pos} in '{line}'" + ) + continue + elif offset_pos[-1] == "a": + wnss = self.of2ss(offset_pos) + if wnss and wnss.pos() == "s": # Wordnet pos is "s" + # Label OMW adjective satellites back to their Wordnet pos ("s") + offset_pos = self.ss2of(wnss) + pair = label.split(":") + attr = pair[-1] + if len(pair) == 1 or pair[0] == lg: + if attr == "lemma": + val = val.strip().replace(" ", "_") + self._lang_data[lang][1][val.lower()].append(offset_pos) + if attr in self.lg_attrs: + self._lang_data[lang][self.lg_attrs.index(attr)][ + offset_pos + ].append(val) + + def disable_custom_lemmas(self, lang): + """prevent synsets from being mistakenly added""" + for n in range(len(self.lg_attrs)): + self._lang_data[lang][n].default_factory = None + + ###################################################################### + # Visualize WordNet relation graphs using Graphviz + ###################################################################### + + def digraph( + self, + inputs, + rel=lambda s: s.hypernyms(), + pos=None, + maxdepth=-1, + shapes=None, + attr=None, + verbose=False, + ): + """ + Produce a graphical representation from 'inputs' (a list of + start nodes, which can be a mix of Synsets, Lemmas and/or words), + and a synset relation, for drawing with the 'dot' graph visualisation + program from the Graphviz package. + + Return a string in the DOT graph file language, which can then be + converted to an image by nltk.parse.dependencygraph.dot2img(dot_string). 
+ + Optional Parameters: + :rel: Wordnet synset relation + :pos: for words, restricts Part of Speech to 'n', 'v', 'a' or 'r' + :maxdepth: limit the longest path + :shapes: dictionary of strings that trigger a specified shape + :attr: dictionary with global graph attributes + :verbose: warn about cycles + + >>> from nltk.corpus import wordnet as wn + >>> print(wn.digraph([wn.synset('dog.n.01')])) + digraph G { + "Synset('animal.n.01')" -> "Synset('organism.n.01')"; + "Synset('canine.n.02')" -> "Synset('carnivore.n.01')"; + "Synset('carnivore.n.01')" -> "Synset('placental.n.01')"; + "Synset('chordate.n.01')" -> "Synset('animal.n.01')"; + "Synset('dog.n.01')" -> "Synset('canine.n.02')"; + "Synset('dog.n.01')" -> "Synset('domestic_animal.n.01')"; + "Synset('domestic_animal.n.01')" -> "Synset('animal.n.01')"; + "Synset('living_thing.n.01')" -> "Synset('whole.n.02')"; + "Synset('mammal.n.01')" -> "Synset('vertebrate.n.01')"; + "Synset('object.n.01')" -> "Synset('physical_entity.n.01')"; + "Synset('organism.n.01')" -> "Synset('living_thing.n.01')"; + "Synset('physical_entity.n.01')" -> "Synset('entity.n.01')"; + "Synset('placental.n.01')" -> "Synset('mammal.n.01')"; + "Synset('vertebrate.n.01')" -> "Synset('chordate.n.01')"; + "Synset('whole.n.02')" -> "Synset('object.n.01')"; + } + + """ + from nltk.util import edge_closure, edges2dot + + synsets = set() + edges = set() + if not shapes: + shapes = dict() + if not attr: + attr = dict() + + def add_lemma(lem): + ss = lem.synset() + synsets.add(ss) + edges.add((lem, ss)) + + for node in inputs: + typ = type(node) + if typ == Synset: + synsets.add(node) + elif typ == Lemma: + add_lemma(node) + elif typ == str: + for lemma in self.lemmas(node, pos): + add_lemma(lemma) + + for ss in synsets: + edges = edges.union(edge_closure(ss, rel, maxdepth, verbose)) + dot_string = edges2dot(sorted(list(edges)), shapes=shapes, attr=attr) + return dot_string + + +###################################################################### +# WordNet Information Content Corpus Reader +###################################################################### + + +class WordNetICCorpusReader(CorpusReader): + """ + A corpus reader for the WordNet information content corpus. + """ + + def __init__(self, root, fileids): + CorpusReader.__init__(self, root, fileids, encoding="utf8") + + # this load function would be more efficient if the data was pickled + # Note that we can't use NLTK's frequency distributions because + # synsets are overlapping (each instance of a synset also counts + # as an instance of its hypernyms) + def ic(self, icfile): + """ + Load an information content file from the wordnet_ic corpus + and return a dictionary. This dictionary has just two keys, + NOUN and VERB, whose values are dictionaries that map from + synsets to information content values. + + :type icfile: str + :param icfile: The name of the wordnet_ic file (e.g. "ic-brown.dat") + :return: An information content dictionary + """ + ic = {} + ic[NOUN] = defaultdict(float) + ic[VERB] = defaultdict(float) + with self.open(icfile) as fp: + for num, line in enumerate(fp): + if num == 0: # skip the header + continue + fields = line.split() + offset = int(fields[0][:-1]) + value = float(fields[1]) + pos = _get_pos(fields[0]) + if len(fields) == 3 and fields[2] == "ROOT": + # Store root count. 
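+ # The total for each part of speech is kept under the pseudo-offset 0;
+ # information_content() later divides a synset's count by icpos[0].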
+ ic[pos][0] += value + if value != 0: + ic[pos][offset] = value + return ic + + +###################################################################### +# Similarity metrics +###################################################################### + +# TODO: Add in the option to manually add a new root node; this will be +# useful for verb similarity as there exist multiple verb taxonomies. + +# More information about the metrics is available at +# http://marimba.d.umn.edu/similarity/measures.html + + +def path_similarity(synset1, synset2, verbose=False, simulate_root=True): + return synset1.path_similarity( + synset2, verbose=verbose, simulate_root=simulate_root + ) + + +def lch_similarity(synset1, synset2, verbose=False, simulate_root=True): + return synset1.lch_similarity(synset2, verbose=verbose, simulate_root=simulate_root) + + +def wup_similarity(synset1, synset2, verbose=False, simulate_root=True): + return synset1.wup_similarity(synset2, verbose=verbose, simulate_root=simulate_root) + + +def res_similarity(synset1, synset2, ic, verbose=False): + return synset1.res_similarity(synset2, ic, verbose=verbose) + + +def jcn_similarity(synset1, synset2, ic, verbose=False): + return synset1.jcn_similarity(synset2, ic, verbose=verbose) + + +def lin_similarity(synset1, synset2, ic, verbose=False): + return synset1.lin_similarity(synset2, ic, verbose=verbose) + + +path_similarity.__doc__ = Synset.path_similarity.__doc__ +lch_similarity.__doc__ = Synset.lch_similarity.__doc__ +wup_similarity.__doc__ = Synset.wup_similarity.__doc__ +res_similarity.__doc__ = Synset.res_similarity.__doc__ +jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__ +lin_similarity.__doc__ = Synset.lin_similarity.__doc__ + + +def _lcs_ic(synset1, synset2, ic, verbose=False): + """ + Get the information content of the least common subsumer that has + the highest information content value. If two nodes have no + explicit common subsumer, assume that they share an artificial + root node that is the hypernym of all explicit roots. + + :type synset1: Synset + :param synset1: First input synset. + :type synset2: Synset + :param synset2: Second input synset. Must be the same part of + speech as the first synset. + :type ic: dict + :param ic: an information content object (as returned by ``load_ic()``). + :return: The information content of the two synsets and their most + informative subsumer + """ + if synset1._pos != synset2._pos: + raise WordNetError( + "Computing the least common subsumer requires " + "%s and %s to have the same part of speech." 
% (synset1, synset2) + ) + + ic1 = information_content(synset1, ic) + ic2 = information_content(synset2, ic) + subsumers = synset1.common_hypernyms(synset2) + if len(subsumers) == 0: + subsumer_ic = 0 + else: + subsumer_ic = max(information_content(s, ic) for s in subsumers) + + if verbose: + print("> LCS Subsumer by content:", subsumer_ic) + + return ic1, ic2, subsumer_ic + + +# Utility functions + + +def information_content(synset, ic): + pos = synset._pos + if pos == ADJ_SAT: + pos = ADJ + try: + icpos = ic[pos] + except KeyError as e: + msg = "Information content file has no entries for part-of-speech: %s" + raise WordNetError(msg % pos) from e + + counts = icpos[synset._offset] + if counts == 0: + return _INF + else: + return -math.log(counts / icpos[0]) + + +# get the part of speech (NOUN or VERB) from the information content record +# (each identifier has a 'n' or 'v' suffix) + + +def _get_pos(field): + if field[-1] == "n": + return NOUN + elif field[-1] == "v": + return VERB + else: + msg = ( + "Unidentified part of speech in WordNet Information Content file " + "for field %s" % field + ) + raise ValueError(msg) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/xmldocs.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/xmldocs.py new file mode 100644 index 0000000000000000000000000000000000000000..1a9b3d001e0e31120ff1a7df266bb4c82b8de360 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/xmldocs.py @@ -0,0 +1,397 @@ +# Natural Language Toolkit: XML Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for corpora whose documents are xml files. + +(note -- not named 'xml' to avoid conflicting w/ standard xml package) +""" + +import codecs +from xml.etree import ElementTree + +from nltk.corpus.reader.api import CorpusReader +from nltk.corpus.reader.util import * +from nltk.data import SeekableUnicodeStreamReader +from nltk.internals import ElementWrapper +from nltk.tokenize import WordPunctTokenizer + + +class XMLCorpusReader(CorpusReader): + """ + Corpus reader for corpora whose documents are xml files. + + Note that the ``XMLCorpusReader`` constructor does not take an + ``encoding`` argument, because the unicode encoding is specified by + the XML files themselves. See the XML specs for more info. + """ + + def __init__(self, root, fileids, wrap_etree=False): + self._wrap_etree = wrap_etree + CorpusReader.__init__(self, root, fileids) + + def xml(self, fileid=None): + # Make sure we have exactly one file -- no concatenating XML. + if fileid is None and len(self._fileids) == 1: + fileid = self._fileids[0] + if not isinstance(fileid, str): + raise TypeError("Expected a single file identifier string") + # Read the XML in using ElementTree. + with self.abspath(fileid).open() as fp: + elt = ElementTree.parse(fp).getroot() + # If requested, wrap it. + if self._wrap_etree: + elt = ElementWrapper(elt) + # Return the ElementTree element. + return elt + + def words(self, fileid=None): + """ + Returns all of the words and punctuation symbols in the specified file + that were in text nodes -- ie, tags are ignored. Like the xml() method, + fileid can only specify one file. 
+ + :return: the given file's text nodes as a list of words and punctuation symbols + :rtype: list(str) + """ + + elt = self.xml(fileid) + encoding = self.encoding(fileid) + word_tokenizer = WordPunctTokenizer() + try: + iterator = elt.getiterator() + except: + iterator = elt.iter() + out = [] + + for node in iterator: + text = node.text + if text is not None: + if isinstance(text, bytes): + text = text.decode(encoding) + toks = word_tokenizer.tokenize(text) + out.extend(toks) + return out + + +class XMLCorpusView(StreamBackedCorpusView): + """ + A corpus view that selects out specified elements from an XML + file, and provides a flat list-like interface for accessing them. + (Note: ``XMLCorpusView`` is not used by ``XMLCorpusReader`` itself, + but may be used by subclasses of ``XMLCorpusReader``.) + + Every XML corpus view has a "tag specification", indicating what + XML elements should be included in the view; and each (non-nested) + element that matches this specification corresponds to one item in + the view. Tag specifications are regular expressions over tag + paths, where a tag path is a list of element tag names, separated + by '/', indicating the ancestry of the element. Some examples: + + - ``'foo'``: A top-level element whose tag is ``foo``. + - ``'foo/bar'``: An element whose tag is ``bar`` and whose parent + is a top-level element whose tag is ``foo``. + - ``'.*/foo'``: An element whose tag is ``foo``, appearing anywhere + in the xml tree. + - ``'.*/(foo|bar)'``: An wlement whose tag is ``foo`` or ``bar``, + appearing anywhere in the xml tree. + + The view items are generated from the selected XML elements via + the method ``handle_elt()``. By default, this method returns the + element as-is (i.e., as an ElementTree object); but it can be + overridden, either via subclassing or via the ``elt_handler`` + constructor parameter. + """ + + #: If true, then display debugging output to stdout when reading + #: blocks. + _DEBUG = False + + #: The number of characters read at a time by this corpus reader. + _BLOCK_SIZE = 1024 + + def __init__(self, fileid, tagspec, elt_handler=None): + """ + Create a new corpus view based on a specified XML file. + + Note that the ``XMLCorpusView`` constructor does not take an + ``encoding`` argument, because the unicode encoding is + specified by the XML files themselves. + + :type tagspec: str + :param tagspec: A tag specification, indicating what XML + elements should be included in the view. Each non-nested + element that matches this specification corresponds to one + item in the view. + + :param elt_handler: A function used to transform each element + to a value for the view. If no handler is specified, then + ``self.handle_elt()`` is called, which returns the element + as an ElementTree object. The signature of elt_handler is:: + + elt_handler(elt, tagspec) -> value + """ + if elt_handler: + self.handle_elt = elt_handler + + self._tagspec = re.compile(tagspec + r"\Z") + """The tag specification for this corpus view.""" + + self._tag_context = {0: ()} + """A dictionary mapping from file positions (as returned by + ``stream.seek()`` to XML contexts. 
An XML context is a + tuple of XML tag names, indicating which tags have not yet + been closed.""" + + encoding = self._detect_encoding(fileid) + StreamBackedCorpusView.__init__(self, fileid, encoding=encoding) + + def _detect_encoding(self, fileid): + if isinstance(fileid, PathPointer): + try: + infile = fileid.open() + s = infile.readline() + finally: + infile.close() + else: + with open(fileid, "rb") as infile: + s = infile.readline() + if s.startswith(codecs.BOM_UTF16_BE): + return "utf-16-be" + if s.startswith(codecs.BOM_UTF16_LE): + return "utf-16-le" + if s.startswith(codecs.BOM_UTF32_BE): + return "utf-32-be" + if s.startswith(codecs.BOM_UTF32_LE): + return "utf-32-le" + if s.startswith(codecs.BOM_UTF8): + return "utf-8" + m = re.match(rb'\s*<\?xml\b.*\bencoding="([^"]+)"', s) + if m: + return m.group(1).decode() + m = re.match(rb"\s*<\?xml\b.*\bencoding='([^']+)'", s) + if m: + return m.group(1).decode() + # No encoding found -- what should the default be? + return "utf-8" + + def handle_elt(self, elt, context): + """ + Convert an element into an appropriate value for inclusion in + the view. Unless overridden by a subclass or by the + ``elt_handler`` constructor argument, this method simply + returns ``elt``. + + :return: The view value corresponding to ``elt``. + + :type elt: ElementTree + :param elt: The element that should be converted. + + :type context: str + :param context: A string composed of element tags separated by + forward slashes, indicating the XML context of the given + element. For example, the string ``'foo/bar/baz'`` + indicates that the element is a ``baz`` element whose + parent is a ``bar`` element and whose grandparent is a + top-level ``foo`` element. + """ + return elt + + #: A regular expression that matches XML fragments that do not + #: contain any un-closed tags. + _VALID_XML_RE = re.compile( + r""" + [^<]* + ( + (() | # comment + () | # doctype decl + (<[^!>][^>]*>)) # tag or PI + [^<]*)* + \Z""", + re.DOTALL | re.VERBOSE, + ) + + #: A regular expression used to extract the tag name from a start tag, + #: end tag, or empty-elt tag string. + _XML_TAG_NAME = re.compile(r"<\s*(?:/\s*)?([^\s>]+)") + + #: A regular expression used to find all start-tags, end-tags, and + #: empty-elt tags in an XML file. This regexp is more lenient than + #: the XML spec -- e.g., it allows spaces in some places where the + #: spec does not. + _XML_PIECE = re.compile( + r""" + # Include these so we can skip them: + (?P )| + (?P )| + (?P <\?.*?\?> )| + (?P ]*(\[[^\]]*])?\s*>)| + # These are the ones we actually care about: + (?P <\s*[^>/\?!\s][^>]*/\s*> )| + (?P <\s*[^>/\?!\s][^>]*> )| + (?P <\s*/[^>/\?!\s][^>]*> )""", + re.DOTALL | re.VERBOSE, + ) + + def _read_xml_fragment(self, stream): + """ + Read a string from the given stream that does not contain any + un-closed tags. In particular, this function first reads a + block from the stream of size ``self._BLOCK_SIZE``. It then + checks if that block contains an un-closed tag. If it does, + then this function either backtracks to the last '<', or reads + another block. + """ + fragment = "" + + if isinstance(stream, SeekableUnicodeStreamReader): + startpos = stream.tell() + while True: + # Read a block and add it to the fragment. + xml_block = stream.read(self._BLOCK_SIZE) + fragment += xml_block + + # Do we have a well-formed xml fragment? + if self._VALID_XML_RE.match(fragment): + return fragment + + # Do we have a fragment that will never be well-formed? 
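+ # If the first angle bracket in the fragment is a ">", there is a stray
+ # closing bracket before any "<", so reading more data can never repair it.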
+ if re.search("[<>]", fragment).group(0) == ">": + pos = stream.tell() - ( + len(fragment) - re.search("[<>]", fragment).end() + ) + raise ValueError('Unexpected ">" near char %s' % pos) + + # End of file? + if not xml_block: + raise ValueError("Unexpected end of file: tag not closed") + + # If not, then we must be in the middle of a <..tag..>. + # If appropriate, backtrack to the most recent '<' + # character. + last_open_bracket = fragment.rfind("<") + if last_open_bracket > 0: + if self._VALID_XML_RE.match(fragment[:last_open_bracket]): + if isinstance(stream, SeekableUnicodeStreamReader): + stream.seek(startpos) + stream.char_seek_forward(last_open_bracket) + else: + stream.seek(-(len(fragment) - last_open_bracket), 1) + return fragment[:last_open_bracket] + + # Otherwise, read another block. (i.e., return to the + # top of the loop.) + + def read_block(self, stream, tagspec=None, elt_handler=None): + """ + Read from ``stream`` until we find at least one element that + matches ``tagspec``, and return the result of applying + ``elt_handler`` to each element found. + """ + if tagspec is None: + tagspec = self._tagspec + if elt_handler is None: + elt_handler = self.handle_elt + + # Use a stack of strings to keep track of our context: + context = list(self._tag_context.get(stream.tell())) + assert context is not None # check this -- could it ever happen? + + elts = [] + + elt_start = None # where does the elt start + elt_depth = None # what context depth + elt_text = "" + + while elts == [] or elt_start is not None: + if isinstance(stream, SeekableUnicodeStreamReader): + startpos = stream.tell() + xml_fragment = self._read_xml_fragment(stream) + + # End of file. + if not xml_fragment: + if elt_start is None: + break + else: + raise ValueError("Unexpected end of file") + + # Process each in the xml fragment. + for piece in self._XML_PIECE.finditer(xml_fragment): + if self._DEBUG: + print("{:>25} {}".format("/".join(context)[-20:], piece.group())) + + if piece.group("START_TAG"): + name = self._XML_TAG_NAME.match(piece.group()).group(1) + # Keep context up-to-date. + context.append(name) + # Is this one of the elts we're looking for? + if elt_start is None: + if re.match(tagspec, "/".join(context)): + elt_start = piece.start() + elt_depth = len(context) + + elif piece.group("END_TAG"): + name = self._XML_TAG_NAME.match(piece.group()).group(1) + # sanity checks: + if not context: + raise ValueError("Unmatched tag " % name) + if name != context[-1]: + raise ValueError(f"Unmatched tag <{context[-1]}>...") + # Is this the end of an element? + if elt_start is not None and elt_depth == len(context): + elt_text += xml_fragment[elt_start : piece.end()] + elts.append((elt_text, "/".join(context))) + elt_start = elt_depth = None + elt_text = "" + # Keep context up-to-date + context.pop() + + elif piece.group("EMPTY_ELT_TAG"): + name = self._XML_TAG_NAME.match(piece.group()).group(1) + if elt_start is None: + if re.match(tagspec, "/".join(context) + "/" + name): + elts.append((piece.group(), "/".join(context) + "/" + name)) + + if elt_start is not None: + # If we haven't found any elements yet, then keep + # looping until we do. + if elts == []: + elt_text += xml_fragment[elt_start:] + elt_start = 0 + + # If we've found at least one element, then try + # backtracking to the start of the element that we're + # inside of. + else: + # take back the last start-tag, and return what + # we've gotten so far (elts is non-empty). 
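+ # Backtracking means: rewind the stream to just before the unfinished
+ # element's start tag and drop its tag (and any deeper tags) from the
+ # context stack, so the next read_block() call sees the element afresh.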
+ if self._DEBUG: + print(" " * 36 + "(backtrack)") + if isinstance(stream, SeekableUnicodeStreamReader): + stream.seek(startpos) + stream.char_seek_forward(elt_start) + else: + stream.seek(-(len(xml_fragment) - elt_start), 1) + context = context[: elt_depth - 1] + elt_start = elt_depth = None + elt_text = "" + + # Update the _tag_context dict. + pos = stream.tell() + if pos in self._tag_context: + assert tuple(context) == self._tag_context[pos] + else: + self._tag_context[pos] = tuple(context) + + return [ + elt_handler( + ElementTree.fromstring(elt.encode("ascii", "xmlcharrefreplace")), + context, + ) + for (elt, context) in elts + ] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py new file mode 100644 index 0000000000000000000000000000000000000000..35bafdfef4f12f934de8e5e4617341fb2ba7b7a8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py @@ -0,0 +1,256 @@ +# Natural Language Toolkit: York-Toronto-Helsinki Parsed Corpus of Old English Prose (YCOE) +# +# Copyright (C) 2001-2015 NLTK Project +# Author: Selina Dennis +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old +English Prose (YCOE), a 1.5 million word syntactically-annotated +corpus of Old English prose texts. The corpus is distributed by the +Oxford Text Archive: http://www.ota.ahds.ac.uk/ It is not included +with NLTK. + +The YCOE corpus is divided into 100 files, each representing +an Old English prose text. Tags used within each text complies +to the YCOE standard: https://www-users.york.ac.uk/~lang22/YCOE/YcoeHome.htm +""" + +import os +import re + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader +from nltk.corpus.reader.tagged import TaggedCorpusReader +from nltk.corpus.reader.util import * +from nltk.tokenize import RegexpTokenizer + + +class YCOECorpusReader(CorpusReader): + """ + Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old + English Prose (YCOE), a 1.5 million word syntactically-annotated + corpus of Old English prose texts. + """ + + def __init__(self, root, encoding="utf8"): + CorpusReader.__init__(self, root, [], encoding) + + self._psd_reader = YCOEParseCorpusReader( + self.root.join("psd"), ".*", ".psd", encoding=encoding + ) + self._pos_reader = YCOETaggedCorpusReader(self.root.join("pos"), ".*", ".pos") + + # Make sure we have a consistent set of items: + documents = {f[:-4] for f in self._psd_reader.fileids()} + if {f[:-4] for f in self._pos_reader.fileids()} != documents: + raise ValueError('Items in "psd" and "pos" ' "subdirectories do not match.") + + fileids = sorted( + ["%s.psd" % doc for doc in documents] + + ["%s.pos" % doc for doc in documents] + ) + CorpusReader.__init__(self, root, fileids, encoding) + self._documents = sorted(documents) + + def documents(self, fileids=None): + """ + Return a list of document identifiers for all documents in + this corpus, or for the documents with the given file(s) if + specified. + """ + if fileids is None: + return self._documents + if isinstance(fileids, str): + fileids = [fileids] + for f in fileids: + if f not in self._fileids: + raise KeyError("File id %s not found" % fileids) + # Strip off the '.pos' and '.psd' extensions. 
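+ # e.g. the fileids "cobede.o2.psd" and "cobede.o2.pos" both map to the
+ # document id "cobede.o2".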
+ return sorted({f[:-4] for f in fileids}) + + def fileids(self, documents=None): + """ + Return a list of file identifiers for the files that make up + this corpus, or that store the given document(s) if specified. + """ + if documents is None: + return self._fileids + elif isinstance(documents, str): + documents = [documents] + return sorted( + set( + ["%s.pos" % doc for doc in documents] + + ["%s.psd" % doc for doc in documents] + ) + ) + + def _getfileids(self, documents, subcorpus): + """ + Helper that selects the appropriate fileids for a given set of + documents from a given subcorpus (pos or psd). + """ + if documents is None: + documents = self._documents + else: + if isinstance(documents, str): + documents = [documents] + for document in documents: + if document not in self._documents: + if document[-4:] in (".pos", ".psd"): + raise ValueError( + "Expected a document identifier, not a file " + "identifier. (Use corpus.documents() to get " + "a list of document identifiers." + ) + else: + raise ValueError("Document identifier %s not found" % document) + return [f"{d}.{subcorpus}" for d in documents] + + # Delegate to one of our two sub-readers: + def words(self, documents=None): + return self._pos_reader.words(self._getfileids(documents, "pos")) + + def sents(self, documents=None): + return self._pos_reader.sents(self._getfileids(documents, "pos")) + + def paras(self, documents=None): + return self._pos_reader.paras(self._getfileids(documents, "pos")) + + def tagged_words(self, documents=None): + return self._pos_reader.tagged_words(self._getfileids(documents, "pos")) + + def tagged_sents(self, documents=None): + return self._pos_reader.tagged_sents(self._getfileids(documents, "pos")) + + def tagged_paras(self, documents=None): + return self._pos_reader.tagged_paras(self._getfileids(documents, "pos")) + + def parsed_sents(self, documents=None): + return self._psd_reader.parsed_sents(self._getfileids(documents, "psd")) + + +class YCOEParseCorpusReader(BracketParseCorpusReader): + """Specialized version of the standard bracket parse corpus reader + that strips out (CODE ...) and (ID ...) nodes.""" + + def _parse(self, t): + t = re.sub(r"(?u)\((CODE|ID)[^\)]*\)", "", t) + if re.match(r"\s*\(\s*\)\s*$", t): + return None + return BracketParseCorpusReader._parse(self, t) + + +class YCOETaggedCorpusReader(TaggedCorpusReader): + def __init__(self, root, items, encoding="utf8"): + gaps_re = r"(?u)(?<=/\.)\s+|\s*\S*_CODE\s*|\s*\S*_ID\s*" + sent_tokenizer = RegexpTokenizer(gaps_re, gaps=True) + TaggedCorpusReader.__init__( + self, root, items, sep="_", sent_tokenizer=sent_tokenizer + ) + + +#: A list of all documents and their titles in ycoe. 
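+#: Keys are the document identifiers accepted by YCOECorpusReader.documents()
+#: and related methods; values are human-readable titles.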
+documents = { + "coadrian.o34": "Adrian and Ritheus", + "coaelhom.o3": "Ælfric, Supplemental Homilies", + "coaelive.o3": "Ælfric's Lives of Saints", + "coalcuin": "Alcuin De virtutibus et vitiis", + "coalex.o23": "Alexander's Letter to Aristotle", + "coapollo.o3": "Apollonius of Tyre", + "coaugust": "Augustine", + "cobede.o2": "Bede's History of the English Church", + "cobenrul.o3": "Benedictine Rule", + "coblick.o23": "Blickling Homilies", + "coboeth.o2": "Boethius' Consolation of Philosophy", + "cobyrhtf.o3": "Byrhtferth's Manual", + "cocanedgD": "Canons of Edgar (D)", + "cocanedgX": "Canons of Edgar (X)", + "cocathom1.o3": "Ælfric's Catholic Homilies I", + "cocathom2.o3": "Ælfric's Catholic Homilies II", + "cochad.o24": "Saint Chad", + "cochdrul": "Chrodegang of Metz, Rule", + "cochristoph": "Saint Christopher", + "cochronA.o23": "Anglo-Saxon Chronicle A", + "cochronC": "Anglo-Saxon Chronicle C", + "cochronD": "Anglo-Saxon Chronicle D", + "cochronE.o34": "Anglo-Saxon Chronicle E", + "cocura.o2": "Cura Pastoralis", + "cocuraC": "Cura Pastoralis (Cotton)", + "codicts.o34": "Dicts of Cato", + "codocu1.o1": "Documents 1 (O1)", + "codocu2.o12": "Documents 2 (O1/O2)", + "codocu2.o2": "Documents 2 (O2)", + "codocu3.o23": "Documents 3 (O2/O3)", + "codocu3.o3": "Documents 3 (O3)", + "codocu4.o24": "Documents 4 (O2/O4)", + "coeluc1": "Honorius of Autun, Elucidarium 1", + "coeluc2": "Honorius of Autun, Elucidarium 1", + "coepigen.o3": "Ælfric's Epilogue to Genesis", + "coeuphr": "Saint Euphrosyne", + "coeust": "Saint Eustace and his companions", + "coexodusP": "Exodus (P)", + "cogenesiC": "Genesis (C)", + "cogregdC.o24": "Gregory's Dialogues (C)", + "cogregdH.o23": "Gregory's Dialogues (H)", + "coherbar": "Pseudo-Apuleius, Herbarium", + "coinspolD.o34": "Wulfstan's Institute of Polity (D)", + "coinspolX": "Wulfstan's Institute of Polity (X)", + "cojames": "Saint James", + "colacnu.o23": "Lacnunga", + "colaece.o2": "Leechdoms", + "colaw1cn.o3": "Laws, Cnut I", + "colaw2cn.o3": "Laws, Cnut II", + "colaw5atr.o3": "Laws, Æthelred V", + "colaw6atr.o3": "Laws, Æthelred VI", + "colawaf.o2": "Laws, Alfred", + "colawafint.o2": "Alfred's Introduction to Laws", + "colawger.o34": "Laws, Gerefa", + "colawine.ox2": "Laws, Ine", + "colawnorthu.o3": "Northumbra Preosta Lagu", + "colawwllad.o4": "Laws, William I, Lad", + "coleofri.o4": "Leofric", + "colsigef.o3": "Ælfric's Letter to Sigefyrth", + "colsigewB": "Ælfric's Letter to Sigeweard (B)", + "colsigewZ.o34": "Ælfric's Letter to Sigeweard (Z)", + "colwgeat": "Ælfric's Letter to Wulfgeat", + "colwsigeT": "Ælfric's Letter to Wulfsige (T)", + "colwsigeXa.o34": "Ælfric's Letter to Wulfsige (Xa)", + "colwstan1.o3": "Ælfric's Letter to Wulfstan I", + "colwstan2.o3": "Ælfric's Letter to Wulfstan II", + "comargaC.o34": "Saint Margaret (C)", + "comargaT": "Saint Margaret (T)", + "comart1": "Martyrology, I", + "comart2": "Martyrology, II", + "comart3.o23": "Martyrology, III", + "comarvel.o23": "Marvels of the East", + "comary": "Mary of Egypt", + "coneot": "Saint Neot", + "conicodA": "Gospel of Nicodemus (A)", + "conicodC": "Gospel of Nicodemus (C)", + "conicodD": "Gospel of Nicodemus (D)", + "conicodE": "Gospel of Nicodemus (E)", + "coorosiu.o2": "Orosius", + "cootest.o3": "Heptateuch", + "coprefcath1.o3": "Ælfric's Preface to Catholic Homilies I", + "coprefcath2.o3": "Ælfric's Preface to Catholic Homilies II", + "coprefcura.o2": "Preface to the Cura Pastoralis", + "coprefgen.o3": "Ælfric's Preface to Genesis", + "copreflives.o3": "Ælfric's Preface to Lives of 
Saints", + "coprefsolilo": "Preface to Augustine's Soliloquies", + "coquadru.o23": "Pseudo-Apuleius, Medicina de quadrupedibus", + "corood": "History of the Holy Rood-Tree", + "cosevensl": "Seven Sleepers", + "cosolilo": "St. Augustine's Soliloquies", + "cosolsat1.o4": "Solomon and Saturn I", + "cosolsat2": "Solomon and Saturn II", + "cotempo.o3": "Ælfric's De Temporibus Anni", + "coverhom": "Vercelli Homilies", + "coverhomE": "Vercelli Homilies (E)", + "coverhomL": "Vercelli Homilies (L)", + "covinceB": "Saint Vincent (Bodley 343)", + "covinsal": "Vindicta Salvatoris", + "cowsgosp.o3": "West-Saxon Gospels", + "cowulf.o34": "Wulfstan's Homilies", +} diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/util.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/util.py new file mode 100644 index 0000000000000000000000000000000000000000..29a63574264c4859081ef8e36e26d9382f5b087f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/util.py @@ -0,0 +1,154 @@ +# Natural Language Toolkit: Corpus Reader Utility Functions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +###################################################################### +# { Lazy Corpus Loader +###################################################################### + +import gc +import re + +import nltk + +TRY_ZIPFILE_FIRST = False + + +class LazyCorpusLoader: + """ + To see the API documentation for this lazily loaded corpus, first + run corpus.ensure_loaded(), and then run help(this_corpus). + + LazyCorpusLoader is a proxy object which is used to stand in for a + corpus object before the corpus is loaded. This allows NLTK to + create an object for each corpus, but defer the costs associated + with loading those corpora until the first time that they're + actually accessed. + + The first time this object is accessed in any way, it will load + the corresponding corpus, and transform itself into that corpus + (by modifying its own ``__class__`` and ``__dict__`` attributes). + + If the corpus can not be found, then accessing this object will + raise an exception, displaying installation instructions for the + NLTK data package. Once they've properly installed the data + package (or modified ``nltk.data.path`` to point to its location), + they can then use the corpus object without restarting python. + + :param name: The name of the corpus + :type name: str + :param reader_cls: The specific CorpusReader class, e.g. PlaintextCorpusReader, WordListCorpusReader + :type reader: nltk.corpus.reader.api.CorpusReader + :param nltk_data_subdir: The subdirectory where the corpus is stored. + :type nltk_data_subdir: str + :param `*args`: Any other non-keywords arguments that `reader_cls` might need. + :param `**kwargs`: Any other keywords arguments that `reader_cls` might need. + """ + + def __init__(self, name, reader_cls, *args, **kwargs): + from nltk.corpus.reader.api import CorpusReader + + assert issubclass(reader_cls, CorpusReader) + self.__name = self.__name__ = name + self.__reader_cls = reader_cls + # If nltk_data_subdir is set explicitly + if "nltk_data_subdir" in kwargs: + # Use the specified subdirectory path + self.subdir = kwargs["nltk_data_subdir"] + # Pops the `nltk_data_subdir` argument, we don't need it anymore. 
+ kwargs.pop("nltk_data_subdir", None) + else: # Otherwise use 'nltk_data/corpora' + self.subdir = "corpora" + self.__args = args + self.__kwargs = kwargs + + def __load(self): + # Find the corpus root directory. + zip_name = re.sub(r"(([^/]+)(/.*)?)", r"\2.zip/\1/", self.__name) + if TRY_ZIPFILE_FIRST: + try: + root = nltk.data.find(f"{self.subdir}/{zip_name}") + except LookupError as e: + try: + root = nltk.data.find(f"{self.subdir}/{self.__name}") + except LookupError: + raise e + else: + try: + root = nltk.data.find(f"{self.subdir}/{self.__name}") + except LookupError as e: + try: + root = nltk.data.find(f"{self.subdir}/{zip_name}") + except LookupError: + raise e + + # Load the corpus. + corpus = self.__reader_cls(root, *self.__args, **self.__kwargs) + + # This is where the magic happens! Transform ourselves into + # the corpus by modifying our own __dict__ and __class__ to + # match that of the corpus. + + args, kwargs = self.__args, self.__kwargs + name, reader_cls = self.__name, self.__reader_cls + + self.__dict__ = corpus.__dict__ + self.__class__ = corpus.__class__ + + # _unload support: assign __dict__ and __class__ back, then do GC. + # after reassigning __dict__ there shouldn't be any references to + # corpus data so the memory should be deallocated after gc.collect() + def _unload(self): + lazy_reader = LazyCorpusLoader(name, reader_cls, *args, **kwargs) + self.__dict__ = lazy_reader.__dict__ + self.__class__ = lazy_reader.__class__ + gc.collect() + + self._unload = _make_bound_method(_unload, self) + + def __getattr__(self, attr): + + # Fix for inspect.isclass under Python 2.6 + # (see https://bugs.python.org/issue1225107). + # Without this fix tests may take extra 1.5GB RAM + # because all corpora gets loaded during test collection. + if attr == "__bases__": + raise AttributeError("LazyCorpusLoader object has no attribute '__bases__'") + + self.__load() + # This looks circular, but its not, since __load() changes our + # __class__ to something new: + return getattr(self, attr) + + def __repr__(self): + return "<{} in {!r} (not loaded yet)>".format( + self.__reader_cls.__name__, + ".../corpora/" + self.__name, + ) + + def _unload(self): + # If an exception occurs during corpus loading then + # '_unload' method may be unattached, so __getattr__ can be called; + # we shouldn't trigger corpus loading again in this case. + pass + + +def _make_bound_method(func, self): + """ + Magic for creating bound methods (used for _unload). 
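    On Python 3 this is essentially equivalent to ``types.MethodType(func, self)``;
    the ``try``/``except TypeError`` below only exists so that the same call also
    works with the older three-argument (Python 2) bound-method constructor.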
+ """ + + class Foo: + def meth(self): + pass + + f = Foo() + bound_method = type(f.meth) + + try: + return bound_method(func, self, self.__class__) + except TypeError: # python3 + return bound_method(func, self) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2dbe3065ce80b77e51e3d594025138fd50d98d5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfb43e2974ea132cf9457ba3a659657b9a85be1c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92defa0d21333048ea2cd029c3c5ffa75cbb5468 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b0e2ce0ba0e3098db83a3d88f01340a8f8e7adf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3f537dd6c7a9badc43313a8d2b4c5efed9b1b6ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__init__.py @@ -0,0 +1,184 @@ +# Natural Language Toolkit: Taggers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT +""" +NLTK Taggers + +This package contains classes and interfaces for part-of-speech +tagging, or simply "tagging". + +A "tag" is a case-sensitive string that specifies some property of a token, +such as its part of speech. Tagged tokens are encoded as tuples +``(tag, token)``. For example, the following tagged token combines +the word ``'fly'`` with a noun part of speech tag (``'NN'``): + + >>> tagged_tok = ('fly', 'NN') + +An off-the-shelf tagger is available for English. It uses the Penn Treebank tagset: + + >>> from nltk import pos_tag, word_tokenize + >>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE + [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'), + ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')] + +A Russian tagger is also available if you specify lang="rus". 
It uses +the Russian National Corpus tagset: + + >>> pos_tag(word_tokenize("Илья оторопел и дважды перечитал бумажку."), lang='rus') # doctest: +SKIP + [('Илья', 'S'), ('оторопел', 'V'), ('и', 'CONJ'), ('дважды', 'ADV'), ('перечитал', 'V'), + ('бумажку', 'S'), ('.', 'NONLEX')] + +This package defines several taggers, which take a list of tokens, +assign a tag to each one, and return the resulting list of tagged tokens. +Most of the taggers are built automatically based on a training corpus. +For example, the unigram tagger tags each word *w* by checking what +the most frequent tag for *w* was in a training corpus: + + >>> from nltk.corpus import brown + >>> from nltk.tag import UnigramTagger + >>> tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500]) + >>> sent = ['Mitchell', 'decried', 'the', 'high', 'rate', 'of', 'unemployment'] + >>> for word, tag in tagger.tag(sent): + ... print(word, '->', tag) + Mitchell -> NP + decried -> None + the -> AT + high -> JJ + rate -> NN + of -> IN + unemployment -> None + +Note that words that the tagger has not seen during training receive a tag +of ``None``. + +We evaluate a tagger on data that was not seen during training: + + >>> round(tagger.accuracy(brown.tagged_sents(categories='news')[500:600]), 3) + 0.735 + +For more information, please consult chapter 5 of the NLTK Book. + +isort:skip_file +""" + +from nltk.tag.api import TaggerI +from nltk.tag.util import str2tuple, tuple2str, untag +from nltk.tag.sequential import ( + SequentialBackoffTagger, + ContextTagger, + DefaultTagger, + NgramTagger, + UnigramTagger, + BigramTagger, + TrigramTagger, + AffixTagger, + RegexpTagger, + ClassifierBasedTagger, + ClassifierBasedPOSTagger, +) +from nltk.tag.brill import BrillTagger +from nltk.tag.brill_trainer import BrillTaggerTrainer +from nltk.tag.tnt import TnT +from nltk.tag.hunpos import HunposTagger +from nltk.tag.stanford import StanfordTagger, StanfordPOSTagger, StanfordNERTagger +from nltk.tag.hmm import HiddenMarkovModelTagger, HiddenMarkovModelTrainer +from nltk.tag.senna import SennaTagger, SennaChunkTagger, SennaNERTagger +from nltk.tag.mapping import tagset_mapping, map_tag +from nltk.tag.crf import CRFTagger +from nltk.tag.perceptron import PerceptronTagger + +from nltk.data import load, find + +RUS_PICKLE = ( + "taggers/averaged_perceptron_tagger_ru/averaged_perceptron_tagger_ru.pickle" +) + + +def _get_tagger(lang=None): + if lang == "rus": + tagger = PerceptronTagger(False) + ap_russian_model_loc = "file:" + str(find(RUS_PICKLE)) + tagger.load(ap_russian_model_loc) + else: + tagger = PerceptronTagger() + return tagger + + +def _pos_tag(tokens, tagset=None, tagger=None, lang=None): + # Currently only supports English and Russian. + if lang not in ["eng", "rus"]: + raise NotImplementedError( + "Currently, NLTK pos_tag only supports English and Russian " + "(i.e. lang='eng' or lang='rus')" + ) + # Throws Error if tokens is of string type + elif isinstance(tokens, str): + raise TypeError("tokens: expected a list of strings, got a string") + + else: + tagged_tokens = tagger.tag(tokens) + if tagset: # Maps to the specified tagset. 
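            # (Editorial note: ``map_tag`` translates tags between tagsets; for
            # example, map_tag("en-ptb", "universal", "NNP") gives "NOUN" and
            # map_tag("en-ptb", "universal", "VBZ") gives "VERB".)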
+ if lang == "eng": + tagged_tokens = [ + (token, map_tag("en-ptb", tagset, tag)) + for (token, tag) in tagged_tokens + ] + elif lang == "rus": + # Note that the new Russian pos tags from the model contains suffixes, + # see https://github.com/nltk/nltk/issues/2151#issuecomment-430709018 + tagged_tokens = [ + (token, map_tag("ru-rnc-new", tagset, tag.partition("=")[0])) + for (token, tag) in tagged_tokens + ] + return tagged_tokens + + +def pos_tag(tokens, tagset=None, lang="eng"): + """ + Use NLTK's currently recommended part of speech tagger to + tag the given list of tokens. + + >>> from nltk.tag import pos_tag + >>> from nltk.tokenize import word_tokenize + >>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE + [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'), + ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')] + >>> pos_tag(word_tokenize("John's big idea isn't all that bad."), tagset='universal') # doctest: +NORMALIZE_WHITESPACE + [('John', 'NOUN'), ("'s", 'PRT'), ('big', 'ADJ'), ('idea', 'NOUN'), ('is', 'VERB'), + ("n't", 'ADV'), ('all', 'DET'), ('that', 'DET'), ('bad', 'ADJ'), ('.', '.')] + + NB. Use `pos_tag_sents()` for efficient tagging of more than one sentence. + + :param tokens: Sequence of tokens to be tagged + :type tokens: list(str) + :param tagset: the tagset to be used, e.g. universal, wsj, brown + :type tagset: str + :param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian + :type lang: str + :return: The tagged tokens + :rtype: list(tuple(str, str)) + """ + tagger = _get_tagger(lang) + return _pos_tag(tokens, tagset, tagger, lang) + + +def pos_tag_sents(sentences, tagset=None, lang="eng"): + """ + Use NLTK's currently recommended part of speech tagger to tag the + given list of sentences, each consisting of a list of tokens. + + :param sentences: List of sentences to be tagged + :type sentences: list(list(str)) + :param tagset: the tagset to be used, e.g. universal, wsj, brown + :type tagset: str + :param lang: the ISO 639 code of the language, e.g. 
'eng' for English, 'rus' for Russian + :type lang: str + :return: The list of tagged sentences + :rtype: list(list(tuple(str, str))) + """ + tagger = _get_tagger(lang) + return [_pos_tag(sent, tagset, tagger, lang) for sent in sentences] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc961563198ea6b3e63390a6c85ca947d92805bb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffd9e8fa67971347c233d54b647acd87059b69bf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8f31906f0a01763f130b2c41fa82b70a6b42246 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f189507b70e00ec66d2abcf89e830b22ea82ff3b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6273f53022f39231940662dfcff5b9eeb43db28 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a587500f088fc1e143fbde3ec2e53dce02bd5224 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71cbc303826ff9dfb94d909d7779a90b805b1379 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afa0929a154127cc3608be6f6d4d9fbb6251ebce Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc 
differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb73973ad3a09c0bf1fbd0943dfafc452b46bb7d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ef48eff79b3a25402b009e01d4dacd88da2c5c7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..457daf85b60ee43f0cab95ec162923d9c7be36e3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d24a6b8f28a5c073cb6734c60469b72204c7edc0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/brill_trainer.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/brill_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..236fd9858e755b501f3a8f384b68a383b6902f99 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/brill_trainer.py @@ -0,0 +1,629 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2013 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import bisect +import textwrap +from collections import defaultdict + +from nltk.tag import BrillTagger, untag + +###################################################################### +# Brill Tagger Trainer +###################################################################### + + +class BrillTaggerTrainer: + """ + A trainer for tbl taggers. 
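    ("tbl" here refers to transformation-based learning, i.e. the Brill method;
    training is driven by the ``train()`` method below, which returns a
    ``BrillTagger``.)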
+ """ + + def __init__( + self, initial_tagger, templates, trace=0, deterministic=None, ruleformat="str" + ): + """ + Construct a Brill tagger from a baseline tagger and a + set of templates + + :param initial_tagger: the baseline tagger + :type initial_tagger: Tagger + :param templates: templates to be used in training + :type templates: list of Templates + :param trace: verbosity level + :type trace: int + :param deterministic: if True, adjudicate ties deterministically + :type deterministic: bool + :param ruleformat: format of reported Rules + :type ruleformat: str + :return: An untrained BrillTagger + :rtype: BrillTagger + """ + + if deterministic is None: + deterministic = trace > 0 + self._initial_tagger = initial_tagger + self._templates = templates + self._trace = trace + self._deterministic = deterministic + self._ruleformat = ruleformat + + self._tag_positions = None + """Mapping from tags to lists of positions that use that tag.""" + + self._rules_by_position = None + """Mapping from positions to the set of rules that are known + to occur at that position. Position is (sentnum, wordnum). + Initially, this will only contain positions where each rule + applies in a helpful way; but when we examine a rule, we'll + extend this list to also include positions where each rule + applies in a harmful or neutral way.""" + + self._positions_by_rule = None + """Mapping from rule to position to effect, specifying the + effect that each rule has on the overall score, at each + position. Position is (sentnum, wordnum); and effect is + -1, 0, or 1. As with _rules_by_position, this mapping starts + out only containing rules with positive effects; but when + we examine a rule, we'll extend this mapping to include + the positions where the rule is harmful or neutral.""" + + self._rules_by_score = None + """Mapping from scores to the set of rules whose effect on the + overall score is upper bounded by that score. Invariant: + rulesByScore[s] will contain r iff the sum of + _positions_by_rule[r] is s.""" + + self._rule_scores = None + """Mapping from rules to upper bounds on their effects on the + overall score. This is the inverse mapping to _rules_by_score. + Invariant: ruleScores[r] = sum(_positions_by_rule[r])""" + + self._first_unknown_position = None + """Mapping from rules to the first position where we're unsure + if the rule applies. This records the next position we + need to check to see if the rule messed anything up.""" + + # Training + + def train(self, train_sents, max_rules=200, min_score=2, min_acc=None): + r""" + Trains the Brill tagger on the corpus *train_sents*, + producing at most *max_rules* transformations, each of which + reduces the net number of errors in the corpus by at least + *min_score*, and each of which has accuracy not lower than + *min_acc*. + + >>> # Relevant imports + >>> from nltk.tbl.template import Template + >>> from nltk.tag.brill import Pos, Word + >>> from nltk.tag import untag, RegexpTagger, BrillTaggerTrainer + + >>> # Load some data + >>> from nltk.corpus import treebank + >>> training_data = treebank.tagged_sents()[:100] + >>> baseline_data = treebank.tagged_sents()[100:200] + >>> gold_data = treebank.tagged_sents()[200:300] + >>> testing_data = [untag(s) for s in gold_data] + + >>> backoff = RegexpTagger([ + ... (r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), # cardinal numbers + ... (r'(The|the|A|a|An|an)$', 'AT'), # articles + ... (r'.*able$', 'JJ'), # adjectives + ... (r'.*ness$', 'NN'), # nouns formed from adjectives + ... (r'.*ly$', 'RB'), # adverbs + ... 
(r'.*s$', 'NNS'), # plural nouns + ... (r'.*ing$', 'VBG'), # gerunds + ... (r'.*ed$', 'VBD'), # past tense verbs + ... (r'.*', 'NN') # nouns (default) + ... ]) + + >>> baseline = backoff #see NOTE1 + >>> baseline.accuracy(gold_data) #doctest: +ELLIPSIS + 0.243... + + >>> # Set up templates + >>> Template._cleartemplates() #clear any templates created in earlier tests + >>> templates = [Template(Pos([-1])), Template(Pos([-1]), Word([0]))] + + >>> # Construct a BrillTaggerTrainer + >>> tt = BrillTaggerTrainer(baseline, templates, trace=3) + + >>> tagger1 = tt.train(training_data, max_rules=10) + TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: None) + Finding initial useful rules... + Found 847 useful rules. + + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e + ------------------+------------------------------------------------------- + 132 132 0 0 | AT->DT if Pos:NN@[-1] + 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0] + 69 69 0 0 | NN->. if Pos:NN@[-1] & Word:.@[0] + 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0] + 47 63 16 162 | NN->IN if Pos:NNS@[-1] + 33 33 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0] + 26 26 0 0 | IN->. if Pos:NNS@[-1] & Word:.@[0] + 24 24 0 0 | IN->, if Pos:NNS@[-1] & Word:,@[0] + 22 27 5 24 | NN->-NONE- if Pos:VBD@[-1] + 17 17 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0] + + >>> tagger1.rules()[1:3] + (Rule('001', 'NN', ',', [(Pos([-1]),'NN'), (Word([0]),',')]), Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')])) + + >>> train_stats = tagger1.train_stats() + >>> [train_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']] + [1776, 1270, [132, 85, 69, 51, 47, 33, 26, 24, 22, 17]] + + >>> tagger1.print_template_statistics(printunused=False) + TEMPLATE STATISTICS (TRAIN) 2 templates, 10 rules) + TRAIN ( 2417 tokens) initial 1776 0.2652 final: 1270 0.4746 + #ID | Score (train) | #Rules | Template + -------------------------------------------- + 001 | 305 0.603 | 7 0.700 | Template(Pos([-1]),Word([0])) + 000 | 201 0.397 | 3 0.300 | Template(Pos([-1])) + + + + >>> round(tagger1.accuracy(gold_data),5) + 0.43834 + + >>> tagged, test_stats = tagger1.batch_tag_incremental(testing_data, gold_data) + + >>> tagged[33][12:] == [('foreign', 'IN'), ('debt', 'NN'), ('of', 'IN'), ('$', 'NN'), ('64', 'CD'), + ... ('billion', 'NN'), ('*U*', 'NN'), ('--', 'NN'), ('the', 'DT'), ('third-highest', 'NN'), ('in', 'NN'), + ... ('the', 'DT'), ('developing', 'VBG'), ('world', 'NN'), ('.', '.')] + True + + >>> [test_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']] + [1859, 1380, [100, 85, 67, 58, 27, 36, 27, 16, 31, 32]] + + >>> # A high-accuracy tagger + >>> tagger2 = tt.train(training_data, max_rules=10, min_acc=0.99) + TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: 0.99) + Finding initial useful rules... + Found 847 useful rules. + + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e + ------------------+------------------------------------------------------- + 132 132 0 0 | AT->DT if Pos:NN@[-1] + 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0] + 69 69 0 0 | NN->. 
if Pos:NN@[-1] & Word:.@[0] + 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0] + 36 36 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0] + 26 26 0 0 | NN->. if Pos:NNS@[-1] & Word:.@[0] + 24 24 0 0 | NN->, if Pos:NNS@[-1] & Word:,@[0] + 19 19 0 6 | NN->VB if Pos:TO@[-1] + 18 18 0 0 | CD->-NONE- if Pos:NN@[-1] & Word:0@[0] + 18 18 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0] + + >>> round(tagger2.accuracy(gold_data), 8) + 0.43996744 + + >>> tagger2.rules()[2:4] + (Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')]), Rule('001', 'NN', 'IN', [(Pos([-1]),'NN'), (Word([0]),'of')])) + + # NOTE1: (!!FIXME) A far better baseline uses nltk.tag.UnigramTagger, + # with a RegexpTagger only as backoff. For instance, + # >>> baseline = UnigramTagger(baseline_data, backoff=backoff) + # However, as of Nov 2013, nltk.tag.UnigramTagger does not yield consistent results + # between python versions. The simplistic backoff above is a workaround to make doctests + # get consistent input. + + :param train_sents: training data + :type train_sents: list(list(tuple)) + :param max_rules: output at most max_rules rules + :type max_rules: int + :param min_score: stop training when no rules better than min_score can be found + :type min_score: int + :param min_acc: discard any rule with lower accuracy than min_acc + :type min_acc: float or None + :return: the learned tagger + :rtype: BrillTagger + """ + # FIXME: several tests are a bit too dependent on tracing format + # FIXME: tests in trainer.fast and trainer.brillorig are exact duplicates + + # Basic idea: Keep track of the rules that apply at each position. + # And keep track of the positions to which each rule applies. + + # Create a new copy of the training corpus, and run the + # initial tagger on it. We will progressively update this + # test corpus to look more like the training corpus. + test_sents = [ + list(self._initial_tagger.tag(untag(sent))) for sent in train_sents + ] + + # Collect some statistics on the training process + trainstats = {} + trainstats["min_acc"] = min_acc + trainstats["min_score"] = min_score + trainstats["tokencount"] = sum(len(t) for t in test_sents) + trainstats["sequencecount"] = len(test_sents) + trainstats["templatecount"] = len(self._templates) + trainstats["rulescores"] = [] + trainstats["initialerrors"] = sum( + tag[1] != truth[1] + for paired in zip(test_sents, train_sents) + for (tag, truth) in zip(*paired) + ) + trainstats["initialacc"] = ( + 1 - trainstats["initialerrors"] / trainstats["tokencount"] + ) + if self._trace > 0: + print( + "TBL train (fast) (seqs: {sequencecount}; tokens: {tokencount}; " + "tpls: {templatecount}; min score: {min_score}; min acc: {min_acc})".format( + **trainstats + ) + ) + + # Initialize our mappings. This will find any errors made + # by the initial tagger, and use those to generate repair + # rules, which are added to the rule mappings. + if self._trace: + print("Finding initial useful rules...") + self._init_mappings(test_sents, train_sents) + if self._trace: + print(f" Found {len(self._rule_scores)} useful rules.") + + # Let the user know what we're up to. + if self._trace > 2: + self._trace_header() + elif self._trace == 1: + print("Selecting rules...") + + # Repeatedly select the best rule, and add it to `rules`. + rules = [] + try: + while len(rules) < max_rules: + # Find the best rule, and add it to our rule list. 
+ rule = self._best_rule(train_sents, test_sents, min_score, min_acc) + if rule: + rules.append(rule) + score = self._rule_scores[rule] + trainstats["rulescores"].append(score) + else: + break # No more good rules left! + + # Report the rule that we found. + if self._trace > 1: + self._trace_rule(rule) + + # Apply the new rule at the relevant sites + self._apply_rule(rule, test_sents) + + # Update _tag_positions[rule.original_tag] and + # _tag_positions[rule.replacement_tag] for the affected + # positions (i.e., self._positions_by_rule[rule]). + self._update_tag_positions(rule) + + # Update rules that were affected by the change. + self._update_rules(rule, train_sents, test_sents) + + # The user can cancel training manually: + except KeyboardInterrupt: + print(f"Training stopped manually -- {len(rules)} rules found") + + # Discard our tag position mapping & rule mappings. + self._clean() + trainstats["finalerrors"] = trainstats["initialerrors"] - sum( + trainstats["rulescores"] + ) + trainstats["finalacc"] = ( + 1 - trainstats["finalerrors"] / trainstats["tokencount"] + ) + # Create and return a tagger from the rules we found. + return BrillTagger(self._initial_tagger, rules, trainstats) + + def _init_mappings(self, test_sents, train_sents): + """ + Initialize the tag position mapping & the rule related + mappings. For each error in test_sents, find new rules that + would correct them, and add them to the rule mappings. + """ + self._tag_positions = defaultdict(list) + self._rules_by_position = defaultdict(set) + self._positions_by_rule = defaultdict(dict) + self._rules_by_score = defaultdict(set) + self._rule_scores = defaultdict(int) + self._first_unknown_position = defaultdict(int) + # Scan through the corpus, initializing the tag_positions + # mapping and all the rule-related mappings. + for sentnum, sent in enumerate(test_sents): + for wordnum, (word, tag) in enumerate(sent): + + # Initialize tag_positions + self._tag_positions[tag].append((sentnum, wordnum)) + + # If it's an error token, update the rule-related mappings. + correct_tag = train_sents[sentnum][wordnum][1] + if tag != correct_tag: + for rule in self._find_rules(sent, wordnum, correct_tag): + self._update_rule_applies(rule, sentnum, wordnum, train_sents) + + def _clean(self): + self._tag_positions = None + self._rules_by_position = None + self._positions_by_rule = None + self._rules_by_score = None + self._rule_scores = None + self._first_unknown_position = None + + def _find_rules(self, sent, wordnum, new_tag): + """ + Use the templates to find rules that apply at index *wordnum* + in the sentence *sent* and generate the tag *new_tag*. + """ + for template in self._templates: + yield from template.applicable_rules(sent, wordnum, new_tag) + + def _update_rule_applies(self, rule, sentnum, wordnum, train_sents): + """ + Update the rule data tables to reflect the fact that + *rule* applies at the position *(sentnum, wordnum)*. + """ + pos = sentnum, wordnum + + # If the rule is already known to apply here, ignore. + # (This only happens if the position's tag hasn't changed.) + if pos in self._positions_by_rule[rule]: + return + + # Update self._positions_by_rule. 
+ correct_tag = train_sents[sentnum][wordnum][1] + if rule.replacement_tag == correct_tag: + self._positions_by_rule[rule][pos] = 1 + elif rule.original_tag == correct_tag: + self._positions_by_rule[rule][pos] = -1 + else: # was wrong, remains wrong + self._positions_by_rule[rule][pos] = 0 + + # Update _rules_by_position + self._rules_by_position[pos].add(rule) + + # Update _rule_scores. + old_score = self._rule_scores[rule] + self._rule_scores[rule] += self._positions_by_rule[rule][pos] + + # Update _rules_by_score. + self._rules_by_score[old_score].discard(rule) + self._rules_by_score[self._rule_scores[rule]].add(rule) + + def _update_rule_not_applies(self, rule, sentnum, wordnum): + """ + Update the rule data tables to reflect the fact that *rule* + does not apply at the position *(sentnum, wordnum)*. + """ + pos = sentnum, wordnum + + # Update _rule_scores. + old_score = self._rule_scores[rule] + self._rule_scores[rule] -= self._positions_by_rule[rule][pos] + + # Update _rules_by_score. + self._rules_by_score[old_score].discard(rule) + self._rules_by_score[self._rule_scores[rule]].add(rule) + + # Update _positions_by_rule + del self._positions_by_rule[rule][pos] + self._rules_by_position[pos].remove(rule) + + # Optional addition: if the rule now applies nowhere, delete + # all its dictionary entries. + + def _best_rule(self, train_sents, test_sents, min_score, min_acc): + """ + Find the next best rule. This is done by repeatedly taking a + rule with the highest score and stepping through the corpus to + see where it applies. When it makes an error (decreasing its + score) it's bumped down, and we try a new rule with the + highest score. When we find a rule which has the highest + score *and* which has been tested against the entire corpus, we + can conclude that it's the next best rule. + """ + for max_score in sorted(self._rules_by_score.keys(), reverse=True): + if len(self._rules_by_score) == 0: + return None + if max_score < min_score or max_score <= 0: + return None + best_rules = list(self._rules_by_score[max_score]) + if self._deterministic: + best_rules.sort(key=repr) + for rule in best_rules: + positions = self._tag_positions[rule.original_tag] + + unk = self._first_unknown_position.get(rule, (0, -1)) + start = bisect.bisect_left(positions, unk) + + for i in range(start, len(positions)): + sentnum, wordnum = positions[i] + if rule.applies(test_sents[sentnum], wordnum): + self._update_rule_applies(rule, sentnum, wordnum, train_sents) + if self._rule_scores[rule] < max_score: + self._first_unknown_position[rule] = (sentnum, wordnum + 1) + break # The update demoted the rule. + + if self._rule_scores[rule] == max_score: + self._first_unknown_position[rule] = (len(train_sents) + 1, 0) + # optimization: if no min_acc threshold given, don't bother computing accuracy + if min_acc is None: + return rule + else: + changes = self._positions_by_rule[rule].values() + num_fixed = len([c for c in changes if c == 1]) + num_broken = len([c for c in changes if c == -1]) + # acc here is fixed/(fixed+broken); could also be + # fixed/(fixed+broken+other) == num_fixed/len(changes) + acc = num_fixed / (num_fixed + num_broken) + if acc >= min_acc: + return rule + # else: rule too inaccurate, discard and try next + + # We demoted (or skipped due to < min_acc, if that was given) + # all the rules with score==max_score. 
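            # (Editorial note, worked example: in the first doctest above, the
            # rule "NN->IN if Pos:NNS@[-1]" fixes 63 tags and breaks 16, so its
            # score is 63 - 16 = 47 and its accuracy is 63 / (63 + 16), roughly
            # 0.797; with min_acc=0.99 it is therefore skipped, which is why it
            # does not appear in the second, high-accuracy doctest run.)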
+ + assert min_acc is not None or not self._rules_by_score[max_score] + if not self._rules_by_score[max_score]: + del self._rules_by_score[max_score] + + def _apply_rule(self, rule, test_sents): + """ + Update *test_sents* by applying *rule* everywhere where its + conditions are met. + """ + update_positions = set(self._positions_by_rule[rule]) + new_tag = rule.replacement_tag + + if self._trace > 3: + self._trace_apply(len(update_positions)) + + # Update test_sents. + for (sentnum, wordnum) in update_positions: + text = test_sents[sentnum][wordnum][0] + test_sents[sentnum][wordnum] = (text, new_tag) + + def _update_tag_positions(self, rule): + """ + Update _tag_positions to reflect the changes to tags that are + made by *rule*. + """ + # Update the tag index. + for pos in self._positions_by_rule[rule]: + # Delete the old tag. + old_tag_positions = self._tag_positions[rule.original_tag] + old_index = bisect.bisect_left(old_tag_positions, pos) + del old_tag_positions[old_index] + # Insert the new tag. + new_tag_positions = self._tag_positions[rule.replacement_tag] + bisect.insort_left(new_tag_positions, pos) + + def _update_rules(self, rule, train_sents, test_sents): + """ + Check if we should add or remove any rules from consideration, + given the changes made by *rule*. + """ + # Collect a list of all positions that might be affected. + neighbors = set() + for sentnum, wordnum in self._positions_by_rule[rule]: + for template in self._templates: + n = template.get_neighborhood(test_sents[sentnum], wordnum) + neighbors.update([(sentnum, i) for i in n]) + + # Update the rules at each position. + num_obsolete = num_new = num_unseen = 0 + for sentnum, wordnum in neighbors: + test_sent = test_sents[sentnum] + correct_tag = train_sents[sentnum][wordnum][1] + + # Check if the change causes any rule at this position to + # stop matching; if so, then update our rule mappings + # accordingly. + old_rules = set(self._rules_by_position[sentnum, wordnum]) + for old_rule in old_rules: + if not old_rule.applies(test_sent, wordnum): + num_obsolete += 1 + self._update_rule_not_applies(old_rule, sentnum, wordnum) + + # Check if the change causes our templates to propose any + # new rules for this position. + for template in self._templates: + for new_rule in template.applicable_rules( + test_sent, wordnum, correct_tag + ): + if new_rule not in old_rules: + num_new += 1 + if new_rule not in self._rule_scores: + num_unseen += 1 + old_rules.add(new_rule) + self._update_rule_applies( + new_rule, sentnum, wordnum, train_sents + ) + + # We may have caused other rules to match here, that are + # not proposed by our templates -- in particular, rules + # that are harmful or neutral. We therefore need to + # update any rule whose first_unknown_position is past + # this rule. 
+ for new_rule, pos in self._first_unknown_position.items(): + if pos > (sentnum, wordnum): + if new_rule not in old_rules: + num_new += 1 + if new_rule.applies(test_sent, wordnum): + self._update_rule_applies( + new_rule, sentnum, wordnum, train_sents + ) + + if self._trace > 3: + self._trace_update_rules(num_obsolete, num_new, num_unseen) + + # Tracing + + def _trace_header(self): + print( + """ + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e +------------------+------------------------------------------------------- + """.rstrip() + ) + + def _trace_rule(self, rule): + assert self._rule_scores[rule] == sum(self._positions_by_rule[rule].values()) + + changes = self._positions_by_rule[rule].values() + num_fixed = len([c for c in changes if c == 1]) + num_broken = len([c for c in changes if c == -1]) + num_other = len([c for c in changes if c == 0]) + score = self._rule_scores[rule] + + rulestr = rule.format(self._ruleformat) + if self._trace > 2: + print( + "{:4d}{:4d}{:4d}{:4d} |".format( + score, num_fixed, num_broken, num_other + ), + end=" ", + ) + print( + textwrap.fill( + rulestr, + initial_indent=" " * 20, + width=79, + subsequent_indent=" " * 18 + "| ", + ).strip() + ) + else: + print(rulestr) + + def _trace_apply(self, num_updates): + prefix = " " * 18 + "|" + print(prefix) + print(prefix, f"Applying rule to {num_updates} positions.") + + def _trace_update_rules(self, num_obsolete, num_new, num_unseen): + prefix = " " * 18 + "|" + print(prefix, "Updated rule tables:") + print(prefix, (f" - {num_obsolete} rule applications removed")) + print( + prefix, + (f" - {num_new} rule applications added ({num_unseen} novel)"), + ) + print(prefix) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/crf.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/crf.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc728c8d55c5eecadd7dc214f756f5224b7f017 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/crf.py @@ -0,0 +1,207 @@ +# Natural Language Toolkit: Interface to the CRFSuite Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Long Duong +# URL: +# For license information, see LICENSE.TXT + +""" +A module for POS tagging using CRFSuite +""" + +import re +import unicodedata + +from nltk.tag.api import TaggerI + +try: + import pycrfsuite +except ImportError: + pass + + +class CRFTagger(TaggerI): + """ + A module for POS tagging using CRFSuite https://pypi.python.org/pypi/python-crfsuite + + >>> from nltk.tag import CRFTagger + >>> ct = CRFTagger() # doctest: +SKIP + + >>> train_data = [[('University','Noun'), ('is','Verb'), ('a','Det'), ('good','Adj'), ('place','Noun')], + ... 
[('dog','Noun'),('eat','Verb'),('meat','Noun')]] + + >>> ct.train(train_data,'model.crf.tagger') # doctest: +SKIP + >>> ct.tag_sents([['dog','is','good'], ['Cat','eat','meat']]) # doctest: +SKIP + [[('dog', 'Noun'), ('is', 'Verb'), ('good', 'Adj')], [('Cat', 'Noun'), ('eat', 'Verb'), ('meat', 'Noun')]] + + >>> gold_sentences = [[('dog','Noun'),('is','Verb'),('good','Adj')] , [('Cat','Noun'),('eat','Verb'), ('meat','Noun')]] + >>> ct.accuracy(gold_sentences) # doctest: +SKIP + 1.0 + + Setting learned model file + >>> ct = CRFTagger() # doctest: +SKIP + >>> ct.set_model_file('model.crf.tagger') # doctest: +SKIP + >>> ct.accuracy(gold_sentences) # doctest: +SKIP + 1.0 + """ + + def __init__(self, feature_func=None, verbose=False, training_opt={}): + """ + Initialize the CRFSuite tagger + + :param feature_func: The function that extracts features for each token of a sentence. This function should take + 2 parameters: tokens and index which extract features at index position from tokens list. See the build in + _get_features function for more detail. + :param verbose: output the debugging messages during training. + :type verbose: boolean + :param training_opt: python-crfsuite training options + :type training_opt: dictionary + + Set of possible training options (using LBFGS training algorithm). + :'feature.minfreq': The minimum frequency of features. + :'feature.possible_states': Force to generate possible state features. + :'feature.possible_transitions': Force to generate possible transition features. + :'c1': Coefficient for L1 regularization. + :'c2': Coefficient for L2 regularization. + :'max_iterations': The maximum number of iterations for L-BFGS optimization. + :'num_memories': The number of limited memories for approximating the inverse hessian matrix. + :'epsilon': Epsilon for testing the convergence of the objective. + :'period': The duration of iterations to test the stopping criterion. + :'delta': The threshold for the stopping criterion; an L-BFGS iteration stops when the + improvement of the log likelihood over the last ${period} iterations is no greater than this threshold. + :'linesearch': The line search algorithm used in L-BFGS updates: + + - 'MoreThuente': More and Thuente's method, + - 'Backtracking': Backtracking method with regular Wolfe condition, + - 'StrongBacktracking': Backtracking method with strong Wolfe condition + :'max_linesearch': The maximum number of trials for the line search algorithm. + """ + + self._model_file = "" + self._tagger = pycrfsuite.Tagger() + + if feature_func is None: + self._feature_func = self._get_features + else: + self._feature_func = feature_func + + self._verbose = verbose + self._training_options = training_opt + self._pattern = re.compile(r"\d") + + def set_model_file(self, model_file): + self._model_file = model_file + self._tagger.open(self._model_file) + + def _get_features(self, tokens, idx): + """ + Extract basic features about this word including + - Current word + - is it capitalized? + - Does it have punctuation? + - Does it have a number? + - Suffixes up to length 3 + + Note that : we might include feature over previous word, next word etc. 
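        For instance (an illustrative sketch, not an official doctest),
        ``self._get_features(['Cats', 'chase', 'mice'], 0)`` would be expected to
        return ``['CAPITALIZATION', 'SUF_s', 'SUF_ts', 'SUF_ats', 'WORD_Cats']``.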
+ + :return: a list which contains the features + :rtype: list(str) + """ + token = tokens[idx] + + feature_list = [] + + if not token: + return feature_list + + # Capitalization + if token[0].isupper(): + feature_list.append("CAPITALIZATION") + + # Number + if re.search(self._pattern, token) is not None: + feature_list.append("HAS_NUM") + + # Punctuation + punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"} + if all(unicodedata.category(x) in punc_cat for x in token): + feature_list.append("PUNCTUATION") + + # Suffix up to length 3 + if len(token) > 1: + feature_list.append("SUF_" + token[-1:]) + if len(token) > 2: + feature_list.append("SUF_" + token[-2:]) + if len(token) > 3: + feature_list.append("SUF_" + token[-3:]) + + feature_list.append("WORD_" + token) + + return feature_list + + def tag_sents(self, sents): + """ + Tag a list of sentences. NB before using this function, user should specify the mode_file either by + + - Train a new model using ``train`` function + - Use the pre-trained model which is set via ``set_model_file`` function + + :params sentences: list of sentences needed to tag. + :type sentences: list(list(str)) + :return: list of tagged sentences. + :rtype: list(list(tuple(str,str))) + """ + if self._model_file == "": + raise Exception( + " No model file is found !! Please use train or set_model_file function" + ) + + # We need the list of sentences instead of the list generator for matching the input and output + result = [] + for tokens in sents: + features = [self._feature_func(tokens, i) for i in range(len(tokens))] + labels = self._tagger.tag(features) + + if len(labels) != len(tokens): + raise Exception(" Predicted Length Not Matched, Expect Errors !") + + tagged_sent = list(zip(tokens, labels)) + result.append(tagged_sent) + + return result + + def train(self, train_data, model_file): + """ + Train the CRF tagger using CRFSuite + :params train_data : is the list of annotated sentences. + :type train_data : list (list(tuple(str,str))) + :params model_file : the model will be saved to this file. + + """ + trainer = pycrfsuite.Trainer(verbose=self._verbose) + trainer.set_params(self._training_options) + + for sent in train_data: + tokens, labels = zip(*sent) + features = [self._feature_func(tokens, i) for i in range(len(tokens))] + trainer.append(features, labels) + + # Now train the model, the output should be model_file + trainer.train(model_file) + # Save the model file + self.set_model_file(model_file) + + def tag(self, tokens): + """ + Tag a sentence using Python CRFSuite Tagger. NB before using this function, user should specify the mode_file either by + + - Train a new model using ``train`` function + - Use the pre-trained model which is set via ``set_model_file`` function + + :params tokens: list of tokens needed to tag. + :type tokens: list(str) + :return: list of tagged tokens. 
+ :rtype: list(tuple(str,str)) + """ + + return self.tag_sents([tokens])[0] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/sequential.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..3fb85c9fade8079ad5fd4ba7a517939741cb2440 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/sequential.py @@ -0,0 +1,755 @@ +# Natural Language Toolkit: Sequential Backoff Taggers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# Tiago Tresoldi (original affix tagger) +# URL: +# For license information, see LICENSE.TXT + +""" +Classes for tagging sentences sequentially, left to right. The +abstract base class SequentialBackoffTagger serves as the base +class for all the taggers in this module. Tagging of individual words +is performed by the method ``choose_tag()``, which is defined by +subclasses of SequentialBackoffTagger. If a tagger is unable to +determine a tag for the specified token, then its backoff tagger is +consulted instead. Any SequentialBackoffTagger may serve as a +backoff tagger for any other SequentialBackoffTagger. +""" +import ast +import re +from abc import abstractmethod +from typing import List, Optional, Tuple + +from nltk import jsontags +from nltk.classify import NaiveBayesClassifier +from nltk.probability import ConditionalFreqDist +from nltk.tag.api import FeaturesetTaggerI, TaggerI + + +###################################################################### +# Abstract Base Classes +###################################################################### +class SequentialBackoffTagger(TaggerI): + """ + An abstract base class for taggers that tags words sequentially, + left to right. Tagging of individual words is performed by the + ``choose_tag()`` method, which should be defined by subclasses. If + a tagger is unable to determine a tag for the specified token, + then its backoff tagger is consulted. + + :ivar _taggers: A list of all the taggers that should be tried to + tag a token (i.e., self and its backoff taggers). + """ + + def __init__(self, backoff=None): + if backoff is None: + self._taggers = [self] + else: + self._taggers = [self] + backoff._taggers + + @property + def backoff(self): + """The backoff tagger for this tagger.""" + return self._taggers[1] if len(self._taggers) > 1 else None + + def tag(self, tokens): + # docs inherited from TaggerI + tags = [] + for i in range(len(tokens)): + tags.append(self.tag_one(tokens, i, tags)) + return list(zip(tokens, tags)) + + def tag_one(self, tokens, index, history): + """ + Determine an appropriate tag for the specified token, and + return that tag. If this tagger is unable to determine a tag + for the specified token, then its backoff tagger is consulted. + + :rtype: str + :type tokens: list + :param tokens: The list of words that are being tagged. + :type index: int + :param index: The index of the word whose tag should be + returned. + :type history: list(str) + :param history: A list of the tags for all words before *index*. + """ + tag = None + for tagger in self._taggers: + tag = tagger.choose_tag(tokens, index, history) + if tag is not None: + break + return tag + + @abstractmethod + def choose_tag(self, tokens, index, history): + """ + Decide which tag should be used for the specified token, and + return that tag. If this tagger is unable to determine a tag + for the specified token, return None -- do not consult + the backoff tagger. 
This method should be overridden by + subclasses of SequentialBackoffTagger. + + :rtype: str + :type tokens: list + :param tokens: The list of words that are being tagged. + :type index: int + :param index: The index of the word whose tag should be + returned. + :type history: list(str) + :param history: A list of the tags for all words before *index*. + """ + + +class ContextTagger(SequentialBackoffTagger): + """ + An abstract base class for sequential backoff taggers that choose + a tag for a token based on the value of its "context". Different + subclasses are used to define different contexts. + + A ContextTagger chooses the tag for a token by calculating the + token's context, and looking up the corresponding tag in a table. + This table can be constructed manually; or it can be automatically + constructed based on a training corpus, using the ``_train()`` + factory method. + + :ivar _context_to_tag: Dictionary mapping contexts to tags. + """ + + def __init__(self, context_to_tag, backoff=None): + """ + :param context_to_tag: A dictionary mapping contexts to tags. + :param backoff: The backoff tagger that should be used for this tagger. + """ + super().__init__(backoff) + self._context_to_tag = context_to_tag if context_to_tag else {} + + @abstractmethod + def context(self, tokens, index, history): + """ + :return: the context that should be used to look up the tag + for the specified token; or None if the specified token + should not be handled by this tagger. + :rtype: (hashable) + """ + + def choose_tag(self, tokens, index, history): + context = self.context(tokens, index, history) + return self._context_to_tag.get(context) + + def size(self): + """ + :return: The number of entries in the table used by this + tagger to map from contexts to tags. + """ + return len(self._context_to_tag) + + def __repr__(self): + return f"<{self.__class__.__name__}: size={self.size()}>" + + def _train(self, tagged_corpus, cutoff=0, verbose=False): + """ + Initialize this ContextTagger's ``_context_to_tag`` table + based on the given training data. In particular, for each + context ``c`` in the training data, set + ``_context_to_tag[c]`` to the most frequent tag for that + context. However, exclude any contexts that are already + tagged perfectly by the backoff tagger(s). + + The old value of ``self._context_to_tag`` (if any) is discarded. + + :param tagged_corpus: A tagged corpus. Each item should be + a list of (word, tag tuples. + :param cutoff: If the most likely tag for a context occurs + fewer than cutoff times, then exclude it from the + context-to-tag table for the new tagger. + """ + + token_count = hit_count = 0 + + # A context is considered 'useful' if it's not already tagged + # perfectly by the backoff tagger. + useful_contexts = set() + + # Count how many times each tag occurs in each context. + fd = ConditionalFreqDist() + for sentence in tagged_corpus: + tokens, tags = zip(*sentence) + for index, (token, tag) in enumerate(sentence): + # Record the event. + token_count += 1 + context = self.context(tokens, index, tags[:index]) + if context is None: + continue + fd[context][tag] += 1 + # If the backoff got it wrong, this context is useful: + if self.backoff is None or tag != self.backoff.tag_one( + tokens, index, tags[:index] + ): + useful_contexts.add(context) + + # Build the context_to_tag table -- for each context, figure + # out what the most likely tag is. Only include contexts that + # we've seen at least `cutoff` times. 
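        # (Editorial note: the shape of a "context" depends on the subclass; for
        # a UnigramTagger it is just the word string, while for a BigramTagger it
        # is ((previous_tag,), word) -- see the context() methods further below.)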
+ for context in useful_contexts: + best_tag = fd[context].max() + hits = fd[context][best_tag] + if hits > cutoff: + self._context_to_tag[context] = best_tag + hit_count += hits + + # Display some stats, if requested. + if verbose: + size = len(self._context_to_tag) + backoff = 100 - (hit_count * 100.0) / token_count + pruning = 100 - (size * 100.0) / len(fd.conditions()) + print("[Trained Unigram tagger:", end=" ") + print( + "size={}, backoff={:.2f}%, pruning={:.2f}%]".format( + size, backoff, pruning + ) + ) + + +###################################################################### +# Tagger Classes +###################################################################### + + +@jsontags.register_tag +class DefaultTagger(SequentialBackoffTagger): + """ + A tagger that assigns the same tag to every token. + + >>> from nltk.tag import DefaultTagger + >>> default_tagger = DefaultTagger('NN') + >>> list(default_tagger.tag('This is a test'.split())) + [('This', 'NN'), ('is', 'NN'), ('a', 'NN'), ('test', 'NN')] + + This tagger is recommended as a backoff tagger, in cases where + a more powerful tagger is unable to assign a tag to the word + (e.g. because the word was not seen during training). + + :param tag: The tag to assign to each token + :type tag: str + """ + + json_tag = "nltk.tag.sequential.DefaultTagger" + + def __init__(self, tag): + self._tag = tag + super().__init__(None) + + def encode_json_obj(self): + return self._tag + + @classmethod + def decode_json_obj(cls, obj): + tag = obj + return cls(tag) + + def choose_tag(self, tokens, index, history): + return self._tag # ignore token and history + + def __repr__(self): + return f"" + + +@jsontags.register_tag +class NgramTagger(ContextTagger): + """ + A tagger that chooses a token's tag based on its word string and + on the preceding n word's tags. In particular, a tuple + (tags[i-n:i-1], words[i]) is looked up in a table, and the + corresponding tag is returned. N-gram taggers are typically + trained on a tagged corpus. + + Train a new NgramTagger using the given training data or + the supplied model. In particular, construct a new tagger + whose table maps from each context (tag[i-n:i-1], word[i]) + to the most frequent tag for that context. But exclude any + contexts that are already tagged perfectly by the backoff + tagger. + + :param train: A tagged corpus consisting of a list of tagged + sentences, where each sentence is a list of (word, tag) tuples. + :param backoff: A backoff tagger, to be used by the new + tagger if it encounters an unknown context. + :param cutoff: If the most likely tag for a context occurs + fewer than *cutoff* times, then exclude it from the + context-to-tag table for the new tagger. 
+ """ + + json_tag = "nltk.tag.sequential.NgramTagger" + + def __init__( + self, n, train=None, model=None, backoff=None, cutoff=0, verbose=False + ): + self._n = n + self._check_params(train, model) + + super().__init__(model, backoff) + + if train: + self._train(train, cutoff, verbose) + + def encode_json_obj(self): + _context_to_tag = {repr(k): v for k, v in self._context_to_tag.items()} + if "NgramTagger" in self.__class__.__name__: + return self._n, _context_to_tag, self.backoff + else: + return _context_to_tag, self.backoff + + @classmethod + def decode_json_obj(cls, obj): + try: + _n, _context_to_tag, backoff = obj + except ValueError: + _context_to_tag, backoff = obj + + if not _context_to_tag: + return backoff + + _context_to_tag = {ast.literal_eval(k): v for k, v in _context_to_tag.items()} + + if "NgramTagger" in cls.__name__: + return cls(_n, model=_context_to_tag, backoff=backoff) + else: + return cls(model=_context_to_tag, backoff=backoff) + + def context(self, tokens, index, history): + tag_context = tuple(history[max(0, index - self._n + 1) : index]) + return tag_context, tokens[index] + + +@jsontags.register_tag +class UnigramTagger(NgramTagger): + """ + Unigram Tagger + + The UnigramTagger finds the most likely tag for each word in a training + corpus, and then uses that information to assign tags to new tokens. + + >>> from nltk.corpus import brown + >>> from nltk.tag import UnigramTagger + >>> test_sent = brown.sents(categories='news')[0] + >>> unigram_tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500]) + >>> for tok, tag in unigram_tagger.tag(test_sent): + ... print("({}, {}), ".format(tok, tag)) # doctest: +NORMALIZE_WHITESPACE + (The, AT), (Fulton, NP-TL), (County, NN-TL), (Grand, JJ-TL), + (Jury, NN-TL), (said, VBD), (Friday, NR), (an, AT), + (investigation, NN), (of, IN), (Atlanta's, NP$), (recent, JJ), + (primary, NN), (election, NN), (produced, VBD), (``, ``), + (no, AT), (evidence, NN), ('', ''), (that, CS), (any, DTI), + (irregularities, NNS), (took, VBD), (place, NN), (., .), + + :param train: The corpus of training data, a list of tagged sentences + :type train: list(list(tuple(str, str))) + :param model: The tagger model + :type model: dict + :param backoff: Another tagger which this tagger will consult when it is + unable to tag a word + :type backoff: TaggerI + :param cutoff: The number of instances of training data the tagger must see + in order not to use the backoff tagger + :type cutoff: int + """ + + json_tag = "nltk.tag.sequential.UnigramTagger" + + def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False): + super().__init__(1, train, model, backoff, cutoff, verbose) + + def context(self, tokens, index, history): + return tokens[index] + + +@jsontags.register_tag +class BigramTagger(NgramTagger): + """ + A tagger that chooses a token's tag based its word string and on + the preceding words' tag. In particular, a tuple consisting + of the previous tag and the word is looked up in a table, and + the corresponding tag is returned. 
+
+
+@jsontags.register_tag
+class BigramTagger(NgramTagger):
+    """
+    A tagger that chooses a token's tag based on its word string and on
+    the preceding word's tag.  In particular, a tuple consisting
+    of the previous tag and the word is looked up in a table, and
+    the corresponding tag is returned.
+
+    :param train: The corpus of training data, a list of tagged sentences
+    :type train: list(list(tuple(str, str)))
+    :param model: The tagger model
+    :type model: dict
+    :param backoff: Another tagger which this tagger will consult when it is
+        unable to tag a word
+    :type backoff: TaggerI
+    :param cutoff: The number of instances of training data the tagger must see
+        in order not to use the backoff tagger
+    :type cutoff: int
+    """
+
+    json_tag = "nltk.tag.sequential.BigramTagger"
+
+    def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False):
+        super().__init__(2, train, model, backoff, cutoff, verbose)
+
+
+@jsontags.register_tag
+class TrigramTagger(NgramTagger):
+    """
+    A tagger that chooses a token's tag based on its word string and on
+    the preceding two words' tags.  In particular, a tuple consisting
+    of the previous two tags and the word is looked up in a table, and
+    the corresponding tag is returned.
+
+    :param train: The corpus of training data, a list of tagged sentences
+    :type train: list(list(tuple(str, str)))
+    :param model: The tagger model
+    :type model: dict
+    :param backoff: Another tagger which this tagger will consult when it is
+        unable to tag a word
+    :type backoff: TaggerI
+    :param cutoff: The number of instances of training data the tagger must see
+        in order not to use the backoff tagger
+    :type cutoff: int
+    """
+
+    json_tag = "nltk.tag.sequential.TrigramTagger"
+
+    def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False):
+        super().__init__(3, train, model, backoff, cutoff, verbose)
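As the docstrings above describe, the `cutoff` parameter prunes contexts seen too rarely during training, pushing those decisions down the backoff chain. A rough sketch of measuring the effect on held-out data (editorial illustration, not part of the diff; assumes the Brown corpus is installed, and uses `accuracy()`, which is named `evaluate()` on older NLTK releases):

# Illustrative sketch (not part of the diff): pruning rare trigram contexts.
from nltk.corpus import brown
from nltk.tag import DefaultTagger, TrigramTagger, UnigramTagger

tagged = brown.tagged_sents(categories='news')
train, test = tagged[:3000], tagged[3000:3500]

base = UnigramTagger(train, backoff=DefaultTagger('NN'))
pruned = TrigramTagger(train, backoff=base, cutoff=2)  # drop contexts seen < 2 times

print(pruned.accuracy(test))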
+
+
+@jsontags.register_tag
+class AffixTagger(ContextTagger):
+    """
+    A tagger that chooses a token's tag based on a leading or trailing
+    substring of its word string.  (It is important to note that these
+    substrings are not necessarily "true" morphological affixes).  In
+    particular, a fixed-length substring of the word is looked up in a
+    table, and the corresponding tag is returned.  Affix taggers are
+    typically constructed by training them on a tagged corpus.
+
+    Construct a new affix tagger.
+
+    :param affix_length: The length of the affixes that should be
+        considered during training and tagging.  Use negative
+        numbers for suffixes.
+    :param min_stem_length: Any words whose length is less than
+        min_stem_length+abs(affix_length) will be assigned a
+        tag of None by this tagger.
+    """
+
+    json_tag = "nltk.tag.sequential.AffixTagger"
+
+    def __init__(
+        self,
+        train=None,
+        model=None,
+        affix_length=-3,
+        min_stem_length=2,
+        backoff=None,
+        cutoff=0,
+        verbose=False,
+    ):
+        self._check_params(train, model)
+
+        super().__init__(model, backoff)
+
+        self._affix_length = affix_length
+        self._min_word_length = min_stem_length + abs(affix_length)
+
+        if train:
+            self._train(train, cutoff, verbose)
+
+    def encode_json_obj(self):
+        return (
+            self._affix_length,
+            self._min_word_length,
+            self._context_to_tag,
+            self.backoff,
+        )
+
+    @classmethod
+    def decode_json_obj(cls, obj):
+        _affix_length, _min_word_length, _context_to_tag, backoff = obj
+        return cls(
+            affix_length=_affix_length,
+            min_stem_length=_min_word_length - abs(_affix_length),
+            model=_context_to_tag,
+            backoff=backoff,
+        )
+
+    def context(self, tokens, index, history):
+        token = tokens[index]
+        if len(token) < self._min_word_length:
+            return None
+        elif self._affix_length > 0:
+            return token[: self._affix_length]
+        else:
+            return token[self._affix_length :]
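The default settings above key the table on three-character suffixes, and the `context()` guard means that words shorter than min_stem_length + abs(affix_length) = 5 characters are handed straight to the backoff tagger. A brief sketch (editorial illustration, not part of the diff; assumes the Brown corpus is installed):

# Illustrative sketch (not part of the diff): a suffix-based AffixTagger.
from nltk.corpus import brown
from nltk.tag import AffixTagger, DefaultTagger

train = brown.tagged_sents(categories='news')[:3000]
suffix_tagger = AffixTagger(
    train, affix_length=-3, min_stem_length=2, backoff=DefaultTagger('NN')
)

print(suffix_tagger.tag('the reporters investigated thoroughly'.split()))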
+
+
+@jsontags.register_tag
+class RegexpTagger(SequentialBackoffTagger):
+    r"""
+    Regular Expression Tagger
+
+    The RegexpTagger assigns tags to tokens by comparing their
+    word strings to a series of regular expressions.  The following tagger
+    uses word suffixes to make guesses about the correct Brown Corpus part
+    of speech tag:
+
+    >>> from nltk.corpus import brown
+    >>> from nltk.tag import RegexpTagger
+    >>> test_sent = brown.sents(categories='news')[0]
+    >>> regexp_tagger = RegexpTagger(
+    ...     [(r'^-?[0-9]+(\.[0-9]+)?$', 'CD'),  # cardinal numbers
+    ...      (r'(The|the|A|a|An|an)$', 'AT'),   # articles
+    ...      (r'.*able$', 'JJ'),                # adjectives
+    ...      (r'.*ness$', 'NN'),                # nouns formed from adjectives
+    ...      (r'.*ly$', 'RB'),                  # adverbs
+    ...      (r'.*s$', 'NNS'),                  # plural nouns
+    ...      (r'.*ing$', 'VBG'),                # gerunds
+    ...      (r'.*ed$', 'VBD'),                 # past tense verbs
+    ...      (r'.*', 'NN')                      # nouns (default)
+    ... ])
+    >>> regexp_tagger
+    <Regexp Tagger: size=9>
+    >>> regexp_tagger.tag(test_sent) # doctest: +NORMALIZE_WHITESPACE
+    [('The', 'AT'), ('Fulton', 'NN'), ('County', 'NN'), ('Grand', 'NN'), ('Jury', 'NN'),
+    ('said', 'NN'), ('Friday', 'NN'), ('an', 'AT'), ('investigation', 'NN'), ('of', 'NN'),
+    ("Atlanta's", 'NNS'), ('recent', 'NN'), ('primary', 'NN'), ('election', 'NN'),
+    ('produced', 'VBD'), ('``', 'NN'), ('no', 'NN'), ('evidence', 'NN'), ("''", 'NN'),
+    ('that', 'NN'), ('any', 'NN'), ('irregularities', 'NNS'), ('took', 'NN'),
+    ('place', 'NN'), ('.', 'NN')]
+
+    :type regexps: list(tuple(str, str))
+    :param regexps: A list of ``(regexp, tag)`` pairs, each of
+        which indicates that a word matching ``regexp`` should
+        be tagged with ``tag``.  The pairs will be evaluated in
+        order.  If none of the regexps match a word, then the
+        optional backoff tagger is invoked, else it is
+        assigned the tag None.
+    """
+
+    json_tag = "nltk.tag.sequential.RegexpTagger"
+
+    def __init__(
+        self, regexps: List[Tuple[str, str]], backoff: Optional[TaggerI] = None
+    ):
+        super().__init__(backoff)
+        self._regexps = []
+        for regexp, tag in regexps:
+            try:
+                self._regexps.append((re.compile(regexp), tag))
+            except Exception as e:
+                raise Exception(
+                    f"Invalid RegexpTagger regexp: {e}\n- regexp: {regexp!r}\n- tag: {tag!r}"
+                ) from e
+
+    def encode_json_obj(self):
+        return [(regexp.pattern, tag) for regexp, tag in self._regexps], self.backoff
+
+    @classmethod
+    def decode_json_obj(cls, obj):
+        regexps, backoff = obj
+        return cls(regexps, backoff)
+
+    def choose_tag(self, tokens, index, history):
+        for regexp, tag in self._regexps:
+            if re.match(regexp, tokens[index]):
+                return tag
+        return None
+
+    def __repr__(self):
+        return f"<Regexp Tagger: size={len(self._regexps)}>"
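Because `choose_tag()` returns None when no pattern matches, a RegexpTagger without a catch-all pattern slots naturally into the middle of a backoff chain, catching numbers and productive suffixes that a small training set may never contain. A brief sketch (editorial illustration, not part of the diff; assumes the Brown corpus is installed):

# Illustrative sketch (not part of the diff): a RegexpTagger between a trained
# unigram model and the DefaultTagger.
from nltk.corpus import brown
from nltk.tag import DefaultTagger, RegexpTagger, UnigramTagger

patterns = [
    (r'^-?[0-9]+(\.[0-9]+)?$', 'CD'),  # cardinal numbers
    (r'.*ing$', 'VBG'),                # gerunds
    (r'.*ed$', 'VBD'),                 # past tense verbs
]
guesser = RegexpTagger(patterns, backoff=DefaultTagger('NN'))
tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500], backoff=guesser)

print(tagger.tag('35 engineers were debugging yesterday'.split()))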
+
+
+class ClassifierBasedTagger(SequentialBackoffTagger, FeaturesetTaggerI):
+    """
+    A sequential tagger that uses a classifier to choose the tag for
+    each token in a sentence.  The featureset input for the classifier
+    is generated by a feature detector function::
+
+        feature_detector(tokens, index, history) -> featureset
+
+    Where tokens is the list of unlabeled tokens in the sentence;
+    index is the index of the token for which feature detection
+    should be performed; and history is a list of the tags for all
+    tokens before index.
+
+    Construct a new classifier-based sequential tagger.
+
+    :param feature_detector: A function used to generate the
+        featureset input for the classifier::
+
+            feature_detector(tokens, index, history) -> featureset
+
+    :param train: A tagged corpus consisting of a list of tagged
+        sentences, where each sentence is a list of (word, tag) tuples.
+
+    :param backoff: A backoff tagger, to be used by the new tagger
+        if it encounters an unknown context.
+
+    :param classifier_builder: A function used to train a new
+        classifier based on the data in *train*.  It should take
+        one argument, a list of labeled featuresets (i.e.,
+        (featureset, label) tuples).
+
+    :param classifier: The classifier that should be used by the
+        tagger.  This is only useful if you want to manually
+        construct the classifier; normally, you would use *train*
+        instead.
+
+    :param backoff: A backoff tagger, used if this tagger is
+        unable to determine a tag for a given token.
+
+    :param cutoff_prob: If specified, then this tagger will fall
+        back on its backoff tagger if the probability of the most
+        likely tag is less than *cutoff_prob*.
+    """
+
+    def __init__(
+        self,
+        feature_detector=None,
+        train=None,
+        classifier_builder=NaiveBayesClassifier.train,
+        classifier=None,
+        backoff=None,
+        cutoff_prob=None,
+        verbose=False,
+    ):
+        self._check_params(train, classifier)
+
+        super().__init__(backoff)
+
+        if (train and classifier) or (not train and not classifier):
+            raise ValueError(
+                "Must specify either training data or trained classifier."
+            )
+
+        if feature_detector is not None:
+            self._feature_detector = feature_detector
+            # The feature detector function, used to generate a featureset
+            # for each token: feature_detector(tokens, index, history) -> featureset
+
+        self._cutoff_prob = cutoff_prob
+        """Cutoff probability for tagging -- if the probability of the
+        most likely tag is less than this, then use backoff."""
+
+        self._classifier = classifier
+        """The classifier used to choose a tag for each token."""
+
+        if train:
+            self._train(train, classifier_builder, verbose)
+
+    def choose_tag(self, tokens, index, history):
+        # Use our feature detector to get the featureset.
+        featureset = self.feature_detector(tokens, index, history)
+
+        # Use the classifier to pick a tag.  If a cutoff probability
+        # was specified, then check that the tag's probability is
+        # higher than that cutoff first; otherwise, return None.
+        if self._cutoff_prob is None:
+            return self._classifier.classify(featureset)
+
+        pdist = self._classifier.prob_classify(featureset)
+        tag = pdist.max()
+        return tag if pdist.prob(tag) >= self._cutoff_prob else None
+
+    def _train(self, tagged_corpus, classifier_builder, verbose):
+        """
+        Build a new classifier, based on the given training data
+        *tagged_corpus*.
+        """
+        classifier_corpus = []
+        if verbose:
+            print("Constructing training corpus for classifier.")
+
+        for sentence in tagged_corpus:
+            history = []
+            untagged_sentence, tags = zip(*sentence)
+            for index in range(len(sentence)):
+                featureset = self.feature_detector(untagged_sentence, index, history)
+                classifier_corpus.append((featureset, tags[index]))
+                history.append(tags[index])
+
+        if verbose:
+            print(f"Training classifier ({len(classifier_corpus)} instances)")
+        self._classifier = classifier_builder(classifier_corpus)
+
+    def __repr__(self):
+        return f"<ClassifierBasedTagger: {self._classifier}>"
+
+    def feature_detector(self, tokens, index, history):
+        """
+        Return the feature detector that this tagger uses to generate
+        featuresets for its classifier.  The feature detector is a
+        function with the signature::
+
+            feature_detector(tokens, index, history) -> featureset
+
+        See ``classifier()``
+        """
+        return self._feature_detector(tokens, index, history)
+
+    def classifier(self):
+        """
+        Return the classifier that this tagger uses to choose a tag
+        for each word in a sentence.  The input for this classifier is
+        generated using this tagger's feature detector.
+        See ``feature_detector()``
+        """
+        return self._classifier
+
+
+class ClassifierBasedPOSTagger(ClassifierBasedTagger):
+    """
+    A classifier based part of speech tagger.
+ """ + + def feature_detector(self, tokens, index, history): + word = tokens[index] + if index == 0: + prevword = prevprevword = None + prevtag = prevprevtag = None + elif index == 1: + prevword = tokens[index - 1].lower() + prevprevword = None + prevtag = history[index - 1] + prevprevtag = None + else: + prevword = tokens[index - 1].lower() + prevprevword = tokens[index - 2].lower() + prevtag = history[index - 1] + prevprevtag = history[index - 2] + + if re.match(r"[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$", word): + shape = "number" + elif re.match(r"\W+$", word): + shape = "punct" + elif re.match("[A-Z][a-z]+$", word): + shape = "upcase" + elif re.match("[a-z]+$", word): + shape = "downcase" + elif re.match(r"\w+$", word): + shape = "mixedcase" + else: + shape = "other" + + features = { + "prevtag": prevtag, + "prevprevtag": prevprevtag, + "word": word, + "word.lower": word.lower(), + "suffix3": word.lower()[-3:], + "suffix2": word.lower()[-2:], + "suffix1": word.lower()[-1:], + "prevprevword": prevprevword, + "prevword": prevword, + "prevtag+word": f"{prevtag}+{word.lower()}", + "prevprevtag+word": f"{prevprevtag}+{word.lower()}", + "prevword+word": f"{prevword}+{word.lower()}", + "shape": shape, + } + return features diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/stanford.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/stanford.py new file mode 100644 index 0000000000000000000000000000000000000000..7c21e2dd20dec5c3b242d0e5007a4bf51d8ef8f8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/stanford.py @@ -0,0 +1,236 @@ +# Natural Language Toolkit: Interface to the Stanford Part-of-speech and Named-Entity Taggers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nitin Madnani +# Rami Al-Rfou' +# URL: +# For license information, see LICENSE.TXT + +""" +A module for interfacing with the Stanford taggers. + +Tagger models need to be downloaded from https://nlp.stanford.edu/software +and the STANFORD_MODELS environment variable set (a colon-separated +list of paths). + +For more details see the documentation for StanfordPOSTagger and StanfordNERTagger. +""" + +import os +import tempfile +import warnings +from abc import abstractmethod +from subprocess import PIPE + +from nltk.internals import _java_options, config_java, find_file, find_jar, java +from nltk.tag.api import TaggerI + +_stanford_url = "https://nlp.stanford.edu/software" + + +class StanfordTagger(TaggerI): + """ + An interface to Stanford taggers. Subclasses must define: + + - ``_cmd`` property: A property that returns the command that will be + executed. + - ``_SEPARATOR``: Class constant that represents that character that + is used to separate the tokens from their tags. + - ``_JAR`` file: Class constant that represents the jar file name. + """ + + _SEPARATOR = "" + _JAR = "" + + def __init__( + self, + model_filename, + path_to_jar=None, + encoding="utf8", + verbose=False, + java_options="-mx1000m", + ): + # Raise deprecation warning. + warnings.warn( + str( + "\nThe StanfordTokenizer will " + "be deprecated in version 3.2.6.\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead." + ), + DeprecationWarning, + stacklevel=2, + ) + + if not self._JAR: + warnings.warn( + "The StanfordTagger class is not meant to be " + "instantiated directly. Did you mean " + "StanfordPOSTagger or StanfordNERTagger?" 
+ ) + self._stanford_jar = find_jar( + self._JAR, path_to_jar, searchpath=(), url=_stanford_url, verbose=verbose + ) + + self._stanford_model = find_file( + model_filename, env_vars=("STANFORD_MODELS",), verbose=verbose + ) + + self._encoding = encoding + self.java_options = java_options + + @property + @abstractmethod + def _cmd(self): + """ + A property that returns the command that will be executed. + """ + + def tag(self, tokens): + # This function should return list of tuple rather than list of list + return sum(self.tag_sents([tokens]), []) + + def tag_sents(self, sentences): + encoding = self._encoding + default_options = " ".join(_java_options) + config_java(options=self.java_options, verbose=False) + + # Create a temporary input file + _input_fh, self._input_file_path = tempfile.mkstemp(text=True) + + cmd = list(self._cmd) + cmd.extend(["-encoding", encoding]) + + # Write the actual sentences to the temporary input file + _input_fh = os.fdopen(_input_fh, "wb") + _input = "\n".join(" ".join(x) for x in sentences) + if isinstance(_input, str) and encoding: + _input = _input.encode(encoding) + _input_fh.write(_input) + _input_fh.close() + + # Run the tagger and get the output + stanpos_output, _stderr = java( + cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE + ) + stanpos_output = stanpos_output.decode(encoding) + + # Delete the temporary file + os.unlink(self._input_file_path) + + # Return java configurations to their default values + config_java(options=default_options, verbose=False) + + return self.parse_output(stanpos_output, sentences) + + def parse_output(self, text, sentences=None): + # Output the tagged sentences + tagged_sentences = [] + for tagged_sentence in text.strip().split("\n"): + sentence = [] + for tagged_word in tagged_sentence.strip().split(): + word_tags = tagged_word.strip().split(self._SEPARATOR) + sentence.append( + ("".join(word_tags[:-1]), word_tags[-1].replace("0", "").upper()) + ) + tagged_sentences.append(sentence) + return tagged_sentences + + +class StanfordPOSTagger(StanfordTagger): + """ + A class for pos tagging with Stanford Tagger. The input is the paths to: + - a model trained on training data + - (optionally) the path to the stanford tagger jar file. If not specified here, + then this jar file must be specified in the CLASSPATH environment variable. + - (optionally) the encoding of the training data (default: UTF-8) + + Example: + + >>> from nltk.tag import StanfordPOSTagger + >>> st = StanfordPOSTagger('english-bidirectional-distsim.tagger') # doctest: +SKIP + >>> st.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')] + """ + + _SEPARATOR = "_" + _JAR = "stanford-postagger.jar" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def _cmd(self): + return [ + "edu.stanford.nlp.tagger.maxent.MaxentTagger", + "-model", + self._stanford_model, + "-textFile", + self._input_file_path, + "-tokenize", + "false", + "-outputFormatOptions", + "keepEmptySentences", + ] + + +class StanfordNERTagger(StanfordTagger): + """ + A class for Named-Entity Tagging with Stanford Tagger. The input is the paths to: + + - a model trained on training data + - (optionally) the path to the stanford tagger jar file. If not specified here, + then this jar file must be specified in the CLASSPATH environment variable. 
+ - (optionally) the encoding of the training data (default: UTF-8) + + Example: + + >>> from nltk.tag import StanfordNERTagger + >>> st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz') # doctest: +SKIP + >>> st.tag('Rami Eid is studying at Stony Brook University in NY'.split()) # doctest: +SKIP + [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'), + ('at', 'O'), ('Stony', 'ORGANIZATION'), ('Brook', 'ORGANIZATION'), + ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'LOCATION')] + """ + + _SEPARATOR = "/" + _JAR = "stanford-ner.jar" + _FORMAT = "slashTags" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def _cmd(self): + # Adding -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerOptions tokenizeNLs=false for not using stanford Tokenizer + return [ + "edu.stanford.nlp.ie.crf.CRFClassifier", + "-loadClassifier", + self._stanford_model, + "-textFile", + self._input_file_path, + "-outputFormat", + self._FORMAT, + "-tokenizerFactory", + "edu.stanford.nlp.process.WhitespaceTokenizer", + "-tokenizerOptions", + '"tokenizeNLs=false"', + ] + + def parse_output(self, text, sentences): + if self._FORMAT == "slashTags": + # Joint together to a big list + tagged_sentences = [] + for tagged_sentence in text.strip().split("\n"): + for tagged_word in tagged_sentence.strip().split(): + word_tags = tagged_word.strip().split(self._SEPARATOR) + tagged_sentences.append(("".join(word_tags[:-1]), word_tags[-1])) + + # Separate it according to the input + result = [] + start = 0 + for sent in sentences: + result.append(tagged_sentences[start : start + len(sent)]) + start += len(sent) + return result + + raise NotImplementedError diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/util.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/util.py new file mode 100644 index 0000000000000000000000000000000000000000..e35b98195f2b7b448775a49795e0f34d612624a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/util.py @@ -0,0 +1,72 @@ +# Natural Language Toolkit: Tagger Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + + +def str2tuple(s, sep="/"): + """ + Given the string representation of a tagged token, return the + corresponding tuple representation. The rightmost occurrence of + *sep* in *s* will be used to divide *s* into a word string and + a tag string. If *sep* does not occur in *s*, return (s, None). + + >>> from nltk.tag.util import str2tuple + >>> str2tuple('fly/NN') + ('fly', 'NN') + + :type s: str + :param s: The string representation of a tagged token. + :type sep: str + :param sep: The separator string used to separate word strings + from tags. + """ + loc = s.rfind(sep) + if loc >= 0: + return (s[:loc], s[loc + len(sep) :].upper()) + else: + return (s, None) + + +def tuple2str(tagged_token, sep="/"): + """ + Given the tuple representation of a tagged token, return the + corresponding string representation. This representation is + formed by concatenating the token's word string, followed by the + separator, followed by the token's tag. (If the tag is None, + then just return the bare word string.) + + >>> from nltk.tag.util import tuple2str + >>> tagged_token = ('fly', 'NN') + >>> tuple2str(tagged_token) + 'fly/NN' + + :type tagged_token: tuple(str, str) + :param tagged_token: The tuple representation of a tagged token. 
+ :type sep: str + :param sep: The separator string used to separate word strings + from tags. + """ + word, tag = tagged_token + if tag is None: + return word + else: + assert sep not in tag, "tag may not contain sep!" + return f"{word}{sep}{tag}" + + +def untag(tagged_sentence): + """ + Given a tagged sentence, return an untagged version of that + sentence. I.e., return a list containing the first element + of each tuple in *tagged_sentence*. + + >>> from nltk.tag.util import untag + >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')]) + ['John', 'saw', 'Mary'] + + """ + return [w for (w, t) in tagged_sentence]
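These three helpers are inverses of one another: str2tuple parses the "word/TAG" string form, tuple2str serializes it back, and untag strips the tags entirely. A short round-trip sketch (editorial illustration, not part of the diff; the sentence is arbitrary):

# Illustrative sketch (not part of the diff): round-tripping between the
# "word/TAG" string form and the (word, tag) tuple form.
from nltk.tag.util import str2tuple, tuple2str, untag

sent = [str2tuple(t) for t in 'The/AT dog/NN barked/VBD ./.'.split()]
print(sent)                                   # [('The', 'AT'), ('dog', 'NN'), ...]
print(' '.join(tuple2str(t) for t in sent))   # 'The/AT dog/NN barked/VBD ./.'
print(untag(sent))                            # ['The', 'dog', 'barked', '.']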