diff --git a/ckpts/universal/global_step40/zero/18.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/18.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..e943e858ec894508d36106e479df52492f726d85 --- /dev/null +++ b/ckpts/universal/global_step40/zero/18.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee3123e3cc7d1f751d60031d977e500b75fe3aace2d29b6584740b0590abb583 +size 16778396 diff --git a/ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..d59d8bd176c0d631362c05e17990af8f65c23cc9 --- /dev/null +++ b/ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af035e319b0da1b229568996e8f7244178c9d79a1419221cf1e52986cc6f3aa2 +size 50332828 diff --git a/ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..4f6b4a4691e5150df67ba9a446f4634395797706 --- /dev/null +++ b/ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5348b5f910e0d2abd0a0a0289104952d95ac88a00a237232d69e63c299d291dc +size 50332749 diff --git a/ckpts/universal/global_step40/zero/21.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/21.attention.dense.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..13912027071797c4bcf0e403b87a7700f8559008 --- /dev/null +++ b/ckpts/universal/global_step40/zero/21.attention.dense.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09ea2cbd80bd1baa7bea719caf7632905115d09c98cfaa654e0a7fce1e7b9ea1 +size 16778411 diff --git a/ckpts/universal/global_step40/zero/21.attention.dense.weight/fp32.pt b/ckpts/universal/global_step40/zero/21.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..035742cc1f9b8303f6f43f525c94907dad97162a --- /dev/null +++ b/ckpts/universal/global_step40/zero/21.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcbf5788ccdba1bac28169fe41079e1cb110bacd42c90655c6247dd45e975cea +size 16778317 diff --git a/ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..62bbe67264e98d6f6777f7753fc9b0acbfffc95d --- /dev/null +++ b/ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:058194a69d6117cc31775d6b9b1698f71bb01fdee4ec970e1bd0a2e7a8044d42 +size 9387 diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a8d2b6e5b12b383e70778f7262a0fb23cf004dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a21702ec417d279256d2f3178a7833d6edbf84d8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91ad8c1e85f3dff801e5b5580dc85b704f724771 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92c4e901f5653409f6cbb273f35fe6ea6522556c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f896a578496a7d4fc9d18158529a041407d3ab08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/__init__.py b/venv/lib/python3.10/site-packages/nltk/corpus/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..67f565aaa85618c0268c75cd4b1524829712909c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/__init__.py @@ -0,0 +1,529 @@ +# Natural Language Toolkit: Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +# TODO this docstring isn't up-to-date! +""" +NLTK corpus readers. The modules in this package provide functions +that can be used to read corpus files in a variety of formats. These +functions can be used to read both the corpus files that are +distributed in the NLTK corpus package, and corpus files that are part +of external corpora. + +Available Corpora +================= + +Please see https://www.nltk.org/nltk_data/ for a complete list. +Install corpora using nltk.download(). + +Corpus Reader Functions +======================= +Each corpus module defines one or more "corpus reader functions", +which can be used to read documents from that corpus. These functions +take an argument, ``item``, which is used to indicate which document +should be read from the corpus: + +- If ``item`` is one of the unique identifiers listed in the corpus + module's ``items`` variable, then the corresponding document will + be loaded from the NLTK corpus package. +- If ``item`` is a filename, then that file will be read. + +Additionally, corpus reader functions can be given lists of item +names; in which case, they will return a concatenation of the +corresponding documents. + +Corpus reader functions are named based on the type of information +they return. 
Some common examples, and their return types, are: + +- words(): list of str +- sents(): list of (list of str) +- paras(): list of (list of (list of str)) +- tagged_words(): list of (str,str) tuple +- tagged_sents(): list of (list of (str,str)) +- tagged_paras(): list of (list of (list of (str,str))) +- chunked_sents(): list of (Tree w/ (str,str) leaves) +- parsed_sents(): list of (Tree with str leaves) +- parsed_paras(): list of (list of (Tree with str leaves)) +- xml(): A single xml ElementTree +- raw(): unprocessed corpus contents + +For example, to read a list of the words in the Brown Corpus, use +``nltk.corpus.brown.words()``: + + >>> from nltk.corpus import brown + >>> print(", ".join(brown.words())) # doctest: +ELLIPSIS + The, Fulton, County, Grand, Jury, said, ... + +""" + +import re + +from nltk.corpus.reader import * +from nltk.corpus.util import LazyCorpusLoader +from nltk.tokenize import RegexpTokenizer + +abc: PlaintextCorpusReader = LazyCorpusLoader( + "abc", + PlaintextCorpusReader, + r"(?!\.).*\.txt", + encoding=[("science", "latin_1"), ("rural", "utf8")], +) +alpino: AlpinoCorpusReader = LazyCorpusLoader( + "alpino", AlpinoCorpusReader, tagset="alpino" +) +bcp47: BCP47CorpusReader = LazyCorpusLoader( + "bcp47", BCP47CorpusReader, r"(cldr|iana)/*" +) +brown: CategorizedTaggedCorpusReader = LazyCorpusLoader( + "brown", + CategorizedTaggedCorpusReader, + r"c[a-z]\d\d", + cat_file="cats.txt", + tagset="brown", + encoding="ascii", +) +cess_cat: BracketParseCorpusReader = LazyCorpusLoader( + "cess_cat", + BracketParseCorpusReader, + r"(?!\.).*\.tbf", + tagset="unknown", + encoding="ISO-8859-15", +) +cess_esp: BracketParseCorpusReader = LazyCorpusLoader( + "cess_esp", + BracketParseCorpusReader, + r"(?!\.).*\.tbf", + tagset="unknown", + encoding="ISO-8859-15", +) +cmudict: CMUDictCorpusReader = LazyCorpusLoader( + "cmudict", CMUDictCorpusReader, ["cmudict"] +) +comtrans: AlignedCorpusReader = LazyCorpusLoader( + "comtrans", AlignedCorpusReader, r"(?!\.).*\.txt" +) +comparative_sentences: ComparativeSentencesCorpusReader = LazyCorpusLoader( + "comparative_sentences", + ComparativeSentencesCorpusReader, + r"labeledSentences\.txt", + encoding="latin-1", +) +conll2000: ConllChunkCorpusReader = LazyCorpusLoader( + "conll2000", + ConllChunkCorpusReader, + ["train.txt", "test.txt"], + ("NP", "VP", "PP"), + tagset="wsj", + encoding="ascii", +) +conll2002: ConllChunkCorpusReader = LazyCorpusLoader( + "conll2002", + ConllChunkCorpusReader, + r".*\.(test|train).*", + ("LOC", "PER", "ORG", "MISC"), + encoding="utf-8", +) +conll2007: DependencyCorpusReader = LazyCorpusLoader( + "conll2007", + DependencyCorpusReader, + r".*\.(test|train).*", + encoding=[("eus", "ISO-8859-2"), ("esp", "utf8")], +) +crubadan: CrubadanCorpusReader = LazyCorpusLoader( + "crubadan", CrubadanCorpusReader, r".*\.txt" +) +dependency_treebank: DependencyCorpusReader = LazyCorpusLoader( + "dependency_treebank", DependencyCorpusReader, r".*\.dp", encoding="ascii" +) +extended_omw: CorpusReader = LazyCorpusLoader( + "extended_omw", CorpusReader, r".*/wn-[a-z\-]*\.tab", encoding="utf8" +) +floresta: BracketParseCorpusReader = LazyCorpusLoader( + "floresta", + BracketParseCorpusReader, + r"(?!\.).*\.ptb", + "#", + tagset="unknown", + encoding="ISO-8859-15", +) +framenet15: FramenetCorpusReader = LazyCorpusLoader( + "framenet_v15", + FramenetCorpusReader, + [ + "frRelation.xml", + "frameIndex.xml", + "fulltextIndex.xml", + "luIndex.xml", + "semTypes.xml", + ], +) +framenet: FramenetCorpusReader = LazyCorpusLoader( + 
"framenet_v17", + FramenetCorpusReader, + [ + "frRelation.xml", + "frameIndex.xml", + "fulltextIndex.xml", + "luIndex.xml", + "semTypes.xml", + ], +) +gazetteers: WordListCorpusReader = LazyCorpusLoader( + "gazetteers", WordListCorpusReader, r"(?!LICENSE|\.).*\.txt", encoding="ISO-8859-2" +) +genesis: PlaintextCorpusReader = LazyCorpusLoader( + "genesis", + PlaintextCorpusReader, + r"(?!\.).*\.txt", + encoding=[ + ("finnish|french|german", "latin_1"), + ("swedish", "cp865"), + (".*", "utf_8"), + ], +) +gutenberg: PlaintextCorpusReader = LazyCorpusLoader( + "gutenberg", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1" +) +ieer: IEERCorpusReader = LazyCorpusLoader("ieer", IEERCorpusReader, r"(?!README|\.).*") +inaugural: PlaintextCorpusReader = LazyCorpusLoader( + "inaugural", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1" +) +# [XX] This should probably just use TaggedCorpusReader: +indian: IndianCorpusReader = LazyCorpusLoader( + "indian", IndianCorpusReader, r"(?!\.).*\.pos", tagset="unknown", encoding="utf8" +) + +jeita: ChasenCorpusReader = LazyCorpusLoader( + "jeita", ChasenCorpusReader, r".*\.chasen", encoding="utf-8" +) +knbc: KNBCorpusReader = LazyCorpusLoader( + "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp" +) +lin_thesaurus: LinThesaurusCorpusReader = LazyCorpusLoader( + "lin_thesaurus", LinThesaurusCorpusReader, r".*\.lsp" +) +mac_morpho: MacMorphoCorpusReader = LazyCorpusLoader( + "mac_morpho", + MacMorphoCorpusReader, + r"(?!\.).*\.txt", + tagset="unknown", + encoding="latin-1", +) +machado: PortugueseCategorizedPlaintextCorpusReader = LazyCorpusLoader( + "machado", + PortugueseCategorizedPlaintextCorpusReader, + r"(?!\.).*\.txt", + cat_pattern=r"([a-z]*)/.*", + encoding="latin-1", +) +masc_tagged: CategorizedTaggedCorpusReader = LazyCorpusLoader( + "masc_tagged", + CategorizedTaggedCorpusReader, + r"(spoken|written)/.*\.txt", + cat_file="categories.txt", + tagset="wsj", + encoding="utf-8", + sep="_", +) +movie_reviews: CategorizedPlaintextCorpusReader = LazyCorpusLoader( + "movie_reviews", + CategorizedPlaintextCorpusReader, + r"(?!\.).*\.txt", + cat_pattern=r"(neg|pos)/.*", + encoding="ascii", +) +multext_east: MTECorpusReader = LazyCorpusLoader( + "mte_teip5", MTECorpusReader, r"(oana).*\.xml", encoding="utf-8" +) +names: WordListCorpusReader = LazyCorpusLoader( + "names", WordListCorpusReader, r"(?!\.).*\.txt", encoding="ascii" +) +nps_chat: NPSChatCorpusReader = LazyCorpusLoader( + "nps_chat", NPSChatCorpusReader, r"(?!README|\.).*\.xml", tagset="wsj" +) +opinion_lexicon: OpinionLexiconCorpusReader = LazyCorpusLoader( + "opinion_lexicon", + OpinionLexiconCorpusReader, + r"(\w+)\-words\.txt", + encoding="ISO-8859-2", +) +ppattach: PPAttachmentCorpusReader = LazyCorpusLoader( + "ppattach", PPAttachmentCorpusReader, ["training", "test", "devset"] +) +product_reviews_1: ReviewsCorpusReader = LazyCorpusLoader( + "product_reviews_1", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8" +) +product_reviews_2: ReviewsCorpusReader = LazyCorpusLoader( + "product_reviews_2", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8" +) +pros_cons: ProsConsCorpusReader = LazyCorpusLoader( + "pros_cons", + ProsConsCorpusReader, + r"Integrated(Cons|Pros)\.txt", + cat_pattern=r"Integrated(Cons|Pros)\.txt", + encoding="ISO-8859-2", +) +ptb: CategorizedBracketParseCorpusReader = ( + LazyCorpusLoader( # Penn Treebank v3: WSJ and Brown portions + "ptb", + CategorizedBracketParseCorpusReader, + r"(WSJ/\d\d/WSJ_\d\d|BROWN/C[A-Z]/C[A-Z])\d\d.MRG", 
+ cat_file="allcats.txt", + tagset="wsj", + ) +) +qc: StringCategoryCorpusReader = LazyCorpusLoader( + "qc", StringCategoryCorpusReader, ["train.txt", "test.txt"], encoding="ISO-8859-2" +) +reuters: CategorizedPlaintextCorpusReader = LazyCorpusLoader( + "reuters", + CategorizedPlaintextCorpusReader, + "(training|test).*", + cat_file="cats.txt", + encoding="ISO-8859-2", +) +rte: RTECorpusReader = LazyCorpusLoader("rte", RTECorpusReader, r"(?!\.).*\.xml") +senseval: SensevalCorpusReader = LazyCorpusLoader( + "senseval", SensevalCorpusReader, r"(?!\.).*\.pos" +) +sentence_polarity: CategorizedSentencesCorpusReader = LazyCorpusLoader( + "sentence_polarity", + CategorizedSentencesCorpusReader, + r"rt-polarity\.(neg|pos)", + cat_pattern=r"rt-polarity\.(neg|pos)", + encoding="utf-8", +) +sentiwordnet: SentiWordNetCorpusReader = LazyCorpusLoader( + "sentiwordnet", SentiWordNetCorpusReader, "SentiWordNet_3.0.0.txt", encoding="utf-8" +) +shakespeare: XMLCorpusReader = LazyCorpusLoader( + "shakespeare", XMLCorpusReader, r"(?!\.).*\.xml" +) +sinica_treebank: SinicaTreebankCorpusReader = LazyCorpusLoader( + "sinica_treebank", + SinicaTreebankCorpusReader, + ["parsed"], + tagset="unknown", + encoding="utf-8", +) +state_union: PlaintextCorpusReader = LazyCorpusLoader( + "state_union", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="ISO-8859-2" +) +stopwords: WordListCorpusReader = LazyCorpusLoader( + "stopwords", WordListCorpusReader, r"(?!README|\.).*", encoding="utf8" +) +subjectivity: CategorizedSentencesCorpusReader = LazyCorpusLoader( + "subjectivity", + CategorizedSentencesCorpusReader, + r"(quote.tok.gt9|plot.tok.gt9)\.5000", + cat_map={"quote.tok.gt9.5000": ["subj"], "plot.tok.gt9.5000": ["obj"]}, + encoding="latin-1", +) +swadesh: SwadeshCorpusReader = LazyCorpusLoader( + "swadesh", SwadeshCorpusReader, r"(?!README|\.).*", encoding="utf8" +) +swadesh110: PanlexSwadeshCorpusReader = LazyCorpusLoader( + "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh110/.*\.txt", encoding="utf8" +) +swadesh207: PanlexSwadeshCorpusReader = LazyCorpusLoader( + "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh207/.*\.txt", encoding="utf8" +) +switchboard: SwitchboardCorpusReader = LazyCorpusLoader( + "switchboard", SwitchboardCorpusReader, tagset="wsj" +) +timit: TimitCorpusReader = LazyCorpusLoader("timit", TimitCorpusReader) +timit_tagged: TimitTaggedCorpusReader = LazyCorpusLoader( + "timit", TimitTaggedCorpusReader, r".+\.tags", tagset="wsj", encoding="ascii" +) +toolbox: ToolboxCorpusReader = LazyCorpusLoader( + "toolbox", ToolboxCorpusReader, r"(?!.*(README|\.)).*\.(dic|txt)" +) +treebank: BracketParseCorpusReader = LazyCorpusLoader( + "treebank/combined", + BracketParseCorpusReader, + r"wsj_.*\.mrg", + tagset="wsj", + encoding="ascii", +) +treebank_chunk: ChunkedCorpusReader = LazyCorpusLoader( + "treebank/tagged", + ChunkedCorpusReader, + r"wsj_.*\.pos", + sent_tokenizer=RegexpTokenizer(r"(?<=/\.)\s*(?![^\[]*\])", gaps=True), + para_block_reader=tagged_treebank_para_block_reader, + tagset="wsj", + encoding="ascii", +) +treebank_raw: PlaintextCorpusReader = LazyCorpusLoader( + "treebank/raw", PlaintextCorpusReader, r"wsj_.*", encoding="ISO-8859-2" +) +twitter_samples: TwitterCorpusReader = LazyCorpusLoader( + "twitter_samples", TwitterCorpusReader, r".*\.json" +) +udhr: UdhrCorpusReader = LazyCorpusLoader("udhr", UdhrCorpusReader) +udhr2: PlaintextCorpusReader = LazyCorpusLoader( + "udhr2", PlaintextCorpusReader, r".*\.txt", encoding="utf8" +) +universal_treebanks: ConllCorpusReader = 
LazyCorpusLoader( + "universal_treebanks_v20", + ConllCorpusReader, + r".*\.conll", + columntypes=( + "ignore", + "words", + "ignore", + "ignore", + "pos", + "ignore", + "ignore", + "ignore", + "ignore", + "ignore", + ), +) +verbnet: VerbnetCorpusReader = LazyCorpusLoader( + "verbnet", VerbnetCorpusReader, r"(?!\.).*\.xml" +) +webtext: PlaintextCorpusReader = LazyCorpusLoader( + "webtext", PlaintextCorpusReader, r"(?!README|\.).*\.txt", encoding="ISO-8859-2" +) +wordnet: WordNetCorpusReader = LazyCorpusLoader( + "wordnet", + WordNetCorpusReader, + LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"), +) +wordnet31: WordNetCorpusReader = LazyCorpusLoader( + "wordnet31", + WordNetCorpusReader, + LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"), +) +wordnet2021: WordNetCorpusReader = LazyCorpusLoader( + "wordnet2021", + WordNetCorpusReader, + LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"), +) +wordnet_ic: WordNetICCorpusReader = LazyCorpusLoader( + "wordnet_ic", WordNetICCorpusReader, r".*\.dat" +) +words: WordListCorpusReader = LazyCorpusLoader( + "words", WordListCorpusReader, r"(?!README|\.).*", encoding="ascii" +) + +# defined after treebank +propbank: PropbankCorpusReader = LazyCorpusLoader( + "propbank", + PropbankCorpusReader, + "prop.txt", + r"frames/.*\.xml", + "verbs.txt", + lambda filename: re.sub(r"^wsj/\d\d/", "", filename), + treebank, +) # Must be defined *after* treebank corpus. +nombank: NombankCorpusReader = LazyCorpusLoader( + "nombank.1.0", + NombankCorpusReader, + "nombank.1.0", + r"frames/.*\.xml", + "nombank.1.0.words", + lambda filename: re.sub(r"^wsj/\d\d/", "", filename), + treebank, +) # Must be defined *after* treebank corpus. +propbank_ptb: PropbankCorpusReader = LazyCorpusLoader( + "propbank", + PropbankCorpusReader, + "prop.txt", + r"frames/.*\.xml", + "verbs.txt", + lambda filename: filename.upper(), + ptb, +) # Must be defined *after* ptb corpus. +nombank_ptb: NombankCorpusReader = LazyCorpusLoader( + "nombank.1.0", + NombankCorpusReader, + "nombank.1.0", + r"frames/.*\.xml", + "nombank.1.0.words", + lambda filename: filename.upper(), + ptb, +) # Must be defined *after* ptb corpus. +semcor: SemcorCorpusReader = LazyCorpusLoader( + "semcor", SemcorCorpusReader, r"brown./tagfiles/br-.*\.xml", wordnet +) # Must be defined *after* wordnet corpus. 
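# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the NLTK module above): the
# names defined in this file are LazyCorpusLoader proxies, so the
# underlying reader is only built on first access. This assumes the
# corresponding data packages have already been fetched into nltk_data
# via nltk.download(); the methods used follow the reader functions
# listed in the module docstring.
import nltk
from nltk.corpus import brown, treebank

nltk.download("brown")     # data for the CategorizedTaggedCorpusReader above
nltk.download("treebank")  # data for the BracketParseCorpusReader above

print(brown.words()[:10])          # list of str
print(brown.tagged_sents()[0])     # list of (word, tag) tuples
print(brown.categories()[:5])      # category labels read from cats.txt
print(treebank.parsed_sents()[0])  # nltk.Tree with str leaves
# ----------------------------------------------------------------------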
+ +nonbreaking_prefixes: NonbreakingPrefixesCorpusReader = LazyCorpusLoader( + "nonbreaking_prefixes", + NonbreakingPrefixesCorpusReader, + r"(?!README|\.).*", + encoding="utf8", +) +perluniprops: UnicharsCorpusReader = LazyCorpusLoader( + "perluniprops", + UnicharsCorpusReader, + r"(?!README|\.).*", + nltk_data_subdir="misc", + encoding="utf8", +) + +# mwa_ppdb = LazyCorpusLoader( +# 'mwa_ppdb', MWAPPDBCorpusReader, r'(?!README|\.).*', nltk_data_subdir='misc', encoding='utf8') + +# See https://github.com/nltk/nltk/issues/1579 +# and https://github.com/nltk/nltk/issues/1716 +# +# pl196x = LazyCorpusLoader( +# 'pl196x', Pl196xCorpusReader, r'[a-z]-.*\.xml', +# cat_file='cats.txt', textid_file='textids.txt', encoding='utf8') +# +# ipipan = LazyCorpusLoader( +# 'ipipan', IPIPANCorpusReader, r'(?!\.).*morph\.xml') +# +# nkjp = LazyCorpusLoader( +# 'nkjp', NKJPCorpusReader, r'', encoding='utf8') +# +# panlex_lite = LazyCorpusLoader( +# 'panlex_lite', PanLexLiteCorpusReader) +# +# ycoe = LazyCorpusLoader( +# 'ycoe', YCOECorpusReader) +# +# corpus not available with NLTK; these lines caused help(nltk.corpus) to break +# hebrew_treebank = LazyCorpusLoader( +# 'hebrew_treebank', BracketParseCorpusReader, r'.*\.txt') + +# FIXME: override any imported demo from various corpora, see https://github.com/nltk/nltk/issues/2116 +def demo(): + # This is out-of-date: + abc.demo() + brown.demo() + # chat80.demo() + cmudict.demo() + conll2000.demo() + conll2002.demo() + genesis.demo() + gutenberg.demo() + ieer.demo() + inaugural.demo() + indian.demo() + names.demo() + ppattach.demo() + senseval.demo() + shakespeare.demo() + sinica_treebank.demo() + state_union.demo() + stopwords.demo() + timit.demo() + toolbox.demo() + treebank.demo() + udhr.demo() + webtext.demo() + words.demo() + + +# ycoe.demo() + +if __name__ == "__main__": + # demo() + pass diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..049da0e7d2950c1d9db9a04785d2d904a79ab9cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b84486ece61903cf1f661989472a3ceee6533dac Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb0cab67a100250ae6b244e3d718e68e4f6992f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py b/venv/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py new file mode 100644 index 0000000000000000000000000000000000000000..2a32ecc86f7b7671445effc2801870c3fc10f295 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: Europarl Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nitin 
Madnani +# URL: +# For license information, see LICENSE.TXT + +import re + +from nltk.corpus.reader import * +from nltk.corpus.util import LazyCorpusLoader + +# Create a new corpus reader instance for each European language +danish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/danish", EuroparlCorpusReader, r"ep-.*\.da", encoding="utf-8" +) + +dutch: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/dutch", EuroparlCorpusReader, r"ep-.*\.nl", encoding="utf-8" +) + +english: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/english", EuroparlCorpusReader, r"ep-.*\.en", encoding="utf-8" +) + +finnish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/finnish", EuroparlCorpusReader, r"ep-.*\.fi", encoding="utf-8" +) + +french: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/french", EuroparlCorpusReader, r"ep-.*\.fr", encoding="utf-8" +) + +german: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/german", EuroparlCorpusReader, r"ep-.*\.de", encoding="utf-8" +) + +greek: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/greek", EuroparlCorpusReader, r"ep-.*\.el", encoding="utf-8" +) + +italian: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/italian", EuroparlCorpusReader, r"ep-.*\.it", encoding="utf-8" +) + +portuguese: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/portuguese", EuroparlCorpusReader, r"ep-.*\.pt", encoding="utf-8" +) + +spanish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/spanish", EuroparlCorpusReader, r"ep-.*\.es", encoding="utf-8" +) + +swedish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/swedish", EuroparlCorpusReader, r"ep-.*\.sv", encoding="utf-8" +) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/api.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/api.py new file mode 100644 index 0000000000000000000000000000000000000000..cbe80d902ff8daa5b2a94fcccbc5b050f2d36324 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/api.py @@ -0,0 +1,516 @@ +# Natural Language Toolkit: API for Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +API for corpus readers. +""" + +import os +import re +from collections import defaultdict +from itertools import chain + +from nltk.corpus.reader.util import * +from nltk.data import FileSystemPathPointer, PathPointer, ZipFilePathPointer + + +class CorpusReader: + """ + A base class for "corpus reader" classes, each of which can be + used to read a specific corpus format. Each individual corpus + reader instance is used to read a specific corpus, consisting of + one or more files under a common root directory. Each file is + identified by its ``file identifier``, which is the relative path + to the file from the root directory. + + A separate subclass is defined for each corpus format. These + subclasses define one or more methods that provide 'views' on the + corpus contents, such as ``words()`` (for a list of words) and + ``parsed_sents()`` (for a list of parsed sentences). Called with + no arguments, these methods will return the contents of the entire + corpus. For most corpora, these methods define one or more + selection arguments, such as ``fileids`` or ``categories``, which can + be used to select which portion of the corpus should be returned. 
+ """ + + def __init__(self, root, fileids, encoding="utf8", tagset=None): + """ + :type root: PathPointer or str + :param root: A path pointer identifying the root directory for + this corpus. If a string is specified, then it will be + converted to a ``PathPointer`` automatically. + :param fileids: A list of the files that make up this corpus. + This list can either be specified explicitly, as a list of + strings; or implicitly, as a regular expression over file + paths. The absolute path for each file will be constructed + by joining the reader's root to each file name. + :param encoding: The default unicode encoding for the files + that make up the corpus. The value of ``encoding`` can be any + of the following: + + - A string: ``encoding`` is the encoding name for all files. + - A dictionary: ``encoding[file_id]`` is the encoding + name for the file whose identifier is ``file_id``. If + ``file_id`` is not in ``encoding``, then the file + contents will be processed using non-unicode byte strings. + - A list: ``encoding`` should be a list of ``(regexp, encoding)`` + tuples. The encoding for a file whose identifier is ``file_id`` + will be the ``encoding`` value for the first tuple whose + ``regexp`` matches the ``file_id``. If no tuple's ``regexp`` + matches the ``file_id``, the file contents will be processed + using non-unicode byte strings. + - None: the file contents of all files will be + processed using non-unicode byte strings. + :param tagset: The name of the tagset used by this corpus, to be used + for normalizing or converting the POS tags returned by the + ``tagged_...()`` methods. + """ + # Convert the root to a path pointer, if necessary. + if isinstance(root, str) and not isinstance(root, PathPointer): + m = re.match(r"(.*\.zip)/?(.*)$|", root) + zipfile, zipentry = m.groups() + if zipfile: + root = ZipFilePathPointer(zipfile, zipentry) + else: + root = FileSystemPathPointer(root) + elif not isinstance(root, PathPointer): + raise TypeError("CorpusReader: expected a string or a PathPointer") + + # If `fileids` is a regexp, then expand it. + if isinstance(fileids, str): + fileids = find_corpus_fileids(root, fileids) + + self._fileids = fileids + """A list of the relative paths for the fileids that make up + this corpus.""" + + self._root = root + """The root directory for this corpus.""" + + self._readme = "README" + self._license = "LICENSE" + self._citation = "citation.bib" + + # If encoding was specified as a list of regexps, then convert + # it to a dictionary. + if isinstance(encoding, list): + encoding_dict = {} + for fileid in self._fileids: + for x in encoding: + (regexp, enc) = x + if re.match(regexp, fileid): + encoding_dict[fileid] = enc + break + encoding = encoding_dict + + self._encoding = encoding + """The default unicode encoding for the fileids that make up + this corpus. If ``encoding`` is None, then the file + contents are processed using byte strings.""" + self._tagset = tagset + + def __repr__(self): + if isinstance(self._root, ZipFilePathPointer): + path = f"{self._root.zipfile.filename}/{self._root.entry}" + else: + path = "%s" % self._root.path + return f"<{self.__class__.__name__} in {path!r}>" + + def ensure_loaded(self): + """ + Load this corpus (if it has not already been loaded). This is + used by LazyCorpusLoader as a simple method that can be used to + make sure a corpus is loaded -- e.g., in case a user wants to + do help(some_corpus). + """ + pass # no need to actually do anything. 
+ + def readme(self): + """ + Return the contents of the corpus README file, if it exists. + """ + with self.open(self._readme) as f: + return f.read() + + def license(self): + """ + Return the contents of the corpus LICENSE file, if it exists. + """ + with self.open(self._license) as f: + return f.read() + + def citation(self): + """ + Return the contents of the corpus citation.bib file, if it exists. + """ + with self.open(self._citation) as f: + return f.read() + + def fileids(self): + """ + Return a list of file identifiers for the fileids that make up + this corpus. + """ + return self._fileids + + def abspath(self, fileid): + """ + Return the absolute path for the given file. + + :type fileid: str + :param fileid: The file identifier for the file whose path + should be returned. + :rtype: PathPointer + """ + return self._root.join(fileid) + + def abspaths(self, fileids=None, include_encoding=False, include_fileid=False): + """ + Return a list of the absolute paths for all fileids in this corpus; + or for the given list of fileids, if specified. + + :type fileids: None or str or list + :param fileids: Specifies the set of fileids for which paths should + be returned. Can be None, for all fileids; a list of + file identifiers, for a specified set of fileids; or a single + file identifier, for a single file. Note that the return + value is always a list of paths, even if ``fileids`` is a + single file identifier. + + :param include_encoding: If true, then return a list of + ``(path_pointer, encoding)`` tuples. + + :rtype: list(PathPointer) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + paths = [self._root.join(f) for f in fileids] + + if include_encoding and include_fileid: + return list(zip(paths, [self.encoding(f) for f in fileids], fileids)) + elif include_fileid: + return list(zip(paths, fileids)) + elif include_encoding: + return list(zip(paths, [self.encoding(f) for f in fileids])) + else: + return paths + + def raw(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a single string. + :rtype: str + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + contents = [] + for f in fileids: + with self.open(f) as fp: + contents.append(fp.read()) + return concat(contents) + + def open(self, file): + """ + Return an open stream that can be used to read the given file. + If the file's encoding is not None, then the stream will + automatically decode the file's contents into unicode. + + :param file: The file identifier of the file to read. + """ + encoding = self.encoding(file) + stream = self._root.join(file).open(encoding) + return stream + + def encoding(self, file): + """ + Return the unicode encoding for the given corpus file, if known. + If the encoding is unknown, or if the given file should be + processed using byte strings (str), then return None. + """ + if isinstance(self._encoding, dict): + return self._encoding.get(file) + else: + return self._encoding + + def _get_root(self): + return self._root + + root = property( + _get_root, + doc=""" + The directory where this corpus is stored. 
+ + :type: PathPointer""", + ) + + +###################################################################### +# { Corpora containing categorized items +###################################################################### + + +class CategorizedCorpusReader: + """ + A mixin class used to aid in the implementation of corpus readers + for categorized corpora. This class defines the method + ``categories()``, which returns a list of the categories for the + corpus or for a specified set of fileids; and overrides ``fileids()`` + to take a ``categories`` argument, restricting the set of fileids to + be returned. + + Subclasses are expected to: + + - Call ``__init__()`` to set up the mapping. + + - Override all view methods to accept a ``categories`` parameter, + which can be used *instead* of the ``fileids`` parameter, to + select which fileids should be included in the returned view. + """ + + def __init__(self, kwargs): + """ + Initialize this mapping based on keyword arguments, as + follows: + + - cat_pattern: A regular expression pattern used to find the + category for each file identifier. The pattern will be + applied to each file identifier, and the first matching + group will be used as the category label for that file. + + - cat_map: A dictionary, mapping from file identifiers to + category labels. + + - cat_file: The name of a file that contains the mapping + from file identifiers to categories. The argument + ``cat_delimiter`` can be used to specify a delimiter. + + The corresponding argument will be deleted from ``kwargs``. If + more than one argument is specified, an exception will be + raised. + """ + self._f2c = None #: file-to-category mapping + self._c2f = None #: category-to-file mapping + + self._pattern = None #: regexp specifying the mapping + self._map = None #: dict specifying the mapping + self._file = None #: fileid of file containing the mapping + self._delimiter = None #: delimiter for ``self._file`` + + if "cat_pattern" in kwargs: + self._pattern = kwargs["cat_pattern"] + del kwargs["cat_pattern"] + elif "cat_map" in kwargs: + self._map = kwargs["cat_map"] + del kwargs["cat_map"] + elif "cat_file" in kwargs: + self._file = kwargs["cat_file"] + del kwargs["cat_file"] + if "cat_delimiter" in kwargs: + self._delimiter = kwargs["cat_delimiter"] + del kwargs["cat_delimiter"] + else: + raise ValueError( + "Expected keyword argument cat_pattern or " "cat_map or cat_file." + ) + + if "cat_pattern" in kwargs or "cat_map" in kwargs or "cat_file" in kwargs: + raise ValueError( + "Specify exactly one of: cat_pattern, " "cat_map, cat_file." 
+ ) + + def _init(self): + self._f2c = defaultdict(set) + self._c2f = defaultdict(set) + + if self._pattern is not None: + for file_id in self._fileids: + category = re.match(self._pattern, file_id).group(1) + self._add(file_id, category) + + elif self._map is not None: + for (file_id, categories) in self._map.items(): + for category in categories: + self._add(file_id, category) + + elif self._file is not None: + with self.open(self._file) as f: + for line in f.readlines(): + line = line.strip() + file_id, categories = line.split(self._delimiter, 1) + if file_id not in self.fileids(): + raise ValueError( + "In category mapping file %s: %s " + "not found" % (self._file, file_id) + ) + for category in categories.split(self._delimiter): + self._add(file_id, category) + + def _add(self, file_id, category): + self._f2c[file_id].add(category) + self._c2f[category].add(file_id) + + def categories(self, fileids=None): + """ + Return a list of the categories that are defined for this corpus, + or for the file(s) if it is given. + """ + if self._f2c is None: + self._init() + if fileids is None: + return sorted(self._c2f) + if isinstance(fileids, str): + fileids = [fileids] + return sorted(set.union(*(self._f2c[d] for d in fileids))) + + def fileids(self, categories=None): + """ + Return a list of file identifiers for the files that make up + this corpus, or that make up the given category(s) if specified. + """ + if categories is None: + return super().fileids() + elif isinstance(categories, str): + if self._f2c is None: + self._init() + if categories in self._c2f: + return sorted(self._c2f[categories]) + else: + raise ValueError("Category %s not found" % categories) + else: + if self._f2c is None: + self._init() + return sorted(set.union(*(self._c2f[c] for c in categories))) + + def _resolve(self, fileids, categories): + if fileids is not None and categories is not None: + raise ValueError("Specify fileids or categories, not both") + if categories is not None: + return self.fileids(categories) + else: + return fileids + + def raw(self, fileids=None, categories=None): + return super().raw(self._resolve(fileids, categories)) + + def words(self, fileids=None, categories=None): + return super().words(self._resolve(fileids, categories)) + + def sents(self, fileids=None, categories=None): + return super().sents(self._resolve(fileids, categories)) + + def paras(self, fileids=None, categories=None): + return super().paras(self._resolve(fileids, categories)) + + +###################################################################### +# { Treebank readers +###################################################################### + +# [xx] is it worth it to factor this out? +class SyntaxCorpusReader(CorpusReader): + """ + An abstract base class for reading corpora consisting of + syntactically parsed text. Subclasses should define: + + - ``__init__``, which specifies the location of the corpus + and a method for detecting the sentence blocks in corpus files. + - ``_read_block``, which reads a block from the input stream. + - ``_word``, which takes a block and returns a list of list of words. + - ``_tag``, which takes a block and returns a list of list of tagged + words. + - ``_parse``, which takes a block and returns a list of parsed + sentences. 
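# ----------------------------------------------------------------------
# Illustrative sketch (not part of api.py above): a toy subclass wiring
# up the four hooks that the SyntaxCorpusReader docstring above asks
# subclasses to define, for a made-up one-bracketed-parse-per-line
# format. The temporary corpus file and the ToyParseCorpusReader name
# are invented here purely so the snippet is self-contained.
import os
import tempfile

from nltk import Tree
from nltk.corpus.reader.api import SyntaxCorpusReader
from nltk.corpus.reader.util import read_line_block


class ToyParseCorpusReader(SyntaxCorpusReader):
    def _read_block(self, stream):
        return read_line_block(stream)  # each block is one bracketed parse

    def _parse(self, block):
        return Tree.fromstring(block)

    def _word(self, block):
        return Tree.fromstring(block).leaves()

    def _tag(self, block, tagset=None):
        return Tree.fromstring(block).pos()


root = tempfile.mkdtemp()
with open(os.path.join(root, "toy.parses"), "w", encoding="utf8") as f:
    f.write("(S (NP (DT the) (NN cat)) (VP (VBD sat)))\n")

reader = ToyParseCorpusReader(root, r".*\.parses")
print(reader.words())         # ['the', 'cat', 'sat']
print(reader.tagged_sents())  # [[('the', 'DT'), ('cat', 'NN'), ('sat', 'VBD')]]
print(reader.parsed_sents())  # [Tree('S', [Tree('NP', ...), Tree('VP', ...)])]
# ----------------------------------------------------------------------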
+ """ + + def _parse(self, s): + raise NotImplementedError() + + def _word(self, s): + raise NotImplementedError() + + def _tag(self, s): + raise NotImplementedError() + + def _read_block(self, stream): + raise NotImplementedError() + + def parsed_sents(self, fileids=None): + reader = self._read_parsed_sent_block + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + def reader(stream): + return self._read_tagged_sent_block(stream, tagset) + + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + reader = self._read_sent_block + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + def reader(stream): + return self._read_tagged_word_block(stream, tagset) + + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def words(self, fileids=None): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_word_block, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + # ------------------------------------------------------------ + # { Block Readers + + def _read_word_block(self, stream): + return list(chain.from_iterable(self._read_sent_block(stream))) + + def _read_tagged_word_block(self, stream, tagset=None): + return list(chain.from_iterable(self._read_tagged_sent_block(stream, tagset))) + + def _read_sent_block(self, stream): + return list(filter(None, [self._word(t) for t in self._read_block(stream)])) + + def _read_tagged_sent_block(self, stream, tagset=None): + return list( + filter(None, [self._tag(t, tagset) for t in self._read_block(stream)]) + ) + + def _read_parsed_sent_block(self, stream): + return list(filter(None, [self._parse(t) for t in self._read_block(stream)])) + + # } End of Block Readers + # ------------------------------------------------------------ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py new file mode 100644 index 0000000000000000000000000000000000000000..429f52a65034f6faee531430a4b1d08aabe20103 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py @@ -0,0 +1,218 @@ +# Natural Language Toolkit: BCP-47 language tags +# +# Copyright (C) 2022-2023 NLTK Project +# Author: Eric Kafe +# URL: +# For license information, see LICENSE.TXT + +import re +from warnings import warn +from xml.etree import ElementTree as et + +from nltk.corpus.reader import CorpusReader + + +class BCP47CorpusReader(CorpusReader): + """ + Parse BCP-47 composite language tags + + Supports all the main subtags, and the 'u-sd' extension: + + >>> from nltk.corpus import bcp47 + >>> bcp47.name('oc-gascon-u-sd-fr64') + 'Occitan (post 1500): Gascon: Pyrénées-Atlantiques' + + Can load a conversion table to Wikidata Q-codes: + >>> bcp47.load_wiki_q() + >>> bcp47.wiki_q['en-GI-spanglis'] + 'Q79388' + + """ + + def __init__(self, root, fileids): + """Read the BCP-47 database""" + super().__init__(root, fileids) + self.langcode = {} + with self.open("iana/language-subtag-registry.txt") as fp: + self.db = self.data_dict(fp.read().split("%%\n")) + with self.open("cldr/common-subdivisions-en.xml") as fp: 
+ self.subdiv = self.subdiv_dict( + et.parse(fp).iterfind("localeDisplayNames/subdivisions/subdivision") + ) + self.morphology() + + def load_wiki_q(self): + """Load conversion table to Wikidata Q-codes (only if needed)""" + with self.open("cldr/tools-cldr-rdf-external-entityToCode.tsv") as fp: + self.wiki_q = self.wiki_dict(fp.read().strip().split("\n")[1:]) + + def wiki_dict(self, lines): + """Convert Wikidata list of Q-codes to a BCP-47 dictionary""" + return { + pair[1]: pair[0].split("/")[-1] + for pair in [line.strip().split("\t") for line in lines] + } + + def subdiv_dict(self, subdivs): + """Convert the CLDR subdivisions list to a dictionary""" + return {sub.attrib["type"]: sub.text for sub in subdivs} + + def morphology(self): + self.casing = { + "language": str.lower, + "extlang": str.lower, + "script": str.title, + "region": str.upper, + "variant": str.lower, + } + dig = "[0-9]" + low = "[a-z]" + up = "[A-Z]" + alnum = "[a-zA-Z0-9]" + self.format = { + "language": re.compile(f"{low*3}?"), + "extlang": re.compile(f"{low*3}"), + "script": re.compile(f"{up}{low*3}"), + "region": re.compile(f"({up*2})|({dig*3})"), + "variant": re.compile(f"{alnum*4}{(alnum+'?')*4}"), + "singleton": re.compile(f"{low}"), + } + + def data_dict(self, records): + """Convert the BCP-47 language subtag registry to a dictionary""" + self.version = records[0].replace("File-Date:", "").strip() + dic = {} + dic["deprecated"] = {} + for label in [ + "language", + "extlang", + "script", + "region", + "variant", + "redundant", + "grandfathered", + ]: + dic["deprecated"][label] = {} + for record in records[1:]: + fields = [field.split(": ") for field in record.strip().split("\n")] + typ = fields[0][1] + tag = fields[1][1] + if typ not in dic: + dic[typ] = {} + subfields = {} + for field in fields[2:]: + if len(field) == 2: + [key, val] = field + if key not in subfields: + subfields[key] = [val] + else: # multiple value + subfields[key].append(val) + else: # multiline field + subfields[key][-1] += " " + field[0].strip() + if ( + "Deprecated" not in record + and typ == "language" + and key == "Description" + ): + self.langcode[subfields[key][-1]] = tag + for key in subfields: + if len(subfields[key]) == 1: # single value + subfields[key] = subfields[key][0] + if "Deprecated" in record: + dic["deprecated"][typ][tag] = subfields + else: + dic[typ][tag] = subfields + return dic + + def val2str(self, val): + """Return only first value""" + if type(val) == list: + # val = "/".join(val) # Concatenate all values + val = val[0] + return val + + def lang2str(self, lg_record): + """Concatenate subtag values""" + name = f"{lg_record['language']}" + for label in ["extlang", "script", "region", "variant", "extension"]: + if label in lg_record: + name += f": {lg_record[label]}" + return name + + def parse_tag(self, tag): + """Convert a BCP-47 tag to a dictionary of labelled subtags""" + subtags = tag.split("-") + lang = {} + labels = ["language", "extlang", "script", "region", "variant", "variant"] + while subtags and labels: + subtag = subtags.pop(0) + found = False + while labels: + label = labels.pop(0) + subtag = self.casing[label](subtag) + if self.format[label].fullmatch(subtag): + if subtag in self.db[label]: + found = True + valstr = self.val2str(self.db[label][subtag]["Description"]) + if label == "variant" and label in lang: + lang[label] += ": " + valstr + else: + lang[label] = valstr + break + elif subtag in self.db["deprecated"][label]: + found = True + note = f"The {subtag!r} {label} code is deprecated" + if 
"Preferred-Value" in self.db["deprecated"][label][subtag]: + prefer = self.db["deprecated"][label][subtag][ + "Preferred-Value" + ] + note += f"', prefer '{self.val2str(prefer)}'" + lang[label] = self.val2str( + self.db["deprecated"][label][subtag]["Description"] + ) + warn(note) + break + if not found: + if subtag == "u" and subtags[0] == "sd": # CLDR regional subdivisions + sd = subtags[1] + if sd in self.subdiv: + ext = self.subdiv[sd] + else: + ext = f"" + else: # other extension subtags are not supported yet + ext = f"{subtag}{''.join(['-'+ext for ext in subtags])}".lower() + if not self.format["singleton"].fullmatch(subtag): + ext = f"" + warn(ext) + lang["extension"] = ext + subtags = [] + return lang + + def name(self, tag): + """ + Convert a BCP-47 tag to a colon-separated string of subtag names + + >>> from nltk.corpus import bcp47 + >>> bcp47.name('ca-Latn-ES-valencia') + 'Catalan: Latin: Spain: Valencian' + + """ + for label in ["redundant", "grandfathered"]: + val = None + if tag in self.db[label]: + val = f"{self.db[label][tag]['Description']}" + note = f"The {tag!r} code is {label}" + elif tag in self.db["deprecated"][label]: + val = f"{self.db['deprecated'][label][tag]['Description']}" + note = f"The {tag!r} code is {label} and deprecated" + if "Preferred-Value" in self.db["deprecated"][label][tag]: + prefer = self.db["deprecated"][label][tag]["Preferred-Value"] + note += f", prefer {self.val2str(prefer)!r}" + if val: + warn(note) + return val + try: + return self.lang2str(self.parse_tag(tag)) + except: + warn(f"Tag {tag!r} was not recognized") + return None diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py new file mode 100644 index 0000000000000000000000000000000000000000..e7128bf843b5c24a59b10d8a0cf1f689592bae52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py @@ -0,0 +1,265 @@ +# Natural Language Toolkit: Plaintext Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +"""Corpus reader for the XML version of the British National Corpus.""" + +from nltk.corpus.reader.util import concat +from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader, XMLCorpusView + + +class BNCCorpusReader(XMLCorpusReader): + r"""Corpus reader for the XML version of the British National Corpus. + + For access to the complete XML data structure, use the ``xml()`` + method. For access to simple word lists and tagged word lists, use + ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``. + + You can obtain the full version of the BNC corpus at + https://www.ota.ox.ac.uk/desc/2554 + + If you extracted the archive to a directory called `BNC`, then you can + instantiate the reader as:: + + BNCCorpusReader(root='BNC/Texts/', fileids=r'[A-K]/\w*/\w*\.xml') + + """ + + def __init__(self, root, fileids, lazy=True): + XMLCorpusReader.__init__(self, root, fileids) + self._lazy = lazy + + def words(self, fileids=None, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. 
+ """ + return self._views(fileids, False, None, strip_space, stem) + + def tagged_words(self, fileids=None, c5=False, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. + :rtype: list(tuple(str,str)) + + :param c5: If true, then the tags used will be the more detailed + c5 tags. Otherwise, the simplified tags will be used. + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. + """ + tag = "c5" if c5 else "pos" + return self._views(fileids, False, tag, strip_space, stem) + + def sents(self, fileids=None, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. + """ + return self._views(fileids, True, None, strip_space, stem) + + def tagged_sents(self, fileids=None, c5=False, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. + :rtype: list(list(tuple(str,str))) + + :param c5: If true, then the tags used will be the more detailed + c5 tags. Otherwise, the simplified tags will be used. + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. + """ + tag = "c5" if c5 else "pos" + return self._views( + fileids, sent=True, tag=tag, strip_space=strip_space, stem=stem + ) + + def _views(self, fileids=None, sent=False, tag=False, strip_space=True, stem=False): + """A helper function that instantiates BNCWordViews or the list of words/sentences.""" + f = BNCWordView if self._lazy else self._words + return concat( + [ + f(fileid, sent, tag, strip_space, stem) + for fileid in self.abspaths(fileids) + ] + ) + + def _words(self, fileid, bracket_sent, tag, strip_space, stem): + """ + Helper used to implement the view methods -- returns a list of + words or a list of sentences, optionally tagged. + + :param fileid: The name of the underlying file. + :param bracket_sent: If true, include sentence bracketing. + :param tag: The name of the tagset to use, or None for no tags. + :param strip_space: If true, strip spaces from word tokens. + :param stem: If true, then substitute stems for words. + """ + result = [] + + xmldoc = ElementTree.parse(fileid).getroot() + for xmlsent in xmldoc.findall(".//s"): + sent = [] + for xmlword in _all_xmlwords_in(xmlsent): + word = xmlword.text + if not word: + word = "" # fixes issue 337? 
+ if strip_space or stem: + word = word.strip() + if stem: + word = xmlword.get("hw", word) + if tag == "c5": + word = (word, xmlword.get("c5")) + elif tag == "pos": + word = (word, xmlword.get("pos", xmlword.get("c5"))) + sent.append(word) + if bracket_sent: + result.append(BNCSentence(xmlsent.attrib["n"], sent)) + else: + result.extend(sent) + + assert None not in result + return result + + +def _all_xmlwords_in(elt, result=None): + if result is None: + result = [] + for child in elt: + if child.tag in ("c", "w"): + result.append(child) + else: + _all_xmlwords_in(child, result) + return result + + +class BNCSentence(list): + """ + A list of words, augmented by an attribute ``num`` used to record + the sentence identifier (the ``n`` attribute from the XML). + """ + + def __init__(self, num, items): + self.num = num + list.__init__(self, items) + + +class BNCWordView(XMLCorpusView): + """ + A stream backed corpus view specialized for use with the BNC corpus. + """ + + tags_to_ignore = { + "pb", + "gap", + "vocal", + "event", + "unclear", + "shift", + "pause", + "align", + } + """These tags are ignored. For their description refer to the + technical documentation, for example, + http://www.natcorp.ox.ac.uk/docs/URG/ref-vocal.html + + """ + + def __init__(self, fileid, sent, tag, strip_space, stem): + """ + :param fileid: The name of the underlying file. + :param sent: If true, include sentence bracketing. + :param tag: The name of the tagset to use, or None for no tags. + :param strip_space: If true, strip spaces from word tokens. + :param stem: If true, then substitute stems for words. + """ + if sent: + tagspec = ".*/s" + else: + tagspec = ".*/s/(.*/)?(c|w)" + self._sent = sent + self._tag = tag + self._strip_space = strip_space + self._stem = stem + + self.title = None #: Title of the document. + self.author = None #: Author of the document. + self.editor = None #: Editor + self.resps = None #: Statement of responsibility + + XMLCorpusView.__init__(self, fileid, tagspec) + + # Read in a tasty header. + self._open() + self.read_block(self._stream, ".*/teiHeader$", self.handle_header) + self.close() + + # Reset tag context. + self._tag_context = {0: ()} + + def handle_header(self, elt, context): + # Set up some metadata! + titles = elt.findall("titleStmt/title") + if titles: + self.title = "\n".join(title.text.strip() for title in titles) + + authors = elt.findall("titleStmt/author") + if authors: + self.author = "\n".join(author.text.strip() for author in authors) + + editors = elt.findall("titleStmt/editor") + if editors: + self.editor = "\n".join(editor.text.strip() for editor in editors) + + resps = elt.findall("titleStmt/respStmt") + if resps: + self.resps = "\n\n".join( + "\n".join(resp_elt.text.strip() for resp_elt in resp) for resp in resps + ) + + def handle_elt(self, elt, context): + if self._sent: + return self.handle_sent(elt) + else: + return self.handle_word(elt) + + def handle_word(self, elt): + word = elt.text + if not word: + word = "" # fixes issue 337? 
+ if self._strip_space or self._stem: + word = word.strip() + if self._stem: + word = elt.get("hw", word) + if self._tag == "c5": + word = (word, elt.get("c5")) + elif self._tag == "pos": + word = (word, elt.get("pos", elt.get("c5"))) + return word + + def handle_sent(self, elt): + sent = [] + for child in elt: + if child.tag in ("mw", "hi", "corr", "trunc"): + sent += [self.handle_word(w) for w in child] + elif child.tag in ("w", "c"): + sent.append(self.handle_word(child)) + elif child.tag not in self.tags_to_ignore: + raise ValueError("Unexpected element %s" % child.tag) + return BNCSentence(elt.attrib["n"], sent) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py new file mode 100644 index 0000000000000000000000000000000000000000..ef6ab8146619bbdc0f448f9771269ab7d3ee5451 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py @@ -0,0 +1,158 @@ +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Masato Hagiwara +# URL: +# For license information, see LICENSE.TXT + +import sys + +from nltk.corpus.reader import util +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * + + +class ChasenCorpusReader(CorpusReader): + def __init__(self, root, fileids, encoding="utf8", sent_splitter=None): + self._sent_splitter = sent_splitter + CorpusReader.__init__(self, root, fileids, encoding) + + def words(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, False, False, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, True, False, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, False, True, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, True, True, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, False, True, True, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_paras(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, True, True, True, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class ChasenCorpusView(StreamBackedCorpusView): + """ + A specialized corpus view for ChasenReader. Similar to ``TaggedCorpusView``, + but this'll use fixed sets of word and sentence tokenizer. 
+ """ + + def __init__( + self, + corpus_file, + encoding, + tagged, + group_by_sent, + group_by_para, + sent_splitter=None, + ): + self._tagged = tagged + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + self._sent_splitter = sent_splitter + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + """Reads one paragraph at a time.""" + block = [] + for para_str in read_regexp_block(stream, r".", r"^EOS\n"): + + para = [] + + sent = [] + for line in para_str.splitlines(): + + _eos = line.strip() == "EOS" + _cells = line.split("\t") + w = (_cells[0], "\t".join(_cells[1:])) + if not _eos: + sent.append(w) + + if _eos or (self._sent_splitter and self._sent_splitter(w)): + if not self._tagged: + sent = [w for (w, t) in sent] + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + sent = [] + + if len(sent) > 0: + if not self._tagged: + sent = [w for (w, t) in sent] + + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + + if self._group_by_para: + block.append(para) + else: + block.extend(para) + + return block + + +def demo(): + + import nltk + from nltk.corpus.util import LazyCorpusLoader + + jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8") + print("/".join(jeita.words()[22100:22140])) + + print( + "\nEOS\n".join( + "\n".join("{}/{}".format(w[0], w[1].split("\t")[2]) for w in sent) + for sent in jeita.tagged_sents()[2170:2173] + ) + ) + + +def test(): + + from nltk.corpus.util import LazyCorpusLoader + + jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8") + + assert isinstance(jeita.tagged_words()[0][1], str) + + +if __name__ == "__main__": + demo() + test() diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/childes.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/childes.py new file mode 100644 index 0000000000000000000000000000000000000000..115ccfb927f7bb4d217670f0cd52a55d64563e9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/childes.py @@ -0,0 +1,630 @@ +# CHILDES XML Corpus Reader + +# Copyright (C) 2001-2023 NLTK Project +# Author: Tomonori Nagano +# Alexis Dimitriadis +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the XML version of the CHILDES corpus. +""" + +__docformat__ = "epytext en" + +import re +from collections import defaultdict + +from nltk.corpus.reader.util import concat +from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader +from nltk.util import LazyConcatenation, LazyMap, flatten + +# to resolve the namespace issue +NS = "http://www.talkbank.org/ns/talkbank" + + +class CHILDESCorpusReader(XMLCorpusReader): + """ + Corpus reader for the XML version of the CHILDES corpus. + The CHILDES corpus is available at ``https://childes.talkbank.org/``. The XML + version of CHILDES is located at ``https://childes.talkbank.org/data-xml/``. + Copy the needed parts of the CHILDES XML corpus into the NLTK data directory + (``nltk_data/corpora/CHILDES/``). + + For access to the file text use the usual nltk functions, + ``words()``, ``sents()``, ``tagged_words()`` and ``tagged_sents()``. 
+ """ + + def __init__(self, root, fileids, lazy=True): + XMLCorpusReader.__init__(self, root, fileids) + self._lazy = lazy + + def words( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=False, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of words + :rtype: list(str) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of (stem, index, + dependent_index) + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. + :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = None + pos = False + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def tagged_words( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=False, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. + :rtype: list(tuple(str,str)) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of (stem, index, + dependent_index) + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. + :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = None + pos = True + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def sents( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=None, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of sentences or utterances, each + encoded as a list of word strings. + :rtype: list(list(str)) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of ``(str,pos,relation_list)``. + If there is manually-annotated relation info, it will return + tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)`` + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. 
+ :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = True + pos = False + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def tagged_sents( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=None, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. + :rtype: list(list(tuple(str,str))) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of ``(str,pos,relation_list)``. + If there is manually-annotated relation info, it will return + tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)`` + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. + :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = True + pos = True + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def corpus(self, fileids=None): + """ + :return: the given file(s) as a dict of ``(corpus_property_key, value)`` + :rtype: list(dict) + """ + if not self._lazy: + return [self._get_corpus(fileid) for fileid in self.abspaths(fileids)] + return LazyMap(self._get_corpus, self.abspaths(fileids)) + + def _get_corpus(self, fileid): + results = dict() + xmldoc = ElementTree.parse(fileid).getroot() + for key, value in xmldoc.items(): + results[key] = value + return results + + def participants(self, fileids=None): + """ + :return: the given file(s) as a dict of + ``(participant_property_key, value)`` + :rtype: list(dict) + """ + if not self._lazy: + return [self._get_participants(fileid) for fileid in self.abspaths(fileids)] + return LazyMap(self._get_participants, self.abspaths(fileids)) + + def _get_participants(self, fileid): + # multidimensional dicts + def dictOfDicts(): + return defaultdict(dictOfDicts) + + xmldoc = ElementTree.parse(fileid).getroot() + # getting participants' data + pat = dictOfDicts() + for participant in xmldoc.findall( + f".//{{{NS}}}Participants/{{{NS}}}participant" + ): + for (key, value) in participant.items(): + pat[participant.get("id")][key] = value + return pat + + def age(self, fileids=None, speaker="CHI", month=False): + """ + :return: the given file(s) as string or int + :rtype: list or int + + :param month: If true, return months instead of year-month-date + """ + if not self._lazy: + return [ + self._get_age(fileid, speaker, month) + for fileid in self.abspaths(fileids) + ] + get_age = lambda fileid: self._get_age(fileid, speaker, 
month) + return LazyMap(get_age, self.abspaths(fileids)) + + def _get_age(self, fileid, speaker, month): + xmldoc = ElementTree.parse(fileid).getroot() + for pat in xmldoc.findall(f".//{{{NS}}}Participants/{{{NS}}}participant"): + try: + if pat.get("id") == speaker: + age = pat.get("age") + if month: + age = self.convert_age(age) + return age + # some files don't have age data + except (TypeError, AttributeError) as e: + return None + + def convert_age(self, age_year): + "Caclculate age in months from a string in CHILDES format" + m = re.match(r"P(\d+)Y(\d+)M?(\d?\d?)D?", age_year) + age_month = int(m.group(1)) * 12 + int(m.group(2)) + try: + if int(m.group(3)) > 15: + age_month += 1 + # some corpora don't have age information? + except ValueError as e: + pass + return age_month + + def MLU(self, fileids=None, speaker="CHI"): + """ + :return: the given file(s) as a floating number + :rtype: list(float) + """ + if not self._lazy: + return [ + self._getMLU(fileid, speaker=speaker) + for fileid in self.abspaths(fileids) + ] + get_MLU = lambda fileid: self._getMLU(fileid, speaker=speaker) + return LazyMap(get_MLU, self.abspaths(fileids)) + + def _getMLU(self, fileid, speaker): + sents = self._get_words( + fileid, + speaker=speaker, + sent=True, + stem=True, + relation=False, + pos=True, + strip_space=True, + replace=True, + ) + results = [] + lastSent = [] + numFillers = 0 + sentDiscount = 0 + for sent in sents: + posList = [pos for (word, pos) in sent] + # if any part of the sentence is intelligible + if any(pos == "unk" for pos in posList): + continue + # if the sentence is null + elif sent == []: + continue + # if the sentence is the same as the last sent + elif sent == lastSent: + continue + else: + results.append([word for (word, pos) in sent]) + # count number of fillers + if len({"co", None}.intersection(posList)) > 0: + numFillers += posList.count("co") + numFillers += posList.count(None) + sentDiscount += 1 + lastSent = sent + try: + thisWordList = flatten(results) + # count number of morphemes + # (e.g., 'read' = 1 morpheme but 'read-PAST' is 2 morphemes) + numWords = ( + len(flatten([word.split("-") for word in thisWordList])) - numFillers + ) + numSents = len(results) - sentDiscount + mlu = numWords / numSents + except ZeroDivisionError: + mlu = 0 + # return {'mlu':mlu,'wordNum':numWords,'sentNum':numSents} + return mlu + + def _get_words( + self, fileid, speaker, sent, stem, relation, pos, strip_space, replace + ): + if ( + isinstance(speaker, str) and speaker != "ALL" + ): # ensure we have a list of speakers + speaker = [speaker] + xmldoc = ElementTree.parse(fileid).getroot() + # processing each xml doc + results = [] + for xmlsent in xmldoc.findall(".//{%s}u" % NS): + sents = [] + # select speakers + if speaker == "ALL" or xmlsent.get("who") in speaker: + for xmlword in xmlsent.findall(".//{%s}w" % NS): + infl = None + suffixStem = None + suffixTag = None + # getting replaced words + if replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}replacement"): + xmlword = xmlsent.find( + f".//{{{NS}}}w/{{{NS}}}replacement/{{{NS}}}w" + ) + elif replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk"): + xmlword = xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk") + # get text + if xmlword.text: + word = xmlword.text + else: + word = "" + # strip tailing space + if strip_space: + word = word.strip() + # stem + if relation or stem: + try: + xmlstem = xmlword.find(".//{%s}stem" % NS) + word = xmlstem.text + except AttributeError as e: + pass + # if there is an inflection + try: + xmlinfl = xmlword.find( + 
f".//{{{NS}}}mor/{{{NS}}}mw/{{{NS}}}mk" + ) + word += "-" + xmlinfl.text + except: + pass + # if there is a suffix + try: + xmlsuffix = xmlword.find( + ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}stem" + % (NS, NS, NS, NS) + ) + suffixStem = xmlsuffix.text + except AttributeError: + suffixStem = "" + if suffixStem: + word += "~" + suffixStem + # pos + if relation or pos: + try: + xmlpos = xmlword.findall(".//{%s}c" % NS) + xmlpos2 = xmlword.findall(".//{%s}s" % NS) + if xmlpos2 != []: + tag = xmlpos[0].text + ":" + xmlpos2[0].text + else: + tag = xmlpos[0].text + except (AttributeError, IndexError) as e: + tag = "" + try: + xmlsuffixpos = xmlword.findall( + ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}c" + % (NS, NS, NS, NS, NS) + ) + xmlsuffixpos2 = xmlword.findall( + ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}s" + % (NS, NS, NS, NS, NS) + ) + if xmlsuffixpos2: + suffixTag = ( + xmlsuffixpos[0].text + ":" + xmlsuffixpos2[0].text + ) + else: + suffixTag = xmlsuffixpos[0].text + except: + pass + if suffixTag: + tag += "~" + suffixTag + word = (word, tag) + # relational + # the gold standard is stored in + # + if relation == True: + for xmlstem_rel in xmlword.findall( + f".//{{{NS}}}mor/{{{NS}}}gra" + ): + if not xmlstem_rel.get("type") == "grt": + word = ( + word[0], + word[1], + xmlstem_rel.get("index") + + "|" + + xmlstem_rel.get("head") + + "|" + + xmlstem_rel.get("relation"), + ) + else: + word = ( + word[0], + word[1], + word[2], + word[0], + word[1], + xmlstem_rel.get("index") + + "|" + + xmlstem_rel.get("head") + + "|" + + xmlstem_rel.get("relation"), + ) + try: + for xmlpost_rel in xmlword.findall( + f".//{{{NS}}}mor/{{{NS}}}mor-post/{{{NS}}}gra" + ): + if not xmlpost_rel.get("type") == "grt": + suffixStem = ( + suffixStem[0], + suffixStem[1], + xmlpost_rel.get("index") + + "|" + + xmlpost_rel.get("head") + + "|" + + xmlpost_rel.get("relation"), + ) + else: + suffixStem = ( + suffixStem[0], + suffixStem[1], + suffixStem[2], + suffixStem[0], + suffixStem[1], + xmlpost_rel.get("index") + + "|" + + xmlpost_rel.get("head") + + "|" + + xmlpost_rel.get("relation"), + ) + except: + pass + sents.append(word) + if sent or relation: + results.append(sents) + else: + results.extend(sents) + return LazyMap(lambda x: x, results) + + # Ready-to-use browser opener + + """ + The base URL for viewing files on the childes website. This + shouldn't need to be changed, unless CHILDES changes the configuration + of their server or unless the user sets up their own corpus webserver. + """ + childes_url_base = r"https://childes.talkbank.org/browser/index.php?url=" + + def webview_file(self, fileid, urlbase=None): + """Map a corpus file to its web version on the CHILDES website, + and open it in a web browser. + + The complete URL to be used is: + childes.childes_url_base + urlbase + fileid.replace('.xml', '.cha') + + If no urlbase is passed, we try to calculate it. This + requires that the childes corpus was set up to mirror the + folder hierarchy under childes.psy.cmu.edu/data-xml/, e.g.: + nltk_data/corpora/childes/Eng-USA/Cornell/??? or + nltk_data/corpora/childes/Romance/Spanish/Aguirre/??? + + The function first looks (as a special case) if "Eng-USA" is + on the path consisting of +fileid; then if + "childes", possibly followed by "data-xml", appears. If neither + one is found, we use the unmodified fileid and hope for the best. + If this is not right, specify urlbase explicitly, e.g., if the + corpus root points to the Cornell folder, urlbase='Eng-USA/Cornell'. 
+ """ + + import webbrowser + + if urlbase: + path = urlbase + "/" + fileid + else: + full = self.root + "/" + fileid + full = re.sub(r"\\", "/", full) + if "/childes/" in full.lower(): + # Discard /data-xml/ if present + path = re.findall(r"(?i)/childes(?:/data-xml)?/(.*)\.xml", full)[0] + elif "eng-usa" in full.lower(): + path = "Eng-USA/" + re.findall(r"/(?i)Eng-USA/(.*)\.xml", full)[0] + else: + path = fileid + + # Strip ".xml" and add ".cha", as necessary: + if path.endswith(".xml"): + path = path[:-4] + + if not path.endswith(".cha"): + path = path + ".cha" + + url = self.childes_url_base + path + + webbrowser.open_new_tab(url) + print("Opening in browser:", url) + # Pausing is a good idea, but it's up to the user... + # raw_input("Hit Return to continue") + + +def demo(corpus_root=None): + """ + The CHILDES corpus should be manually downloaded and saved + to ``[NLTK_Data_Dir]/corpora/childes/`` + """ + if not corpus_root: + from nltk.data import find + + corpus_root = find("corpora/childes/data-xml/Eng-USA/") + + try: + childes = CHILDESCorpusReader(corpus_root, ".*.xml") + # describe all corpus + for file in childes.fileids()[:5]: + corpus = "" + corpus_id = "" + for (key, value) in childes.corpus(file)[0].items(): + if key == "Corpus": + corpus = value + if key == "Id": + corpus_id = value + print("Reading", corpus, corpus_id, " .....") + print("words:", childes.words(file)[:7], "...") + print( + "words with replaced words:", + childes.words(file, replace=True)[:7], + " ...", + ) + print("words with pos tags:", childes.tagged_words(file)[:7], " ...") + print("words (only MOT):", childes.words(file, speaker="MOT")[:7], "...") + print("words (only CHI):", childes.words(file, speaker="CHI")[:7], "...") + print("stemmed words:", childes.words(file, stem=True)[:7], " ...") + print( + "words with relations and pos-tag:", + childes.words(file, relation=True)[:5], + " ...", + ) + print("sentence:", childes.sents(file)[:2], " ...") + for (participant, values) in childes.participants(file)[0].items(): + for (key, value) in values.items(): + print("\tparticipant", participant, key, ":", value) + print("num of sent:", len(childes.sents(file))) + print("num of morphemes:", len(childes.words(file, stem=True))) + print("age:", childes.age(file)) + print("age in month:", childes.age(file, month=True)) + print("MLU:", childes.MLU(file)) + print() + + except LookupError as e: + print( + """The CHILDES corpus, or the parts you need, should be manually + downloaded from https://childes.talkbank.org/data-xml/ and saved at + [NLTK_Data_Dir]/corpora/childes/ + Alternately, you can call the demo with the path to a portion of the CHILDES corpus, e.g.: + demo('/path/to/childes/data-xml/Eng-USA/") + """ + ) + # corpus_root_http = urllib2.urlopen('https://childes.talkbank.org/data-xml/Eng-USA/Bates.zip') + # corpus_root_http_bates = zipfile.ZipFile(cStringIO.StringIO(corpus_root_http.read())) + ##this fails + # childes = CHILDESCorpusReader(corpus_root_http_bates,corpus_root_http_bates.namelist()) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py new file mode 100644 index 0000000000000000000000000000000000000000..66b42e79ca134227357aba4cb493335196e05961 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py @@ -0,0 +1,273 @@ +# Natural Language Toolkit: Chunked Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# 
Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A reader for corpora that contain chunked (and optionally tagged) +documents. +""" + +import codecs +import os.path + +import nltk +from nltk.chunk import tagstr2tree +from nltk.corpus.reader.api import * +from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader +from nltk.corpus.reader.util import * +from nltk.tokenize import * +from nltk.tree import Tree + + +class ChunkedCorpusReader(CorpusReader): + """ + Reader for chunked (and optionally tagged) corpora. Paragraphs + are split using a block reader. They are then tokenized into + sentences using a sentence tokenizer. Finally, these sentences + are parsed into chunk trees using a string-to-chunktree conversion + function. Each of these steps can be performed using a default + function or a custom function. By default, paragraphs are split + on blank lines; sentences are listed one per line; and sentences + are parsed into chunk trees using ``nltk.chunk.tagstr2tree``. + """ + + def __init__( + self, + root, + fileids, + extension="", + str2chunktree=tagstr2tree, + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + para_block_reader=read_blankline_block, + encoding="utf8", + tagset=None, + ): + """ + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader, tagset) + """Arguments for corpus views generated by this corpus: a tuple + (str2chunktree, sent_tokenizer, para_block_tokenizer)""" + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + """ + return concat( + [ + ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of word strings. + :rtype: list(list(list(str))) + """ + return concat( + [ + ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. + :rtype: list(tuple(str,str)) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 0, 0, 0, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. + + :rtype: list(list(tuple(str,str))) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 0, 0, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_paras(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of ``(word,tag)`` tuples. 
+ :rtype: list(list(list(tuple(str,str)))) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 1, 0, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def chunked_words(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of tagged + words and chunks. Words are encoded as ``(word, tag)`` + tuples (if the corpus has tags) or word strings (if the + corpus has no tags). Chunks are encoded as depth-one + trees over ``(word,tag)`` tuples or word strings. + :rtype: list(tuple(str,str) and Tree) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 0, 0, 1, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def chunked_sents(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + sentences, each encoded as a shallow Tree. The leaves + of these trees are encoded as ``(word, tag)`` tuples (if + the corpus has tags) or word strings (if the corpus has no + tags). + :rtype: list(Tree) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 0, 1, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def chunked_paras(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as a shallow Tree. The leaves of these + trees are encoded as ``(word, tag)`` tuples (if the corpus + has tags) or word strings (if the corpus has no tags). + :rtype: list(list(Tree)) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 1, 1, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_block(self, stream): + return [tagstr2tree(t) for t in read_blankline_block(stream)] + + +class ChunkedCorpusView(StreamBackedCorpusView): + def __init__( + self, + fileid, + encoding, + tagged, + group_by_sent, + group_by_para, + chunked, + str2chunktree, + sent_tokenizer, + para_block_reader, + source_tagset=None, + target_tagset=None, + ): + StreamBackedCorpusView.__init__(self, fileid, encoding=encoding) + self._tagged = tagged + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + self._chunked = chunked + self._str2chunktree = str2chunktree + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + self._source_tagset = source_tagset + self._target_tagset = target_tagset + + def read_block(self, stream): + block = [] + for para_str in self._para_block_reader(stream): + para = [] + for sent_str in self._sent_tokenizer.tokenize(para_str): + sent = self._str2chunktree( + sent_str, + source_tagset=self._source_tagset, + target_tagset=self._target_tagset, + ) + + # If requested, throw away the tags. + if not self._tagged: + sent = self._untag(sent) + + # If requested, throw away the chunks. + if not self._chunked: + sent = sent.leaves() + + # Add the sentence to `para`. + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + + # Add the paragraph to `block`. 
+ if self._group_by_para: + block.append(para) + else: + block.extend(para) + + # Return the block + return block + + def _untag(self, tree): + for i, child in enumerate(tree): + if isinstance(child, Tree): + self._untag(child) + elif isinstance(child, tuple): + tree[i] = child[0] + else: + raise ValueError("expected child to be Tree or tuple") + return tree diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py new file mode 100644 index 0000000000000000000000000000000000000000..7328ca3239c6e746d328d5706dc05a09af918c14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/cmudict.py @@ -0,0 +1,88 @@ +# Natural Language Toolkit: Carnegie Mellon Pronouncing Dictionary Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +The Carnegie Mellon Pronouncing Dictionary [cmudict.0.6] +ftp://ftp.cs.cmu.edu/project/speech/dict/ +Copyright 1998 Carnegie Mellon University + +File Format: Each line consists of an uppercased word, a counter +(for alternative pronunciations), and a transcription. Vowels are +marked for stress (1=primary, 2=secondary, 0=no stress). E.g.: +NATURAL 1 N AE1 CH ER0 AH0 L + +The dictionary contains 127069 entries. Of these, 119400 words are assigned +a unique pronunciation, 6830 words have two pronunciations, and 839 words have +three or more pronunciations. Many of these are fast-speech variants. + +Phonemes: There are 39 phonemes, as shown below: + +Phoneme Example Translation Phoneme Example Translation +------- ------- ----------- ------- ------- ----------- +AA odd AA D AE at AE T +AH hut HH AH T AO ought AO T +AW cow K AW AY hide HH AY D +B be B IY CH cheese CH IY Z +D dee D IY DH thee DH IY +EH Ed EH D ER hurt HH ER T +EY ate EY T F fee F IY +G green G R IY N HH he HH IY +IH it IH T IY eat IY T +JH gee JH IY K key K IY +L lee L IY M me M IY +N knee N IY NG ping P IH NG +OW oat OW T OY toy T OY +P pee P IY R read R IY D +S sea S IY SH she SH IY +T tea T IY TH theta TH EY T AH +UH hood HH UH D UW two T UW +V vee V IY W we W IY +Y yield Y IY L D Z zee Z IY +ZH seizure S IY ZH ER +""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.util import Index + + +class CMUDictCorpusReader(CorpusReader): + def entries(self): + """ + :return: the cmudict lexicon as a list of entries + containing (word, transcriptions) tuples. + """ + return concat( + [ + StreamBackedCorpusView(fileid, read_cmudict_block, encoding=enc) + for fileid, enc in self.abspaths(None, True) + ] + ) + + def words(self): + """ + :return: a list of all words defined in the cmudict lexicon. + """ + return [word.lower() for (word, _) in self.entries()] + + def dict(self): + """ + :return: the cmudict lexicon as a dictionary, whose keys are + lowercase words and whose values are lists of pronunciations. + """ + return dict(Index(self.entries())) + + +def read_cmudict_block(stream): + entries = [] + while len(entries) < 100: # Read 100 at a time. + line = stream.readline() + if line == "": + return entries # end of file. 
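+        # Per the module docstring each line is "WORD COUNTER PHONEME...", e.g.
+        # "NATURAL 1 N AE1 CH ER0 AH0 L"; keep the lowercased word plus the
+        # phoneme list that follows the counter (pieces[2:]).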
+ pieces = line.split() + entries.append((pieces[0].lower(), pieces[2:])) + return entries diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py new file mode 100644 index 0000000000000000000000000000000000000000..032ce82c3b2a6a4011c9b1637b882db2df1bcd55 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py @@ -0,0 +1,309 @@ +# Natural Language Toolkit: Comparative Sentence Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for the Comparative Sentence Dataset. + +- Comparative Sentence Dataset information - + +Annotated by: Nitin Jindal and Bing Liu, 2006. + Department of Computer Sicence + University of Illinois at Chicago + +Contact: Nitin Jindal, njindal@cs.uic.edu + Bing Liu, liub@cs.uic.edu (https://www.cs.uic.edu/~liub) + +Distributed with permission. + +Related papers: + +- Nitin Jindal and Bing Liu. "Identifying Comparative Sentences in Text Documents". + Proceedings of the ACM SIGIR International Conference on Information Retrieval + (SIGIR-06), 2006. + +- Nitin Jindal and Bing Liu. "Mining Comprative Sentences and Relations". + Proceedings of Twenty First National Conference on Artificial Intelligence + (AAAI-2006), 2006. + +- Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences". + Proceedings of the 22nd International Conference on Computational Linguistics + (Coling-2008), Manchester, 18-22 August, 2008. +""" +import re + +from nltk.corpus.reader.api import * +from nltk.tokenize import * + +# Regular expressions for dataset components +STARS = re.compile(r"^\*+$") +COMPARISON = re.compile(r"") +CLOSE_COMPARISON = re.compile(r"") +GRAD_COMPARISON = re.compile(r"") +NON_GRAD_COMPARISON = re.compile(r"") +ENTITIES_FEATS = re.compile(r"(\d)_((?:[\.\w\s/-](?!\d_))+)") +KEYWORD = re.compile(r"\(([^\(]*)\)$") + + +class Comparison: + """ + A Comparison represents a comparative sentence and its constituents. + """ + + def __init__( + self, + text=None, + comp_type=None, + entity_1=None, + entity_2=None, + feature=None, + keyword=None, + ): + """ + :param text: a string (optionally tokenized) containing a comparison. + :param comp_type: an integer defining the type of comparison expressed. + Values can be: 1 (Non-equal gradable), 2 (Equative), 3 (Superlative), + 4 (Non-gradable). + :param entity_1: the first entity considered in the comparison relation. + :param entity_2: the second entity considered in the comparison relation. + :param feature: the feature considered in the comparison relation. + :param keyword: the word or phrase which is used for that comparative relation. + """ + self.text = text + self.comp_type = comp_type + self.entity_1 = entity_1 + self.entity_2 = entity_2 + self.feature = feature + self.keyword = keyword + + def __repr__(self): + return ( + 'Comparison(text="{}", comp_type={}, entity_1="{}", entity_2="{}", ' + 'feature="{}", keyword="{}")' + ).format( + self.text, + self.comp_type, + self.entity_1, + self.entity_2, + self.feature, + self.keyword, + ) + + +class ComparativeSentencesCorpusReader(CorpusReader): + """ + Reader for the Comparative Sentence Dataset by Jindal and Liu (2006). 
+ + >>> from nltk.corpus import comparative_sentences + >>> comparison = comparative_sentences.comparisons()[0] + >>> comparison.text # doctest: +NORMALIZE_WHITESPACE + ['its', 'fast-forward', 'and', 'rewind', 'work', 'much', 'more', 'smoothly', + 'and', 'consistently', 'than', 'those', 'of', 'other', 'models', 'i', "'ve", + 'had', '.'] + >>> comparison.entity_2 + 'models' + >>> (comparison.feature, comparison.keyword) + ('rewind', 'more') + >>> len(comparative_sentences.comparisons()) + 853 + """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, + root, + fileids, + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=None, + encoding="utf8", + ): + """ + :param root: The root directory for this corpus. + :param fileids: a list or regexp specifying the fileids in this corpus. + :param word_tokenizer: tokenizer for breaking sentences or paragraphs + into words. Default: `WhitespaceTokenizer` + :param sent_tokenizer: tokenizer for breaking paragraphs into sentences. + :param encoding: the encoding that should be used to read the corpus. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._readme = "README.txt" + + def comparisons(self, fileids=None): + """ + Return all comparisons in the corpus. + + :param fileids: a list or regexp specifying the ids of the files whose + comparisons have to be returned. + :return: the given file(s) as a list of Comparison objects. + :rtype: list(Comparison) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_comparison_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def keywords(self, fileids=None): + """ + Return a set of all keywords used in the corpus. + + :param fileids: a list or regexp specifying the ids of the files whose + keywords have to be returned. + :return: the set of keywords and comparative phrases used in the corpus. + :rtype: set(str) + """ + all_keywords = concat( + [ + self.CorpusView(path, self._read_keyword_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + keywords_set = {keyword.lower() for keyword in all_keywords if keyword} + return keywords_set + + def keywords_readme(self): + """ + Return the list of words and constituents considered as clues of a + comparison (from listOfkeywords.txt). + """ + keywords = [] + with self.open("listOfkeywords.txt") as fp: + raw_text = fp.read() + for line in raw_text.split("\n"): + if not line or line.startswith("//"): + continue + keywords.append(line.strip()) + return keywords + + def sents(self, fileids=None): + """ + Return all sentences in the corpus. + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. + :return: all sentences of the corpus as lists of tokens (or as plain + strings, if no word tokenizer is specified). + :rtype: list(list(str)) or list(str) + """ + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def words(self, fileids=None): + """ + Return all words and punctuation symbols in the corpus. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :return: the given file(s) as a list of words and punctuation symbols. 
+ :rtype: list(str) + """ + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_comparison_block(self, stream): + while True: + line = stream.readline() + if not line: + return [] # end of file. + comparison_tags = re.findall(COMPARISON, line) + if comparison_tags: + grad_comparisons = re.findall(GRAD_COMPARISON, line) + non_grad_comparisons = re.findall(NON_GRAD_COMPARISON, line) + # Advance to the next line (it contains the comparative sentence) + comparison_text = stream.readline().strip() + if self._word_tokenizer: + comparison_text = self._word_tokenizer.tokenize(comparison_text) + # Skip the next line (it contains closing comparison tags) + stream.readline() + # If gradable comparisons are found, create Comparison instances + # and populate their fields + comparison_bundle = [] + if grad_comparisons: + # Each comparison tag has its own relations on a separate line + for comp in grad_comparisons: + comp_type = int(re.match(r"", comp).group(1)) + comparison = Comparison( + text=comparison_text, comp_type=comp_type + ) + line = stream.readline() + entities_feats = ENTITIES_FEATS.findall(line) + if entities_feats: + for (code, entity_feat) in entities_feats: + if code == "1": + comparison.entity_1 = entity_feat.strip() + elif code == "2": + comparison.entity_2 = entity_feat.strip() + elif code == "3": + comparison.feature = entity_feat.strip() + keyword = KEYWORD.findall(line) + if keyword: + comparison.keyword = keyword[0] + comparison_bundle.append(comparison) + # If non-gradable comparisons are found, create a simple Comparison + # instance for each one + if non_grad_comparisons: + for comp in non_grad_comparisons: + # comp_type in this case should always be 4. 
+ comp_type = int(re.match(r"", comp).group(1)) + comparison = Comparison( + text=comparison_text, comp_type=comp_type + ) + comparison_bundle.append(comparison) + # Flatten the list of comparisons before returning them + # return concat([comparison_bundle]) + return comparison_bundle + + def _read_keyword_block(self, stream): + keywords = [] + for comparison in self._read_comparison_block(stream): + keywords.append(comparison.keyword) + return keywords + + def _read_sent_block(self, stream): + while True: + line = stream.readline() + if re.match(STARS, line): + while True: + line = stream.readline() + if re.match(STARS, line): + break + continue + if ( + not re.findall(COMPARISON, line) + and not ENTITIES_FEATS.findall(line) + and not re.findall(CLOSE_COMPARISON, line) + ): + if self._sent_tokenizer: + return [ + self._word_tokenizer.tokenize(sent) + for sent in self._sent_tokenizer.tokenize(line) + ] + else: + return [self._word_tokenizer.tokenize(line)] + + def _read_word_block(self, stream): + words = [] + for sent in self._read_sent_block(stream): + words.extend(sent) + return words diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..87f56d4b5410a6dc419cd58538d3f4499478a205 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py @@ -0,0 +1,115 @@ +# Natural Language Toolkit: Dependency Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Kepa Sarasola +# Iker Manterola +# +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.parse import DependencyGraph +from nltk.tokenize import * + + +class DependencyCorpusReader(SyntaxCorpusReader): + def __init__( + self, + root, + fileids, + encoding="utf8", + word_tokenizer=TabTokenizer(), + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + para_block_reader=read_blankline_block, + ): + SyntaxCorpusReader.__init__(self, root, fileids, encoding) + + ######################################################### + + def words(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, False, False, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def tagged_words(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, True, False, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def sents(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, False, True, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def tagged_sents(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, True, True, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def parsed_sents(self, fileids=None): + sents = concat( + [ + DependencyCorpusView(fileid, False, True, True, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + return [DependencyGraph(sent) for sent in sents] + + +class DependencyCorpusView(StreamBackedCorpusView): + _DOCSTART = "-DOCSTART- -DOCSTART- O\n" # dokumentu hasiera definitzen da + + def __init__( + self, + corpus_file, + tagged, + group_by_sent, + dependencies, + chunk_types=None, + encoding="utf8", + ): + self._tagged = tagged + self._dependencies = 
dependencies + self._group_by_sent = group_by_sent + self._chunk_types = chunk_types + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + # Read the next sentence. + sent = read_blankline_block(stream)[0].strip() + # Strip off the docstart marker, if present. + if sent.startswith(self._DOCSTART): + sent = sent[len(self._DOCSTART) :].lstrip() + + # extract word and tag from any of the formats + if not self._dependencies: + lines = [line.split("\t") for line in sent.split("\n")] + if len(lines[0]) == 3 or len(lines[0]) == 4: + sent = [(line[0], line[1]) for line in lines] + elif len(lines[0]) == 10: + sent = [(line[1], line[4]) for line in lines] + else: + raise ValueError("Unexpected number of fields in dependency tree file") + + # discard tags if they weren't requested + if not self._tagged: + sent = [word for (word, tag) in sent] + + # Return the result. + if self._group_by_sent: + return [sent] + else: + return list(sent) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/indian.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/indian.py new file mode 100644 index 0000000000000000000000000000000000000000..23c6434c34b38dcb4e0227851afb2aefde2fd090 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/indian.py @@ -0,0 +1,93 @@ +# Natural Language Toolkit: Indian Language POS-Tagged Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Indian Language POS-Tagged Corpus +Collected by A Kumaran, Microsoft Research, India +Distributed with permission + +Contents: + - Bangla: IIT Kharagpur + - Hindi: Microsoft Research India + - Marathi: IIT Bombay + - Telugu: IIIT Hyderabad +""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag, str2tuple + + +class IndianCorpusReader(CorpusReader): + """ + List of words, one per line. Blank lines are ignored. 
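+
+    A minimal usage sketch (assumes the 'indian' corpus has been installed,
+    e.g. via ``nltk.download('indian')``; the fileid is taken from the
+    installed corpus rather than hard-coded)::
+
+        from nltk.corpus import indian
+
+        fileid = indian.fileids()[0]
+        indian.tagged_words(fileid)[:10]
+        indian.sents(fileid)[:2]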
+ """ + + def words(self, fileids=None): + return concat( + [ + IndianCorpusView(fileid, enc, False, False) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + IndianCorpusView(fileid, enc, True, False, tag_mapping_function) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + return concat( + [ + IndianCorpusView(fileid, enc, False, True) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + IndianCorpusView(fileid, enc, True, True, tag_mapping_function) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class IndianCorpusView(StreamBackedCorpusView): + def __init__( + self, corpus_file, encoding, tagged, group_by_sent, tag_mapping_function=None + ): + self._tagged = tagged + self._group_by_sent = group_by_sent + self._tag_mapping_function = tag_mapping_function + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + line = stream.readline() + if line.startswith("<"): + return [] + sent = [str2tuple(word, sep="_") for word in line.split()] + if self._tag_mapping_function: + sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent] + if not self._tagged: + sent = [w for (w, t) in sent] + if self._group_by_sent: + return [sent] + else: + return sent diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/knbc.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/knbc.py new file mode 100644 index 0000000000000000000000000000000000000000..b64c90b1800b230f411188fe713460160499132e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/knbc.py @@ -0,0 +1,188 @@ +#! /usr/bin/env python +# KNB Corpus reader +# Copyright (C) 2001-2023 NLTK Project +# Author: Masato Hagiwara +# URL: +# For license information, see LICENSE.TXT + +# For more information, see http://lilyx.net/pages/nltkjapanesecorpus.html + +import re + +from nltk.corpus.reader.api import CorpusReader, SyntaxCorpusReader +from nltk.corpus.reader.util import ( + FileSystemPathPointer, + find_corpus_fileids, + read_blankline_block, +) +from nltk.parse import DependencyGraph + +# default function to convert morphlist to str for tree representation +_morphs2str_default = lambda morphs: "/".join(m[0] for m in morphs if m[0] != "EOS") + + +class KNBCorpusReader(SyntaxCorpusReader): + """ + This class implements: + - ``__init__``, which specifies the location of the corpus + and a method for detecting the sentence blocks in corpus files. + - ``_read_block``, which reads a block from the input stream. + - ``_word``, which takes a block and returns a list of list of words. + - ``_tag``, which takes a block and returns a list of list of tagged + words. + - ``_parse``, which takes a block and returns a list of parsed + sentences. + + The structure of tagged words: + tagged_word = (word(str), tags(tuple)) + tags = (surface, reading, lemma, pos1, posid1, pos2, posid2, pos3, posid3, others ...) + + Usage example + + >>> from nltk.corpus.util import LazyCorpusLoader + >>> knbc = LazyCorpusLoader( + ... 'knbc/corpus1', + ... KNBCorpusReader, + ... 
r'.*/KN.*', + ... encoding='euc-jp', + ... ) + + >>> len(knbc.sents()[0]) + 9 + + """ + + def __init__(self, root, fileids, encoding="utf8", morphs2str=_morphs2str_default): + """ + Initialize KNBCorpusReader + morphs2str is a function to convert morphlist to str for tree representation + for _parse() + """ + SyntaxCorpusReader.__init__(self, root, fileids, encoding) + self.morphs2str = morphs2str + + def _read_block(self, stream): + # blocks are split by blankline (or EOF) - default + return read_blankline_block(stream) + + def _word(self, t): + res = [] + for line in t.splitlines(): + # ignore the Bunsets headers + if not re.match(r"EOS|\*|\#|\+", line): + cells = line.strip().split(" ") + res.append(cells[0]) + + return res + + # ignores tagset argument + def _tag(self, t, tagset=None): + res = [] + for line in t.splitlines(): + # ignore the Bunsets headers + if not re.match(r"EOS|\*|\#|\+", line): + cells = line.strip().split(" ") + # convert cells to morph tuples + res.append((cells[0], " ".join(cells[1:]))) + + return res + + def _parse(self, t): + dg = DependencyGraph() + i = 0 + for line in t.splitlines(): + if line[0] in "*+": + # start of bunsetsu or tag + + cells = line.strip().split(" ", 3) + m = re.match(r"([\-0-9]*)([ADIP])", cells[1]) + + assert m is not None + + node = dg.nodes[i] + node.update({"address": i, "rel": m.group(2), "word": []}) + + dep_parent = int(m.group(1)) + + if dep_parent == -1: + dg.root = node + else: + dg.nodes[dep_parent]["deps"].append(i) + + i += 1 + elif line[0] != "#": + # normal morph + cells = line.strip().split(" ") + # convert cells to morph tuples + morph = cells[0], " ".join(cells[1:]) + dg.nodes[i - 1]["word"].append(morph) + + if self.morphs2str: + for node in dg.nodes.values(): + node["word"] = self.morphs2str(node["word"]) + + return dg.tree() + + +###################################################################### +# Demo +###################################################################### + + +def demo(): + + import nltk + from nltk.corpus.util import LazyCorpusLoader + + root = nltk.data.find("corpora/knbc/corpus1") + fileids = [ + f + for f in find_corpus_fileids(FileSystemPathPointer(root), ".*") + if re.search(r"\d\-\d\-[\d]+\-[\d]+", f) + ] + + def _knbc_fileids_sort(x): + cells = x.split("-") + return (cells[0], int(cells[1]), int(cells[2]), int(cells[3])) + + knbc = LazyCorpusLoader( + "knbc/corpus1", + KNBCorpusReader, + sorted(fileids, key=_knbc_fileids_sort), + encoding="euc-jp", + ) + + print(knbc.fileids()[:10]) + print("".join(knbc.words()[:100])) + + print("\n\n".join(str(tree) for tree in knbc.parsed_sents()[:2])) + + knbc.morphs2str = lambda morphs: "/".join( + "{}({})".format(m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS" + ).encode("utf-8") + + print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2])) + + print( + "\n".join( + " ".join("{}/{}".format(w[0], w[1].split(" ")[2]) for w in sent) + for sent in knbc.tagged_sents()[0:2] + ) + ) + + +def test(): + + from nltk.corpus.util import LazyCorpusLoader + + knbc = LazyCorpusLoader( + "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp" + ) + assert isinstance(knbc.words()[0], str) + assert isinstance(knbc.sents()[0][0], str) + assert isinstance(knbc.tagged_words()[0], tuple) + assert isinstance(knbc.tagged_sents()[0][0], tuple) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/lin.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/lin.py new file mode 100644 index 
0000000000000000000000000000000000000000..15c20a6803c0c83557cd2f4689cddecfdd2d83da --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/lin.py @@ -0,0 +1,183 @@ +# Natural Language Toolkit: Lin's Thesaurus +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dan Blanchard +# URL: +# For license information, see LICENSE.txt + +import re +from collections import defaultdict +from functools import reduce + +from nltk.corpus.reader import CorpusReader + + +class LinThesaurusCorpusReader(CorpusReader): + """Wrapper for the LISP-formatted thesauruses distributed by Dekang Lin.""" + + # Compiled regular expression for extracting the key from the first line of each + # thesaurus entry + _key_re = re.compile(r'\("?([^"]+)"? \(desc [0-9.]+\).+') + + @staticmethod + def __defaultdict_factory(): + """Factory for creating defaultdict of defaultdict(dict)s""" + return defaultdict(dict) + + def __init__(self, root, badscore=0.0): + """ + Initialize the thesaurus. + + :param root: root directory containing thesaurus LISP files + :type root: C{string} + :param badscore: the score to give to words which do not appear in each other's sets of synonyms + :type badscore: C{float} + """ + + super().__init__(root, r"sim[A-Z]\.lsp") + self._thesaurus = defaultdict(LinThesaurusCorpusReader.__defaultdict_factory) + self._badscore = badscore + for path, encoding, fileid in self.abspaths( + include_encoding=True, include_fileid=True + ): + with open(path) as lin_file: + first = True + for line in lin_file: + line = line.strip() + # Start of entry + if first: + key = LinThesaurusCorpusReader._key_re.sub(r"\1", line) + first = False + # End of entry + elif line == "))": + first = True + # Lines with pairs of ngrams and scores + else: + split_line = line.split("\t") + if len(split_line) == 2: + ngram, score = split_line + self._thesaurus[fileid][key][ngram.strip('"')] = float( + score + ) + + def similarity(self, ngram1, ngram2, fileid=None): + """ + Returns the similarity score for two ngrams. + + :param ngram1: first ngram to compare + :type ngram1: C{string} + :param ngram2: second ngram to compare + :type ngram2: C{string} + :param fileid: thesaurus fileid to search in. If None, search all fileids. + :type fileid: C{string} + :return: If fileid is specified, just the score for the two ngrams; otherwise, + list of tuples of fileids and scores. + """ + # Entries don't contain themselves, so make sure similarity between item and itself is 1.0 + if ngram1 == ngram2: + if fileid: + return 1.0 + else: + return [(fid, 1.0) for fid in self._fileids] + else: + if fileid: + return ( + self._thesaurus[fileid][ngram1][ngram2] + if ngram2 in self._thesaurus[fileid][ngram1] + else self._badscore + ) + else: + return [ + ( + fid, + ( + self._thesaurus[fid][ngram1][ngram2] + if ngram2 in self._thesaurus[fid][ngram1] + else self._badscore + ), + ) + for fid in self._fileids + ] + + def scored_synonyms(self, ngram, fileid=None): + """ + Returns a list of scored synonyms (tuples of synonyms and scores) for the current ngram + + :param ngram: ngram to lookup + :type ngram: C{string} + :param fileid: thesaurus fileid to search in. If None, search all fileids. + :type fileid: C{string} + :return: If fileid is specified, list of tuples of scores and synonyms; otherwise, + list of tuples of fileids and lists, where inner lists consist of tuples of + scores and synonyms. 
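+
+        For example (mirroring ``demo()`` below), ``scored_synonyms("business",
+        fileid="simN.lsp")`` restricts the lookup to the noun thesaurus file.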
+ """ + if fileid: + return self._thesaurus[fileid][ngram].items() + else: + return [ + (fileid, self._thesaurus[fileid][ngram].items()) + for fileid in self._fileids + ] + + def synonyms(self, ngram, fileid=None): + """ + Returns a list of synonyms for the current ngram. + + :param ngram: ngram to lookup + :type ngram: C{string} + :param fileid: thesaurus fileid to search in. If None, search all fileids. + :type fileid: C{string} + :return: If fileid is specified, list of synonyms; otherwise, list of tuples of fileids and + lists, where inner lists contain synonyms. + """ + if fileid: + return self._thesaurus[fileid][ngram].keys() + else: + return [ + (fileid, self._thesaurus[fileid][ngram].keys()) + for fileid in self._fileids + ] + + def __contains__(self, ngram): + """ + Determines whether or not the given ngram is in the thesaurus. + + :param ngram: ngram to lookup + :type ngram: C{string} + :return: whether the given ngram is in the thesaurus. + """ + return reduce( + lambda accum, fileid: accum or (ngram in self._thesaurus[fileid]), + self._fileids, + False, + ) + + +###################################################################### +# Demo +###################################################################### + + +def demo(): + from nltk.corpus import lin_thesaurus as thes + + word1 = "business" + word2 = "enterprise" + print("Getting synonyms for " + word1) + print(thes.synonyms(word1)) + + print("Getting scored synonyms for " + word1) + print(thes.scored_synonyms(word1)) + + print("Getting synonyms from simN.lsp (noun subsection) for " + word1) + print(thes.synonyms(word1, fileid="simN.lsp")) + + print("Getting synonyms from simN.lsp (noun subsection) for " + word1) + print(thes.synonyms(word1, fileid="simN.lsp")) + + print(f"Similarity score for {word1} and {word2}:") + print(thes.similarity(word1, word2)) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py new file mode 100644 index 0000000000000000000000000000000000000000..0bcf51dc66954866ad665a54ba926fc9c8a33116 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py @@ -0,0 +1,90 @@ +# Natural Language Toolkit: NPS Chat Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +import re +import textwrap + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.xmldocs import * +from nltk.internals import ElementWrapper +from nltk.tag import map_tag +from nltk.util import LazyConcatenation + + +class NPSChatCorpusReader(XMLCorpusReader): + def __init__(self, root, fileids, wrap_etree=False, tagset=None): + XMLCorpusReader.__init__(self, root, fileids, wrap_etree) + self._tagset = tagset + + def xml_posts(self, fileids=None): + if self._wrap_etree: + return concat( + [ + XMLCorpusView(fileid, "Session/Posts/Post", self._wrap_elt) + for fileid in self.abspaths(fileids) + ] + ) + else: + return concat( + [ + XMLCorpusView(fileid, "Session/Posts/Post") + for fileid in self.abspaths(fileids) + ] + ) + + def posts(self, fileids=None): + return concat( + [ + XMLCorpusView( + fileid, "Session/Posts/Post/terminals", self._elt_to_words + ) + for fileid in self.abspaths(fileids) + ] + ) + + def tagged_posts(self, fileids=None, tagset=None): + def reader(elt, handler): + return self._elt_to_tagged_words(elt, handler, tagset) + + return concat( + [ + 
XMLCorpusView(fileid, "Session/Posts/Post/terminals", reader) + for fileid in self.abspaths(fileids) + ] + ) + + def words(self, fileids=None): + return LazyConcatenation(self.posts(fileids)) + + def tagged_words(self, fileids=None, tagset=None): + return LazyConcatenation(self.tagged_posts(fileids, tagset)) + + def _wrap_elt(self, elt, handler): + return ElementWrapper(elt) + + def _elt_to_words(self, elt, handler): + return [self._simplify_username(t.attrib["word"]) for t in elt.findall("t")] + + def _elt_to_tagged_words(self, elt, handler, tagset=None): + tagged_post = [ + (self._simplify_username(t.attrib["word"]), t.attrib["pos"]) + for t in elt.findall("t") + ] + if tagset and tagset != self._tagset: + tagged_post = [ + (w, map_tag(self._tagset, tagset, t)) for (w, t) in tagged_post + ] + return tagged_post + + @staticmethod + def _simplify_username(word): + if "User" in word: + word = "U" + word.split("User", 1)[1] + elif isinstance(word, bytes): + word = word.decode("ascii") + return word diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/rte.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/rte.py new file mode 100644 index 0000000000000000000000000000000000000000..98261fae9adf04ecf6938c966ec3cae4fcc775a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/rte.py @@ -0,0 +1,146 @@ +# Natural Language Toolkit: RTE Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora. + +The files were taken from the RTE1, RTE2 and RTE3 datasets and the files +were regularized. + +Filenames are of the form rte*_dev.xml and rte*_test.xml. The latter are the +gold standard annotated files. + +Each entailment corpus is a list of 'text'/'hypothesis' pairs. The following +example is taken from RTE3:: + + + + The sale was made to pay Yukos' US$ 27.5 billion tax bill, + Yuganskneftegaz was originally sold for US$ 9.4 billion to a little known + company Baikalfinansgroup which was later bought by the Russian + state-owned oil company Rosneft . + + Baikalfinansgroup was sold to Rosneft. + + +In order to provide globally unique IDs for each pair, a new attribute +``challenge`` has been added to the root element ``entailment-corpus`` of each +file, taking values 1, 2 or 3. The GID is formatted 'm-n', where 'm' is the +challenge number and 'n' is the pair ID. +""" +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.xmldocs import * + + +def norm(value_string): + """ + Normalize the string value in an RTE pair's ``value`` or ``entailment`` + attribute as an integer (1, 0). + + :param value_string: the label used to classify a text/hypothesis pair + :type value_string: str + :rtype: int + """ + + valdict = {"TRUE": 1, "FALSE": 0, "YES": 1, "NO": 0} + return valdict[value_string.upper()] + + +class RTEPair: + """ + Container for RTE text-hypothesis pairs. + + The entailment relation is signalled by the ``value`` attribute in RTE1, and by + ``entailment`` in RTE2 and RTE3. These both get mapped on to the ``entailment`` + attribute of this class. 
+ """ + + def __init__( + self, + pair, + challenge=None, + id=None, + text=None, + hyp=None, + value=None, + task=None, + length=None, + ): + """ + :param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3) + :param id: identifier for the pair + :param text: the text component of the pair + :param hyp: the hypothesis component of the pair + :param value: classification label for the pair + :param task: attribute for the particular NLP task that the data was drawn from + :param length: attribute for the length of the text of the pair + """ + self.challenge = challenge + self.id = pair.attrib["id"] + self.gid = f"{self.challenge}-{self.id}" + self.text = pair[0].text + self.hyp = pair[1].text + + if "value" in pair.attrib: + self.value = norm(pair.attrib["value"]) + elif "entailment" in pair.attrib: + self.value = norm(pair.attrib["entailment"]) + else: + self.value = value + if "task" in pair.attrib: + self.task = pair.attrib["task"] + else: + self.task = task + if "length" in pair.attrib: + self.length = pair.attrib["length"] + else: + self.length = length + + def __repr__(self): + if self.challenge: + return f"" + else: + return "" % self.id + + +class RTECorpusReader(XMLCorpusReader): + """ + Corpus reader for corpora in RTE challenges. + + This is just a wrapper around the XMLCorpusReader. See module docstring above for the expected + structure of input documents. + """ + + def _read_etree(self, doc): + """ + Map the XML input into an RTEPair. + + This uses the ``getiterator()`` method from the ElementTree package to + find all the ```` elements. + + :param doc: a parsed XML document + :rtype: list(RTEPair) + """ + try: + challenge = doc.attrib["challenge"] + except KeyError: + challenge = None + pairiter = doc.iter("pair") + return [RTEPair(pair, challenge=challenge) for pair in pairiter] + + def pairs(self, fileids): + """ + Build a list of RTEPairs from a RTE corpus. + + :param fileids: a list of RTE corpus fileids + :type: list + :rtype: list(RTEPair) + """ + if isinstance(fileids, str): + fileids = [fileids] + return concat([self._read_etree(self.xml(fileid)) for fileid in fileids]) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/semcor.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/semcor.py new file mode 100644 index 0000000000000000000000000000000000000000..c44474280deda5087069e7c398eaab79656f97b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/semcor.py @@ -0,0 +1,296 @@ +# Natural Language Toolkit: SemCor Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nathan Schneider +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the SemCor Corpus. +""" + +__docformat__ = "epytext en" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView +from nltk.tree import Tree + + +class SemcorCorpusReader(XMLCorpusReader): + """ + Corpus reader for the SemCor Corpus. + For access to the complete XML data structure, use the ``xml()`` + method. For access to simple word lists and tagged word lists, use + ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``. + """ + + def __init__(self, root, fileids, wordnet, lazy=True): + XMLCorpusReader.__init__(self, root, fileids) + self._lazy = lazy + self._wordnet = wordnet + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words and punctuation symbols. 
+ :rtype: list(str) + """ + return self._items(fileids, "word", False, False, False) + + def chunks(self, fileids=None): + """ + :return: the given file(s) as a list of chunks, + each of which is a list of words and punctuation symbols + that form a unit. + :rtype: list(list(str)) + """ + return self._items(fileids, "chunk", False, False, False) + + def tagged_chunks(self, fileids=None, tag=("pos" or "sem" or "both")): + """ + :return: the given file(s) as a list of tagged chunks, represented + in tree form. + :rtype: list(Tree) + + :param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'` + to indicate the kind of tags to include. Semantic tags consist of + WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity + without a specific entry in WordNet. (Named entities of type 'other' + have no lemma. Other chunks not in WordNet have no semantic tag. + Punctuation tokens have `None` for their part of speech tag.) + """ + return self._items(fileids, "chunk", False, tag != "sem", tag != "pos") + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of sentences, each encoded + as a list of word strings. + :rtype: list(list(str)) + """ + return self._items(fileids, "word", True, False, False) + + def chunk_sents(self, fileids=None): + """ + :return: the given file(s) as a list of sentences, each encoded + as a list of chunks. + :rtype: list(list(list(str))) + """ + return self._items(fileids, "chunk", True, False, False) + + def tagged_sents(self, fileids=None, tag=("pos" or "sem" or "both")): + """ + :return: the given file(s) as a list of sentences. Each sentence + is represented as a list of tagged chunks (in tree form). + :rtype: list(list(Tree)) + + :param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'` + to indicate the kind of tags to include. Semantic tags consist of + WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity + without a specific entry in WordNet. (Named entities of type 'other' + have no lemma. Other chunks not in WordNet have no semantic tag. + Punctuation tokens have `None` for their part of speech tag.) + """ + return self._items(fileids, "chunk", True, tag != "sem", tag != "pos") + + def _items(self, fileids, unit, bracket_sent, pos_tag, sem_tag): + if unit == "word" and not bracket_sent: + # the result of the SemcorWordView may be a multiword unit, so the + # LazyConcatenation will make sure the sentence is flattened + _ = lambda *args: LazyConcatenation( + (SemcorWordView if self._lazy else self._words)(*args) + ) + else: + _ = SemcorWordView if self._lazy else self._words + return concat( + [ + _(fileid, unit, bracket_sent, pos_tag, sem_tag, self._wordnet) + for fileid in self.abspaths(fileids) + ] + ) + + def _words(self, fileid, unit, bracket_sent, pos_tag, sem_tag): + """ + Helper used to implement the view methods -- returns a list of + tokens, (segmented) words, chunks, or sentences. The tokens + and chunks may optionally be tagged (with POS and sense + information). + + :param fileid: The name of the underlying file. + :param unit: One of `'token'`, `'word'`, or `'chunk'`. + :param bracket_sent: If true, include sentence bracketing. + :param pos_tag: Whether to include part-of-speech tags. + :param sem_tag: Whether to include semantic tags, namely WordNet lemma + and OOV named entity status. 
+ """ + assert unit in ("token", "word", "chunk") + result = [] + + xmldoc = ElementTree.parse(fileid).getroot() + for xmlsent in xmldoc.findall(".//s"): + sent = [] + for xmlword in _all_xmlwords_in(xmlsent): + itm = SemcorCorpusReader._word( + xmlword, unit, pos_tag, sem_tag, self._wordnet + ) + if unit == "word": + sent.extend(itm) + else: + sent.append(itm) + + if bracket_sent: + result.append(SemcorSentence(xmlsent.attrib["snum"], sent)) + else: + result.extend(sent) + + assert None not in result + return result + + @staticmethod + def _word(xmlword, unit, pos_tag, sem_tag, wordnet): + tkn = xmlword.text + if not tkn: + tkn = "" # fixes issue 337? + + lemma = xmlword.get("lemma", tkn) # lemma or NE class + lexsn = xmlword.get("lexsn") # lex_sense (locator for the lemma's sense) + if lexsn is not None: + sense_key = lemma + "%" + lexsn + wnpos = ("n", "v", "a", "r", "s")[ + int(lexsn.split(":")[0]) - 1 + ] # see http://wordnet.princeton.edu/man/senseidx.5WN.html + else: + sense_key = wnpos = None + redef = xmlword.get( + "rdf", tkn + ) # redefinition--this indicates the lookup string + # does not exactly match the enclosed string, e.g. due to typographical adjustments + # or discontinuity of a multiword expression. If a redefinition has occurred, + # the "rdf" attribute holds its inflected form and "lemma" holds its lemma. + # For NEs, "rdf", "lemma", and "pn" all hold the same value (the NE class). + sensenum = xmlword.get("wnsn") # WordNet sense number + isOOVEntity = "pn" in xmlword.keys() # a "personal name" (NE) not in WordNet + pos = xmlword.get( + "pos" + ) # part of speech for the whole chunk (None for punctuation) + + if unit == "token": + if not pos_tag and not sem_tag: + itm = tkn + else: + itm = ( + (tkn,) + + ((pos,) if pos_tag else ()) + + ((lemma, wnpos, sensenum, isOOVEntity) if sem_tag else ()) + ) + return itm + else: + ww = tkn.split("_") # TODO: case where punctuation intervenes in MWE + if unit == "word": + return ww + else: + if sensenum is not None: + try: + sense = wordnet.lemma_from_key(sense_key) # Lemma object + except Exception: + # cannot retrieve the wordnet.Lemma object. possible reasons: + # (a) the wordnet corpus is not downloaded; + # (b) a nonexistent sense is annotated: e.g., such.s.00 triggers: + # nltk.corpus.reader.wordnet.WordNetError: No synset found for key u'such%5:00:01:specified:00' + # solution: just use the lemma name as a string + try: + sense = "%s.%s.%02d" % ( + lemma, + wnpos, + int(sensenum), + ) # e.g.: reach.v.02 + except ValueError: + sense = ( + lemma + "." + wnpos + "." + sensenum + ) # e.g. the sense number may be "2;1" + + bottom = [Tree(pos, ww)] if pos_tag else ww + + if sem_tag and isOOVEntity: + if sensenum is not None: + return Tree(sense, [Tree("NE", bottom)]) + else: # 'other' NE + return Tree("NE", bottom) + elif sem_tag and sensenum is not None: + return Tree(sense, bottom) + elif pos_tag: + return bottom[0] + else: + return bottom # chunk as a list + + +def _all_xmlwords_in(elt, result=None): + if result is None: + result = [] + for child in elt: + if child.tag in ("wf", "punc"): + result.append(child) + else: + _all_xmlwords_in(child, result) + return result + + +class SemcorSentence(list): + """ + A list of words, augmented by an attribute ``num`` used to record + the sentence identifier (the ``n`` attribute from the XML). 
+ """ + + def __init__(self, num, items): + self.num = num + list.__init__(self, items) + + +class SemcorWordView(XMLCorpusView): + """ + A stream backed corpus view specialized for use with the BNC corpus. + """ + + def __init__(self, fileid, unit, bracket_sent, pos_tag, sem_tag, wordnet): + """ + :param fileid: The name of the underlying file. + :param unit: One of `'token'`, `'word'`, or `'chunk'`. + :param bracket_sent: If true, include sentence bracketing. + :param pos_tag: Whether to include part-of-speech tags. + :param sem_tag: Whether to include semantic tags, namely WordNet lemma + and OOV named entity status. + """ + if bracket_sent: + tagspec = ".*/s" + else: + tagspec = ".*/s/(punc|wf)" + + self._unit = unit + self._sent = bracket_sent + self._pos_tag = pos_tag + self._sem_tag = sem_tag + self._wordnet = wordnet + + XMLCorpusView.__init__(self, fileid, tagspec) + + def handle_elt(self, elt, context): + if self._sent: + return self.handle_sent(elt) + else: + return self.handle_word(elt) + + def handle_word(self, elt): + return SemcorCorpusReader._word( + elt, self._unit, self._pos_tag, self._sem_tag, self._wordnet + ) + + def handle_sent(self, elt): + sent = [] + for child in elt: + if child.tag in ("wf", "punc"): + itm = self.handle_word(child) + if self._unit == "word": + sent.extend(itm) + else: + sent.append(itm) + else: + raise ValueError("Unexpected element %s" % child.tag) + return SemcorSentence(elt.attrib["snum"], sent) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/sentiwordnet.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/sentiwordnet.py new file mode 100644 index 0000000000000000000000000000000000000000..42426100da71cf1d6b23353a22ce2e074837424d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/sentiwordnet.py @@ -0,0 +1,136 @@ +# Natural Language Toolkit: SentiWordNet +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Potts +# URL: +# For license information, see LICENSE.TXT + +""" +An NLTK interface for SentiWordNet + +SentiWordNet is a lexical resource for opinion mining. +SentiWordNet assigns to each synset of WordNet three +sentiment scores: positivity, negativity, and objectivity. + +For details about SentiWordNet see: +http://sentiwordnet.isti.cnr.it/ + + >>> from nltk.corpus import sentiwordnet as swn + >>> print(swn.senti_synset('breakdown.n.03')) + + >>> list(swn.senti_synsets('slow')) + [SentiSynset('decelerate.v.01'), SentiSynset('slow.v.02'),\ + SentiSynset('slow.v.03'), SentiSynset('slow.a.01'),\ + SentiSynset('slow.a.02'), SentiSynset('dense.s.04'),\ + SentiSynset('slow.a.04'), SentiSynset('boring.s.01'),\ + SentiSynset('dull.s.08'), SentiSynset('slowly.r.01'),\ + SentiSynset('behind.r.03')] + >>> happy = swn.senti_synsets('happy', 'a') + >>> happy0 = list(happy)[0] + >>> happy0.pos_score() + 0.875 + >>> happy0.neg_score() + 0.0 + >>> happy0.obj_score() + 0.125 +""" + +import re + +from nltk.corpus.reader import CorpusReader + + +class SentiWordNetCorpusReader(CorpusReader): + def __init__(self, root, fileids, encoding="utf-8"): + """ + Construct a new SentiWordNet Corpus Reader, using data from + the specified file. 
+ """ + super().__init__(root, fileids, encoding=encoding) + if len(self._fileids) != 1: + raise ValueError("Exactly one file must be specified") + self._db = {} + self._parse_src_file() + + def _parse_src_file(self): + lines = self.open(self._fileids[0]).read().splitlines() + lines = filter((lambda x: not re.search(r"^\s*#", x)), lines) + for i, line in enumerate(lines): + fields = [field.strip() for field in re.split(r"\t+", line)] + try: + pos, offset, pos_score, neg_score, synset_terms, gloss = fields + except BaseException as e: + raise ValueError(f"Line {i} formatted incorrectly: {line}\n") from e + if pos and offset: + offset = int(offset) + self._db[(pos, offset)] = (float(pos_score), float(neg_score)) + + def senti_synset(self, *vals): + from nltk.corpus import wordnet as wn + + if tuple(vals) in self._db: + pos_score, neg_score = self._db[tuple(vals)] + pos, offset = vals + if pos == "s": + pos = "a" + synset = wn.synset_from_pos_and_offset(pos, offset) + return SentiSynset(pos_score, neg_score, synset) + else: + synset = wn.synset(vals[0]) + pos = synset.pos() + if pos == "s": + pos = "a" + offset = synset.offset() + if (pos, offset) in self._db: + pos_score, neg_score = self._db[(pos, offset)] + return SentiSynset(pos_score, neg_score, synset) + else: + return None + + def senti_synsets(self, string, pos=None): + from nltk.corpus import wordnet as wn + + sentis = [] + synset_list = wn.synsets(string, pos) + for synset in synset_list: + sentis.append(self.senti_synset(synset.name())) + sentis = filter(lambda x: x, sentis) + return sentis + + def all_senti_synsets(self): + from nltk.corpus import wordnet as wn + + for key, fields in self._db.items(): + pos, offset = key + pos_score, neg_score = fields + synset = wn.synset_from_pos_and_offset(pos, offset) + yield SentiSynset(pos_score, neg_score, synset) + + +class SentiSynset: + def __init__(self, pos_score, neg_score, synset): + self._pos_score = pos_score + self._neg_score = neg_score + self._obj_score = 1.0 - (self._pos_score + self._neg_score) + self.synset = synset + + def pos_score(self): + return self._pos_score + + def neg_score(self): + return self._neg_score + + def obj_score(self): + return self._obj_score + + def __str__(self): + """Prints just the Pos/Neg scores for now.""" + s = "<" + s += self.synset.name() + ": " + s += "PosScore=%s " % self._pos_score + s += "NegScore=%s" % self._neg_score + s += ">" + return s + + def __repr__(self): + return "Senti" + repr(self.synset) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/sinica_treebank.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/sinica_treebank.py new file mode 100644 index 0000000000000000000000000000000000000000..6aa7f5ec9f34114c499721650bbb307413dd7804 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/sinica_treebank.py @@ -0,0 +1,75 @@ +# Natural Language Toolkit: Sinica Treebank Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Sinica Treebank Corpus Sample + +http://rocling.iis.sinica.edu.tw/CKIP/engversion/treebank.htm + +10,000 parsed sentences, drawn from the Academia Sinica Balanced +Corpus of Modern Chinese. Parse tree notation is based on +Information-based Case Grammar. 
Tagset documentation is available +at https://www.sinica.edu.tw/SinicaCorpus/modern_e_wordtype.html + +Language and Knowledge Processing Group, Institute of Information +Science, Academia Sinica + +The data is distributed with the Natural Language Toolkit under the terms of +the Creative Commons Attribution-NonCommercial-ShareAlike License +[https://creativecommons.org/licenses/by-nc-sa/2.5/]. + +References: + +Feng-Yi Chen, Pi-Fang Tsai, Keh-Jiann Chen, and Chu-Ren Huang (1999) +The Construction of Sinica Treebank. Computational Linguistics and +Chinese Language Processing, 4, pp 87-104. + +Huang Chu-Ren, Keh-Jiann Chen, Feng-Yi Chen, Keh-Jiann Chen, Zhao-Ming +Gao, and Kuang-Yu Chen. 2000. Sinica Treebank: Design Criteria, +Annotation Guidelines, and On-line Interface. Proceedings of 2nd +Chinese Language Processing Workshop, Association for Computational +Linguistics. + +Chen Keh-Jiann and Yu-Ming Hsieh (2004) Chinese Treebanks and Grammar +Extraction, Proceedings of IJCNLP-04, pp560-565. +""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag +from nltk.tree import sinica_parse + +IDENTIFIER = re.compile(r"^#\S+\s") +APPENDIX = re.compile(r"(?<=\))#.*$") +TAGWORD = re.compile(r":([^:()|]+):([^:()|]+)") +WORD = re.compile(r":[^:()|]+:([^:()|]+)") + + +class SinicaTreebankCorpusReader(SyntaxCorpusReader): + """ + Reader for the sinica treebank. + """ + + def _read_block(self, stream): + sent = stream.readline() + sent = IDENTIFIER.sub("", sent) + sent = APPENDIX.sub("", sent) + return [sent] + + def _parse(self, sent): + return sinica_parse(sent) + + def _tag(self, sent, tagset=None): + tagged_sent = [(w, t) for (t, w) in TAGWORD.findall(sent)] + if tagset and tagset != self._tagset: + tagged_sent = [ + (w, map_tag(self._tagset, tagset, t)) for (w, t) in tagged_sent + ] + return tagged_sent + + def _word(self, sent): + return WORD.findall(sent) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py new file mode 100644 index 0000000000000000000000000000000000000000..f6a396fb137ccf17c990f41268f77e176380acb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/switchboard.py @@ -0,0 +1,125 @@ +# Natural Language Toolkit: Switchboard Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT +import re + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag, str2tuple + + +class SwitchboardTurn(list): + """ + A specialized list object used to encode switchboard utterances. + The elements of the list are the words in the utterance; and two + attributes, ``speaker`` and ``id``, are provided to retrieve the + spearker identifier and utterance id. Note that utterance ids + are only unique within a given discourse. + """ + + def __init__(self, words, speaker, id): + list.__init__(self, words) + self.speaker = speaker + self.id = int(id) + + def __repr__(self): + if len(self) == 0: + text = "" + elif isinstance(self[0], tuple): + text = " ".join("%s/%s" % w for w in self) + else: + text = " ".join(self) + return f"<{self.speaker}.{self.id}: {text!r}>" + + +class SwitchboardCorpusReader(CorpusReader): + _FILES = ["tagged"] + # Use the "tagged" file even for non-tagged data methods, since + # it's tokenized. 
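+    # --- Editor's note: illustrative sketch, not part of the NLTK source ---
+    # The "tagged" file holds one utterance per line in the form
+    # "<speaker>.<id>: word/TAG word/TAG ...", which is what this class's
+    # _UTTERANCE_RE captures and what str2tuple (imported from nltk.tag)
+    # splits on the "/" separator held in _SEP.  The sample text below is
+    # hypothetical:
+    #
+    #     >>> from nltk.tag import str2tuple
+    #     >>> text = "Hello/UH there/RB ./."
+    #     >>> [str2tuple(s, "/") for s in text.split()]
+    #     [('Hello', 'UH'), ('there', 'RB'), ('.', '.')]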
+ + def __init__(self, root, tagset=None): + CorpusReader.__init__(self, root, self._FILES) + self._tagset = tagset + + def words(self): + return StreamBackedCorpusView(self.abspath("tagged"), self._words_block_reader) + + def tagged_words(self, tagset=None): + def tagged_words_block_reader(stream): + return self._tagged_words_block_reader(stream, tagset) + + return StreamBackedCorpusView(self.abspath("tagged"), tagged_words_block_reader) + + def turns(self): + return StreamBackedCorpusView(self.abspath("tagged"), self._turns_block_reader) + + def tagged_turns(self, tagset=None): + def tagged_turns_block_reader(stream): + return self._tagged_turns_block_reader(stream, tagset) + + return StreamBackedCorpusView(self.abspath("tagged"), tagged_turns_block_reader) + + def discourses(self): + return StreamBackedCorpusView( + self.abspath("tagged"), self._discourses_block_reader + ) + + def tagged_discourses(self, tagset=False): + def tagged_discourses_block_reader(stream): + return self._tagged_discourses_block_reader(stream, tagset) + + return StreamBackedCorpusView( + self.abspath("tagged"), tagged_discourses_block_reader + ) + + def _discourses_block_reader(self, stream): + # returns at most 1 discourse. (The other methods depend on this.) + return [ + [ + self._parse_utterance(u, include_tag=False) + for b in read_blankline_block(stream) + for u in b.split("\n") + if u.strip() + ] + ] + + def _tagged_discourses_block_reader(self, stream, tagset=None): + # returns at most 1 discourse. (The other methods depend on this.) + return [ + [ + self._parse_utterance(u, include_tag=True, tagset=tagset) + for b in read_blankline_block(stream) + for u in b.split("\n") + if u.strip() + ] + ] + + def _turns_block_reader(self, stream): + return self._discourses_block_reader(stream)[0] + + def _tagged_turns_block_reader(self, stream, tagset=None): + return self._tagged_discourses_block_reader(stream, tagset)[0] + + def _words_block_reader(self, stream): + return sum(self._discourses_block_reader(stream)[0], []) + + def _tagged_words_block_reader(self, stream, tagset=None): + return sum(self._tagged_discourses_block_reader(stream, tagset)[0], []) + + _UTTERANCE_RE = re.compile(r"(\w+)\.(\d+)\:\s*(.*)") + _SEP = "/" + + def _parse_utterance(self, utterance, include_tag, tagset=None): + m = self._UTTERANCE_RE.match(utterance) + if m is None: + raise ValueError("Bad utterance %r" % utterance) + speaker, id, text = m.groups() + words = [str2tuple(s, self._SEP) for s in text.split()] + if not include_tag: + words = [w for (w, t) in words] + elif tagset and tagset != self._tagset: + words = [(w, map_tag(self._tagset, tagset, t)) for (w, t) in words] + return SwitchboardTurn(words, speaker, id) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/timit.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/timit.py new file mode 100644 index 0000000000000000000000000000000000000000..e399ac2ff31fd39c5dfc9ac9e9de0bc29d1f1842 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/timit.py @@ -0,0 +1,510 @@ +# Natural Language Toolkit: TIMIT Corpus Reader +# +# Copyright (C) 2001-2007 NLTK Project +# Author: Haejoong Lee +# Steven Bird +# Jacob Perkins +# URL: +# For license information, see LICENSE.TXT + +# [xx] this docstring is out-of-date: +""" +Read tokens, phonemes and audio data from the NLTK TIMIT Corpus. + +This corpus contains selected portion of the TIMIT corpus. 
+ + - 16 speakers from 8 dialect regions + - 1 male and 1 female from each dialect region + - total 130 sentences (10 sentences per speaker. Note that some + sentences are shared among other speakers, especially sa1 and sa2 + are spoken by all speakers.) + - total 160 recording of sentences (10 recordings per speaker) + - audio format: NIST Sphere, single channel, 16kHz sampling, + 16 bit sample, PCM encoding + + +Module contents +=============== + +The timit corpus reader provides 4 functions and 4 data items. + + - utterances + + List of utterances in the corpus. There are total 160 utterances, + each of which corresponds to a unique utterance of a speaker. + Here's an example of an utterance identifier in the list:: + + dr1-fvmh0/sx206 + - _---- _--- + | | | | | + | | | | | + | | | | `--- sentence number + | | | `----- sentence type (a:all, i:shared, x:exclusive) + | | `--------- speaker ID + | `------------ sex (m:male, f:female) + `-------------- dialect region (1..8) + + - speakers + + List of speaker IDs. An example of speaker ID:: + + dr1-fvmh0 + + Note that if you split an item ID with colon and take the first element of + the result, you will get a speaker ID. + + >>> itemid = 'dr1-fvmh0/sx206' + >>> spkrid , sentid = itemid.split('/') + >>> spkrid + 'dr1-fvmh0' + + The second element of the result is a sentence ID. + + - dictionary() + + Phonetic dictionary of words contained in this corpus. This is a Python + dictionary from words to phoneme lists. + + - spkrinfo() + + Speaker information table. It's a Python dictionary from speaker IDs to + records of 10 fields. Speaker IDs the same as the ones in timie.speakers. + Each record is a dictionary from field names to values, and the fields are + as follows:: + + id speaker ID as defined in the original TIMIT speaker info table + sex speaker gender (M:male, F:female) + dr speaker dialect region (1:new england, 2:northern, + 3:north midland, 4:south midland, 5:southern, 6:new york city, + 7:western, 8:army brat (moved around)) + use corpus type (TRN:training, TST:test) + in this sample corpus only TRN is available + recdate recording date + birthdate speaker birth date + ht speaker height + race speaker race (WHT:white, BLK:black, AMR:american indian, + SPN:spanish-american, ORN:oriental,???:unknown) + edu speaker education level (HS:high school, AS:associate degree, + BS:bachelor's degree (BS or BA), MS:master's degree (MS or MA), + PHD:doctorate degree (PhD,JD,MD), ??:unknown) + comments comments by the recorder + +The 4 functions are as follows. + + - tokenized(sentences=items, offset=False) + + Given a list of items, returns an iterator of a list of word lists, + each of which corresponds to an item (sentence). If offset is set to True, + each element of the word list is a tuple of word(string), start offset and + end offset, where offset is represented as a number of 16kHz samples. + + - phonetic(sentences=items, offset=False) + + Given a list of items, returns an iterator of a list of phoneme lists, + each of which corresponds to an item (sentence). If offset is set to True, + each element of the phoneme list is a tuple of word(string), start offset + and end offset, where offset is represented as a number of 16kHz samples. + + - audiodata(item, start=0, end=None) + + Given an item, returns a chunk of audio samples formatted into a string. + When the function is called, if start and end are omitted, the entire + samples of the recording will be returned. 
If only end is omitted, + samples from the start offset to the end of the recording will be returned. + + - play(data) + + Play the given audio samples. The audio samples can be obtained from the + timit.audiodata function. + +""" +import sys +import time + +from nltk.corpus.reader.api import * +from nltk.internals import import_from_stdlib +from nltk.tree import Tree + + +class TimitCorpusReader(CorpusReader): + """ + Reader for the TIMIT corpus (or any other corpus with the same + file layout and use of file formats). The corpus root directory + should contain the following files: + + - timitdic.txt: dictionary of standard transcriptions + - spkrinfo.txt: table of speaker information + + In addition, the root directory should contain one subdirectory + for each speaker, containing three files for each utterance: + + - .txt: text content of utterances + - .wrd: tokenized text content of utterances + - .phn: phonetic transcription of utterances + - .wav: utterance sound file + """ + + _FILE_RE = r"(\w+-\w+/\w+\.(phn|txt|wav|wrd))|" + r"timitdic\.txt|spkrinfo\.txt" + """A regexp matching fileids that are used by this corpus reader.""" + _UTTERANCE_RE = r"\w+-\w+/\w+\.txt" + + def __init__(self, root, encoding="utf8"): + """ + Construct a new TIMIT corpus reader in the given directory. + :param root: The root directory for this corpus. + """ + # Ensure that wave files don't get treated as unicode data: + if isinstance(encoding, str): + encoding = [(r".*\.wav", None), (".*", encoding)] + + CorpusReader.__init__( + self, root, find_corpus_fileids(root, self._FILE_RE), encoding=encoding + ) + + self._utterances = [ + name[:-4] for name in find_corpus_fileids(root, self._UTTERANCE_RE) + ] + """A list of the utterance identifiers for all utterances in + this corpus.""" + + self._speakerinfo = None + self._root = root + self.speakers = sorted({u.split("/")[0] for u in self._utterances}) + + def fileids(self, filetype=None): + """ + Return a list of file identifiers for the files that make up + this corpus. + + :param filetype: If specified, then ``filetype`` indicates that + only the files that have the given type should be + returned. Accepted values are: ``txt``, ``wrd``, ``phn``, + ``wav``, or ``metadata``, + """ + if filetype is None: + return CorpusReader.fileids(self) + elif filetype in ("txt", "wrd", "phn", "wav"): + return [f"{u}.{filetype}" for u in self._utterances] + elif filetype == "metadata": + return ["timitdic.txt", "spkrinfo.txt"] + else: + raise ValueError("Bad value for filetype: %r" % filetype) + + def utteranceids( + self, dialect=None, sex=None, spkrid=None, sent_type=None, sentid=None + ): + """ + :return: A list of the utterance identifiers for all + utterances in this corpus, or for the given speaker, dialect + region, gender, sentence type, or sentence number, if + specified. 
+ """ + if isinstance(dialect, str): + dialect = [dialect] + if isinstance(sex, str): + sex = [sex] + if isinstance(spkrid, str): + spkrid = [spkrid] + if isinstance(sent_type, str): + sent_type = [sent_type] + if isinstance(sentid, str): + sentid = [sentid] + + utterances = self._utterances[:] + if dialect is not None: + utterances = [u for u in utterances if u[2] in dialect] + if sex is not None: + utterances = [u for u in utterances if u[4] in sex] + if spkrid is not None: + utterances = [u for u in utterances if u[:9] in spkrid] + if sent_type is not None: + utterances = [u for u in utterances if u[11] in sent_type] + if sentid is not None: + utterances = [u for u in utterances if u[10:] in spkrid] + return utterances + + def transcription_dict(self): + """ + :return: A dictionary giving the 'standard' transcription for + each word. + """ + _transcriptions = {} + with self.open("timitdic.txt") as fp: + for line in fp: + if not line.strip() or line[0] == ";": + continue + m = re.match(r"\s*(\S+)\s+/(.*)/\s*$", line) + if not m: + raise ValueError("Bad line: %r" % line) + _transcriptions[m.group(1)] = m.group(2).split() + return _transcriptions + + def spkrid(self, utterance): + return utterance.split("/")[0] + + def sentid(self, utterance): + return utterance.split("/")[1] + + def utterance(self, spkrid, sentid): + return f"{spkrid}/{sentid}" + + def spkrutteranceids(self, speaker): + """ + :return: A list of all utterances associated with a given + speaker. + """ + return [ + utterance + for utterance in self._utterances + if utterance.startswith(speaker + "/") + ] + + def spkrinfo(self, speaker): + """ + :return: A dictionary mapping .. something. + """ + if speaker in self._utterances: + speaker = self.spkrid(speaker) + + if self._speakerinfo is None: + self._speakerinfo = {} + with self.open("spkrinfo.txt") as fp: + for line in fp: + if not line.strip() or line[0] == ";": + continue + rec = line.strip().split(None, 9) + key = f"dr{rec[2]}-{rec[1].lower()}{rec[0].lower()}" + self._speakerinfo[key] = SpeakerInfo(*rec) + + return self._speakerinfo[speaker] + + def phones(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".phn"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append(line.split()[-1]) + return results + + def phone_times(self, utterances=None): + """ + offset is represented as a number of 16kHz samples! 
+ """ + results = [] + for fileid in self._utterance_fileids(utterances, ".phn"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append( + ( + line.split()[2], + int(line.split()[0]), + int(line.split()[1]), + ) + ) + return results + + def words(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".wrd"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append(line.split()[-1]) + return results + + def word_times(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".wrd"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append( + ( + line.split()[2], + int(line.split()[0]), + int(line.split()[1]), + ) + ) + return results + + def sents(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".wrd"): + with self.open(fileid) as fp: + results.append([line.split()[-1] for line in fp if line.strip()]) + return results + + def sent_times(self, utterances=None): + # TODO: Check this + return [ + ( + line.split(None, 2)[-1].strip(), + int(line.split()[0]), + int(line.split()[1]), + ) + for fileid in self._utterance_fileids(utterances, ".txt") + for line in self.open(fileid) + if line.strip() + ] + + def phone_trees(self, utterances=None): + if utterances is None: + utterances = self._utterances + if isinstance(utterances, str): + utterances = [utterances] + + trees = [] + for utterance in utterances: + word_times = self.word_times(utterance) + phone_times = self.phone_times(utterance) + sent_times = self.sent_times(utterance) + + while sent_times: + (sent, sent_start, sent_end) = sent_times.pop(0) + trees.append(Tree("S", [])) + while ( + word_times and phone_times and phone_times[0][2] <= word_times[0][1] + ): + trees[-1].append(phone_times.pop(0)[0]) + while word_times and word_times[0][2] <= sent_end: + (word, word_start, word_end) = word_times.pop(0) + trees[-1].append(Tree(word, [])) + while phone_times and phone_times[0][2] <= word_end: + trees[-1][-1].append(phone_times.pop(0)[0]) + while phone_times and phone_times[0][2] <= sent_end: + trees[-1].append(phone_times.pop(0)[0]) + return trees + + # [xx] NOTE: This is currently broken -- we're assuming that the + # fileids are WAV fileids (aka RIFF), but they're actually NIST SPHERE + # fileids. + def wav(self, utterance, start=0, end=None): + # nltk.chunk conflicts with the stdlib module 'chunk' + wave = import_from_stdlib("wave") + + w = wave.open(self.open(utterance + ".wav"), "rb") + + if end is None: + end = w.getnframes() + + # Skip past frames before start, then read the frames we want + w.readframes(start) + frames = w.readframes(end - start) + + # Open a new temporary file -- the wave module requires + # an actual file, and won't work w/ stringio. :( + tf = tempfile.TemporaryFile() + out = wave.open(tf, "w") + + # Write the parameters & data to the new file. + out.setparams(w.getparams()) + out.writeframes(frames) + out.close() + + # Read the data back from the file, and return it. The + # file will automatically be deleted when we return. 
+ tf.seek(0) + return tf.read() + + def audiodata(self, utterance, start=0, end=None): + assert end is None or end > start + headersize = 44 + with self.open(utterance + ".wav") as fp: + if end is None: + data = fp.read() + else: + data = fp.read(headersize + end * 2) + return data[headersize + start * 2 :] + + def _utterance_fileids(self, utterances, extension): + if utterances is None: + utterances = self._utterances + if isinstance(utterances, str): + utterances = [utterances] + return [f"{u}{extension}" for u in utterances] + + def play(self, utterance, start=0, end=None): + """ + Play the given audio sample. + + :param utterance: The utterance id of the sample to play + """ + # Method 1: os audio dev. + try: + import ossaudiodev + + try: + dsp = ossaudiodev.open("w") + dsp.setfmt(ossaudiodev.AFMT_S16_LE) + dsp.channels(1) + dsp.speed(16000) + dsp.write(self.audiodata(utterance, start, end)) + dsp.close() + except OSError as e: + print( + ( + "can't acquire the audio device; please " + "activate your audio device." + ), + file=sys.stderr, + ) + print("system error message:", str(e), file=sys.stderr) + return + except ImportError: + pass + + # Method 2: pygame + try: + # FIXME: this won't work under python 3 + import pygame.mixer + import StringIO + + pygame.mixer.init(16000) + f = StringIO.StringIO(self.wav(utterance, start, end)) + pygame.mixer.Sound(f).play() + while pygame.mixer.get_busy(): + time.sleep(0.01) + return + except ImportError: + pass + + # Method 3: complain. :) + print( + ("you must install pygame or ossaudiodev " "for audio playback."), + file=sys.stderr, + ) + + +class SpeakerInfo: + def __init__( + self, id, sex, dr, use, recdate, birthdate, ht, race, edu, comments=None + ): + self.id = id + self.sex = sex + self.dr = dr + self.use = use + self.recdate = recdate + self.birthdate = birthdate + self.ht = ht + self.race = race + self.edu = edu + self.comments = comments + + def __repr__(self): + attribs = "id sex dr use recdate birthdate ht race edu comments" + args = [f"{attr}={getattr(self, attr)!r}" for attr in attribs.split()] + return "SpeakerInfo(%s)" % (", ".join(args)) + + +def read_timit_block(stream): + """ + Block reader for timit tagged sentences, which are preceded by a sentence + number that will be ignored. + """ + line = stream.readline() + if not line: + return [] + n, sent = line.split(" ", 1) + return [sent] diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py new file mode 100644 index 0000000000000000000000000000000000000000..5684ea0b90129223ada6e7dc62fd6a6708e90960 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py @@ -0,0 +1,76 @@ +# Natural Language Toolkit: Toolbox Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Greg Aumann +# Stuart Robinson +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Module for reading, writing and manipulating +Toolbox databases and settings fileids. 
+""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.toolbox import ToolboxData + + +class ToolboxCorpusReader(CorpusReader): + def xml(self, fileids, key=None): + return concat( + [ + ToolboxData(path, enc).parse(key=key) + for (path, enc) in self.abspaths(fileids, True) + ] + ) + + def fields( + self, + fileids, + strip=True, + unwrap=True, + encoding="utf8", + errors="strict", + unicode_fields=None, + ): + return concat( + [ + list( + ToolboxData(fileid, enc).fields( + strip, unwrap, encoding, errors, unicode_fields + ) + ) + for (fileid, enc) in self.abspaths(fileids, include_encoding=True) + ] + ) + + # should probably be done lazily: + def entries(self, fileids, **kwargs): + if "key" in kwargs: + key = kwargs["key"] + del kwargs["key"] + else: + key = "lx" # the default key in MDF + entries = [] + for marker, contents in self.fields(fileids, **kwargs): + if marker == key: + entries.append((contents, [])) + else: + try: + entries[-1][-1].append((marker, contents)) + except IndexError: + pass + return entries + + def words(self, fileids, key="lx"): + return [contents for marker, contents in self.fields(fileids) if marker == key] + + +def demo(): + pass + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/twitter.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/twitter.py new file mode 100644 index 0000000000000000000000000000000000000000..a54c6654f0d95aefa3e1bfb55402be505981607e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/twitter.py @@ -0,0 +1,136 @@ +# Natural Language Toolkit: Twitter Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +A reader for corpora that consist of Tweets. It is assumed that the Tweets +have been serialised into line-delimited JSON. +""" + +import json +import os + +from nltk.corpus.reader.api import CorpusReader +from nltk.corpus.reader.util import StreamBackedCorpusView, ZipFilePathPointer, concat +from nltk.tokenize import TweetTokenizer + + +class TwitterCorpusReader(CorpusReader): + r""" + Reader for corpora that consist of Tweets represented as a list of line-delimited JSON. + + Individual Tweets can be tokenized using the default tokenizer, or by a + custom tokenizer specified as a parameter to the constructor. + + Construct a new Tweet corpus reader for a set of documents + located at the given root directory. + + If you made your own tweet collection in a directory called + `twitter-files`, then you can initialise the reader as:: + + from nltk.corpus import TwitterCorpusReader + reader = TwitterCorpusReader(root='/path/to/twitter-files', '.*\.json') + + However, the recommended approach is to set the relevant directory as the + value of the environmental variable `TWITTER`, and then invoke the reader + as follows:: + + root = os.environ['TWITTER'] + reader = TwitterCorpusReader(root, '.*\.json') + + If you want to work directly with the raw Tweets, the `json` library can + be used:: + + import json + for tweet in reader.docs(): + print(json.dumps(tweet, indent=1, sort_keys=True)) + + """ + + CorpusView = StreamBackedCorpusView + """ + The corpus view class used by this reader. + """ + + def __init__( + self, root, fileids=None, word_tokenizer=TweetTokenizer(), encoding="utf8" + ): + """ + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. 
+ :param word_tokenizer: Tokenizer for breaking the text of Tweets into + smaller units, including but not limited to words. + """ + CorpusReader.__init__(self, root, fileids, encoding) + + for path in self.abspaths(self._fileids): + if isinstance(path, ZipFilePathPointer): + pass + elif os.path.getsize(path) == 0: + raise ValueError(f"File {path} is empty") + """Check that all user-created corpus files are non-empty.""" + + self._word_tokenizer = word_tokenizer + + def docs(self, fileids=None): + """ + Returns the full Tweet objects, as specified by `Twitter + documentation on Tweets + `_ + + :return: the given file(s) as a list of dictionaries deserialised + from JSON. + :rtype: list(dict) + """ + return concat( + [ + self.CorpusView(path, self._read_tweets, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def strings(self, fileids=None): + """ + Returns only the text content of Tweets in the file(s) + + :return: the given file(s) as a list of Tweets. + :rtype: list(str) + """ + fulltweets = self.docs(fileids) + tweets = [] + for jsono in fulltweets: + try: + text = jsono["text"] + if isinstance(text, bytes): + text = text.decode(self.encoding) + tweets.append(text) + except KeyError: + pass + return tweets + + def tokenized(self, fileids=None): + """ + :return: the given file(s) as a list of the text content of Tweets as + as a list of words, screenanames, hashtags, URLs and punctuation symbols. + + :rtype: list(list(str)) + """ + tweets = self.strings(fileids) + tokenizer = self._word_tokenizer + return [tokenizer.tokenize(t) for t in tweets] + + def _read_tweets(self, stream): + """ + Assumes that each line in ``stream`` is a JSON-serialised object. + """ + tweets = [] + for i in range(10): + line = stream.readline() + if not line: + return tweets + tweet = json.loads(line) + tweets.append(tweet) + return tweets diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f10c3436dde87850528529b4ab1b4cf6413a1bce --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py @@ -0,0 +1,2489 @@ +# Natural Language Toolkit: WordNet +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bethard +# Steven Bird +# Edward Loper +# Nitin Madnani +# Nasruddin A’aidil Shari +# Sim Wei Ying Geraldine +# Soe Lynn +# Francis Bond +# Eric Kafe + +# URL: +# For license information, see LICENSE.TXT + +""" +An NLTK interface for WordNet + +WordNet is a lexical database of English. +Using synsets, helps find conceptual relationships between words +such as hypernyms, hyponyms, synonyms, antonyms etc. 
+ +For details about WordNet see: +https://wordnet.princeton.edu/ + +This module also allows you to find lemmas in languages +other than English from the Open Multilingual Wordnet +https://omwn.org/ + +""" + +import math +import os +import re +import warnings +from collections import defaultdict, deque +from functools import total_ordering +from itertools import chain, islice +from operator import itemgetter + +from nltk.corpus.reader import CorpusReader +from nltk.internals import deprecated +from nltk.probability import FreqDist +from nltk.util import binary_search_file as _binary_search_file + +###################################################################### +# Table of Contents +###################################################################### +# - Constants +# - Data Classes +# - WordNetError +# - Lemma +# - Synset +# - WordNet Corpus Reader +# - WordNet Information Content Corpus Reader +# - Similarity Metrics +# - Demo + +###################################################################### +# Constants +###################################################################### + +#: Positive infinity (for similarity functions) +_INF = 1e300 + +# { Part-of-speech constants +ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v" +# } + +POS_LIST = [NOUN, VERB, ADJ, ADV] + +# A table of strings that are used to express verb frames. +VERB_FRAME_STRINGS = ( + None, + "Something %s", + "Somebody %s", + "It is %sing", + "Something is %sing PP", + "Something %s something Adjective/Noun", + "Something %s Adjective/Noun", + "Somebody %s Adjective", + "Somebody %s something", + "Somebody %s somebody", + "Something %s somebody", + "Something %s something", + "Something %s to somebody", + "Somebody %s on something", + "Somebody %s somebody something", + "Somebody %s something to somebody", + "Somebody %s something from somebody", + "Somebody %s somebody with something", + "Somebody %s somebody of something", + "Somebody %s something on somebody", + "Somebody %s somebody PP", + "Somebody %s something PP", + "Somebody %s PP", + "Somebody's (body part) %s", + "Somebody %s somebody to INFINITIVE", + "Somebody %s somebody INFINITIVE", + "Somebody %s that CLAUSE", + "Somebody %s to somebody", + "Somebody %s to INFINITIVE", + "Somebody %s whether INFINITIVE", + "Somebody %s somebody into V-ing something", + "Somebody %s something with something", + "Somebody %s INFINITIVE", + "Somebody %s VERB-ing", + "It %s that CLAUSE", + "Something %s INFINITIVE", + # OEWN additions: + "Somebody %s at something", + "Somebody %s for something", + "Somebody %s on somebody", + "Somebody %s out of somebody", +) + +SENSENUM_RE = re.compile(r"\.[\d]+\.") + + +###################################################################### +# Data Classes +###################################################################### + + +class WordNetError(Exception): + """An exception class for wordnet-related errors.""" + + +@total_ordering +class _WordNetObject: + """A common base class for lemmas and synsets.""" + + def hypernyms(self): + return self._related("@") + + def _hypernyms(self): + return self._related("@") + + def instance_hypernyms(self): + return self._related("@i") + + def _instance_hypernyms(self): + return self._related("@i") + + def hyponyms(self): + return self._related("~") + + def instance_hyponyms(self): + return self._related("~i") + + def member_holonyms(self): + return self._related("#m") + + def substance_holonyms(self): + return self._related("#s") + + def part_holonyms(self): + return 
self._related("#p") + + def member_meronyms(self): + return self._related("%m") + + def substance_meronyms(self): + return self._related("%s") + + def part_meronyms(self): + return self._related("%p") + + def topic_domains(self): + return self._related(";c") + + def in_topic_domains(self): + return self._related("-c") + + def region_domains(self): + return self._related(";r") + + def in_region_domains(self): + return self._related("-r") + + def usage_domains(self): + return self._related(";u") + + def in_usage_domains(self): + return self._related("-u") + + def attributes(self): + return self._related("=") + + def entailments(self): + return self._related("*") + + def causes(self): + return self._related(">") + + def also_sees(self): + return self._related("^") + + def verb_groups(self): + return self._related("$") + + def similar_tos(self): + return self._related("&") + + def __hash__(self): + return hash(self._name) + + def __eq__(self, other): + return self._name == other._name + + def __ne__(self, other): + return self._name != other._name + + def __lt__(self, other): + return self._name < other._name + + +class Lemma(_WordNetObject): + """ + The lexical entry for a single morphological form of a + sense-disambiguated word. + + Create a Lemma from a "..." string where: + is the morphological stem identifying the synset + is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB + is the sense number, counting from 0. + is the morphological form of interest + + Note that and can be different, e.g. the Synset + 'salt.n.03' has the Lemmas 'salt.n.03.salt', 'salt.n.03.saltiness' and + 'salt.n.03.salinity'. + + Lemma attributes, accessible via methods with the same name: + + - name: The canonical name of this lemma. + - synset: The synset that this lemma belongs to. + - syntactic_marker: For adjectives, the WordNet string identifying the + syntactic position relative modified noun. See: + https://wordnet.princeton.edu/documentation/wninput5wn + For all other parts of speech, this attribute is None. + - count: The frequency of this lemma in wordnet. + + Lemma methods: + + Lemmas have the following methods for retrieving related Lemmas. They + correspond to the names for the pointer symbols defined here: + https://wordnet.princeton.edu/documentation/wninput5wn + These methods all return lists of Lemmas: + + - antonyms + - hypernyms, instance_hypernyms + - hyponyms, instance_hyponyms + - member_holonyms, substance_holonyms, part_holonyms + - member_meronyms, substance_meronyms, part_meronyms + - topic_domains, region_domains, usage_domains + - attributes + - derivationally_related_forms + - entailments + - causes + - also_sees + - verb_groups + - similar_tos + - pertainyms + """ + + __slots__ = [ + "_wordnet_corpus_reader", + "_name", + "_syntactic_marker", + "_synset", + "_frame_strings", + "_frame_ids", + "_lexname_index", + "_lex_id", + "_lang", + "_key", + ] + + def __init__( + self, + wordnet_corpus_reader, + synset, + name, + lexname_index, + lex_id, + syntactic_marker, + ): + self._wordnet_corpus_reader = wordnet_corpus_reader + self._name = name + self._syntactic_marker = syntactic_marker + self._synset = synset + self._frame_strings = [] + self._frame_ids = [] + self._lexname_index = lexname_index + self._lex_id = lex_id + self._lang = "eng" + + self._key = None # gets set later. 
+ + def name(self): + return self._name + + def syntactic_marker(self): + return self._syntactic_marker + + def synset(self): + return self._synset + + def frame_strings(self): + return self._frame_strings + + def frame_ids(self): + return self._frame_ids + + def lang(self): + return self._lang + + def key(self): + return self._key + + def __repr__(self): + tup = type(self).__name__, self._synset._name, self._name + return "%s('%s.%s')" % tup + + def _related(self, relation_symbol): + get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset + if (self._name, relation_symbol) not in self._synset._lemma_pointers: + return [] + return [ + get_synset(pos, offset)._lemmas[lemma_index] + for pos, offset, lemma_index in self._synset._lemma_pointers[ + self._name, relation_symbol + ] + ] + + def count(self): + """Return the frequency count for this Lemma""" + return self._wordnet_corpus_reader.lemma_count(self) + + def antonyms(self): + return self._related("!") + + def derivationally_related_forms(self): + return self._related("+") + + def pertainyms(self): + return self._related("\\") + + +class Synset(_WordNetObject): + """Create a Synset from a ".." string where: + is the word's morphological stem + is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB + is the sense number, counting from 0. + + Synset attributes, accessible via methods with the same name: + + - name: The canonical name of this synset, formed using the first lemma + of this synset. Note that this may be different from the name + passed to the constructor if that string used a different lemma to + identify the synset. + - pos: The synset's part of speech, matching one of the module level + attributes ADJ, ADJ_SAT, ADV, NOUN or VERB. + - lemmas: A list of the Lemma objects for this synset. + - definition: The definition for this synset. + - examples: A list of example strings for this synset. + - offset: The offset in the WordNet dict file of this synset. + - lexname: The name of the lexicographer file containing this synset. + + Synset methods: + + Synsets have the following methods for retrieving related Synsets. + They correspond to the names for the pointer symbols defined here: + https://wordnet.princeton.edu/documentation/wninput5wn + These methods all return lists of Synsets. 
+ + - hypernyms, instance_hypernyms + - hyponyms, instance_hyponyms + - member_holonyms, substance_holonyms, part_holonyms + - member_meronyms, substance_meronyms, part_meronyms + - attributes + - entailments + - causes + - also_sees + - verb_groups + - similar_tos + + Additionally, Synsets support the following methods specific to the + hypernym relation: + + - root_hypernyms + - common_hypernyms + - lowest_common_hypernyms + + Note that Synsets do not support the following relations because + these are defined by WordNet as lexical relations: + + - antonyms + - derivationally_related_forms + - pertainyms + """ + + __slots__ = [ + "_pos", + "_offset", + "_name", + "_frame_ids", + "_lemmas", + "_lemma_names", + "_definition", + "_examples", + "_lexname", + "_pointers", + "_lemma_pointers", + "_max_depth", + "_min_depth", + ] + + def __init__(self, wordnet_corpus_reader): + self._wordnet_corpus_reader = wordnet_corpus_reader + # All of these attributes get initialized by + # WordNetCorpusReader._synset_from_pos_and_line() + + self._pos = None + self._offset = None + self._name = None + self._frame_ids = [] + self._lemmas = [] + self._lemma_names = [] + self._definition = None + self._examples = [] + self._lexname = None # lexicographer name + self._all_hypernyms = None + + self._pointers = defaultdict(set) + self._lemma_pointers = defaultdict(list) + + def pos(self): + return self._pos + + def offset(self): + return self._offset + + def name(self): + return self._name + + def frame_ids(self): + return self._frame_ids + + def _doc(self, doc_type, default, lang="eng"): + """Helper method for Synset.definition and Synset.examples""" + corpus = self._wordnet_corpus_reader + if lang not in corpus.langs(): + return None + elif lang == "eng": + return default + else: + corpus._load_lang_data(lang) + of = corpus.ss2of(self) + i = corpus.lg_attrs.index(doc_type) + if of in corpus._lang_data[lang][i]: + return corpus._lang_data[lang][i][of] + else: + return None + + def definition(self, lang="eng"): + """Return definition in specified language""" + return self._doc("def", self._definition, lang=lang) + + def examples(self, lang="eng"): + """Return examples in specified language""" + return self._doc("exe", self._examples, lang=lang) + + def lexname(self): + return self._lexname + + def _needs_root(self): + if self._pos == NOUN and self._wordnet_corpus_reader.get_version() != "1.6": + return False + else: + return True + + def lemma_names(self, lang="eng"): + """Return all the lemma_names associated with the synset""" + if lang == "eng": + return self._lemma_names + else: + reader = self._wordnet_corpus_reader + reader._load_lang_data(lang) + i = reader.ss2of(self) + if i in reader._lang_data[lang][0]: + return reader._lang_data[lang][0][i] + else: + return [] + + def lemmas(self, lang="eng"): + """Return all the lemma objects associated with the synset""" + if lang == "eng": + return self._lemmas + elif self._name: + self._wordnet_corpus_reader._load_lang_data(lang) + lemmark = [] + lemmy = self.lemma_names(lang) + for lem in lemmy: + temp = Lemma( + self._wordnet_corpus_reader, + self, + lem, + self._wordnet_corpus_reader._lexnames.index(self.lexname()), + 0, + None, + ) + temp._lang = lang + lemmark.append(temp) + return lemmark + + def root_hypernyms(self): + """Get the topmost hypernyms of this synset in WordNet.""" + + result = [] + seen = set() + todo = [self] + while todo: + next_synset = todo.pop() + if next_synset not in seen: + seen.add(next_synset) + next_hypernyms = ( + 
next_synset.hypernyms() + next_synset.instance_hypernyms() + ) + if not next_hypernyms: + result.append(next_synset) + else: + todo.extend(next_hypernyms) + return result + + # Simpler implementation which makes incorrect assumption that + # hypernym hierarchy is acyclic: + # + # if not self.hypernyms(): + # return [self] + # else: + # return list(set(root for h in self.hypernyms() + # for root in h.root_hypernyms())) + def max_depth(self): + """ + :return: The length of the longest hypernym path from this + synset to the root. + """ + + if "_max_depth" not in self.__dict__: + hypernyms = self.hypernyms() + self.instance_hypernyms() + if not hypernyms: + self._max_depth = 0 + else: + self._max_depth = 1 + max(h.max_depth() for h in hypernyms) + return self._max_depth + + def min_depth(self): + """ + :return: The length of the shortest hypernym path from this + synset to the root. + """ + + if "_min_depth" not in self.__dict__: + hypernyms = self.hypernyms() + self.instance_hypernyms() + if not hypernyms: + self._min_depth = 0 + else: + self._min_depth = 1 + min(h.min_depth() for h in hypernyms) + return self._min_depth + + def closure(self, rel, depth=-1): + """ + Return the transitive closure of source under the rel + relationship, breadth-first, discarding cycles: + + >>> from nltk.corpus import wordnet as wn + >>> computer = wn.synset('computer.n.01') + >>> topic = lambda s:s.topic_domains() + >>> print(list(computer.closure(topic))) + [Synset('computer_science.n.01')] + + UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2 + + + Include redundant paths (but only once), avoiding duplicate searches + (from 'animal.n.01' to 'entity.n.01'): + + >>> dog = wn.synset('dog.n.01') + >>> hyp = lambda s:s.hypernyms() + >>> print(list(dog.closure(hyp))) + [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\ + Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\ + Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\ + Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\ + Synset('physical_entity.n.01'), Synset('entity.n.01')] + + UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7 + """ + + from nltk.util import acyclic_breadth_first + + for synset in acyclic_breadth_first(self, rel, depth): + if synset != self: + yield synset + + from nltk.util import acyclic_depth_first as acyclic_tree + from nltk.util import unweighted_minimum_spanning_tree as mst + + # Also add this shortcut? 
+ # from nltk.util import unweighted_minimum_spanning_digraph as umsd + + def tree(self, rel, depth=-1, cut_mark=None): + """ + Return the full relation tree, including self, + discarding cycles: + + >>> from nltk.corpus import wordnet as wn + >>> from pprint import pprint + >>> computer = wn.synset('computer.n.01') + >>> topic = lambda s:s.topic_domains() + >>> pprint(computer.tree(topic)) + [Synset('computer.n.01'), [Synset('computer_science.n.01')]] + + UserWarning: Discarded redundant search for Synset('computer.n.01') at depth -3 + + + But keep duplicate branches (from 'animal.n.01' to 'entity.n.01'): + + >>> dog = wn.synset('dog.n.01') + >>> hyp = lambda s:s.hypernyms() + >>> pprint(dog.tree(hyp)) + [Synset('dog.n.01'), + [Synset('canine.n.02'), + [Synset('carnivore.n.01'), + [Synset('placental.n.01'), + [Synset('mammal.n.01'), + [Synset('vertebrate.n.01'), + [Synset('chordate.n.01'), + [Synset('animal.n.01'), + [Synset('organism.n.01'), + [Synset('living_thing.n.01'), + [Synset('whole.n.02'), + [Synset('object.n.01'), + [Synset('physical_entity.n.01'), + [Synset('entity.n.01')]]]]]]]]]]]]], + [Synset('domestic_animal.n.01'), + [Synset('animal.n.01'), + [Synset('organism.n.01'), + [Synset('living_thing.n.01'), + [Synset('whole.n.02'), + [Synset('object.n.01'), + [Synset('physical_entity.n.01'), [Synset('entity.n.01')]]]]]]]]] + """ + + from nltk.util import acyclic_branches_depth_first + + return acyclic_branches_depth_first(self, rel, depth, cut_mark) + + def hypernym_paths(self): + """ + Get the path(s) from this synset to the root, where each path is a + list of the synset nodes traversed on the way to the root. + + :return: A list of lists, where each list gives the node sequence + connecting the initial ``Synset`` node and a root node. + """ + paths = [] + + hypernyms = self.hypernyms() + self.instance_hypernyms() + if len(hypernyms) == 0: + paths = [[self]] + + for hypernym in hypernyms: + for ancestor_list in hypernym.hypernym_paths(): + ancestor_list.append(self) + paths.append(ancestor_list) + return paths + + def common_hypernyms(self, other): + """ + Find all synsets that are hypernyms of this synset and the + other synset. + + :type other: Synset + :param other: other input synset. + :return: The synsets that are hypernyms of both synsets. + """ + if not self._all_hypernyms: + self._all_hypernyms = { + self_synset + for self_synsets in self._iter_hypernym_lists() + for self_synset in self_synsets + } + if not other._all_hypernyms: + other._all_hypernyms = { + other_synset + for other_synsets in other._iter_hypernym_lists() + for other_synset in other_synsets + } + return list(self._all_hypernyms.intersection(other._all_hypernyms)) + + def lowest_common_hypernyms(self, other, simulate_root=False, use_min_depth=False): + """ + Get a list of lowest synset(s) that both synsets have as a hypernym. + When `use_min_depth == False` this means that the synset which appears + as a hypernym of both `self` and `other` with the lowest maximum depth + is returned or if there are multiple such synsets at the same depth + they are all returned + + However, if `use_min_depth == True` then the synset(s) which has/have + the lowest minimum depth and appear(s) in both paths is/are returned. + + By setting the use_min_depth flag to True, the behavior of NLTK2 can be + preserved. This was changed in NLTK3 to give more accurate results in a + small set of cases, generally with synsets concerning people. (eg: + 'chef.n.01', 'fireman.n.01', etc.) 
+ + This method is an implementation of Ted Pedersen's "Lowest Common + Subsumer" method from the Perl Wordnet module. It can return either + "self" or "other" if they are a hypernym of the other. + + :type other: Synset + :param other: other input synset + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (False by default) + creates a fake root that connects all the taxonomies. Set it + to True to enable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will need to be added + for nouns as well. + :type use_min_depth: bool + :param use_min_depth: This setting mimics older (v2) behavior of NLTK + wordnet If True, will use the min_depth function to calculate the + lowest common hypernyms. This is known to give strange results for + some synset pairs (eg: 'chef.n.01', 'fireman.n.01') but is retained + for backwards compatibility + :return: The synsets that are the lowest common hypernyms of both + synsets + """ + synsets = self.common_hypernyms(other) + if simulate_root: + fake_synset = Synset(None) + fake_synset._name = "*ROOT*" + fake_synset.hypernyms = lambda: [] + fake_synset.instance_hypernyms = lambda: [] + synsets.append(fake_synset) + + try: + if use_min_depth: + max_depth = max(s.min_depth() for s in synsets) + unsorted_lch = [s for s in synsets if s.min_depth() == max_depth] + else: + max_depth = max(s.max_depth() for s in synsets) + unsorted_lch = [s for s in synsets if s.max_depth() == max_depth] + return sorted(unsorted_lch) + except ValueError: + return [] + + def hypernym_distances(self, distance=0, simulate_root=False): + """ + Get the path(s) from this synset to the root, counting the distance + of each node from the initial node on the way. A set of + (synset, distance) tuples is returned. + + :type distance: int + :param distance: the distance (number of edges) from this hypernym to + the original hypernym ``Synset`` on which this method was called. + :return: A set of ``(Synset, int)`` tuples where each ``Synset`` is + a hypernym of the first ``Synset``. + """ + distances = {(self, distance)} + for hypernym in self._hypernyms() + self._instance_hypernyms(): + distances |= hypernym.hypernym_distances(distance + 1, simulate_root=False) + if simulate_root: + fake_synset = Synset(None) + fake_synset._name = "*ROOT*" + fake_synset_distance = max(distances, key=itemgetter(1))[1] + distances.add((fake_synset, fake_synset_distance + 1)) + return distances + + def _shortest_hypernym_paths(self, simulate_root): + if self._name == "*ROOT*": + return {self: 0} + + queue = deque([(self, 0)]) + path = {} + + while queue: + s, depth = queue.popleft() + if s in path: + continue + path[s] = depth + + depth += 1 + queue.extend((hyp, depth) for hyp in s._hypernyms()) + queue.extend((hyp, depth) for hyp in s._instance_hypernyms()) + + if simulate_root: + fake_synset = Synset(None) + fake_synset._name = "*ROOT*" + path[fake_synset] = max(path.values()) + 1 + + return path + + def shortest_path_distance(self, other, simulate_root=False): + """ + Returns the distance of the shortest path linking the two synsets (if + one exists). For each synset, all the ancestor nodes and their + distances are recorded and compared. The ancestor node common to both + synsets that can be reached with the minimum number of traversals is + used. 
If no ancestor nodes are common, None is returned. If a node is + compared with itself 0 is returned. + + :type other: Synset + :param other: The Synset to which the shortest path will be found. + :return: The number of edges in the shortest path connecting the two + nodes, or None if no path exists. + """ + + if self == other: + return 0 + + dist_dict1 = self._shortest_hypernym_paths(simulate_root) + dist_dict2 = other._shortest_hypernym_paths(simulate_root) + + # For each ancestor synset common to both subject synsets, find the + # connecting path length. Return the shortest of these. + + inf = float("inf") + path_distance = inf + for synset, d1 in dist_dict1.items(): + d2 = dist_dict2.get(synset, inf) + path_distance = min(path_distance, d1 + d2) + + return None if math.isinf(path_distance) else path_distance + + # interface to similarity methods + def path_similarity(self, other, verbose=False, simulate_root=True): + """ + Path Distance Similarity: + Return a score denoting how similar two word senses are, based on the + shortest path that connects the senses in the is-a (hypernym/hypnoym) + taxonomy. The score is in the range 0 to 1, except in those cases where + a path cannot be found (will only be true for verbs as there are many + distinct verb taxonomies), in which case None is returned. A score of + 1 represents identity i.e. comparing a sense with itself will return 1. + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (True by default) + creates a fake root that connects all the taxonomies. Set it + to false to disable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will be added for nouns + as well. + :return: A score denoting the similarity of the two ``Synset`` objects, + normally between 0 and 1. None is returned if no connecting path + could be found. 1 is returned if a ``Synset`` is compared with + itself. + """ + + distance = self.shortest_path_distance( + other, + simulate_root=simulate_root and (self._needs_root() or other._needs_root()), + ) + if distance is None or distance < 0: + return None + return 1.0 / (distance + 1) + + def lch_similarity(self, other, verbose=False, simulate_root=True): + """ + Leacock Chodorow Similarity: + Return a score denoting how similar two word senses are, based on the + shortest path that connects the senses (as above) and the maximum depth + of the taxonomy in which the senses occur. The relationship is given as + -log(p/2d) where p is the shortest path length and d is the taxonomy + depth. + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (True by default) + creates a fake root that connects all the taxonomies. Set it + to false to disable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will be added for nouns + as well. + :return: A score denoting the similarity of the two ``Synset`` objects, + normally greater than 0. 
None is returned if no connecting path + could be found. If a ``Synset`` is compared with itself, the + maximum score is returned, which varies depending on the taxonomy + depth. + """ + + if self._pos != other._pos: + raise WordNetError( + "Computing the lch similarity requires " + "%s and %s to have the same part of speech." % (self, other) + ) + + need_root = self._needs_root() + + if self._pos not in self._wordnet_corpus_reader._max_depth: + self._wordnet_corpus_reader._compute_max_depth(self._pos, need_root) + + depth = self._wordnet_corpus_reader._max_depth[self._pos] + + distance = self.shortest_path_distance( + other, simulate_root=simulate_root and need_root + ) + + if distance is None or distance < 0 or depth == 0: + return None + return -math.log((distance + 1) / (2.0 * depth)) + + def wup_similarity(self, other, verbose=False, simulate_root=True): + """ + Wu-Palmer Similarity: + Return a score denoting how similar two word senses are, based on the + depth of the two senses in the taxonomy and that of their Least Common + Subsumer (most specific ancestor node). Previously, the scores computed + by this implementation did _not_ always agree with those given by + Pedersen's Perl implementation of WordNet Similarity. However, with + the addition of the simulate_root flag (see below), the score for + verbs now almost always agree but not always for nouns. + + The LCS does not necessarily feature in the shortest path connecting + the two senses, as it is by definition the common ancestor deepest in + the taxonomy, not closest to the two senses. Typically, however, it + will so feature. Where multiple candidates for the LCS exist, that + whose shortest path to the root node is the longest will be selected. + Where the LCS has multiple paths to the root, the longer path is used + for the purposes of the calculation. + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (True by default) + creates a fake root that connects all the taxonomies. Set it + to false to disable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will be added for nouns + as well. + :return: A float score denoting the similarity of the two ``Synset`` + objects, normally greater than zero. If no connecting path between + the two senses can be found, None is returned. 
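        A minimal doctest sketch, assuming WordNet 3.0 data is installed
        (exact scores can differ between WordNet releases):

        >>> from nltk.corpus import wordnet as wn
        >>> wn.synset('dog.n.01').wup_similarity(wn.synset('cat.n.01'))  # doctest: +SKIP
        0.8571428571428571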
+ + """ + need_root = self._needs_root() or other._needs_root() + + # Note that to preserve behavior from NLTK2 we set use_min_depth=True + # It is possible that more accurate results could be obtained by + # removing this setting and it should be tested later on + subsumers = self.lowest_common_hypernyms( + other, simulate_root=simulate_root and need_root, use_min_depth=True + ) + + # If no LCS was found return None + if len(subsumers) == 0: + return None + + subsumer = self if self in subsumers else subsumers[0] + + # Get the longest path from the LCS to the root, + # including a correction: + # - add one because the calculations include both the start and end + # nodes + depth = subsumer.max_depth() + 1 + + # Note: No need for an additional add-one correction for non-nouns + # to account for an imaginary root node because that is now + # automatically handled by simulate_root + # if subsumer._pos != NOUN: + # depth += 1 + + # Get the shortest path from the LCS to each of the synsets it is + # subsuming. Add this to the LCS path length to get the path + # length from each synset to the root. + len1 = self.shortest_path_distance( + subsumer, simulate_root=simulate_root and need_root + ) + len2 = other.shortest_path_distance( + subsumer, simulate_root=simulate_root and need_root + ) + if len1 is None or len2 is None: + return None + len1 += depth + len2 += depth + return (2.0 * depth) / (len1 + len2) + + def res_similarity(self, other, ic, verbose=False): + """ + Resnik Similarity: + Return a score denoting how similar two word senses are, based on the + Information Content (IC) of the Least Common Subsumer (most specific + ancestor node). + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type ic: dict + :param ic: an information content object (as returned by + ``nltk.corpus.wordnet_ic.ic()``). + :return: A float score denoting the similarity of the two ``Synset`` + objects. Synsets whose LCS is the root node of the taxonomy will + have a score of 0 (e.g. N['dog'][0] and N['table'][0]). + """ + + ic1, ic2, lcs_ic = _lcs_ic(self, other, ic) + return lcs_ic + + def jcn_similarity(self, other, ic, verbose=False): + """ + Jiang-Conrath Similarity: + Return a score denoting how similar two word senses are, based on the + Information Content (IC) of the Least Common Subsumer (most specific + ancestor node) and that of the two input Synsets. The relationship is + given by the equation 1 / (IC(s1) + IC(s2) - 2 * IC(lcs)). + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type ic: dict + :param ic: an information content object (as returned by + ``nltk.corpus.wordnet_ic.ic()``). + :return: A float score denoting the similarity of the two ``Synset`` + objects. + """ + + if self == other: + return _INF + + ic1, ic2, lcs_ic = _lcs_ic(self, other, ic) + + # If either of the input synsets are the root synset, or have a + # frequency of 0 (sparse data problem), return 0. + if ic1 == 0 or ic2 == 0: + return 0 + + ic_difference = ic1 + ic2 - 2 * lcs_ic + + if ic_difference == 0: + return _INF + + return 1 / ic_difference + + def lin_similarity(self, other, ic, verbose=False): + """ + Lin Similarity: + Return a score denoting how similar two word senses are, based on the + Information Content (IC) of the Least Common Subsumer (most specific + ancestor node) and that of the two input Synsets. The relationship is + given by the equation 2 * IC(lcs) / (IC(s1) + IC(s2)). 
+ + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type ic: dict + :param ic: an information content object (as returned by + ``nltk.corpus.wordnet_ic.ic()``). + :return: A float score denoting the similarity of the two ``Synset`` + objects, in the range 0 to 1. + """ + + ic1, ic2, lcs_ic = _lcs_ic(self, other, ic) + return (2.0 * lcs_ic) / (ic1 + ic2) + + def _iter_hypernym_lists(self): + """ + :return: An iterator over ``Synset`` objects that are either proper + hypernyms or instance of hypernyms of the synset. + """ + todo = [self] + seen = set() + while todo: + for synset in todo: + seen.add(synset) + yield todo + todo = [ + hypernym + for synset in todo + for hypernym in (synset.hypernyms() + synset.instance_hypernyms()) + if hypernym not in seen + ] + + def __repr__(self): + return f"{type(self).__name__}('{self._name}')" + + def _related(self, relation_symbol, sort=True): + get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset + if relation_symbol not in self._pointers: + return [] + pointer_tuples = self._pointers[relation_symbol] + r = [get_synset(pos, offset) for pos, offset in pointer_tuples] + if sort: + r.sort() + return r + + +###################################################################### +# WordNet Corpus Reader +###################################################################### + + +class WordNetCorpusReader(CorpusReader): + """ + A corpus reader used to access wordnet or its variants. + """ + + _ENCODING = "utf8" + + # { Part-of-speech constants + ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v" + # } + + # { Filename constants + _FILEMAP = {ADJ: "adj", ADV: "adv", NOUN: "noun", VERB: "verb"} + # } + + # { Part of speech constants + _pos_numbers = {NOUN: 1, VERB: 2, ADJ: 3, ADV: 4, ADJ_SAT: 5} + _pos_names = dict(tup[::-1] for tup in _pos_numbers.items()) + # } + + #: A list of file identifiers for all the fileids used by this + #: corpus reader. + _FILES = ( + "cntlist.rev", + "lexnames", + "index.sense", + "index.adj", + "index.adv", + "index.noun", + "index.verb", + "data.adj", + "data.adv", + "data.noun", + "data.verb", + "adj.exc", + "adv.exc", + "noun.exc", + "verb.exc", + ) + + def __init__(self, root, omw_reader): + """ + Construct a new wordnet corpus reader, with the given root + directory. + """ + + super().__init__(root, self._FILES, encoding=self._ENCODING) + + # A index that provides the file offset + # Map from lemma -> pos -> synset_index -> offset + self._lemma_pos_offset_map = defaultdict(dict) + + # A cache so we don't have to reconstruct synsets + # Map from pos -> offset -> synset + self._synset_offset_cache = defaultdict(dict) + + # A lookup for the maximum depth of each part of speech. Useful for + # the lch similarity metric. + self._max_depth = defaultdict(dict) + + # Corpus reader containing omw data. + self._omw_reader = omw_reader + + # Corpus reader containing extended_omw data. 
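        # (left as None here; add_exomw() later points it at the
        #  'extended_omw' corpus reader when Extended OMW data is requested)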
+ self._exomw_reader = None + + self.provenances = defaultdict(str) + self.provenances["eng"] = "" + + if self._omw_reader is None: + warnings.warn( + "The multilingual functions are not available with this Wordnet version" + ) + + self.omw_langs = set() + + # A cache to store the wordnet data of multiple languages + self._lang_data = defaultdict(list) + + self._data_file_map = {} + self._exception_map = {} + self._lexnames = [] + self._key_count_file = None + self._key_synset_file = None + + # Load the lexnames + with self.open("lexnames") as fp: + for i, line in enumerate(fp): + index, lexname, _ = line.split() + assert int(index) == i + self._lexnames.append(lexname) + + # Load the indices for lemmas and synset offsets + self._load_lemma_pos_offset_map() + + # load the exception file data into memory + self._load_exception_map() + + self.nomap = [] + self.splits = {} + + # map from WordNet 3.0 for OMW data + self.map30 = self.map_wn30() + + # Language data attributes + self.lg_attrs = ["lemma", "none", "def", "exe"] + + def index_sense(self, version=None): + """Read sense key to synset id mapping from index.sense file in corpus directory""" + fn = "index.sense" + if version: + from nltk.corpus import CorpusReader, LazyCorpusLoader + + ixreader = LazyCorpusLoader(version, CorpusReader, r".*/" + fn) + else: + ixreader = self + with ixreader.open(fn) as fp: + sensekey_map = {} + for line in fp: + fields = line.strip().split() + sensekey = fields[0] + pos = self._pos_names[int(sensekey.split("%")[1].split(":")[0])] + sensekey_map[sensekey] = f"{fields[1]}-{pos}" + return sensekey_map + + def map_to_many(self): + sensekey_map1 = self.index_sense("wordnet") + sensekey_map2 = self.index_sense() + synset_to_many = {} + for synsetid in set(sensekey_map1.values()): + synset_to_many[synsetid] = [] + for sensekey in set(sensekey_map1.keys()).intersection( + set(sensekey_map2.keys()) + ): + source = sensekey_map1[sensekey] + target = sensekey_map2[sensekey] + synset_to_many[source].append(target) + return synset_to_many + + def map_to_one(self): + synset_to_many = self.map_to_many() + synset_to_one = {} + for source in synset_to_many: + candidates_bag = synset_to_many[source] + if candidates_bag: + candidates_set = set(candidates_bag) + if len(candidates_set) == 1: + target = candidates_bag[0] + else: + counts = [] + for candidate in candidates_set: + counts.append((candidates_bag.count(candidate), candidate)) + self.splits[source] = counts + target = max(counts)[1] + synset_to_one[source] = target + if source[-1] == "s": + # Add a mapping from "a" to target for applications like omw, + # where only Lithuanian and Slovak use the "s" ss_type. 
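                        # (i.e. an id ending in "-s" is also registered under
                        #  the corresponding id ending in "-a")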
+ synset_to_one[f"{source[:-1]}a"] = target + else: + self.nomap.append(source) + return synset_to_one + + def map_wn30(self): + """Mapping from Wordnet 3.0 to currently loaded Wordnet version""" + if self.get_version() == "3.0": + return None + else: + return self.map_to_one() + + # Open Multilingual WordNet functions, contributed by + # Nasruddin A’aidil Shari, Sim Wei Ying Geraldine, and Soe Lynn + + def of2ss(self, of): + """take an id and return the synsets""" + return self.synset_from_pos_and_offset(of[-1], int(of[:8])) + + def ss2of(self, ss): + """return the ID of the synset""" + if ss: + return f"{ss.offset():08d}-{ss.pos()}" + + def _load_lang_data(self, lang): + """load the wordnet data of the requested language from the file to + the cache, _lang_data""" + + if lang in self._lang_data: + return + + if self._omw_reader and not self.omw_langs: + self.add_omw() + + if lang not in self.langs(): + raise WordNetError("Language is not supported.") + + if self._exomw_reader and lang not in self.omw_langs: + reader = self._exomw_reader + else: + reader = self._omw_reader + + prov = self.provenances[lang] + if prov in ["cldr", "wikt"]: + prov2 = prov + else: + prov2 = "data" + + with reader.open(f"{prov}/wn-{prov2}-{lang.split('_')[0]}.tab") as fp: + self.custom_lemmas(fp, lang) + self.disable_custom_lemmas(lang) + + def add_provs(self, reader): + """Add languages from Multilingual Wordnet to the provenance dictionary""" + fileids = reader.fileids() + for fileid in fileids: + prov, langfile = os.path.split(fileid) + file_name, file_extension = os.path.splitext(langfile) + if file_extension == ".tab": + lang = file_name.split("-")[-1] + if lang in self.provenances or prov in ["cldr", "wikt"]: + # We already have another resource for this lang, + # so we need to further specify the lang id: + lang = f"{lang}_{prov}" + self.provenances[lang] = prov + + def add_omw(self): + self.add_provs(self._omw_reader) + self.omw_langs = set(self.provenances.keys()) + + def add_exomw(self): + """ + Add languages from Extended OMW + + >>> import nltk + >>> from nltk.corpus import wordnet as wn + >>> wn.add_exomw() + >>> print(wn.synset('intrinsically.r.01').lemmas(lang="eng_wikt")) + [Lemma('intrinsically.r.01.per_se'), Lemma('intrinsically.r.01.as_such')] + """ + from nltk.corpus import extended_omw + + self.add_omw() + self._exomw_reader = extended_omw + self.add_provs(self._exomw_reader) + + def langs(self): + """return a list of languages supported by Multilingual Wordnet""" + return list(self.provenances.keys()) + + def _load_lemma_pos_offset_map(self): + for suffix in self._FILEMAP.values(): + + # parse each line of the file (ignoring comment lines) + with self.open("index.%s" % suffix) as fp: + for i, line in enumerate(fp): + if line.startswith(" "): + continue + + _iter = iter(line.split()) + + def _next_token(): + return next(_iter) + + try: + + # get the lemma and part-of-speech + lemma = _next_token() + pos = _next_token() + + # get the number of synsets for this lemma + n_synsets = int(_next_token()) + assert n_synsets > 0 + + # get and ignore the pointer symbols for all synsets of + # this lemma + n_pointers = int(_next_token()) + [_next_token() for _ in range(n_pointers)] + + # same as number of synsets + n_senses = int(_next_token()) + assert n_synsets == n_senses + + # get and ignore number of senses ranked according to + # frequency + _next_token() + + # get synset offsets + synset_offsets = [int(_next_token()) for _ in range(n_synsets)] + + # raise more informative error with file name 
and line number + except (AssertionError, ValueError) as e: + tup = ("index.%s" % suffix), (i + 1), e + raise WordNetError("file %s, line %i: %s" % tup) from e + + # map lemmas and parts of speech to synsets + self._lemma_pos_offset_map[lemma][pos] = synset_offsets + if pos == ADJ: + self._lemma_pos_offset_map[lemma][ADJ_SAT] = synset_offsets + + def _load_exception_map(self): + # load the exception file data into memory + for pos, suffix in self._FILEMAP.items(): + self._exception_map[pos] = {} + with self.open("%s.exc" % suffix) as fp: + for line in fp: + terms = line.split() + self._exception_map[pos][terms[0]] = terms[1:] + self._exception_map[ADJ_SAT] = self._exception_map[ADJ] + + def _compute_max_depth(self, pos, simulate_root): + """ + Compute the max depth for the given part of speech. This is + used by the lch similarity metric. + """ + depth = 0 + for ii in self.all_synsets(pos): + try: + depth = max(depth, ii.max_depth()) + except RuntimeError: + print(ii) + if simulate_root: + depth += 1 + self._max_depth[pos] = depth + + def get_version(self): + fh = self._data_file(ADJ) + fh.seek(0) + for line in fh: + match = re.search(r"Word[nN]et (\d+|\d+\.\d+) Copyright", line) + if match is not None: + version = match.group(1) + fh.seek(0) + return version + + ############################################################# + # Loading Lemmas + ############################################################# + + def lemma(self, name, lang="eng"): + """Return lemma object that matches the name""" + # cannot simply split on first '.', + # e.g.: '.45_caliber.a.01..45_caliber' + separator = SENSENUM_RE.search(name).end() + + synset_name, lemma_name = name[: separator - 1], name[separator:] + + synset = self.synset(synset_name) + for lemma in synset.lemmas(lang): + if lemma._name == lemma_name: + return lemma + raise WordNetError(f"No lemma {lemma_name!r} in {synset_name!r}") + + def lemma_from_key(self, key): + # Keys are case sensitive and always lower-case + key = key.lower() + + lemma_name, lex_sense = key.split("%") + pos_number, lexname_index, lex_id, _, _ = lex_sense.split(":") + pos = self._pos_names[int(pos_number)] + + # open the key -> synset file if necessary + if self._key_synset_file is None: + self._key_synset_file = self.open("index.sense") + + # Find the synset for the lemma. 
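        # (index.sense is sorted by sense key, so a binary search over the open
        #  file handle suffices; the second field of the matching line is the
        #  synset offset)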
+ synset_line = _binary_search_file(self._key_synset_file, key) + if not synset_line: + raise WordNetError("No synset found for key %r" % key) + offset = int(synset_line.split()[1]) + synset = self.synset_from_pos_and_offset(pos, offset) + # return the corresponding lemma + for lemma in synset._lemmas: + if lemma._key == key: + return lemma + raise WordNetError("No lemma found for for key %r" % key) + + ############################################################# + # Loading Synsets + ############################################################# + def synset(self, name): + # split name into lemma, part of speech and synset number + lemma, pos, synset_index_str = name.lower().rsplit(".", 2) + synset_index = int(synset_index_str) - 1 + + # get the offset for this synset + try: + offset = self._lemma_pos_offset_map[lemma][pos][synset_index] + except KeyError as e: + raise WordNetError(f"No lemma {lemma!r} with part of speech {pos!r}") from e + except IndexError as e: + n_senses = len(self._lemma_pos_offset_map[lemma][pos]) + raise WordNetError( + f"Lemma {lemma!r} with part of speech {pos!r} only " + f"has {n_senses} {'sense' if n_senses == 1 else 'senses'}" + ) from e + + # load synset information from the appropriate file + synset = self.synset_from_pos_and_offset(pos, offset) + + # some basic sanity checks on loaded attributes + if pos == "s" and synset._pos == "a": + message = ( + "Adjective satellite requested but only plain " + "adjective found for lemma %r" + ) + raise WordNetError(message % lemma) + assert synset._pos == pos or (pos == "a" and synset._pos == "s") + + # Return the synset object. + return synset + + def _data_file(self, pos): + """ + Return an open file pointer for the data file for the given + part of speech. + """ + if pos == ADJ_SAT: + pos = ADJ + if self._data_file_map.get(pos) is None: + fileid = "data.%s" % self._FILEMAP[pos] + self._data_file_map[pos] = self.open(fileid) + return self._data_file_map[pos] + + def synset_from_pos_and_offset(self, pos, offset): + """ + - pos: The synset's part of speech, matching one of the module level + attributes ADJ, ADJ_SAT, ADV, NOUN or VERB ('a', 's', 'r', 'n', or 'v'). + - offset: The byte offset of this synset in the WordNet dict file + for this pos. 
+ + >>> from nltk.corpus import wordnet as wn + >>> print(wn.synset_from_pos_and_offset('n', 1740)) + Synset('entity.n.01') + """ + # Check to see if the synset is in the cache + if offset in self._synset_offset_cache[pos]: + return self._synset_offset_cache[pos][offset] + + data_file = self._data_file(pos) + data_file.seek(offset) + data_file_line = data_file.readline() + # If valid, the offset equals the 8-digit 0-padded integer found at the start of the line: + line_offset = data_file_line[:8] + if ( + line_offset.isalnum() + and line_offset == f"{'0'*(8-len(str(offset)))}{str(offset)}" + ): + synset = self._synset_from_pos_and_line(pos, data_file_line) + assert synset._offset == offset + self._synset_offset_cache[pos][offset] = synset + else: + synset = None + warnings.warn(f"No WordNet synset found for pos={pos} at offset={offset}.") + data_file.seek(0) + return synset + + @deprecated("Use public method synset_from_pos_and_offset() instead") + def _synset_from_pos_and_offset(self, *args, **kwargs): + """ + Hack to help people like the readers of + https://stackoverflow.com/a/27145655/1709587 + who were using this function before it was officially a public method + """ + return self.synset_from_pos_and_offset(*args, **kwargs) + + def _synset_from_pos_and_line(self, pos, data_file_line): + # Construct a new (empty) synset. + synset = Synset(self) + + # parse the entry for this synset + try: + + # parse out the definitions and examples from the gloss + columns_str, gloss = data_file_line.strip().split("|") + definition = re.sub(r"[\"].*?[\"]", "", gloss).strip() + examples = re.findall(r'"([^"]*)"', gloss) + for example in examples: + synset._examples.append(example) + + synset._definition = definition.strip("; ") + + # split the other info into fields + _iter = iter(columns_str.split()) + + def _next_token(): + return next(_iter) + + # get the offset + synset._offset = int(_next_token()) + + # determine the lexicographer file name + lexname_index = int(_next_token()) + synset._lexname = self._lexnames[lexname_index] + + # get the part of speech + synset._pos = _next_token() + + # create Lemma objects for each lemma + n_lemmas = int(_next_token(), 16) + for _ in range(n_lemmas): + # get the lemma name + lemma_name = _next_token() + # get the lex_id (used for sense_keys) + lex_id = int(_next_token(), 16) + # If the lemma has a syntactic marker, extract it. 
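                # (adjective markers such as "(p)", "(a)" or "(ip)" are appended
                #  to the lemma name in the data files; the regex below splits
                #  any marker off into its own group)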
+ m = re.match(r"(.*?)(\(.*\))?$", lemma_name) + lemma_name, syn_mark = m.groups() + # create the lemma object + lemma = Lemma(self, synset, lemma_name, lexname_index, lex_id, syn_mark) + synset._lemmas.append(lemma) + synset._lemma_names.append(lemma._name) + + # collect the pointer tuples + n_pointers = int(_next_token()) + for _ in range(n_pointers): + symbol = _next_token() + offset = int(_next_token()) + pos = _next_token() + lemma_ids_str = _next_token() + if lemma_ids_str == "0000": + synset._pointers[symbol].add((pos, offset)) + else: + source_index = int(lemma_ids_str[:2], 16) - 1 + target_index = int(lemma_ids_str[2:], 16) - 1 + source_lemma_name = synset._lemmas[source_index]._name + lemma_pointers = synset._lemma_pointers + tups = lemma_pointers[source_lemma_name, symbol] + tups.append((pos, offset, target_index)) + + # read the verb frames + try: + frame_count = int(_next_token()) + except StopIteration: + pass + else: + for _ in range(frame_count): + # read the plus sign + plus = _next_token() + assert plus == "+" + # read the frame and lemma number + frame_number = int(_next_token()) + frame_string_fmt = VERB_FRAME_STRINGS[frame_number] + lemma_number = int(_next_token(), 16) + # lemma number of 00 means all words in the synset + if lemma_number == 0: + synset._frame_ids.append(frame_number) + for lemma in synset._lemmas: + lemma._frame_ids.append(frame_number) + lemma._frame_strings.append(frame_string_fmt % lemma._name) + # only a specific word in the synset + else: + lemma = synset._lemmas[lemma_number - 1] + lemma._frame_ids.append(frame_number) + lemma._frame_strings.append(frame_string_fmt % lemma._name) + + # raise a more informative error with line text + except ValueError as e: + raise WordNetError(f"line {data_file_line!r}: {e}") from e + + # set sense keys for Lemma objects - note that this has to be + # done afterwards so that the relations are available + for lemma in synset._lemmas: + if synset._pos == ADJ_SAT: + head_lemma = synset.similar_tos()[0]._lemmas[0] + head_name = head_lemma._name + head_id = "%02d" % head_lemma._lex_id + else: + head_name = head_id = "" + tup = ( + lemma._name, + WordNetCorpusReader._pos_numbers[synset._pos], + lemma._lexname_index, + lemma._lex_id, + head_name, + head_id, + ) + lemma._key = ("%s%%%d:%02d:%02d:%s:%s" % tup).lower() + + # the canonical name is based on the first lemma + lemma_name = synset._lemmas[0]._name.lower() + offsets = self._lemma_pos_offset_map[lemma_name][synset._pos] + sense_index = offsets.index(synset._offset) + tup = lemma_name, synset._pos, sense_index + 1 + synset._name = "%s.%s.%02i" % tup + + return synset + + def synset_from_sense_key(self, sense_key): + """ + Retrieves synset based on a given sense_key. Sense keys can be + obtained from lemma.key() + + From https://wordnet.princeton.edu/documentation/senseidx5wn: + A sense_key is represented as:: + + lemma % lex_sense (e.g. 
'dog%1:18:01::') + + where lex_sense is encoded as:: + + ss_type:lex_filenum:lex_id:head_word:head_id + + :lemma: ASCII text of word/collocation, in lower case + :ss_type: synset type for the sense (1 digit int) + The synset type is encoded as follows:: + + 1 NOUN + 2 VERB + 3 ADJECTIVE + 4 ADVERB + 5 ADJECTIVE SATELLITE + :lex_filenum: name of lexicographer file containing the synset for the sense (2 digit int) + :lex_id: when paired with lemma, uniquely identifies a sense in the lexicographer file (2 digit int) + :head_word: lemma of the first word in satellite's head synset + Only used if sense is in an adjective satellite synset + :head_id: uniquely identifies sense in a lexicographer file when paired with head_word + Only used if head_word is present (2 digit int) + + >>> import nltk + >>> from nltk.corpus import wordnet as wn + >>> print(wn.synset_from_sense_key("drive%1:04:03::")) + Synset('drive.n.06') + + >>> print(wn.synset_from_sense_key("driving%1:04:03::")) + Synset('drive.n.06') + """ + return self.lemma_from_key(sense_key).synset() + + ############################################################# + # Retrieve synsets and lemmas. + ############################################################# + + def synsets(self, lemma, pos=None, lang="eng", check_exceptions=True): + """Load all synsets with a given lemma and part of speech tag. + If no pos is specified, all synsets for all parts of speech + will be loaded. + If lang is specified, all the synsets associated with the lemma name + of that language will be returned. + """ + lemma = lemma.lower() + + if lang == "eng": + get_synset = self.synset_from_pos_and_offset + index = self._lemma_pos_offset_map + if pos is None: + pos = POS_LIST + return [ + get_synset(p, offset) + for p in pos + for form in self._morphy(lemma, p, check_exceptions) + for offset in index[form].get(p, []) + ] + + else: + self._load_lang_data(lang) + synset_list = [] + if lemma in self._lang_data[lang][1]: + for l in self._lang_data[lang][1][lemma]: + if pos is not None and l[-1] != pos: + continue + synset_list.append(self.of2ss(l)) + return synset_list + + def lemmas(self, lemma, pos=None, lang="eng"): + """Return all Lemma objects with a name matching the specified lemma + name and part of speech tag. Matches any part of speech tag if none is + specified.""" + + lemma = lemma.lower() + if lang == "eng": + return [ + lemma_obj + for synset in self.synsets(lemma, pos) + for lemma_obj in synset.lemmas() + if lemma_obj.name().lower() == lemma + ] + + else: + self._load_lang_data(lang) + lemmas = [] + syn = self.synsets(lemma, lang=lang) + for s in syn: + if pos is not None and s.pos() != pos: + continue + for lemma_obj in s.lemmas(lang=lang): + if lemma_obj.name().lower() == lemma: + lemmas.append(lemma_obj) + return lemmas + + def all_lemma_names(self, pos=None, lang="eng"): + """Return all lemma names for all synsets for the given + part of speech tag and language or languages. 
If pos is + not specified, all synsets for all parts of speech will + be used.""" + + if lang == "eng": + if pos is None: + return iter(self._lemma_pos_offset_map) + else: + return ( + lemma + for lemma in self._lemma_pos_offset_map + if pos in self._lemma_pos_offset_map[lemma] + ) + else: + self._load_lang_data(lang) + lemma = [] + for i in self._lang_data[lang][0]: + if pos is not None and i[-1] != pos: + continue + lemma.extend(self._lang_data[lang][0][i]) + + lemma = iter(set(lemma)) + return lemma + + def all_omw_synsets(self, pos=None, lang=None): + if lang not in self.langs(): + return None + self._load_lang_data(lang) + for of in self._lang_data[lang][0]: + if not pos or of[-1] == pos: + ss = self.of2ss(of) + if ss: + yield ss + + # else: + # A few OMW offsets don't exist in Wordnet 3.0. + # warnings.warn(f"Language {lang}: no synset found for {of}") + + def all_synsets(self, pos=None, lang="eng"): + """Iterate over all synsets with a given part of speech tag. + If no pos is specified, all synsets for all parts of speech + will be loaded. + """ + if lang == "eng": + return self.all_eng_synsets(pos=pos) + else: + return self.all_omw_synsets(pos=pos, lang=lang) + + def all_eng_synsets(self, pos=None): + if pos is None: + pos_tags = self._FILEMAP.keys() + else: + pos_tags = [pos] + + cache = self._synset_offset_cache + from_pos_and_line = self._synset_from_pos_and_line + + # generate all synsets for each part of speech + for pos_tag in pos_tags: + # Open the file for reading. Note that we can not re-use + # the file pointers from self._data_file_map here, because + # we're defining an iterator, and those file pointers might + # be moved while we're not looking. + if pos_tag == ADJ_SAT: + pos_file = ADJ + else: + pos_file = pos_tag + fileid = "data.%s" % self._FILEMAP[pos_file] + data_file = self.open(fileid) + + try: + # generate synsets for each line in the POS file + offset = data_file.tell() + line = data_file.readline() + while line: + if not line[0].isspace(): + if offset in cache[pos_tag]: + # See if the synset is cached + synset = cache[pos_tag][offset] + else: + # Otherwise, parse the line + synset = from_pos_and_line(pos_tag, line) + cache[pos_tag][offset] = synset + + # adjective satellites are in the same file as + # adjectives so only yield the synset if it's actually + # a satellite + if pos_tag == ADJ_SAT and synset._pos == ADJ_SAT: + yield synset + # for all other POS tags, yield all synsets (this means + # that adjectives also include adjective satellites) + elif pos_tag != ADJ_SAT: + yield synset + offset = data_file.tell() + line = data_file.readline() + + # close the extra file handle we opened + except: + data_file.close() + raise + else: + data_file.close() + + def words(self, lang="eng"): + """return lemmas of the given language as list of words""" + return self.all_lemma_names(lang=lang) + + def synonyms(self, word, lang="eng"): + """return nested list with the synonyms of the different senses of word in the given language""" + return [ + sorted(list(set(ss.lemma_names(lang=lang)) - {word})) + for ss in self.synsets(word, lang=lang) + ] + + def doc(self, file="README", lang="eng"): + """Return the contents of readme, license or citation file + use lang=lang to get the file for an individual language""" + if lang == "eng": + reader = self + else: + reader = self._omw_reader + if lang in self.langs(): + file = f"{os.path.join(self.provenances[lang],file)}" + try: + with reader.open(file) as fp: + return fp.read() + except: + if lang in self._lang_data: + return 
f"Cannot determine {file} for {lang}" + else: + return f"Language {lang} is not supported." + + def license(self, lang="eng"): + """Return the contents of LICENSE (for omw) + use lang=lang to get the license for an individual language""" + return self.doc(file="LICENSE", lang=lang) + + def readme(self, lang="eng"): + """Return the contents of README (for omw) + use lang=lang to get the readme for an individual language""" + return self.doc(file="README", lang=lang) + + def citation(self, lang="eng"): + """Return the contents of citation.bib file (for omw) + use lang=lang to get the citation for an individual language""" + return self.doc(file="citation.bib", lang=lang) + + ############################################################# + # Misc + ############################################################# + def lemma_count(self, lemma): + """Return the frequency count for this Lemma""" + # Currently, count is only work for English + if lemma._lang != "eng": + return 0 + # open the count file if we haven't already + if self._key_count_file is None: + self._key_count_file = self.open("cntlist.rev") + # find the key in the counts file and return the count + line = _binary_search_file(self._key_count_file, lemma._key) + if line: + return int(line.rsplit(" ", 1)[-1]) + else: + return 0 + + def path_similarity(self, synset1, synset2, verbose=False, simulate_root=True): + return synset1.path_similarity(synset2, verbose, simulate_root) + + path_similarity.__doc__ = Synset.path_similarity.__doc__ + + def lch_similarity(self, synset1, synset2, verbose=False, simulate_root=True): + return synset1.lch_similarity(synset2, verbose, simulate_root) + + lch_similarity.__doc__ = Synset.lch_similarity.__doc__ + + def wup_similarity(self, synset1, synset2, verbose=False, simulate_root=True): + return synset1.wup_similarity(synset2, verbose, simulate_root) + + wup_similarity.__doc__ = Synset.wup_similarity.__doc__ + + def res_similarity(self, synset1, synset2, ic, verbose=False): + return synset1.res_similarity(synset2, ic, verbose) + + res_similarity.__doc__ = Synset.res_similarity.__doc__ + + def jcn_similarity(self, synset1, synset2, ic, verbose=False): + return synset1.jcn_similarity(synset2, ic, verbose) + + jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__ + + def lin_similarity(self, synset1, synset2, ic, verbose=False): + return synset1.lin_similarity(synset2, ic, verbose) + + lin_similarity.__doc__ = Synset.lin_similarity.__doc__ + + ############################################################# + # Morphy + ############################################################# + # Morphy, adapted from Oliver Steele's pywordnet + def morphy(self, form, pos=None, check_exceptions=True): + """ + Find a possible base form for the given form, with the given + part of speech, by checking WordNet's list of exceptional + forms, and by recursively stripping affixes for this part of + speech until a form in WordNet is found. 
+ + >>> from nltk.corpus import wordnet as wn + >>> print(wn.morphy('dogs')) + dog + >>> print(wn.morphy('churches')) + church + >>> print(wn.morphy('aardwolves')) + aardwolf + >>> print(wn.morphy('abaci')) + abacus + >>> wn.morphy('hardrock', wn.ADV) + >>> print(wn.morphy('book', wn.NOUN)) + book + >>> wn.morphy('book', wn.ADJ) + """ + + if pos is None: + morphy = self._morphy + analyses = chain(a for p in POS_LIST for a in morphy(form, p)) + else: + analyses = self._morphy(form, pos, check_exceptions) + + # get the first one we find + first = list(islice(analyses, 1)) + if len(first) == 1: + return first[0] + else: + return None + + MORPHOLOGICAL_SUBSTITUTIONS = { + NOUN: [ + ("s", ""), + ("ses", "s"), + ("ves", "f"), + ("xes", "x"), + ("zes", "z"), + ("ches", "ch"), + ("shes", "sh"), + ("men", "man"), + ("ies", "y"), + ], + VERB: [ + ("s", ""), + ("ies", "y"), + ("es", "e"), + ("es", ""), + ("ed", "e"), + ("ed", ""), + ("ing", "e"), + ("ing", ""), + ], + ADJ: [("er", ""), ("est", ""), ("er", "e"), ("est", "e")], + ADV: [], + } + + MORPHOLOGICAL_SUBSTITUTIONS[ADJ_SAT] = MORPHOLOGICAL_SUBSTITUTIONS[ADJ] + + def _morphy(self, form, pos, check_exceptions=True): + # from jordanbg: + # Given an original string x + # 1. Apply rules once to the input to get y1, y2, y3, etc. + # 2. Return all that are in the database + # 3. If there are no matches, keep applying rules until you either + # find a match or you can't go any further + + exceptions = self._exception_map[pos] + substitutions = self.MORPHOLOGICAL_SUBSTITUTIONS[pos] + + def apply_rules(forms): + return [ + form[: -len(old)] + new + for form in forms + for old, new in substitutions + if form.endswith(old) + ] + + def filter_forms(forms): + result = [] + seen = set() + for form in forms: + if form in self._lemma_pos_offset_map: + if pos in self._lemma_pos_offset_map[form]: + if form not in seen: + result.append(form) + seen.add(form) + return result + + # 0. Check the exception lists + if check_exceptions: + if form in exceptions: + return filter_forms([form] + exceptions[form]) + + # 1. Apply rules once to the input to get y1, y2, y3, etc. + forms = apply_rules([form]) + + # 2. Return all that are in the database (and check the original too) + results = filter_forms([form] + forms) + if results: + return results + + # 3. If there are no matches, keep applying rules until we find a match + while forms: + forms = apply_rules(forms) + results = filter_forms(forms) + if results: + return results + + # Return an empty list if we can't find anything + return [] + + ############################################################# + # Create information content from corpus + ############################################################# + def ic(self, corpus, weight_senses_equally=False, smoothing=1.0): + """ + Creates an information content lookup dictionary from a corpus. + + :type corpus: CorpusReader + :param corpus: The corpus from which we create an information + content dictionary. + :type weight_senses_equally: bool + :param weight_senses_equally: If this is True, gives all + possible senses equal weight rather than dividing by the + number of possible senses. (If a word has 3 synses, each + sense gets 0.3333 per appearance when this is False, 1.0 when + it is true.) 
+ :param smoothing: How much do we smooth synset counts (default is 1.0) + :type smoothing: float + :return: An information content dictionary + """ + counts = FreqDist() + for ww in corpus.words(): + counts[ww] += 1 + + ic = {} + for pp in POS_LIST: + ic[pp] = defaultdict(float) + + # Initialize the counts with the smoothing value + if smoothing > 0.0: + for pp in POS_LIST: + ic[pp][0] = smoothing + for ss in self.all_synsets(): + pos = ss._pos + if pos == ADJ_SAT: + pos = ADJ + ic[pos][ss._offset] = smoothing + + for ww in counts: + possible_synsets = self.synsets(ww) + if len(possible_synsets) == 0: + continue + + # Distribute weight among possible synsets + weight = float(counts[ww]) + if not weight_senses_equally: + weight /= float(len(possible_synsets)) + + for ss in possible_synsets: + pos = ss._pos + if pos == ADJ_SAT: + pos = ADJ + for level in ss._iter_hypernym_lists(): + for hh in level: + ic[pos][hh._offset] += weight + # Add the weight to the root + ic[pos][0] += weight + return ic + + def custom_lemmas(self, tab_file, lang): + """ + Reads a custom tab file containing mappings of lemmas in the given + language to Princeton WordNet 3.0 synset offsets, allowing NLTK's + WordNet functions to then be used with that language. + + See the "Tab files" section at https://omwn.org/omw1.html for + documentation on the Multilingual WordNet tab file format. + + :param tab_file: Tab file as a file or file-like object + :type: lang str + :param: lang ISO 639-3 code of the language of the tab file + """ + lg = lang.split("_")[0] + if len(lg) != 3: + raise ValueError("lang should be a (3 character) ISO 639-3 code") + self._lang_data[lang] = [ + defaultdict(list), + defaultdict(list), + defaultdict(list), + defaultdict(list), + ] + for line in tab_file.readlines(): + if isinstance(line, bytes): + # Support byte-stream files (e.g. 
as returned by Python 2's + # open() function) as well as text-stream ones + line = line.decode("utf-8") + if not line.startswith("#"): + triple = line.strip().split("\t") + if len(triple) < 3: + continue + offset_pos, label = triple[:2] + val = triple[-1] + if self.map30: + if offset_pos in self.map30: + # Map offset_pos to current Wordnet version: + offset_pos = self.map30[offset_pos] + else: + # Some OMW offsets were never in Wordnet: + if ( + offset_pos not in self.nomap + and offset_pos.replace("a", "s") not in self.nomap + ): + warnings.warn( + f"{lang}: invalid offset {offset_pos} in '{line}'" + ) + continue + elif offset_pos[-1] == "a": + wnss = self.of2ss(offset_pos) + if wnss and wnss.pos() == "s": # Wordnet pos is "s" + # Label OMW adjective satellites back to their Wordnet pos ("s") + offset_pos = self.ss2of(wnss) + pair = label.split(":") + attr = pair[-1] + if len(pair) == 1 or pair[0] == lg: + if attr == "lemma": + val = val.strip().replace(" ", "_") + self._lang_data[lang][1][val.lower()].append(offset_pos) + if attr in self.lg_attrs: + self._lang_data[lang][self.lg_attrs.index(attr)][ + offset_pos + ].append(val) + + def disable_custom_lemmas(self, lang): + """prevent synsets from being mistakenly added""" + for n in range(len(self.lg_attrs)): + self._lang_data[lang][n].default_factory = None + + ###################################################################### + # Visualize WordNet relation graphs using Graphviz + ###################################################################### + + def digraph( + self, + inputs, + rel=lambda s: s.hypernyms(), + pos=None, + maxdepth=-1, + shapes=None, + attr=None, + verbose=False, + ): + """ + Produce a graphical representation from 'inputs' (a list of + start nodes, which can be a mix of Synsets, Lemmas and/or words), + and a synset relation, for drawing with the 'dot' graph visualisation + program from the Graphviz package. + + Return a string in the DOT graph file language, which can then be + converted to an image by nltk.parse.dependencygraph.dot2img(dot_string). 
+ + Optional Parameters: + :rel: Wordnet synset relation + :pos: for words, restricts Part of Speech to 'n', 'v', 'a' or 'r' + :maxdepth: limit the longest path + :shapes: dictionary of strings that trigger a specified shape + :attr: dictionary with global graph attributes + :verbose: warn about cycles + + >>> from nltk.corpus import wordnet as wn + >>> print(wn.digraph([wn.synset('dog.n.01')])) + digraph G { + "Synset('animal.n.01')" -> "Synset('organism.n.01')"; + "Synset('canine.n.02')" -> "Synset('carnivore.n.01')"; + "Synset('carnivore.n.01')" -> "Synset('placental.n.01')"; + "Synset('chordate.n.01')" -> "Synset('animal.n.01')"; + "Synset('dog.n.01')" -> "Synset('canine.n.02')"; + "Synset('dog.n.01')" -> "Synset('domestic_animal.n.01')"; + "Synset('domestic_animal.n.01')" -> "Synset('animal.n.01')"; + "Synset('living_thing.n.01')" -> "Synset('whole.n.02')"; + "Synset('mammal.n.01')" -> "Synset('vertebrate.n.01')"; + "Synset('object.n.01')" -> "Synset('physical_entity.n.01')"; + "Synset('organism.n.01')" -> "Synset('living_thing.n.01')"; + "Synset('physical_entity.n.01')" -> "Synset('entity.n.01')"; + "Synset('placental.n.01')" -> "Synset('mammal.n.01')"; + "Synset('vertebrate.n.01')" -> "Synset('chordate.n.01')"; + "Synset('whole.n.02')" -> "Synset('object.n.01')"; + } + + """ + from nltk.util import edge_closure, edges2dot + + synsets = set() + edges = set() + if not shapes: + shapes = dict() + if not attr: + attr = dict() + + def add_lemma(lem): + ss = lem.synset() + synsets.add(ss) + edges.add((lem, ss)) + + for node in inputs: + typ = type(node) + if typ == Synset: + synsets.add(node) + elif typ == Lemma: + add_lemma(node) + elif typ == str: + for lemma in self.lemmas(node, pos): + add_lemma(lemma) + + for ss in synsets: + edges = edges.union(edge_closure(ss, rel, maxdepth, verbose)) + dot_string = edges2dot(sorted(list(edges)), shapes=shapes, attr=attr) + return dot_string + + +###################################################################### +# WordNet Information Content Corpus Reader +###################################################################### + + +class WordNetICCorpusReader(CorpusReader): + """ + A corpus reader for the WordNet information content corpus. + """ + + def __init__(self, root, fileids): + CorpusReader.__init__(self, root, fileids, encoding="utf8") + + # this load function would be more efficient if the data was pickled + # Note that we can't use NLTK's frequency distributions because + # synsets are overlapping (each instance of a synset also counts + # as an instance of its hypernyms) + def ic(self, icfile): + """ + Load an information content file from the wordnet_ic corpus + and return a dictionary. This dictionary has just two keys, + NOUN and VERB, whose values are dictionaries that map from + synsets to information content values. + + :type icfile: str + :param icfile: The name of the wordnet_ic file (e.g. "ic-brown.dat") + :return: An information content dictionary + """ + ic = {} + ic[NOUN] = defaultdict(float) + ic[VERB] = defaultdict(float) + with self.open(icfile) as fp: + for num, line in enumerate(fp): + if num == 0: # skip the header + continue + fields = line.split() + offset = int(fields[0][:-1]) + value = float(fields[1]) + pos = _get_pos(fields[0]) + if len(fields) == 3 and fields[2] == "ROOT": + # Store root count. 
+ ic[pos][0] += value + if value != 0: + ic[pos][offset] = value + return ic + + +###################################################################### +# Similarity metrics +###################################################################### + +# TODO: Add in the option to manually add a new root node; this will be +# useful for verb similarity as there exist multiple verb taxonomies. + +# More information about the metrics is available at +# http://marimba.d.umn.edu/similarity/measures.html + + +def path_similarity(synset1, synset2, verbose=False, simulate_root=True): + return synset1.path_similarity( + synset2, verbose=verbose, simulate_root=simulate_root + ) + + +def lch_similarity(synset1, synset2, verbose=False, simulate_root=True): + return synset1.lch_similarity(synset2, verbose=verbose, simulate_root=simulate_root) + + +def wup_similarity(synset1, synset2, verbose=False, simulate_root=True): + return synset1.wup_similarity(synset2, verbose=verbose, simulate_root=simulate_root) + + +def res_similarity(synset1, synset2, ic, verbose=False): + return synset1.res_similarity(synset2, ic, verbose=verbose) + + +def jcn_similarity(synset1, synset2, ic, verbose=False): + return synset1.jcn_similarity(synset2, ic, verbose=verbose) + + +def lin_similarity(synset1, synset2, ic, verbose=False): + return synset1.lin_similarity(synset2, ic, verbose=verbose) + + +path_similarity.__doc__ = Synset.path_similarity.__doc__ +lch_similarity.__doc__ = Synset.lch_similarity.__doc__ +wup_similarity.__doc__ = Synset.wup_similarity.__doc__ +res_similarity.__doc__ = Synset.res_similarity.__doc__ +jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__ +lin_similarity.__doc__ = Synset.lin_similarity.__doc__ + + +def _lcs_ic(synset1, synset2, ic, verbose=False): + """ + Get the information content of the least common subsumer that has + the highest information content value. If two nodes have no + explicit common subsumer, assume that they share an artificial + root node that is the hypernym of all explicit roots. + + :type synset1: Synset + :param synset1: First input synset. + :type synset2: Synset + :param synset2: Second input synset. Must be the same part of + speech as the first synset. + :type ic: dict + :param ic: an information content object (as returned by ``load_ic()``). + :return: The information content of the two synsets and their most + informative subsumer + """ + if synset1._pos != synset2._pos: + raise WordNetError( + "Computing the least common subsumer requires " + "%s and %s to have the same part of speech." 
% (synset1, synset2) + ) + + ic1 = information_content(synset1, ic) + ic2 = information_content(synset2, ic) + subsumers = synset1.common_hypernyms(synset2) + if len(subsumers) == 0: + subsumer_ic = 0 + else: + subsumer_ic = max(information_content(s, ic) for s in subsumers) + + if verbose: + print("> LCS Subsumer by content:", subsumer_ic) + + return ic1, ic2, subsumer_ic + + +# Utility functions + + +def information_content(synset, ic): + pos = synset._pos + if pos == ADJ_SAT: + pos = ADJ + try: + icpos = ic[pos] + except KeyError as e: + msg = "Information content file has no entries for part-of-speech: %s" + raise WordNetError(msg % pos) from e + + counts = icpos[synset._offset] + if counts == 0: + return _INF + else: + return -math.log(counts / icpos[0]) + + +# get the part of speech (NOUN or VERB) from the information content record +# (each identifier has a 'n' or 'v' suffix) + + +def _get_pos(field): + if field[-1] == "n": + return NOUN + elif field[-1] == "v": + return VERB + else: + msg = ( + "Unidentified part of speech in WordNet Information Content file " + "for field %s" % field + ) + raise ValueError(msg) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py new file mode 100644 index 0000000000000000000000000000000000000000..35bafdfef4f12f934de8e5e4617341fb2ba7b7a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py @@ -0,0 +1,256 @@ +# Natural Language Toolkit: York-Toronto-Helsinki Parsed Corpus of Old English Prose (YCOE) +# +# Copyright (C) 2001-2015 NLTK Project +# Author: Selina Dennis +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old +English Prose (YCOE), a 1.5 million word syntactically-annotated +corpus of Old English prose texts. The corpus is distributed by the +Oxford Text Archive: http://www.ota.ahds.ac.uk/ It is not included +with NLTK. + +The YCOE corpus is divided into 100 files, each representing +an Old English prose text. Tags used within each text complies +to the YCOE standard: https://www-users.york.ac.uk/~lang22/YCOE/YcoeHome.htm +""" + +import os +import re + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader +from nltk.corpus.reader.tagged import TaggedCorpusReader +from nltk.corpus.reader.util import * +from nltk.tokenize import RegexpTokenizer + + +class YCOECorpusReader(CorpusReader): + """ + Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old + English Prose (YCOE), a 1.5 million word syntactically-annotated + corpus of Old English prose texts. 
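Before the YCOE reader below, a brief usage sketch of the information-content machinery defined above in wordnet.py. It is a minimal example, assuming the 'wordnet' and 'wordnet_ic' NLTK data packages are installed:

from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic

brown_ic = wordnet_ic.ic('ic-brown.dat')                  # parsed by WordNetICCorpusReader.ic()
dog, cat = wn.synset('dog.n.01'), wn.synset('cat.n.01')
print(dog.res_similarity(cat, brown_ic))                  # Resnik: IC of the most informative common subsumer
print(dog.lin_similarity(cat, brown_ic))                  # Lin: normalised variant of the same idea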
+ """ + + def __init__(self, root, encoding="utf8"): + CorpusReader.__init__(self, root, [], encoding) + + self._psd_reader = YCOEParseCorpusReader( + self.root.join("psd"), ".*", ".psd", encoding=encoding + ) + self._pos_reader = YCOETaggedCorpusReader(self.root.join("pos"), ".*", ".pos") + + # Make sure we have a consistent set of items: + documents = {f[:-4] for f in self._psd_reader.fileids()} + if {f[:-4] for f in self._pos_reader.fileids()} != documents: + raise ValueError('Items in "psd" and "pos" ' "subdirectories do not match.") + + fileids = sorted( + ["%s.psd" % doc for doc in documents] + + ["%s.pos" % doc for doc in documents] + ) + CorpusReader.__init__(self, root, fileids, encoding) + self._documents = sorted(documents) + + def documents(self, fileids=None): + """ + Return a list of document identifiers for all documents in + this corpus, or for the documents with the given file(s) if + specified. + """ + if fileids is None: + return self._documents + if isinstance(fileids, str): + fileids = [fileids] + for f in fileids: + if f not in self._fileids: + raise KeyError("File id %s not found" % fileids) + # Strip off the '.pos' and '.psd' extensions. + return sorted({f[:-4] for f in fileids}) + + def fileids(self, documents=None): + """ + Return a list of file identifiers for the files that make up + this corpus, or that store the given document(s) if specified. + """ + if documents is None: + return self._fileids + elif isinstance(documents, str): + documents = [documents] + return sorted( + set( + ["%s.pos" % doc for doc in documents] + + ["%s.psd" % doc for doc in documents] + ) + ) + + def _getfileids(self, documents, subcorpus): + """ + Helper that selects the appropriate fileids for a given set of + documents from a given subcorpus (pos or psd). + """ + if documents is None: + documents = self._documents + else: + if isinstance(documents, str): + documents = [documents] + for document in documents: + if document not in self._documents: + if document[-4:] in (".pos", ".psd"): + raise ValueError( + "Expected a document identifier, not a file " + "identifier. (Use corpus.documents() to get " + "a list of document identifiers." + ) + else: + raise ValueError("Document identifier %s not found" % document) + return [f"{d}.{subcorpus}" for d in documents] + + # Delegate to one of our two sub-readers: + def words(self, documents=None): + return self._pos_reader.words(self._getfileids(documents, "pos")) + + def sents(self, documents=None): + return self._pos_reader.sents(self._getfileids(documents, "pos")) + + def paras(self, documents=None): + return self._pos_reader.paras(self._getfileids(documents, "pos")) + + def tagged_words(self, documents=None): + return self._pos_reader.tagged_words(self._getfileids(documents, "pos")) + + def tagged_sents(self, documents=None): + return self._pos_reader.tagged_sents(self._getfileids(documents, "pos")) + + def tagged_paras(self, documents=None): + return self._pos_reader.tagged_paras(self._getfileids(documents, "pos")) + + def parsed_sents(self, documents=None): + return self._psd_reader.parsed_sents(self._getfileids(documents, "psd")) + + +class YCOEParseCorpusReader(BracketParseCorpusReader): + """Specialized version of the standard bracket parse corpus reader + that strips out (CODE ...) and (ID ...) 
nodes.""" + + def _parse(self, t): + t = re.sub(r"(?u)\((CODE|ID)[^\)]*\)", "", t) + if re.match(r"\s*\(\s*\)\s*$", t): + return None + return BracketParseCorpusReader._parse(self, t) + + +class YCOETaggedCorpusReader(TaggedCorpusReader): + def __init__(self, root, items, encoding="utf8"): + gaps_re = r"(?u)(?<=/\.)\s+|\s*\S*_CODE\s*|\s*\S*_ID\s*" + sent_tokenizer = RegexpTokenizer(gaps_re, gaps=True) + TaggedCorpusReader.__init__( + self, root, items, sep="_", sent_tokenizer=sent_tokenizer + ) + + +#: A list of all documents and their titles in ycoe. +documents = { + "coadrian.o34": "Adrian and Ritheus", + "coaelhom.o3": "Ælfric, Supplemental Homilies", + "coaelive.o3": "Ælfric's Lives of Saints", + "coalcuin": "Alcuin De virtutibus et vitiis", + "coalex.o23": "Alexander's Letter to Aristotle", + "coapollo.o3": "Apollonius of Tyre", + "coaugust": "Augustine", + "cobede.o2": "Bede's History of the English Church", + "cobenrul.o3": "Benedictine Rule", + "coblick.o23": "Blickling Homilies", + "coboeth.o2": "Boethius' Consolation of Philosophy", + "cobyrhtf.o3": "Byrhtferth's Manual", + "cocanedgD": "Canons of Edgar (D)", + "cocanedgX": "Canons of Edgar (X)", + "cocathom1.o3": "Ælfric's Catholic Homilies I", + "cocathom2.o3": "Ælfric's Catholic Homilies II", + "cochad.o24": "Saint Chad", + "cochdrul": "Chrodegang of Metz, Rule", + "cochristoph": "Saint Christopher", + "cochronA.o23": "Anglo-Saxon Chronicle A", + "cochronC": "Anglo-Saxon Chronicle C", + "cochronD": "Anglo-Saxon Chronicle D", + "cochronE.o34": "Anglo-Saxon Chronicle E", + "cocura.o2": "Cura Pastoralis", + "cocuraC": "Cura Pastoralis (Cotton)", + "codicts.o34": "Dicts of Cato", + "codocu1.o1": "Documents 1 (O1)", + "codocu2.o12": "Documents 2 (O1/O2)", + "codocu2.o2": "Documents 2 (O2)", + "codocu3.o23": "Documents 3 (O2/O3)", + "codocu3.o3": "Documents 3 (O3)", + "codocu4.o24": "Documents 4 (O2/O4)", + "coeluc1": "Honorius of Autun, Elucidarium 1", + "coeluc2": "Honorius of Autun, Elucidarium 1", + "coepigen.o3": "Ælfric's Epilogue to Genesis", + "coeuphr": "Saint Euphrosyne", + "coeust": "Saint Eustace and his companions", + "coexodusP": "Exodus (P)", + "cogenesiC": "Genesis (C)", + "cogregdC.o24": "Gregory's Dialogues (C)", + "cogregdH.o23": "Gregory's Dialogues (H)", + "coherbar": "Pseudo-Apuleius, Herbarium", + "coinspolD.o34": "Wulfstan's Institute of Polity (D)", + "coinspolX": "Wulfstan's Institute of Polity (X)", + "cojames": "Saint James", + "colacnu.o23": "Lacnunga", + "colaece.o2": "Leechdoms", + "colaw1cn.o3": "Laws, Cnut I", + "colaw2cn.o3": "Laws, Cnut II", + "colaw5atr.o3": "Laws, Æthelred V", + "colaw6atr.o3": "Laws, Æthelred VI", + "colawaf.o2": "Laws, Alfred", + "colawafint.o2": "Alfred's Introduction to Laws", + "colawger.o34": "Laws, Gerefa", + "colawine.ox2": "Laws, Ine", + "colawnorthu.o3": "Northumbra Preosta Lagu", + "colawwllad.o4": "Laws, William I, Lad", + "coleofri.o4": "Leofric", + "colsigef.o3": "Ælfric's Letter to Sigefyrth", + "colsigewB": "Ælfric's Letter to Sigeweard (B)", + "colsigewZ.o34": "Ælfric's Letter to Sigeweard (Z)", + "colwgeat": "Ælfric's Letter to Wulfgeat", + "colwsigeT": "Ælfric's Letter to Wulfsige (T)", + "colwsigeXa.o34": "Ælfric's Letter to Wulfsige (Xa)", + "colwstan1.o3": "Ælfric's Letter to Wulfstan I", + "colwstan2.o3": "Ælfric's Letter to Wulfstan II", + "comargaC.o34": "Saint Margaret (C)", + "comargaT": "Saint Margaret (T)", + "comart1": "Martyrology, I", + "comart2": "Martyrology, II", + "comart3.o23": "Martyrology, III", + "comarvel.o23": "Marvels of the East", + 
"comary": "Mary of Egypt", + "coneot": "Saint Neot", + "conicodA": "Gospel of Nicodemus (A)", + "conicodC": "Gospel of Nicodemus (C)", + "conicodD": "Gospel of Nicodemus (D)", + "conicodE": "Gospel of Nicodemus (E)", + "coorosiu.o2": "Orosius", + "cootest.o3": "Heptateuch", + "coprefcath1.o3": "Ælfric's Preface to Catholic Homilies I", + "coprefcath2.o3": "Ælfric's Preface to Catholic Homilies II", + "coprefcura.o2": "Preface to the Cura Pastoralis", + "coprefgen.o3": "Ælfric's Preface to Genesis", + "copreflives.o3": "Ælfric's Preface to Lives of Saints", + "coprefsolilo": "Preface to Augustine's Soliloquies", + "coquadru.o23": "Pseudo-Apuleius, Medicina de quadrupedibus", + "corood": "History of the Holy Rood-Tree", + "cosevensl": "Seven Sleepers", + "cosolilo": "St. Augustine's Soliloquies", + "cosolsat1.o4": "Solomon and Saturn I", + "cosolsat2": "Solomon and Saturn II", + "cotempo.o3": "Ælfric's De Temporibus Anni", + "coverhom": "Vercelli Homilies", + "coverhomE": "Vercelli Homilies (E)", + "coverhomL": "Vercelli Homilies (L)", + "covinceB": "Saint Vincent (Bodley 343)", + "covinsal": "Vindicta Salvatoris", + "cowsgosp.o3": "West-Saxon Gospels", + "cowulf.o34": "Wulfstan's Homilies", +} diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/util.py b/venv/lib/python3.10/site-packages/nltk/corpus/util.py new file mode 100644 index 0000000000000000000000000000000000000000..29a63574264c4859081ef8e36e26d9382f5b087f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/util.py @@ -0,0 +1,154 @@ +# Natural Language Toolkit: Corpus Reader Utility Functions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +###################################################################### +# { Lazy Corpus Loader +###################################################################### + +import gc +import re + +import nltk + +TRY_ZIPFILE_FIRST = False + + +class LazyCorpusLoader: + """ + To see the API documentation for this lazily loaded corpus, first + run corpus.ensure_loaded(), and then run help(this_corpus). + + LazyCorpusLoader is a proxy object which is used to stand in for a + corpus object before the corpus is loaded. This allows NLTK to + create an object for each corpus, but defer the costs associated + with loading those corpora until the first time that they're + actually accessed. + + The first time this object is accessed in any way, it will load + the corresponding corpus, and transform itself into that corpus + (by modifying its own ``__class__`` and ``__dict__`` attributes). + + If the corpus can not be found, then accessing this object will + raise an exception, displaying installation instructions for the + NLTK data package. Once they've properly installed the data + package (or modified ``nltk.data.path`` to point to its location), + they can then use the corpus object without restarting python. + + :param name: The name of the corpus + :type name: str + :param reader_cls: The specific CorpusReader class, e.g. PlaintextCorpusReader, WordListCorpusReader + :type reader: nltk.corpus.reader.api.CorpusReader + :param nltk_data_subdir: The subdirectory where the corpus is stored. + :type nltk_data_subdir: str + :param `*args`: Any other non-keywords arguments that `reader_cls` might need. + :param `**kwargs`: Any other keywords arguments that `reader_cls` might need. 
+ """ + + def __init__(self, name, reader_cls, *args, **kwargs): + from nltk.corpus.reader.api import CorpusReader + + assert issubclass(reader_cls, CorpusReader) + self.__name = self.__name__ = name + self.__reader_cls = reader_cls + # If nltk_data_subdir is set explicitly + if "nltk_data_subdir" in kwargs: + # Use the specified subdirectory path + self.subdir = kwargs["nltk_data_subdir"] + # Pops the `nltk_data_subdir` argument, we don't need it anymore. + kwargs.pop("nltk_data_subdir", None) + else: # Otherwise use 'nltk_data/corpora' + self.subdir = "corpora" + self.__args = args + self.__kwargs = kwargs + + def __load(self): + # Find the corpus root directory. + zip_name = re.sub(r"(([^/]+)(/.*)?)", r"\2.zip/\1/", self.__name) + if TRY_ZIPFILE_FIRST: + try: + root = nltk.data.find(f"{self.subdir}/{zip_name}") + except LookupError as e: + try: + root = nltk.data.find(f"{self.subdir}/{self.__name}") + except LookupError: + raise e + else: + try: + root = nltk.data.find(f"{self.subdir}/{self.__name}") + except LookupError as e: + try: + root = nltk.data.find(f"{self.subdir}/{zip_name}") + except LookupError: + raise e + + # Load the corpus. + corpus = self.__reader_cls(root, *self.__args, **self.__kwargs) + + # This is where the magic happens! Transform ourselves into + # the corpus by modifying our own __dict__ and __class__ to + # match that of the corpus. + + args, kwargs = self.__args, self.__kwargs + name, reader_cls = self.__name, self.__reader_cls + + self.__dict__ = corpus.__dict__ + self.__class__ = corpus.__class__ + + # _unload support: assign __dict__ and __class__ back, then do GC. + # after reassigning __dict__ there shouldn't be any references to + # corpus data so the memory should be deallocated after gc.collect() + def _unload(self): + lazy_reader = LazyCorpusLoader(name, reader_cls, *args, **kwargs) + self.__dict__ = lazy_reader.__dict__ + self.__class__ = lazy_reader.__class__ + gc.collect() + + self._unload = _make_bound_method(_unload, self) + + def __getattr__(self, attr): + + # Fix for inspect.isclass under Python 2.6 + # (see https://bugs.python.org/issue1225107). + # Without this fix tests may take extra 1.5GB RAM + # because all corpora gets loaded during test collection. + if attr == "__bases__": + raise AttributeError("LazyCorpusLoader object has no attribute '__bases__'") + + self.__load() + # This looks circular, but its not, since __load() changes our + # __class__ to something new: + return getattr(self, attr) + + def __repr__(self): + return "<{} in {!r} (not loaded yet)>".format( + self.__reader_cls.__name__, + ".../corpora/" + self.__name, + ) + + def _unload(self): + # If an exception occurs during corpus loading then + # '_unload' method may be unattached, so __getattr__ can be called; + # we shouldn't trigger corpus loading again in this case. + pass + + +def _make_bound_method(func, self): + """ + Magic for creating bound methods (used for _unload). 
+ """ + + class Foo: + def meth(self): + pass + + f = Foo() + bound_method = type(f.meth) + + try: + return bound_method(func, self, self.__class__) + except TypeError: # python3 + return bound_method(func, self) diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/__init__.py b/venv/lib/python3.10/site-packages/nltk/tbl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3387daec4b489d83a4f87b9652a0309f7c4e1ce5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tbl/__init__.py @@ -0,0 +1,31 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Transformation Based Learning + +A general purpose package for Transformation Based Learning, +currently used by nltk.tag.BrillTagger. + +isort:skip_file +""" + +from nltk.tbl.template import Template + +# API: Template(...), Template.expand(...) + +from nltk.tbl.feature import Feature + +# API: Feature(...), Feature.expand(...) + +from nltk.tbl.rule import Rule + +# API: Rule.format(...), Rule.templatetid + +from nltk.tbl.erroranalysis import error_list diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..035cc7accabe4f2737015b946aa404884afe7c52 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2df9300f07d9f7e201784765633d24a65f04633 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b590f94e4028f58bd0939e90ade743f58b850144 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a40f743bdb7ab5e58682d98e4e3a9aa2f256a98 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1acd794d702a2954ceac61be7416d51b6f1bc92b Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8f8e8d3dc928aab9c0112ed88233df0368d3faf Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85a2ea50d053768f365a2448db5c70f8d095c801 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/api.py b/venv/lib/python3.10/site-packages/nltk/tbl/api.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/demo.py b/venv/lib/python3.10/site-packages/nltk/tbl/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..a5298e396e964f1f33e89a81263014249bca7cfa --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tbl/demo.py @@ -0,0 +1,418 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import os +import pickle +import random +import time + +from nltk.corpus import treebank +from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger +from nltk.tag.brill import Pos, Word +from nltk.tbl import Template, error_list + + +def demo(): + """ + Run a demo with defaults. See source comments for details, + or docstrings of any of the more specific demo_* functions. + """ + postag() + + +def demo_repr_rule_format(): + """ + Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose")) + """ + postag(ruleformat="repr") + + +def demo_str_rule_format(): + """ + Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose")) + """ + postag(ruleformat="str") + + +def demo_verbose_rule_format(): + """ + Exemplify Rule.format("verbose") + """ + postag(ruleformat="verbose") + + +def demo_multiposition_feature(): + """ + The feature/s of a template takes a list of positions + relative to the current word where the feature should be + looked for, conceptually joined by logical OR. For instance, + Pos([-1, 1]), given a value V, will hold whenever V is found + one step to the left and/or one step to the right. + + For contiguous ranges, a 2-arg form giving inclusive end + points can also be used: Pos(-3, -1) is the same as the arg + below. + """ + postag(templates=[Template(Pos([-3, -2, -1]))]) + + +def demo_multifeature_template(): + """ + Templates can have more than a single feature. + """ + postag(templates=[Template(Word([0]), Pos([-2, -1]))]) + + +def demo_template_statistics(): + """ + Show aggregate statistics per template. Little used templates are + candidates for deletion, much used templates may possibly be refined. + + Deleting unused templates is mostly about saving time and/or space: + training is basically O(T) in the number of templates T + (also in terms of memory usage, which often will be the limiting factor). + """ + postag(incremental_stats=True, template_stats=True) + + +def demo_generated_templates(): + """ + Template.expand and Feature.expand are class methods facilitating + generating large amounts of templates. See their documentation for + details. 
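The demo functions above can be exercised directly. A small, hedged run (assuming the 'treebank' corpus is available; sizes are reduced so it finishes quickly):

from nltk.tbl import demo

demo.postag(num_sents=200, max_rules=20)   # short run of the default Brill-tagging demo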
+ + Note: training with 500 templates can easily fill all available + even on relatively small corpora + """ + wordtpls = Word.expand([-1, 0, 1], [1, 2], excludezero=False) + tagtpls = Pos.expand([-2, -1, 0, 1], [1, 2], excludezero=True) + templates = list(Template.expand([wordtpls, tagtpls], combinations=(1, 3))) + print( + "Generated {} templates for transformation-based learning".format( + len(templates) + ) + ) + postag(templates=templates, incremental_stats=True, template_stats=True) + + +def demo_learning_curve(): + """ + Plot a learning curve -- the contribution on tagging accuracy of + the individual rules. + Note: requires matplotlib + """ + postag( + incremental_stats=True, + separate_baseline_data=True, + learning_curve_output="learningcurve.png", + ) + + +def demo_error_analysis(): + """ + Writes a file with context for each erroneous word after tagging testing data + """ + postag(error_output="errors.txt") + + +def demo_serialize_tagger(): + """ + Serializes the learned tagger to a file in pickle format; reloads it + and validates the process. + """ + postag(serialize_output="tagger.pcl") + + +def demo_high_accuracy_rules(): + """ + Discard rules with low accuracy. This may hurt performance a bit, + but will often produce rules which are more interesting read to a human. + """ + postag(num_sents=3000, min_acc=0.96, min_score=10) + + +def postag( + templates=None, + tagged_data=None, + num_sents=1000, + max_rules=300, + min_score=3, + min_acc=None, + train=0.8, + trace=3, + randomize=False, + ruleformat="str", + incremental_stats=False, + template_stats=False, + error_output=None, + serialize_output=None, + learning_curve_output=None, + learning_curve_take=300, + baseline_backoff_tagger=None, + separate_baseline_data=False, + cache_baseline_tagger=None, +): + """ + Brill Tagger Demonstration + :param templates: how many sentences of training and testing data to use + :type templates: list of Template + + :param tagged_data: maximum number of rule instances to create + :type tagged_data: C{int} + + :param num_sents: how many sentences of training and testing data to use + :type num_sents: C{int} + + :param max_rules: maximum number of rule instances to create + :type max_rules: C{int} + + :param min_score: the minimum score for a rule in order for it to be considered + :type min_score: C{int} + + :param min_acc: the minimum score for a rule in order for it to be considered + :type min_acc: C{float} + + :param train: the fraction of the the corpus to be used for training (1=all) + :type train: C{float} + + :param trace: the level of diagnostic tracing output to produce (0-4) + :type trace: C{int} + + :param randomize: whether the training data should be a random subset of the corpus + :type randomize: C{bool} + + :param ruleformat: rule output format, one of "str", "repr", "verbose" + :type ruleformat: C{str} + + :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow) + :type incremental_stats: C{bool} + + :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing + :type template_stats: C{bool} + + :param error_output: the file where errors will be saved + :type error_output: C{string} + + :param serialize_output: the file where the learned tbl tagger will be saved + :type serialize_output: C{string} + + :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available) + :type learning_curve_output: C{string} + + :param learning_curve_take: 
how many rules plotted + :type learning_curve_take: C{int} + + :param baseline_backoff_tagger: the file where rules will be saved + :type baseline_backoff_tagger: tagger + + :param separate_baseline_data: use a fraction of the training data exclusively for training baseline + :type separate_baseline_data: C{bool} + + :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround to get + deterministic output from the baseline unigram tagger between python versions) + :type cache_baseline_tagger: C{string} + + + Note on separate_baseline_data: if True, reuse training data both for baseline and rule learner. This + is fast and fine for a demo, but is likely to generalize worse on unseen data. + Also cannot be sensibly used for learning curves on training data (the baseline will be artificially high). + """ + + # defaults + baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER + if templates is None: + from nltk.tag.brill import brill24, describe_template_sets + + # some pre-built template sets taken from typical systems or publications are + # available. Print a list with describe_template_sets() + # for instance: + templates = brill24() + (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data( + tagged_data, train, num_sents, randomize, separate_baseline_data + ) + + # creating (or reloading from cache) a baseline tagger (unigram tagger) + # this is just a mechanism for getting deterministic output from the baseline between + # python versions + if cache_baseline_tagger: + if not os.path.exists(cache_baseline_tagger): + baseline_tagger = UnigramTagger( + baseline_data, backoff=baseline_backoff_tagger + ) + with open(cache_baseline_tagger, "w") as print_rules: + pickle.dump(baseline_tagger, print_rules) + print( + "Trained baseline tagger, pickled it to {}".format( + cache_baseline_tagger + ) + ) + with open(cache_baseline_tagger) as print_rules: + baseline_tagger = pickle.load(print_rules) + print(f"Reloaded pickled tagger from {cache_baseline_tagger}") + else: + baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger) + print("Trained baseline tagger") + if gold_data: + print( + " Accuracy on test set: {:0.4f}".format( + baseline_tagger.accuracy(gold_data) + ) + ) + + # creating a Brill tagger + tbrill = time.time() + trainer = BrillTaggerTrainer( + baseline_tagger, templates, trace, ruleformat=ruleformat + ) + print("Training tbl tagger...") + brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc) + print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds") + if gold_data: + print(" Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data)) + + # printing the learned rules, if learned silently + if trace == 1: + print("\nLearned rules: ") + for (ruleno, rule) in enumerate(brill_tagger.rules(), 1): + print(f"{ruleno:4d} {rule.format(ruleformat):s}") + + # printing template statistics (optionally including comparison with the training data) + # note: if not separate_baseline_data, then baseline accuracy will be artificially high + if incremental_stats: + print( + "Incrementally tagging the test data, collecting individual rule statistics" + ) + (taggedtest, teststats) = brill_tagger.batch_tag_incremental( + testing_data, gold_data + ) + print(" Rule statistics collected") + if not separate_baseline_data: + print( + "WARNING: train_stats asked for separate_baseline_data=True; the baseline " + "will be artificially high" + ) + trainstats = 
brill_tagger.train_stats() + if template_stats: + brill_tagger.print_template_statistics(teststats) + if learning_curve_output: + _demo_plot( + learning_curve_output, teststats, trainstats, take=learning_curve_take + ) + print(f"Wrote plot of learning curve to {learning_curve_output}") + else: + print("Tagging the test data") + taggedtest = brill_tagger.tag_sents(testing_data) + if template_stats: + brill_tagger.print_template_statistics() + + # writing error analysis to file + if error_output is not None: + with open(error_output, "w") as f: + f.write("Errors for Brill Tagger %r\n\n" % serialize_output) + f.write("\n".join(error_list(gold_data, taggedtest)).encode("utf-8") + "\n") + print(f"Wrote tagger errors including context to {error_output}") + + # serializing the tagger to a pickle file and reloading (just to see it works) + if serialize_output is not None: + taggedtest = brill_tagger.tag_sents(testing_data) + with open(serialize_output, "w") as print_rules: + pickle.dump(brill_tagger, print_rules) + print(f"Wrote pickled tagger to {serialize_output}") + with open(serialize_output) as print_rules: + brill_tagger_reloaded = pickle.load(print_rules) + print(f"Reloaded pickled tagger from {serialize_output}") + taggedtest_reloaded = brill_tagger.tag_sents(testing_data) + if taggedtest == taggedtest_reloaded: + print("Reloaded tagger tried on test set, results identical") + else: + print("PROBLEM: Reloaded tagger gave different results on test set") + + +def _demo_prepare_data( + tagged_data, train, num_sents, randomize, separate_baseline_data +): + # train is the proportion of data used in training; the rest is reserved + # for testing. + if tagged_data is None: + print("Loading tagged data from treebank... ") + tagged_data = treebank.tagged_sents() + if num_sents is None or len(tagged_data) <= num_sents: + num_sents = len(tagged_data) + if randomize: + random.seed(len(tagged_data)) + random.shuffle(tagged_data) + cutoff = int(num_sents * train) + training_data = tagged_data[:cutoff] + gold_data = tagged_data[cutoff:num_sents] + testing_data = [[t[0] for t in sent] for sent in gold_data] + if not separate_baseline_data: + baseline_data = training_data + else: + bl_cutoff = len(training_data) // 3 + (baseline_data, training_data) = ( + training_data[:bl_cutoff], + training_data[bl_cutoff:], + ) + (trainseqs, traintokens) = corpus_size(training_data) + (testseqs, testtokens) = corpus_size(testing_data) + (bltrainseqs, bltraintokens) = corpus_size(baseline_data) + print(f"Read testing data ({testseqs:d} sents/{testtokens:d} wds)") + print(f"Read training data ({trainseqs:d} sents/{traintokens:d} wds)") + print( + "Read baseline data ({:d} sents/{:d} wds) {:s}".format( + bltrainseqs, + bltraintokens, + "" if separate_baseline_data else "[reused the training set]", + ) + ) + return (training_data, baseline_data, gold_data, testing_data) + + +def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None): + testcurve = [teststats["initialerrors"]] + for rulescore in teststats["rulescores"]: + testcurve.append(testcurve[-1] - rulescore) + testcurve = [1 - x / teststats["tokencount"] for x in testcurve[:take]] + + traincurve = [trainstats["initialerrors"]] + for rulescore in trainstats["rulescores"]: + traincurve.append(traincurve[-1] - rulescore) + traincurve = [1 - x / trainstats["tokencount"] for x in traincurve[:take]] + + import matplotlib.pyplot as plt + + r = list(range(len(testcurve))) + plt.plot(r, testcurve, r, traincurve) + plt.axis([None, None, None, 1.0]) + 
plt.savefig(learning_curve_output) + + +NN_CD_TAGGER = RegexpTagger([(r"^-?[0-9]+(\.[0-9]+)?$", "CD"), (r".*", "NN")]) + +REGEXP_TAGGER = RegexpTagger( + [ + (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers + (r"(The|the|A|a|An|an)$", "AT"), # articles + (r".*able$", "JJ"), # adjectives + (r".*ness$", "NN"), # nouns formed from adjectives + (r".*ly$", "RB"), # adverbs + (r".*s$", "NNS"), # plural nouns + (r".*ing$", "VBG"), # gerunds + (r".*ed$", "VBD"), # past tense verbs + (r".*", "NN"), # nouns (default) + ] +) + + +def corpus_size(seqs): + return (len(seqs), sum(len(x) for x in seqs)) + + +if __name__ == "__main__": + demo_learning_curve() diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py b/venv/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py new file mode 100644 index 0000000000000000000000000000000000000000..8b192e75d8b410942960cbf5ea1476a42f0decf7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py @@ -0,0 +1,38 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# returns a list of errors in string format + + +def error_list(train_sents, test_sents): + """ + Returns a list of human-readable strings indicating the errors in the + given tagging of the corpus. + + :param train_sents: The correct tagging of the corpus + :type train_sents: list(tuple) + :param test_sents: The tagged corpus + :type test_sents: list(tuple) + """ + hdr = ("%25s | %s | %s\n" + "-" * 26 + "+" + "-" * 24 + "+" + "-" * 26) % ( + "left context", + "word/test->gold".center(22), + "right context", + ) + errors = [hdr] + for (train_sent, test_sent) in zip(train_sents, test_sents): + for wordnum, (word, train_pos) in enumerate(train_sent): + test_pos = test_sent[wordnum][1] + if train_pos != test_pos: + left = " ".join("%s/%s" % w for w in train_sent[:wordnum]) + right = " ".join("%s/%s" % w for w in train_sent[wordnum + 1 :]) + mid = f"{word}/{test_pos}->{train_pos}" + errors.append(f"{left[-25:]:>25} | {mid.center(22)} | {right[:25]}") + + return errors diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/feature.py b/venv/lib/python3.10/site-packages/nltk/tbl/feature.py new file mode 100644 index 0000000000000000000000000000000000000000..568425918db4b4b7910ef0d216b03bd10411d287 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tbl/feature.py @@ -0,0 +1,267 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod + + +class Feature(metaclass=ABCMeta): + """ + An abstract base class for Features. A Feature is a combination of + a specific property-computing method and a list of relative positions + to apply that method to. + + The property-computing method, M{extract_property(tokens, index)}, + must be implemented by every subclass. It extracts or computes a specific + property for the token at the current index. Typical extract_property() + methods return features such as the token text or tag; but more involved + methods may consider the entire sequence M{tokens} and + for instance compute the length of the sentence the token belongs to. 
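A hedged sketch of such a subclass, using the sentence-length idea just mentioned (the class name is illustrative and not part of NLTK):

from nltk.tbl.feature import Feature

class SentenceLength(Feature):
    """Illustrative Feature: the length of the sentence the token sits in."""

    @staticmethod
    def extract_property(tokens, index):
        return len(tokens)          # same value for every index in the sentence

f = SentenceLength([0])             # look only at the current position
print(f.extract_property([('the', 'DT'), ('dog', 'NN')], 1))   # -> 2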
+ + In addition, the subclass may have a PROPERTY_NAME, which is how + it will be printed (in Rules and Templates, etc). If not given, defaults + to the classname. + + """ + + json_tag = "nltk.tbl.Feature" + PROPERTY_NAME = None + + def __init__(self, positions, end=None): + """ + Construct a Feature which may apply at C{positions}. + + >>> # For instance, importing some concrete subclasses (Feature is abstract) + >>> from nltk.tag.brill import Word, Pos + + >>> # Feature Word, applying at one of [-2, -1] + >>> Word([-2,-1]) + Word([-2, -1]) + + >>> # Positions need not be contiguous + >>> Word([-2,-1, 1]) + Word([-2, -1, 1]) + + >>> # Contiguous ranges can alternatively be specified giving the + >>> # two endpoints (inclusive) + >>> Pos(-3, -1) + Pos([-3, -2, -1]) + + >>> # In two-arg form, start <= end is enforced + >>> Pos(2, 1) + Traceback (most recent call last): + File "", line 1, in + File "nltk/tbl/template.py", line 306, in __init__ + raise TypeError + ValueError: illegal interval specification: (start=2, end=1) + + :type positions: list of int + :param positions: the positions at which this features should apply + :raises ValueError: illegal position specifications + + An alternative calling convention, for contiguous positions only, + is Feature(start, end): + + :type start: int + :param start: start of range where this feature should apply + :type end: int + :param end: end of range (NOTE: inclusive!) where this feature should apply + """ + self.positions = None # to avoid warnings + if end is None: + self.positions = tuple(sorted({int(i) for i in positions})) + else: # positions was actually not a list, but only the start index + try: + if positions > end: + raise TypeError + self.positions = tuple(range(positions, end + 1)) + except TypeError as e: + # let any kind of erroneous spec raise ValueError + raise ValueError( + "illegal interval specification: (start={}, end={})".format( + positions, end + ) + ) from e + + # set property name given in subclass, or otherwise name of subclass + self.PROPERTY_NAME = self.__class__.PROPERTY_NAME or self.__class__.__name__ + + def encode_json_obj(self): + return self.positions + + @classmethod + def decode_json_obj(cls, obj): + positions = obj + return cls(positions) + + def __repr__(self): + return f"{self.__class__.__name__}({list(self.positions)!r})" + + @classmethod + def expand(cls, starts, winlens, excludezero=False): + """ + Return a list of features, one for each start point in starts + and for each window length in winlen. 
If excludezero is True, + no Features containing 0 in its positions will be generated + (many tbl trainers have a special representation for the + target feature at [0]) + + For instance, importing a concrete subclass (Feature is abstract) + + >>> from nltk.tag.brill import Word + + First argument gives the possible start positions, second the + possible window lengths + + >>> Word.expand([-3,-2,-1], [1]) + [Word([-3]), Word([-2]), Word([-1])] + + >>> Word.expand([-2,-1], [1]) + [Word([-2]), Word([-1])] + + >>> Word.expand([-3,-2,-1], [1,2]) + [Word([-3]), Word([-2]), Word([-1]), Word([-3, -2]), Word([-2, -1])] + + >>> Word.expand([-2,-1], [1]) + [Word([-2]), Word([-1])] + + A third optional argument excludes all Features whose positions contain zero + + >>> Word.expand([-2,-1,0], [1,2], excludezero=False) + [Word([-2]), Word([-1]), Word([0]), Word([-2, -1]), Word([-1, 0])] + + >>> Word.expand([-2,-1,0], [1,2], excludezero=True) + [Word([-2]), Word([-1]), Word([-2, -1])] + + All window lengths must be positive + + >>> Word.expand([-2,-1], [0]) + Traceback (most recent call last): + File "", line 1, in + File "nltk/tag/tbl/template.py", line 371, in expand + :param starts: where to start looking for Feature + ValueError: non-positive window length in [0] + + :param starts: where to start looking for Feature + :type starts: list of ints + :param winlens: window lengths where to look for Feature + :type starts: list of ints + :param excludezero: do not output any Feature with 0 in any of its positions. + :type excludezero: bool + :returns: list of Features + :raises ValueError: for non-positive window lengths + """ + if not all(x > 0 for x in winlens): + raise ValueError(f"non-positive window length in {winlens}") + xs = (starts[i : i + w] for w in winlens for i in range(len(starts) - w + 1)) + return [cls(x) for x in xs if not (excludezero and 0 in x)] + + def issuperset(self, other): + """ + Return True if this Feature always returns True when other does + + More precisely, return True if this feature refers to the same property as other; + and this Feature looks at all positions that other does (and possibly + other positions in addition). + + #For instance, importing a concrete subclass (Feature is abstract) + >>> from nltk.tag.brill import Word, Pos + + >>> Word([-3,-2,-1]).issuperset(Word([-3,-2])) + True + + >>> Word([-3,-2,-1]).issuperset(Word([-3,-2, 0])) + False + + #Feature subclasses must agree + >>> Word([-3,-2,-1]).issuperset(Pos([-3,-2])) + False + + :param other: feature with which to compare + :type other: (subclass of) Feature + :return: True if this feature is superset, otherwise False + :rtype: bool + + + """ + return self.__class__ is other.__class__ and set(self.positions) >= set( + other.positions + ) + + def intersects(self, other): + """ + Return True if the positions of this Feature intersects with those of other + + More precisely, return True if this feature refers to the same property as other; + and there is some overlap in the positions they look at. 
+ + #For instance, importing a concrete subclass (Feature is abstract) + >>> from nltk.tag.brill import Word, Pos + + >>> Word([-3,-2,-1]).intersects(Word([-3,-2])) + True + + >>> Word([-3,-2,-1]).intersects(Word([-3,-2, 0])) + True + + >>> Word([-3,-2,-1]).intersects(Word([0])) + False + + #Feature subclasses must agree + >>> Word([-3,-2,-1]).intersects(Pos([-3,-2])) + False + + :param other: feature with which to compare + :type other: (subclass of) Feature + :return: True if feature classes agree and there is some overlap in the positions they look at + :rtype: bool + """ + + return bool( + self.__class__ is other.__class__ + and set(self.positions) & set(other.positions) + ) + + # Rich comparisons for Features. With @functools.total_ordering (Python 2.7+), + # it will be enough to define __lt__ and __eq__ + def __eq__(self, other): + return self.__class__ is other.__class__ and self.positions == other.positions + + def __lt__(self, other): + return ( + self.__class__.__name__ < other.__class__.__name__ + or + # self.positions is a sorted tuple of ints + self.positions < other.positions + ) + + def __ne__(self, other): + return not (self == other) + + def __gt__(self, other): + return other < self + + def __ge__(self, other): + return not self < other + + def __le__(self, other): + return self < other or self == other + + @staticmethod + @abstractmethod + def extract_property(tokens, index): + """ + Any subclass of Feature must define static method extract_property(tokens, index) + + :param tokens: the sequence of tokens + :type tokens: list of tokens + :param index: the current index + :type index: int + :return: feature value + :rtype: any (but usually scalar) + """ diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/rule.py b/venv/lib/python3.10/site-packages/nltk/tbl/rule.py new file mode 100644 index 0000000000000000000000000000000000000000..7faea23bd36ddbf974de4499bb1f9106a78e4c0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tbl/rule.py @@ -0,0 +1,322 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod + +from nltk import jsontags + + +###################################################################### +# Tag Rules +###################################################################### +class TagRule(metaclass=ABCMeta): + """ + An interface for tag transformations on a tagged corpus, as + performed by tbl taggers. Each transformation finds all tokens + in the corpus that are tagged with a specific original tag and + satisfy a specific condition, and replaces their tags with a + replacement tag. For any given transformation, the original + tag, replacement tag, and condition are fixed. Conditions may + depend on the token under consideration, as well as any other + tokens in the corpus. + + Tag rules must be comparable and hashable. + """ + + def __init__(self, original_tag, replacement_tag): + + self.original_tag = original_tag + """The tag which this TagRule may cause to be replaced.""" + + self.replacement_tag = replacement_tag + """The tag with which this TagRule may replace another tag.""" + + def apply(self, tokens, positions=None): + """ + Apply this rule at every position in positions where it + applies to the given sentence. 
I.e., for each position p + in *positions*, if *tokens[p]* is tagged with this rule's + original tag, and satisfies this rule's condition, then set + its tag to be this rule's replacement tag. + + :param tokens: The tagged sentence + :type tokens: list(tuple(str, str)) + :type positions: list(int) + :param positions: The positions where the transformation is to + be tried. If not specified, try it at all positions. + :return: The indices of tokens whose tags were changed by this + rule. + :rtype: int + """ + if positions is None: + positions = list(range(len(tokens))) + + # Determine the indices at which this rule applies. + change = [i for i in positions if self.applies(tokens, i)] + + # Make the changes. Note: this must be done in a separate + # step from finding applicable locations, since we don't want + # the rule to interact with itself. + for i in change: + tokens[i] = (tokens[i][0], self.replacement_tag) + + return change + + @abstractmethod + def applies(self, tokens, index): + """ + :return: True if the rule would change the tag of + ``tokens[index]``, False otherwise + :rtype: bool + :param tokens: A tagged sentence + :type tokens: list(str) + :param index: The index to check + :type index: int + """ + + # Rules must be comparable and hashable for the algorithm to work + def __eq__(self, other): + raise TypeError("Rules must implement __eq__()") + + def __ne__(self, other): + raise TypeError("Rules must implement __ne__()") + + def __hash__(self): + raise TypeError("Rules must implement __hash__()") + + +@jsontags.register_tag +class Rule(TagRule): + """ + A Rule checks the current corpus position for a certain set of conditions; + if they are all fulfilled, the Rule is triggered, meaning that it + will change tag A to tag B. For other tags than A, nothing happens. + + The conditions are parameters to the Rule instance. Each condition is a feature-value pair, + with a set of positions to check for the value of the corresponding feature. + Conceptually, the positions are joined by logical OR, and the feature set by logical AND. + + More formally, the Rule is then applicable to the M{n}th token iff: + + - The M{n}th token is tagged with the Rule's original tag; and + - For each (Feature(positions), M{value}) tuple: + + - The value of Feature of at least one token in {n+p for p in positions} + is M{value}. + """ + + json_tag = "nltk.tbl.Rule" + + def __init__(self, templateid, original_tag, replacement_tag, conditions): + """ + Construct a new Rule that changes a token's tag from + C{original_tag} to C{replacement_tag} if all of the properties + specified in C{conditions} hold. + + :param templateid: the template id (a zero-padded string, '001' etc, + so it will sort nicely) + :type templateid: string + + :param conditions: A list of Feature(positions), + each of which specifies that the property (computed by + Feature.extract_property()) of at least one + token in M{n} + p in positions is C{value}. 
+ :type conditions: C{iterable} of C{Feature} + + """ + TagRule.__init__(self, original_tag, replacement_tag) + self._conditions = conditions + self.templateid = templateid + + def encode_json_obj(self): + return { + "templateid": self.templateid, + "original": self.original_tag, + "replacement": self.replacement_tag, + "conditions": self._conditions, + } + + @classmethod + def decode_json_obj(cls, obj): + return cls( + obj["templateid"], + obj["original"], + obj["replacement"], + tuple(tuple(feat) for feat in obj["conditions"]), + ) + + def applies(self, tokens, index): + # Inherit docs from TagRule + + # Does the given token have this Rule's "original tag"? + if tokens[index][1] != self.original_tag: + return False + + # Check to make sure that every condition holds. + for (feature, val) in self._conditions: + + # Look for *any* token that satisfies the condition. + for pos in feature.positions: + if not (0 <= index + pos < len(tokens)): + continue + if feature.extract_property(tokens, index + pos) == val: + break + else: + # No token satisfied the condition; return false. + return False + + # Every condition checked out, so the Rule is applicable. + return True + + def __eq__(self, other): + return self is other or ( + other is not None + and other.__class__ == self.__class__ + and self.original_tag == other.original_tag + and self.replacement_tag == other.replacement_tag + and self._conditions == other._conditions + ) + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + + # Cache our hash value (justified by profiling.) + try: + return self.__hash + except AttributeError: + self.__hash = hash(repr(self)) + return self.__hash + + def __repr__(self): + # Cache the repr (justified by profiling -- this is used as + # a sort key when deterministic=True.) + try: + return self.__repr + except AttributeError: + self.__repr = "{}('{}', {}, {}, [{}])".format( + self.__class__.__name__, + self.templateid, + repr(self.original_tag), + repr(self.replacement_tag), + # list(self._conditions) would be simpler but will not generate + # the same Rule.__repr__ in python 2 and 3 and thus break some tests + ", ".join(f"({f},{repr(v)})" for (f, v) in self._conditions), + ) + + return self.__repr + + def __str__(self): + def _condition_to_logic(feature, value): + """ + Return a compact, predicate-logic styled string representation + of the given condition. + """ + return "{}:{}@[{}]".format( + feature.PROPERTY_NAME, + value, + ",".join(str(w) for w in feature.positions), + ) + + conditions = " & ".join( + [_condition_to_logic(f, v) for (f, v) in self._conditions] + ) + s = f"{self.original_tag}->{self.replacement_tag} if {conditions}" + + return s + + def format(self, fmt): + """ + Return a string representation of this rule. 
+ + >>> from nltk.tbl.rule import Rule + >>> from nltk.tag.brill import Pos + + >>> r = Rule("23", "VB", "NN", [(Pos([-2,-1]), 'DT')]) + + r.format("str") == str(r) + True + >>> r.format("str") + 'VB->NN if Pos:DT@[-2,-1]' + + r.format("repr") == repr(r) + True + >>> r.format("repr") + "Rule('23', 'VB', 'NN', [(Pos([-2, -1]),'DT')])" + + >>> r.format("verbose") + 'VB -> NN if the Pos of words i-2...i-1 is "DT"' + + >>> r.format("not_found") + Traceback (most recent call last): + File "", line 1, in + File "nltk/tbl/rule.py", line 256, in format + raise ValueError("unknown rule format spec: {0}".format(fmt)) + ValueError: unknown rule format spec: not_found + >>> + + :param fmt: format specification + :type fmt: str + :return: string representation + :rtype: str + """ + if fmt == "str": + return self.__str__() + elif fmt == "repr": + return self.__repr__() + elif fmt == "verbose": + return self._verbose_format() + else: + raise ValueError(f"unknown rule format spec: {fmt}") + + def _verbose_format(self): + """ + Return a wordy, human-readable string representation + of the given rule. + + Not sure how useful this is. + """ + + def condition_to_str(feature, value): + return 'the {} of {} is "{}"'.format( + feature.PROPERTY_NAME, + range_to_str(feature.positions), + value, + ) + + def range_to_str(positions): + if len(positions) == 1: + p = positions[0] + if p == 0: + return "this word" + if p == -1: + return "the preceding word" + elif p == 1: + return "the following word" + elif p < 0: + return "word i-%d" % -p + elif p > 0: + return "word i+%d" % p + else: + # for complete compatibility with the wordy format of nltk2 + mx = max(positions) + mn = min(positions) + if mx - mn == len(positions) - 1: + return "words i%+d...i%+d" % (mn, mx) + else: + return "words {{{}}}".format( + ",".join("i%+d" % d for d in positions) + ) + + replacement = f"{self.original_tag} -> {self.replacement_tag}" + conditions = (" if " if self._conditions else "") + ", and ".join( + condition_to_str(f, v) for (f, v) in self._conditions + ) + return replacement + conditions diff --git a/venv/lib/python3.10/site-packages/nltk/tbl/template.py b/venv/lib/python3.10/site-packages/nltk/tbl/template.py new file mode 100644 index 0000000000000000000000000000000000000000..ac9ed5df52f5730bd767a04a121637a5c2be01d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tbl/template.py @@ -0,0 +1,325 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import itertools as it +from abc import ABCMeta, abstractmethod + +from nltk.tbl.feature import Feature +from nltk.tbl.rule import Rule + + +class BrillTemplateI(metaclass=ABCMeta): + """ + An interface for generating lists of transformational rules that + apply at given sentence positions. ``BrillTemplateI`` is used by + ``Brill`` training algorithms to generate candidate rules. + """ + + @abstractmethod + def applicable_rules(self, tokens, i, correctTag): + """ + Return a list of the transformational rules that would correct + the ``i``-th subtoken's tag in the given token. In particular, + return a list of zero or more rules that would change + ``tokens[i][1]`` to ``correctTag``, if applied to ``token[i]``. 
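+
+        For example (illustrative only): with a single-feature ``Pos([-1])``
+        template, if ``tokens[i]`` is tagged ``NN`` but should be ``VB`` and the
+        preceding token is tagged ``TO``, the returned list would contain the
+        rule ``NN->VB if Pos:TO@[-1]``.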
+ + If the ``i``-th token already has the correct tag (i.e., if + ``tagged_tokens[i][1] == correctTag``), then + ``applicable_rules()`` should return the empty list. + + :param tokens: The tagged tokens being tagged. + :type tokens: list(tuple) + :param i: The index of the token whose tag should be corrected. + :type i: int + :param correctTag: The correct tag for the ``i``-th token. + :type correctTag: any + :rtype: list(BrillRule) + """ + + @abstractmethod + def get_neighborhood(self, token, index): + """ + Returns the set of indices *i* such that + ``applicable_rules(token, i, ...)`` depends on the value of + the *index*th token of *token*. + + This method is used by the "fast" Brill tagger trainer. + + :param token: The tokens being tagged. + :type token: list(tuple) + :param index: The index whose neighborhood should be returned. + :type index: int + :rtype: set + """ + + +class Template(BrillTemplateI): + """ + A tbl Template that generates a list of L{Rule}s that apply at a given sentence + position. In particular, each C{Template} is parameterized by a list of + independent features (a combination of a specific + property to extract and a list C{L} of relative positions at which to extract + it) and generates all Rules that: + + - use the given features, each at its own independent position; and + - are applicable to the given token. + """ + + ALLTEMPLATES = [] + # record a unique id of form "001", for each template created + # _ids = it.count(0) + + def __init__(self, *features): + + """ + Construct a Template for generating Rules. + + Takes a list of Features. A C{Feature} is a combination + of a specific property and its relative positions and should be + a subclass of L{nltk.tbl.feature.Feature}. + + An alternative calling convention (kept for backwards compatibility, + but less expressive as it only permits one feature type) is + Template(Feature, (start1, end1), (start2, end2), ...) + In new code, that would be better written + Template(Feature(start1, end1), Feature(start2, end2), ...) + + For instance, importing some features + + >>> from nltk.tbl.template import Template + >>> from nltk.tag.brill import Word, Pos + + Create some features + + >>> wfeat1, wfeat2, pfeat = (Word([-1]), Word([1,2]), Pos([-2,-1])) + + Create a single-feature template + + >>> Template(wfeat1) + Template(Word([-1])) + + Or a two-feature one + + >>> Template(wfeat1, wfeat2) + Template(Word([-1]),Word([1, 2])) + + Or a three-feature one with two different feature types + + >>> Template(wfeat1, wfeat2, pfeat) + Template(Word([-1]),Word([1, 2]),Pos([-2, -1])) + + deprecated api: Feature subclass, followed by list of (start,end) pairs + (permits only a single Feature) + + >>> Template(Word, (-2,-1), (0,0)) + Template(Word([-2, -1]),Word([0])) + + Incorrect specification raises TypeError + + >>> Template(Word, (-2,-1), Pos, (0,0)) + Traceback (most recent call last): + File "", line 1, in + File "nltk/tag/tbl/template.py", line 143, in __init__ + raise TypeError( + TypeError: expected either Feature1(args), Feature2(args), ... or Feature, (start1, end1), (start2, end2), ... + + :type features: list of Features + :param features: the features to build this Template on + """ + # determine the calling form: either + # Template(Feature, args1, [args2, ...)] + # Template(Feature1(args), Feature2(args), ...) 
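+        # Illustration only (``Word`` as in the doctests above): both calls below
+        # build the same template, one per branch handled next.
+        #     Template(Word([-2, -1]), Word([0]))   # Feature instances
+        #     Template(Word, (-2, -1), (0, 0))      # deprecated class + (start, end) form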
+ if all(isinstance(f, Feature) for f in features): + self._features = features + elif issubclass(features[0], Feature) and all( + isinstance(a, tuple) for a in features[1:] + ): + self._features = [features[0](*tp) for tp in features[1:]] + else: + raise TypeError( + "expected either Feature1(args), Feature2(args), ... or Feature, (start1, end1), (start2, end2), ..." + ) + self.id = f"{len(self.ALLTEMPLATES):03d}" + self.ALLTEMPLATES.append(self) + + def __repr__(self): + return "{}({})".format( + self.__class__.__name__, + ",".join([str(f) for f in self._features]), + ) + + def applicable_rules(self, tokens, index, correct_tag): + if tokens[index][1] == correct_tag: + return [] + + # For each of this Template's features, find the conditions + # that are applicable for the given token. + # Then, generate one Rule for each combination of features + # (the crossproduct of the conditions). + + applicable_conditions = self._applicable_conditions(tokens, index) + xs = list(it.product(*applicable_conditions)) + return [Rule(self.id, tokens[index][1], correct_tag, tuple(x)) for x in xs] + + def _applicable_conditions(self, tokens, index): + """ + :returns: A set of all conditions for rules + that are applicable to C{tokens[index]}. + """ + conditions = [] + + for feature in self._features: + conditions.append([]) + for pos in feature.positions: + if not (0 <= index + pos < len(tokens)): + continue + value = feature.extract_property(tokens, index + pos) + conditions[-1].append((feature, value)) + return conditions + + def get_neighborhood(self, tokens, index): + # inherit docs from BrillTemplateI + + # applicable_rules(tokens, index, ...) depends on index. + neighborhood = {index} # set literal for python 2.7+ + + # applicable_rules(tokens, i, ...) depends on index if + # i+start < index <= i+end. + + allpositions = [0] + [p for feat in self._features for p in feat.positions] + start, end = min(allpositions), max(allpositions) + s = max(0, index + (-end)) + e = min(index + (-start) + 1, len(tokens)) + for i in range(s, e): + neighborhood.add(i) + return neighborhood + + @classmethod + def expand(cls, featurelists, combinations=None, skipintersecting=True): + + """ + Factory method to mass generate Templates from a list L of lists of Features. + + #With combinations=(k1, k2), the function will in all possible ways choose k1 ... k2 + #of the sublists in L; it will output all Templates formed by the Cartesian product + #of this selection, with duplicates and other semantically equivalent + #forms removed. Default for combinations is (1, len(L)). + + The feature lists may have been specified + manually, or generated from Feature.expand(). 
For instance, + + >>> from nltk.tbl.template import Template + >>> from nltk.tag.brill import Word, Pos + + #creating some features + >>> (wd_0, wd_01) = (Word([0]), Word([0,1])) + + >>> (pos_m2, pos_m33) = (Pos([-2]), Pos([3-2,-1,0,1,2,3])) + + >>> list(Template.expand([[wd_0], [pos_m2]])) + [Template(Word([0])), Template(Pos([-2])), Template(Pos([-2]),Word([0]))] + + >>> list(Template.expand([[wd_0, wd_01], [pos_m2]])) + [Template(Word([0])), Template(Word([0, 1])), Template(Pos([-2])), Template(Pos([-2]),Word([0])), Template(Pos([-2]),Word([0, 1]))] + + #note: with Feature.expand(), it is very easy to generate more templates + #than your system can handle -- for instance, + >>> wordtpls = Word.expand([-2,-1,0,1], [1,2], excludezero=False) + >>> len(wordtpls) + 7 + + >>> postpls = Pos.expand([-3,-2,-1,0,1,2], [1,2,3], excludezero=True) + >>> len(postpls) + 9 + + #and now the Cartesian product of all non-empty combinations of two wordtpls and + #two postpls, with semantic equivalents removed + >>> templates = list(Template.expand([wordtpls, wordtpls, postpls, postpls])) + >>> len(templates) + 713 + + + will return a list of eight templates + Template(Word([0])), + Template(Word([0, 1])), + Template(Pos([-2])), + Template(Pos([-1])), + Template(Pos([-2]),Word([0])), + Template(Pos([-1]),Word([0])), + Template(Pos([-2]),Word([0, 1])), + Template(Pos([-1]),Word([0, 1]))] + + + #Templates where one feature is a subset of another, such as + #Template(Word([0,1]), Word([1]), will not appear in the output. + #By default, this non-subset constraint is tightened to disjointness: + #Templates of type Template(Word([0,1]), Word([1,2]) will also be filtered out. + #With skipintersecting=False, then such Templates are allowed + + WARNING: this method makes it very easy to fill all your memory when training + generated templates on any real-world corpus + + :param featurelists: lists of Features, whose Cartesian product will return a set of Templates + :type featurelists: list of (list of Features) + :param combinations: given n featurelists: if combinations=k, all generated Templates will have + k features; if combinations=(k1,k2) they will have k1..k2 features; if None, defaults to 1..n + :type combinations: None, int, or (int, int) + :param skipintersecting: if True, do not output intersecting Templates (non-disjoint positions for some feature) + :type skipintersecting: bool + :returns: generator of Templates + + """ + + def nonempty_powerset(xs): # xs is a list + # itertools docnonempty_powerset([1,2,3]) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3) + + # find the correct tuple given combinations, one of {None, k, (k1,k2)} + k = combinations # for brevity + combrange = ( + (1, len(xs) + 1) + if k is None + else (k, k + 1) # n over 1 .. n over n (all non-empty combinations) + if isinstance(k, int) + else (k[0], k[1] + 1) # n over k (only + ) # n over k1, n over k1+1... 
n over k2 + return it.chain.from_iterable( + it.combinations(xs, r) for r in range(*combrange) + ) + + seentemplates = set() + for picks in nonempty_powerset(featurelists): + for pick in it.product(*picks): + if any( + i != j and x.issuperset(y) + for (i, x) in enumerate(pick) + for (j, y) in enumerate(pick) + ): + continue + if skipintersecting and any( + i != j and x.intersects(y) + for (i, x) in enumerate(pick) + for (j, y) in enumerate(pick) + ): + continue + thistemplate = cls(*sorted(pick)) + strpick = str(thistemplate) + #!!FIXME --this is hackish + if strpick in seentemplates: # already added + cls._poptemplate() + continue + seentemplates.add(strpick) + yield thistemplate + + @classmethod + def _cleartemplates(cls): + cls.ALLTEMPLATES = [] + + @classmethod + def _poptemplate(cls): + return cls.ALLTEMPLATES.pop() if cls.ALLTEMPLATES else None diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82007ac385c35fbfe22e54b94f1ff6d8a6e546b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19e1cd2737777d59f813ae8452c7680607288b2f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a481fd54f62525a606e2c635ae343f7afa6ef252 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50cc6c2caace98f5d157ea39556fbbf623df3f99 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96103d1a6a05b843c2703d0dc4cd4daf34012edd Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7e03511a2e50a1d336be0a7b5b60c135794ae82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..3e34150737bcccc4b6c25fd52e9d7e651679bd81 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acb845745a37691ba1bf725c12103c7dc0c5b7ce Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed48afb71f74480dc298f6e157f1df3da81076b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db447a1556594071033669ae760987ab97695f1d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a62c3a17c34159799909962aef934bd1770b925d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d84f4c8ee6fff1bcfe65f8bf264da3b13e1d02dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3acc8fcd6e2bc073189c4a6ded356f49da4aaaca Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f08bbcc7d7dd9cfa98606163ebf0adcebb8e9c45 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford_segmenter.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford_segmenter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34ca1d27d670ec60bada71349590d7b0e160eda2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford_segmenter.cpython-310.pyc 
differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/texttiling.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/texttiling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b64b4e7c4038e9921c36b905a69531796f5be7b Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/texttiling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/toktok.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/toktok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3031e080c39cde1bfb3335ca355af932f8c23b1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/toktok.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/treebank.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/treebank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca5c2b4a24148c0a8a50e07904da0e1716d97719 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/treebank.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c00f2a8b3a714a0d3400bede523fd229f8a07cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tokenize/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/api.py b/venv/lib/python3.10/site-packages/nltk/tokenize/api.py new file mode 100644 index 0000000000000000000000000000000000000000..419ff646cfb89d5f3b63e645b53bedea09a1b479 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/api.py @@ -0,0 +1,83 @@ +# Natural Language Toolkit: Tokenizer Interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Tokenizer Interface +""" + +from abc import ABC, abstractmethod +from typing import Iterator, List, Tuple + +from nltk.internals import overridden +from nltk.tokenize.util import string_span_tokenize + + +class TokenizerI(ABC): + """ + A processing interface for tokenizing a string. + Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both). + """ + + @abstractmethod + def tokenize(self, s: str) -> List[str]: + """ + Return a tokenized copy of *s*. + + :rtype: List[str] + """ + if overridden(self.tokenize_sents): + return self.tokenize_sents([s])[0] + + def span_tokenize(self, s: str) -> Iterator[Tuple[int, int]]: + """ + Identify the tokens using integer offsets ``(start_i, end_i)``, + where ``s[start_i:end_i]`` is the corresponding token. + + :rtype: Iterator[Tuple[int, int]] + """ + raise NotImplementedError() + + def tokenize_sents(self, strings: List[str]) -> List[List[str]]: + """ + Apply ``self.tokenize()`` to each element of ``strings``. I.e.: + + return [self.tokenize(s) for s in strings] + + :rtype: List[List[str]] + """ + return [self.tokenize(s) for s in strings] + + def span_tokenize_sents( + self, strings: List[str] + ) -> Iterator[List[Tuple[int, int]]]: + """ + Apply ``self.span_tokenize()`` to each element of ``strings``. 
I.e.: + + return [self.span_tokenize(s) for s in strings] + + :yield: List[Tuple[int, int]] + """ + for s in strings: + yield list(self.span_tokenize(s)) + + +class StringTokenizer(TokenizerI): + """A tokenizer that divides a string into substrings by splitting + on the specified string (defined in subclasses). + """ + + @property + @abstractmethod + def _string(self): + raise NotImplementedError + + def tokenize(self, s): + return s.split(self._string) + + def span_tokenize(self, s): + yield from string_span_tokenize(s, self._string) diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/casual.py b/venv/lib/python3.10/site-packages/nltk/tokenize/casual.py new file mode 100644 index 0000000000000000000000000000000000000000..d0545abe50530c20903f8aeaa29fbfc55094e70e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/casual.py @@ -0,0 +1,458 @@ +# +# Natural Language Toolkit: Twitter Tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Potts +# Ewan Klein (modifications) +# Pierpaolo Pantone <> (modifications) +# Tom Aarsen <> (modifications) +# URL: +# For license information, see LICENSE.TXT +# + + +""" +Twitter-aware tokenizer, designed to be flexible and easy to adapt to new +domains and tasks. The basic logic is this: + +1. The tuple REGEXPS defines a list of regular expression + strings. + +2. The REGEXPS strings are put, in order, into a compiled + regular expression object called WORD_RE, under the TweetTokenizer + class. + +3. The tokenization is done by WORD_RE.findall(s), where s is the + user-supplied string, inside the tokenize() method of the class + TweetTokenizer. + +4. When instantiating Tokenizer objects, there are several options: + * preserve_case. By default, it is set to True. If it is set to + False, then the tokenizer will downcase everything except for + emoticons. + * reduce_len. By default, it is set to False. It specifies whether + to replace repeated character sequences of length 3 or greater + with sequences of length 3. + * strip_handles. By default, it is set to False. It specifies + whether to remove Twitter handles of text used in the + `tokenize` method. + * match_phone_numbers. By default, it is set to True. It indicates + whether the `tokenize` method should look for phone numbers. +""" + + +###################################################################### + +import html +from typing import List + +import regex # https://github.com/nltk/nltk/issues/2409 + +from nltk.tokenize.api import TokenizerI + +###################################################################### +# The following strings are components in the regular expression +# that is used for tokenizing. It's important that phone_number +# appears first in the final regex (since it can contain whitespace). +# It also could matter that tags comes after emoticons, due to the +# possibility of having text like +# +# <:| and some text >:) +# +# Most importantly, the final element should always be last, since it +# does a last ditch whitespace-based tokenization of whatever is left. + +# ToDo: Update with https://en.wikipedia.org/wiki/List_of_emoticons ? + +# This particular element is used in a couple ways, so we define it +# with a name: +EMOTICONS = r""" + (?: + [<>]? + [:;=8] # eyes + [\-o\*\']? # optional nose + [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth + | + [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth + [\-o\*\']? # optional nose + [:;=8] # eyes + [<>]? 
+ | + {}\[\]]+ # Run of non-space, non-()<>{}[] + | # or + \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...) + | + \([^\s]+?\) # balanced parens, non-recursive: (...) + )+ + (?: # End with: + \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...) + | + \([^\s]+?\) # balanced parens, non-recursive: (...) + | # or + [^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars + ) + | # OR, the following to match naked domains: + (?: + (?\s]+>""", + # ASCII Arrows + r"""[\-]+>|<[\-]+""", + # Twitter username: + r"""(?:@[\w_]+)""", + # Twitter hashtags: + r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""", + # email addresses + r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""", + # Zero-Width-Joiner and Skin tone modifier emojis + """.(?: + [\U0001F3FB-\U0001F3FF]?(?:\u200d.[\U0001F3FB-\U0001F3FF]?)+ + | + [\U0001F3FB-\U0001F3FF] + )""", + # flags + FLAGS, + # Remaining word types: + r""" + (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes. + | + (?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals. + | + (?:[\w_]+) # Words without apostrophes or dashes. + | + (?:\.(?:\s*\.){1,}) # Ellipsis dots. + | + (?:\S) # Everything else that isn't whitespace. + """, +) + +# Take the main components and add a phone regex as the second parameter +REGEXPS_PHONE = (REGEXPS[0], PHONE_REGEX, *REGEXPS[1:]) + +###################################################################### +# TweetTokenizer.WORD_RE and TweetTokenizer.PHONE_WORD_RE represent +# the core tokenizing regexes. They are compiled lazily. + +# WORD_RE performs poorly on these patterns: +HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}") + +# The emoticon string gets its own regex so that we can preserve case for +# them as needed: +EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE) + +# These are for regularizing HTML entities to Unicode: +ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);") + +# For stripping away handles from a tweet: +HANDLES_RE = regex.compile( + r"(?>> from nltk.tokenize.casual import _replace_html_entities + >>> _replace_html_entities(b'Price: £100') + 'Price: \\xa3100' + >>> print(_replace_html_entities(b'Price: £100')) + Price: £100 + >>> + """ + + def _convert_entity(match): + entity_body = match.group(3) + if match.group(1): + try: + if match.group(2): + number = int(entity_body, 16) + else: + number = int(entity_body, 10) + # Numeric character references in the 80-9F range are typically + # interpreted by browsers as representing the characters mapped + # to bytes 80-9F in the Windows-1252 encoding. For more info + # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets + if 0x80 <= number <= 0x9F: + return bytes((number,)).decode("cp1252") + except ValueError: + number = None + else: + if entity_body in keep: + return match.group(0) + number = html.entities.name2codepoint.get(entity_body) + if number is not None: + try: + return chr(number) + except (ValueError, OverflowError): + pass + + return "" if remove_illegal else match.group(0) + + return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding)) + + +###################################################################### + + +class TweetTokenizer(TokenizerI): + r""" + Tokenizer for tweets. 
+ + >>> from nltk.tokenize import TweetTokenizer + >>> tknzr = TweetTokenizer() + >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--" + >>> tknzr.tokenize(s0) # doctest: +NORMALIZE_WHITESPACE + ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', + '<--'] + + Examples using `strip_handles` and `reduce_len parameters`: + + >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True) + >>> s1 = '@remy: This is waaaaayyyy too much for you!!!!!!' + >>> tknzr.tokenize(s1) + [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!'] + """ + + # Values used to lazily compile WORD_RE and PHONE_WORD_RE, + # which are the core tokenizing regexes. + _WORD_RE = None + _PHONE_WORD_RE = None + + ###################################################################### + + def __init__( + self, + preserve_case=True, + reduce_len=False, + strip_handles=False, + match_phone_numbers=True, + ): + """ + Create a `TweetTokenizer` instance with settings for use in the `tokenize` method. + + :param preserve_case: Flag indicating whether to preserve the casing (capitalisation) + of text used in the `tokenize` method. Defaults to True. + :type preserve_case: bool + :param reduce_len: Flag indicating whether to replace repeated character sequences + of length 3 or greater with sequences of length 3. Defaults to False. + :type reduce_len: bool + :param strip_handles: Flag indicating whether to remove Twitter handles of text used + in the `tokenize` method. Defaults to False. + :type strip_handles: bool + :param match_phone_numbers: Flag indicating whether the `tokenize` method should look + for phone numbers. Defaults to True. + :type match_phone_numbers: bool + """ + self.preserve_case = preserve_case + self.reduce_len = reduce_len + self.strip_handles = strip_handles + self.match_phone_numbers = match_phone_numbers + + def tokenize(self, text: str) -> List[str]: + """Tokenize the input text. + + :param text: str + :rtype: list(str) + :return: a tokenized list of strings; joining this list returns\ + the original string if `preserve_case=False`. + """ + # Fix HTML character entities: + text = _replace_html_entities(text) + # Remove username handles + if self.strip_handles: + text = remove_handles(text) + # Normalize word lengthening + if self.reduce_len: + text = reduce_lengthening(text) + # Shorten problematic sequences of characters + safe_text = HANG_RE.sub(r"\1\1\1", text) + # Recognise phone numbers during tokenization + if self.match_phone_numbers: + words = self.PHONE_WORD_RE.findall(safe_text) + else: + words = self.WORD_RE.findall(safe_text) + # Possibly alter the case, but avoid changing emoticons like :D into :d: + if not self.preserve_case: + words = list( + map((lambda x: x if EMOTICON_RE.search(x) else x.lower()), words) + ) + return words + + @property + def WORD_RE(self) -> "regex.Pattern": + """Core TweetTokenizer regex""" + # Compiles the regex for this and all future instantiations of TweetTokenizer. + if not type(self)._WORD_RE: + type(self)._WORD_RE = regex.compile( + f"({'|'.join(REGEXPS)})", + regex.VERBOSE | regex.I | regex.UNICODE, + ) + return type(self)._WORD_RE + + @property + def PHONE_WORD_RE(self) -> "regex.Pattern": + """Secondary core TweetTokenizer regex""" + # Compiles the regex for this and all future instantiations of TweetTokenizer. 
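+        # Illustration: after the first access the compiled pattern is cached on
+        # the class, so
+        #     TweetTokenizer().PHONE_WORD_RE is TweetTokenizer().PHONE_WORD_RE
+        # is True and no recompilation takes place.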
+ if not type(self)._PHONE_WORD_RE: + type(self)._PHONE_WORD_RE = regex.compile( + f"({'|'.join(REGEXPS_PHONE)})", + regex.VERBOSE | regex.I | regex.UNICODE, + ) + return type(self)._PHONE_WORD_RE + + +###################################################################### +# Normalization Functions +###################################################################### + + +def reduce_lengthening(text): + """ + Replace repeated character sequences of length 3 or greater with sequences + of length 3. + """ + pattern = regex.compile(r"(.)\1{2,}") + return pattern.sub(r"\1\1\1", text) + + +def remove_handles(text): + """ + Remove Twitter username handles from text. + """ + # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly + return HANDLES_RE.sub(" ", text) + + +###################################################################### +# Tokenization Function +###################################################################### + + +def casual_tokenize( + text, + preserve_case=True, + reduce_len=False, + strip_handles=False, + match_phone_numbers=True, +): + """ + Convenience function for wrapping the tokenizer. + """ + return TweetTokenizer( + preserve_case=preserve_case, + reduce_len=reduce_len, + strip_handles=strip_handles, + match_phone_numbers=match_phone_numbers, + ).tokenize(text) + + +############################################################################### diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/destructive.py b/venv/lib/python3.10/site-packages/nltk/tokenize/destructive.py new file mode 100644 index 0000000000000000000000000000000000000000..4beb395dde57bf73082dfa91f65ad625d199bc31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/destructive.py @@ -0,0 +1,233 @@ +# Natural Language Toolkit: NLTK's very own tokenizer. +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Liling Tan +# Tom Aarsen <> (modifications) +# URL: +# For license information, see LICENSE.TXT + + +import re +import warnings +from typing import Iterator, List, Tuple + +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.util import align_tokens + + +class MacIntyreContractions: + """ + List of contractions adapted from Robert MacIntyre's tokenizer. + """ + + CONTRACTIONS2 = [ + r"(?i)\b(can)(?#X)(not)\b", + r"(?i)\b(d)(?#X)('ye)\b", + r"(?i)\b(gim)(?#X)(me)\b", + r"(?i)\b(gon)(?#X)(na)\b", + r"(?i)\b(got)(?#X)(ta)\b", + r"(?i)\b(lem)(?#X)(me)\b", + r"(?i)\b(more)(?#X)('n)\b", + r"(?i)\b(wan)(?#X)(na)(?=\s)", + ] + CONTRACTIONS3 = [r"(?i) ('t)(?#X)(is)\b", r"(?i) ('t)(?#X)(was)\b"] + CONTRACTIONS4 = [r"(?i)\b(whad)(dd)(ya)\b", r"(?i)\b(wha)(t)(cha)\b"] + + +class NLTKWordTokenizer(TokenizerI): + """ + The NLTK tokenizer that has improved upon the TreebankWordTokenizer. + + This is the method that is invoked by ``word_tokenize()``. It assumes that the + text has already been segmented into sentences, e.g. using ``sent_tokenize()``. + + The tokenizer is "destructive" such that the regexes applied will munge the + input string to a state beyond re-construction. It is possible to apply + `TreebankWordDetokenizer.detokenize` to the tokenized outputs of + `NLTKDestructiveWordTokenizer.tokenize` but there's no guarantees to + revert to the original string. + """ + + # Starting quotes. 
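+    # Each entry is a (compiled regex, replacement) pair; tokenize() applies them
+    # in order, e.g. a leading '"' is first rewritten to '``' and then padded
+    # with spaces so it comes out as its own token.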
+ STARTING_QUOTES = [ + (re.compile("([«“‘„]|[`]+)", re.U), r" \1 "), + (re.compile(r"^\""), r"``"), + (re.compile(r"(``)"), r" \1 "), + (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "), + (re.compile(r"(?i)(\')(?!re|ve|ll|m|t|s|d|n)(\w)\b", re.U), r"\1 \2"), + ] + + # Ending quotes. + ENDING_QUOTES = [ + (re.compile("([»”’])", re.U), r" \1 "), + (re.compile(r"''"), " '' "), + (re.compile(r'"'), " '' "), + (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "), + (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "), + ] + + # For improvements for starting/closing quotes from TreebankWordTokenizer, + # see discussion on https://github.com/nltk/nltk/pull/1437 + # Adding to TreebankWordTokenizer, nltk.word_tokenize now splits on + # - chervon quotes u'\xab' and u'\xbb' . + # - unicode quotes u'\u2018', u'\u2019', u'\u201c' and u'\u201d' + # See https://github.com/nltk/nltk/issues/1995#issuecomment-376741608 + # Also, behavior of splitting on clitics now follows Stanford CoreNLP + # - clitics covered (?!re|ve|ll|m|t|s|d)(\w)\b + + # Punctuation. + PUNCTUATION = [ + (re.compile(r'([^\.])(\.)([\]\)}>"\'' "»”’ " r"]*)\s*$", re.U), r"\1 \2 \3 "), + (re.compile(r"([:,])([^\d])"), r" \1 \2"), + (re.compile(r"([:,])$"), r" \1 "), + ( + re.compile(r"\.{2,}", re.U), + r" \g<0> ", + ), # See https://github.com/nltk/nltk/pull/2322 + (re.compile(r"[;@#$%&]"), r" \g<0> "), + ( + re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'), + r"\1 \2\3 ", + ), # Handles the final period. + (re.compile(r"[?!]"), r" \g<0> "), + (re.compile(r"([^'])' "), r"\1 ' "), + ( + re.compile(r"[*]", re.U), + r" \g<0> ", + ), # See https://github.com/nltk/nltk/pull/2322 + ] + + # Pads parentheses + PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ") + + # Optionally: Convert parentheses, brackets and converts them to PTB symbols. + CONVERT_PARENTHESES = [ + (re.compile(r"\("), "-LRB-"), + (re.compile(r"\)"), "-RRB-"), + (re.compile(r"\["), "-LSB-"), + (re.compile(r"\]"), "-RSB-"), + (re.compile(r"\{"), "-LCB-"), + (re.compile(r"\}"), "-RCB-"), + ] + + DOUBLE_DASHES = (re.compile(r"--"), r" -- ") + + # List of contractions adapted from Robert MacIntyre's tokenizer. + _contractions = MacIntyreContractions() + CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2)) + CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3)) + + def tokenize( + self, text: str, convert_parentheses: bool = False, return_str: bool = False + ) -> List[str]: + r"""Return a tokenized copy of `text`. + + >>> from nltk.tokenize import NLTKWordTokenizer + >>> s = '''Good muffins cost $3.88 (roughly 3,36 euros)\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> NLTKWordTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '(', 'roughly', '3,36', + 'euros', ')', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + >>> NLTKWordTokenizer().tokenize(s, convert_parentheses=True) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '-LRB-', 'roughly', '3,36', + 'euros', '-RRB-', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + + + :param text: A string with a sentence or sentences. + :type text: str + :param convert_parentheses: if True, replace parentheses to PTB symbols, + e.g. `(` to `-LRB-`. Defaults to False. + :type convert_parentheses: bool, optional + :param return_str: If True, return tokens as space-separated string, + defaults to False. 
+ :type return_str: bool, optional + :return: List of tokens from `text`. + :rtype: List[str] + """ + if return_str: + warnings.warn( + "Parameter 'return_str' has been deprecated and should no " + "longer be used.", + category=DeprecationWarning, + stacklevel=2, + ) + + for regexp, substitution in self.STARTING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp, substitution in self.PUNCTUATION: + text = regexp.sub(substitution, text) + + # Handles parentheses. + regexp, substitution = self.PARENS_BRACKETS + text = regexp.sub(substitution, text) + # Optionally convert parentheses + if convert_parentheses: + for regexp, substitution in self.CONVERT_PARENTHESES: + text = regexp.sub(substitution, text) + + # Handles double dash. + regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + # add extra space to make things easier + text = " " + text + " " + + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r" \1 \2 ", text) + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r" \1 \2 ", text) + + # We are not using CONTRACTIONS4 since + # they are also commented out in the SED scripts + # for regexp in self._contractions.CONTRACTIONS4: + # text = regexp.sub(r' \1 \2 \3 ', text) + + return text.split() + + def span_tokenize(self, text: str) -> Iterator[Tuple[int, int]]: + r""" + Returns the spans of the tokens in ``text``. + Uses the post-hoc nltk.tokens.align_tokens to return the offset spans. + + >>> from nltk.tokenize import NLTKWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), + ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), + ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), + ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)] + >>> list(NLTKWordTokenizer().span_tokenize(s)) == expected + True + >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')', + ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.'] + >>> [s[start:end] for start, end in NLTKWordTokenizer().span_tokenize(s)] == expected + True + + :param text: A string with a sentence or sentences. + :type text: str + :yield: Tuple[int, int] + """ + raw_tokens = self.tokenize(text) + + # Convert converted quotes back to original double quotes + # Do this only if original text contains double quote(s) or double + # single-quotes (because '' might be transformed to `` if it is + # treated as starting quotes). 
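+        # Rough sketch of the realignment below (hypothetical input):
+        #     text       = 'She said "hi"'
+        #     raw_tokens = ['She', 'said', '``', 'hi', "''"]
+        #     tokens     = ['She', 'said', '"', 'hi', '"']
+        # so that align_tokens() can locate each token's offsets in ``text``.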
+ if ('"' in text) or ("''" in text): + # Find double quotes and converted quotes + matched = [m.group() for m in re.finditer(r"``|'{2}|\"", text)] + + # Replace converted quotes back to double quotes + tokens = [ + matched.pop(0) if tok in ['"', "``", "''"] else tok + for tok in raw_tokens + ] + else: + tokens = raw_tokens + + yield from align_tokens(tokens, text) diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/repp.py b/venv/lib/python3.10/site-packages/nltk/tokenize/repp.py new file mode 100644 index 0000000000000000000000000000000000000000..6e0740a94645f14ec6162814cdb3c92167f503bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/repp.py @@ -0,0 +1,149 @@ +# Natural Language Toolkit: Interface to the Repp Tokenizer +# +# Copyright (C) 2001-2015 NLTK Project +# Authors: Rebecca Dridan and Stephan Oepen +# Contributors: Liling Tan +# +# URL: +# For license information, see LICENSE.TXT + +import os +import re +import subprocess +import sys +import tempfile + +from nltk.data import ZipFilePathPointer +from nltk.internals import find_dir +from nltk.tokenize.api import TokenizerI + + +class ReppTokenizer(TokenizerI): + """ + A class for word tokenization using the REPP parser described in + Rebecca Dridan and Stephan Oepen (2012) Tokenization: Returning to a + Long Solved Problem - A Survey, Contrastive Experiment, Recommendations, + and Toolkit. In ACL. http://anthology.aclweb.org/P/P12/P12-2.pdf#page=406 + + >>> sents = ['Tokenization is widely regarded as a solved problem due to the high accuracy that rulebased tokenizers achieve.' , + ... 'But rule-based tokenizers are hard to maintain and their rules language specific.' , + ... 'We evaluated our method on three languages and obtained error rates of 0.27% (English), 0.35% (Dutch) and 0.76% (Italian) for our best models.' + ... ] + >>> tokenizer = ReppTokenizer('/home/alvas/repp/') # doctest: +SKIP + >>> for sent in sents: # doctest: +SKIP + ... tokenizer.tokenize(sent) # doctest: +SKIP + ... + (u'Tokenization', u'is', u'widely', u'regarded', u'as', u'a', u'solved', u'problem', u'due', u'to', u'the', u'high', u'accuracy', u'that', u'rulebased', u'tokenizers', u'achieve', u'.') + (u'But', u'rule-based', u'tokenizers', u'are', u'hard', u'to', u'maintain', u'and', u'their', u'rules', u'language', u'specific', u'.') + (u'We', u'evaluated', u'our', u'method', u'on', u'three', u'languages', u'and', u'obtained', u'error', u'rates', u'of', u'0.27', u'%', u'(', u'English', u')', u',', u'0.35', u'%', u'(', u'Dutch', u')', u'and', u'0.76', u'%', u'(', u'Italian', u')', u'for', u'our', u'best', u'models', u'.') + + >>> for sent in tokenizer.tokenize_sents(sents): # doctest: +SKIP + ... print(sent) # doctest: +SKIP + ... + (u'Tokenization', u'is', u'widely', u'regarded', u'as', u'a', u'solved', u'problem', u'due', u'to', u'the', u'high', u'accuracy', u'that', u'rulebased', u'tokenizers', u'achieve', u'.') + (u'But', u'rule-based', u'tokenizers', u'are', u'hard', u'to', u'maintain', u'and', u'their', u'rules', u'language', u'specific', u'.') + (u'We', u'evaluated', u'our', u'method', u'on', u'three', u'languages', u'and', u'obtained', u'error', u'rates', u'of', u'0.27', u'%', u'(', u'English', u')', u',', u'0.35', u'%', u'(', u'Dutch', u')', u'and', u'0.76', u'%', u'(', u'Italian', u')', u'for', u'our', u'best', u'models', u'.') + >>> for sent in tokenizer.tokenize_sents(sents, keep_token_positions=True): # doctest: +SKIP + ... print(sent) # doctest: +SKIP + ... 
+ [(u'Tokenization', 0, 12), (u'is', 13, 15), (u'widely', 16, 22), (u'regarded', 23, 31), (u'as', 32, 34), (u'a', 35, 36), (u'solved', 37, 43), (u'problem', 44, 51), (u'due', 52, 55), (u'to', 56, 58), (u'the', 59, 62), (u'high', 63, 67), (u'accuracy', 68, 76), (u'that', 77, 81), (u'rulebased', 82, 91), (u'tokenizers', 92, 102), (u'achieve', 103, 110), (u'.', 110, 111)] + [(u'But', 0, 3), (u'rule-based', 4, 14), (u'tokenizers', 15, 25), (u'are', 26, 29), (u'hard', 30, 34), (u'to', 35, 37), (u'maintain', 38, 46), (u'and', 47, 50), (u'their', 51, 56), (u'rules', 57, 62), (u'language', 63, 71), (u'specific', 72, 80), (u'.', 80, 81)] + [(u'We', 0, 2), (u'evaluated', 3, 12), (u'our', 13, 16), (u'method', 17, 23), (u'on', 24, 26), (u'three', 27, 32), (u'languages', 33, 42), (u'and', 43, 46), (u'obtained', 47, 55), (u'error', 56, 61), (u'rates', 62, 67), (u'of', 68, 70), (u'0.27', 71, 75), (u'%', 75, 76), (u'(', 77, 78), (u'English', 78, 85), (u')', 85, 86), (u',', 86, 87), (u'0.35', 88, 92), (u'%', 92, 93), (u'(', 94, 95), (u'Dutch', 95, 100), (u')', 100, 101), (u'and', 102, 105), (u'0.76', 106, 110), (u'%', 110, 111), (u'(', 112, 113), (u'Italian', 113, 120), (u')', 120, 121), (u'for', 122, 125), (u'our', 126, 129), (u'best', 130, 134), (u'models', 135, 141), (u'.', 141, 142)] + """ + + def __init__(self, repp_dir, encoding="utf8"): + self.repp_dir = self.find_repptokenizer(repp_dir) + # Set a directory to store the temporary files. + self.working_dir = tempfile.gettempdir() + # Set an encoding for the input strings. + self.encoding = encoding + + def tokenize(self, sentence): + """ + Use Repp to tokenize a single sentence. + + :param sentence: A single sentence string. + :type sentence: str + :return: A tuple of tokens. + :rtype: tuple(str) + """ + return next(self.tokenize_sents([sentence])) + + def tokenize_sents(self, sentences, keep_token_positions=False): + """ + Tokenize multiple sentences using Repp. + + :param sentences: A list of sentence strings. + :type sentences: list(str) + :return: A list of tuples of tokens + :rtype: iter(tuple(str)) + """ + with tempfile.NamedTemporaryFile( + prefix="repp_input.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + # Write sentences to temporary input file. + for sent in sentences: + input_file.write(str(sent) + "\n") + input_file.close() + # Generate command to run REPP. + cmd = self.generate_repp_command(input_file.name) + # Decode the stdout and strips the ending newline. + repp_output = self._execute(cmd).decode(self.encoding).strip() + for tokenized_sent in self.parse_repp_outputs(repp_output): + if not keep_token_positions: + # Removes token position information. + tokenized_sent, starts, ends = zip(*tokenized_sent) + yield tokenized_sent + + def generate_repp_command(self, inputfilename): + """ + This module generates the REPP command to be used at the terminal. + + :param inputfilename: path to the input file + :type inputfilename: str + """ + cmd = [self.repp_dir + "/src/repp"] + cmd += ["-c", self.repp_dir + "/erg/repp.set"] + cmd += ["--format", "triple"] + cmd += [inputfilename] + return cmd + + @staticmethod + def _execute(cmd): + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + return stdout + + @staticmethod + def parse_repp_outputs(repp_output): + """ + This module parses the tri-tuple format that REPP outputs using the + "--format triple" option and returns an generator with tuple of string + tokens. 
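+
+        Each line of the "triple" output has the form (hypothetical example)::
+
+            (0, 12, Tokenization)
+
+        i.e. character start offset, end offset and token text, one token per
+        line, with a blank line separating sentences.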
+ + :param repp_output: + :type repp_output: type + :return: an iterable of the tokenized sentences as tuples of strings + :rtype: iter(tuple) + """ + line_regex = re.compile(r"^\((\d+), (\d+), (.+)\)$", re.MULTILINE) + for section in repp_output.split("\n\n"): + words_with_positions = [ + (token, int(start), int(end)) + for start, end, token in line_regex.findall(section) + ] + words = tuple(t[2] for t in words_with_positions) + yield words_with_positions + + def find_repptokenizer(self, repp_dirname): + """ + A module to find REPP tokenizer binary and its *repp.set* config file. + """ + if os.path.exists(repp_dirname): # If a full path is given. + _repp_dir = repp_dirname + else: # Try to find path to REPP directory in environment variables. + _repp_dir = find_dir(repp_dirname, env_vars=("REPP_TOKENIZER",)) + # Checks for the REPP binary and erg/repp.set config file. + assert os.path.exists(_repp_dir + "/src/repp") + assert os.path.exists(_repp_dir + "/erg/repp.set") + return _repp_dir diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/stanford.py b/venv/lib/python3.10/site-packages/nltk/tokenize/stanford.py new file mode 100644 index 0000000000000000000000000000000000000000..81a2d8584aee1d4c39042af6a150bd41c838ee14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/stanford.py @@ -0,0 +1,115 @@ +# Natural Language Toolkit: Interface to the Stanford Tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Xu +# +# URL: +# For license information, see LICENSE.TXT + +import json +import os +import tempfile +import warnings +from subprocess import PIPE + +from nltk.internals import _java_options, config_java, find_jar, java +from nltk.parse.corenlp import CoreNLPParser +from nltk.tokenize.api import TokenizerI + +_stanford_url = "https://nlp.stanford.edu/software/tokenizer.shtml" + + +class StanfordTokenizer(TokenizerI): + r""" + Interface to the Stanford Tokenizer + + >>> from nltk.tokenize.stanford import StanfordTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks." + >>> StanfordTokenizer().tokenize(s) # doctest: +SKIP + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> s = "The colour of the wall is blue." + >>> StanfordTokenizer(options={"americanize": True}).tokenize(s) # doctest: +SKIP + ['The', 'color', 'of', 'the', 'wall', 'is', 'blue', '.'] + """ + + _JAR = "stanford-postagger.jar" + + def __init__( + self, + path_to_jar=None, + encoding="utf8", + options=None, + verbose=False, + java_options="-mx1000m", + ): + # Raise deprecation warning. + warnings.warn( + str( + "\nThe StanfordTokenizer will " + "be deprecated in version 3.2.5.\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.'" + ), + DeprecationWarning, + stacklevel=2, + ) + + self._stanford_jar = find_jar( + self._JAR, + path_to_jar, + env_vars=("STANFORD_POSTAGGER",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + + self._encoding = encoding + self.java_options = java_options + + options = {} if options is None else options + self._options_cmd = ",".join(f"{key}={val}" for key, val in options.items()) + + @staticmethod + def _parse_tokenized_output(s): + return s.splitlines() + + def tokenize(self, s): + """ + Use stanford tokenizer's PTBTokenizer to tokenize multiple sentences. 
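+
+        A rough usage sketch (requires a local ``stanford-postagger.jar``; the
+        path below is purely illustrative)::
+
+            st = StanfordTokenizer(path_to_jar='/path/to/stanford-postagger.jar')
+            st.tokenize("Good muffins cost $3.88 in New York.")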
+ """ + cmd = ["edu.stanford.nlp.process.PTBTokenizer"] + return self._parse_tokenized_output(self._execute(cmd, s)) + + def _execute(self, cmd, input_, verbose=False): + encoding = self._encoding + cmd.extend(["-charset", encoding]) + _options_cmd = self._options_cmd + if _options_cmd: + cmd.extend(["-options", self._options_cmd]) + + default_options = " ".join(_java_options) + + # Configure java. + config_java(options=self.java_options, verbose=verbose) + + # Windows is incompatible with NamedTemporaryFile() without passing in delete=False. + with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file: + # Write the actual sentences to the temporary input file + if isinstance(input_, str) and encoding: + input_ = input_.encode(encoding) + input_file.write(input_) + input_file.flush() + + cmd.append(input_file.name) + + # Run the tagger and get the output. + stdout, stderr = java( + cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE + ) + stdout = stdout.decode(encoding) + + os.unlink(input_file.name) + + # Return java configurations to their default values. + config_java(options=default_options, verbose=False) + + return stdout diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py b/venv/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py new file mode 100644 index 0000000000000000000000000000000000000000..ff3f16621e3a3c38ee0265e817b04c655856dd70 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py @@ -0,0 +1,292 @@ +#!/usr/bin/env python +# Natural Language Toolkit: Interface to the Stanford Segmenter +# for Chinese and Arabic +# +# Copyright (C) 2001-2023 NLTK Project +# Author: 52nlp <52nlpcn@gmail.com> +# Casper Lehmann-Strøm +# Alex Constantin +# +# URL: +# For license information, see LICENSE.TXT + +import json +import os +import tempfile +import warnings +from subprocess import PIPE + +from nltk.internals import ( + _java_options, + config_java, + find_dir, + find_file, + find_jar, + java, +) +from nltk.tokenize.api import TokenizerI + +_stanford_url = "https://nlp.stanford.edu/software" + + +class StanfordSegmenter(TokenizerI): + """Interface to the Stanford Segmenter + + If stanford-segmenter version is older than 2016-10-31, then path_to_slf4j + should be provieded, for example:: + + seg = StanfordSegmenter(path_to_slf4j='/YOUR_PATH/slf4j-api.jar') + + >>> from nltk.tokenize.stanford_segmenter import StanfordSegmenter + >>> seg = StanfordSegmenter() # doctest: +SKIP + >>> seg.default_config('zh') # doctest: +SKIP + >>> sent = u'这是斯坦福中文分词器测试' + >>> print(seg.segment(sent)) # doctest: +SKIP + \u8fd9 \u662f \u65af\u5766\u798f \u4e2d\u6587 \u5206\u8bcd\u5668 \u6d4b\u8bd5 + + >>> seg.default_config('ar') # doctest: +SKIP + >>> sent = u'هذا هو تصنيف ستانفورد العربي للكلمات' + >>> print(seg.segment(sent.split())) # doctest: +SKIP + \u0647\u0630\u0627 \u0647\u0648 \u062a\u0635\u0646\u064a\u0641 \u0633\u062a\u0627\u0646\u0641\u0648\u0631\u062f \u0627\u0644\u0639\u0631\u0628\u064a \u0644 \u0627\u0644\u0643\u0644\u0645\u0627\u062a + + """ + + _JAR = "stanford-segmenter.jar" + + def __init__( + self, + path_to_jar=None, + path_to_slf4j=None, + java_class=None, + path_to_model=None, + path_to_dict=None, + path_to_sihan_corpora_dict=None, + sihan_post_processing="false", + keep_whitespaces="false", + encoding="UTF-8", + options=None, + verbose=False, + java_options="-mx2g", + ): + # Raise deprecation warning. 
+ warnings.simplefilter("always", DeprecationWarning) + warnings.warn( + str( + "\nThe StanfordTokenizer will " + "be deprecated in version 3.2.5.\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPTokenizer\033[0m instead.'" + ), + DeprecationWarning, + stacklevel=2, + ) + warnings.simplefilter("ignore", DeprecationWarning) + + stanford_segmenter = find_jar( + self._JAR, + path_to_jar, + env_vars=("STANFORD_SEGMENTER",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + if path_to_slf4j is not None: + slf4j = find_jar( + "slf4j-api.jar", + path_to_slf4j, + env_vars=("SLF4J", "STANFORD_SEGMENTER"), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + else: + slf4j = None + + # This is passed to java as the -cp option, the old version of segmenter needs slf4j. + # The new version of stanford-segmenter-2016-10-31 doesn't need slf4j + self._stanford_jar = os.pathsep.join( + _ for _ in [stanford_segmenter, slf4j] if _ is not None + ) + + self._java_class = java_class + self._model = path_to_model + self._sihan_corpora_dict = path_to_sihan_corpora_dict + self._sihan_post_processing = sihan_post_processing + self._keep_whitespaces = keep_whitespaces + self._dict = path_to_dict + + self._encoding = encoding + self.java_options = java_options + options = {} if options is None else options + self._options_cmd = ",".join( + f"{key}={json.dumps(val)}" for key, val in options.items() + ) + + def default_config(self, lang): + """ + Attempt to initialize Stanford Word Segmenter for the specified language + using the STANFORD_SEGMENTER and STANFORD_MODELS environment variables + """ + + search_path = () + if os.environ.get("STANFORD_SEGMENTER"): + search_path = {os.path.join(os.environ.get("STANFORD_SEGMENTER"), "data")} + + # init for Chinese-specific files + self._dict = None + self._sihan_corpora_dict = None + self._sihan_post_processing = "false" + + if lang == "ar": + self._java_class = ( + "edu.stanford.nlp.international.arabic.process.ArabicSegmenter" + ) + model = "arabic-segmenter-atb+bn+arztrain.ser.gz" + + elif lang == "zh": + self._java_class = "edu.stanford.nlp.ie.crf.CRFClassifier" + model = "pku.gz" + self._sihan_post_processing = "true" + + path_to_dict = "dict-chris6.ser.gz" + try: + self._dict = find_file( + path_to_dict, + searchpath=search_path, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_MODELS",), + ) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using env. " + "variables STANFORD_MODELS and /data/)" + % path_to_dict + ) from e + + sihan_dir = "./data/" + try: + path_to_sihan_dir = find_dir( + sihan_dir, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_SEGMENTER",), + ) + self._sihan_corpora_dict = os.path.join(path_to_sihan_dir, sihan_dir) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using the " + "STANFORD_SEGMENTER environment variable)" % sihan_dir + ) from e + else: + raise LookupError(f"Unsupported language {lang}") + + try: + self._model = find_file( + model, + searchpath=search_path, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_MODELS", "STANFORD_SEGMENTER"), + ) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using env. 
" + "variables STANFORD_MODELS and /data/)" % model + ) from e + + def tokenize(self, s): + super().tokenize(s) + + def segment_file(self, input_file_path): + """ """ + cmd = [ + self._java_class, + "-loadClassifier", + self._model, + "-keepAllWhitespaces", + self._keep_whitespaces, + "-textFile", + input_file_path, + ] + if self._sihan_corpora_dict is not None: + cmd.extend( + [ + "-serDictionary", + self._dict, + "-sighanCorporaDict", + self._sihan_corpora_dict, + "-sighanPostProcessing", + self._sihan_post_processing, + ] + ) + + stdout = self._execute(cmd) + + return stdout + + def segment(self, tokens): + return self.segment_sents([tokens]) + + def segment_sents(self, sentences): + """ """ + encoding = self._encoding + # Create a temporary input file + _input_fh, self._input_file_path = tempfile.mkstemp(text=True) + + # Write the actural sentences to the temporary input file + _input_fh = os.fdopen(_input_fh, "wb") + _input = "\n".join(" ".join(x) for x in sentences) + if isinstance(_input, str) and encoding: + _input = _input.encode(encoding) + _input_fh.write(_input) + _input_fh.close() + + cmd = [ + self._java_class, + "-loadClassifier", + self._model, + "-keepAllWhitespaces", + self._keep_whitespaces, + "-textFile", + self._input_file_path, + ] + if self._sihan_corpora_dict is not None: + cmd.extend( + [ + "-serDictionary", + self._dict, + "-sighanCorporaDict", + self._sihan_corpora_dict, + "-sighanPostProcessing", + self._sihan_post_processing, + ] + ) + + stdout = self._execute(cmd) + + # Delete the temporary file + os.unlink(self._input_file_path) + + return stdout + + def _execute(self, cmd, verbose=False): + encoding = self._encoding + cmd.extend(["-inputEncoding", encoding]) + _options_cmd = self._options_cmd + if _options_cmd: + cmd.extend(["-options", self._options_cmd]) + + default_options = " ".join(_java_options) + + # Configure java. + config_java(options=self.java_options, verbose=verbose) + + stdout, _stderr = java( + cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE + ) + stdout = stdout.decode(encoding) + + # Return java configurations to their default values. + config_java(options=default_options, verbose=False) + + return stdout diff --git a/venv/lib/python3.10/site-packages/nltk/tokenize/toktok.py b/venv/lib/python3.10/site-packages/nltk/tokenize/toktok.py new file mode 100644 index 0000000000000000000000000000000000000000..4229a7327743ad9788449a82c8d2350b9c8db392 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tokenize/toktok.py @@ -0,0 +1,179 @@ +# Natural Language Toolkit: Python port of the tok-tok.pl tokenizer. +# +# Copyright (C) 2001-2015 NLTK Project +# Author: Jon Dehdari +# Contributors: Liling Tan, Selcuk Ayguney, ikegami, Martijn Pieters +# +# URL: +# For license information, see LICENSE.TXT + +""" +The tok-tok tokenizer is a simple, general tokenizer, where the input has one +sentence per line; thus only final period is tokenized. + +Tok-tok has been tested on, and gives reasonably good results for English, +Persian, Russian, Czech, French, German, Vietnamese, Tajik, and a few others. +The input should be in UTF-8 encoding. + +Reference: +Jon Dehdari. 2014. A Neurophysiologically-Inspired Statistical Language +Model (Doctoral dissertation). Columbus, OH, USA: The Ohio State University. 
+""" + +import re + +from nltk.tokenize.api import TokenizerI + + +class ToktokTokenizer(TokenizerI): + """ + This is a Python port of the tok-tok.pl from + https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl + + >>> toktok = ToktokTokenizer() + >>> text = u'Is 9.5 or 525,600 my favorite number?' + >>> print(toktok.tokenize(text, return_str=True)) + Is 9.5 or 525,600 my favorite number ? + >>> text = u'The https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl is a website with/and/or slashes and sort of weird : things' + >>> print(toktok.tokenize(text, return_str=True)) + The https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl is a website with/and/or slashes and sort of weird : things + >>> text = u'\xa1This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf' + >>> expected = u'\xa1 This , is a sentence with weird \xbb symbols \u2026 appearing everywhere \xbf' + >>> assert toktok.tokenize(text, return_str=True) == expected + >>> toktok.tokenize(text) == [u'\xa1', u'This', u',', u'is', u'a', u'sentence', u'with', u'weird', u'\xbb', u'symbols', u'\u2026', u'appearing', u'everywhere', u'\xbf'] + True + """ + + # Replace non-breaking spaces with normal spaces. + NON_BREAKING = re.compile("\u00A0"), " " + + # Pad some funky punctuation. + FUNKY_PUNCT_1 = re.compile(r'([،;؛¿!"\])}»›”؟¡%٪°±©®।॥…])'), r" \1 " + # Pad more funky punctuation. + FUNKY_PUNCT_2 = re.compile(r"([({\[“‘„‚«‹「『])"), r" \1 " + # Pad En dash and em dash + EN_EM_DASHES = re.compile("([–—])"), r" \1 " + + # Replace problematic character with numeric character reference. + AMPERCENT = re.compile("& "), "& " + TAB = re.compile("\t"), " " + PIPE = re.compile(r"\|"), " | " + + # Pad numbers with commas to keep them from further tokenization. + COMMA_IN_NUM = re.compile(r"(? "something ..." + # "something." -> "something ." + FINAL_PERIOD_1 = re.compile(r"(? "... stuff ." + FINAL_PERIOD_2 = re.compile(r"""(? +# URL: +# For license information, see LICENSE.TXT + +from re import finditer +from xml.sax.saxutils import escape, unescape + + +def string_span_tokenize(s, sep): + r""" + Return the offsets of the tokens in *s*, as a sequence of ``(start, end)`` + tuples, by splitting the string at each occurrence of *sep*. + + >>> from nltk.tokenize.util import string_span_tokenize + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... two of them.\n\nThanks.''' + >>> list(string_span_tokenize(s, " ")) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (5, 12), (13, 17), (18, 26), (27, 30), (31, 36), (37, 37), + (38, 44), (45, 48), (49, 55), (56, 58), (59, 73)] + + :param s: the string to be tokenized + :type s: str + :param sep: the token separator + :type sep: str + :rtype: iter(tuple(int, int)) + """ + if len(sep) == 0: + raise ValueError("Token delimiter must not be empty") + left = 0 + while True: + try: + right = s.index(sep, left) + if right != 0: + yield left, right + except ValueError: + if left != len(s): + yield left, len(s) + break + + left = right + len(sep) + + +def regexp_span_tokenize(s, regexp): + r""" + Return the offsets of the tokens in *s*, as a sequence of ``(start, end)`` + tuples, by splitting the string at each successive match of *regexp*. + + >>> from nltk.tokenize.util import regexp_span_tokenize + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... 
two of them.\n\nThanks.''' + >>> list(regexp_span_tokenize(s, r'\s')) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), + (38, 44), (45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)] + + :param s: the string to be tokenized + :type s: str + :param regexp: regular expression that matches token separators (must not be empty) + :type regexp: str + :rtype: iter(tuple(int, int)) + """ + left = 0 + for m in finditer(regexp, s): + right, next = m.span() + if right != left: + yield left, right + left = next + yield left, len(s) + + +def spans_to_relative(spans): + r""" + Return a sequence of relative spans, given a sequence of spans. + + >>> from nltk.tokenize import WhitespaceTokenizer + >>> from nltk.tokenize.util import spans_to_relative + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... two of them.\n\nThanks.''' + >>> list(spans_to_relative(WhitespaceTokenizer().span_tokenize(s))) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (1, 7), (1, 4), (1, 5), (1, 2), (1, 3), (1, 5), (2, 6), + (1, 3), (1, 2), (1, 3), (1, 2), (1, 5), (2, 7)] + + :param spans: a sequence of (start, end) offsets of the tokens + :type spans: iter(tuple(int, int)) + :rtype: iter(tuple(int, int)) + """ + prev = 0 + for left, right in spans: + yield left - prev, right - left + prev = right + + +class CJKChars: + """ + An object that enumerates the code points of the CJK characters as listed on + https://en.wikipedia.org/wiki/Basic_Multilingual_Plane#Basic_Multilingual_Plane + + This is a Python port of the CJK code point enumerations of Moses tokenizer: + https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/detokenizer.perl#L309 + """ + + # Hangul Jamo (1100–11FF) + Hangul_Jamo = (4352, 4607) # (ord(u"\u1100"), ord(u"\u11ff")) + + # CJK Radicals Supplement (2E80–2EFF) + # Kangxi Radicals (2F00–2FDF) + # Ideographic Description Characters (2FF0–2FFF) + # CJK Symbols and Punctuation (3000–303F) + # Hiragana (3040–309F) + # Katakana (30A0–30FF) + # Bopomofo (3100–312F) + # Hangul Compatibility Jamo (3130–318F) + # Kanbun (3190–319F) + # Bopomofo Extended (31A0–31BF) + # CJK Strokes (31C0–31EF) + # Katakana Phonetic Extensions (31F0–31FF) + # Enclosed CJK Letters and Months (3200–32FF) + # CJK Compatibility (3300–33FF) + # CJK Unified Ideographs Extension A (3400–4DBF) + # Yijing Hexagram Symbols (4DC0–4DFF) + # CJK Unified Ideographs (4E00–9FFF) + # Yi Syllables (A000–A48F) + # Yi Radicals (A490–A4CF) + CJK_Radicals = (11904, 42191) # (ord(u"\u2e80"), ord(u"\ua4cf")) + + # Phags-pa (A840–A87F) + Phags_Pa = (43072, 43135) # (ord(u"\ua840"), ord(u"\ua87f")) + + # Hangul Syllables (AC00–D7AF) + Hangul_Syllables = (44032, 55215) # (ord(u"\uAC00"), ord(u"\uD7AF")) + + # CJK Compatibility Ideographs (F900–FAFF) + CJK_Compatibility_Ideographs = (63744, 64255) # (ord(u"\uF900"), ord(u"\uFAFF")) + + # CJK Compatibility Forms (FE30–FE4F) + CJK_Compatibility_Forms = (65072, 65103) # (ord(u"\uFE30"), ord(u"\uFE4F")) + + # Range U+FF65–FFDC encodes halfwidth forms, of Katakana and Hangul characters + Katakana_Hangul_Halfwidth = (65381, 65500) # (ord(u"\uFF65"), ord(u"\uFFDC")) + + # Supplementary Ideographic Plane 20000–2FFFF + Supplementary_Ideographic_Plane = ( + 131072, + 196607, + ) # (ord(u"\U00020000"), ord(u"\U0002FFFF")) + + ranges = [ + Hangul_Jamo, + CJK_Radicals, + Phags_Pa, + Hangul_Syllables, + CJK_Compatibility_Ideographs, + CJK_Compatibility_Forms, + Katakana_Hangul_Halfwidth, + Supplementary_Ideographic_Plane, + ] + + +def 
is_cjk(character): + """ + Python port of Moses' code to check for CJK character. + + >>> CJKChars().ranges + [(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215), (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)] + >>> is_cjk(u'\u33fe') + True + >>> is_cjk(u'\uFE5F') + False + + :param character: The character that needs to be checked. + :type character: char + :return: bool + """ + return any( + [ + start <= ord(character) <= end + for start, end in [ + (4352, 4607), + (11904, 42191), + (43072, 43135), + (44032, 55215), + (63744, 64255), + (65072, 65103), + (65381, 65500), + (131072, 196607), + ] + ] + ) + + +def xml_escape(text): + """ + This function transforms the input text into an "escaped" version suitable + for well-formed XML formatting. + + Note that the default xml.sax.saxutils.escape() function don't escape + some characters that Moses does so we have to manually add them to the + entities dictionary. + + >>> input_str = ''')| & < > ' " ] [''' + >>> expected_output = ''')| & < > ' " ] [''' + >>> escape(input_str) == expected_output + True + >>> xml_escape(input_str) + ')| & < > ' " ] [' + + :param text: The text that needs to be escaped. + :type text: str + :rtype: str + """ + return escape( + text, + entities={ + r"'": r"'", + r'"': r""", + r"|": r"|", + r"[": r"[", + r"]": r"]", + }, + ) + + +def xml_unescape(text): + """ + This function transforms the "escaped" version suitable + for well-formed XML formatting into humanly-readable string. + + Note that the default xml.sax.saxutils.unescape() function don't unescape + some characters that Moses does so we have to manually add them to the + entities dictionary. + + >>> from xml.sax.saxutils import unescape + >>> s = ')| & < > ' " ] [' + >>> expected = ''')| & < > \' " ] [''' + >>> xml_unescape(s) == expected + True + + :param text: The text that needs to be unescaped. + :type text: str + :rtype: str + """ + return unescape( + text, + entities={ + r"'": r"'", + r""": r'"', + r"|": r"|", + r"[": r"[", + r"]": r"]", + }, + ) + + +def align_tokens(tokens, sentence): + """ + This module attempt to find the offsets of the tokens in *s*, as a sequence + of ``(start, end)`` tuples, given the tokens and also the source string. + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> from nltk.tokenize.util import align_tokens + >>> s = str("The plane, bound for St Petersburg, crashed in Egypt's " + ... "Sinai desert just 23 minutes after take-off from Sharm el-Sheikh " + ... "on Saturday.") + >>> tokens = TreebankWordTokenizer().tokenize(s) + >>> expected = [(0, 3), (4, 9), (9, 10), (11, 16), (17, 20), (21, 23), + ... (24, 34), (34, 35), (36, 43), (44, 46), (47, 52), (52, 54), + ... (55, 60), (61, 67), (68, 72), (73, 75), (76, 83), (84, 89), + ... (90, 98), (99, 103), (104, 109), (110, 119), (120, 122), + ... (123, 131), (131, 132)] + >>> output = list(align_tokens(tokens, s)) + >>> len(tokens) == len(expected) == len(output) # Check that length of tokens and tuples are the same. + True + >>> expected == list(align_tokens(tokens, s)) # Check that the output is as expected. + True + >>> tokens == [s[start:end] for start, end in output] # Check that the slices of the string corresponds to the tokens. 
+ True + + :param tokens: The list of strings that are the result of tokenization + :type tokens: list(str) + :param sentence: The original string + :type sentence: str + :rtype: list(tuple(int,int)) + """ + point = 0 + offsets = [] + for token in tokens: + try: + start = sentence.index(token, point) + except ValueError as e: + raise ValueError(f'substring "{token}" not found in "{sentence}"') from e + point = start + len(token) + offsets.append((start, point)) + return offsets
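# Usage sketch (added illustration, not part of the upstream
# nltk.tokenize.util module). It exercises only the helpers defined above and
# the standard library, and is guarded so it never runs on import.
if __name__ == "__main__":
    s = "Good muffins cost $3.88 in New York."

    # align_tokens: recover (start, end) character offsets for tokens that
    # were produced elsewhere (here, a plain whitespace split).
    tokens = s.split()
    spans = align_tokens(tokens, s)
    assert [s[start:end] for start, end in spans] == tokens

    # spans_to_relative: the same offsets, each expressed relative to the
    # end of the previous token.
    rel = list(spans_to_relative(spans))
    assert len(rel) == len(spans)

    # string_span_tokenize: offsets obtained by splitting on a literal
    # separator instead of aligning pre-made tokens.
    first_two = [s[a:b] for a, b in string_span_tokenize(s, " ")][:2]
    assert first_two == ["Good", "muffins"]

    # is_cjk: single-character check against the CJK code-point ranges.
    assert is_cjk("\u6c49") and not is_cjk("a")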