diff --git a/ckpts/universal/global_step40/zero/17.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/17.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9ba412f5caa1a8b008b10d7160a64a55e3583f18
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/17.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26326af8e92ed0227fc9dc3c1edb1c9077a434d6406ce50333a14bfb378bacf6
+size 16778411
diff --git a/ckpts/universal/global_step40/zero/18.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/18.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4be5925da28e90b394bee1d3178ef5fa9c806e24
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/18.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd0a5d4caa536ed869979c948f6104aae2a435253c55157d82817ecd8256ec01
+size 16778411
diff --git a/ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..92a1613aa654cb6891eca1b5e7615b6e80766b48
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/18.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5262d8e05bf3ea06faaa09b6e49421eacd9edea0e7515bddcca2d430e146f1a
+size 50332843
diff --git a/ckpts/universal/global_step40/zero/18.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/18.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..889be0c725657b91502c3a6b4ad4276cbb24f4b0
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/18.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d06b8717516567a36f233359d2ce065ca8231c3a8bccc47421ac5006639e4642
+size 9372
diff --git a/ckpts/universal/global_step40/zero/18.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/18.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1ac62ad07448c174f455e67901b2002f838e7e26
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/18.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de86109a9540f8496084ee408c52eb6b0958c31b5cc175fe82d27d6e58a68588
+size 9387
diff --git a/ckpts/universal/global_step40/zero/18.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/18.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..24d1197e1f8e39bbb0b879abc7c7dee06566d469
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/18.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a511fddab4b7a6eaa1eec9b4ada79eb10e1e1ce577466dfbfc8d95924a013d2
+size 9293
diff --git a/ckpts/universal/global_step40/zero/26.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/26.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..052469ef70e508aeddda18d22c8bf9d3762c9194
--- /dev/null
+++ 
b/ckpts/universal/global_step40/zero/26.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c74e93c082aab3c0c574ac431c4f1cccb18bb1b442ff4bcbfeee685b160dc92b +size 9372 diff --git a/ckpts/universal/global_step40/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..30fae0a1342e26fcb7b9cea13bcbfaacca353f7d --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf455aaf1bb37dacc8582f385079af75258c4e090732c490576296d5adf5d0a9 +size 9387 diff --git a/ckpts/universal/global_step40/zero/26.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/26.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..542b885fedeaad65957037a6bde1b620c96c2a78 --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b08635e7b4b0e73f47623ed955e9752df8f7c9d6a507de5f3797814aab5c5e3 +size 9293 diff --git a/ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..0c142f4705cfa2c9f11d29741b3d22e77b711eb8 --- /dev/null +++ b/ckpts/universal/global_step40/zero/4.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da90f831b2ef5651a86da8b080e30a803334e264744993bd01f9151384b86a56 +size 9293 diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a5274f09dde2db30aa213800647e19a7d8201981 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py @@ -0,0 +1,186 @@ +# Natural Language Toolkit: Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK corpus readers. The modules in this package provide functions +that can be used to read corpus fileids in a variety of formats. These +functions can be used to read both the corpus fileids that are +distributed in the NLTK corpus package, and corpus fileids that are part +of external corpora. + +Corpus Reader Functions +======================= +Each corpus module defines one or more "corpus reader functions", +which can be used to read documents from that corpus. These functions +take an argument, ``item``, which is used to indicate which document +should be read from the corpus: + +- If ``item`` is one of the unique identifiers listed in the corpus + module's ``items`` variable, then the corresponding document will + be loaded from the NLTK corpus package. +- If ``item`` is a fileid, then that file will be read. + +Additionally, corpus reader functions can be given lists of item +names; in which case, they will return a concatenation of the +corresponding documents. + +Corpus reader functions are named based on the type of information +they return. 
Some common examples, and their return types, are: + +- words(): list of str +- sents(): list of (list of str) +- paras(): list of (list of (list of str)) +- tagged_words(): list of (str,str) tuple +- tagged_sents(): list of (list of (str,str)) +- tagged_paras(): list of (list of (list of (str,str))) +- chunked_sents(): list of (Tree w/ (str,str) leaves) +- parsed_sents(): list of (Tree with str leaves) +- parsed_paras(): list of (list of (Tree with str leaves)) +- xml(): A single xml ElementTree +- raw(): unprocessed corpus contents + +For example, to read a list of the words in the Brown Corpus, use +``nltk.corpus.brown.words()``: + + >>> from nltk.corpus import brown + >>> print(", ".join(brown.words()[:6])) # only first 6 words + The, Fulton, County, Grand, Jury, said + +isort:skip_file +""" + +from nltk.corpus.reader.plaintext import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.api import * +from nltk.corpus.reader.tagged import * +from nltk.corpus.reader.cmudict import * +from nltk.corpus.reader.conll import * +from nltk.corpus.reader.chunked import * +from nltk.corpus.reader.wordlist import * +from nltk.corpus.reader.xmldocs import * +from nltk.corpus.reader.ppattach import * +from nltk.corpus.reader.senseval import * +from nltk.corpus.reader.ieer import * +from nltk.corpus.reader.sinica_treebank import * +from nltk.corpus.reader.bracket_parse import * +from nltk.corpus.reader.indian import * +from nltk.corpus.reader.toolbox import * +from nltk.corpus.reader.timit import * +from nltk.corpus.reader.ycoe import * +from nltk.corpus.reader.rte import * +from nltk.corpus.reader.string_category import * +from nltk.corpus.reader.propbank import * +from nltk.corpus.reader.verbnet import * +from nltk.corpus.reader.bnc import * +from nltk.corpus.reader.nps_chat import * +from nltk.corpus.reader.wordnet import * +from nltk.corpus.reader.switchboard import * +from nltk.corpus.reader.dependency import * +from nltk.corpus.reader.nombank import * +from nltk.corpus.reader.ipipan import * +from nltk.corpus.reader.pl196x import * +from nltk.corpus.reader.knbc import * +from nltk.corpus.reader.chasen import * +from nltk.corpus.reader.childes import * +from nltk.corpus.reader.aligned import * +from nltk.corpus.reader.lin import * +from nltk.corpus.reader.semcor import * +from nltk.corpus.reader.framenet import * +from nltk.corpus.reader.udhr import * +from nltk.corpus.reader.bnc import * +from nltk.corpus.reader.sentiwordnet import * +from nltk.corpus.reader.twitter import * +from nltk.corpus.reader.nkjp import * +from nltk.corpus.reader.crubadan import * +from nltk.corpus.reader.mte import * +from nltk.corpus.reader.reviews import * +from nltk.corpus.reader.opinion_lexicon import * +from nltk.corpus.reader.pros_cons import * +from nltk.corpus.reader.categorized_sents import * +from nltk.corpus.reader.comparative_sents import * +from nltk.corpus.reader.panlex_lite import * +from nltk.corpus.reader.panlex_swadesh import * +from nltk.corpus.reader.bcp47 import * + +# Make sure that nltk.corpus.reader.bracket_parse gives the module, not +# the function bracket_parse() defined in nltk.tree: +from nltk.corpus.reader import bracket_parse + +__all__ = [ + "CorpusReader", + "CategorizedCorpusReader", + "PlaintextCorpusReader", + "find_corpus_fileids", + "TaggedCorpusReader", + "CMUDictCorpusReader", + "ConllChunkCorpusReader", + "WordListCorpusReader", + "PPAttachmentCorpusReader", + "SensevalCorpusReader", + "IEERCorpusReader", + "ChunkedCorpusReader", + "SinicaTreebankCorpusReader", + 
"BracketParseCorpusReader", + "IndianCorpusReader", + "ToolboxCorpusReader", + "TimitCorpusReader", + "YCOECorpusReader", + "MacMorphoCorpusReader", + "SyntaxCorpusReader", + "AlpinoCorpusReader", + "RTECorpusReader", + "StringCategoryCorpusReader", + "EuroparlCorpusReader", + "CategorizedBracketParseCorpusReader", + "CategorizedTaggedCorpusReader", + "CategorizedPlaintextCorpusReader", + "PortugueseCategorizedPlaintextCorpusReader", + "tagged_treebank_para_block_reader", + "PropbankCorpusReader", + "VerbnetCorpusReader", + "BNCCorpusReader", + "ConllCorpusReader", + "XMLCorpusReader", + "NPSChatCorpusReader", + "SwadeshCorpusReader", + "WordNetCorpusReader", + "WordNetICCorpusReader", + "SwitchboardCorpusReader", + "DependencyCorpusReader", + "NombankCorpusReader", + "IPIPANCorpusReader", + "Pl196xCorpusReader", + "TEICorpusView", + "KNBCorpusReader", + "ChasenCorpusReader", + "CHILDESCorpusReader", + "AlignedCorpusReader", + "TimitTaggedCorpusReader", + "LinThesaurusCorpusReader", + "SemcorCorpusReader", + "FramenetCorpusReader", + "UdhrCorpusReader", + "BNCCorpusReader", + "SentiWordNetCorpusReader", + "SentiSynset", + "TwitterCorpusReader", + "NKJPCorpusReader", + "CrubadanCorpusReader", + "MTECorpusReader", + "ReviewsCorpusReader", + "OpinionLexiconCorpusReader", + "ProsConsCorpusReader", + "CategorizedSentencesCorpusReader", + "ComparativeSentencesCorpusReader", + "PanLexLiteCorpusReader", + "NonbreakingPrefixesCorpusReader", + "UnicharsCorpusReader", + "MWAPPDBCorpusReader", + "PanlexSwadeshCorpusReader", + "BCP47CorpusReader", +] diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7058a9af22947cbd480ae83e93c794e21d2d97c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/aligned.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/aligned.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77efc65776f5cb031faeb71bce44f41df4f2c53f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/aligned.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f0b956dd241e7525b10df61cf22cfeb875a60fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70014ea5bb1ea97ab244c71030872afb0eabae26 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2875e7c63692863ed304bb0aee3b2d170e06b2d1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chasen.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chasen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..563c0c57de10656633fe7e136f6bf57da7704657 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chasen.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/childes.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/childes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3af230e7958decac3453621dd4a0e572b0c64c04 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/childes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chunked.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chunked.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0011a95aaccb274df07ff109c828296bc7afc5fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chunked.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/comparative_sents.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/comparative_sents.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23f8154a74854e7e5c3a3cbcd3988d748fc3fb1e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/comparative_sents.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/conll.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/conll.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25a21901133fde72a1b1f213bde5b712e04bac33 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/conll.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/crubadan.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/crubadan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eea8beae182a3042560470b9d70d4faecd8e42dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/crubadan.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7232bb884194722e18bb6b5ef82dbf9dcceac1c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ieer.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ieer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0fd33402728f2a7f13b811ad2eeaa597bd45883 Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ieer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/indian.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/indian.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd3fef47494312e6907fc899f52af03bc87fe4e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/indian.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ipipan.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ipipan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4dd14af8b8b6f129be9f25e7de5247e89fe0c3b Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ipipan.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/knbc.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/knbc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40e21d2854c17ebeb86fa7cecf7226f8999adbb6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/knbc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfa403873d3f3ad0915510238beb37c28a6010e6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/markdown.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/markdown.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22864f5986c01fd6910c13d04f6d651cd22fc607 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/markdown.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/mte.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/mte.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a69d98b61d15cf5796db0f8d4f12c4e88bbfc0bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/mte.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nombank.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nombank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..207c147e4c4bcfcf2a43d989fab560eb8ecd10fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nombank.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nps_chat.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nps_chat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cef141f9d0542874f6cd80ea553f4c4bb81abc98 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nps_chat.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/opinion_lexicon.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/opinion_lexicon.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a872ab93e3829b2070063875ee6cd19b79a64bd5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/opinion_lexicon.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_swadesh.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_swadesh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a4e591668dc21e9b3cb3370d04f2e96d975951a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_swadesh.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7613ad2ac060e42867a1a7f8a1c31c31b44f707 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9987f5bef1342c78e0b209b9e4943b35cff334b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79b121c601c85b5fdef9d60bdfde8febdeeebf97 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pros_cons.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pros_cons.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a56bc46bafce9f173dcd9626625431110dbd0e4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pros_cons.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/reviews.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/reviews.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccacfcfbfddaccca29c433efb66fef81bd078ea7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/reviews.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a313e1dab09662568ffbf7e0f156ee795107609f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/senseval.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/senseval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d173719ef1fddf266163cffe2dbcb9454e10ffa4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/senseval.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sentiwordnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sentiwordnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4be0c1ea0d92b15f0a66412b4596d03daecb1c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sentiwordnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sinica_treebank.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sinica_treebank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88ad0a8be929203bad91bc459a9472aa780cfd6e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sinica_treebank.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/string_category.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/string_category.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1885a63ce8a04e8b4bc802667b16308e02126194 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/string_category.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/switchboard.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/switchboard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88166a69778fddd8b692b80a6251e8ba823e3845 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/switchboard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/tagged.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/tagged.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b550697917a3879143015b07db998d42877d4333 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/tagged.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/timit.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/timit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9603aad437a88101f01dd1d96ac5f97920c6c058 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/timit.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..323051cd342e4e8e64c961dd6a25a3244c9f7023 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/twitter.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/twitter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e612008fa44054432156de54ff936d54807864d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/twitter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/udhr.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/udhr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73c0c59b821db42042520a7454dd12abd5c9d87f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/udhr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4738e2ce64193563aac8db51f608c199af5735f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordlist.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordlist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e08ff4075e5f4839345bcfc1cd61808cb8ca5e4b Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordlist.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b9669017a65ca9678a8ff5af4a2bfbdca0ca94e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/xmldocs.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/xmldocs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71a90eb24aef97a9bcd4a34b047a7002935712f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/xmldocs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py new file mode 100644 index 0000000000000000000000000000000000000000..93caf6233b5d1ee4d66eff0009a0d73fceb67904 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py @@ -0,0 +1,154 @@ +# Natural Language Toolkit: Aligned Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# Author: Steven Bird +# For license information, see LICENSE.TXT + +from nltk.corpus.reader.api import CorpusReader +from nltk.corpus.reader.util import ( + StreamBackedCorpusView, + concat, + read_alignedsent_block, +) +from nltk.tokenize import RegexpTokenizer, WhitespaceTokenizer +from nltk.translate import AlignedSent, Alignment + + +class AlignedCorpusReader(CorpusReader): + """ + Reader for corpora of word-aligned sentences. Tokens are assumed + to be separated by whitespace. Sentences begin on separate lines. 
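    A minimal usage sketch (the corpus path, file pattern, and file layout
    below are assumptions for illustration): each aligned pair is stored as
    three newline-separated pieces -- a source-sentence line, a
    target-sentence line, and an alignment string such as ``0-0 1-1 2-2``.

        >>> reader = AlignedCorpusReader('/path/to/aligned', r'.*\.txt') # doctest: +SKIP
        >>> pair = reader.aligned_sents()[0] # doctest: +SKIP
        >>> pair.words, pair.mots, pair.alignment # doctest: +SKIP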
+ """ + + def __init__( + self, + root, + fileids, + sep="/", + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + alignedsent_block_reader=read_alignedsent_block, + encoding="latin1", + ): + """ + Construct a new Aligned Corpus reader for a set of documents + located at the given root directory. Example usage: + + >>> root = '/...path to corpus.../' + >>> reader = AlignedCorpusReader(root, '.*', '.txt') # doctest: +SKIP + + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._sep = sep + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._alignedsent_block_reader = alignedsent_block_reader + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + AlignedSentCorpusView( + fileid, + enc, + False, + False, + self._word_tokenizer, + self._sent_tokenizer, + self._alignedsent_block_reader, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + """ + return concat( + [ + AlignedSentCorpusView( + fileid, + enc, + False, + True, + self._word_tokenizer, + self._sent_tokenizer, + self._alignedsent_block_reader, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def aligned_sents(self, fileids=None): + """ + :return: the given file(s) as a list of AlignedSent objects. + :rtype: list(AlignedSent) + """ + return concat( + [ + AlignedSentCorpusView( + fileid, + enc, + True, + True, + self._word_tokenizer, + self._sent_tokenizer, + self._alignedsent_block_reader, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class AlignedSentCorpusView(StreamBackedCorpusView): + """ + A specialized corpus view for aligned sentences. + ``AlignedSentCorpusView`` objects are typically created by + ``AlignedCorpusReader`` (not directly by nltk users). 
+ """ + + def __init__( + self, + corpus_file, + encoding, + aligned, + group_by_sent, + word_tokenizer, + sent_tokenizer, + alignedsent_block_reader, + ): + self._aligned = aligned + self._group_by_sent = group_by_sent + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._alignedsent_block_reader = alignedsent_block_reader + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + block = [ + self._word_tokenizer.tokenize(sent_str) + for alignedsent_str in self._alignedsent_block_reader(stream) + for sent_str in self._sent_tokenizer.tokenize(alignedsent_str) + ] + if self._aligned: + block[2] = Alignment.fromstring( + " ".join(block[2]) + ) # kludge; we shouldn't have tokenized the alignment string + block = [AlignedSent(*block)] + elif self._group_by_sent: + block = [block[0]] + else: + block = block[0] + + return block diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py new file mode 100644 index 0000000000000000000000000000000000000000..c5d3ff67b94dcc6b476e7125c62bbe41e03603f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py @@ -0,0 +1,237 @@ +# Natural Language Toolkit: Penn Treebank Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +""" +Corpus reader for corpora that consist of parenthesis-delineated parse trees. +""" + +import sys + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag +from nltk.tree import Tree + +# we use [^\s()]+ instead of \S+? to avoid matching () +SORTTAGWRD = re.compile(r"\((\d+) ([^\s()]+) ([^\s()]+)\)") +TAGWORD = re.compile(r"\(([^\s()]+) ([^\s()]+)\)") +WORD = re.compile(r"\([^\s()]+ ([^\s()]+)\)") +EMPTY_BRACKETS = re.compile(r"\s*\(\s*\(") + + +class BracketParseCorpusReader(SyntaxCorpusReader): + """ + Reader for corpora that consist of parenthesis-delineated parse trees, + like those found in the "combined" section of the Penn Treebank, + e.g. "(S (NP (DT the) (JJ little) (NN dog)) (VP (VBD barked)))". + + """ + + def __init__( + self, + root, + fileids, + comment_char=None, + detect_blocks="unindented_paren", + encoding="utf8", + tagset=None, + ): + """ + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + :param comment_char: The character which can appear at the start of + a line to indicate that the rest of the line is a comment. + :param detect_blocks: The method that is used to find blocks + in the corpus; can be 'unindented_paren' (every unindented + parenthesis starts a new parse) or 'sexpr' (brackets are + matched). + :param tagset: The name of the tagset used by this corpus, to be used + for normalizing or converting the POS tags returned by the + ``tagged_...()`` methods. + """ + SyntaxCorpusReader.__init__(self, root, fileids, encoding) + self._comment_char = comment_char + self._detect_blocks = detect_blocks + self._tagset = tagset + + def _read_block(self, stream): + if self._detect_blocks == "sexpr": + return read_sexpr_block(stream, comment_char=self._comment_char) + elif self._detect_blocks == "blankline": + return read_blankline_block(stream) + elif self._detect_blocks == "unindented_paren": + # Tokens start with unindented left parens. 
+ toks = read_regexp_block(stream, start_re=r"^\(") + # Strip any comments out of the tokens. + if self._comment_char: + toks = [ + re.sub("(?m)^%s.*" % re.escape(self._comment_char), "", tok) + for tok in toks + ] + return toks + else: + assert 0, "bad block type" + + def _normalize(self, t): + # Replace leaves of the form (!), (,), with (! !), (, ,) + t = re.sub(r"\((.)\)", r"(\1 \1)", t) + # Replace leaves of the form (tag word root) with (tag word) + t = re.sub(r"\(([^\s()]+) ([^\s()]+) [^\s()]+\)", r"(\1 \2)", t) + return t + + def _parse(self, t): + try: + tree = Tree.fromstring(self._normalize(t)) + # If there's an empty node at the top, strip it off + if tree.label() == "" and len(tree) == 1: + return tree[0] + else: + return tree + + except ValueError as e: + sys.stderr.write("Bad tree detected; trying to recover...\n") + # Try to recover, if we can: + if e.args == ("mismatched parens",): + for n in range(1, 5): + try: + v = Tree(self._normalize(t + ")" * n)) + sys.stderr.write( + " Recovered by adding %d close " "paren(s)\n" % n + ) + return v + except ValueError: + pass + # Try something else: + sys.stderr.write(" Recovered by returning a flat parse.\n") + # sys.stderr.write(' '.join(t.split())+'\n') + return Tree("S", self._tag(t)) + + def _tag(self, t, tagset=None): + tagged_sent = [(w, p) for (p, w) in TAGWORD.findall(self._normalize(t))] + if tagset and tagset != self._tagset: + tagged_sent = [ + (w, map_tag(self._tagset, tagset, p)) for (w, p) in tagged_sent + ] + return tagged_sent + + def _word(self, t): + return WORD.findall(self._normalize(t)) + + +class CategorizedBracketParseCorpusReader( + CategorizedCorpusReader, BracketParseCorpusReader +): + """ + A reader for parsed corpora whose documents are + divided into categories based on their file identifiers. + @author: Nathan Schneider + """ + + def __init__(self, *args, **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (C{cat_pattern}, C{cat_map}, and C{cat_file}) are passed to + the L{CategorizedCorpusReader constructor + }. The remaining arguments + are passed to the L{BracketParseCorpusReader constructor + }. + """ + CategorizedCorpusReader.__init__(self, kwargs) + BracketParseCorpusReader.__init__(self, *args, **kwargs) + + def tagged_words(self, fileids=None, categories=None, tagset=None): + return super().tagged_words(self._resolve(fileids, categories), tagset) + + def tagged_sents(self, fileids=None, categories=None, tagset=None): + return super().tagged_sents(self._resolve(fileids, categories), tagset) + + def tagged_paras(self, fileids=None, categories=None, tagset=None): + return super().tagged_paras(self._resolve(fileids, categories), tagset) + + def parsed_words(self, fileids=None, categories=None): + return super().parsed_words(self._resolve(fileids, categories)) + + def parsed_sents(self, fileids=None, categories=None): + return super().parsed_sents(self._resolve(fileids, categories)) + + def parsed_paras(self, fileids=None, categories=None): + return super().parsed_paras(self._resolve(fileids, categories)) + + +class AlpinoCorpusReader(BracketParseCorpusReader): + """ + Reader for the Alpino Dutch Treebank. + This corpus has a lexical breakdown structure embedded, as read by `_parse` + Unfortunately this puts punctuation and some other words out of the sentence + order in the xml element tree. This is no good for `tag_` and `word_` + `_tag` and `_word` will be overridden to use a non-default new parameter 'ordered' + to the overridden _normalize function. 
The _parse function can then remain + untouched. + """ + + def __init__(self, root, encoding="ISO-8859-1", tagset=None): + BracketParseCorpusReader.__init__( + self, + root, + r"alpino\.xml", + detect_blocks="blankline", + encoding=encoding, + tagset=tagset, + ) + + def _normalize(self, t, ordered=False): + """Normalize the xml sentence element in t. + The sentence elements , although embedded in a few overall + xml elements, are separated by blank lines. That's how the reader can + deliver them one at a time. + Each sentence has a few category subnodes that are of no use to us. + The remaining word nodes may or may not appear in the proper order. + Each word node has attributes, among which: + - begin : the position of the word in the sentence + - pos : Part of Speech: the Tag + - word : the actual word + The return value is a string with all xml elementes replaced by + clauses: either a cat clause with nested clauses, or a word clause. + The order of the bracket clauses closely follows the xml. + If ordered == True, the word clauses include an order sequence number. + If ordered == False, the word clauses only have pos and word parts. + """ + if t[:10] != "', r"(\1", t) + if ordered: + t = re.sub( + r' ', + r"(\1 \2 \3)", + t, + ) + else: + t = re.sub(r' ', r"(\1 \2)", t) + t = re.sub(r" ", r")", t) + t = re.sub(r".*", r"", t) + t = re.sub(r"", r"", t) + return t + + def _tag(self, t, tagset=None): + tagged_sent = [ + (int(o), w, p) + for (o, p, w) in SORTTAGWRD.findall(self._normalize(t, ordered=True)) + ] + tagged_sent.sort() + if tagset and tagset != self._tagset: + tagged_sent = [ + (w, map_tag(self._tagset, tagset, p)) for (o, w, p) in tagged_sent + ] + else: + tagged_sent = [(w, p) for (o, w, p) in tagged_sent] + return tagged_sent + + def _word(self, t): + """Return a correctly ordered list if words""" + tagged_sent = self._tag(t) + return [w for (w, p) in tagged_sent] diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/categorized_sents.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/categorized_sents.py new file mode 100644 index 0000000000000000000000000000000000000000..92bfe47210e9db56aa1cde4fe27a41f4133909c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/categorized_sents.py @@ -0,0 +1,168 @@ +# Natural Language Toolkit: Categorized Sentences Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader structured for corpora that contain one instance on each row. +This CorpusReader is specifically used for the Subjectivity Dataset and the +Sentence Polarity Dataset. + +- Subjectivity Dataset information - + +Authors: Bo Pang and Lillian Lee. +Url: https://www.cs.cornell.edu/people/pabo/movie-review-data + +Distributed with permission. + +Related papers: + +- Bo Pang and Lillian Lee. "A Sentimental Education: Sentiment Analysis Using + Subjectivity Summarization Based on Minimum Cuts". Proceedings of the ACL, + 2004. + +- Sentence Polarity Dataset information - + +Authors: Bo Pang and Lillian Lee. +Url: https://www.cs.cornell.edu/people/pabo/movie-review-data + +Related papers: + +- Bo Pang and Lillian Lee. "Seeing stars: Exploiting class relationships for + sentiment categorization with respect to rating scales". Proceedings of the + ACL, 2005. 
+""" + +from nltk.corpus.reader.api import * +from nltk.tokenize import * + + +class CategorizedSentencesCorpusReader(CategorizedCorpusReader, CorpusReader): + """ + A reader for corpora in which each row represents a single instance, mainly + a sentence. Istances are divided into categories based on their file identifiers + (see CategorizedCorpusReader). + Since many corpora allow rows that contain more than one sentence, it is + possible to specify a sentence tokenizer to retrieve all sentences instead + than all rows. + + Examples using the Subjectivity Dataset: + + >>> from nltk.corpus import subjectivity + >>> subjectivity.sents()[23] # doctest: +NORMALIZE_WHITESPACE + ['television', 'made', 'him', 'famous', ',', 'but', 'his', 'biggest', 'hits', + 'happened', 'off', 'screen', '.'] + >>> subjectivity.categories() + ['obj', 'subj'] + >>> subjectivity.words(categories='subj') + ['smart', 'and', 'alert', ',', 'thirteen', ...] + + Examples using the Sentence Polarity Dataset: + + >>> from nltk.corpus import sentence_polarity + >>> sentence_polarity.sents() # doctest: +NORMALIZE_WHITESPACE + [['simplistic', ',', 'silly', 'and', 'tedious', '.'], ["it's", 'so', 'laddish', + 'and', 'juvenile', ',', 'only', 'teenage', 'boys', 'could', 'possibly', 'find', + 'it', 'funny', '.'], ...] + >>> sentence_polarity.categories() + ['neg', 'pos'] + """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, + root, + fileids, + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=None, + encoding="utf8", + **kwargs + ): + """ + :param root: The root directory for the corpus. + :param fileids: a list or regexp specifying the fileids in the corpus. + :param word_tokenizer: a tokenizer for breaking sentences or paragraphs + into words. Default: `WhitespaceTokenizer` + :param sent_tokenizer: a tokenizer for breaking paragraphs into sentences. + :param encoding: the encoding that should be used to read the corpus. + :param kwargs: additional parameters passed to CategorizedCorpusReader. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + CategorizedCorpusReader.__init__(self, kwargs) + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + + def sents(self, fileids=None, categories=None): + """ + Return all sentences in the corpus or in the specified file(s). + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. + :param categories: a list specifying the categories whose sentences have + to be returned. + :return: the given file(s) as a list of sentences. + Each sentence is tokenized using the specified word_tokenizer. + :rtype: list(list(str)) + """ + fileids = self._resolve(fileids, categories) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def words(self, fileids=None, categories=None): + """ + Return all words and punctuation symbols in the corpus or in the specified + file(s). + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :param categories: a list specifying the categories whose words have to + be returned. + :return: the given file(s) as a list of words and punctuation symbols. 
+ :rtype: list(str) + """ + fileids = self._resolve(fileids, categories) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_sent_block(self, stream): + sents = [] + for i in range(20): # Read 20 lines at a time. + line = stream.readline() + if not line: + continue + if self._sent_tokenizer: + sents.extend( + [ + self._word_tokenizer.tokenize(sent) + for sent in self._sent_tokenizer.tokenize(line) + ] + ) + else: + sents.append(self._word_tokenizer.tokenize(line)) + return sents + + def _read_word_block(self, stream): + words = [] + for sent in self._read_sent_block(stream): + words.extend(sent) + return words diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/conll.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/conll.py new file mode 100644 index 0000000000000000000000000000000000000000..3c3b30db900ee4eb4648b74d5904af04b60e1692 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/conll.py @@ -0,0 +1,579 @@ +# Natural Language Toolkit: CONLL Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Read CoNLL-style chunk fileids. +""" + +import textwrap + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag +from nltk.tree import Tree +from nltk.util import LazyConcatenation, LazyMap + + +class ConllCorpusReader(CorpusReader): + """ + A corpus reader for CoNLL-style files. These files consist of a + series of sentences, separated by blank lines. Each sentence is + encoded using a table (or "grid") of values, where each line + corresponds to a single word, and each column corresponds to an + annotation type. The set of columns used by CoNLL-style files can + vary from corpus to corpus; the ``ConllCorpusReader`` constructor + therefore takes an argument, ``columntypes``, which is used to + specify the columns that are used by a given corpus. By default + columns are split by consecutive whitespaces, with the + ``separator`` argument you can set a string to split by (e.g. + ``\'\t\'``). + + + @todo: Add support for reading from corpora where different + parallel files contain different columns. + @todo: Possibly add caching of the grid corpus view? This would + allow the same grid view to be used by different data access + methods (eg words() and parsed_sents() could both share the + same grid corpus view object). + @todo: Better support for -DOCSTART-. Currently, we just ignore + it, but it could be used to define methods that retrieve a + document at a time (eg parsed_documents()). + """ + + # ///////////////////////////////////////////////////////////////// + # Column Types + # ///////////////////////////////////////////////////////////////// + + WORDS = "words" #: column type for words + POS = "pos" #: column type for part-of-speech tags + TREE = "tree" #: column type for parse trees + CHUNK = "chunk" #: column type for chunk structures + NE = "ne" #: column type for named entities + SRL = "srl" #: column type for semantic role labels + IGNORE = "ignore" #: column type for column that should be ignored + + #: A list of all column types supported by the conll corpus reader. 
+ COLUMN_TYPES = (WORDS, POS, TREE, CHUNK, NE, SRL, IGNORE) + + # ///////////////////////////////////////////////////////////////// + # Constructor + # ///////////////////////////////////////////////////////////////// + + def __init__( + self, + root, + fileids, + columntypes, + chunk_types=None, + root_label="S", + pos_in_tree=False, + srl_includes_roleset=True, + encoding="utf8", + tree_class=Tree, + tagset=None, + separator=None, + ): + for columntype in columntypes: + if columntype not in self.COLUMN_TYPES: + raise ValueError("Bad column type %r" % columntype) + if isinstance(chunk_types, str): + chunk_types = [chunk_types] + self._chunk_types = chunk_types + self._colmap = {c: i for (i, c) in enumerate(columntypes)} + self._pos_in_tree = pos_in_tree + self._root_label = root_label # for chunks + self._srl_includes_roleset = srl_includes_roleset + self._tree_class = tree_class + CorpusReader.__init__(self, root, fileids, encoding) + self._tagset = tagset + self.sep = separator + + # ///////////////////////////////////////////////////////////////// + # Data Access Methods + # ///////////////////////////////////////////////////////////////// + + def words(self, fileids=None): + self._require(self.WORDS) + return LazyConcatenation(LazyMap(self._get_words, self._grids(fileids))) + + def sents(self, fileids=None): + self._require(self.WORDS) + return LazyMap(self._get_words, self._grids(fileids)) + + def tagged_words(self, fileids=None, tagset=None): + self._require(self.WORDS, self.POS) + + def get_tagged_words(grid): + return self._get_tagged_words(grid, tagset) + + return LazyConcatenation(LazyMap(get_tagged_words, self._grids(fileids))) + + def tagged_sents(self, fileids=None, tagset=None): + self._require(self.WORDS, self.POS) + + def get_tagged_words(grid): + return self._get_tagged_words(grid, tagset) + + return LazyMap(get_tagged_words, self._grids(fileids)) + + def chunked_words(self, fileids=None, chunk_types=None, tagset=None): + self._require(self.WORDS, self.POS, self.CHUNK) + if chunk_types is None: + chunk_types = self._chunk_types + + def get_chunked_words(grid): # capture chunk_types as local var + return self._get_chunked_words(grid, chunk_types, tagset) + + return LazyConcatenation(LazyMap(get_chunked_words, self._grids(fileids))) + + def chunked_sents(self, fileids=None, chunk_types=None, tagset=None): + self._require(self.WORDS, self.POS, self.CHUNK) + if chunk_types is None: + chunk_types = self._chunk_types + + def get_chunked_words(grid): # capture chunk_types as local var + return self._get_chunked_words(grid, chunk_types, tagset) + + return LazyMap(get_chunked_words, self._grids(fileids)) + + def parsed_sents(self, fileids=None, pos_in_tree=None, tagset=None): + self._require(self.WORDS, self.POS, self.TREE) + if pos_in_tree is None: + pos_in_tree = self._pos_in_tree + + def get_parsed_sent(grid): # capture pos_in_tree as local var + return self._get_parsed_sent(grid, pos_in_tree, tagset) + + return LazyMap(get_parsed_sent, self._grids(fileids)) + + def srl_spans(self, fileids=None): + self._require(self.SRL) + return LazyMap(self._get_srl_spans, self._grids(fileids)) + + def srl_instances(self, fileids=None, pos_in_tree=None, flatten=True): + self._require(self.WORDS, self.POS, self.TREE, self.SRL) + if pos_in_tree is None: + pos_in_tree = self._pos_in_tree + + def get_srl_instances(grid): # capture pos_in_tree as local var + return self._get_srl_instances(grid, pos_in_tree) + + result = LazyMap(get_srl_instances, self._grids(fileids)) + if flatten: + result = 
LazyConcatenation(result) + return result + + def iob_words(self, fileids=None, tagset=None): + """ + :return: a list of word/tag/IOB tuples + :rtype: list(tuple) + :param fileids: the list of fileids that make up this corpus + :type fileids: None or str or list + """ + self._require(self.WORDS, self.POS, self.CHUNK) + + def get_iob_words(grid): + return self._get_iob_words(grid, tagset) + + return LazyConcatenation(LazyMap(get_iob_words, self._grids(fileids))) + + def iob_sents(self, fileids=None, tagset=None): + """ + :return: a list of lists of word/tag/IOB tuples + :rtype: list(list) + :param fileids: the list of fileids that make up this corpus + :type fileids: None or str or list + """ + self._require(self.WORDS, self.POS, self.CHUNK) + + def get_iob_words(grid): + return self._get_iob_words(grid, tagset) + + return LazyMap(get_iob_words, self._grids(fileids)) + + # ///////////////////////////////////////////////////////////////// + # Grid Reading + # ///////////////////////////////////////////////////////////////// + + def _grids(self, fileids=None): + # n.b.: we could cache the object returned here (keyed on + # fileids), which would let us reuse the same corpus view for + # different things (eg srl and parse trees). + return concat( + [ + StreamBackedCorpusView(fileid, self._read_grid_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_grid_block(self, stream): + grids = [] + for block in read_blankline_block(stream): + block = block.strip() + if not block: + continue + + grid = [line.split(self.sep) for line in block.split("\n")] + + # If there's a docstart row, then discard. ([xx] eventually it + # would be good to actually use it) + if grid[0][self._colmap.get("words", 0)] == "-DOCSTART-": + del grid[0] + + # Check that the grid is consistent. + for row in grid: + if len(row) != len(grid[0]): + raise ValueError("Inconsistent number of columns:\n%s" % block) + grids.append(grid) + return grids + + # ///////////////////////////////////////////////////////////////// + # Transforms + # ///////////////////////////////////////////////////////////////// + # given a grid, transform it into some representation (e.g., + # a list of words or a parse tree). + + def _get_words(self, grid): + return self._get_column(grid, self._colmap["words"]) + + def _get_tagged_words(self, grid, tagset=None): + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + return list(zip(self._get_column(grid, self._colmap["words"]), pos_tags)) + + def _get_iob_words(self, grid, tagset=None): + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + return list( + zip( + self._get_column(grid, self._colmap["words"]), + pos_tags, + self._get_column(grid, self._colmap["chunk"]), + ) + ) + + def _get_chunked_words(self, grid, chunk_types, tagset=None): + # n.b.: this method is very similar to conllstr2tree. 
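# For intuition, a small made-up grid (an assumption, not corpus data):
#   words = ["He", "ran", "home", "."], pos = ["PRP", "VBD", "NN", "."],
#   chunk tags = ["B-NP", "B-VP", "B-NP", "O"]
# comes out of the stack-based loop below as
#   Tree('S', [Tree('NP', [('He', 'PRP')]),
#              Tree('VP', [('ran', 'VBD')]),
#              Tree('NP', [('home', 'NN')]),
#              ('.', '.')])
# (assuming 'NP' and 'VP' are among the requested chunk_types and the
# root label is the default 'S').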
+ words = self._get_column(grid, self._colmap["words"]) + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + chunk_tags = self._get_column(grid, self._colmap["chunk"]) + + stack = [Tree(self._root_label, [])] + + for (word, pos_tag, chunk_tag) in zip(words, pos_tags, chunk_tags): + if chunk_tag == "O": + state, chunk_type = "O", "" + else: + (state, chunk_type) = chunk_tag.split("-") + # If it's a chunk we don't care about, treat it as O. + if chunk_types is not None and chunk_type not in chunk_types: + state = "O" + # Treat a mismatching I like a B. + if state == "I" and chunk_type != stack[-1].label(): + state = "B" + # For B or I: close any open chunks + if state in "BO" and len(stack) == 2: + stack.pop() + # For B: start a new chunk. + if state == "B": + new_chunk = Tree(chunk_type, []) + stack[-1].append(new_chunk) + stack.append(new_chunk) + # Add the word token. + stack[-1].append((word, pos_tag)) + + return stack[0] + + def _get_parsed_sent(self, grid, pos_in_tree, tagset=None): + words = self._get_column(grid, self._colmap["words"]) + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + parse_tags = self._get_column(grid, self._colmap["tree"]) + + treestr = "" + for (word, pos_tag, parse_tag) in zip(words, pos_tags, parse_tags): + if word == "(": + word = "-LRB-" + if word == ")": + word = "-RRB-" + if pos_tag == "(": + pos_tag = "-LRB-" + if pos_tag == ")": + pos_tag = "-RRB-" + (left, right) = parse_tag.split("*") + right = right.count(")") * ")" # only keep ')'. + treestr += f"{left} ({pos_tag} {word}) {right}" + try: + tree = self._tree_class.fromstring(treestr) + except (ValueError, IndexError): + tree = self._tree_class.fromstring(f"({self._root_label} {treestr})") + + if not pos_in_tree: + for subtree in tree.subtrees(): + for i, child in enumerate(subtree): + if ( + isinstance(child, Tree) + and len(child) == 1 + and isinstance(child[0], str) + ): + subtree[i] = (child[0], child.label()) + + return tree + + def _get_srl_spans(self, grid): + """ + list of list of (start, end), tag) tuples + """ + if self._srl_includes_roleset: + predicates = self._get_column(grid, self._colmap["srl"] + 1) + start_col = self._colmap["srl"] + 2 + else: + predicates = self._get_column(grid, self._colmap["srl"]) + start_col = self._colmap["srl"] + 1 + + # Count how many predicates there are. This tells us how many + # columns to expect for SRL data. 
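+ # There is one SRL column per overt predicate (any row whose predicate
+ # cell is not "-"), laid out left to right starting at start_col. Each
+ # cell has the form "left*right", e.g. "(A0*", "*", or "*)": every "(TAG"
+ # opens a span at the current word, and every ")" closes the most recently
+ # opened span, producing ((start, end), tag) pairs with an exclusive end
+ # index.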
+ num_preds = len([p for p in predicates if p != "-"]) + + spanlists = [] + for i in range(num_preds): + col = self._get_column(grid, start_col + i) + spanlist = [] + stack = [] + for wordnum, srl_tag in enumerate(col): + (left, right) = srl_tag.split("*") + for tag in left.split("("): + if tag: + stack.append((tag, wordnum)) + for i in range(right.count(")")): + (tag, start) = stack.pop() + spanlist.append(((start, wordnum + 1), tag)) + spanlists.append(spanlist) + + return spanlists + + def _get_srl_instances(self, grid, pos_in_tree): + tree = self._get_parsed_sent(grid, pos_in_tree) + spanlists = self._get_srl_spans(grid) + if self._srl_includes_roleset: + predicates = self._get_column(grid, self._colmap["srl"] + 1) + rolesets = self._get_column(grid, self._colmap["srl"]) + else: + predicates = self._get_column(grid, self._colmap["srl"]) + rolesets = [None] * len(predicates) + + instances = ConllSRLInstanceList(tree) + for wordnum, predicate in enumerate(predicates): + if predicate == "-": + continue + # Decide which spanlist to use. Don't assume that they're + # sorted in the same order as the predicates (even though + # they usually are). + for spanlist in spanlists: + for (start, end), tag in spanlist: + if wordnum in range(start, end) and tag in ("V", "C-V"): + break + else: + continue + break + else: + raise ValueError("No srl column found for %r" % predicate) + instances.append( + ConllSRLInstance(tree, wordnum, predicate, rolesets[wordnum], spanlist) + ) + + return instances + + # ///////////////////////////////////////////////////////////////// + # Helper Methods + # ///////////////////////////////////////////////////////////////// + + def _require(self, *columntypes): + for columntype in columntypes: + if columntype not in self._colmap: + raise ValueError( + "This corpus does not contain a %s " "column." % columntype + ) + + @staticmethod + def _get_column(grid, column_index): + return [grid[i][column_index] for i in range(len(grid))] + + +class ConllSRLInstance: + """ + An SRL instance from a CoNLL corpus, which identifies and + providing labels for the arguments of a single verb. + """ + + # [xx] add inst.core_arguments, inst.argm_arguments? + + def __init__(self, tree, verb_head, verb_stem, roleset, tagged_spans): + self.verb = [] + """A list of the word indices of the words that compose the + verb whose arguments are identified by this instance. + This will contain multiple word indices when multi-word + verbs are used (e.g. 'turn on').""" + + self.verb_head = verb_head + """The word index of the head word of the verb whose arguments + are identified by this instance. E.g., for a sentence that + uses the verb 'turn on,' ``verb_head`` will be the word index + of the word 'turn'.""" + + self.verb_stem = verb_stem + + self.roleset = roleset + + self.arguments = [] + """A list of ``(argspan, argid)`` tuples, specifying the location + and type for each of the arguments identified by this + instance. ``argspan`` is a tuple ``start, end``, indicating + that the argument consists of the ``words[start:end]``.""" + + self.tagged_spans = tagged_spans + """A list of ``(span, id)`` tuples, specifying the location and + type for each of the arguments, as well as the verb pieces, + that make up this instance.""" + + self.tree = tree + """The parse tree for the sentence containing this instance.""" + + self.words = tree.leaves() + """A list of the words in the sentence containing this + instance.""" + + # Fill in the self.verb and self.arguments values. 
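+ # Spans tagged "V" (verb) or "C-V" (continuation of the verb) contribute
+ # their word indices to self.verb; every other tagged span is recorded in
+ # self.arguments as an ((start, end), argid) pair.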
+ for (start, end), tag in tagged_spans: + if tag in ("V", "C-V"): + self.verb += list(range(start, end)) + else: + self.arguments.append(((start, end), tag)) + + def __repr__(self): + # Originally, its: + ##plural = 's' if len(self.arguments) != 1 else '' + plural = "s" if len(self.arguments) != 1 else "" + return "" % ( + (self.verb_stem, len(self.arguments), plural) + ) + + def pprint(self): + verbstr = " ".join(self.words[i][0] for i in self.verb) + hdr = f"SRL for {verbstr!r} (stem={self.verb_stem!r}):\n" + s = "" + for i, word in enumerate(self.words): + if isinstance(word, tuple): + word = word[0] + for (start, end), argid in self.arguments: + if i == start: + s += "[%s " % argid + if i == end: + s += "] " + if i in self.verb: + word = "<<%s>>" % word + s += word + " " + return hdr + textwrap.fill( + s.replace(" ]", "]"), initial_indent=" ", subsequent_indent=" " + ) + + +class ConllSRLInstanceList(list): + """ + Set of instances for a single sentence + """ + + def __init__(self, tree, instances=()): + self.tree = tree + list.__init__(self, instances) + + def __str__(self): + return self.pprint() + + def pprint(self, include_tree=False): + # Sanity check: trees should be the same + for inst in self: + if inst.tree != self.tree: + raise ValueError("Tree mismatch!") + + # If desired, add trees: + if include_tree: + words = self.tree.leaves() + pos = [None] * len(words) + synt = ["*"] * len(words) + self._tree2conll(self.tree, 0, words, pos, synt) + + s = "" + for i in range(len(words)): + # optional tree columns + if include_tree: + s += "%-20s " % words[i] + s += "%-8s " % pos[i] + s += "%15s*%-8s " % tuple(synt[i].split("*")) + + # verb head column + for inst in self: + if i == inst.verb_head: + s += "%-20s " % inst.verb_stem + break + else: + s += "%-20s " % "-" + # Remaining columns: self + for inst in self: + argstr = "*" + for (start, end), argid in inst.tagged_spans: + if i == start: + argstr = f"({argid}{argstr}" + if i == (end - 1): + argstr += ")" + s += "%-12s " % argstr + s += "\n" + return s + + def _tree2conll(self, tree, wordnum, words, pos, synt): + assert isinstance(tree, Tree) + if len(tree) == 1 and isinstance(tree[0], str): + pos[wordnum] = tree.label() + assert words[wordnum] == tree[0] + return wordnum + 1 + elif len(tree) == 1 and isinstance(tree[0], tuple): + assert len(tree[0]) == 2 + pos[wordnum], pos[wordnum] = tree[0] + return wordnum + 1 + else: + synt[wordnum] = f"({tree.label()}{synt[wordnum]}" + for child in tree: + wordnum = self._tree2conll(child, wordnum, words, pos, synt) + synt[wordnum - 1] += ")" + return wordnum + + +class ConllChunkCorpusReader(ConllCorpusReader): + """ + A ConllCorpusReader whose data file contains three columns: words, + pos, and chunk. 
+ """ + + def __init__( + self, root, fileids, chunk_types, encoding="utf8", tagset=None, separator=None + ): + ConllCorpusReader.__init__( + self, + root, + fileids, + ("words", "pos", "chunk"), + chunk_types=chunk_types, + encoding=encoding, + tagset=tagset, + separator=separator, + ) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/crubadan.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/crubadan.py new file mode 100644 index 0000000000000000000000000000000000000000..d7bcf8a05cf86123ce952e802a71bb5dd637bd42 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/crubadan.py @@ -0,0 +1,106 @@ +# Natural Language Toolkit: An Crubadan N-grams Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Avital Pekker +# +# URL: +# For license information, see LICENSE.TXT + +""" +An NLTK interface for the n-gram statistics gathered from +the corpora for each language using An Crubadan. + +There are multiple potential applications for the data but +this reader was created with the goal of using it in the +context of language identification. + +For details about An Crubadan, this data, and its potential uses, see: +http://borel.slu.edu/crubadan/index.html +""" + +import re +from os import path + +from nltk.corpus.reader import CorpusReader +from nltk.data import ZipFilePathPointer +from nltk.probability import FreqDist + + +class CrubadanCorpusReader(CorpusReader): + """ + A corpus reader used to access language An Crubadan n-gram files. + """ + + _LANG_MAPPER_FILE = "table.txt" + _all_lang_freq = {} + + def __init__(self, root, fileids, encoding="utf8", tagset=None): + super().__init__(root, fileids, encoding="utf8") + self._lang_mapping_data = [] + self._load_lang_mapping_data() + + def lang_freq(self, lang): + """Return n-gram FreqDist for a specific language + given ISO 639-3 language code""" + + if lang not in self._all_lang_freq: + self._all_lang_freq[lang] = self._load_lang_ngrams(lang) + + return self._all_lang_freq[lang] + + def langs(self): + """Return a list of supported languages as ISO 639-3 codes""" + return [row[1] for row in self._lang_mapping_data] + + def iso_to_crubadan(self, lang): + """Return internal Crubadan code based on ISO 639-3 code""" + for i in self._lang_mapping_data: + if i[1].lower() == lang.lower(): + return i[0] + + def crubadan_to_iso(self, lang): + """Return ISO 639-3 code given internal Crubadan code""" + for i in self._lang_mapping_data: + if i[0].lower() == lang.lower(): + return i[1] + + def _load_lang_mapping_data(self): + """Load language mappings between codes and description from table.txt""" + if isinstance(self.root, ZipFilePathPointer): + raise RuntimeError( + "Please install the 'crubadan' corpus first, use nltk.download()" + ) + + mapper_file = path.join(self.root, self._LANG_MAPPER_FILE) + if self._LANG_MAPPER_FILE not in self.fileids(): + raise RuntimeError("Could not find language mapper file: " + mapper_file) + + with open(mapper_file, encoding="utf-8") as raw: + strip_raw = raw.read().strip() + + self._lang_mapping_data = [row.split("\t") for row in strip_raw.split("\n")] + + def _load_lang_ngrams(self, lang): + """Load single n-gram language file given the ISO 639-3 language code + and return its FreqDist""" + + if lang not in self.langs(): + raise RuntimeError("Unsupported language.") + + crubadan_code = self.iso_to_crubadan(lang) + ngram_file = path.join(self.root, crubadan_code + "-3grams.txt") + + if not path.isfile(ngram_file): + raise RuntimeError("No N-gram file found for requested 
language.") + + counts = FreqDist() + with open(ngram_file, encoding="utf-8") as f: + for line in f: + data = line.split(" ") + + ngram = data[1].strip("\n") + freq = int(data[0]) + + counts[ngram] = freq + + return counts diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/framenet.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/framenet.py new file mode 100644 index 0000000000000000000000000000000000000000..6eaa1ad8931ab407bac92d0ea3e6f2e60f74d0e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/framenet.py @@ -0,0 +1,3442 @@ +# Natural Language Toolkit: Framenet Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Chuck Wooters , +# Nathan Schneider +# URL: +# For license information, see LICENSE.TXT + + +""" +Corpus reader for the FrameNet 1.7 lexicon and corpus. +""" + +import itertools +import os +import re +import sys +import textwrap +import types +from collections import OrderedDict, defaultdict +from itertools import zip_longest +from operator import itemgetter +from pprint import pprint + +from nltk.corpus.reader import XMLCorpusReader, XMLCorpusView +from nltk.util import LazyConcatenation, LazyIteratorList, LazyMap + +__docformat__ = "epytext en" + + +def mimic_wrap(lines, wrap_at=65, **kwargs): + """ + Wrap the first of 'lines' with textwrap and the remaining lines at exactly the same + positions as the first. + """ + l0 = textwrap.fill(lines[0], wrap_at, drop_whitespace=False).split("\n") + yield l0 + + def _(line): + il0 = 0 + while line and il0 < len(l0) - 1: + yield line[: len(l0[il0])] + line = line[len(l0[il0]) :] + il0 += 1 + if line: # Remaining stuff on this line past the end of the mimicked line. + # So just textwrap this line. + yield from textwrap.fill(line, wrap_at, drop_whitespace=False).split("\n") + + for l in lines[1:]: + yield list(_(l)) + + +def _pretty_longstring(defstr, prefix="", wrap_at=65): + + """ + Helper function for pretty-printing a long string. + + :param defstr: The string to be printed. + :type defstr: str + :return: A nicely formatted string representation of the long string. + :rtype: str + """ + + outstr = "" + for line in textwrap.fill(defstr, wrap_at).split("\n"): + outstr += prefix + line + "\n" + return outstr + + +def _pretty_any(obj): + + """ + Helper function for pretty-printing any AttrDict object. + + :param obj: The obj to be printed. + :type obj: AttrDict + :return: A nicely formatted string representation of the AttrDict object. + :rtype: str + """ + + outstr = "" + for k in obj: + if isinstance(obj[k], str) and len(obj[k]) > 65: + outstr += f"[{k}]\n" + outstr += "{}".format(_pretty_longstring(obj[k], prefix=" ")) + outstr += "\n" + else: + outstr += f"[{k}] {obj[k]}\n" + + return outstr + + +def _pretty_semtype(st): + + """ + Helper function for pretty-printing a semantic type. + + :param st: The semantic type to be printed. + :type st: AttrDict + :return: A nicely formatted string representation of the semantic type. 
+ :rtype: str + """ + + semkeys = st.keys() + if len(semkeys) == 1: + return "" + + outstr = "" + outstr += "semantic type ({0.ID}): {0.name}\n".format(st) + if "abbrev" in semkeys: + outstr += f"[abbrev] {st.abbrev}\n" + if "definition" in semkeys: + outstr += "[definition]\n" + outstr += _pretty_longstring(st.definition, " ") + outstr += f"[rootType] {st.rootType.name}({st.rootType.ID})\n" + if st.superType is None: + outstr += "[superType] \n" + else: + outstr += f"[superType] {st.superType.name}({st.superType.ID})\n" + outstr += f"[subTypes] {len(st.subTypes)} subtypes\n" + outstr += ( + " " + + ", ".join(f"{x.name}({x.ID})" for x in st.subTypes) + + "\n" * (len(st.subTypes) > 0) + ) + return outstr + + +def _pretty_frame_relation_type(freltyp): + + """ + Helper function for pretty-printing a frame relation type. + + :param freltyp: The frame relation type to be printed. + :type freltyp: AttrDict + :return: A nicely formatted string representation of the frame relation type. + :rtype: str + """ + outstr = " {0.subFrameName}>".format( + freltyp + ) + return outstr + + +def _pretty_frame_relation(frel): + + """ + Helper function for pretty-printing a frame relation. + + :param frel: The frame relation to be printed. + :type frel: AttrDict + :return: A nicely formatted string representation of the frame relation. + :rtype: str + """ + outstr = "<{0.type.superFrameName}={0.superFrameName} -- {0.type.name} -> {0.type.subFrameName}={0.subFrameName}>".format( + frel + ) + return outstr + + +def _pretty_fe_relation(ferel): + + """ + Helper function for pretty-printing an FE relation. + + :param ferel: The FE relation to be printed. + :type ferel: AttrDict + :return: A nicely formatted string representation of the FE relation. + :rtype: str + """ + outstr = "<{0.type.superFrameName}={0.frameRelation.superFrameName}.{0.superFEName} -- {0.type.name} -> {0.type.subFrameName}={0.frameRelation.subFrameName}.{0.subFEName}>".format( + ferel + ) + return outstr + + +def _pretty_lu(lu): + + """ + Helper function for pretty-printing a lexical unit. + + :param lu: The lu to be printed. + :type lu: AttrDict + :return: A nicely formatted string representation of the lexical unit. 
+ :rtype: str + """ + + lukeys = lu.keys() + outstr = "" + outstr += "lexical unit ({0.ID}): {0.name}\n\n".format(lu) + if "definition" in lukeys: + outstr += "[definition]\n" + outstr += _pretty_longstring(lu.definition, " ") + if "frame" in lukeys: + outstr += f"\n[frame] {lu.frame.name}({lu.frame.ID})\n" + if "incorporatedFE" in lukeys: + outstr += f"\n[incorporatedFE] {lu.incorporatedFE}\n" + if "POS" in lukeys: + outstr += f"\n[POS] {lu.POS}\n" + if "status" in lukeys: + outstr += f"\n[status] {lu.status}\n" + if "totalAnnotated" in lukeys: + outstr += f"\n[totalAnnotated] {lu.totalAnnotated} annotated examples\n" + if "lexemes" in lukeys: + outstr += "\n[lexemes] {}\n".format( + " ".join(f"{lex.name}/{lex.POS}" for lex in lu.lexemes) + ) + if "semTypes" in lukeys: + outstr += f"\n[semTypes] {len(lu.semTypes)} semantic types\n" + outstr += ( + " " * (len(lu.semTypes) > 0) + + ", ".join(f"{x.name}({x.ID})" for x in lu.semTypes) + + "\n" * (len(lu.semTypes) > 0) + ) + if "URL" in lukeys: + outstr += f"\n[URL] {lu.URL}\n" + if "subCorpus" in lukeys: + subc = [x.name for x in lu.subCorpus] + outstr += f"\n[subCorpus] {len(lu.subCorpus)} subcorpora\n" + for line in textwrap.fill(", ".join(sorted(subc)), 60).split("\n"): + outstr += f" {line}\n" + if "exemplars" in lukeys: + outstr += "\n[exemplars] {} sentences across all subcorpora\n".format( + len(lu.exemplars) + ) + + return outstr + + +def _pretty_exemplars(exemplars, lu): + """ + Helper function for pretty-printing a list of exemplar sentences for a lexical unit. + + :param sent: The list of exemplar sentences to be printed. + :type sent: list(AttrDict) + :return: An index of the text of the exemplar sentences. + :rtype: str + """ + + outstr = "" + outstr += "exemplar sentences for {0.name} in {0.frame.name}:\n\n".format(lu) + for i, sent in enumerate(exemplars): + outstr += f"[{i}] {sent.text}\n" + outstr += "\n" + return outstr + + +def _pretty_fulltext_sentences(sents): + """ + Helper function for pretty-printing a list of annotated sentences for a full-text document. + + :param sent: The list of sentences to be printed. + :type sent: list(AttrDict) + :return: An index of the text of the sentences. + :rtype: str + """ + + outstr = "" + outstr += "full-text document ({0.ID}) {0.name}:\n\n".format(sents) + outstr += "[corpid] {0.corpid}\n[corpname] {0.corpname}\n[description] {0.description}\n[URL] {0.URL}\n\n".format( + sents + ) + outstr += f"[sentence]\n" + for i, sent in enumerate(sents.sentence): + outstr += f"[{i}] {sent.text}\n" + outstr += "\n" + return outstr + + +def _pretty_fulltext_sentence(sent): + """ + Helper function for pretty-printing an annotated sentence from a full-text document. + + :param sent: The sentence to be printed. + :type sent: list(AttrDict) + :return: The text of the sentence with annotation set indices on frame targets. + :rtype: str + """ + + outstr = "" + outstr += "full-text sentence ({0.ID}) in {1}:\n\n".format( + sent, sent.doc.get("name", sent.doc.description) + ) + outstr += f"\n[POS] {len(sent.POS)} tags\n" + outstr += f"\n[POS_tagset] {sent.POS_tagset}\n\n" + outstr += "[text] + [annotationSet]\n\n" + outstr += sent._ascii() # -> _annotation_ascii() + outstr += "\n" + return outstr + + +def _pretty_pos(aset): + """ + Helper function for pretty-printing a sentence with its POS tags. + + :param aset: The POS annotation set of the sentence to be printed. + :type sent: list(AttrDict) + :return: The text of the sentence and its POS tags. 
+ :rtype: str + """ + + outstr = "" + outstr += "POS annotation set ({0.ID}) {0.POS_tagset} in sentence {0.sent.ID}:\n\n".format( + aset + ) + + # list the target spans and their associated aset index + overt = sorted(aset.POS) + + sent = aset.sent + s0 = sent.text + s1 = "" + s2 = "" + i = 0 + adjust = 0 + for j, k, lbl in overt: + assert j >= i, ("Overlapping targets?", (j, k, lbl)) + s1 += " " * (j - i) + "-" * (k - j) + if len(lbl) > (k - j): + # add space in the sentence to make room for the annotation index + amt = len(lbl) - (k - j) + s0 = ( + s0[: k + adjust] + "~" * amt + s0[k + adjust :] + ) # '~' to prevent line wrapping + s1 = s1[: k + adjust] + " " * amt + s1[k + adjust :] + adjust += amt + s2 += " " * (j - i) + lbl.ljust(k - j) + i = k + + long_lines = [s0, s1, s2] + + outstr += "\n\n".join( + map("\n".join, zip_longest(*mimic_wrap(long_lines), fillvalue=" ")) + ).replace("~", " ") + outstr += "\n" + return outstr + + +def _pretty_annotation(sent, aset_level=False): + """ + Helper function for pretty-printing an exemplar sentence for a lexical unit. + + :param sent: An annotation set or exemplar sentence to be printed. + :param aset_level: If True, 'sent' is actually an annotation set within a sentence. + :type sent: AttrDict + :return: A nicely formatted string representation of the exemplar sentence + with its target, frame, and FE annotations. + :rtype: str + """ + + sentkeys = sent.keys() + outstr = "annotation set" if aset_level else "exemplar sentence" + outstr += f" ({sent.ID}):\n" + if aset_level: # TODO: any UNANN exemplars? + outstr += f"\n[status] {sent.status}\n" + for k in ("corpID", "docID", "paragNo", "sentNo", "aPos"): + if k in sentkeys: + outstr += f"[{k}] {sent[k]}\n" + outstr += ( + "\n[LU] ({0.ID}) {0.name} in {0.frame.name}\n".format(sent.LU) + if sent.LU + else "\n[LU] Not found!" + ) + outstr += "\n[frame] ({0.ID}) {0.name}\n".format( + sent.frame + ) # redundant with above, but .frame is convenient + if not aset_level: + outstr += "\n[annotationSet] {} annotation sets\n".format( + len(sent.annotationSet) + ) + outstr += f"\n[POS] {len(sent.POS)} tags\n" + outstr += f"\n[POS_tagset] {sent.POS_tagset}\n" + outstr += "\n[GF] {} relation{}\n".format( + len(sent.GF), "s" if len(sent.GF) != 1 else "" + ) + outstr += "\n[PT] {} phrase{}\n".format( + len(sent.PT), "s" if len(sent.PT) != 1 else "" + ) + """ + Special Layers + -------------- + + The 'NER' layer contains, for some of the data, named entity labels. + + The 'WSL' (word status layer) contains, for some of the data, + spans which should not in principle be considered targets (NT). + + The 'Other' layer records relative clause constructions (Rel=relativizer, Ant=antecedent), + pleonastic 'it' (Null), and existential 'there' (Exist). + On occasion they are duplicated by accident (e.g., annotationSet 1467275 in lu6700.xml). + + The 'Sent' layer appears to contain labels that the annotator has flagged the + sentence with for their convenience: values include + 'sense1', 'sense2', 'sense3', etc.; + 'Blend', 'Canonical', 'Idiom', 'Metaphor', 'Special-Sent', + 'keepS', 'deleteS', 'reexamine' + (sometimes they are duplicated for no apparent reason). + + The POS-specific layers may contain the following kinds of spans: + Asp (aspectual particle), Non-Asp (non-aspectual particle), + Cop (copula), Supp (support), Ctrlr (controller), + Gov (governor), X. Gov and X always cooccur. + + >>> from nltk.corpus import framenet as fn + >>> def f(luRE, lyr, ignore=set()): + ... 
for i,ex in enumerate(fn.exemplars(luRE)): + ... if lyr in ex and ex[lyr] and set(zip(*ex[lyr])[2]) - ignore: + ... print(i,ex[lyr]) + + - Verb: Asp, Non-Asp + - Noun: Cop, Supp, Ctrlr, Gov, X + - Adj: Cop, Supp, Ctrlr, Gov, X + - Prep: Cop, Supp, Ctrlr + - Adv: Ctrlr + - Scon: (none) + - Art: (none) + """ + for lyr in ("NER", "WSL", "Other", "Sent"): + if lyr in sent and sent[lyr]: + outstr += "\n[{}] {} entr{}\n".format( + lyr, len(sent[lyr]), "ies" if len(sent[lyr]) != 1 else "y" + ) + outstr += "\n[text] + [Target] + [FE]" + # POS-specific layers: syntactically important words that are neither the target + # nor the FEs. Include these along with the first FE layer but with '^' underlining. + for lyr in ("Verb", "Noun", "Adj", "Adv", "Prep", "Scon", "Art"): + if lyr in sent and sent[lyr]: + outstr += f" + [{lyr}]" + if "FE2" in sentkeys: + outstr += " + [FE2]" + if "FE3" in sentkeys: + outstr += " + [FE3]" + outstr += "\n\n" + outstr += sent._ascii() # -> _annotation_ascii() + outstr += "\n" + + return outstr + + +def _annotation_ascii(sent): + """ + Given a sentence or FE annotation set, construct the width-limited string showing + an ASCII visualization of the sentence's annotations, calling either + _annotation_ascii_frames() or _annotation_ascii_FEs() as appropriate. + This will be attached as a method to appropriate AttrDict instances + and called in the full pretty-printing of the instance. + """ + if sent._type == "fulltext_sentence" or ( + "annotationSet" in sent and len(sent.annotationSet) > 2 + ): + # a full-text sentence OR sentence with multiple targets. + # (multiple targets = >2 annotation sets, because the first annotation set is POS.) + return _annotation_ascii_frames(sent) + else: # an FE annotation set, or an LU sentence with 1 target + return _annotation_ascii_FEs(sent) + + +def _annotation_ascii_frames(sent): + """ + ASCII string rendering of the sentence along with its targets and frame names. + Called for all full-text sentences, as well as the few LU sentences with multiple + targets (e.g., fn.lu(6412).exemplars[82] has two want.v targets). + Line-wrapped to limit the display width. + """ + # list the target spans and their associated aset index + overt = [] + for a, aset in enumerate(sent.annotationSet[1:]): + for j, k in aset.Target: + indexS = f"[{a + 1}]" + if aset.status == "UNANN" or aset.LU.status == "Problem": + indexS += " " + if aset.status == "UNANN": + indexS += "!" # warning indicator that there is a frame annotation but no FE annotation + if aset.LU.status == "Problem": + indexS += "?" # warning indicator that there is a missing LU definition (because the LU has Problem status) + overt.append((j, k, aset.LU.frame.name, indexS)) + overt = sorted(overt) + + duplicates = set() + for o, (j, k, fname, asetIndex) in enumerate(overt): + if o > 0 and j <= overt[o - 1][1]: + # multiple annotation sets on the same target + # (e.g. due to a coordination construction or multiple annotators) + if ( + overt[o - 1][:2] == (j, k) and overt[o - 1][2] == fname + ): # same target, same frame + # splice indices together + combinedIndex = ( + overt[o - 1][3] + asetIndex + ) # e.g., '[1][2]', '[1]! [2]' + combinedIndex = combinedIndex.replace(" !", "! ").replace(" ?", "? 
") + overt[o - 1] = overt[o - 1][:3] + (combinedIndex,) + duplicates.add(o) + else: # different frames, same or overlapping targets + s = sent.text + for j, k, fname, asetIndex in overt: + s += "\n" + asetIndex + " " + sent.text[j:k] + " :: " + fname + s += "\n(Unable to display sentence with targets marked inline due to overlap)" + return s + for o in reversed(sorted(duplicates)): + del overt[o] + + s0 = sent.text + s1 = "" + s11 = "" + s2 = "" + i = 0 + adjust = 0 + fAbbrevs = OrderedDict() + for j, k, fname, asetIndex in overt: + if not j >= i: + assert j >= i, ( + "Overlapping targets?" + + ( + " UNANN" + if any(aset.status == "UNANN" for aset in sent.annotationSet[1:]) + else "" + ), + (j, k, asetIndex), + ) + s1 += " " * (j - i) + "*" * (k - j) + short = fname[: k - j] + if (k - j) < len(fname): + r = 0 + while short in fAbbrevs: + if fAbbrevs[short] == fname: + break + r += 1 + short = fname[: k - j - 1] + str(r) + else: # short not in fAbbrevs + fAbbrevs[short] = fname + s11 += " " * (j - i) + short.ljust(k - j) + if len(asetIndex) > (k - j): + # add space in the sentence to make room for the annotation index + amt = len(asetIndex) - (k - j) + s0 = ( + s0[: k + adjust] + "~" * amt + s0[k + adjust :] + ) # '~' to prevent line wrapping + s1 = s1[: k + adjust] + " " * amt + s1[k + adjust :] + s11 = s11[: k + adjust] + " " * amt + s11[k + adjust :] + adjust += amt + s2 += " " * (j - i) + asetIndex.ljust(k - j) + i = k + + long_lines = [s0, s1, s11, s2] + + outstr = "\n\n".join( + map("\n".join, zip_longest(*mimic_wrap(long_lines), fillvalue=" ")) + ).replace("~", " ") + outstr += "\n" + if fAbbrevs: + outstr += " (" + ", ".join("=".join(pair) for pair in fAbbrevs.items()) + ")" + assert len(fAbbrevs) == len(dict(fAbbrevs)), "Abbreviation clash" + + return outstr + + +def _annotation_ascii_FE_layer(overt, ni, feAbbrevs): + """Helper for _annotation_ascii_FEs().""" + s1 = "" + s2 = "" + i = 0 + for j, k, fename in overt: + s1 += " " * (j - i) + ("^" if fename.islower() else "-") * (k - j) + short = fename[: k - j] + if len(fename) > len(short): + r = 0 + while short in feAbbrevs: + if feAbbrevs[short] == fename: + break + r += 1 + short = fename[: k - j - 1] + str(r) + else: # short not in feAbbrevs + feAbbrevs[short] = fename + s2 += " " * (j - i) + short.ljust(k - j) + i = k + + sNI = "" + if ni: + sNI += " [" + ", ".join(":".join(x) for x in sorted(ni.items())) + "]" + return [s1, s2, sNI] + + +def _annotation_ascii_FEs(sent): + """ + ASCII string rendering of the sentence along with a single target and its FEs. + Secondary and tertiary FE layers are included if present. + 'sent' can be an FE annotation set or an LU sentence with a single target. + Line-wrapped to limit the display width. + """ + feAbbrevs = OrderedDict() + posspec = [] # POS-specific layer spans (e.g., Supp[ort], Cop[ula]) + posspec_separate = False + for lyr in ("Verb", "Noun", "Adj", "Adv", "Prep", "Scon", "Art"): + if lyr in sent and sent[lyr]: + for a, b, lbl in sent[lyr]: + if ( + lbl == "X" + ): # skip this, which covers an entire phrase typically containing the target and all its FEs + # (but do display the Gov) + continue + if any(1 for x, y, felbl in sent.FE[0] if x <= a < y or a <= x < b): + # overlap between one of the POS-specific layers and first FE layer + posspec_separate = ( + True # show POS-specific layers on a separate line + ) + posspec.append( + (a, b, lbl.lower().replace("-", "")) + ) # lowercase Cop=>cop, Non-Asp=>nonasp, etc. 
to distinguish from FE names + if posspec_separate: + POSSPEC = _annotation_ascii_FE_layer(posspec, {}, feAbbrevs) + FE1 = _annotation_ascii_FE_layer( + sorted(sent.FE[0] + (posspec if not posspec_separate else [])), + sent.FE[1], + feAbbrevs, + ) + FE2 = FE3 = None + if "FE2" in sent: + FE2 = _annotation_ascii_FE_layer(sent.FE2[0], sent.FE2[1], feAbbrevs) + if "FE3" in sent: + FE3 = _annotation_ascii_FE_layer(sent.FE3[0], sent.FE3[1], feAbbrevs) + + for i, j in sent.Target: + FE1span, FE1name, FE1exp = FE1 + if len(FE1span) < j: + FE1span += " " * (j - len(FE1span)) + if len(FE1name) < j: + FE1name += " " * (j - len(FE1name)) + FE1[1] = FE1name + FE1[0] = ( + FE1span[:i] + FE1span[i:j].replace(" ", "*").replace("-", "=") + FE1span[j:] + ) + long_lines = [sent.text] + if posspec_separate: + long_lines.extend(POSSPEC[:2]) + long_lines.extend([FE1[0], FE1[1] + FE1[2]]) # lines with no length limit + if FE2: + long_lines.extend([FE2[0], FE2[1] + FE2[2]]) + if FE3: + long_lines.extend([FE3[0], FE3[1] + FE3[2]]) + long_lines.append("") + outstr = "\n".join( + map("\n".join, zip_longest(*mimic_wrap(long_lines), fillvalue=" ")) + ) + if feAbbrevs: + outstr += "(" + ", ".join("=".join(pair) for pair in feAbbrevs.items()) + ")" + assert len(feAbbrevs) == len(dict(feAbbrevs)), "Abbreviation clash" + outstr += "\n" + + return outstr + + +def _pretty_fe(fe): + + """ + Helper function for pretty-printing a frame element. + + :param fe: The frame element to be printed. + :type fe: AttrDict + :return: A nicely formatted string representation of the frame element. + :rtype: str + """ + fekeys = fe.keys() + outstr = "" + outstr += "frame element ({0.ID}): {0.name}\n of {1.name}({1.ID})\n".format( + fe, fe.frame + ) + if "definition" in fekeys: + outstr += "[definition]\n" + outstr += _pretty_longstring(fe.definition, " ") + if "abbrev" in fekeys: + outstr += f"[abbrev] {fe.abbrev}\n" + if "coreType" in fekeys: + outstr += f"[coreType] {fe.coreType}\n" + if "requiresFE" in fekeys: + outstr += "[requiresFE] " + if fe.requiresFE is None: + outstr += "\n" + else: + outstr += f"{fe.requiresFE.name}({fe.requiresFE.ID})\n" + if "excludesFE" in fekeys: + outstr += "[excludesFE] " + if fe.excludesFE is None: + outstr += "\n" + else: + outstr += f"{fe.excludesFE.name}({fe.excludesFE.ID})\n" + if "semType" in fekeys: + outstr += "[semType] " + if fe.semType is None: + outstr += "\n" + else: + outstr += "\n " + f"{fe.semType.name}({fe.semType.ID})" + "\n" + + return outstr + + +def _pretty_frame(frame): + + """ + Helper function for pretty-printing a frame. + + :param frame: The frame to be printed. + :type frame: AttrDict + :return: A nicely formatted string representation of the frame. 
+ :rtype: str + """ + + outstr = "" + outstr += "frame ({0.ID}): {0.name}\n\n".format(frame) + outstr += f"[URL] {frame.URL}\n\n" + outstr += "[definition]\n" + outstr += _pretty_longstring(frame.definition, " ") + "\n" + + outstr += f"[semTypes] {len(frame.semTypes)} semantic types\n" + outstr += ( + " " * (len(frame.semTypes) > 0) + + ", ".join(f"{x.name}({x.ID})" for x in frame.semTypes) + + "\n" * (len(frame.semTypes) > 0) + ) + + outstr += "\n[frameRelations] {} frame relations\n".format( + len(frame.frameRelations) + ) + outstr += " " + "\n ".join(repr(frel) for frel in frame.frameRelations) + "\n" + + outstr += f"\n[lexUnit] {len(frame.lexUnit)} lexical units\n" + lustrs = [] + for luName, lu in sorted(frame.lexUnit.items()): + tmpstr = f"{luName} ({lu.ID})" + lustrs.append(tmpstr) + outstr += "{}\n".format(_pretty_longstring(", ".join(lustrs), prefix=" ")) + + outstr += f"\n[FE] {len(frame.FE)} frame elements\n" + fes = {} + for feName, fe in sorted(frame.FE.items()): + try: + fes[fe.coreType].append(f"{feName} ({fe.ID})") + except KeyError: + fes[fe.coreType] = [] + fes[fe.coreType].append(f"{feName} ({fe.ID})") + for ct in sorted( + fes.keys(), + key=lambda ct2: [ + "Core", + "Core-Unexpressed", + "Peripheral", + "Extra-Thematic", + ].index(ct2), + ): + outstr += "{:>16}: {}\n".format(ct, ", ".join(sorted(fes[ct]))) + + outstr += "\n[FEcoreSets] {} frame element core sets\n".format( + len(frame.FEcoreSets) + ) + outstr += ( + " " + + "\n ".join( + ", ".join([x.name for x in coreSet]) for coreSet in frame.FEcoreSets + ) + + "\n" + ) + + return outstr + + +class FramenetError(Exception): + + """An exception class for framenet-related errors.""" + + +class AttrDict(dict): + + """A class that wraps a dict and allows accessing the keys of the + dict as if they were attributes. 
Taken from here: + https://stackoverflow.com/a/14620633/8879 + + >>> foo = {'a':1, 'b':2, 'c':3} + >>> bar = AttrDict(foo) + >>> pprint(dict(bar)) + {'a': 1, 'b': 2, 'c': 3} + >>> bar.b + 2 + >>> bar.d = 4 + >>> pprint(dict(bar)) + {'a': 1, 'b': 2, 'c': 3, 'd': 4} + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # self.__dict__ = self + + def __setattr__(self, name, value): + self[name] = value + + def __getattr__(self, name): + if name == "_short_repr": + return self._short_repr + return self[name] + + def __getitem__(self, name): + v = super().__getitem__(name) + if isinstance(v, Future): + return v._data() + return v + + def _short_repr(self): + if "_type" in self: + if self["_type"].endswith("relation"): + return self.__repr__() + try: + return "<{} ID={} name={}>".format( + self["_type"], self["ID"], self["name"] + ) + except KeyError: + try: # no ID--e.g., for _type=lusubcorpus + return "<{} name={}>".format(self["_type"], self["name"]) + except KeyError: # no name--e.g., for _type=lusentence + return "<{} ID={}>".format(self["_type"], self["ID"]) + else: + return self.__repr__() + + def _str(self): + outstr = "" + + if "_type" not in self: + outstr = _pretty_any(self) + elif self["_type"] == "frame": + outstr = _pretty_frame(self) + elif self["_type"] == "fe": + outstr = _pretty_fe(self) + elif self["_type"] == "lu": + outstr = _pretty_lu(self) + elif self["_type"] == "luexemplars": # list of ALL exemplars for LU + outstr = _pretty_exemplars(self, self[0].LU) + elif ( + self["_type"] == "fulltext_annotation" + ): # list of all sentences for full-text doc + outstr = _pretty_fulltext_sentences(self) + elif self["_type"] == "lusentence": + outstr = _pretty_annotation(self) + elif self["_type"] == "fulltext_sentence": + outstr = _pretty_fulltext_sentence(self) + elif self["_type"] in ("luannotationset", "fulltext_annotationset"): + outstr = _pretty_annotation(self, aset_level=True) + elif self["_type"] == "posannotationset": + outstr = _pretty_pos(self) + elif self["_type"] == "semtype": + outstr = _pretty_semtype(self) + elif self["_type"] == "framerelationtype": + outstr = _pretty_frame_relation_type(self) + elif self["_type"] == "framerelation": + outstr = _pretty_frame_relation(self) + elif self["_type"] == "ferelation": + outstr = _pretty_fe_relation(self) + else: + outstr = _pretty_any(self) + + # ensure result is unicode string prior to applying the + # decorator (because non-ASCII characters + # could in principle occur in the data and would trigger an encoding error when + # passed as arguments to str.format()). + # assert isinstance(outstr, unicode) # not in Python 3.2 + return outstr + + def __str__(self): + return self._str() + + def __repr__(self): + return self.__str__() + + +class SpecialList(list): + """ + A list subclass which adds a '_type' attribute for special printing + (similar to an AttrDict, though this is NOT an AttrDict subclass). + """ + + def __init__(self, typ, *args, **kwargs): + super().__init__(*args, **kwargs) + self._type = typ + + def _str(self): + outstr = "" + + assert self._type + if len(self) == 0: + outstr = "[]" + elif self._type == "luexemplars": # list of ALL exemplars for LU + outstr = _pretty_exemplars(self, self[0].LU) + else: + assert False, self._type + return outstr + + def __str__(self): + return self._str() + + def __repr__(self): + return self.__str__() + + +class Future: + """ + Wraps and acts as a proxy for a value to be loaded lazily (on demand). 
+ Adapted from https://gist.github.com/sergey-miryanov/2935416 + """ + + def __init__(self, loader, *args, **kwargs): + """ + :param loader: when called with no arguments, returns the value to be stored + :type loader: callable + """ + super().__init__(*args, **kwargs) + self._loader = loader + self._d = None + + def _data(self): + if callable(self._loader): + self._d = self._loader() + self._loader = None # the data is now cached + return self._d + + def __nonzero__(self): + return bool(self._data()) + + def __len__(self): + return len(self._data()) + + def __setitem__(self, key, value): + return self._data().__setitem__(key, value) + + def __getitem__(self, key): + return self._data().__getitem__(key) + + def __getattr__(self, key): + return self._data().__getattr__(key) + + def __str__(self): + return self._data().__str__() + + def __repr__(self): + return self._data().__repr__() + + +class PrettyDict(AttrDict): + """ + Displays an abbreviated repr of values where possible. + Inherits from AttrDict, so a callable value will + be lazily converted to an actual value. + """ + + def __init__(self, *args, **kwargs): + _BREAK_LINES = kwargs.pop("breakLines", False) + super().__init__(*args, **kwargs) + dict.__setattr__(self, "_BREAK_LINES", _BREAK_LINES) + + def __repr__(self): + parts = [] + for k, v in sorted(self.items()): + kv = repr(k) + ": " + try: + kv += v._short_repr() + except AttributeError: + kv += repr(v) + parts.append(kv) + return "{" + (",\n " if self._BREAK_LINES else ", ").join(parts) + "}" + + +class PrettyList(list): + """ + Displays an abbreviated repr of only the first several elements, not the whole list. + """ + + # from nltk.util + def __init__(self, *args, **kwargs): + self._MAX_REPR_SIZE = kwargs.pop("maxReprSize", 60) + self._BREAK_LINES = kwargs.pop("breakLines", False) + super().__init__(*args, **kwargs) + + def __repr__(self): + """ + Return a string representation for this corpus view that is + similar to a list's representation; but if it would be more + than 60 characters long, it is truncated. + """ + pieces = [] + length = 5 + + for elt in self: + pieces.append( + elt._short_repr() + ) # key difference from inherited version: call to _short_repr() + length += len(pieces[-1]) + 2 + if self._MAX_REPR_SIZE and length > self._MAX_REPR_SIZE and len(pieces) > 2: + return "[%s, ...]" % str(",\n " if self._BREAK_LINES else ", ").join( + pieces[:-1] + ) + return "[%s]" % str(",\n " if self._BREAK_LINES else ", ").join(pieces) + + +class PrettyLazyMap(LazyMap): + """ + Displays an abbreviated repr of only the first several elements, not the whole list. + """ + + # from nltk.util + _MAX_REPR_SIZE = 60 + + def __repr__(self): + """ + Return a string representation for this corpus view that is + similar to a list's representation; but if it would be more + than 60 characters long, it is truncated. + """ + pieces = [] + length = 5 + for elt in self: + pieces.append( + elt._short_repr() + ) # key difference from inherited version: call to _short_repr() + length += len(pieces[-1]) + 2 + if length > self._MAX_REPR_SIZE and len(pieces) > 2: + return "[%s, ...]" % ", ".join(pieces[:-1]) + return "[%s]" % ", ".join(pieces) + + +class PrettyLazyIteratorList(LazyIteratorList): + """ + Displays an abbreviated repr of only the first several elements, not the whole list. 
+ """ + + # from nltk.util + _MAX_REPR_SIZE = 60 + + def __repr__(self): + """ + Return a string representation for this corpus view that is + similar to a list's representation; but if it would be more + than 60 characters long, it is truncated. + """ + pieces = [] + length = 5 + for elt in self: + pieces.append( + elt._short_repr() + ) # key difference from inherited version: call to _short_repr() + length += len(pieces[-1]) + 2 + if length > self._MAX_REPR_SIZE and len(pieces) > 2: + return "[%s, ...]" % ", ".join(pieces[:-1]) + return "[%s]" % ", ".join(pieces) + + +class PrettyLazyConcatenation(LazyConcatenation): + """ + Displays an abbreviated repr of only the first several elements, not the whole list. + """ + + # from nltk.util + _MAX_REPR_SIZE = 60 + + def __repr__(self): + """ + Return a string representation for this corpus view that is + similar to a list's representation; but if it would be more + than 60 characters long, it is truncated. + """ + pieces = [] + length = 5 + for elt in self: + pieces.append( + elt._short_repr() + ) # key difference from inherited version: call to _short_repr() + length += len(pieces[-1]) + 2 + if length > self._MAX_REPR_SIZE and len(pieces) > 2: + return "[%s, ...]" % ", ".join(pieces[:-1]) + return "[%s]" % ", ".join(pieces) + + def __add__(self, other): + """Return a list concatenating self with other.""" + return PrettyLazyIteratorList(itertools.chain(self, other)) + + def __radd__(self, other): + """Return a list concatenating other with self.""" + return PrettyLazyIteratorList(itertools.chain(other, self)) + + +class FramenetCorpusReader(XMLCorpusReader): + """A corpus reader for the Framenet Corpus. + + >>> from nltk.corpus import framenet as fn + >>> fn.lu(3238).frame.lexUnit['glint.v'] is fn.lu(3238) + True + >>> fn.frame_by_name('Replacing') is fn.lus('replace.v')[0].frame + True + >>> fn.lus('prejudice.n')[0].frame.frameRelations == fn.frame_relations('Partiality') + True + """ + + _bad_statuses = ["Problem"] + """ + When loading LUs for a frame, those whose status is in this list will be ignored. + Due to caching, if user code modifies this, it should do so before loading any data. + 'Problem' should always be listed for FrameNet 1.5, as these LUs are not included + in the XML index. + """ + + _warnings = False + + def warnings(self, v): + """Enable or disable warnings of data integrity issues as they are encountered. + If v is truthy, warnings will be enabled. + + (This is a function rather than just an attribute/property to ensure that if + enabling warnings is the first action taken, the corpus reader is instantiated first.) + """ + self._warnings = v + + def __init__(self, root, fileids): + XMLCorpusReader.__init__(self, root, fileids) + + # framenet corpus sub dirs + # sub dir containing the xml files for frames + self._frame_dir = "frame" + # sub dir containing the xml files for lexical units + self._lu_dir = "lu" + # sub dir containing the xml files for fulltext annotation files + self._fulltext_dir = "fulltext" + + # location of latest development version of FrameNet + self._fnweb_url = "https://framenet2.icsi.berkeley.edu/fnReports/data" + + # Indexes used for faster look-ups + self._frame_idx = None + self._cached_frames = {} # name -> ID + self._lu_idx = None + self._fulltext_idx = None + self._semtypes = None + self._freltyp_idx = None # frame relation types (Inheritance, Using, etc.) 
+ self._frel_idx = None # frame-to-frame relation instances + self._ferel_idx = None # FE-to-FE relation instances + self._frel_f_idx = None # frame-to-frame relations associated with each frame + + self._readme = "README.txt" + + def help(self, attrname=None): + """Display help information summarizing the main methods.""" + + if attrname is not None: + return help(self.__getattribute__(attrname)) + + # No need to mention frame_by_name() or frame_by_id(), + # as it's easier to just call frame(). + # Also not mentioning lu_basic(). + + msg = """ +Citation: Nathan Schneider and Chuck Wooters (2017), +"The NLTK FrameNet API: Designing for Discoverability with a Rich Linguistic Resource". +Proceedings of EMNLP: System Demonstrations. https://arxiv.org/abs/1703.07438 + +Use the following methods to access data in FrameNet. +Provide a method name to `help()` for more information. + +FRAMES +====== + +frame() to look up a frame by its exact name or ID +frames() to get frames matching a name pattern +frames_by_lemma() to get frames containing an LU matching a name pattern +frame_ids_and_names() to get a mapping from frame IDs to names + +FRAME ELEMENTS +============== + +fes() to get frame elements (a.k.a. roles) matching a name pattern, optionally constrained + by a frame name pattern + +LEXICAL UNITS +============= + +lu() to look up an LU by its ID +lus() to get lexical units matching a name pattern, optionally constrained by frame +lu_ids_and_names() to get a mapping from LU IDs to names + +RELATIONS +========= + +frame_relation_types() to get the different kinds of frame-to-frame relations + (Inheritance, Subframe, Using, etc.). +frame_relations() to get the relation instances, optionally constrained by + frame(s) or relation type +fe_relations() to get the frame element pairs belonging to a frame-to-frame relation + +SEMANTIC TYPES +============== + +semtypes() to get the different kinds of semantic types that can be applied to + FEs, LUs, and entire frames +semtype() to look up a particular semtype by name, ID, or abbreviation +semtype_inherits() to check whether two semantic types have a subtype-supertype + relationship in the semtype hierarchy +propagate_semtypes() to apply inference rules that distribute semtypes over relations + between FEs + +ANNOTATIONS +=========== + +annotations() to get annotation sets, in which a token in a sentence is annotated + with a lexical unit in a frame, along with its frame elements and their syntactic properties; + can be constrained by LU name pattern and limited to lexicographic exemplars or full-text. + Sentences of full-text annotation can have multiple annotation sets. +sents() to get annotated sentences illustrating one or more lexical units +exemplars() to get sentences of lexicographic annotation, most of which have + just 1 annotation set; can be constrained by LU name pattern, frame, and overt FE(s) +doc() to look up a document of full-text annotation by its ID +docs() to get documents of full-text annotation that match a name pattern +docs_metadata() to get metadata about all full-text documents without loading them +ft_sents() to iterate over sentences of full-text annotation + +UTILITIES +========= + +buildindexes() loads metadata about all frames, LUs, etc. into memory to avoid + delay when one is accessed for the first time. It does not load annotations. 
+readme() gives the text of the FrameNet README file +warnings(True) to display corpus consistency warnings when loading data + """ + print(msg) + + def _buildframeindex(self): + # The total number of Frames in Framenet is fairly small (~1200) so + # this index should not be very large + if not self._frel_idx: + self._buildrelationindex() # always load frame relations before frames, + # otherwise weird ordering effects might result in incomplete information + self._frame_idx = {} + with XMLCorpusView( + self.abspath("frameIndex.xml"), "frameIndex/frame", self._handle_elt + ) as view: + for f in view: + self._frame_idx[f["ID"]] = f + + def _buildcorpusindex(self): + # The total number of fulltext annotated documents in Framenet + # is fairly small (~90) so this index should not be very large + self._fulltext_idx = {} + with XMLCorpusView( + self.abspath("fulltextIndex.xml"), + "fulltextIndex/corpus", + self._handle_fulltextindex_elt, + ) as view: + for doclist in view: + for doc in doclist: + self._fulltext_idx[doc.ID] = doc + + def _buildluindex(self): + # The number of LUs in Framenet is about 13,000 so this index + # should not be very large + self._lu_idx = {} + with XMLCorpusView( + self.abspath("luIndex.xml"), "luIndex/lu", self._handle_elt + ) as view: + for lu in view: + self._lu_idx[ + lu["ID"] + ] = lu # populate with LU index entries. if any of these + # are looked up they will be replaced by full LU objects. + + def _buildrelationindex(self): + # print('building relation index...', file=sys.stderr) + self._freltyp_idx = {} + self._frel_idx = {} + self._frel_f_idx = defaultdict(set) + self._ferel_idx = {} + + with XMLCorpusView( + self.abspath("frRelation.xml"), + "frameRelations/frameRelationType", + self._handle_framerelationtype_elt, + ) as view: + for freltyp in view: + self._freltyp_idx[freltyp.ID] = freltyp + for frel in freltyp.frameRelations: + supF = frel.superFrame = frel[freltyp.superFrameName] = Future( + (lambda fID: lambda: self.frame_by_id(fID))(frel.supID) + ) + subF = frel.subFrame = frel[freltyp.subFrameName] = Future( + (lambda fID: lambda: self.frame_by_id(fID))(frel.subID) + ) + self._frel_idx[frel.ID] = frel + self._frel_f_idx[frel.supID].add(frel.ID) + self._frel_f_idx[frel.subID].add(frel.ID) + for ferel in frel.feRelations: + ferel.superFrame = supF + ferel.subFrame = subF + ferel.superFE = Future( + (lambda fer: lambda: fer.superFrame.FE[fer.superFEName])( + ferel + ) + ) + ferel.subFE = Future( + (lambda fer: lambda: fer.subFrame.FE[fer.subFEName])(ferel) + ) + self._ferel_idx[ferel.ID] = ferel + # print('...done building relation index', file=sys.stderr) + + def _warn(self, *message, **kwargs): + if self._warnings: + kwargs.setdefault("file", sys.stderr) + print(*message, **kwargs) + + def buildindexes(self): + """ + Build the internal indexes to make look-ups faster. + """ + # Frames + self._buildframeindex() + # LUs + self._buildluindex() + # Fulltext annotation corpora index + self._buildcorpusindex() + # frame and FE relations + self._buildrelationindex() + + def doc(self, fn_docid): + """ + Returns the annotated document whose id number is + ``fn_docid``. This id number can be obtained by calling the + Documents() function. 
+ + The dict that is returned from this function will contain the + following keys: + + - '_type' : 'fulltextannotation' + - 'sentence' : a list of sentences in the document + - Each item in the list is a dict containing the following keys: + - 'ID' : the ID number of the sentence + - '_type' : 'sentence' + - 'text' : the text of the sentence + - 'paragNo' : the paragraph number + - 'sentNo' : the sentence number + - 'docID' : the document ID number + - 'corpID' : the corpus ID number + - 'aPos' : the annotation position + - 'annotationSet' : a list of annotation layers for the sentence + - Each item in the list is a dict containing the following keys: + - 'ID' : the ID number of the annotation set + - '_type' : 'annotationset' + - 'status' : either 'MANUAL' or 'UNANN' + - 'luName' : (only if status is 'MANUAL') + - 'luID' : (only if status is 'MANUAL') + - 'frameID' : (only if status is 'MANUAL') + - 'frameName': (only if status is 'MANUAL') + - 'layer' : a list of labels for the layer + - Each item in the layer is a dict containing the following keys: + - '_type': 'layer' + - 'rank' + - 'name' + - 'label' : a list of labels in the layer + - Each item is a dict containing the following keys: + - 'start' + - 'end' + - 'name' + - 'feID' (optional) + + :param fn_docid: The Framenet id number of the document + :type fn_docid: int + :return: Information about the annotated document + :rtype: dict + """ + try: + xmlfname = self._fulltext_idx[fn_docid].filename + except TypeError: # happens when self._fulltext_idx == None + # build the index + self._buildcorpusindex() + xmlfname = self._fulltext_idx[fn_docid].filename + except KeyError as e: # probably means that fn_docid was not in the index + raise FramenetError(f"Unknown document id: {fn_docid}") from e + + # construct the path name for the xml file containing the document info + locpath = os.path.join(f"{self._root}", self._fulltext_dir, xmlfname) + + # Grab the top-level xml element containing the fulltext annotation + with XMLCorpusView(locpath, "fullTextAnnotation") as view: + elt = view[0] + info = self._handle_fulltextannotation_elt(elt) + # add metadata + for k, v in self._fulltext_idx[fn_docid].items(): + info[k] = v + return info + + def frame_by_id(self, fn_fid, ignorekeys=[]): + """ + Get the details for the specified Frame using the frame's id + number. + + Usage examples: + + >>> from nltk.corpus import framenet as fn + >>> f = fn.frame_by_id(256) + >>> f.ID + 256 + >>> f.name + 'Medical_specialties' + >>> f.definition # doctest: +NORMALIZE_WHITESPACE + "This frame includes words that name medical specialties and is closely related to the + Medical_professionals frame. The FE Type characterizing a sub-are in a Specialty may also be + expressed. 'Ralph practices paediatric oncology.'" + + :param fn_fid: The Framenet id number of the frame + :type fn_fid: int + :param ignorekeys: The keys to ignore. These keys will not be + included in the output. (optional) + :type ignorekeys: list(str) + :return: Information about a frame + :rtype: dict + + Also see the ``frame()`` function for details about what is + contained in the dict that is returned. 
+ """ + + # get the name of the frame with this id number + try: + fentry = self._frame_idx[fn_fid] + if "_type" in fentry: + return fentry # full frame object is cached + name = fentry["name"] + except TypeError: + self._buildframeindex() + name = self._frame_idx[fn_fid]["name"] + except KeyError as e: + raise FramenetError(f"Unknown frame id: {fn_fid}") from e + + return self.frame_by_name(name, ignorekeys, check_cache=False) + + def frame_by_name(self, fn_fname, ignorekeys=[], check_cache=True): + """ + Get the details for the specified Frame using the frame's name. + + Usage examples: + + >>> from nltk.corpus import framenet as fn + >>> f = fn.frame_by_name('Medical_specialties') + >>> f.ID + 256 + >>> f.name + 'Medical_specialties' + >>> f.definition # doctest: +NORMALIZE_WHITESPACE + "This frame includes words that name medical specialties and is closely related to the + Medical_professionals frame. The FE Type characterizing a sub-are in a Specialty may also be + expressed. 'Ralph practices paediatric oncology.'" + + :param fn_fname: The name of the frame + :type fn_fname: str + :param ignorekeys: The keys to ignore. These keys will not be + included in the output. (optional) + :type ignorekeys: list(str) + :return: Information about a frame + :rtype: dict + + Also see the ``frame()`` function for details about what is + contained in the dict that is returned. + """ + + if check_cache and fn_fname in self._cached_frames: + return self._frame_idx[self._cached_frames[fn_fname]] + elif not self._frame_idx: + self._buildframeindex() + + # construct the path name for the xml file containing the Frame info + locpath = os.path.join(f"{self._root}", self._frame_dir, fn_fname + ".xml") + # print(locpath, file=sys.stderr) + # Grab the xml for the frame + try: + with XMLCorpusView(locpath, "frame") as view: + elt = view[0] + except OSError as e: + raise FramenetError(f"Unknown frame: {fn_fname}") from e + + fentry = self._handle_frame_elt(elt, ignorekeys) + assert fentry + + fentry.URL = self._fnweb_url + "/" + self._frame_dir + "/" + fn_fname + ".xml" + + # INFERENCE RULE: propagate lexical semtypes from the frame to all its LUs + for st in fentry.semTypes: + if st.rootType.name == "Lexical_type": + for lu in fentry.lexUnit.values(): + if not any( + x is st for x in lu.semTypes + ): # identity containment check + lu.semTypes.append(st) + + self._frame_idx[fentry.ID] = fentry + self._cached_frames[fentry.name] = fentry.ID + """ + # now set up callables to resolve the LU pointers lazily. + # (could also do this here--caching avoids infinite recursion.) + for luName,luinfo in fentry.lexUnit.items(): + fentry.lexUnit[luName] = (lambda luID: Future(lambda: self.lu(luID)))(luinfo.ID) + """ + return fentry + + def frame(self, fn_fid_or_fname, ignorekeys=[]): + """ + Get the details for the specified Frame using the frame's name + or id number. + + Usage examples: + + >>> from nltk.corpus import framenet as fn + >>> f = fn.frame(256) + >>> f.name + 'Medical_specialties' + >>> f = fn.frame('Medical_specialties') + >>> f.ID + 256 + >>> # ensure non-ASCII character in definition doesn't trigger an encoding error: + >>> fn.frame('Imposing_obligation') # doctest: +ELLIPSIS + frame (1494): Imposing_obligation... + + + The dict that is returned from this function will contain the + following information about the Frame: + + - 'name' : the name of the Frame (e.g. 'Birth', 'Apply_heat', etc.) 
+ - 'definition' : textual definition of the Frame + - 'ID' : the internal ID number of the Frame + - 'semTypes' : a list of semantic types for this frame + - Each item in the list is a dict containing the following keys: + - 'name' : can be used with the semtype() function + - 'ID' : can be used with the semtype() function + + - 'lexUnit' : a dict containing all of the LUs for this frame. + The keys in this dict are the names of the LUs and + the value for each key is itself a dict containing + info about the LU (see the lu() function for more info.) + + - 'FE' : a dict containing the Frame Elements that are part of this frame + The keys in this dict are the names of the FEs (e.g. 'Body_system') + and the values are dicts containing the following keys + + - 'definition' : The definition of the FE + - 'name' : The name of the FE e.g. 'Body_system' + - 'ID' : The id number + - '_type' : 'fe' + - 'abbrev' : Abbreviation e.g. 'bod' + - 'coreType' : one of "Core", "Peripheral", or "Extra-Thematic" + - 'semType' : if not None, a dict with the following two keys: + - 'name' : name of the semantic type. can be used with + the semtype() function + - 'ID' : id number of the semantic type. can be used with + the semtype() function + - 'requiresFE' : if not None, a dict with the following two keys: + - 'name' : the name of another FE in this frame + - 'ID' : the id of the other FE in this frame + - 'excludesFE' : if not None, a dict with the following two keys: + - 'name' : the name of another FE in this frame + - 'ID' : the id of the other FE in this frame + + - 'frameRelation' : a list of objects describing frame relations + - 'FEcoreSets' : a list of Frame Element core sets for this frame + - Each item in the list is a list of FE objects + + :param fn_fid_or_fname: The Framenet name or id number of the frame + :type fn_fid_or_fname: int or str + :param ignorekeys: The keys to ignore. These keys will not be + included in the output. (optional) + :type ignorekeys: list(str) + :return: Information about a frame + :rtype: dict + """ + + # get the frame info by name or id number + if isinstance(fn_fid_or_fname, str): + f = self.frame_by_name(fn_fid_or_fname, ignorekeys) + else: + f = self.frame_by_id(fn_fid_or_fname, ignorekeys) + + return f + + def frames_by_lemma(self, pat): + """ + Returns a list of all frames that contain LUs in which the + ``name`` attribute of the LU matches the given regular expression + ``pat``. Note that LU names are composed of "lemma.POS", where + the "lemma" part can be made up of either a single lexeme + (e.g. 'run') or multiple lexemes (e.g. 'a little'). + + Note: if you are going to be doing a lot of this type of + searching, you'd want to build an index that maps from lemmas to + frames because each time frames_by_lemma() is called, it has to + search through ALL of the frame XML files in the db. + + >>> from nltk.corpus import framenet as fn + >>> from nltk.corpus.reader.framenet import PrettyList + >>> PrettyList(sorted(fn.frames_by_lemma(r'(?i)a little'), key=itemgetter('ID'))) # doctest: +ELLIPSIS + [, ] + + :return: A list of frame objects. + :rtype: list(AttrDict) + """ + return PrettyList( + f + for f in self.frames() + if any(re.search(pat, luName) for luName in f.lexUnit) + ) + + def lu_basic(self, fn_luid): + """ + Returns basic information about the LU whose id is + ``fn_luid``. This is basically just a wrapper around the + ``lu()`` function with "subCorpus" info excluded. 
+ + >>> from nltk.corpus import framenet as fn + >>> lu = PrettyDict(fn.lu_basic(256), breakLines=True) + >>> # ellipses account for differences between FN 1.5 and 1.7 + >>> lu # doctest: +ELLIPSIS + {'ID': 256, + 'POS': 'V', + 'URL': 'https://framenet2.icsi.berkeley.edu/fnReports/data/lu/lu256.xml', + '_type': 'lu', + 'cBy': ..., + 'cDate': '02/08/2001 01:27:50 PST Thu', + 'definition': 'COD: be aware of beforehand; predict.', + 'definitionMarkup': 'COD: be aware of beforehand; predict.', + 'frame': , + 'lemmaID': 15082, + 'lexemes': [{'POS': 'V', 'breakBefore': 'false', 'headword': 'false', 'name': 'foresee', 'order': 1}], + 'name': 'foresee.v', + 'semTypes': [], + 'sentenceCount': {'annotated': ..., 'total': ...}, + 'status': 'FN1_Sent'} + + :param fn_luid: The id number of the desired LU + :type fn_luid: int + :return: Basic information about the lexical unit + :rtype: dict + """ + return self.lu(fn_luid, ignorekeys=["subCorpus", "exemplars"]) + + def lu(self, fn_luid, ignorekeys=[], luName=None, frameID=None, frameName=None): + """ + Access a lexical unit by its ID. luName, frameID, and frameName are used + only in the event that the LU does not have a file in the database + (which is the case for LUs with "Problem" status); in this case, + a placeholder LU is created which just contains its name, ID, and frame. + + + Usage examples: + + >>> from nltk.corpus import framenet as fn + >>> fn.lu(256).name + 'foresee.v' + >>> fn.lu(256).definition + 'COD: be aware of beforehand; predict.' + >>> fn.lu(256).frame.name + 'Expectation' + >>> list(map(PrettyDict, fn.lu(256).lexemes)) + [{'POS': 'V', 'breakBefore': 'false', 'headword': 'false', 'name': 'foresee', 'order': 1}] + + >>> fn.lu(227).exemplars[23] # doctest: +NORMALIZE_WHITESPACE + exemplar sentence (352962): + [sentNo] 0 + [aPos] 59699508 + + [LU] (227) guess.v in Coming_to_believe + + [frame] (23) Coming_to_believe + + [annotationSet] 2 annotation sets + + [POS] 18 tags + + [POS_tagset] BNC + + [GF] 3 relations + + [PT] 3 phrases + + [Other] 1 entry + + [text] + [Target] + [FE] + + When he was inside the house , Culley noticed the characteristic + ------------------ + Content + + he would n't have guessed at . + -- ******* -- + Co C1 [Evidence:INI] + (Co=Cognizer, C1=Content) + + + + The dict that is returned from this function will contain most of the + following information about the LU. Note that some LUs do not contain + all of these pieces of information - particularly 'totalAnnotated' and + 'incorporatedFE' may be missing in some LUs: + + - 'name' : the name of the LU (e.g. 'merger.n') + - 'definition' : textual definition of the LU + - 'ID' : the internal ID number of the LU + - '_type' : 'lu' + - 'status' : e.g. 'Created' + - 'frame' : Frame that this LU belongs to + - 'POS' : the part of speech of this LU (e.g. 'N') + - 'totalAnnotated' : total number of examples annotated with this LU + - 'incorporatedFE' : FE that incorporates this LU (e.g. 'Ailment') + - 'sentenceCount' : a dict with the following two keys: + - 'annotated': number of sentences annotated with this LU + - 'total' : total number of sentences with this LU + + - 'lexemes' : a list of dicts describing the lemma of this LU. + Each dict in the list contains these keys: + + - 'POS' : part of speech e.g. 'N' + - 'name' : either single-lexeme e.g. 'merger' or + multi-lexeme e.g. 
'a little' + - 'order': the order of the lexeme in the lemma (starting from 1) + - 'headword': a boolean ('true' or 'false') + - 'breakBefore': Can this lexeme be separated from the previous lexeme? + Consider: "take over.v" as in:: + + Germany took over the Netherlands in 2 days. + Germany took the Netherlands over in 2 days. + + In this case, 'breakBefore' would be "true" for the lexeme + "over". Contrast this with "take after.v" as in:: + + Mary takes after her grandmother. + *Mary takes her grandmother after. + + In this case, 'breakBefore' would be "false" for the lexeme "after" + + - 'lemmaID' : Can be used to connect lemmas in different LUs + - 'semTypes' : a list of semantic type objects for this LU + - 'subCorpus' : a list of subcorpora + - Each item in the list is a dict containing the following keys: + - 'name' : + - 'sentence' : a list of sentences in the subcorpus + - each item in the list is a dict with the following keys: + - 'ID': + - 'sentNo': + - 'text': the text of the sentence + - 'aPos': + - 'annotationSet': a list of annotation sets + - each item in the list is a dict with the following keys: + - 'ID': + - 'status': + - 'layer': a list of layers + - each layer is a dict containing the following keys: + - 'name': layer name (e.g. 'BNC') + - 'rank': + - 'label': a list of labels for the layer + - each label is a dict containing the following keys: + - 'start': start pos of label in sentence 'text' (0-based) + - 'end': end pos of label in sentence 'text' (0-based) + - 'name': name of label (e.g. 'NN1') + + Under the hood, this implementation looks up the lexical unit information + in the *frame* definition file. That file does not contain + corpus annotations, so the LU files will be accessed on demand if those are + needed. In principle, valence patterns could be loaded here too, + though these are not currently supported. + + :param fn_luid: The id number of the lexical unit + :type fn_luid: int + :param ignorekeys: The keys to ignore. These keys will not be + included in the output. (optional) + :type ignorekeys: list(str) + :return: All information about the lexical unit + :rtype: dict + """ + # look for this LU in cache + if not self._lu_idx: + self._buildluindex() + OOV = object() + luinfo = self._lu_idx.get(fn_luid, OOV) + if luinfo is OOV: + # LU not in the index. We create a placeholder by falling back to + # luName, frameID, and frameName. However, this will not be listed + # among the LUs for its frame. + self._warn( + "LU ID not found: {} ({}) in {} ({})".format( + luName, fn_luid, frameName, frameID + ) + ) + luinfo = AttrDict( + { + "_type": "lu", + "ID": fn_luid, + "name": luName, + "frameID": frameID, + "status": "Problem", + } + ) + f = self.frame_by_id(luinfo.frameID) + assert f.name == frameName, (f.name, frameName) + luinfo["frame"] = f + self._lu_idx[fn_luid] = luinfo + elif "_type" not in luinfo: + # we only have an index entry for the LU. loading the frame will replace this. + f = self.frame_by_id(luinfo.frameID) + luinfo = self._lu_idx[fn_luid] + if ignorekeys: + return AttrDict({k: v for k, v in luinfo.items() if k not in ignorekeys}) + + return luinfo + + def _lu_file(self, lu, ignorekeys=[]): + """ + Augment the LU information that was loaded from the frame file + with additional information from the LU file. 
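The subcorpus and exemplar data handled here are attached lazily to each LU (see the ``Future`` wrappers created in ``_handle_frame_elt`` further below). A minimal, self-contained sketch of that deferred-loading idea, using an illustrative ``LazyValue`` class rather than this module's own ``Future``:

    class LazyValue:
        _UNSET = object()

        def __init__(self, loader):
            self._loader = loader              # zero-argument callable
            self._value = LazyValue._UNSET

        def get(self):
            if self._value is LazyValue._UNSET:
                self._value = self._loader()   # run the expensive load once
            return self._value

    expensive = LazyValue(lambda: sum(range(10_000_000)))
    print(expensive.get())   # computed on first access
    print(expensive.get())   # served from the cached value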
+ """ + fn_luid = lu.ID + + fname = f"lu{fn_luid}.xml" + locpath = os.path.join(f"{self._root}", self._lu_dir, fname) + # print(locpath, file=sys.stderr) + if not self._lu_idx: + self._buildluindex() + + try: + with XMLCorpusView(locpath, "lexUnit") as view: + elt = view[0] + except OSError as e: + raise FramenetError(f"Unknown LU id: {fn_luid}") from e + + lu2 = self._handle_lexunit_elt(elt, ignorekeys) + lu.URL = self._fnweb_url + "/" + self._lu_dir + "/" + fname + lu.subCorpus = lu2.subCorpus + lu.exemplars = SpecialList( + "luexemplars", [sent for subc in lu.subCorpus for sent in subc.sentence] + ) + for sent in lu.exemplars: + sent["LU"] = lu + sent["frame"] = lu.frame + for aset in sent.annotationSet: + aset["LU"] = lu + aset["frame"] = lu.frame + + return lu + + def _loadsemtypes(self): + """Create the semantic types index.""" + self._semtypes = AttrDict() + with XMLCorpusView( + self.abspath("semTypes.xml"), + "semTypes/semType", + self._handle_semtype_elt, + ) as view: + for st in view: + n = st["name"] + a = st["abbrev"] + i = st["ID"] + # Both name and abbrev should be able to retrieve the + # ID. The ID will retrieve the semantic type dict itself. + self._semtypes[n] = i + self._semtypes[a] = i + self._semtypes[i] = st + # now that all individual semtype XML is loaded, we can link them together + roots = [] + for st in self.semtypes(): + if st.superType: + st.superType = self.semtype(st.superType.supID) + st.superType.subTypes.append(st) + else: + if st not in roots: + roots.append(st) + st.rootType = st + queue = list(roots) + assert queue + while queue: + st = queue.pop(0) + for child in st.subTypes: + child.rootType = st.rootType + queue.append(child) + # self.propagate_semtypes() # apply inferencing over FE relations + + def propagate_semtypes(self): + """ + Apply inference rules to distribute semtypes over relations between FEs. + For FrameNet 1.5, this results in 1011 semtypes being propagated. + (Not done by default because it requires loading all frame files, + which takes several seconds. If this needed to be fast, it could be rewritten + to traverse the neighboring relations on demand for each FE semtype.) + + >>> from nltk.corpus import framenet as fn + >>> x = sum(1 for f in fn.frames() for fe in f.FE.values() if fe.semType) + >>> fn.propagate_semtypes() + >>> y = sum(1 for f in fn.frames() for fe in f.FE.values() if fe.semType) + >>> y-x > 1000 + True + """ + if not self._semtypes: + self._loadsemtypes() + if not self._ferel_idx: + self._buildrelationindex() + changed = True + i = 0 + nPropagations = 0 + while changed: + # make a pass and see if anything needs to be propagated + i += 1 + changed = False + for ferel in self.fe_relations(): + superST = ferel.superFE.semType + subST = ferel.subFE.semType + try: + if superST and superST is not subST: + # propagate downward + assert subST is None or self.semtype_inherits(subST, superST), ( + superST.name, + ferel, + subST.name, + ) + if subST is None: + ferel.subFE.semType = subST = superST + changed = True + nPropagations += 1 + if ( + ferel.type.name in ["Perspective_on", "Subframe", "Precedes"] + and subST + and subST is not superST + ): + # propagate upward + assert superST is None, (superST.name, ferel, subST.name) + ferel.superFE.semType = superST = subST + changed = True + nPropagations += 1 + except AssertionError as ex: + # bug in the data! 
ignore + # print(ex, file=sys.stderr) + continue + # print(i, nPropagations, file=sys.stderr) + + def semtype(self, key): + """ + >>> from nltk.corpus import framenet as fn + >>> fn.semtype(233).name + 'Temperature' + >>> fn.semtype(233).abbrev + 'Temp' + >>> fn.semtype('Temperature').ID + 233 + + :param key: The name, abbreviation, or id number of the semantic type + :type key: string or int + :return: Information about a semantic type + :rtype: dict + """ + if isinstance(key, int): + stid = key + else: + try: + stid = self._semtypes[key] + except TypeError: + self._loadsemtypes() + stid = self._semtypes[key] + + try: + st = self._semtypes[stid] + except TypeError: + self._loadsemtypes() + st = self._semtypes[stid] + + return st + + def semtype_inherits(self, st, superST): + if not isinstance(st, dict): + st = self.semtype(st) + if not isinstance(superST, dict): + superST = self.semtype(superST) + par = st.superType + while par: + if par is superST: + return True + par = par.superType + return False + + def frames(self, name=None): + """ + Obtain details for a specific frame. + + >>> from nltk.corpus import framenet as fn + >>> len(fn.frames()) in (1019, 1221) # FN 1.5 and 1.7, resp. + True + >>> x = PrettyList(fn.frames(r'(?i)crim'), maxReprSize=0, breakLines=True) + >>> x.sort(key=itemgetter('ID')) + >>> x + [, + , + , + ] + + A brief intro to Frames (excerpted from "FrameNet II: Extended + Theory and Practice" by Ruppenhofer et. al., 2010): + + A Frame is a script-like conceptual structure that describes a + particular type of situation, object, or event along with the + participants and props that are needed for that Frame. For + example, the "Apply_heat" frame describes a common situation + involving a Cook, some Food, and a Heating_Instrument, and is + evoked by words such as bake, blanch, boil, broil, brown, + simmer, steam, etc. + + We call the roles of a Frame "frame elements" (FEs) and the + frame-evoking words are called "lexical units" (LUs). + + FrameNet includes relations between Frames. Several types of + relations are defined, of which the most important are: + + - Inheritance: An IS-A relation. The child frame is a subtype + of the parent frame, and each FE in the parent is bound to + a corresponding FE in the child. An example is the + "Revenge" frame which inherits from the + "Rewards_and_punishments" frame. + + - Using: The child frame presupposes the parent frame as + background, e.g the "Speed" frame "uses" (or presupposes) + the "Motion" frame; however, not all parent FEs need to be + bound to child FEs. + + - Subframe: The child frame is a subevent of a complex event + represented by the parent, e.g. the "Criminal_process" frame + has subframes of "Arrest", "Arraignment", "Trial", and + "Sentencing". + + - Perspective_on: The child frame provides a particular + perspective on an un-perspectivized parent frame. A pair of + examples consists of the "Hiring" and "Get_a_job" frames, + which perspectivize the "Employment_start" frame from the + Employer's and the Employee's point of view, respectively. + + :param name: A regular expression pattern used to match against + Frame names. If 'name' is None, then a list of all + Framenet Frames will be returned. + :type name: str + :return: A list of matching Frames (or all Frames). 
+ :rtype: list(AttrDict) + """ + try: + fIDs = list(self._frame_idx.keys()) + except AttributeError: + self._buildframeindex() + fIDs = list(self._frame_idx.keys()) + + if name is not None: + return PrettyList( + self.frame(fID) for fID, finfo in self.frame_ids_and_names(name).items() + ) + else: + return PrettyLazyMap(self.frame, fIDs) + + def frame_ids_and_names(self, name=None): + """ + Uses the frame index, which is much faster than looking up each frame definition + if only the names and IDs are needed. + """ + if not self._frame_idx: + self._buildframeindex() + return { + fID: finfo.name + for fID, finfo in self._frame_idx.items() + if name is None or re.search(name, finfo.name) is not None + } + + def fes(self, name=None, frame=None): + """ + Lists frame element objects. If 'name' is provided, this is treated as + a case-insensitive regular expression to filter by frame name. + (Case-insensitivity is because casing of frame element names is not always + consistent across frames.) Specify 'frame' to filter by a frame name pattern, + ID, or object. + + >>> from nltk.corpus import framenet as fn + >>> fn.fes('Noise_maker') + [] + >>> sorted([(fe.frame.name,fe.name) for fe in fn.fes('sound')]) # doctest: +NORMALIZE_WHITESPACE + [('Cause_to_make_noise', 'Sound_maker'), ('Make_noise', 'Sound'), + ('Make_noise', 'Sound_source'), ('Sound_movement', 'Location_of_sound_source'), + ('Sound_movement', 'Sound'), ('Sound_movement', 'Sound_source'), + ('Sounds', 'Component_sound'), ('Sounds', 'Location_of_sound_source'), + ('Sounds', 'Sound_source'), ('Vocalizations', 'Location_of_sound_source'), + ('Vocalizations', 'Sound_source')] + >>> sorted([(fe.frame.name,fe.name) for fe in fn.fes('sound',r'(?i)make_noise')]) # doctest: +NORMALIZE_WHITESPACE + [('Cause_to_make_noise', 'Sound_maker'), + ('Make_noise', 'Sound'), + ('Make_noise', 'Sound_source')] + >>> sorted(set(fe.name for fe in fn.fes('^sound'))) + ['Sound', 'Sound_maker', 'Sound_source'] + >>> len(fn.fes('^sound$')) + 2 + + :param name: A regular expression pattern used to match against + frame element names. If 'name' is None, then a list of all + frame elements will be returned. + :type name: str + :return: A list of matching frame elements + :rtype: list(AttrDict) + """ + # what frames are we searching in? + if frame is not None: + if isinstance(frame, int): + frames = [self.frame(frame)] + elif isinstance(frame, str): + frames = self.frames(frame) + else: + frames = [frame] + else: + frames = self.frames() + + return PrettyList( + fe + for f in frames + for fename, fe in f.FE.items() + if name is None or re.search(name, fename, re.I) + ) + + def lus(self, name=None, frame=None): + """ + Obtain details for lexical units. + Optionally restrict by lexical unit name pattern, and/or to a certain frame + or frames whose name matches a pattern. + + >>> from nltk.corpus import framenet as fn + >>> len(fn.lus()) in (11829, 13572) # FN 1.5 and 1.7, resp. + True + >>> PrettyList(sorted(fn.lus(r'(?i)a little'), key=itemgetter('ID')), maxReprSize=0, breakLines=True) + [, + , + ] + >>> PrettyList(sorted(fn.lus(r'interest', r'(?i)stimulus'), key=itemgetter('ID'))) + [, ] + + A brief intro to Lexical Units (excerpted from "FrameNet II: + Extended Theory and Practice" by Ruppenhofer et. al., 2010): + + A lexical unit (LU) is a pairing of a word with a meaning. 
For + example, the "Apply_heat" Frame describes a common situation + involving a Cook, some Food, and a Heating Instrument, and is + _evoked_ by words such as bake, blanch, boil, broil, brown, + simmer, steam, etc. These frame-evoking words are the LUs in the + Apply_heat frame. Each sense of a polysemous word is a different + LU. + + We have used the word "word" in talking about LUs. The reality + is actually rather complex. When we say that the word "bake" is + polysemous, we mean that the lemma "bake.v" (which has the + word-forms "bake", "bakes", "baked", and "baking") is linked to + three different frames: + + - Apply_heat: "Michelle baked the potatoes for 45 minutes." + + - Cooking_creation: "Michelle baked her mother a cake for her birthday." + + - Absorb_heat: "The potatoes have to bake for more than 30 minutes." + + These constitute three different LUs, with different + definitions. + + Multiword expressions such as "given name" and hyphenated words + like "shut-eye" can also be LUs. Idiomatic phrases such as + "middle of nowhere" and "give the slip (to)" are also defined as + LUs in the appropriate frames ("Isolated_places" and "Evading", + respectively), and their internal structure is not analyzed. + + Framenet provides multiple annotated examples of each sense of a + word (i.e. each LU). Moreover, the set of examples + (approximately 20 per LU) illustrates all of the combinatorial + possibilities of the lexical unit. + + Each LU is linked to a Frame, and hence to the other words which + evoke that Frame. This makes the FrameNet database similar to a + thesaurus, grouping together semantically similar words. + + In the simplest case, frame-evoking words are verbs such as + "fried" in: + + "Matilde fried the catfish in a heavy iron skillet." + + Sometimes event nouns may evoke a Frame. For example, + "reduction" evokes "Cause_change_of_scalar_position" in: + + "...the reduction of debt levels to $665 million from $2.6 billion." + + Adjectives may also evoke a Frame. For example, "asleep" may + evoke the "Sleep" frame as in: + + "They were asleep for hours." + + Many common nouns, such as artifacts like "hat" or "tower", + typically serve as dependents rather than clearly evoking their + own frames. + + :param name: A regular expression pattern used to search the LU + names. Note that LU names take the form of a dotted + string (e.g. "run.v" or "a little.adv") in which a + lemma precedes the "." and a POS follows the + dot. The lemma may be composed of a single lexeme + (e.g. "run") or of multiple lexemes (e.g. "a + little"). If 'name' is not given, then all LUs will + be returned. + + The valid POSes are: + + v - verb + n - noun + a - adjective + adv - adverb + prep - preposition + num - numbers + intj - interjection + art - article + c - conjunction + scon - subordinating conjunction + + :type name: str + :type frame: str or int or frame + :return: A list of selected (or all) lexical units + :rtype: list of LU objects (dicts). See the lu() function for info + about the specifics of LU objects. 
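A short usage sketch, with illustrative patterns and assuming the FrameNet data is installed:

    from nltk.corpus import framenet as fn

    # verb LUs whose lemma starts with "run", in any frame
    run_verbs = fn.lus(r'^run.*\.v$')
    print(len(run_verbs), 'matching LUs')

    # verb LUs restricted to frames whose name matches a pattern
    cooking_verbs = fn.lus(r'\.v$', r'(?i)cook')
    for lu in cooking_verbs[:5]:
        print(lu.ID, lu.name, '->', lu.frame.name)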
+ + """ + if not self._lu_idx: + self._buildluindex() + + if name is not None: # match LUs, then restrict by frame + result = PrettyList( + self.lu(luID) for luID, luName in self.lu_ids_and_names(name).items() + ) + if frame is not None: + if isinstance(frame, int): + frameIDs = {frame} + elif isinstance(frame, str): + frameIDs = {f.ID for f in self.frames(frame)} + else: + frameIDs = {frame.ID} + result = PrettyList(lu for lu in result if lu.frame.ID in frameIDs) + elif frame is not None: # all LUs in matching frames + if isinstance(frame, int): + frames = [self.frame(frame)] + elif isinstance(frame, str): + frames = self.frames(frame) + else: + frames = [frame] + result = PrettyLazyIteratorList( + iter(LazyConcatenation(list(f.lexUnit.values()) for f in frames)) + ) + else: # all LUs + luIDs = [ + luID + for luID, lu in self._lu_idx.items() + if lu.status not in self._bad_statuses + ] + result = PrettyLazyMap(self.lu, luIDs) + return result + + def lu_ids_and_names(self, name=None): + """ + Uses the LU index, which is much faster than looking up each LU definition + if only the names and IDs are needed. + """ + if not self._lu_idx: + self._buildluindex() + return { + luID: luinfo.name + for luID, luinfo in self._lu_idx.items() + if luinfo.status not in self._bad_statuses + and (name is None or re.search(name, luinfo.name) is not None) + } + + def docs_metadata(self, name=None): + """ + Return an index of the annotated documents in Framenet. + + Details for a specific annotated document can be obtained using this + class's doc() function and pass it the value of the 'ID' field. + + >>> from nltk.corpus import framenet as fn + >>> len(fn.docs()) in (78, 107) # FN 1.5 and 1.7, resp. + True + >>> set([x.corpname for x in fn.docs_metadata()])>=set(['ANC', 'KBEval', \ + 'LUCorpus-v0.3', 'Miscellaneous', 'NTI', 'PropBank']) + True + + :param name: A regular expression pattern used to search the + file name of each annotated document. The document's + file name contains the name of the corpus that the + document is from, followed by two underscores "__" + followed by the document name. So, for example, the + file name "LUCorpus-v0.3__20000410_nyt-NEW.xml" is + from the corpus named "LUCorpus-v0.3" and the + document name is "20000410_nyt-NEW.xml". + :type name: str + :return: A list of selected (or all) annotated documents + :rtype: list of dicts, where each dict object contains the following + keys: + + - 'name' + - 'ID' + - 'corpid' + - 'corpname' + - 'description' + - 'filename' + """ + try: + ftlist = PrettyList(self._fulltext_idx.values()) + except AttributeError: + self._buildcorpusindex() + ftlist = PrettyList(self._fulltext_idx.values()) + + if name is None: + return ftlist + else: + return PrettyList( + x for x in ftlist if re.search(name, x["filename"]) is not None + ) + + def docs(self, name=None): + """ + Return a list of the annotated full-text documents in FrameNet, + optionally filtered by a regex to be matched against the document name. + """ + return PrettyLazyMap((lambda x: self.doc(x.ID)), self.docs_metadata(name)) + + def sents(self, exemplars=True, full_text=True): + """ + Annotated sentences matching the specified criteria. + """ + if exemplars: + if full_text: + return self.exemplars() + self.ft_sents() + else: + return self.exemplars() + elif full_text: + return self.ft_sents() + + def annotations(self, luNamePattern=None, exemplars=True, full_text=True): + """ + Frame annotation sets matching the specified criteria. 
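An illustrative sketch of how ``sents()`` and ``annotations()`` combine the exemplar and full-text sources; the LU name pattern is an example and the FrameNet data is assumed to be installed:

    from nltk.corpus import framenet as fn

    exemplar_sents = fn.sents(full_text=False)   # exemplar sentences only
    fulltext_sents = fn.sents(exemplars=False)   # full-text sentences only

    # annotation sets for exemplars of LUs whose name matches a pattern
    bake_annos = fn.annotations(r'(?i)^bake\.v$', full_text=False)
    print(bake_annos[0].frame.name)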
+ """ + + if exemplars: + epart = PrettyLazyIteratorList( + sent.frameAnnotation for sent in self.exemplars(luNamePattern) + ) + else: + epart = [] + + if full_text: + if luNamePattern is not None: + matchedLUIDs = set(self.lu_ids_and_names(luNamePattern).keys()) + ftpart = PrettyLazyIteratorList( + aset + for sent in self.ft_sents() + for aset in sent.annotationSet[1:] + if luNamePattern is None or aset.get("luID", "CXN_ASET") in matchedLUIDs + ) + else: + ftpart = [] + + if exemplars: + if full_text: + return epart + ftpart + else: + return epart + elif full_text: + return ftpart + + def exemplars(self, luNamePattern=None, frame=None, fe=None, fe2=None): + """ + Lexicographic exemplar sentences, optionally filtered by LU name and/or 1-2 FEs that + are realized overtly. 'frame' may be a name pattern, frame ID, or frame instance. + 'fe' may be a name pattern or FE instance; if specified, 'fe2' may also + be specified to retrieve sentences with both overt FEs (in either order). + """ + if fe is None and fe2 is not None: + raise FramenetError("exemplars(..., fe=None, fe2=) is not allowed") + elif fe is not None and fe2 is not None: + if not isinstance(fe2, str): + if isinstance(fe, str): + # fe2 is specific to a particular frame. swap fe and fe2 so fe is always used to determine the frame. + fe, fe2 = fe2, fe + elif fe.frame is not fe2.frame: # ensure frames match + raise FramenetError( + "exemplars() call with inconsistent `fe` and `fe2` specification (frames must match)" + ) + if frame is None and fe is not None and not isinstance(fe, str): + frame = fe.frame + + # narrow down to frames matching criteria + + lusByFrame = defaultdict( + list + ) # frame name -> matching LUs, if luNamePattern is specified + if frame is not None or luNamePattern is not None: + if frame is None or isinstance(frame, str): + if luNamePattern is not None: + frames = set() + for lu in self.lus(luNamePattern, frame=frame): + frames.add(lu.frame.ID) + lusByFrame[lu.frame.name].append(lu) + frames = LazyMap(self.frame, list(frames)) + else: + frames = self.frames(frame) + else: + if isinstance(frame, int): + frames = [self.frame(frame)] + else: # frame object + frames = [frame] + + if luNamePattern is not None: + lusByFrame = {frame.name: self.lus(luNamePattern, frame=frame)} + + if fe is not None: # narrow to frames that define this FE + if isinstance(fe, str): + frames = PrettyLazyIteratorList( + f + for f in frames + if fe in f.FE + or any(re.search(fe, ffe, re.I) for ffe in f.FE.keys()) + ) + else: + if fe.frame not in frames: + raise FramenetError( + "exemplars() call with inconsistent `frame` and `fe` specification" + ) + frames = [fe.frame] + + if fe2 is not None: # narrow to frames that ALSO define this FE + if isinstance(fe2, str): + frames = PrettyLazyIteratorList( + f + for f in frames + if fe2 in f.FE + or any(re.search(fe2, ffe, re.I) for ffe in f.FE.keys()) + ) + # else we already narrowed it to a single frame + else: # frame, luNamePattern are None. 
fe, fe2 are None or strings + if fe is not None: + frames = {ffe.frame.ID for ffe in self.fes(fe)} + if fe2 is not None: + frames2 = {ffe.frame.ID for ffe in self.fes(fe2)} + frames = frames & frames2 + frames = LazyMap(self.frame, list(frames)) + else: + frames = self.frames() + + # we've narrowed down 'frames' + # now get exemplars for relevant LUs in those frames + + def _matching_exs(): + for f in frames: + fes = fes2 = None # FEs of interest + if fe is not None: + fes = ( + {ffe for ffe in f.FE.keys() if re.search(fe, ffe, re.I)} + if isinstance(fe, str) + else {fe.name} + ) + if fe2 is not None: + fes2 = ( + {ffe for ffe in f.FE.keys() if re.search(fe2, ffe, re.I)} + if isinstance(fe2, str) + else {fe2.name} + ) + + for lu in ( + lusByFrame[f.name] + if luNamePattern is not None + else f.lexUnit.values() + ): + for ex in lu.exemplars: + if (fes is None or self._exemplar_of_fes(ex, fes)) and ( + fes2 is None or self._exemplar_of_fes(ex, fes2) + ): + yield ex + + return PrettyLazyIteratorList(_matching_exs()) + + def _exemplar_of_fes(self, ex, fes=None): + """ + Given an exemplar sentence and a set of FE names, return the subset of FE names + that are realized overtly in the sentence on the FE, FE2, or FE3 layer. + + If 'fes' is None, returns all overt FE names. + """ + overtNames = set(list(zip(*ex.FE[0]))[2]) if ex.FE[0] else set() + if "FE2" in ex: + overtNames |= set(list(zip(*ex.FE2[0]))[2]) if ex.FE2[0] else set() + if "FE3" in ex: + overtNames |= set(list(zip(*ex.FE3[0]))[2]) if ex.FE3[0] else set() + return overtNames & fes if fes is not None else overtNames + + def ft_sents(self, docNamePattern=None): + """ + Full-text annotation sentences, optionally filtered by document name. + """ + return PrettyLazyIteratorList( + sent for d in self.docs(docNamePattern) for sent in d.sentence + ) + + def frame_relation_types(self): + """ + Obtain a list of frame relation types. + + >>> from nltk.corpus import framenet as fn + >>> frts = sorted(fn.frame_relation_types(), key=itemgetter('ID')) + >>> isinstance(frts, list) + True + >>> len(frts) in (9, 10) # FN 1.5 and 1.7, resp. + True + >>> PrettyDict(frts[0], breakLines=True) + {'ID': 1, + '_type': 'framerelationtype', + 'frameRelations': [ Child=Change_of_consistency>, Child=Rotting>, ...], + 'name': 'Inheritance', + 'subFrameName': 'Child', + 'superFrameName': 'Parent'} + + :return: A list of all of the frame relation types in framenet + :rtype: list(dict) + """ + if not self._freltyp_idx: + self._buildrelationindex() + return self._freltyp_idx.values() + + def frame_relations(self, frame=None, frame2=None, type=None): + """ + :param frame: (optional) frame object, name, or ID; only relations involving + this frame will be returned + :param frame2: (optional; 'frame' must be a different frame) only show relations + between the two specified frames, in either direction + :param type: (optional) frame relation type (name or object); show only relations + of this type + :type frame: int or str or AttrDict + :return: A list of all of the frame relations in framenet + :rtype: list(dict) + + >>> from nltk.corpus import framenet as fn + >>> frels = fn.frame_relations() + >>> isinstance(frels, list) + True + >>> len(frels) in (1676, 2070) # FN 1.5 and 1.7, resp. 
+ True + >>> PrettyList(fn.frame_relations('Cooking_creation'), maxReprSize=0, breakLines=True) + [ Child=Cooking_creation>, + Child=Cooking_creation>, + ReferringEntry=Cooking_creation>] + >>> PrettyList(fn.frame_relations(274), breakLines=True) + [ Child=Dodging>, + Child=Evading>, ...] + >>> PrettyList(fn.frame_relations(fn.frame('Cooking_creation')), breakLines=True) + [ Child=Cooking_creation>, + Child=Cooking_creation>, ...] + >>> PrettyList(fn.frame_relations('Cooking_creation', type='Inheritance')) + [ Child=Cooking_creation>] + >>> PrettyList(fn.frame_relations('Cooking_creation', 'Apply_heat'), breakLines=True) # doctest: +NORMALIZE_WHITESPACE + [ Child=Cooking_creation>, + ReferringEntry=Cooking_creation>] + """ + relation_type = type + + if not self._frel_idx: + self._buildrelationindex() + + rels = None + + if relation_type is not None: + if not isinstance(relation_type, dict): + type = [rt for rt in self.frame_relation_types() if rt.name == type][0] + assert isinstance(type, dict) + + # lookup by 'frame' + if frame is not None: + if isinstance(frame, dict) and "frameRelations" in frame: + rels = PrettyList(frame.frameRelations) + else: + if not isinstance(frame, int): + if isinstance(frame, dict): + frame = frame.ID + else: + frame = self.frame_by_name(frame).ID + rels = [self._frel_idx[frelID] for frelID in self._frel_f_idx[frame]] + + # filter by 'type' + if type is not None: + rels = [rel for rel in rels if rel.type is type] + elif type is not None: + # lookup by 'type' + rels = type.frameRelations + else: + rels = self._frel_idx.values() + + # filter by 'frame2' + if frame2 is not None: + if frame is None: + raise FramenetError( + "frame_relations(frame=None, frame2=) is not allowed" + ) + if not isinstance(frame2, int): + if isinstance(frame2, dict): + frame2 = frame2.ID + else: + frame2 = self.frame_by_name(frame2).ID + if frame == frame2: + raise FramenetError( + "The two frame arguments to frame_relations() must be different frames" + ) + rels = [ + rel + for rel in rels + if rel.superFrame.ID == frame2 or rel.subFrame.ID == frame2 + ] + + return PrettyList( + sorted( + rels, + key=lambda frel: (frel.type.ID, frel.superFrameName, frel.subFrameName), + ) + ) + + def fe_relations(self): + """ + Obtain a list of frame element relations. + + >>> from nltk.corpus import framenet as fn + >>> ferels = fn.fe_relations() + >>> isinstance(ferels, list) + True + >>> len(ferels) in (10020, 12393) # FN 1.5 and 1.7, resp. + True + >>> PrettyDict(ferels[0], breakLines=True) # doctest: +NORMALIZE_WHITESPACE + {'ID': 14642, + '_type': 'ferelation', + 'frameRelation': Child=Lively_place>, + 'subFE': , + 'subFEName': 'Degree', + 'subFrame': , + 'subID': 11370, + 'supID': 2271, + 'superFE': , + 'superFEName': 'Degree', + 'superFrame': , + 'type': } + + :return: A list of all of the frame element relations in framenet + :rtype: list(dict) + """ + if not self._ferel_idx: + self._buildrelationindex() + return PrettyList( + sorted( + self._ferel_idx.values(), + key=lambda ferel: ( + ferel.type.ID, + ferel.frameRelation.superFrameName, + ferel.superFEName, + ferel.frameRelation.subFrameName, + ferel.subFEName, + ), + ) + ) + + def semtypes(self): + """ + Obtain a list of semantic types. + + >>> from nltk.corpus import framenet as fn + >>> stypes = fn.semtypes() + >>> len(stypes) in (73, 109) # FN 1.5 and 1.7, resp. 
+ True + >>> sorted(stypes[0].keys()) + ['ID', '_type', 'abbrev', 'definition', 'definitionMarkup', 'name', 'rootType', 'subTypes', 'superType'] + + :return: A list of all of the semantic types in framenet + :rtype: list(dict) + """ + if not self._semtypes: + self._loadsemtypes() + return PrettyList( + self._semtypes[i] for i in self._semtypes if isinstance(i, int) + ) + + def _load_xml_attributes(self, d, elt): + """ + Extracts a subset of the attributes from the given element and + returns them in a dictionary. + + :param d: A dictionary in which to store the attributes. + :type d: dict + :param elt: An ElementTree Element + :type elt: Element + :return: Returns the input dict ``d`` possibly including attributes from ``elt`` + :rtype: dict + """ + + d = type(d)(d) + + try: + attr_dict = elt.attrib + except AttributeError: + return d + + if attr_dict is None: + return d + + # Ignore these attributes when loading attributes from an xml node + ignore_attrs = [ #'cBy', 'cDate', 'mDate', # <-- annotation metadata that could be of interest + "xsi", + "schemaLocation", + "xmlns", + "bgColor", + "fgColor", + ] + + for attr in attr_dict: + + if any(attr.endswith(x) for x in ignore_attrs): + continue + + val = attr_dict[attr] + if val.isdigit(): + d[attr] = int(val) + else: + d[attr] = val + + return d + + def _strip_tags(self, data): + """ + Gets rid of all tags and newline characters from the given input + + :return: A cleaned-up version of the input string + :rtype: str + """ + + try: + r""" + # Look for boundary issues in markup. (Sometimes FEs are pluralized in definitions.) + m = re.search(r'\w[<][^/]|[<][/][^>]+[>](s\w|[a-rt-z0-9])', data) + if m: + print('Markup boundary:', data[max(0,m.start(0)-10):m.end(0)+10].replace('\n',' '), file=sys.stderr) + """ + + data = data.replace("", "") + data = data.replace("", "") + data = re.sub('', "", data) + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "'") + data = data.replace("", "'") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + data = data.replace("", "") + + # Get rid of and tags + data = data.replace("", "") + data = data.replace("", "") + + data = data.replace("\n", " ") + except AttributeError: + pass + + return data + + def _handle_elt(self, elt, tagspec=None): + """Extracts and returns the attributes of the given element""" + return self._load_xml_attributes(AttrDict(), elt) + + def _handle_fulltextindex_elt(self, elt, tagspec=None): + """ + Extracts corpus/document info from the fulltextIndex.xml file. + + Note that this function "flattens" the information contained + in each of the "corpus" elements, so that each "document" + element will contain attributes for the corpus and + corpusid. Also, each of the "document" items will contain a + new attribute called "filename" that is the base file name of + the xml file for the document in the "fulltext" subdir of the + Framenet corpus. 
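Downstream, that ``corpname__docname.xml`` filename convention is what ``docs_metadata()`` matches against. An illustrative sketch (the corpus name ``ANC`` is taken from the examples above; data assumed installed):

    from nltk.corpus import framenet as fn

    anc_docs = fn.docs_metadata(r'^ANC__')        # documents from the ANC corpus
    for meta in anc_docs[:3]:
        corpus_name, doc_name = meta.filename.split('__', 1)
        print(meta.ID, corpus_name, doc_name)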
+ """ + ftinfo = self._load_xml_attributes(AttrDict(), elt) + corpname = ftinfo.name + corpid = ftinfo.ID + retlist = [] + for sub in elt: + if sub.tag.endswith("document"): + doc = self._load_xml_attributes(AttrDict(), sub) + if "name" in doc: + docname = doc.name + else: + docname = doc.description + doc.filename = f"{corpname}__{docname}.xml" + doc.URL = ( + self._fnweb_url + "/" + self._fulltext_dir + "/" + doc.filename + ) + doc.corpname = corpname + doc.corpid = corpid + retlist.append(doc) + + return retlist + + def _handle_frame_elt(self, elt, ignorekeys=[]): + """Load the info for a Frame from a frame xml file""" + frinfo = self._load_xml_attributes(AttrDict(), elt) + + frinfo["_type"] = "frame" + frinfo["definition"] = "" + frinfo["definitionMarkup"] = "" + frinfo["FE"] = PrettyDict() + frinfo["FEcoreSets"] = [] + frinfo["lexUnit"] = PrettyDict() + frinfo["semTypes"] = [] + for k in ignorekeys: + if k in frinfo: + del frinfo[k] + + for sub in elt: + if sub.tag.endswith("definition") and "definition" not in ignorekeys: + frinfo["definitionMarkup"] = sub.text + frinfo["definition"] = self._strip_tags(sub.text) + elif sub.tag.endswith("FE") and "FE" not in ignorekeys: + feinfo = self._handle_fe_elt(sub) + frinfo["FE"][feinfo.name] = feinfo + feinfo["frame"] = frinfo # backpointer + elif sub.tag.endswith("FEcoreSet") and "FEcoreSet" not in ignorekeys: + coreset = self._handle_fecoreset_elt(sub) + # assumes all FEs have been loaded before coresets + frinfo["FEcoreSets"].append( + PrettyList(frinfo["FE"][fe.name] for fe in coreset) + ) + elif sub.tag.endswith("lexUnit") and "lexUnit" not in ignorekeys: + luentry = self._handle_framelexunit_elt(sub) + if luentry["status"] in self._bad_statuses: + # problematic LU entry; ignore it + continue + luentry["frame"] = frinfo + luentry["URL"] = ( + self._fnweb_url + + "/" + + self._lu_dir + + "/" + + "lu{}.xml".format(luentry["ID"]) + ) + luentry["subCorpus"] = Future( + (lambda lu: lambda: self._lu_file(lu).subCorpus)(luentry) + ) + luentry["exemplars"] = Future( + (lambda lu: lambda: self._lu_file(lu).exemplars)(luentry) + ) + frinfo["lexUnit"][luentry.name] = luentry + if not self._lu_idx: + self._buildluindex() + self._lu_idx[luentry.ID] = luentry + elif sub.tag.endswith("semType") and "semTypes" not in ignorekeys: + semtypeinfo = self._load_xml_attributes(AttrDict(), sub) + frinfo["semTypes"].append(self.semtype(semtypeinfo.ID)) + + frinfo["frameRelations"] = self.frame_relations(frame=frinfo) + + # resolve 'requires' and 'excludes' links between FEs of this frame + for fe in frinfo.FE.values(): + if fe.requiresFE: + name, ID = fe.requiresFE.name, fe.requiresFE.ID + fe.requiresFE = frinfo.FE[name] + assert fe.requiresFE.ID == ID + if fe.excludesFE: + name, ID = fe.excludesFE.name, fe.excludesFE.ID + fe.excludesFE = frinfo.FE[name] + assert fe.excludesFE.ID == ID + + return frinfo + + def _handle_fecoreset_elt(self, elt): + """Load fe coreset info from xml.""" + info = self._load_xml_attributes(AttrDict(), elt) + tmp = [] + for sub in elt: + tmp.append(self._load_xml_attributes(AttrDict(), sub)) + + return tmp + + def _handle_framerelationtype_elt(self, elt, *args): + """Load frame-relation element and its child fe-relation elements from frRelation.xml.""" + info = self._load_xml_attributes(AttrDict(), elt) + info["_type"] = "framerelationtype" + info["frameRelations"] = PrettyList() + + for sub in elt: + if sub.tag.endswith("frameRelation"): + frel = self._handle_framerelation_elt(sub) + frel["type"] = info # backpointer + for ferel in 
frel.feRelations: + ferel["type"] = info + info["frameRelations"].append(frel) + + return info + + def _handle_framerelation_elt(self, elt): + """Load frame-relation element and its child fe-relation elements from frRelation.xml.""" + info = self._load_xml_attributes(AttrDict(), elt) + assert info["superFrameName"] != info["subFrameName"], (elt, info) + info["_type"] = "framerelation" + info["feRelations"] = PrettyList() + + for sub in elt: + if sub.tag.endswith("FERelation"): + ferel = self._handle_elt(sub) + ferel["_type"] = "ferelation" + ferel["frameRelation"] = info # backpointer + info["feRelations"].append(ferel) + + return info + + def _handle_fulltextannotation_elt(self, elt): + """Load full annotation info for a document from its xml + file. The main element (fullTextAnnotation) contains a 'header' + element (which we ignore here) and a bunch of 'sentence' + elements.""" + info = AttrDict() + info["_type"] = "fulltext_annotation" + info["sentence"] = [] + + for sub in elt: + if sub.tag.endswith("header"): + continue # not used + elif sub.tag.endswith("sentence"): + s = self._handle_fulltext_sentence_elt(sub) + s.doc = info + info["sentence"].append(s) + + return info + + def _handle_fulltext_sentence_elt(self, elt): + """Load information from the given 'sentence' element. Each + 'sentence' element contains a "text" and "annotationSet" sub + elements.""" + info = self._load_xml_attributes(AttrDict(), elt) + info["_type"] = "fulltext_sentence" + info["annotationSet"] = [] + info["targets"] = [] + target_spans = set() + info["_ascii"] = types.MethodType( + _annotation_ascii, info + ) # attach a method for this instance + info["text"] = "" + + for sub in elt: + if sub.tag.endswith("text"): + info["text"] = self._strip_tags(sub.text) + elif sub.tag.endswith("annotationSet"): + a = self._handle_fulltextannotationset_elt( + sub, is_pos=(len(info["annotationSet"]) == 0) + ) + if "cxnID" in a: # ignoring construction annotations for now + continue + a.sent = info + a.text = info.text + info["annotationSet"].append(a) + if "Target" in a: + for tspan in a.Target: + if tspan in target_spans: + self._warn( + 'Duplicate target span "{}"'.format( + info.text[slice(*tspan)] + ), + tspan, + "in sentence", + info["ID"], + info.text, + ) + # this can happen in cases like "chemical and biological weapons" + # being annotated as "chemical weapons" and "biological weapons" + else: + target_spans.add(tspan) + info["targets"].append((a.Target, a.luName, a.frameName)) + + assert info["annotationSet"][0].status == "UNANN" + info["POS"] = info["annotationSet"][0].POS + info["POS_tagset"] = info["annotationSet"][0].POS_tagset + return info + + def _handle_fulltextannotationset_elt(self, elt, is_pos=False): + """Load information from the given 'annotationSet' element. Each + 'annotationSet' contains several "layer" elements.""" + + info = self._handle_luannotationset_elt(elt, is_pos=is_pos) + if not is_pos: + info["_type"] = "fulltext_annotationset" + if "cxnID" not in info: # ignoring construction annotations for now + info["LU"] = self.lu( + info.luID, + luName=info.luName, + frameID=info.frameID, + frameName=info.frameName, + ) + info["frame"] = info.LU.frame + return info + + def _handle_fulltextlayer_elt(self, elt): + """Load information from the given 'layer' element. 
Each + 'layer' contains several "label" elements.""" + info = self._load_xml_attributes(AttrDict(), elt) + info["_type"] = "layer" + info["label"] = [] + + for sub in elt: + if sub.tag.endswith("label"): + l = self._load_xml_attributes(AttrDict(), sub) + info["label"].append(l) + + return info + + def _handle_framelexunit_elt(self, elt): + """Load the lexical unit info from an xml element in a frame's xml file.""" + luinfo = AttrDict() + luinfo["_type"] = "lu" + luinfo = self._load_xml_attributes(luinfo, elt) + luinfo["definition"] = "" + luinfo["definitionMarkup"] = "" + luinfo["sentenceCount"] = PrettyDict() + luinfo["lexemes"] = PrettyList() # multiword LUs have multiple lexemes + luinfo["semTypes"] = PrettyList() # an LU can have multiple semtypes + + for sub in elt: + if sub.tag.endswith("definition"): + luinfo["definitionMarkup"] = sub.text + luinfo["definition"] = self._strip_tags(sub.text) + elif sub.tag.endswith("sentenceCount"): + luinfo["sentenceCount"] = self._load_xml_attributes(PrettyDict(), sub) + elif sub.tag.endswith("lexeme"): + lexemeinfo = self._load_xml_attributes(PrettyDict(), sub) + if not isinstance(lexemeinfo.name, str): + # some lexeme names are ints by default: e.g., + # thousand.num has lexeme with name="1000" + lexemeinfo.name = str(lexemeinfo.name) + luinfo["lexemes"].append(lexemeinfo) + elif sub.tag.endswith("semType"): + semtypeinfo = self._load_xml_attributes(PrettyDict(), sub) + luinfo["semTypes"].append(self.semtype(semtypeinfo.ID)) + + # sort lexemes by 'order' attribute + # otherwise, e.g., 'write down.v' may have lexemes in wrong order + luinfo["lexemes"].sort(key=lambda x: x.order) + + return luinfo + + def _handle_lexunit_elt(self, elt, ignorekeys): + """ + Load full info for a lexical unit from its xml file. + This should only be called when accessing corpus annotations + (which are not included in frame files). 
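From the user's side this shows up as on-demand loading. A sketch, assuming the data is installed, where the first access to ``exemplars`` is what triggers parsing of the per-LU file:

    from nltk.corpus import framenet as fn

    lu = fn.lu(256)          # summary info resolved from the frame file / index
    ex = lu.exemplars[0]     # first access parses lu256.xml on demand
    print(ex.frame.name)
    print(ex.text[:60])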
+ """ + luinfo = self._load_xml_attributes(AttrDict(), elt) + luinfo["_type"] = "lu" + luinfo["definition"] = "" + luinfo["definitionMarkup"] = "" + luinfo["subCorpus"] = PrettyList() + luinfo["lexemes"] = PrettyList() # multiword LUs have multiple lexemes + luinfo["semTypes"] = PrettyList() # an LU can have multiple semtypes + for k in ignorekeys: + if k in luinfo: + del luinfo[k] + + for sub in elt: + if sub.tag.endswith("header"): + continue # not used + elif sub.tag.endswith("valences"): + continue # not used + elif sub.tag.endswith("definition") and "definition" not in ignorekeys: + luinfo["definitionMarkup"] = sub.text + luinfo["definition"] = self._strip_tags(sub.text) + elif sub.tag.endswith("subCorpus") and "subCorpus" not in ignorekeys: + sc = self._handle_lusubcorpus_elt(sub) + if sc is not None: + luinfo["subCorpus"].append(sc) + elif sub.tag.endswith("lexeme") and "lexeme" not in ignorekeys: + luinfo["lexemes"].append(self._load_xml_attributes(PrettyDict(), sub)) + elif sub.tag.endswith("semType") and "semType" not in ignorekeys: + semtypeinfo = self._load_xml_attributes(AttrDict(), sub) + luinfo["semTypes"].append(self.semtype(semtypeinfo.ID)) + + return luinfo + + def _handle_lusubcorpus_elt(self, elt): + """Load a subcorpus of a lexical unit from the given xml.""" + sc = AttrDict() + try: + sc["name"] = elt.get("name") + except AttributeError: + return None + sc["_type"] = "lusubcorpus" + sc["sentence"] = [] + + for sub in elt: + if sub.tag.endswith("sentence"): + s = self._handle_lusentence_elt(sub) + if s is not None: + sc["sentence"].append(s) + + return sc + + def _handle_lusentence_elt(self, elt): + """Load a sentence from a subcorpus of an LU from xml.""" + info = self._load_xml_attributes(AttrDict(), elt) + info["_type"] = "lusentence" + info["annotationSet"] = [] + info["_ascii"] = types.MethodType( + _annotation_ascii, info + ) # attach a method for this instance + for sub in elt: + if sub.tag.endswith("text"): + info["text"] = self._strip_tags(sub.text) + elif sub.tag.endswith("annotationSet"): + annset = self._handle_luannotationset_elt( + sub, is_pos=(len(info["annotationSet"]) == 0) + ) + if annset is not None: + assert annset.status == "UNANN" or "FE" in annset, annset + if annset.status != "UNANN": + info["frameAnnotation"] = annset + # copy layer info up to current level + for k in ( + "Target", + "FE", + "FE2", + "FE3", + "GF", + "PT", + "POS", + "POS_tagset", + "Other", + "Sent", + "Verb", + "Noun", + "Adj", + "Adv", + "Prep", + "Scon", + "Art", + ): + if k in annset: + info[k] = annset[k] + info["annotationSet"].append(annset) + annset["sent"] = info + annset["text"] = info.text + return info + + def _handle_luannotationset_elt(self, elt, is_pos=False): + """Load an annotation set from a sentence in an subcorpus of an LU""" + info = self._load_xml_attributes(AttrDict(), elt) + info["_type"] = "posannotationset" if is_pos else "luannotationset" + info["layer"] = [] + info["_ascii"] = types.MethodType( + _annotation_ascii, info + ) # attach a method for this instance + + if "cxnID" in info: # ignoring construction annotations for now. 
+ return info + + for sub in elt: + if sub.tag.endswith("layer"): + l = self._handle_lulayer_elt(sub) + if l is not None: + overt = [] + ni = {} # null instantiations + + info["layer"].append(l) + for lbl in l.label: + if "start" in lbl: + thespan = (lbl.start, lbl.end + 1, lbl.name) + if l.name not in ( + "Sent", + "Other", + ): # 'Sent' and 'Other' layers sometimes contain accidental duplicate spans + assert thespan not in overt, (info.ID, l.name, thespan) + overt.append(thespan) + else: # null instantiation + if lbl.name in ni: + self._warn( + "FE with multiple NI entries:", + lbl.name, + ni[lbl.name], + lbl.itype, + ) + else: + ni[lbl.name] = lbl.itype + overt = sorted(overt) + + if l.name == "Target": + if not overt: + self._warn( + "Skipping empty Target layer in annotation set ID={}".format( + info.ID + ) + ) + continue + assert all(lblname == "Target" for i, j, lblname in overt) + if "Target" in info: + self._warn( + "Annotation set {} has multiple Target layers".format( + info.ID + ) + ) + else: + info["Target"] = [(i, j) for (i, j, _) in overt] + elif l.name == "FE": + if l.rank == 1: + assert "FE" not in info + info["FE"] = (overt, ni) + # assert False,info + else: + # sometimes there are 3 FE layers! e.g. Change_position_on_a_scale.fall.v + assert 2 <= l.rank <= 3, l.rank + k = "FE" + str(l.rank) + assert k not in info + info[k] = (overt, ni) + elif l.name in ("GF", "PT"): + assert l.rank == 1 + info[l.name] = overt + elif l.name in ("BNC", "PENN"): + assert l.rank == 1 + info["POS"] = overt + info["POS_tagset"] = l.name + else: + if is_pos: + if l.name not in ("NER", "WSL"): + self._warn( + "Unexpected layer in sentence annotationset:", + l.name, + ) + else: + if l.name not in ( + "Sent", + "Verb", + "Noun", + "Adj", + "Adv", + "Prep", + "Scon", + "Art", + "Other", + ): + self._warn( + "Unexpected layer in frame annotationset:", l.name + ) + info[l.name] = overt + if not is_pos and "cxnID" not in info: + if "Target" not in info: + self._warn(f"Missing target in annotation set ID={info.ID}") + assert "FE" in info + if "FE3" in info: + assert "FE2" in info + + return info + + def _handle_lulayer_elt(self, elt): + """Load a layer from an annotation set""" + layer = self._load_xml_attributes(AttrDict(), elt) + layer["_type"] = "lulayer" + layer["label"] = [] + + for sub in elt: + if sub.tag.endswith("label"): + l = self._load_xml_attributes(AttrDict(), sub) + if l is not None: + layer["label"].append(l) + return layer + + def _handle_fe_elt(self, elt): + feinfo = self._load_xml_attributes(AttrDict(), elt) + feinfo["_type"] = "fe" + feinfo["definition"] = "" + feinfo["definitionMarkup"] = "" + feinfo["semType"] = None + feinfo["requiresFE"] = None + feinfo["excludesFE"] = None + for sub in elt: + if sub.tag.endswith("definition"): + feinfo["definitionMarkup"] = sub.text + feinfo["definition"] = self._strip_tags(sub.text) + elif sub.tag.endswith("semType"): + stinfo = self._load_xml_attributes(AttrDict(), sub) + feinfo["semType"] = self.semtype(stinfo.ID) + elif sub.tag.endswith("requiresFE"): + feinfo["requiresFE"] = self._load_xml_attributes(AttrDict(), sub) + elif sub.tag.endswith("excludesFE"): + feinfo["excludesFE"] = self._load_xml_attributes(AttrDict(), sub) + + return feinfo + + def _handle_semtype_elt(self, elt, tagspec=None): + semt = self._load_xml_attributes(AttrDict(), elt) + semt["_type"] = "semtype" + semt["superType"] = None + semt["subTypes"] = PrettyList() + for sub in elt: + if sub.text is not None: + semt["definitionMarkup"] = sub.text + semt["definition"] = 
self._strip_tags(sub.text) + else: + supertypeinfo = self._load_xml_attributes(AttrDict(), sub) + semt["superType"] = supertypeinfo + # the supertype may not have been loaded yet + + return semt + + +# +# Demo +# +def demo(): + from nltk.corpus import framenet as fn + + # + # It is not necessary to explicitly build the indexes by calling + # buildindexes(). We do this here just for demo purposes. If the + # indexes are not built explicitly, they will be built as needed. + # + print("Building the indexes...") + fn.buildindexes() + + # + # Get some statistics about the corpus + # + print("Number of Frames:", len(fn.frames())) + print("Number of Lexical Units:", len(fn.lus())) + print("Number of annotated documents:", len(fn.docs())) + print() + + # + # Frames + # + print( + 'getting frames whose name matches the (case insensitive) regex: "(?i)medical"' + ) + medframes = fn.frames(r"(?i)medical") + print(f'Found {len(medframes)} Frames whose name matches "(?i)medical":') + print([(f.name, f.ID) for f in medframes]) + + # + # store the first frame in the list of frames + # + tmp_id = medframes[0].ID + m_frame = fn.frame(tmp_id) # reads all info for the frame + + # + # get the frame relations + # + print( + '\nNumber of frame relations for the "{}" ({}) frame:'.format( + m_frame.name, m_frame.ID + ), + len(m_frame.frameRelations), + ) + for fr in m_frame.frameRelations: + print(" ", fr) + + # + # get the names of the Frame Elements + # + print( + f'\nNumber of Frame Elements in the "{m_frame.name}" frame:', + len(m_frame.FE), + ) + print(" ", [x for x in m_frame.FE]) + + # + # get the names of the "Core" Frame Elements + # + print(f'\nThe "core" Frame Elements in the "{m_frame.name}" frame:') + print(" ", [x.name for x in m_frame.FE.values() if x.coreType == "Core"]) + + # + # get all of the Lexical Units that are incorporated in the + # 'Ailment' FE of the 'Medical_conditions' frame (id=239) + # + print('\nAll Lexical Units that are incorporated in the "Ailment" FE:') + m_frame = fn.frame(239) + ailment_lus = [ + x + for x in m_frame.lexUnit.values() + if "incorporatedFE" in x and x.incorporatedFE == "Ailment" + ] + print(" ", [x.name for x in ailment_lus]) + + # + # get all of the Lexical Units for the frame + # + print( + f'\nNumber of Lexical Units in the "{m_frame.name}" frame:', + len(m_frame.lexUnit), + ) + print(" ", [x.name for x in m_frame.lexUnit.values()][:5], "...") + + # + # get basic info on the second LU in the frame + # + tmp_id = m_frame.lexUnit["ailment.n"].ID # grab the id of the specified LU + luinfo = fn.lu_basic(tmp_id) # get basic info on the LU + print(f"\nInformation on the LU: {luinfo.name}") + pprint(luinfo) + + # + # Get a list of all of the corpora used for fulltext annotation + # + print("\nNames of all of the corpora used for fulltext annotation:") + allcorpora = {x.corpname for x in fn.docs_metadata()} + pprint(list(allcorpora)) + + # + # Get the names of the annotated documents in the first corpus + # + firstcorp = list(allcorpora)[0] + firstcorp_docs = fn.docs(firstcorp) + print(f'\nNames of the annotated documents in the "{firstcorp}" corpus:') + pprint([x.filename for x in firstcorp_docs]) + + # + # Search for frames containing LUs whose name attribute matches a + # regexp pattern. + # + # Note: if you were going to be doing a lot of this type of + # searching, you'd want to build an index that maps from + # lemmas to frames because each time frames_by_lemma() is + # called, it has to search through ALL of the frame XML files + # in the db. 
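    # Illustrative aside (not part of the original demo): one way to build the
    # lemma -> frames index suggested above, so that repeated lemma lookups
    # avoid rescanning every frame XML file. The demo's own regexp search
    # continues below.
    from collections import defaultdict

    lemma_to_frames = defaultdict(set)
    for f in fn.frames():
        for lu_name in f.lexUnit:               # LU names look like 'run.v'
            lemma = lu_name.rsplit(".", 1)[0]   # strip the POS suffix
            lemma_to_frames[lemma].add(f.name)
    print('Frames containing the lemma "run":', sorted(lemma_to_frames["run"]))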
+ print( + '\nSearching for all Frames that have a lemma that matches the regexp: "^run.v$":' + ) + pprint(fn.frames_by_lemma(r"^run.v$")) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py new file mode 100644 index 0000000000000000000000000000000000000000..24f83cfaebcf9a583a33806136f8788b112aaf95 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py @@ -0,0 +1,116 @@ +# Natural Language Toolkit: IEER Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the Information Extraction and Entity Recognition Corpus. + +NIST 1999 Information Extraction: Entity Recognition Evaluation +https://www.itl.nist.gov/iad/894.01/tests/ie-er/er_99/er_99.htm + +This corpus contains the NEWSWIRE development test data for the +NIST 1999 IE-ER Evaluation. The files were taken from the +subdirectory: ``/ie_er_99/english/devtest/newswire/*.ref.nwt`` +and filenames were shortened. + +The corpus contains the following files: APW_19980314, APW_19980424, +APW_19980429, NYT_19980315, NYT_19980403, and NYT_19980407. +""" + +import nltk +from nltk.corpus.reader.api import * + +#: A dictionary whose keys are the names of documents in this corpus; +#: and whose values are descriptions of those documents' contents. +titles = { + "APW_19980314": "Associated Press Weekly, 14 March 1998", + "APW_19980424": "Associated Press Weekly, 24 April 1998", + "APW_19980429": "Associated Press Weekly, 29 April 1998", + "NYT_19980315": "New York Times, 15 March 1998", + "NYT_19980403": "New York Times, 3 April 1998", + "NYT_19980407": "New York Times, 7 April 1998", +} + +#: A list of all documents in this corpus. +documents = sorted(titles) + + +class IEERDocument: + def __init__(self, text, docno=None, doctype=None, date_time=None, headline=""): + self.text = text + self.docno = docno + self.doctype = doctype + self.date_time = date_time + self.headline = headline + + def __repr__(self): + if self.headline: + headline = " ".join(self.headline.leaves()) + else: + headline = ( + " ".join([w for w in self.text.leaves() if w[:1] != "<"][:12]) + "..." + ) + if self.docno is not None: + return f"" + else: + return "" % headline + + +class IEERCorpusReader(CorpusReader): + """ """ + + def docs(self, fileids=None): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def parsed_docs(self, fileids=None): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_parsed_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_parsed_block(self, stream): + # TODO: figure out while empty documents are being returned + return [ + self._parse(doc) + for doc in self._read_block(stream) + if self._parse(doc).docno is not None + ] + + def _parse(self, doc): + val = nltk.chunk.ieerstr2tree(doc, root_label="DOCUMENT") + if isinstance(val, dict): + return IEERDocument(**val) + else: + return IEERDocument(val) + + def _read_block(self, stream): + out = [] + # Skip any preamble. 
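+        # Each document in the .nwt source files is wrapped in
+        # <DOC> ... </DOC> markers: the first loop below skips any preamble
+        # before a document starts, and the second loop gathers the
+        # document's lines, which are returned joined as a single string.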
+ while True: + line = stream.readline() + if not line: + break + if line.strip() == "": + break + out.append(line) + # Read the document + while True: + line = stream.readline() + if not line: + break + out.append(line) + if line.strip() == "": + break + # Return the document + return ["\n".join(out)] diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py new file mode 100644 index 0000000000000000000000000000000000000000..d2d16c90f4edf380658af969a0488c28d5f1b24a --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py @@ -0,0 +1,356 @@ +# Natural Language Toolkit: IPI PAN Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Konrad Goluchowski +# URL: +# For license information, see LICENSE.TXT + +import functools + +from nltk.corpus.reader.api import CorpusReader +from nltk.corpus.reader.util import StreamBackedCorpusView, concat + + +def _parse_args(fun): + @functools.wraps(fun) + def decorator(self, fileids=None, **kwargs): + kwargs.pop("tags", None) + if not fileids: + fileids = self.fileids() + return fun(self, fileids, **kwargs) + + return decorator + + +class IPIPANCorpusReader(CorpusReader): + """ + Corpus reader designed to work with corpus created by IPI PAN. + See http://korpus.pl/en/ for more details about IPI PAN corpus. + + The corpus includes information about text domain, channel and categories. + You can access possible values using ``domains()``, ``channels()`` and + ``categories()``. You can use also this metadata to filter files, e.g.: + ``fileids(channel='prasa')``, ``fileids(categories='publicystyczny')``. + + The reader supports methods: words, sents, paras and their tagged versions. + You can get part of speech instead of full tag by giving "simplify_tags=True" + parameter, e.g.: ``tagged_sents(simplify_tags=True)``. + + Also you can get all tags disambiguated tags specifying parameter + "one_tag=False", e.g.: ``tagged_paras(one_tag=False)``. + + You can get all tags that were assigned by a morphological analyzer specifying + parameter "disamb_only=False", e.g. ``tagged_words(disamb_only=False)``. + + The IPIPAN Corpus contains tags indicating if there is a space between two + tokens. To add special "no space" markers, you should specify parameter + "append_no_space=True", e.g. ``tagged_words(append_no_space=True)``. + As a result in place where there should be no space between two tokens new + pair ('', 'no-space') will be inserted (for tagged data) and just '' for + methods without tags. + + The corpus reader can also try to append spaces between words. To enable this + option, specify parameter "append_space=True", e.g. ``words(append_space=True)``. + As a result either ' ' or (' ', 'space') will be inserted between tokens. + + By default, xml entities like " and & are replaced by corresponding + characters. You can turn off this feature, specifying parameter + "replace_xmlentities=False", e.g. ``words(replace_xmlentities=False)``. 
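+
+    A minimal usage sketch (the root below is a placeholder path; the
+    fileid pattern is assumed to select the per-text ``morph.xml`` files):
+
+        reader = IPIPANCorpusReader('/path/to/ipipan', r'(?!\.).*morph\.xml')
+        print(reader.channels())
+        print(reader.tagged_words(disamb_only=False)[:10])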
+ """ + + def __init__(self, root, fileids): + CorpusReader.__init__(self, root, fileids, None, None) + + def channels(self, fileids=None): + if not fileids: + fileids = self.fileids() + return self._parse_header(fileids, "channel") + + def domains(self, fileids=None): + if not fileids: + fileids = self.fileids() + return self._parse_header(fileids, "domain") + + def categories(self, fileids=None): + if not fileids: + fileids = self.fileids() + return [ + self._map_category(cat) for cat in self._parse_header(fileids, "keyTerm") + ] + + def fileids(self, channels=None, domains=None, categories=None): + if channels is not None and domains is not None and categories is not None: + raise ValueError( + "You can specify only one of channels, domains " + "and categories parameter at once" + ) + if channels is None and domains is None and categories is None: + return CorpusReader.fileids(self) + if isinstance(channels, str): + channels = [channels] + if isinstance(domains, str): + domains = [domains] + if isinstance(categories, str): + categories = [categories] + if channels: + return self._list_morph_files_by("channel", channels) + elif domains: + return self._list_morph_files_by("domain", domains) + else: + return self._list_morph_files_by( + "keyTerm", categories, map=self._map_category + ) + + @_parse_args + def sents(self, fileids=None, **kwargs): + return concat( + [ + self._view( + fileid, mode=IPIPANCorpusView.SENTS_MODE, tags=False, **kwargs + ) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def paras(self, fileids=None, **kwargs): + return concat( + [ + self._view( + fileid, mode=IPIPANCorpusView.PARAS_MODE, tags=False, **kwargs + ) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def words(self, fileids=None, **kwargs): + return concat( + [ + self._view(fileid, tags=False, **kwargs) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def tagged_sents(self, fileids=None, **kwargs): + return concat( + [ + self._view(fileid, mode=IPIPANCorpusView.SENTS_MODE, **kwargs) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def tagged_paras(self, fileids=None, **kwargs): + return concat( + [ + self._view(fileid, mode=IPIPANCorpusView.PARAS_MODE, **kwargs) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def tagged_words(self, fileids=None, **kwargs): + return concat( + [self._view(fileid, **kwargs) for fileid in self._list_morph_files(fileids)] + ) + + def _list_morph_files(self, fileids): + return [f for f in self.abspaths(fileids)] + + def _list_header_files(self, fileids): + return [ + f.replace("morph.xml", "header.xml") + for f in self._list_morph_files(fileids) + ] + + def _parse_header(self, fileids, tag): + values = set() + for f in self._list_header_files(fileids): + values_list = self._get_tag(f, tag) + for v in values_list: + values.add(v) + return list(values) + + def _list_morph_files_by(self, tag, values, map=None): + fileids = self.fileids() + ret_fileids = set() + for f in fileids: + fp = self.abspath(f).replace("morph.xml", "header.xml") + values_list = self._get_tag(fp, tag) + for value in values_list: + if map is not None: + value = map(value) + if value in values: + ret_fileids.add(f) + return list(ret_fileids) + + def _get_tag(self, f, tag): + tags = [] + with open(f) as infile: + header = infile.read() + tag_end = 0 + while True: + tag_pos = header.find("<" + tag, tag_end) + if tag_pos < 0: + return tags + tag_end = header.find("", tag_pos) + 
tags.append(header[tag_pos + len(tag) + 2 : tag_end]) + + def _map_category(self, cat): + pos = cat.find(">") + if pos == -1: + return cat + else: + return cat[pos + 1 :] + + def _view(self, filename, **kwargs): + tags = kwargs.pop("tags", True) + mode = kwargs.pop("mode", 0) + simplify_tags = kwargs.pop("simplify_tags", False) + one_tag = kwargs.pop("one_tag", True) + disamb_only = kwargs.pop("disamb_only", True) + append_no_space = kwargs.pop("append_no_space", False) + append_space = kwargs.pop("append_space", False) + replace_xmlentities = kwargs.pop("replace_xmlentities", True) + + if len(kwargs) > 0: + raise ValueError("Unexpected arguments: %s" % kwargs.keys()) + if not one_tag and not disamb_only: + raise ValueError( + "You cannot specify both one_tag=False and " "disamb_only=False" + ) + if not tags and (simplify_tags or not one_tag or not disamb_only): + raise ValueError( + "You cannot specify simplify_tags, one_tag or " + "disamb_only with functions other than tagged_*" + ) + + return IPIPANCorpusView( + filename, + tags=tags, + mode=mode, + simplify_tags=simplify_tags, + one_tag=one_tag, + disamb_only=disamb_only, + append_no_space=append_no_space, + append_space=append_space, + replace_xmlentities=replace_xmlentities, + ) + + +class IPIPANCorpusView(StreamBackedCorpusView): + + WORDS_MODE = 0 + SENTS_MODE = 1 + PARAS_MODE = 2 + + def __init__(self, filename, startpos=0, **kwargs): + StreamBackedCorpusView.__init__(self, filename, None, startpos, None) + self.in_sentence = False + self.position = 0 + + self.show_tags = kwargs.pop("tags", True) + self.disamb_only = kwargs.pop("disamb_only", True) + self.mode = kwargs.pop("mode", IPIPANCorpusView.WORDS_MODE) + self.simplify_tags = kwargs.pop("simplify_tags", False) + self.one_tag = kwargs.pop("one_tag", True) + self.append_no_space = kwargs.pop("append_no_space", False) + self.append_space = kwargs.pop("append_space", False) + self.replace_xmlentities = kwargs.pop("replace_xmlentities", True) + + def read_block(self, stream): + sentence = [] + sentences = [] + space = False + no_space = False + + tags = set() + + lines = self._read_data(stream) + + while True: + + # we may have only part of last line + if len(lines) <= 1: + self._seek(stream) + lines = self._read_data(stream) + + if lines == [""]: + assert not sentences + return [] + + line = lines.pop() + self.position += len(line) + 1 + + if line.startswith('"): + if self.append_space: + no_space = True + if self.append_no_space: + if self.show_tags: + sentence.append(("", "no-space")) + else: + sentence.append("") + elif line.startswith(" self.truncate_at else ''}" + ) + + @property + def raw(self): + return self.content + + @property + def words(self): + return word_tokenize(self.content) + + @property + def sents(self): + return [word_tokenize(sent) for sent in sent_tokenize(self.content)] + + @property + def paras(self): + return [ + [word_tokenize(sent) for sent in sent_tokenize(para)] + for para in blankline_tokenize(self.content) + ] + + +class CodeBlock(MarkdownBlock): + def __init__(self, language, *args): + self.language = language + super().__init__(*args) + + @property + def sents(self): + return [word_tokenize(line) for line in self.content.splitlines()] + + @property + def lines(self): + return self.content.splitlines() + + @property + def paras(self): + return [ + [word_tokenize(line) for line in para.splitlines()] + for para in blankline_tokenize(self.content) + ] + + +class MarkdownSection(MarkdownBlock): + def __init__(self, heading, level, *args): + 
self.heading = heading + self.level = level + super().__init__(*args) + + +Image = namedtuple("Image", "label, src, title") +Link = namedtuple("Link", "label, href, title") +List = namedtuple("List", "is_ordered, items") + + +class MarkdownCorpusReader(PlaintextCorpusReader): + def __init__(self, *args, parser=None, **kwargs): + from markdown_it import MarkdownIt + from mdit_plain.renderer import RendererPlain + from mdit_py_plugins.front_matter import front_matter_plugin + + self.parser = parser + if self.parser is None: + self.parser = MarkdownIt("commonmark", renderer_cls=RendererPlain) + self.parser.use(front_matter_plugin) + + kwargs.setdefault( + "para_block_reader", partial(read_parse_blankline_block, parser=self.parser) + ) + super().__init__(*args, **kwargs) + + # This override takes care of removing markup. + def _read_word_block(self, stream): + words = list() + for para in self._para_block_reader(stream): + words.extend(self._word_tokenizer.tokenize(para)) + return words + + +class CategorizedMarkdownCorpusReader(CategorizedCorpusReader, MarkdownCorpusReader): + """ + A reader for markdown corpora whose documents are divided into + categories based on their file identifiers. + + Based on nltk.corpus.reader.plaintext.CategorizedPlaintextCorpusReader: + https://www.nltk.org/_modules/nltk/corpus/reader/api.html#CategorizedCorpusReader + """ + + def __init__(self, *args, cat_field="tags", **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to + the ``CategorizedCorpusReader`` constructor. The remaining arguments + are passed to the ``MarkdownCorpusReader`` constructor. + """ + cat_args = ["cat_pattern", "cat_map", "cat_file"] + if not any(arg in kwargs for arg in cat_args): + # Initialize with a blank map now, + # and try to build categories from document metadata later. 
+ kwargs["cat_map"] = dict() + CategorizedCorpusReader.__init__(self, kwargs) + MarkdownCorpusReader.__init__(self, *args, **kwargs) + + # Map file IDs to categories if self._map exists but is still empty: + if self._map is not None and not self._map: + for file_id in self._fileids: + metadata = self.metadata(file_id) + if metadata: + self._map[file_id] = metadata[0].get(cat_field, []) + + ### Begin CategorizedCorpusReader Overrides + @comma_separated_string_args + def categories(self, fileids=None): + return super().categories(fileids) + + @comma_separated_string_args + def fileids(self, categories=None): + if categories is None: + return self._fileids + return super().fileids(categories) + + ### End CategorizedCorpusReader Overrides + + ### Begin MarkdownCorpusReader Overrides + @comma_separated_string_args + def raw(self, fileids=None, categories=None): + return super().raw(self._resolve(fileids, categories)) + + @comma_separated_string_args + def words(self, fileids=None, categories=None): + return super().words(self._resolve(fileids, categories)) + + @comma_separated_string_args + def sents(self, fileids=None, categories=None): + return super().sents(self._resolve(fileids, categories)) + + @comma_separated_string_args + def paras(self, fileids=None, categories=None): + return super().paras(self._resolve(fileids, categories)) + + ### End MarkdownCorpusReader Overrides + + def concatenated_view(self, reader, fileids, categories): + return concat( + [ + self.CorpusView(path, reader, encoding=enc) + for (path, enc) in self.abspaths( + self._resolve(fileids, categories), include_encoding=True + ) + ] + ) + + def metadata_reader(self, stream): + from yaml import safe_load + + return [ + safe_load(t.content) + for t in self.parser.parse(stream.read()) + if t.type == "front_matter" + ] + + @comma_separated_string_args + def metadata(self, fileids=None, categories=None): + return self.concatenated_view(self.metadata_reader, fileids, categories) + + def blockquote_reader(self, stream): + tokens = self.parser.parse(stream.read()) + opening_tokens = filter( + lambda t: t.level == 0 and t.type == "blockquote_open", tokens + ) + closing_tokens = filter( + lambda t: t.level == 0 and t.type == "blockquote_close", tokens + ) + blockquotes = list() + for o, c in zip(opening_tokens, closing_tokens): + opening_index = tokens.index(o) + closing_index = tokens.index(c, opening_index) + blockquotes.append(tokens[opening_index : closing_index + 1]) + return [ + MarkdownBlock( + self.parser.renderer.render(block, self.parser.options, env=None) + ) + for block in blockquotes + ] + + @comma_separated_string_args + def blockquotes(self, fileids=None, categories=None): + return self.concatenated_view(self.blockquote_reader, fileids, categories) + + def code_block_reader(self, stream): + return [ + CodeBlock( + t.info, + t.content, + ) + for t in self.parser.parse(stream.read()) + if t.level == 0 and t.type in ("fence", "code_block") + ] + + @comma_separated_string_args + def code_blocks(self, fileids=None, categories=None): + return self.concatenated_view(self.code_block_reader, fileids, categories) + + def image_reader(self, stream): + return [ + Image( + child_token.content, + child_token.attrGet("src"), + child_token.attrGet("title"), + ) + for inline_token in filter( + lambda t: t.type == "inline", self.parser.parse(stream.read()) + ) + for child_token in inline_token.children + if child_token.type == "image" + ] + + @comma_separated_string_args + def images(self, fileids=None, categories=None): + return 
self.concatenated_view(self.image_reader, fileids, categories) + + def link_reader(self, stream): + return [ + Link( + inline_token.children[i + 1].content, + child_token.attrGet("href"), + child_token.attrGet("title"), + ) + for inline_token in filter( + lambda t: t.type == "inline", self.parser.parse(stream.read()) + ) + for i, child_token in enumerate(inline_token.children) + if child_token.type == "link_open" + ] + + @comma_separated_string_args + def links(self, fileids=None, categories=None): + return self.concatenated_view(self.link_reader, fileids, categories) + + def list_reader(self, stream): + tokens = self.parser.parse(stream.read()) + opening_types = ("bullet_list_open", "ordered_list_open") + opening_tokens = filter( + lambda t: t.level == 0 and t.type in opening_types, tokens + ) + closing_types = ("bullet_list_close", "ordered_list_close") + closing_tokens = filter( + lambda t: t.level == 0 and t.type in closing_types, tokens + ) + list_blocks = list() + for o, c in zip(opening_tokens, closing_tokens): + opening_index = tokens.index(o) + closing_index = tokens.index(c, opening_index) + list_blocks.append(tokens[opening_index : closing_index + 1]) + return [ + List( + tokens[0].type == "ordered_list_open", + [t.content for t in tokens if t.content], + ) + for tokens in list_blocks + ] + + @comma_separated_string_args + def lists(self, fileids=None, categories=None): + return self.concatenated_view(self.list_reader, fileids, categories) + + def section_reader(self, stream): + section_blocks, block = list(), list() + in_heading = False + for t in self.parser.parse(stream.read()): + if t.level == 0 and t.type == "heading_open": + if block: + section_blocks.append(block) + block = list() + in_heading = True + if in_heading: + block.append(t) + return [ + MarkdownSection( + block[1].content, + block[0].markup.count("#"), + self.parser.renderer.render(block, self.parser.options, env=None), + ) + for block in section_blocks + ] + + @comma_separated_string_args + def sections(self, fileids=None, categories=None): + return self.concatenated_view(self.section_reader, fileids, categories) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/mte.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/mte.py new file mode 100644 index 0000000000000000000000000000000000000000..99190bed452095dc948e324ce5cc0f3c94c46505 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/mte.py @@ -0,0 +1,397 @@ +""" +A reader for corpora whose documents are in MTE format. +""" +import os +import re +from functools import reduce + +from nltk.corpus.reader import TaggedCorpusReader, concat +from nltk.corpus.reader.xmldocs import XMLCorpusView + + +def xpath(root, path, ns): + return root.findall(path, ns) + + +class MTECorpusView(XMLCorpusView): + """ + Class for lazy viewing the MTE Corpus. + """ + + def __init__(self, fileid, tagspec, elt_handler=None): + XMLCorpusView.__init__(self, fileid, tagspec, elt_handler) + + def read_block(self, stream, tagspec=None, elt_handler=None): + return list( + filter( + lambda x: x is not None, + XMLCorpusView.read_block(self, stream, tagspec, elt_handler), + ) + ) + + +class MTEFileReader: + """ + Class for loading the content of the multext-east corpus. It + parses the xml files and does some tag-filtering depending on the + given method parameters. 
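+
+    A minimal usage sketch (the path below is a placeholder for a single
+    TEI-p5 file such as ``oana-en.xml``; most users go through
+    ``MTECorpusReader`` rather than using this class directly):
+
+        reader = MTEFileReader('/path/to/multext-east/oana-en.xml')
+        print(reader.words()[:10])
+        print(reader.tagged_words('universal', '')[:10])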
+ """ + + ns = { + "tei": "https://www.tei-c.org/ns/1.0", + "xml": "https://www.w3.org/XML/1998/namespace", + } + tag_ns = "{https://www.tei-c.org/ns/1.0}" + xml_ns = "{https://www.w3.org/XML/1998/namespace}" + word_path = "TEI/text/body/div/div/p/s/(w|c)" + sent_path = "TEI/text/body/div/div/p/s" + para_path = "TEI/text/body/div/div/p" + + def __init__(self, file_path): + self.__file_path = file_path + + @classmethod + def _word_elt(cls, elt, context): + return elt.text + + @classmethod + def _sent_elt(cls, elt, context): + return [cls._word_elt(w, None) for w in xpath(elt, "*", cls.ns)] + + @classmethod + def _para_elt(cls, elt, context): + return [cls._sent_elt(s, None) for s in xpath(elt, "*", cls.ns)] + + @classmethod + def _tagged_word_elt(cls, elt, context): + if "ana" not in elt.attrib: + return (elt.text, "") + + if cls.__tags == "" and cls.__tagset == "msd": + return (elt.text, elt.attrib["ana"]) + elif cls.__tags == "" and cls.__tagset == "universal": + return (elt.text, MTETagConverter.msd_to_universal(elt.attrib["ana"])) + else: + tags = re.compile("^" + re.sub("-", ".", cls.__tags) + ".*$") + if tags.match(elt.attrib["ana"]): + if cls.__tagset == "msd": + return (elt.text, elt.attrib["ana"]) + else: + return ( + elt.text, + MTETagConverter.msd_to_universal(elt.attrib["ana"]), + ) + else: + return None + + @classmethod + def _tagged_sent_elt(cls, elt, context): + return list( + filter( + lambda x: x is not None, + [cls._tagged_word_elt(w, None) for w in xpath(elt, "*", cls.ns)], + ) + ) + + @classmethod + def _tagged_para_elt(cls, elt, context): + return list( + filter( + lambda x: x is not None, + [cls._tagged_sent_elt(s, None) for s in xpath(elt, "*", cls.ns)], + ) + ) + + @classmethod + def _lemma_word_elt(cls, elt, context): + if "lemma" not in elt.attrib: + return (elt.text, "") + else: + return (elt.text, elt.attrib["lemma"]) + + @classmethod + def _lemma_sent_elt(cls, elt, context): + return [cls._lemma_word_elt(w, None) for w in xpath(elt, "*", cls.ns)] + + @classmethod + def _lemma_para_elt(cls, elt, context): + return [cls._lemma_sent_elt(s, None) for s in xpath(elt, "*", cls.ns)] + + def words(self): + return MTECorpusView( + self.__file_path, MTEFileReader.word_path, MTEFileReader._word_elt + ) + + def sents(self): + return MTECorpusView( + self.__file_path, MTEFileReader.sent_path, MTEFileReader._sent_elt + ) + + def paras(self): + return MTECorpusView( + self.__file_path, MTEFileReader.para_path, MTEFileReader._para_elt + ) + + def lemma_words(self): + return MTECorpusView( + self.__file_path, MTEFileReader.word_path, MTEFileReader._lemma_word_elt + ) + + def tagged_words(self, tagset, tags): + MTEFileReader.__tagset = tagset + MTEFileReader.__tags = tags + return MTECorpusView( + self.__file_path, MTEFileReader.word_path, MTEFileReader._tagged_word_elt + ) + + def lemma_sents(self): + return MTECorpusView( + self.__file_path, MTEFileReader.sent_path, MTEFileReader._lemma_sent_elt + ) + + def tagged_sents(self, tagset, tags): + MTEFileReader.__tagset = tagset + MTEFileReader.__tags = tags + return MTECorpusView( + self.__file_path, MTEFileReader.sent_path, MTEFileReader._tagged_sent_elt + ) + + def lemma_paras(self): + return MTECorpusView( + self.__file_path, MTEFileReader.para_path, MTEFileReader._lemma_para_elt + ) + + def tagged_paras(self, tagset, tags): + MTEFileReader.__tagset = tagset + MTEFileReader.__tags = tags + return MTECorpusView( + self.__file_path, MTEFileReader.para_path, MTEFileReader._tagged_para_elt + ) + + +class MTETagConverter: + """ + 
Class for converting msd tags to universal tags, more conversion + options are currently not implemented. + """ + + mapping_msd_universal = { + "A": "ADJ", + "S": "ADP", + "R": "ADV", + "C": "CONJ", + "D": "DET", + "N": "NOUN", + "M": "NUM", + "Q": "PRT", + "P": "PRON", + "V": "VERB", + ".": ".", + "-": "X", + } + + @staticmethod + def msd_to_universal(tag): + """ + This function converts the annotation from the Multex-East to the universal tagset + as described in Chapter 5 of the NLTK-Book + + Unknown Tags will be mapped to X. Punctuation marks are not supported in MSD tags, so + """ + indicator = tag[0] if not tag[0] == "#" else tag[1] + + if not indicator in MTETagConverter.mapping_msd_universal: + indicator = "-" + + return MTETagConverter.mapping_msd_universal[indicator] + + +class MTECorpusReader(TaggedCorpusReader): + """ + Reader for corpora following the TEI-p5 xml scheme, such as MULTEXT-East. + MULTEXT-East contains part-of-speech-tagged words with a quite precise tagging + scheme. These tags can be converted to the Universal tagset + """ + + def __init__(self, root=None, fileids=None, encoding="utf8"): + """ + Construct a new MTECorpusreader for a set of documents + located at the given root directory. Example usage: + + >>> root = '/...path to corpus.../' + >>> reader = MTECorpusReader(root, 'oana-*.xml', 'utf8') # doctest: +SKIP + + :param root: The root directory for this corpus. (default points to location in multext config file) + :param fileids: A list or regexp specifying the fileids in this corpus. (default is oana-en.xml) + :param encoding: The encoding of the given files (default is utf8) + """ + TaggedCorpusReader.__init__(self, root, fileids, encoding) + self._readme = "00README.txt" + + def __fileids(self, fileids): + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + # filter wrong userinput + fileids = filter(lambda x: x in self._fileids, fileids) + # filter multext-east sourcefiles that are not compatible to the teip5 specification + fileids = filter(lambda x: x not in ["oana-bg.xml", "oana-mk.xml"], fileids) + if not fileids: + print("No valid multext-east file specified") + return fileids + + def words(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).words() + for f in self.__fileids(fileids) + ] + ) + + def sents(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a list of sentences or utterances, + each encoded as a list of word strings + :rtype: list(list(str)) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).sents() + for f in self.__fileids(fileids) + ] + ) + + def paras(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a list of paragraphs, each encoded as a list + of sentences, which are in turn encoded as lists of word string + :rtype: list(list(list(str))) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).paras() + for f in self.__fileids(fileids) + ] + ) + + def lemma_words(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. 
+ :return: the given file(s) as a list of words, the corresponding lemmas + and punctuation symbols, encoded as tuples (word, lemma) + :rtype: list(tuple(str,str)) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).lemma_words() + for f in self.__fileids(fileids) + ] + ) + + def tagged_words(self, fileids=None, tagset="msd", tags=""): + """ + :param fileids: A list specifying the fileids that should be used. + :param tagset: The tagset that should be used in the returned object, + either "universal" or "msd", "msd" is the default + :param tags: An MSD Tag that is used to filter all parts of the used corpus + that are not more precise or at least equal to the given tag + :return: the given file(s) as a list of tagged words and punctuation symbols + encoded as tuples (word, tag) + :rtype: list(tuple(str, str)) + """ + if tagset == "universal" or tagset == "msd": + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).tagged_words( + tagset, tags + ) + for f in self.__fileids(fileids) + ] + ) + else: + print("Unknown tagset specified.") + + def lemma_sents(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a list of sentences or utterances, each + encoded as a list of tuples of the word and the corresponding + lemma (word, lemma) + :rtype: list(list(tuple(str, str))) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).lemma_sents() + for f in self.__fileids(fileids) + ] + ) + + def tagged_sents(self, fileids=None, tagset="msd", tags=""): + """ + :param fileids: A list specifying the fileids that should be used. + :param tagset: The tagset that should be used in the returned object, + either "universal" or "msd", "msd" is the default + :param tags: An MSD Tag that is used to filter all parts of the used corpus + that are not more precise or at least equal to the given tag + :return: the given file(s) as a list of sentences or utterances, each + each encoded as a list of (word,tag) tuples + :rtype: list(list(tuple(str, str))) + """ + if tagset == "universal" or tagset == "msd": + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).tagged_sents( + tagset, tags + ) + for f in self.__fileids(fileids) + ] + ) + else: + print("Unknown tagset specified.") + + def lemma_paras(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a list of paragraphs, each encoded as a + list of sentences, which are in turn encoded as a list of + tuples of the word and the corresponding lemma (word, lemma) + :rtype: list(List(List(tuple(str, str)))) + """ + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).lemma_paras() + for f in self.__fileids(fileids) + ] + ) + + def tagged_paras(self, fileids=None, tagset="msd", tags=""): + """ + :param fileids: A list specifying the fileids that should be used. 
+ :param tagset: The tagset that should be used in the returned object, + either "universal" or "msd", "msd" is the default + :param tags: An MSD Tag that is used to filter all parts of the used corpus + that are not more precise or at least equal to the given tag + :return: the given file(s) as a list of paragraphs, each encoded as a + list of sentences, which are in turn encoded as a list + of (word,tag) tuples + :rtype: list(list(list(tuple(str, str)))) + """ + if tagset == "universal" or tagset == "msd": + return concat( + [ + MTEFileReader(os.path.join(self._root, f)).tagged_paras( + tagset, tags + ) + for f in self.__fileids(fileids) + ] + ) + else: + print("Unknown tagset specified.") diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/nkjp.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/nkjp.py new file mode 100644 index 0000000000000000000000000000000000000000..685485590727fb8231062eedba6727cf3dc45d81 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/nkjp.py @@ -0,0 +1,487 @@ +# Natural Language Toolkit: NKJP Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Gabriela Kaczka +# URL: +# For license information, see LICENSE.TXT + +import functools +import os +import re +import tempfile + +from nltk.corpus.reader.util import concat +from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView + + +def _parse_args(fun): + """ + Wraps function arguments: + if fileids not specified then function set NKJPCorpusReader paths. + """ + + @functools.wraps(fun) + def decorator(self, fileids=None, **kwargs): + if not fileids: + fileids = self._paths + return fun(self, fileids, **kwargs) + + return decorator + + +class NKJPCorpusReader(XMLCorpusReader): + WORDS_MODE = 0 + SENTS_MODE = 1 + HEADER_MODE = 2 + RAW_MODE = 3 + + def __init__(self, root, fileids=".*"): + """ + Corpus reader designed to work with National Corpus of Polish. + See http://nkjp.pl/ for more details about NKJP. + use example: + import nltk + import nkjp + from nkjp import NKJPCorpusReader + x = NKJPCorpusReader(root='/home/USER/nltk_data/corpora/nkjp/', fileids='') # obtain the whole corpus + x.header() + x.raw() + x.words() + x.tagged_words(tags=['subst', 'comp']) #Link to find more tags: nkjp.pl/poliqarp/help/ense2.html + x.sents() + x = NKJPCorpusReader(root='/home/USER/nltk_data/corpora/nkjp/', fileids='Wilk*') # obtain particular file(s) + x.header(fileids=['WilkDom', '/home/USER/nltk_data/corpora/nkjp/WilkWilczy']) + x.tagged_words(fileids=['WilkDom', '/home/USER/nltk_data/corpora/nkjp/WilkWilczy'], tags=['subst', 'comp']) + """ + if isinstance(fileids, str): + XMLCorpusReader.__init__(self, root, fileids + ".*/header.xml") + else: + XMLCorpusReader.__init__( + self, root, [fileid + "/header.xml" for fileid in fileids] + ) + self._paths = self.get_paths() + + def get_paths(self): + return [ + os.path.join(str(self._root), f.split("header.xml")[0]) + for f in self._fileids + ] + + def fileids(self): + """ + Returns a list of file identifiers for the fileids that make up + this corpus. + """ + return [f.split("header.xml")[0] for f in self._fileids] + + def _view(self, filename, tags=None, **kwargs): + """ + Returns a view specialised for use with particular corpus file. 
+ """ + mode = kwargs.pop("mode", NKJPCorpusReader.WORDS_MODE) + if mode is NKJPCorpusReader.WORDS_MODE: + return NKJPCorpus_Morph_View(filename, tags=tags) + elif mode is NKJPCorpusReader.SENTS_MODE: + return NKJPCorpus_Segmentation_View(filename, tags=tags) + elif mode is NKJPCorpusReader.HEADER_MODE: + return NKJPCorpus_Header_View(filename, tags=tags) + elif mode is NKJPCorpusReader.RAW_MODE: + return NKJPCorpus_Text_View( + filename, tags=tags, mode=NKJPCorpus_Text_View.RAW_MODE + ) + + else: + raise NameError("No such mode!") + + def add_root(self, fileid): + """ + Add root if necessary to specified fileid. + """ + if self.root in fileid: + return fileid + return self.root + fileid + + @_parse_args + def header(self, fileids=None, **kwargs): + """ + Returns header(s) of specified fileids. + """ + return concat( + [ + self._view( + self.add_root(fileid), mode=NKJPCorpusReader.HEADER_MODE, **kwargs + ).handle_query() + for fileid in fileids + ] + ) + + @_parse_args + def sents(self, fileids=None, **kwargs): + """ + Returns sentences in specified fileids. + """ + return concat( + [ + self._view( + self.add_root(fileid), mode=NKJPCorpusReader.SENTS_MODE, **kwargs + ).handle_query() + for fileid in fileids + ] + ) + + @_parse_args + def words(self, fileids=None, **kwargs): + """ + Returns words in specified fileids. + """ + + return concat( + [ + self._view( + self.add_root(fileid), mode=NKJPCorpusReader.WORDS_MODE, **kwargs + ).handle_query() + for fileid in fileids + ] + ) + + @_parse_args + def tagged_words(self, fileids=None, **kwargs): + """ + Call with specified tags as a list, e.g. tags=['subst', 'comp']. + Returns tagged words in specified fileids. + """ + tags = kwargs.pop("tags", []) + return concat( + [ + self._view( + self.add_root(fileid), + mode=NKJPCorpusReader.WORDS_MODE, + tags=tags, + **kwargs + ).handle_query() + for fileid in fileids + ] + ) + + @_parse_args + def raw(self, fileids=None, **kwargs): + """ + Returns words in specified fileids. + """ + return concat( + [ + self._view( + self.add_root(fileid), mode=NKJPCorpusReader.RAW_MODE, **kwargs + ).handle_query() + for fileid in fileids + ] + ) + + +class NKJPCorpus_Header_View(XMLCorpusView): + def __init__(self, filename, **kwargs): + """ + HEADER_MODE + A stream backed corpus view specialized for use with + header.xml files in NKJP corpus. 
+ """ + self.tagspec = ".*/sourceDesc$" + XMLCorpusView.__init__(self, filename + "header.xml", self.tagspec) + + def handle_query(self): + self._open() + header = [] + while True: + segm = XMLCorpusView.read_block(self, self._stream) + if len(segm) == 0: + break + header.extend(segm) + self.close() + return header + + def handle_elt(self, elt, context): + titles = elt.findall("bibl/title") + title = [] + if titles: + title = "\n".join(title.text.strip() for title in titles) + + authors = elt.findall("bibl/author") + author = [] + if authors: + author = "\n".join(author.text.strip() for author in authors) + + dates = elt.findall("bibl/date") + date = [] + if dates: + date = "\n".join(date.text.strip() for date in dates) + + publishers = elt.findall("bibl/publisher") + publisher = [] + if publishers: + publisher = "\n".join(publisher.text.strip() for publisher in publishers) + + idnos = elt.findall("bibl/idno") + idno = [] + if idnos: + idno = "\n".join(idno.text.strip() for idno in idnos) + + notes = elt.findall("bibl/note") + note = [] + if notes: + note = "\n".join(note.text.strip() for note in notes) + + return { + "title": title, + "author": author, + "date": date, + "publisher": publisher, + "idno": idno, + "note": note, + } + + +class XML_Tool: + """ + Helper class creating xml file to one without references to nkjp: namespace. + That's needed because the XMLCorpusView assumes that one can find short substrings + of XML that are valid XML, which is not true if a namespace is declared at top level + """ + + def __init__(self, root, filename): + self.read_file = os.path.join(root, filename) + self.write_file = tempfile.NamedTemporaryFile(delete=False) + + def build_preprocessed_file(self): + try: + fr = open(self.read_file) + fw = self.write_file + line = " " + while len(line): + line = fr.readline() + x = re.split(r"nkjp:[^ ]* ", line) # in all files + ret = " ".join(x) + x = re.split("", ret) # in ann_segmentation.xml + ret = " ".join(x) + x = re.split("", ret) # in ann_segmentation.xml + ret = " ".join(x) + x = re.split("", ret) # in ann_segmentation.xml + ret = " ".join(x) + x = re.split("", ret) # in ann_segmentation.xml + ret = " ".join(x) + fw.write(ret) + fr.close() + fw.close() + return self.write_file.name + except Exception as e: + self.remove_preprocessed_file() + raise Exception from e + + def remove_preprocessed_file(self): + os.remove(self.write_file.name) + + +class NKJPCorpus_Segmentation_View(XMLCorpusView): + """ + A stream backed corpus view specialized for use with + ann_segmentation.xml files in NKJP corpus. 
+ """ + + def __init__(self, filename, **kwargs): + self.tagspec = ".*p/.*s" + # intersperse NKJPCorpus_Text_View + self.text_view = NKJPCorpus_Text_View( + filename, mode=NKJPCorpus_Text_View.SENTS_MODE + ) + self.text_view.handle_query() + # xml preprocessing + self.xml_tool = XML_Tool(filename, "ann_segmentation.xml") + # base class init + XMLCorpusView.__init__( + self, self.xml_tool.build_preprocessed_file(), self.tagspec + ) + + def get_segm_id(self, example_word): + return example_word.split("(")[1].split(",")[0] + + def get_sent_beg(self, beg_word): + # returns index of beginning letter in sentence + return int(beg_word.split(",")[1]) + + def get_sent_end(self, end_word): + # returns index of end letter in sentence + splitted = end_word.split(")")[0].split(",") + return int(splitted[1]) + int(splitted[2]) + + def get_sentences(self, sent_segm): + # returns one sentence + id = self.get_segm_id(sent_segm[0]) + segm = self.text_view.segm_dict[id] # text segment + beg = self.get_sent_beg(sent_segm[0]) + end = self.get_sent_end(sent_segm[len(sent_segm) - 1]) + return segm[beg:end] + + def remove_choice(self, segm): + ret = [] + prev_txt_end = -1 + prev_txt_nr = -1 + for word in segm: + txt_nr = self.get_segm_id(word) + # get increasing sequence of ids: in case of choice get first possibility + if self.get_sent_beg(word) > prev_txt_end - 1 or prev_txt_nr != txt_nr: + ret.append(word) + prev_txt_end = self.get_sent_end(word) + prev_txt_nr = txt_nr + + return ret + + def handle_query(self): + try: + self._open() + sentences = [] + while True: + sent_segm = XMLCorpusView.read_block(self, self._stream) + if len(sent_segm) == 0: + break + for segm in sent_segm: + segm = self.remove_choice(segm) + sentences.append(self.get_sentences(segm)) + self.close() + self.xml_tool.remove_preprocessed_file() + return sentences + except Exception as e: + self.xml_tool.remove_preprocessed_file() + raise Exception from e + + def handle_elt(self, elt, context): + ret = [] + for seg in elt: + ret.append(seg.get("corresp")) + return ret + + +class NKJPCorpus_Text_View(XMLCorpusView): + """ + A stream backed corpus view specialized for use with + text.xml files in NKJP corpus. + """ + + SENTS_MODE = 0 + RAW_MODE = 1 + + def __init__(self, filename, **kwargs): + self.mode = kwargs.pop("mode", 0) + self.tagspec = ".*/div/ab" + self.segm_dict = dict() + # xml preprocessing + self.xml_tool = XML_Tool(filename, "text.xml") + # base class init + XMLCorpusView.__init__( + self, self.xml_tool.build_preprocessed_file(), self.tagspec + ) + + def handle_query(self): + try: + self._open() + x = self.read_block(self._stream) + self.close() + self.xml_tool.remove_preprocessed_file() + return x + except Exception as e: + self.xml_tool.remove_preprocessed_file() + raise Exception from e + + def read_block(self, stream, tagspec=None, elt_handler=None): + """ + Returns text as a list of sentences. 
+ """ + txt = [] + while True: + segm = XMLCorpusView.read_block(self, stream) + if len(segm) == 0: + break + for part in segm: + txt.append(part) + + return [" ".join([segm for segm in txt])] + + def get_segm_id(self, elt): + for attr in elt.attrib: + if attr.endswith("id"): + return elt.get(attr) + + def handle_elt(self, elt, context): + # fill dictionary to use later in sents mode + if self.mode is NKJPCorpus_Text_View.SENTS_MODE: + self.segm_dict[self.get_segm_id(elt)] = elt.text + return elt.text + + +class NKJPCorpus_Morph_View(XMLCorpusView): + """ + A stream backed corpus view specialized for use with + ann_morphosyntax.xml files in NKJP corpus. + """ + + def __init__(self, filename, **kwargs): + self.tags = kwargs.pop("tags", None) + self.tagspec = ".*/seg/fs" + self.xml_tool = XML_Tool(filename, "ann_morphosyntax.xml") + XMLCorpusView.__init__( + self, self.xml_tool.build_preprocessed_file(), self.tagspec + ) + + def handle_query(self): + try: + self._open() + words = [] + while True: + segm = XMLCorpusView.read_block(self, self._stream) + if len(segm) == 0: + break + for part in segm: + if part is not None: + words.append(part) + self.close() + self.xml_tool.remove_preprocessed_file() + return words + except Exception as e: + self.xml_tool.remove_preprocessed_file() + raise Exception from e + + def handle_elt(self, elt, context): + word = "" + flag = False + is_not_interp = True + # if tags not specified, then always return word + if self.tags is None: + flag = True + + for child in elt: + + # get word + if "name" in child.keys() and child.attrib["name"] == "orth": + for symbol in child: + if symbol.tag == "string": + word = symbol.text + elif "name" in child.keys() and child.attrib["name"] == "interps": + for symbol in child: + if "type" in symbol.keys() and symbol.attrib["type"] == "lex": + for symbol2 in symbol: + if ( + "name" in symbol2.keys() + and symbol2.attrib["name"] == "ctag" + ): + for symbol3 in symbol2: + if ( + "value" in symbol3.keys() + and self.tags is not None + and symbol3.attrib["value"] in self.tags + ): + flag = True + elif ( + "value" in symbol3.keys() + and symbol3.attrib["value"] == "interp" + ): + is_not_interp = False + if flag and is_not_interp: + return word diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/nombank.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/nombank.py new file mode 100644 index 0000000000000000000000000000000000000000..ddee6206019c644968058e7cb6cac83f5076ade6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/nombank.py @@ -0,0 +1,466 @@ +# Natural Language Toolkit: NomBank Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Paul Bedaride +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +from functools import total_ordering +from xml.etree import ElementTree + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.internals import raise_unorderable_types +from nltk.tree import Tree + + +class NombankCorpusReader(CorpusReader): + """ + Corpus reader for the nombank corpus, which augments the Penn + Treebank with information about the predicate argument structure + of every noun instance. The corpus consists of two parts: the + predicate-argument annotations themselves, and a set of "frameset + files" which define the argument labels used by the annotations, + on a per-noun basis. 
Each "frameset file" contains one or more + predicates, such as ``'turn'`` or ``'turn_on'``, each of which is + divided into coarse-grained word senses called "rolesets". For + each "roleset", the frameset file provides descriptions of the + argument roles, along with examples. + """ + + def __init__( + self, + root, + nomfile, + framefiles="", + nounsfile=None, + parse_fileid_xform=None, + parse_corpus=None, + encoding="utf8", + ): + """ + :param root: The root directory for this corpus. + :param nomfile: The name of the file containing the predicate- + argument annotations (relative to ``root``). + :param framefiles: A list or regexp specifying the frameset + fileids for this corpus. + :param parse_fileid_xform: A transform that should be applied + to the fileids in this corpus. This should be a function + of one argument (a fileid) that returns a string (the new + fileid). + :param parse_corpus: The corpus containing the parse trees + corresponding to this corpus. These parse trees are + necessary to resolve the tree pointers used by nombank. + """ + + # If framefiles is specified as a regexp, expand it. + if isinstance(framefiles, str): + self._fileids = find_corpus_fileids(root, framefiles) + self._fileids = list(framefiles) + # Initialize the corpus reader. + CorpusReader.__init__(self, root, framefiles, encoding) + + # Record our nom file & nouns file. + self._nomfile = nomfile + self._nounsfile = nounsfile + self._parse_fileid_xform = parse_fileid_xform + self._parse_corpus = parse_corpus + + def instances(self, baseform=None): + """ + :return: a corpus view that acts as a list of + ``NombankInstance`` objects, one for each noun in the corpus. + """ + kwargs = {} + if baseform is not None: + kwargs["instance_filter"] = lambda inst: inst.baseform == baseform + return StreamBackedCorpusView( + self.abspath(self._nomfile), + lambda stream: self._read_instance_block(stream, **kwargs), + encoding=self.encoding(self._nomfile), + ) + + def lines(self): + """ + :return: a corpus view that acts as a list of strings, one for + each line in the predicate-argument annotation file. + """ + return StreamBackedCorpusView( + self.abspath(self._nomfile), + read_line_block, + encoding=self.encoding(self._nomfile), + ) + + def roleset(self, roleset_id): + """ + :return: the xml description for the given roleset. + """ + baseform = roleset_id.split(".")[0] + baseform = baseform.replace("perc-sign", "%") + baseform = baseform.replace("oneslashonezero", "1/10").replace( + "1/10", "1-slash-10" + ) + framefile = "frames/%s.xml" % baseform + if framefile not in self.fileids(): + raise ValueError("Frameset file for %s not found" % roleset_id) + + # n.b.: The encoding for XML fileids is specified by the file + # itself; so we ignore self._encoding here. + with self.abspath(framefile).open() as fp: + etree = ElementTree.parse(fp).getroot() + for roleset in etree.findall("predicate/roleset"): + if roleset.attrib["id"] == roleset_id: + return roleset + raise ValueError(f"Roleset {roleset_id} not found in {framefile}") + + def rolesets(self, baseform=None): + """ + :return: list of xml descriptions for rolesets. + """ + if baseform is not None: + framefile = "frames/%s.xml" % baseform + if framefile not in self.fileids(): + raise ValueError("Frameset file for %s not found" % baseform) + framefiles = [framefile] + else: + framefiles = self.fileids() + + rsets = [] + for framefile in framefiles: + # n.b.: The encoding for XML fileids is specified by the file + # itself; so we ignore self._encoding here. 
+ with self.abspath(framefile).open() as fp: + etree = ElementTree.parse(fp).getroot() + rsets.append(etree.findall("predicate/roleset")) + return LazyConcatenation(rsets) + + def nouns(self): + """ + :return: a corpus view that acts as a list of all noun lemmas + in this corpus (from the nombank.1.0.words file). + """ + return StreamBackedCorpusView( + self.abspath(self._nounsfile), + read_line_block, + encoding=self.encoding(self._nounsfile), + ) + + def _read_instance_block(self, stream, instance_filter=lambda inst: True): + block = [] + + # Read 100 at a time. + for i in range(100): + line = stream.readline().strip() + if line: + inst = NombankInstance.parse( + line, self._parse_fileid_xform, self._parse_corpus + ) + if instance_filter(inst): + block.append(inst) + + return block + + +###################################################################### +# { Nombank Instance & related datatypes +###################################################################### + + +class NombankInstance: + def __init__( + self, + fileid, + sentnum, + wordnum, + baseform, + sensenumber, + predicate, + predid, + arguments, + parse_corpus=None, + ): + + self.fileid = fileid + """The name of the file containing the parse tree for this + instance's sentence.""" + + self.sentnum = sentnum + """The sentence number of this sentence within ``fileid``. + Indexing starts from zero.""" + + self.wordnum = wordnum + """The word number of this instance's predicate within its + containing sentence. Word numbers are indexed starting from + zero, and include traces and other empty parse elements.""" + + self.baseform = baseform + """The baseform of the predicate.""" + + self.sensenumber = sensenumber + """The sense number of the predicate.""" + + self.predicate = predicate + """A ``NombankTreePointer`` indicating the position of this + instance's predicate within its containing sentence.""" + + self.predid = predid + """Identifier of the predicate.""" + + self.arguments = tuple(arguments) + """A list of tuples (argloc, argid), specifying the location + and identifier for each of the predicate's argument in the + containing sentence. Argument identifiers are strings such as + ``'ARG0'`` or ``'ARGM-TMP'``. This list does *not* contain + the predicate.""" + + self.parse_corpus = parse_corpus + """A corpus reader for the parse trees corresponding to the + instances in this nombank corpus.""" + + @property + def roleset(self): + """The name of the roleset used by this instance's predicate. 
+ Use ``nombank.roleset() `` to + look up information about the roleset.""" + r = self.baseform.replace("%", "perc-sign") + r = r.replace("1/10", "1-slash-10").replace("1-slash-10", "oneslashonezero") + return f"{r}.{self.sensenumber}" + + def __repr__(self): + return "".format( + self.fileid, + self.sentnum, + self.wordnum, + ) + + def __str__(self): + s = "{} {} {} {} {}".format( + self.fileid, + self.sentnum, + self.wordnum, + self.baseform, + self.sensenumber, + ) + items = self.arguments + ((self.predicate, "rel"),) + for (argloc, argid) in sorted(items): + s += f" {argloc}-{argid}" + return s + + def _get_tree(self): + if self.parse_corpus is None: + return None + if self.fileid not in self.parse_corpus.fileids(): + return None + return self.parse_corpus.parsed_sents(self.fileid)[self.sentnum] + + tree = property( + _get_tree, + doc=""" + The parse tree corresponding to this instance, or None if + the corresponding tree is not available.""", + ) + + @staticmethod + def parse(s, parse_fileid_xform=None, parse_corpus=None): + pieces = s.split() + if len(pieces) < 6: + raise ValueError("Badly formatted nombank line: %r" % s) + + # Divide the line into its basic pieces. + (fileid, sentnum, wordnum, baseform, sensenumber) = pieces[:5] + + args = pieces[5:] + rel = [args.pop(i) for i, p in enumerate(args) if "-rel" in p] + if len(rel) != 1: + raise ValueError("Badly formatted nombank line: %r" % s) + + # Apply the fileid selector, if any. + if parse_fileid_xform is not None: + fileid = parse_fileid_xform(fileid) + + # Convert sentence & word numbers to ints. + sentnum = int(sentnum) + wordnum = int(wordnum) + + # Parse the predicate location. + + predloc, predid = rel[0].split("-", 1) + predicate = NombankTreePointer.parse(predloc) + + # Parse the arguments. + arguments = [] + for arg in args: + argloc, argid = arg.split("-", 1) + arguments.append((NombankTreePointer.parse(argloc), argid)) + + # Put it all together. + return NombankInstance( + fileid, + sentnum, + wordnum, + baseform, + sensenumber, + predicate, + predid, + arguments, + parse_corpus, + ) + + +class NombankPointer: + """ + A pointer used by nombank to identify one or more constituents in + a parse tree. ``NombankPointer`` is an abstract base class with + three concrete subclasses: + + - ``NombankTreePointer`` is used to point to single constituents. + - ``NombankSplitTreePointer`` is used to point to 'split' + constituents, which consist of a sequence of two or more + ``NombankTreePointer`` pointers. + - ``NombankChainTreePointer`` is used to point to entire trace + chains in a tree. It consists of a sequence of pieces, which + can be ``NombankTreePointer`` or ``NombankSplitTreePointer`` pointers. + """ + + def __init__(self): + if self.__class__ == NombankPointer: + raise NotImplementedError() + + +class NombankChainTreePointer(NombankPointer): + def __init__(self, pieces): + self.pieces = pieces + """A list of the pieces that make up this chain. Elements may + be either ``NombankSplitTreePointer`` or + ``NombankTreePointer`` pointers.""" + + def __str__(self): + return "*".join("%s" % p for p in self.pieces) + + def __repr__(self): + return "" % self + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return Tree("*CHAIN*", [p.select(tree) for p in self.pieces]) + + +class NombankSplitTreePointer(NombankPointer): + def __init__(self, pieces): + self.pieces = pieces + """A list of the pieces that make up this chain. 
Elements are + all ``NombankTreePointer`` pointers.""" + + def __str__(self): + return ",".join("%s" % p for p in self.pieces) + + def __repr__(self): + return "" % self + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return Tree("*SPLIT*", [p.select(tree) for p in self.pieces]) + + +@total_ordering +class NombankTreePointer(NombankPointer): + """ + wordnum:height*wordnum:height*... + wordnum:height, + + """ + + def __init__(self, wordnum, height): + self.wordnum = wordnum + self.height = height + + @staticmethod + def parse(s): + # Deal with chains (xx*yy*zz) + pieces = s.split("*") + if len(pieces) > 1: + return NombankChainTreePointer( + [NombankTreePointer.parse(elt) for elt in pieces] + ) + + # Deal with split args (xx,yy,zz) + pieces = s.split(",") + if len(pieces) > 1: + return NombankSplitTreePointer( + [NombankTreePointer.parse(elt) for elt in pieces] + ) + + # Deal with normal pointers. + pieces = s.split(":") + if len(pieces) != 2: + raise ValueError("bad nombank pointer %r" % s) + return NombankTreePointer(int(pieces[0]), int(pieces[1])) + + def __str__(self): + return f"{self.wordnum}:{self.height}" + + def __repr__(self): + return "NombankTreePointer(%d, %d)" % (self.wordnum, self.height) + + def __eq__(self, other): + while isinstance(other, (NombankChainTreePointer, NombankSplitTreePointer)): + other = other.pieces[0] + + if not isinstance(other, NombankTreePointer): + return self is other + + return self.wordnum == other.wordnum and self.height == other.height + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + while isinstance(other, (NombankChainTreePointer, NombankSplitTreePointer)): + other = other.pieces[0] + + if not isinstance(other, NombankTreePointer): + return id(self) < id(other) + + return (self.wordnum, -self.height) < (other.wordnum, -other.height) + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return tree[self.treepos(tree)] + + def treepos(self, tree): + """ + Convert this pointer to a standard 'tree position' pointer, + given that it points to the given tree. + """ + if tree is None: + raise ValueError("Parse tree not available") + stack = [tree] + treepos = [] + + wordnum = 0 + while True: + # tree node: + if isinstance(stack[-1], Tree): + # Select the next child. + if len(treepos) < len(stack): + treepos.append(0) + else: + treepos[-1] += 1 + # Update the stack. + if treepos[-1] < len(stack[-1]): + stack.append(stack[-1][treepos[-1]]) + else: + # End of node's child list: pop up a level. + stack.pop() + treepos.pop() + # word node: + else: + if wordnum == self.wordnum: + return tuple(treepos[: len(treepos) - self.height - 1]) + else: + wordnum += 1 + stack.pop() diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py new file mode 100644 index 0000000000000000000000000000000000000000..87be7c97e6151c8ce19e64e2f8ac6683918e3aad --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py @@ -0,0 +1,125 @@ +# Natural Language Toolkit: Opinion Lexicon Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for the Opinion Lexicon. + +Opinion Lexicon information +=========================== + +Authors: Minqing Hu and Bing Liu, 2004. 
+ Department of Computer Science + University of Illinois at Chicago + +Contact: Bing Liu, liub@cs.uic.edu + https://www.cs.uic.edu/~liub + +Distributed with permission. + +Related papers: + +- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews". + Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery + & Data Mining (KDD-04), Aug 22-25, 2004, Seattle, Washington, USA. + +- Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and + Comparing Opinions on the Web". Proceedings of the 14th International World + Wide Web conference (WWW-2005), May 10-14, 2005, Chiba, Japan. +""" + +from nltk.corpus.reader import WordListCorpusReader +from nltk.corpus.reader.api import * + + +class IgnoreReadmeCorpusView(StreamBackedCorpusView): + """ + This CorpusView is used to skip the initial readme block of the corpus. + """ + + def __init__(self, *args, **kwargs): + StreamBackedCorpusView.__init__(self, *args, **kwargs) + # open self._stream + self._open() + # skip the readme block + read_blankline_block(self._stream) + # Set the initial position to the current stream position + self._filepos = [self._stream.tell()] + + +class OpinionLexiconCorpusReader(WordListCorpusReader): + """ + Reader for Liu and Hu opinion lexicon. Blank lines and readme are ignored. + + >>> from nltk.corpus import opinion_lexicon + >>> opinion_lexicon.words() + ['2-faced', '2-faces', 'abnormal', 'abolish', ...] + + The OpinionLexiconCorpusReader provides shortcuts to retrieve positive/negative + words: + + >>> opinion_lexicon.negative() + ['2-faced', '2-faces', 'abnormal', 'abolish', ...] + + Note that words from `words()` method are sorted by file id, not alphabetically: + + >>> opinion_lexicon.words()[0:10] # doctest: +NORMALIZE_WHITESPACE + ['2-faced', '2-faces', 'abnormal', 'abolish', 'abominable', 'abominably', + 'abominate', 'abomination', 'abort', 'aborted'] + >>> sorted(opinion_lexicon.words())[0:10] # doctest: +NORMALIZE_WHITESPACE + ['2-faced', '2-faces', 'a+', 'abnormal', 'abolish', 'abominable', 'abominably', + 'abominate', 'abomination', 'abort'] + """ + + CorpusView = IgnoreReadmeCorpusView + + def words(self, fileids=None): + """ + Return all words in the opinion lexicon. Note that these words are not + sorted in alphabetical order. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def positive(self): + """ + Return all positive words in alphabetical order. + + :return: a list of positive words. + :rtype: list(str) + """ + return self.words("positive-words.txt") + + def negative(self): + """ + Return all negative words in alphabetical order. + + :return: a list of negative words. + :rtype: list(str) + """ + return self.words("negative-words.txt") + + def _read_word_block(self, stream): + words = [] + for i in range(20): # Read 20 lines at a time. 
+ line = stream.readline() + if not line: + continue + words.append(line.strip()) + return words diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py new file mode 100644 index 0000000000000000000000000000000000000000..59492992353ca876eea00f63e3759f14ec5b0e02 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py @@ -0,0 +1,174 @@ +# Natural Language Toolkit: PanLex Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: David Kamholz +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for PanLex Lite, a stripped down version of PanLex distributed +as an SQLite database. See the README.txt in the panlex_lite corpus directory +for more information on PanLex Lite. +""" + +import os +import sqlite3 + +from nltk.corpus.reader.api import CorpusReader + + +class PanLexLiteCorpusReader(CorpusReader): + MEANING_Q = """ + SELECT dnx2.mn, dnx2.uq, dnx2.ap, dnx2.ui, ex2.tt, ex2.lv + FROM dnx + JOIN ex ON (ex.ex = dnx.ex) + JOIN dnx dnx2 ON (dnx2.mn = dnx.mn) + JOIN ex ex2 ON (ex2.ex = dnx2.ex) + WHERE dnx.ex != dnx2.ex AND ex.tt = ? AND ex.lv = ? + ORDER BY dnx2.uq DESC + """ + + TRANSLATION_Q = """ + SELECT s.tt, sum(s.uq) AS trq FROM ( + SELECT ex2.tt, max(dnx.uq) AS uq + FROM dnx + JOIN ex ON (ex.ex = dnx.ex) + JOIN dnx dnx2 ON (dnx2.mn = dnx.mn) + JOIN ex ex2 ON (ex2.ex = dnx2.ex) + WHERE dnx.ex != dnx2.ex AND ex.lv = ? AND ex.tt = ? AND ex2.lv = ? + GROUP BY ex2.tt, dnx.ui + ) s + GROUP BY s.tt + ORDER BY trq DESC, s.tt + """ + + def __init__(self, root): + self._c = sqlite3.connect(os.path.join(root, "db.sqlite")).cursor() + + self._uid_lv = {} + self._lv_uid = {} + + for row in self._c.execute("SELECT uid, lv FROM lv"): + self._uid_lv[row[0]] = row[1] + self._lv_uid[row[1]] = row[0] + + def language_varieties(self, lc=None): + """ + Return a list of PanLex language varieties. + + :param lc: ISO 639 alpha-3 code. If specified, filters returned varieties + by this code. If unspecified, all varieties are returned. + :return: the specified language varieties as a list of tuples. The first + element is the language variety's seven-character uniform identifier, + and the second element is its default name. + :rtype: list(tuple) + """ + + if lc is None: + return self._c.execute("SELECT uid, tt FROM lv ORDER BY uid").fetchall() + else: + return self._c.execute( + "SELECT uid, tt FROM lv WHERE lc = ? ORDER BY uid", (lc,) + ).fetchall() + + def meanings(self, expr_uid, expr_tt): + """ + Return a list of meanings for an expression. + + :param expr_uid: the expression's language variety, as a seven-character + uniform identifier. + :param expr_tt: the expression's text. + :return: a list of Meaning objects. + :rtype: list(Meaning) + """ + + expr_lv = self._uid_lv[expr_uid] + + mn_info = {} + + for i in self._c.execute(self.MEANING_Q, (expr_tt, expr_lv)): + mn = i[0] + uid = self._lv_uid[i[5]] + + if not mn in mn_info: + mn_info[mn] = { + "uq": i[1], + "ap": i[2], + "ui": i[3], + "ex": {expr_uid: [expr_tt]}, + } + + if not uid in mn_info[mn]["ex"]: + mn_info[mn]["ex"][uid] = [] + + mn_info[mn]["ex"][uid].append(i[4]) + + return [Meaning(mn, mn_info[mn]) for mn in mn_info] + + def translations(self, from_uid, from_tt, to_uid): + """ + Return a list of translations for an expression into a single language + variety. + + :param from_uid: the source expression's language variety, as a + seven-character uniform identifier. 
+ :param from_tt: the source expression's text. + :param to_uid: the target language variety, as a seven-character + uniform identifier. + :return: a list of translation tuples. The first element is the expression + text and the second element is the translation quality. + :rtype: list(tuple) + """ + + from_lv = self._uid_lv[from_uid] + to_lv = self._uid_lv[to_uid] + + return self._c.execute(self.TRANSLATION_Q, (from_lv, from_tt, to_lv)).fetchall() + + +class Meaning(dict): + """ + Represents a single PanLex meaning. A meaning is a translation set derived + from a single source. + """ + + def __init__(self, mn, attr): + super().__init__(**attr) + self["mn"] = mn + + def id(self): + """ + :return: the meaning's id. + :rtype: int + """ + return self["mn"] + + def quality(self): + """ + :return: the meaning's source's quality (0=worst, 9=best). + :rtype: int + """ + return self["uq"] + + def source(self): + """ + :return: the meaning's source id. + :rtype: int + """ + return self["ap"] + + def source_group(self): + """ + :return: the meaning's source group id. + :rtype: int + """ + return self["ui"] + + def expressions(self): + """ + :return: the meaning's expressions as a dictionary whose keys are language + variety uniform identifiers and whose values are lists of expression + texts. + :rtype: dict + """ + return self["ex"] diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py new file mode 100644 index 0000000000000000000000000000000000000000..182960f2ebc4b3e2411e3980ce4e445412af9bcc --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py @@ -0,0 +1,95 @@ +# Natural Language Toolkit: Word List Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + + +import re +from collections import defaultdict, namedtuple + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.wordlist import WordListCorpusReader +from nltk.tokenize import line_tokenize + +PanlexLanguage = namedtuple( + "PanlexLanguage", + [ + "panlex_uid", # (1) PanLex UID + "iso639", # (2) ISO 639 language code + "iso639_type", # (3) ISO 639 language type, see README + "script", # (4) normal scripts of expressions + "name", # (5) PanLex default name + "langvar_uid", # (6) UID of the language variety in which the default name is an expression + ], +) + + +class PanlexSwadeshCorpusReader(WordListCorpusReader): + """ + This is a class to read the PanLex Swadesh list from + + David Kamholz, Jonathan Pool, and Susan M. Colowick (2014). + PanLex: Building a Resource for Panlingual Lexical Translation. + In LREC. http://www.lrec-conf.org/proceedings/lrec2014/pdf/1029_Paper.pdf + + License: CC0 1.0 Universal + https://creativecommons.org/publicdomain/zero/1.0/legalcode + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Find the swadesh size using the fileids' path. + self.swadesh_size = re.match(r"swadesh([0-9].*)\/", self.fileids()[0]).group(1) + self._languages = {lang.panlex_uid: lang for lang in self.get_languages()} + self._macro_langauges = self.get_macrolanguages() + + def license(self): + return "CC0 1.0 Universal" + + def language_codes(self): + return self._languages.keys() + + def get_languages(self): + for line in self.raw(f"langs{self.swadesh_size}.txt").split("\n"): + if not line.strip(): # Skip empty lines. 
+ continue + yield PanlexLanguage(*line.strip().split("\t")) + + def get_macrolanguages(self): + macro_langauges = defaultdict(list) + for lang in self._languages.values(): + macro_langauges[lang.iso639].append(lang.panlex_uid) + return macro_langauges + + def words_by_lang(self, lang_code): + """ + :return: a list of list(str) + """ + fileid = f"swadesh{self.swadesh_size}/{lang_code}.txt" + return [concept.split("\t") for concept in self.words(fileid)] + + def words_by_iso639(self, iso63_code): + """ + :return: a list of list(str) + """ + fileids = [ + f"swadesh{self.swadesh_size}/{lang_code}.txt" + for lang_code in self._macro_langauges[iso63_code] + ] + return [ + concept.split("\t") for fileid in fileids for concept in self.words(fileid) + ] + + def entries(self, fileids=None): + """ + :return: a tuple of words for the specified fileids. + """ + if not fileids: + fileids = self.fileids() + + wordlists = [self.words(f) for f in fileids] + return list(zip(*wordlists)) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py new file mode 100644 index 0000000000000000000000000000000000000000..e59d297c0100f46b484b02bfc125532e4ca9d8ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py @@ -0,0 +1,375 @@ +# Natural Language Toolkit: +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Piotr Kasprzyk +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.xmldocs import XMLCorpusReader + +PARA = re.compile(r"]*){0,1}>(.*?)
") +SENT = re.compile(r"]*){0,1}>(.*?)") + +TAGGEDWORD = re.compile(r"<([wc](?: [^>]*){0,1}>)(.*?)") +WORD = re.compile(r"<[wc](?: [^>]*){0,1}>(.*?)") + +TYPE = re.compile(r'type="(.*?)"') +ANA = re.compile(r'ana="(.*?)"') + +TEXTID = re.compile(r'text id="(.*?)"') + + +class TEICorpusView(StreamBackedCorpusView): + def __init__( + self, + corpus_file, + tagged, + group_by_sent, + group_by_para, + tagset=None, + head_len=0, + textids=None, + ): + + self._tagged = tagged + self._textids = textids + + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + # WARNING -- skip header + StreamBackedCorpusView.__init__(self, corpus_file, startpos=head_len) + + _pagesize = 4096 + + def read_block(self, stream): + block = stream.readlines(self._pagesize) + block = concat(block) + while (block.count(" block.count("")) or block.count( + "") + len("") + block = block[:beg] + block[beg + end :] + + output = [] + for para_str in PARA.findall(block): + para = [] + for sent_str in SENT.findall(para_str): + if not self._tagged: + sent = WORD.findall(sent_str) + else: + sent = list(map(self._parse_tag, TAGGEDWORD.findall(sent_str))) + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + if self._group_by_para: + output.append(para) + else: + output.extend(para) + return output + + def _parse_tag(self, tag_word_tuple): + (tag, word) = tag_word_tuple + if tag.startswith("w"): + tag = ANA.search(tag).group(1) + else: # tag.startswith('c') + tag = TYPE.search(tag).group(1) + return word, tag + + +class Pl196xCorpusReader(CategorizedCorpusReader, XMLCorpusReader): + head_len = 2770 + + def __init__(self, *args, **kwargs): + if "textid_file" in kwargs: + self._textids = kwargs["textid_file"] + else: + self._textids = None + + XMLCorpusReader.__init__(self, *args) + CategorizedCorpusReader.__init__(self, kwargs) + + self._init_textids() + + def _init_textids(self): + self._f2t = defaultdict(list) + self._t2f = defaultdict(list) + if self._textids is not None: + with open(self._textids) as fp: + for line in fp: + line = line.strip() + file_id, text_ids = line.split(" ", 1) + if file_id not in self.fileids(): + raise ValueError( + "In text_id mapping file %s: %s not found" + % (self._textids, file_id) + ) + for text_id in text_ids.split(self._delimiter): + self._add_textids(file_id, text_id) + + def _add_textids(self, file_id, text_id): + self._f2t[file_id].append(text_id) + self._t2f[text_id].append(file_id) + + def _resolve(self, fileids, categories, textids=None): + tmp = None + if ( + len( + list( + filter( + lambda accessor: accessor is None, + (fileids, categories, textids), + ) + ) + ) + != 1 + ): + + raise ValueError( + "Specify exactly one of: fileids, " "categories or textids" + ) + + if fileids is not None: + return fileids, None + + if categories is not None: + return self.fileids(categories), None + + if textids is not None: + if isinstance(textids, str): + textids = [textids] + files = sum((self._t2f[t] for t in textids), []) + tdict = dict() + for f in files: + tdict[f] = set(self._f2t[f]) & set(textids) + return files, tdict + + def decode_tag(self, tag): + # to be implemented + return tag + + def textids(self, fileids=None, categories=None): + """ + In the pl196x corpus each category is stored in single + file and thus both methods provide identical functionality. In order + to accommodate finer granularity, a non-standard textids() method was + implemented. 
All the main functions can be supplied with a list + of required chunks---giving much more control to the user. + """ + fileids, _ = self._resolve(fileids, categories) + if fileids is None: + return sorted(self._t2f) + + if isinstance(fileids, str): + fileids = [fileids] + return sorted(sum((self._f2t[d] for d in fileids), [])) + + def words(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + False, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + False, + False, + head_len=self.head_len, + ) + for fileid in fileids + ] + ) + + def sents(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + True, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), False, True, False, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def paras(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + True, + True, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), False, True, True, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def tagged_words(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + True, + False, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), True, False, False, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def tagged_sents(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + True, + True, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), True, True, False, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def tagged_paras(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + 
True, + True, + True, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), True, True, True, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def xml(self, fileids=None, categories=None): + fileids, _ = self._resolve(fileids, categories) + if len(fileids) == 1: + return XMLCorpusReader.xml(self, fileids[0]) + else: + raise TypeError("Expected a single file") diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/plaintext.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/plaintext.py new file mode 100644 index 0000000000000000000000000000000000000000..f096f3ecb0ef7196950071723393656ec91aa363 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/plaintext.py @@ -0,0 +1,227 @@ +# Natural Language Toolkit: Plaintext Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# Nitin Madnani +# URL: +# For license information, see LICENSE.TXT + +""" +A reader for corpora that consist of plaintext documents. +""" + +import nltk.data +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tokenize import * + + +class PlaintextCorpusReader(CorpusReader): + """ + Reader for corpora that consist of plaintext documents. Paragraphs + are assumed to be split using blank lines. Sentences and words can + be tokenized using the default tokenizers, or by custom tokenizers + specified as parameters to the constructor. + + This corpus reader can be customized (e.g., to skip preface + sections of specific document formats) by creating a subclass and + overriding the ``CorpusView`` class variable. + """ + + CorpusView = StreamBackedCorpusView + """The corpus view class used by this reader. Subclasses of + ``PlaintextCorpusReader`` may specify alternative corpus view + classes (e.g., to skip the preface sections of documents.)""" + + def __init__( + self, + root, + fileids, + word_tokenizer=WordPunctTokenizer(), + sent_tokenizer=nltk.data.LazyLoader("tokenizers/punkt/english.pickle"), + para_block_reader=read_blankline_block, + encoding="utf8", + ): + r""" + Construct a new plaintext corpus reader for a set of documents + located at the given root directory. Example usage: + + >>> root = '/usr/local/share/nltk_data/corpora/webtext/' + >>> reader = PlaintextCorpusReader(root, '.*\.txt') # doctest: +SKIP + + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + :param word_tokenizer: Tokenizer for breaking sentences or + paragraphs into words. + :param sent_tokenizer: Tokenizer for breaking paragraphs + into words. + :param para_block_reader: The block reader used to divide the + corpus into paragraph blocks. + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. 
+ :rtype: list(list(str)) + """ + if self._sent_tokenizer is None: + raise ValueError("No sentence tokenizer for this corpus") + + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def paras(self, fileids=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of word strings. + :rtype: list(list(list(str))) + """ + if self._sent_tokenizer is None: + raise ValueError("No sentence tokenizer for this corpus") + + return concat( + [ + self.CorpusView(path, self._read_para_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_word_block(self, stream): + words = [] + for i in range(20): # Read 20 lines at a time. + words.extend(self._word_tokenizer.tokenize(stream.readline())) + return words + + def _read_sent_block(self, stream): + sents = [] + for para in self._para_block_reader(stream): + sents.extend( + [ + self._word_tokenizer.tokenize(sent) + for sent in self._sent_tokenizer.tokenize(para) + ] + ) + return sents + + def _read_para_block(self, stream): + paras = [] + for para in self._para_block_reader(stream): + paras.append( + [ + self._word_tokenizer.tokenize(sent) + for sent in self._sent_tokenizer.tokenize(para) + ] + ) + return paras + + +class CategorizedPlaintextCorpusReader(CategorizedCorpusReader, PlaintextCorpusReader): + """ + A reader for plaintext corpora whose documents are divided into + categories based on their file identifiers. + """ + + def __init__(self, *args, **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to + the ``CategorizedCorpusReader`` constructor. The remaining arguments + are passed to the ``PlaintextCorpusReader`` constructor. + """ + CategorizedCorpusReader.__init__(self, kwargs) + PlaintextCorpusReader.__init__(self, *args, **kwargs) + + +# FIXME: Is there a better way? How to not hardcode this? +# Possibly, add a language kwargs to CategorizedPlaintextCorpusReader to +# override the `sent_tokenizer`. +class PortugueseCategorizedPlaintextCorpusReader(CategorizedPlaintextCorpusReader): + def __init__(self, *args, **kwargs): + CategorizedCorpusReader.__init__(self, kwargs) + kwargs["sent_tokenizer"] = nltk.data.LazyLoader( + "tokenizers/punkt/portuguese.pickle" + ) + PlaintextCorpusReader.__init__(self, *args, **kwargs) + + +class EuroparlCorpusReader(PlaintextCorpusReader): + + """ + Reader for Europarl corpora that consist of plaintext documents. + Documents are divided into chapters instead of paragraphs as + for regular plaintext documents. Chapters are separated using blank + lines. Everything is inherited from ``PlaintextCorpusReader`` except + that: + + - Since the corpus is pre-processed and pre-tokenized, the + word tokenizer should just split the line at whitespaces. + - For the same reason, the sentence tokenizer should just + split the paragraph at line breaks. + - There is a new 'chapters()' method that returns chapters instead + instead of paragraphs. + - The 'paras()' method inherited from PlaintextCorpusReader is + made non-functional to remove any confusion between chapters + and paragraphs for Europarl. + """ + + def _read_word_block(self, stream): + words = [] + for i in range(20): # Read 20 lines at a time. 
+ words.extend(stream.readline().split()) + return words + + def _read_sent_block(self, stream): + sents = [] + for para in self._para_block_reader(stream): + sents.extend([sent.split() for sent in para.splitlines()]) + return sents + + def _read_para_block(self, stream): + paras = [] + for para in self._para_block_reader(stream): + paras.append([sent.split() for sent in para.splitlines()]) + return paras + + def chapters(self, fileids=None): + """ + :return: the given file(s) as a list of + chapters, each encoded as a list of sentences, which are + in turn encoded as lists of word strings. + :rtype: list(list(list(str))) + """ + return concat( + [ + self.CorpusView(fileid, self._read_para_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + raise NotImplementedError( + "The Europarl corpus reader does not support paragraphs. Please use chapters() instead." + ) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/ppattach.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/ppattach.py new file mode 100644 index 0000000000000000000000000000000000000000..0006e640e9ef30cb50fbdee621b13f2f78b484dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/ppattach.py @@ -0,0 +1,95 @@ +# Natural Language Toolkit: PP Attachment Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Read lines from the Prepositional Phrase Attachment Corpus. + +The PP Attachment Corpus contains several files having the format: + +sentence_id verb noun1 preposition noun2 attachment + +For example: + +42960 gives authority to administration V +46742 gives inventors of microchip N + +The PP attachment is to the verb phrase (V) or noun phrase (N), i.e.: + +(VP gives (NP authority) (PP to administration)) +(VP gives (NP inventors (PP of microchip))) + +The corpus contains the following files: + +training: training set +devset: development test set, used for algorithm development. +test: test set, used to report results +bitstrings: word classes derived from Mutual Information Clustering for the Wall Street Journal. + +Ratnaparkhi, Adwait (1994). A Maximum Entropy Model for Prepositional +Phrase Attachment. Proceedings of the ARPA Human Language Technology +Conference. [http://www.cis.upenn.edu/~adwait/papers/hlt94.ps] + +The PP Attachment Corpus is distributed with NLTK with the permission +of the author. 
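A minimal usage sketch through the preconfigured ``nltk.corpus.ppattach`` loader. This is a hedged illustration only: it assumes the corpus data has been installed (e.g. via ``nltk.download('ppattach')``), and outputs are skipped rather than shown.

>>> # hedged sketch: assumes the ppattach corpus data is installed; outputs omitted
>>> from nltk.corpus import ppattach
>>> inst = ppattach.attachments('training')[0]        # doctest: +SKIP
>>> (inst.verb, inst.noun1, inst.prep, inst.noun2)    # doctest: +SKIP
>>> inst.attachment                                   # doctest: +SKIP
>>> ppattach.tuples('training')[0]                    # doctest: +SKIP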
+""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * + + +class PPAttachment: + def __init__(self, sent, verb, noun1, prep, noun2, attachment): + self.sent = sent + self.verb = verb + self.noun1 = noun1 + self.prep = prep + self.noun2 = noun2 + self.attachment = attachment + + def __repr__(self): + return ( + "PPAttachment(sent=%r, verb=%r, noun1=%r, prep=%r, " + "noun2=%r, attachment=%r)" + % (self.sent, self.verb, self.noun1, self.prep, self.noun2, self.attachment) + ) + + +class PPAttachmentCorpusReader(CorpusReader): + """ + sentence_id verb noun1 preposition noun2 attachment + """ + + def attachments(self, fileids): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_obj_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tuples(self, fileids): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_tuple_block(self, stream): + line = stream.readline() + if line: + return [tuple(line.split())] + else: + return [] + + def _read_obj_block(self, stream): + line = stream.readline() + if line: + return [PPAttachment(*line.split())] + else: + return [] diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py new file mode 100644 index 0000000000000000000000000000000000000000..c254a8416f2c1bb38f684819e43bae76a4308eeb --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py @@ -0,0 +1,520 @@ +# Natural Language Toolkit: PropBank Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +import re +from functools import total_ordering +from xml.etree import ElementTree + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.internals import raise_unorderable_types +from nltk.tree import Tree + + +class PropbankCorpusReader(CorpusReader): + """ + Corpus reader for the propbank corpus, which augments the Penn + Treebank with information about the predicate argument structure + of every verb instance. The corpus consists of two parts: the + predicate-argument annotations themselves, and a set of "frameset + files" which define the argument labels used by the annotations, + on a per-verb basis. Each "frameset file" contains one or more + predicates, such as ``'turn'`` or ``'turn_on'``, each of which is + divided into coarse-grained word senses called "rolesets". For + each "roleset", the frameset file provides descriptions of the + argument roles, along with examples. + """ + + def __init__( + self, + root, + propfile, + framefiles="", + verbsfile=None, + parse_fileid_xform=None, + parse_corpus=None, + encoding="utf8", + ): + """ + :param root: The root directory for this corpus. + :param propfile: The name of the file containing the predicate- + argument annotations (relative to ``root``). + :param framefiles: A list or regexp specifying the frameset + fileids for this corpus. + :param parse_fileid_xform: A transform that should be applied + to the fileids in this corpus. This should be a function + of one argument (a fileid) that returns a string (the new + fileid). + :param parse_corpus: The corpus containing the parse trees + corresponding to this corpus. These parse trees are + necessary to resolve the tree pointers used by propbank. 
+ """ + # If framefiles is specified as a regexp, expand it. + if isinstance(framefiles, str): + framefiles = find_corpus_fileids(root, framefiles) + framefiles = list(framefiles) + # Initialize the corpus reader. + CorpusReader.__init__(self, root, [propfile, verbsfile] + framefiles, encoding) + + # Record our frame fileids & prop file. + self._propfile = propfile + self._framefiles = framefiles + self._verbsfile = verbsfile + self._parse_fileid_xform = parse_fileid_xform + self._parse_corpus = parse_corpus + + def instances(self, baseform=None): + """ + :return: a corpus view that acts as a list of + ``PropBankInstance`` objects, one for each noun in the corpus. + """ + kwargs = {} + if baseform is not None: + kwargs["instance_filter"] = lambda inst: inst.baseform == baseform + return StreamBackedCorpusView( + self.abspath(self._propfile), + lambda stream: self._read_instance_block(stream, **kwargs), + encoding=self.encoding(self._propfile), + ) + + def lines(self): + """ + :return: a corpus view that acts as a list of strings, one for + each line in the predicate-argument annotation file. + """ + return StreamBackedCorpusView( + self.abspath(self._propfile), + read_line_block, + encoding=self.encoding(self._propfile), + ) + + def roleset(self, roleset_id): + """ + :return: the xml description for the given roleset. + """ + baseform = roleset_id.split(".")[0] + framefile = "frames/%s.xml" % baseform + if framefile not in self._framefiles: + raise ValueError("Frameset file for %s not found" % roleset_id) + + # n.b.: The encoding for XML fileids is specified by the file + # itself; so we ignore self._encoding here. + with self.abspath(framefile).open() as fp: + etree = ElementTree.parse(fp).getroot() + for roleset in etree.findall("predicate/roleset"): + if roleset.attrib["id"] == roleset_id: + return roleset + raise ValueError(f"Roleset {roleset_id} not found in {framefile}") + + def rolesets(self, baseform=None): + """ + :return: list of xml descriptions for rolesets. + """ + if baseform is not None: + framefile = "frames/%s.xml" % baseform + if framefile not in self._framefiles: + raise ValueError("Frameset file for %s not found" % baseform) + framefiles = [framefile] + else: + framefiles = self._framefiles + + rsets = [] + for framefile in framefiles: + # n.b.: The encoding for XML fileids is specified by the file + # itself; so we ignore self._encoding here. + with self.abspath(framefile).open() as fp: + etree = ElementTree.parse(fp).getroot() + rsets.append(etree.findall("predicate/roleset")) + return LazyConcatenation(rsets) + + def verbs(self): + """ + :return: a corpus view that acts as a list of all verb lemmas + in this corpus (from the verbs.txt file). + """ + return StreamBackedCorpusView( + self.abspath(self._verbsfile), + read_line_block, + encoding=self.encoding(self._verbsfile), + ) + + def _read_instance_block(self, stream, instance_filter=lambda inst: True): + block = [] + + # Read 100 at a time. 
+ for i in range(100): + line = stream.readline().strip() + if line: + inst = PropbankInstance.parse( + line, self._parse_fileid_xform, self._parse_corpus + ) + if instance_filter(inst): + block.append(inst) + + return block + + +###################################################################### +# { Propbank Instance & related datatypes +###################################################################### + + +class PropbankInstance: + def __init__( + self, + fileid, + sentnum, + wordnum, + tagger, + roleset, + inflection, + predicate, + arguments, + parse_corpus=None, + ): + + self.fileid = fileid + """The name of the file containing the parse tree for this + instance's sentence.""" + + self.sentnum = sentnum + """The sentence number of this sentence within ``fileid``. + Indexing starts from zero.""" + + self.wordnum = wordnum + """The word number of this instance's predicate within its + containing sentence. Word numbers are indexed starting from + zero, and include traces and other empty parse elements.""" + + self.tagger = tagger + """An identifier for the tagger who tagged this instance; or + ``'gold'`` if this is an adjuticated instance.""" + + self.roleset = roleset + """The name of the roleset used by this instance's predicate. + Use ``propbank.roleset() `` to + look up information about the roleset.""" + + self.inflection = inflection + """A ``PropbankInflection`` object describing the inflection of + this instance's predicate.""" + + self.predicate = predicate + """A ``PropbankTreePointer`` indicating the position of this + instance's predicate within its containing sentence.""" + + self.arguments = tuple(arguments) + """A list of tuples (argloc, argid), specifying the location + and identifier for each of the predicate's argument in the + containing sentence. Argument identifiers are strings such as + ``'ARG0'`` or ``'ARGM-TMP'``. This list does *not* contain + the predicate.""" + + self.parse_corpus = parse_corpus + """A corpus reader for the parse trees corresponding to the + instances in this propbank corpus.""" + + @property + def baseform(self): + """The baseform of the predicate.""" + return self.roleset.split(".")[0] + + @property + def sensenumber(self): + """The sense number of the predicate.""" + return self.roleset.split(".")[1] + + @property + def predid(self): + """Identifier of the predicate.""" + return "rel" + + def __repr__(self): + return "".format( + self.fileid, + self.sentnum, + self.wordnum, + ) + + def __str__(self): + s = "{} {} {} {} {} {}".format( + self.fileid, + self.sentnum, + self.wordnum, + self.tagger, + self.roleset, + self.inflection, + ) + items = self.arguments + ((self.predicate, "rel"),) + for (argloc, argid) in sorted(items): + s += f" {argloc}-{argid}" + return s + + def _get_tree(self): + if self.parse_corpus is None: + return None + if self.fileid not in self.parse_corpus.fileids(): + return None + return self.parse_corpus.parsed_sents(self.fileid)[self.sentnum] + + tree = property( + _get_tree, + doc=""" + The parse tree corresponding to this instance, or None if + the corresponding tree is not available.""", + ) + + @staticmethod + def parse(s, parse_fileid_xform=None, parse_corpus=None): + pieces = s.split() + if len(pieces) < 7: + raise ValueError("Badly formatted propbank line: %r" % s) + + # Divide the line into its basic pieces. 
+ (fileid, sentnum, wordnum, tagger, roleset, inflection) = pieces[:6] + rel = [p for p in pieces[6:] if p.endswith("-rel")] + args = [p for p in pieces[6:] if not p.endswith("-rel")] + if len(rel) != 1: + raise ValueError("Badly formatted propbank line: %r" % s) + + # Apply the fileid selector, if any. + if parse_fileid_xform is not None: + fileid = parse_fileid_xform(fileid) + + # Convert sentence & word numbers to ints. + sentnum = int(sentnum) + wordnum = int(wordnum) + + # Parse the inflection + inflection = PropbankInflection.parse(inflection) + + # Parse the predicate location. + predicate = PropbankTreePointer.parse(rel[0][:-4]) + + # Parse the arguments. + arguments = [] + for arg in args: + argloc, argid = arg.split("-", 1) + arguments.append((PropbankTreePointer.parse(argloc), argid)) + + # Put it all together. + return PropbankInstance( + fileid, + sentnum, + wordnum, + tagger, + roleset, + inflection, + predicate, + arguments, + parse_corpus, + ) + + +class PropbankPointer: + """ + A pointer used by propbank to identify one or more constituents in + a parse tree. ``PropbankPointer`` is an abstract base class with + three concrete subclasses: + + - ``PropbankTreePointer`` is used to point to single constituents. + - ``PropbankSplitTreePointer`` is used to point to 'split' + constituents, which consist of a sequence of two or more + ``PropbankTreePointer`` pointers. + - ``PropbankChainTreePointer`` is used to point to entire trace + chains in a tree. It consists of a sequence of pieces, which + can be ``PropbankTreePointer`` or ``PropbankSplitTreePointer`` pointers. + """ + + def __init__(self): + if self.__class__ == PropbankPointer: + raise NotImplementedError() + + +class PropbankChainTreePointer(PropbankPointer): + def __init__(self, pieces): + self.pieces = pieces + """A list of the pieces that make up this chain. Elements may + be either ``PropbankSplitTreePointer`` or + ``PropbankTreePointer`` pointers.""" + + def __str__(self): + return "*".join("%s" % p for p in self.pieces) + + def __repr__(self): + return "" % self + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return Tree("*CHAIN*", [p.select(tree) for p in self.pieces]) + + +class PropbankSplitTreePointer(PropbankPointer): + def __init__(self, pieces): + self.pieces = pieces + """A list of the pieces that make up this chain. Elements are + all ``PropbankTreePointer`` pointers.""" + + def __str__(self): + return ",".join("%s" % p for p in self.pieces) + + def __repr__(self): + return "" % self + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return Tree("*SPLIT*", [p.select(tree) for p in self.pieces]) + + +@total_ordering +class PropbankTreePointer(PropbankPointer): + """ + wordnum:height*wordnum:height*... + wordnum:height, + + """ + + def __init__(self, wordnum, height): + self.wordnum = wordnum + self.height = height + + @staticmethod + def parse(s): + # Deal with chains (xx*yy*zz) + pieces = s.split("*") + if len(pieces) > 1: + return PropbankChainTreePointer( + [PropbankTreePointer.parse(elt) for elt in pieces] + ) + + # Deal with split args (xx,yy,zz) + pieces = s.split(",") + if len(pieces) > 1: + return PropbankSplitTreePointer( + [PropbankTreePointer.parse(elt) for elt in pieces] + ) + + # Deal with normal pointers. 
+ pieces = s.split(":") + if len(pieces) != 2: + raise ValueError("bad propbank pointer %r" % s) + return PropbankTreePointer(int(pieces[0]), int(pieces[1])) + + def __str__(self): + return f"{self.wordnum}:{self.height}" + + def __repr__(self): + return "PropbankTreePointer(%d, %d)" % (self.wordnum, self.height) + + def __eq__(self, other): + while isinstance(other, (PropbankChainTreePointer, PropbankSplitTreePointer)): + other = other.pieces[0] + + if not isinstance(other, PropbankTreePointer): + return self is other + + return self.wordnum == other.wordnum and self.height == other.height + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + while isinstance(other, (PropbankChainTreePointer, PropbankSplitTreePointer)): + other = other.pieces[0] + + if not isinstance(other, PropbankTreePointer): + return id(self) < id(other) + + return (self.wordnum, -self.height) < (other.wordnum, -other.height) + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return tree[self.treepos(tree)] + + def treepos(self, tree): + """ + Convert this pointer to a standard 'tree position' pointer, + given that it points to the given tree. + """ + if tree is None: + raise ValueError("Parse tree not available") + stack = [tree] + treepos = [] + + wordnum = 0 + while True: + # tree node: + if isinstance(stack[-1], Tree): + # Select the next child. + if len(treepos) < len(stack): + treepos.append(0) + else: + treepos[-1] += 1 + # Update the stack. + if treepos[-1] < len(stack[-1]): + stack.append(stack[-1][treepos[-1]]) + else: + # End of node's child list: pop up a level. + stack.pop() + treepos.pop() + # word node: + else: + if wordnum == self.wordnum: + return tuple(treepos[: len(treepos) - self.height - 1]) + else: + wordnum += 1 + stack.pop() + + +class PropbankInflection: + # { Inflection Form + INFINITIVE = "i" + GERUND = "g" + PARTICIPLE = "p" + FINITE = "v" + # { Inflection Tense + FUTURE = "f" + PAST = "p" + PRESENT = "n" + # { Inflection Aspect + PERFECT = "p" + PROGRESSIVE = "o" + PERFECT_AND_PROGRESSIVE = "b" + # { Inflection Person + THIRD_PERSON = "3" + # { Inflection Voice + ACTIVE = "a" + PASSIVE = "p" + # { Inflection + NONE = "-" + # } + + def __init__(self, form="-", tense="-", aspect="-", person="-", voice="-"): + self.form = form + self.tense = tense + self.aspect = aspect + self.person = person + self.voice = voice + + def __str__(self): + return self.form + self.tense + self.aspect + self.person + self.voice + + def __repr__(self): + return "" % self + + _VALIDATE = re.compile(r"[igpv\-][fpn\-][pob\-][3\-][ap\-]$") + + @staticmethod + def parse(s): + if not isinstance(s, str): + raise TypeError("expected a string") + if len(s) != 5 or not PropbankInflection._VALIDATE.match(s): + raise ValueError("Bad propbank inflection string %r" % s) + return PropbankInflection(*s) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py new file mode 100644 index 0000000000000000000000000000000000000000..31f1b02f701bc68a652af9617751d78b1c04d56d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py @@ -0,0 +1,133 @@ +# Natural Language Toolkit: Pros and Cons Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for the Pros and Cons dataset. 
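A hedged usage sketch (it assumes the pros_cons corpus data is installed; outputs are skipped; concrete doctests appear in the ``ProsConsCorpusReader`` docstring below):

>>> # hedged sketch: assumes the pros_cons corpus data is installed; outputs omitted
>>> from nltk.corpus import pros_cons
>>> pros_cons.categories()                            # doctest: +SKIP
>>> pros_cons.words(categories='Pros')[:10]           # doctest: +SKIP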
+ +- Pros and Cons dataset information - + +Contact: Bing Liu, liub@cs.uic.edu + https://www.cs.uic.edu/~liub + +Distributed with permission. + +Related papers: + +- Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences". + Proceedings of the 22nd International Conference on Computational Linguistics + (Coling-2008), Manchester, 18-22 August, 2008. + +- Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and Comparing + Opinions on the Web". Proceedings of the 14th international World Wide Web + conference (WWW-2005), May 10-14, 2005, in Chiba, Japan. +""" +import re + +from nltk.corpus.reader.api import * +from nltk.tokenize import * + + +class ProsConsCorpusReader(CategorizedCorpusReader, CorpusReader): + """ + Reader for the Pros and Cons sentence dataset. + + >>> from nltk.corpus import pros_cons + >>> pros_cons.sents(categories='Cons') # doctest: +NORMALIZE_WHITESPACE + [['East', 'batteries', '!', 'On', '-', 'off', 'switch', 'too', 'easy', + 'to', 'maneuver', '.'], ['Eats', '...', 'no', ',', 'GULPS', 'batteries'], + ...] + >>> pros_cons.words('IntegratedPros.txt') + ['Easy', 'to', 'use', ',', 'economical', '!', ...] + """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, + root, + fileids, + word_tokenizer=WordPunctTokenizer(), + encoding="utf8", + **kwargs + ): + """ + :param root: The root directory for the corpus. + :param fileids: a list or regexp specifying the fileids in the corpus. + :param word_tokenizer: a tokenizer for breaking sentences or paragraphs + into words. Default: `WhitespaceTokenizer` + :param encoding: the encoding that should be used to read the corpus. + :param kwargs: additional parameters passed to CategorizedCorpusReader. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + CategorizedCorpusReader.__init__(self, kwargs) + self._word_tokenizer = word_tokenizer + + def sents(self, fileids=None, categories=None): + """ + Return all sentences in the corpus or in the specified files/categories. + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. + :param categories: a list specifying the categories whose sentences + have to be returned. + :return: the given file(s) as a list of sentences. Each sentence is + tokenized using the specified word_tokenizer. + :rtype: list(list(str)) + """ + fileids = self._resolve(fileids, categories) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def words(self, fileids=None, categories=None): + """ + Return all words and punctuation symbols in the corpus or in the specified + files/categories. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :param categories: a list specifying the categories whose words have + to be returned. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + fileids = self._resolve(fileids, categories) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_sent_block(self, stream): + sents = [] + for i in range(20): # Read 20 lines at a time. 
+ line = stream.readline() + if not line: + continue + sent = re.match(r"^(?!\n)\s*<(Pros|Cons)>(.*)", line) + if sent: + sents.append(self._word_tokenizer.tokenize(sent.group(2).strip())) + return sents + + def _read_word_block(self, stream): + words = [] + for sent in self._read_sent_block(stream): + words.extend(sent) + return words diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py new file mode 100644 index 0000000000000000000000000000000000000000..5f52425c0f7c260f62d7d953b90d241a6c00a2b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py @@ -0,0 +1,331 @@ +# Natural Language Toolkit: Product Reviews Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for reviews corpora (syntax based on Customer Review Corpus). + +Customer Review Corpus information +================================== + +Annotated by: Minqing Hu and Bing Liu, 2004. + Department of Computer Science + University of Illinois at Chicago + +Contact: Bing Liu, liub@cs.uic.edu + https://www.cs.uic.edu/~liub + +Distributed with permission. + +The "product_reviews_1" and "product_reviews_2" datasets respectively contain +annotated customer reviews of 5 and 9 products from amazon.com. + +Related papers: + +- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews". + Proceedings of the ACM SIGKDD International Conference on Knowledge + Discovery & Data Mining (KDD-04), 2004. + +- Minqing Hu and Bing Liu. "Mining Opinion Features in Customer Reviews". + Proceedings of Nineteeth National Conference on Artificial Intelligence + (AAAI-2004), 2004. + +- Xiaowen Ding, Bing Liu and Philip S. Yu. "A Holistic Lexicon-Based Appraoch to + Opinion Mining." Proceedings of First ACM International Conference on Web + Search and Data Mining (WSDM-2008), Feb 11-12, 2008, Stanford University, + Stanford, California, USA. + +Symbols used in the annotated reviews: + + :[t]: the title of the review: Each [t] tag starts a review. + :xxxx[+|-n]: xxxx is a product feature. + :[+n]: Positive opinion, n is the opinion strength: 3 strongest, and 1 weakest. + Note that the strength is quite subjective. + You may want ignore it, but only considering + and - + :[-n]: Negative opinion + :##: start of each sentence. Each line is a sentence. + :[u]: feature not appeared in the sentence. + :[p]: feature not appeared in the sentence. Pronoun resolution is needed. + :[s]: suggestion or recommendation. + :[cc]: comparison with a competing product from a different brand. + :[cs]: comparison with a competing product from the same brand. + +Note: Some of the files (e.g. "ipod.txt", "Canon PowerShot SD500.txt") do not + provide separation between different reviews. This is due to the fact that + the dataset was specifically designed for aspect/feature-based sentiment + analysis, for which sentence-level annotation is sufficient. For document- + level classification and analysis, this peculiarity should be taken into + consideration. 
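As a hedged, constructed illustration of the annotation syntax above (this line is made up for exposition and is not taken from the dataset), a sentence annotated as

    battery life[+2], size[-1]##the battery life is great but it is a bit heavy .

carries two feature annotations, which the reader returns as the tuples ('battery life', '+2') and ('size', '-1'); the tokenized sentence is the text following the ``##`` marker.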
+""" + +import re + +from nltk.corpus.reader.api import * +from nltk.tokenize import * + +TITLE = re.compile(r"^\[t\](.*)$") # [t] Title +FEATURES = re.compile( + r"((?:(?:\w+\s)+)?\w+)\[((?:\+|\-)\d)\]" +) # find 'feature' in feature[+3] +NOTES = re.compile(r"\[(?!t)(p|u|s|cc|cs)\]") # find 'p' in camera[+2][p] +SENT = re.compile(r"##(.*)$") # find tokenized sentence + + +class Review: + """ + A Review is the main block of a ReviewsCorpusReader. + """ + + def __init__(self, title=None, review_lines=None): + """ + :param title: the title of the review. + :param review_lines: the list of the ReviewLines that belong to the Review. + """ + self.title = title + if review_lines is None: + self.review_lines = [] + else: + self.review_lines = review_lines + + def add_line(self, review_line): + """ + Add a line (ReviewLine) to the review. + + :param review_line: a ReviewLine instance that belongs to the Review. + """ + assert isinstance(review_line, ReviewLine) + self.review_lines.append(review_line) + + def features(self): + """ + Return a list of features in the review. Each feature is a tuple made of + the specific item feature and the opinion strength about that feature. + + :return: all features of the review as a list of tuples (feat, score). + :rtype: list(tuple) + """ + features = [] + for review_line in self.review_lines: + features.extend(review_line.features) + return features + + def sents(self): + """ + Return all tokenized sentences in the review. + + :return: all sentences of the review as lists of tokens. + :rtype: list(list(str)) + """ + return [review_line.sent for review_line in self.review_lines] + + def __repr__(self): + return 'Review(title="{}", review_lines={})'.format( + self.title, self.review_lines + ) + + +class ReviewLine: + """ + A ReviewLine represents a sentence of the review, together with (optional) + annotations of its features and notes about the reviewed item. + """ + + def __init__(self, sent, features=None, notes=None): + self.sent = sent + if features is None: + self.features = [] + else: + self.features = features + + if notes is None: + self.notes = [] + else: + self.notes = notes + + def __repr__(self): + return "ReviewLine(features={}, notes={}, sent={})".format( + self.features, self.notes, self.sent + ) + + +class ReviewsCorpusReader(CorpusReader): + """ + Reader for the Customer Review Data dataset by Hu, Liu (2004). + Note: we are not applying any sentence tokenization at the moment, just word + tokenization. + + >>> from nltk.corpus import product_reviews_1 + >>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt') + >>> review = camera_reviews[0] + >>> review.sents()[0] # doctest: +NORMALIZE_WHITESPACE + ['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am', + 'extremely', 'satisfied', 'with', 'the', 'purchase', '.'] + >>> review.features() # doctest: +NORMALIZE_WHITESPACE + [('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'), + ('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'), + ('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'), + ('option', '+1')] + + We can also reach the same information directly from the stream: + + >>> product_reviews_1.features('Canon_G3.txt') + [('canon powershot g3', '+3'), ('use', '+2'), ...] 
+ + We can compute stats for specific product features: + + >>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture']) + >>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture']) + >>> mean = tot / n_reviews + >>> print(n_reviews, tot, mean) + 15 24 1.6 + """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, root, fileids, word_tokenizer=WordPunctTokenizer(), encoding="utf8" + ): + """ + :param root: The root directory for the corpus. + :param fileids: a list or regexp specifying the fileids in the corpus. + :param word_tokenizer: a tokenizer for breaking sentences or paragraphs + into words. Default: `WordPunctTokenizer` + :param encoding: the encoding that should be used to read the corpus. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + self._word_tokenizer = word_tokenizer + self._readme = "README.txt" + + def features(self, fileids=None): + """ + Return a list of features. Each feature is a tuple made of the specific + item feature and the opinion strength about that feature. + + :param fileids: a list or regexp specifying the ids of the files whose + features have to be returned. + :return: all features for the item(s) in the given file(s). + :rtype: list(tuple) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(fileid, self._read_features, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def reviews(self, fileids=None): + """ + Return all the reviews as a list of Review objects. If `fileids` is + specified, return all the reviews from each of the specified files. + + :param fileids: a list or regexp specifying the ids of the files whose + reviews have to be returned. + :return: the given file(s) as a list of reviews. + """ + if fileids is None: + fileids = self._fileids + return concat( + [ + self.CorpusView(fileid, self._read_review_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + Return all sentences in the corpus or in the specified files. + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. + :return: the given file(s) as a list of sentences, each encoded as a + list of word strings. + :rtype: list(list(str)) + """ + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def words(self, fileids=None): + """ + Return all words and punctuation symbols in the corpus or in the specified + files. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_features(self, stream): + features = [] + for i in range(20): + line = stream.readline() + if not line: + return features + features.extend(re.findall(FEATURES, line)) + return features + + def _read_review_block(self, stream): + while True: + line = stream.readline() + if not line: + return [] # end of file. 
+ title_match = re.match(TITLE, line) + if title_match: + review = Review( + title=title_match.group(1).strip() + ) # We create a new review + break + + # Scan until we find another line matching the regexp, or EOF. + while True: + oldpos = stream.tell() + line = stream.readline() + # End of file: + if not line: + return [review] + # Start of a new review: backup to just before it starts, and + # return the review we've already collected. + if re.match(TITLE, line): + stream.seek(oldpos) + return [review] + # Anything else is part of the review line. + feats = re.findall(FEATURES, line) + notes = re.findall(NOTES, line) + sent = re.findall(SENT, line) + if sent: + sent = self._word_tokenizer.tokenize(sent[0]) + review_line = ReviewLine(sent=sent, features=feats, notes=notes) + review.add_line(review_line) + + def _read_sent_block(self, stream): + sents = [] + for review in self._read_review_block(stream): + sents.extend([sent for sent in review.sents()]) + return sents + + def _read_word_block(self, stream): + words = [] + for i in range(20): # Read 20 lines at a time. + line = stream.readline() + sent = re.findall(SENT, line) + if sent: + words.extend(self._word_tokenizer.tokenize(sent[0])) + return words diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/senseval.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/senseval.py new file mode 100644 index 0000000000000000000000000000000000000000..99f09fe9f486f7770bddb290550f844898aef966 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/senseval.py @@ -0,0 +1,196 @@ +# Natural Language Toolkit: Senseval 2 Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Steven Bird (modifications) +# URL: +# For license information, see LICENSE.TXT + +""" +Read from the Senseval 2 Corpus. + +SENSEVAL [http://www.senseval.org/] +Evaluation exercises for Word Sense Disambiguation. +Organized by ACL-SIGLEX [https://www.siglex.org/] + +Prepared by Ted Pedersen , University of Minnesota, +https://www.d.umn.edu/~tpederse/data.html +Distributed with permission. + +The NLTK version of the Senseval 2 files uses well-formed XML. +Each instance of the ambiguous words "hard", "interest", "line", and "serve" +is tagged with a sense identifier, and supplied with context. 
+""" + +import re +from xml.etree import ElementTree + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tokenize import * + + +class SensevalInstance: + def __init__(self, word, position, context, senses): + self.word = word + self.senses = tuple(senses) + self.position = position + self.context = context + + def __repr__(self): + return "SensevalInstance(word=%r, position=%r, " "context=%r, senses=%r)" % ( + self.word, + self.position, + self.context, + self.senses, + ) + + +class SensevalCorpusReader(CorpusReader): + def instances(self, fileids=None): + return concat( + [ + SensevalCorpusView(fileid, enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _entry(self, tree): + elts = [] + for lexelt in tree.findall("lexelt"): + for inst in lexelt.findall("instance"): + sense = inst[0].attrib["senseid"] + context = [(w.text, w.attrib["pos"]) for w in inst[1]] + elts.append((sense, context)) + return elts + + +class SensevalCorpusView(StreamBackedCorpusView): + def __init__(self, fileid, encoding): + StreamBackedCorpusView.__init__(self, fileid, encoding=encoding) + + self._word_tokenizer = WhitespaceTokenizer() + self._lexelt_starts = [0] # list of streampos + self._lexelts = [None] # list of lexelt names + + def read_block(self, stream): + # Decide which lexical element we're in. + lexelt_num = bisect.bisect_right(self._lexelt_starts, stream.tell()) - 1 + lexelt = self._lexelts[lexelt_num] + + instance_lines = [] + in_instance = False + while True: + line = stream.readline() + if line == "": + assert instance_lines == [] + return [] + + # Start of a lexical element? + if line.lstrip().startswith(" has no 'item=...' + lexelt = m.group(1)[1:-1] + if lexelt_num < len(self._lexelts): + assert lexelt == self._lexelts[lexelt_num] + else: + self._lexelts.append(lexelt) + self._lexelt_starts.append(stream.tell()) + + # Start of an instance? + if line.lstrip().startswith("" + elif cword.tag == "wf": + context.append((cword.text, cword.attrib["pos"])) + elif cword.tag == "s": + pass # Sentence boundary marker. + + else: + print("ACK", cword.tag) + assert False, "expected CDATA or or " + if cword.tail: + context += self._word_tokenizer.tokenize(cword.tail) + else: + assert False, "unexpected tag %s" % child.tag + return SensevalInstance(lexelt, position, context, senses) + + +def _fixXML(text): + """ + Fix the various issues with Senseval pseudo-XML. + """ + # <~> or <^> => ~ or ^ + text = re.sub(r"<([~\^])>", r"\1", text) + # fix lone & + text = re.sub(r"(\s+)\&(\s+)", r"\1&\2", text) + # fix """ + text = re.sub(r'"""', "'\"'", text) + # fix => + text = re.sub(r'(<[^<]*snum=)([^">]+)>', r'\1"\2"/>', text) + # fix foreign word tag + text = re.sub(r"<\&frasl>\s*]*>", "FRASL", text) + # remove <&I .> + text = re.sub(r"<\&I[^>]*>", "", text) + # fix <{word}> + text = re.sub(r"<{([^}]+)}>", r"\1", text) + # remove <@>,
<p>, </p>
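# For example, the lone-ampersand fix above escapes a bare '&' so the block
# can later be parsed by ElementTree (input string is illustrative):
import re
print(re.sub(r"(\s+)\&(\s+)", r"\1&amp;\2", "bread & butter"))   # bread &amp; butter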
+ text = re.sub(r"<(@|/?p)>", r"", text) + # remove <&M .> and <&T .> and <&Ms .> + text = re.sub(r"<&\w+ \.>", r"", text) + # remove lines + text = re.sub(r"]*>", r"", text) + # remove <[hi]> and <[/p]> etc + text = re.sub(r"<\[\/?[^>]+\]*>", r"", text) + # take the thing out of the brackets: <…> + text = re.sub(r"<(\&\w+;)>", r"\1", text) + # and remove the & for those patterns that aren't regular XML + text = re.sub(r"&(?!amp|gt|lt|apos|quot)", r"", text) + # fix 'abc ' style tags - now abc + text = re.sub( + r'[ \t]*([^<>\s]+?)[ \t]*', r' \1', text + ) + text = re.sub(r'\s*"\s*', " \"", text) + return text diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py new file mode 100644 index 0000000000000000000000000000000000000000..b4ae423eb920d6d86c0fce8a43881f7bdeaf5b35 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: String Category Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Read tuples from a corpus consisting of categorized strings. +For example, from the question classification corpus: + +NUM:dist How far is it from Denver to Aspen ? +LOC:city What county is Modesto , California in ? +HUM:desc Who was Galileo ? +DESC:def What is an atom ? +NUM:date When did Hawaii become a state ? +""" + +from nltk.corpus.reader.api import * + +# based on PPAttachmentCorpusReader +from nltk.corpus.reader.util import * + + +# [xx] Should the order of the tuple be reversed -- in most other places +# in nltk, we use the form (data, tag) -- e.g., tagged words and +# labeled texts for classifiers. +class StringCategoryCorpusReader(CorpusReader): + def __init__(self, root, fileids, delimiter=" ", encoding="utf8"): + """ + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + :param delimiter: Field delimiter + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._delimiter = delimiter + + def tuples(self, fileids=None): + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_tuple_block(self, stream): + line = stream.readline().strip() + if line: + return [tuple(line.split(self._delimiter, 1))] + else: + return [] diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py new file mode 100644 index 0000000000000000000000000000000000000000..2dcfe1b6ff5487a57da8b5e8a9b919eba8b3b6e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py @@ -0,0 +1,354 @@ +# Natural Language Toolkit: Tagged Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Jacob Perkins +# URL: +# For license information, see LICENSE.TXT + +""" +A reader for corpora whose documents contain part-of-speech-tagged words. 
+""" + +import os + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.timit import read_timit_block +from nltk.corpus.reader.util import * +from nltk.tag import map_tag, str2tuple +from nltk.tokenize import * + + +class TaggedCorpusReader(CorpusReader): + """ + Reader for simple part-of-speech tagged corpora. Paragraphs are + assumed to be split using blank lines. Sentences and words can be + tokenized using the default tokenizers, or by custom tokenizers + specified as parameters to the constructor. Words are parsed + using ``nltk.tag.str2tuple``. By default, ``'/'`` is used as the + separator. I.e., words should have the form:: + + word1/tag1 word2/tag2 word3/tag3 ... + + But custom separators may be specified as parameters to the + constructor. Part of speech tags are case-normalized to upper + case. + """ + + def __init__( + self, + root, + fileids, + sep="/", + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + para_block_reader=read_blankline_block, + encoding="utf8", + tagset=None, + ): + """ + Construct a new Tagged Corpus reader for a set of documents + located at the given root directory. Example usage: + + >>> root = '/...path to corpus.../' + >>> reader = TaggedCorpusReader(root, '.*', '.txt') # doctest: +SKIP + + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._sep = sep + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + self._tagset = tagset + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + TaggedCorpusView( + fileid, + enc, + False, + False, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + None, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + """ + return concat( + [ + TaggedCorpusView( + fileid, + enc, + False, + True, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + None, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of word strings. + :rtype: list(list(list(str))) + """ + return concat( + [ + TaggedCorpusView( + fileid, + enc, + False, + True, + True, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + None, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. 
+ :rtype: list(tuple(str,str)) + """ + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + TaggedCorpusView( + fileid, + enc, + True, + False, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + tag_mapping_function, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. + + :rtype: list(list(tuple(str,str))) + """ + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + TaggedCorpusView( + fileid, + enc, + True, + True, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + tag_mapping_function, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_paras(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of ``(word,tag)`` tuples. + :rtype: list(list(list(tuple(str,str)))) + """ + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + TaggedCorpusView( + fileid, + enc, + True, + True, + True, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + tag_mapping_function, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class CategorizedTaggedCorpusReader(CategorizedCorpusReader, TaggedCorpusReader): + """ + A reader for part-of-speech tagged corpora whose documents are + divided into categories based on their file identifiers. + """ + + def __init__(self, *args, **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to + the ``CategorizedCorpusReader`` constructor. The remaining arguments + are passed to the ``TaggedCorpusReader``. + """ + CategorizedCorpusReader.__init__(self, kwargs) + TaggedCorpusReader.__init__(self, *args, **kwargs) + + def tagged_words(self, fileids=None, categories=None, tagset=None): + return super().tagged_words(self._resolve(fileids, categories), tagset) + + def tagged_sents(self, fileids=None, categories=None, tagset=None): + return super().tagged_sents(self._resolve(fileids, categories), tagset) + + def tagged_paras(self, fileids=None, categories=None, tagset=None): + return super().tagged_paras(self._resolve(fileids, categories), tagset) + + +class TaggedCorpusView(StreamBackedCorpusView): + """ + A specialized corpus view for tagged documents. It can be + customized via flags to divide the tagged corpus documents up by + sentence or paragraph, and to include or omit part of speech tags. + ``TaggedCorpusView`` objects are typically created by + ``TaggedCorpusReader`` (not directly by nltk users). 
+ """ + + def __init__( + self, + corpus_file, + encoding, + tagged, + group_by_sent, + group_by_para, + sep, + word_tokenizer, + sent_tokenizer, + para_block_reader, + tag_mapping_function=None, + ): + self._tagged = tagged + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + self._sep = sep + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + self._tag_mapping_function = tag_mapping_function + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + """Reads one paragraph at a time.""" + block = [] + for para_str in self._para_block_reader(stream): + para = [] + for sent_str in self._sent_tokenizer.tokenize(para_str): + sent = [ + str2tuple(s, self._sep) + for s in self._word_tokenizer.tokenize(sent_str) + ] + if self._tag_mapping_function: + sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent] + if not self._tagged: + sent = [w for (w, t) in sent] + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + if self._group_by_para: + block.append(para) + else: + block.extend(para) + return block + + +# needs to implement simplified tags +class MacMorphoCorpusReader(TaggedCorpusReader): + """ + A corpus reader for the MAC_MORPHO corpus. Each line contains a + single tagged word, using '_' as a separator. Sentence boundaries + are based on the end-sentence tag ('_.'). Paragraph information + is not included in the corpus, so each paragraph returned by + ``self.paras()`` and ``self.tagged_paras()`` contains a single + sentence. + """ + + def __init__(self, root, fileids, encoding="utf8", tagset=None): + TaggedCorpusReader.__init__( + self, + root, + fileids, + sep="_", + word_tokenizer=LineTokenizer(), + sent_tokenizer=RegexpTokenizer(".*\n"), + para_block_reader=self._read_block, + encoding=encoding, + tagset=tagset, + ) + + def _read_block(self, stream): + return read_regexp_block(stream, r".*", r".*_\.") + + +class TimitTaggedCorpusReader(TaggedCorpusReader): + """ + A corpus reader for tagged sentences that are included in the TIMIT corpus. + """ + + def __init__(self, *args, **kwargs): + TaggedCorpusReader.__init__( + self, para_block_reader=read_timit_block, *args, **kwargs + ) + + def paras(self): + raise NotImplementedError("use sents() instead") + + def tagged_paras(self): + raise NotImplementedError("use tagged_sents() instead") diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/udhr.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/udhr.py new file mode 100644 index 0000000000000000000000000000000000000000..e6309ff4559659ff9b97bf679b563bcb957d18f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/udhr.py @@ -0,0 +1,75 @@ +""" +UDHR corpus reader. It mostly deals with encodings. 
+""" + +from nltk.corpus.reader.plaintext import PlaintextCorpusReader +from nltk.corpus.reader.util import find_corpus_fileids + + +class UdhrCorpusReader(PlaintextCorpusReader): + + ENCODINGS = [ + (".*-Latin1$", "latin-1"), + (".*-Hebrew$", "hebrew"), + (".*-Arabic$", "cp1256"), + ("Czech_Cesky-UTF8", "cp1250"), # yeah + ("Polish-Latin2", "cp1250"), + ("Polish_Polski-Latin2", "cp1250"), + (".*-Cyrillic$", "cyrillic"), + (".*-SJIS$", "SJIS"), + (".*-GB2312$", "GB2312"), + (".*-Latin2$", "ISO-8859-2"), + (".*-Greek$", "greek"), + (".*-UTF8$", "utf-8"), + ("Hungarian_Magyar-Unicode", "utf-16-le"), + ("Amahuaca", "latin1"), + ("Turkish_Turkce-Turkish", "latin5"), + ("Lithuanian_Lietuviskai-Baltic", "latin4"), + ("Japanese_Nihongo-EUC", "EUC-JP"), + ("Japanese_Nihongo-JIS", "iso2022_jp"), + ("Chinese_Mandarin-HZ", "hz"), + (r"Abkhaz\-Cyrillic\+Abkh", "cp1251"), + ] + + SKIP = { + # The following files are not fully decodable because they + # were truncated at wrong bytes: + "Burmese_Myanmar-UTF8", + "Japanese_Nihongo-JIS", + "Chinese_Mandarin-HZ", + "Chinese_Mandarin-UTF8", + "Gujarati-UTF8", + "Hungarian_Magyar-Unicode", + "Lao-UTF8", + "Magahi-UTF8", + "Marathi-UTF8", + "Tamil-UTF8", + # Unfortunately, encodings required for reading + # the following files are not supported by Python: + "Vietnamese-VPS", + "Vietnamese-VIQR", + "Vietnamese-TCVN", + "Magahi-Agra", + "Bhojpuri-Agra", + "Esperanto-T61", # latin3 raises an exception + # The following files are encoded for specific fonts: + "Burmese_Myanmar-WinResearcher", + "Armenian-DallakHelv", + "Tigrinya_Tigrigna-VG2Main", + "Amharic-Afenegus6..60375", # ? + "Navaho_Dine-Navajo-Navaho-font", + # What are these? + "Azeri_Azerbaijani_Cyrillic-Az.Times.Cyr.Normal0117", + "Azeri_Azerbaijani_Latin-Az.Times.Lat0117", + # The following files are unintended: + "Czech-Latin2-err", + "Russian_Russky-UTF8~", + } + + def __init__(self, root="udhr"): + fileids = find_corpus_fileids(root, r"(?!README|\.).*") + super().__init__( + root, + [fileid for fileid in fileids if fileid not in self.SKIP], + encoding=self.ENCODINGS, + ) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/util.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/util.py new file mode 100644 index 0000000000000000000000000000000000000000..0934f1705952b4c00d8884da76c8e052c5a23d58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/util.py @@ -0,0 +1,867 @@ +# Natural Language Toolkit: Corpus Reader Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +import bisect +import os +import pickle +import re +import tempfile +from functools import reduce +from xml.etree import ElementTree + +from nltk.data import ( + FileSystemPathPointer, + PathPointer, + SeekableUnicodeStreamReader, + ZipFilePathPointer, +) +from nltk.internals import slice_bounds +from nltk.tokenize import wordpunct_tokenize +from nltk.util import AbstractLazySequence, LazyConcatenation, LazySubsequence + +###################################################################### +# { Corpus View +###################################################################### + + +class StreamBackedCorpusView(AbstractLazySequence): + """ + A 'view' of a corpus file, which acts like a sequence of tokens: + it can be accessed by index, iterated over, etc. However, the + tokens are only constructed as-needed -- the entire corpus is + never stored in memory at once. 
+ + The constructor to ``StreamBackedCorpusView`` takes two arguments: + a corpus fileid (specified as a string or as a ``PathPointer``); + and a block reader. A "block reader" is a function that reads + zero or more tokens from a stream, and returns them as a list. A + very simple example of a block reader is: + + >>> def simple_block_reader(stream): + ... return stream.readline().split() + + This simple block reader reads a single line at a time, and + returns a single token (consisting of a string) for each + whitespace-separated substring on the line. + + When deciding how to define the block reader for a given + corpus, careful consideration should be given to the size of + blocks handled by the block reader. Smaller block sizes will + increase the memory requirements of the corpus view's internal + data structures (by 2 integers per block). On the other hand, + larger block sizes may decrease performance for random access to + the corpus. (But note that larger block sizes will *not* + decrease performance for iteration.) + + Internally, ``CorpusView`` maintains a partial mapping from token + index to file position, with one entry per block. When a token + with a given index *i* is requested, the ``CorpusView`` constructs + it as follows: + + 1. First, it searches the toknum/filepos mapping for the token + index closest to (but less than or equal to) *i*. + + 2. Then, starting at the file position corresponding to that + index, it reads one block at a time using the block reader + until it reaches the requested token. + + The toknum/filepos mapping is created lazily: it is initially + empty, but every time a new block is read, the block's + initial token is added to the mapping. (Thus, the toknum/filepos + map has one entry per block.) + + In order to increase efficiency for random access patterns that + have high degrees of locality, the corpus view may cache one or + more blocks. + + :note: Each ``CorpusView`` object internally maintains an open file + object for its underlying corpus file. This file should be + automatically closed when the ``CorpusView`` is garbage collected, + but if you wish to close it manually, use the ``close()`` + method. If you access a ``CorpusView``'s items after it has been + closed, the file object will be automatically re-opened. + + :warning: If the contents of the file are modified during the + lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior + is undefined. + + :warning: If a unicode encoding is specified when constructing a + ``CorpusView``, then the block reader may only call + ``stream.seek()`` with offsets that have been returned by + ``stream.tell()``; in particular, calling ``stream.seek()`` with + relative offsets, or with offsets based on string lengths, may + lead to incorrect behavior. + + :ivar _block_reader: The function used to read + a single block from the underlying file stream. + :ivar _toknum: A list containing the token index of each block + that has been processed. In particular, ``_toknum[i]`` is the + token index of the first token in block ``i``. Together + with ``_filepos``, this forms a partial mapping between token + indices and file positions. + :ivar _filepos: A list containing the file position of each block + that has been processed. In particular, ``_toknum[i]`` is the + file position of the first character in block ``i``. Together + with ``_toknum``, this forms a partial mapping between token + indices and file positions. + :ivar _stream: The stream used to access the underlying corpus file. 
+ :ivar _len: The total number of tokens in the corpus, if known; + or None, if the number of tokens is not yet known. + :ivar _eofpos: The character position of the last character in the + file. This is calculated when the corpus view is initialized, + and is used to decide when the end of file has been reached. + :ivar _cache: A cache of the most recently read block. It + is encoded as a tuple (start_toknum, end_toknum, tokens), where + start_toknum is the token index of the first token in the block; + end_toknum is the token index of the first token not in the + block; and tokens is a list of the tokens in the block. + """ + + def __init__(self, fileid, block_reader=None, startpos=0, encoding="utf8"): + """ + Create a new corpus view, based on the file ``fileid``, and + read with ``block_reader``. See the class documentation + for more information. + + :param fileid: The path to the file that is read by this + corpus view. ``fileid`` can either be a string or a + ``PathPointer``. + + :param startpos: The file position at which the view will + start reading. This can be used to skip over preface + sections. + + :param encoding: The unicode encoding that should be used to + read the file's contents. If no encoding is specified, + then the file's contents will be read as a non-unicode + string (i.e., a str). + """ + if block_reader: + self.read_block = block_reader + # Initialize our toknum/filepos mapping. + self._toknum = [0] + self._filepos = [startpos] + self._encoding = encoding + # We don't know our length (number of tokens) yet. + self._len = None + + self._fileid = fileid + self._stream = None + + self._current_toknum = None + """This variable is set to the index of the next token that + will be read, immediately before ``self.read_block()`` is + called. This is provided for the benefit of the block + reader, which under rare circumstances may need to know + the current token number.""" + + self._current_blocknum = None + """This variable is set to the index of the next block that + will be read, immediately before ``self.read_block()`` is + called. This is provided for the benefit of the block + reader, which under rare circumstances may need to know + the current block number.""" + + # Find the length of the file. + try: + if isinstance(self._fileid, PathPointer): + self._eofpos = self._fileid.file_size() + else: + self._eofpos = os.stat(self._fileid).st_size + except Exception as exc: + raise ValueError(f"Unable to open or access {fileid!r} -- {exc}") from exc + + # Maintain a cache of the most recently read block, to + # increase efficiency of random access. + self._cache = (-1, -1, None) + + fileid = property( + lambda self: self._fileid, + doc=""" + The fileid of the file that is accessed by this view. + + :type: str or PathPointer""", + ) + + def read_block(self, stream): + """ + Read a block from the input stream. + + :return: a block of tokens from the input stream + :rtype: list(any) + :param stream: an input stream + :type stream: stream + """ + raise NotImplementedError("Abstract Method") + + def _open(self): + """ + Open the file stream associated with this corpus view. This + will be called performed if any value is read from the view + while its file stream is closed. 
+ """ + if isinstance(self._fileid, PathPointer): + self._stream = self._fileid.open(self._encoding) + elif self._encoding: + self._stream = SeekableUnicodeStreamReader( + open(self._fileid, "rb"), self._encoding + ) + else: + self._stream = open(self._fileid, "rb") + + def close(self): + """ + Close the file stream associated with this corpus view. This + can be useful if you are worried about running out of file + handles (although the stream should automatically be closed + upon garbage collection of the corpus view). If the corpus + view is accessed after it is closed, it will be automatically + re-opened. + """ + if self._stream is not None: + self._stream.close() + self._stream = None + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __len__(self): + if self._len is None: + # iterate_from() sets self._len when it reaches the end + # of the file: + for tok in self.iterate_from(self._toknum[-1]): + pass + return self._len + + def __getitem__(self, i): + if isinstance(i, slice): + start, stop = slice_bounds(self, i) + # Check if it's in the cache. + offset = self._cache[0] + if offset <= start and stop <= self._cache[1]: + return self._cache[2][start - offset : stop - offset] + # Construct & return the result. + return LazySubsequence(self, start, stop) + else: + # Handle negative indices + if i < 0: + i += len(self) + if i < 0: + raise IndexError("index out of range") + # Check if it's in the cache. + offset = self._cache[0] + if offset <= i < self._cache[1]: + return self._cache[2][i - offset] + # Use iterate_from to extract it. + try: + return next(self.iterate_from(i)) + except StopIteration as e: + raise IndexError("index out of range") from e + + # If we wanted to be thread-safe, then this method would need to + # do some locking. + def iterate_from(self, start_tok): + # Start by feeding from the cache, if possible. + if self._cache[0] <= start_tok < self._cache[1]: + for tok in self._cache[2][start_tok - self._cache[0] :]: + yield tok + start_tok += 1 + + # Decide where in the file we should start. If `start` is in + # our mapping, then we can jump straight to the correct block; + # otherwise, start at the last block we've processed. + if start_tok < self._toknum[-1]: + block_index = bisect.bisect_right(self._toknum, start_tok) - 1 + toknum = self._toknum[block_index] + filepos = self._filepos[block_index] + else: + block_index = len(self._toknum) - 1 + toknum = self._toknum[-1] + filepos = self._filepos[-1] + + # Open the stream, if it's not open already. + if self._stream is None: + self._open() + + # If the file is empty, the while loop will never run. + # This *seems* to be all the state we need to set: + if self._eofpos == 0: + self._len = 0 + + # Each iteration through this loop, we read a single block + # from the stream. + while filepos < self._eofpos: + # Read the next block. + self._stream.seek(filepos) + self._current_toknum = toknum + self._current_blocknum = block_index + tokens = self.read_block(self._stream) + assert isinstance(tokens, (tuple, list, AbstractLazySequence)), ( + "block reader %s() should return list or tuple." + % self.read_block.__name__ + ) + num_toks = len(tokens) + new_filepos = self._stream.tell() + assert ( + new_filepos > filepos + ), "block reader %s() should consume at least 1 byte (filepos=%d)" % ( + self.read_block.__name__, + filepos, + ) + + # Update our cache. + self._cache = (toknum, toknum + num_toks, list(tokens)) + + # Update our mapping. 
+ assert toknum <= self._toknum[-1] + if num_toks > 0: + block_index += 1 + if toknum == self._toknum[-1]: + assert new_filepos > self._filepos[-1] # monotonic! + self._filepos.append(new_filepos) + self._toknum.append(toknum + num_toks) + else: + # Check for consistency: + assert ( + new_filepos == self._filepos[block_index] + ), "inconsistent block reader (num chars read)" + assert ( + toknum + num_toks == self._toknum[block_index] + ), "inconsistent block reader (num tokens returned)" + + # If we reached the end of the file, then update self._len + if new_filepos == self._eofpos: + self._len = toknum + num_toks + # Generate the tokens in this block (but skip any tokens + # before start_tok). Note that between yields, our state + # may be modified. + for tok in tokens[max(0, start_tok - toknum) :]: + yield tok + # If we're at the end of the file, then we're done. + assert new_filepos <= self._eofpos + if new_filepos == self._eofpos: + break + # Update our indices + toknum += num_toks + filepos = new_filepos + + # If we reach this point, then we should know our length. + assert self._len is not None + # Enforce closing of stream once we reached end of file + # We should have reached EOF once we're out of the while loop. + self.close() + + # Use concat for these, so we can use a ConcatenatedCorpusView + # when possible. + def __add__(self, other): + return concat([self, other]) + + def __radd__(self, other): + return concat([other, self]) + + def __mul__(self, count): + return concat([self] * count) + + def __rmul__(self, count): + return concat([self] * count) + + +class ConcatenatedCorpusView(AbstractLazySequence): + """ + A 'view' of a corpus file that joins together one or more + ``StreamBackedCorpusViews``. At most + one file handle is left open at any time. + """ + + def __init__(self, corpus_views): + self._pieces = corpus_views + """A list of the corpus subviews that make up this + concatenation.""" + + self._offsets = [0] + """A list of offsets, indicating the index at which each + subview begins. In particular:: + offsets[i] = sum([len(p) for p in pieces[:i]])""" + + self._open_piece = None + """The most recently accessed corpus subview (or None). + Before a new subview is accessed, this subview will be closed.""" + + def __len__(self): + if len(self._offsets) <= len(self._pieces): + # Iterate to the end of the corpus. + for tok in self.iterate_from(self._offsets[-1]): + pass + + return self._offsets[-1] + + def close(self): + for piece in self._pieces: + piece.close() + + def iterate_from(self, start_tok): + piecenum = bisect.bisect_right(self._offsets, start_tok) - 1 + + while piecenum < len(self._pieces): + offset = self._offsets[piecenum] + piece = self._pieces[piecenum] + + # If we've got another piece open, close it first. + if self._open_piece is not piece: + if self._open_piece is not None: + self._open_piece.close() + self._open_piece = piece + + # Get everything we can from this piece. + yield from piece.iterate_from(max(0, start_tok - offset)) + + # Update the offset table. + if piecenum + 1 == len(self._offsets): + self._offsets.append(self._offsets[-1] + len(piece)) + + # Move on to the next piece. + piecenum += 1 + + +def concat(docs): + """ + Concatenate together the contents of multiple documents from a + single corpus, using an appropriate concatenation function. This + utility function is used by corpus readers when the user requests + more than one document at a time. 
+ """ + if len(docs) == 1: + return docs[0] + if len(docs) == 0: + raise ValueError("concat() expects at least one object!") + + types = {d.__class__ for d in docs} + + # If they're all strings, use string concatenation. + if all(isinstance(doc, str) for doc in docs): + return "".join(docs) + + # If they're all corpus views, then use ConcatenatedCorpusView. + for typ in types: + if not issubclass(typ, (StreamBackedCorpusView, ConcatenatedCorpusView)): + break + else: + return ConcatenatedCorpusView(docs) + + # If they're all lazy sequences, use a lazy concatenation + for typ in types: + if not issubclass(typ, AbstractLazySequence): + break + else: + return LazyConcatenation(docs) + + # Otherwise, see what we can do: + if len(types) == 1: + typ = list(types)[0] + + if issubclass(typ, list): + return reduce((lambda a, b: a + b), docs, []) + + if issubclass(typ, tuple): + return reduce((lambda a, b: a + b), docs, ()) + + if ElementTree.iselement(typ): + xmltree = ElementTree.Element("documents") + for doc in docs: + xmltree.append(doc) + return xmltree + + # No method found! + raise ValueError("Don't know how to concatenate types: %r" % types) + + +###################################################################### +# { Corpus View for Pickled Sequences +###################################################################### + + +class PickleCorpusView(StreamBackedCorpusView): + """ + A stream backed corpus view for corpus files that consist of + sequences of serialized Python objects (serialized using + ``pickle.dump``). One use case for this class is to store the + result of running feature detection on a corpus to disk. This can + be useful when performing feature detection is expensive (so we + don't want to repeat it); but the corpus is too large to store in + memory. The following example illustrates this technique: + + >>> from nltk.corpus.reader.util import PickleCorpusView + >>> from nltk.util import LazyMap + >>> feature_corpus = LazyMap(detect_features, corpus) # doctest: +SKIP + >>> PickleCorpusView.write(feature_corpus, some_fileid) # doctest: +SKIP + >>> pcv = PickleCorpusView(some_fileid) # doctest: +SKIP + """ + + BLOCK_SIZE = 100 + PROTOCOL = -1 + + def __init__(self, fileid, delete_on_gc=False): + """ + Create a new corpus view that reads the pickle corpus + ``fileid``. + + :param delete_on_gc: If true, then ``fileid`` will be deleted + whenever this object gets garbage-collected. + """ + self._delete_on_gc = delete_on_gc + StreamBackedCorpusView.__init__(self, fileid) + + def read_block(self, stream): + result = [] + for i in range(self.BLOCK_SIZE): + try: + result.append(pickle.load(stream)) + except EOFError: + break + return result + + def __del__(self): + """ + If ``delete_on_gc`` was set to true when this + ``PickleCorpusView`` was created, then delete the corpus view's + fileid. (This method is called whenever a + ``PickledCorpusView`` is garbage-collected. 
+ """ + if getattr(self, "_delete_on_gc"): + if os.path.exists(self._fileid): + try: + os.remove(self._fileid) + except OSError: + pass + self.__dict__.clear() # make the garbage collector's job easier + + @classmethod + def write(cls, sequence, output_file): + if isinstance(output_file, str): + output_file = open(output_file, "wb") + for item in sequence: + pickle.dump(item, output_file, cls.PROTOCOL) + + @classmethod + def cache_to_tempfile(cls, sequence, delete_on_gc=True): + """ + Write the given sequence to a temporary file as a pickle + corpus; and then return a ``PickleCorpusView`` view for that + temporary corpus file. + + :param delete_on_gc: If true, then the temporary file will be + deleted whenever this object gets garbage-collected. + """ + try: + fd, output_file_name = tempfile.mkstemp(".pcv", "nltk-") + output_file = os.fdopen(fd, "wb") + cls.write(sequence, output_file) + output_file.close() + return PickleCorpusView(output_file_name, delete_on_gc) + except OSError as e: + raise ValueError("Error while creating temp file: %s" % e) from e + + +###################################################################### +# { Block Readers +###################################################################### + + +def read_whitespace_block(stream): + toks = [] + for i in range(20): # Read 20 lines at a time. + toks.extend(stream.readline().split()) + return toks + + +def read_wordpunct_block(stream): + toks = [] + for i in range(20): # Read 20 lines at a time. + toks.extend(wordpunct_tokenize(stream.readline())) + return toks + + +def read_line_block(stream): + toks = [] + for i in range(20): + line = stream.readline() + if not line: + return toks + toks.append(line.rstrip("\n")) + return toks + + +def read_blankline_block(stream): + s = "" + while True: + line = stream.readline() + # End of file: + if not line: + if s: + return [s] + else: + return [] + # Blank line: + elif line and not line.strip(): + if s: + return [s] + # Other line: + else: + s += line + + +def read_alignedsent_block(stream): + s = "" + while True: + line = stream.readline() + if line[0] == "=" or line[0] == "\n" or line[:2] == "\r\n": + continue + # End of file: + if not line: + if s: + return [s] + else: + return [] + # Other line: + else: + s += line + if re.match(r"^\d+-\d+", line) is not None: + return [s] + + +def read_regexp_block(stream, start_re, end_re=None): + """ + Read a sequence of tokens from a stream, where tokens begin with + lines that match ``start_re``. If ``end_re`` is specified, then + tokens end with lines that match ``end_re``; otherwise, tokens end + whenever the next line matching ``start_re`` or EOF is found. + """ + # Scan until we find a line matching the start regexp. + while True: + line = stream.readline() + if not line: + return [] # end of file. + if re.match(start_re, line): + break + + # Scan until we find another line matching the regexp, or EOF. + lines = [line] + while True: + oldpos = stream.tell() + line = stream.readline() + # End of file: + if not line: + return ["".join(lines)] + # End of token: + if end_re is not None and re.match(end_re, line): + return ["".join(lines)] + # Start of new token: backup to just before it starts, and + # return the token we've already collected. + if end_re is None and re.match(start_re, line): + stream.seek(oldpos) + return ["".join(lines)] + # Anything else is part of the token. 
+ lines.append(line) + + +def read_sexpr_block(stream, block_size=16384, comment_char=None): + """ + Read a sequence of s-expressions from the stream, and leave the + stream's file position at the end the last complete s-expression + read. This function will always return at least one s-expression, + unless there are no more s-expressions in the file. + + If the file ends in in the middle of an s-expression, then that + incomplete s-expression is returned when the end of the file is + reached. + + :param block_size: The default block size for reading. If an + s-expression is longer than one block, then more than one + block will be read. + :param comment_char: A character that marks comments. Any lines + that begin with this character will be stripped out. + (If spaces or tabs precede the comment character, then the + line will not be stripped.) + """ + start = stream.tell() + block = stream.read(block_size) + encoding = getattr(stream, "encoding", None) + assert encoding is not None or isinstance(block, str) + if encoding not in (None, "utf-8"): + import warnings + + warnings.warn( + "Parsing may fail, depending on the properties " + "of the %s encoding!" % encoding + ) + # (e.g., the utf-16 encoding does not work because it insists + # on adding BOMs to the beginning of encoded strings.) + + if comment_char: + COMMENT = re.compile("(?m)^%s.*$" % re.escape(comment_char)) + while True: + try: + # If we're stripping comments, then make sure our block ends + # on a line boundary; and then replace any comments with + # space characters. (We can't just strip them out -- that + # would make our offset wrong.) + if comment_char: + block += stream.readline() + block = re.sub(COMMENT, _sub_space, block) + # Read the block. + tokens, offset = _parse_sexpr_block(block) + # Skip whitespace + offset = re.compile(r"\s*").search(block, offset).end() + + # Move to the end position. + if encoding is None: + stream.seek(start + offset) + else: + stream.seek(start + len(block[:offset].encode(encoding))) + + # Return the list of tokens we processed + return tokens + except ValueError as e: + if e.args[0] == "Block too small": + next_block = stream.read(block_size) + if next_block: + block += next_block + continue + else: + # The file ended mid-sexpr -- return what we got. + return [block.strip()] + else: + raise + + +def _sub_space(m): + """Helper function: given a regexp match, return a string of + spaces that's the same length as the matched string.""" + return " " * (m.end() - m.start()) + + +def _parse_sexpr_block(block): + tokens = [] + start = end = 0 + + while end < len(block): + m = re.compile(r"\S").search(block, end) + if not m: + return tokens, end + + start = m.start() + + # Case 1: sexpr is not parenthesized. + if m.group() != "(": + m2 = re.compile(r"[\s(]").search(block, start) + if m2: + end = m2.start() + else: + if tokens: + return tokens, end + raise ValueError("Block too small") + + # Case 2: parenthesized sexpr. 
+ else: + nesting = 0 + for m in re.compile(r"[()]").finditer(block, start): + if m.group() == "(": + nesting += 1 + else: + nesting -= 1 + if nesting == 0: + end = m.end() + break + else: + if tokens: + return tokens, end + raise ValueError("Block too small") + + tokens.append(block[start:end]) + + return tokens, end + + +###################################################################### +# { Finding Corpus Items +###################################################################### + + +def find_corpus_fileids(root, regexp): + if not isinstance(root, PathPointer): + raise TypeError("find_corpus_fileids: expected a PathPointer") + regexp += "$" + + # Find fileids in a zipfile: scan the zipfile's namelist. Filter + # out entries that end in '/' -- they're directories. + if isinstance(root, ZipFilePathPointer): + fileids = [ + name[len(root.entry) :] + for name in root.zipfile.namelist() + if not name.endswith("/") + ] + items = [name for name in fileids if re.match(regexp, name)] + return sorted(items) + + # Find fileids in a directory: use os.walk to search all (proper + # or symlinked) subdirectories, and match paths against the regexp. + elif isinstance(root, FileSystemPathPointer): + items = [] + for dirname, subdirs, fileids in os.walk(root.path): + prefix = "".join("%s/" % p for p in _path_from(root.path, dirname)) + items += [ + prefix + fileid + for fileid in fileids + if re.match(regexp, prefix + fileid) + ] + # Don't visit svn directories: + if ".svn" in subdirs: + subdirs.remove(".svn") + return sorted(items) + + else: + raise AssertionError("Don't know how to handle %r" % root) + + +def _path_from(parent, child): + if os.path.split(parent)[1] == "": + parent = os.path.split(parent)[0] + path = [] + while parent != child: + child, dirname = os.path.split(child) + path.insert(0, dirname) + assert os.path.split(child)[0] != child + return path + + +###################################################################### +# { Paragraph structure in Treebank files +###################################################################### + + +def tagged_treebank_para_block_reader(stream): + # Read the next paragraph. + para = "" + while True: + line = stream.readline() + # End of paragraph: + if re.match(r"======+\s*$", line): + if para.strip(): + return [para] + # End of file: + elif line == "": + if para.strip(): + return [para] + else: + return [] + # Content line: + else: + para += line diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/verbnet.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/verbnet.py new file mode 100644 index 0000000000000000000000000000000000000000..6056574bb03a0797d47c68b2de333b8337b08a46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/verbnet.py @@ -0,0 +1,629 @@ +# Natural Language Toolkit: Verbnet Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +An NLTK interface to the VerbNet verb lexicon + +For details about VerbNet see: +https://verbs.colorado.edu/~mpalmer/projects/verbnet.html +""" + +import re +import textwrap +from collections import defaultdict + +from nltk.corpus.reader.xmldocs import XMLCorpusReader + + +class VerbnetCorpusReader(XMLCorpusReader): + """ + An NLTK interface to the VerbNet verb lexicon. + + From the VerbNet site: "VerbNet (VN) (Kipper-Schuler 2006) is the largest + on-line verb lexicon currently available for English. 
It is a hierarchical + domain-independent, broad-coverage verb lexicon with mappings to other + lexical resources such as WordNet (Miller, 1990; Fellbaum, 1998), XTAG + (XTAG Research Group, 2001), and FrameNet (Baker et al., 1998)." + + For details about VerbNet see: + https://verbs.colorado.edu/~mpalmer/projects/verbnet.html + """ + + # No unicode encoding param, since the data files are all XML. + def __init__(self, root, fileids, wrap_etree=False): + XMLCorpusReader.__init__(self, root, fileids, wrap_etree) + + self._lemma_to_class = defaultdict(list) + """A dictionary mapping from verb lemma strings to lists of + VerbNet class identifiers.""" + + self._wordnet_to_class = defaultdict(list) + """A dictionary mapping from wordnet identifier strings to + lists of VerbNet class identifiers.""" + + self._class_to_fileid = {} + """A dictionary mapping from class identifiers to + corresponding file identifiers. The keys of this dictionary + provide a complete list of all classes and subclasses.""" + + self._shortid_to_longid = {} + + # Initialize the dictionaries. Use the quick (regexp-based) + # method instead of the slow (xml-based) method, because it + # runs 2-30 times faster. + self._quick_index() + + _LONGID_RE = re.compile(r"([^\-\.]*)-([\d+.\-]+)$") + """Regular expression that matches (and decomposes) longids""" + + _SHORTID_RE = re.compile(r"[\d+.\-]+$") + """Regular expression that matches shortids""" + + _INDEX_RE = re.compile( + r']+>|' r'' + ) + """Regular expression used by ``_index()`` to quickly scan the corpus + for basic information.""" + + def lemmas(self, vnclass=None): + """ + Return a list of all verb lemmas that appear in any class, or + in the ``classid`` if specified. + """ + if vnclass is None: + return sorted(self._lemma_to_class.keys()) + else: + # [xx] should this include subclass members? + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + return [member.get("name") for member in vnclass.findall("MEMBERS/MEMBER")] + + def wordnetids(self, vnclass=None): + """ + Return a list of all wordnet identifiers that appear in any + class, or in ``classid`` if specified. + """ + if vnclass is None: + return sorted(self._wordnet_to_class.keys()) + else: + # [xx] should this include subclass members? + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + return sum( + ( + member.get("wn", "").split() + for member in vnclass.findall("MEMBERS/MEMBER") + ), + [], + ) + + def classids(self, lemma=None, wordnetid=None, fileid=None, classid=None): + """ + Return a list of the VerbNet class identifiers. If a file + identifier is specified, then return only the VerbNet class + identifiers for classes (and subclasses) defined by that file. + If a lemma is specified, then return only VerbNet class + identifiers for classes that contain that lemma as a member. + If a wordnetid is specified, then return only identifiers for + classes that contain that wordnetid as a member. If a classid + is specified, then return only identifiers for subclasses of + the specified VerbNet class. 
+ If nothing is specified, return all classids within VerbNet + """ + if fileid is not None: + return [c for (c, f) in self._class_to_fileid.items() if f == fileid] + elif lemma is not None: + return self._lemma_to_class[lemma] + elif wordnetid is not None: + return self._wordnet_to_class[wordnetid] + elif classid is not None: + xmltree = self.vnclass(classid) + return [ + subclass.get("ID") + for subclass in xmltree.findall("SUBCLASSES/VNSUBCLASS") + ] + else: + return sorted(self._class_to_fileid.keys()) + + def vnclass(self, fileid_or_classid): + """Returns VerbNet class ElementTree + + Return an ElementTree containing the xml for the specified + VerbNet class. + + :param fileid_or_classid: An identifier specifying which class + should be returned. Can be a file identifier (such as + ``'put-9.1.xml'``), or a VerbNet class identifier (such as + ``'put-9.1'``) or a short VerbNet class identifier (such as + ``'9.1'``). + """ + # File identifier: just return the xml. + if fileid_or_classid in self._fileids: + return self.xml(fileid_or_classid) + + # Class identifier: get the xml, and find the right elt. + classid = self.longid(fileid_or_classid) + if classid in self._class_to_fileid: + fileid = self._class_to_fileid[self.longid(classid)] + tree = self.xml(fileid) + if classid == tree.get("ID"): + return tree + else: + for subclass in tree.findall(".//VNSUBCLASS"): + if classid == subclass.get("ID"): + return subclass + else: + assert False # we saw it during _index()! + + else: + raise ValueError(f"Unknown identifier {fileid_or_classid}") + + def fileids(self, vnclass_ids=None): + """ + Return a list of fileids that make up this corpus. If + ``vnclass_ids`` is specified, then return the fileids that make + up the specified VerbNet class(es). + """ + if vnclass_ids is None: + return self._fileids + elif isinstance(vnclass_ids, str): + return [self._class_to_fileid[self.longid(vnclass_ids)]] + else: + return [ + self._class_to_fileid[self.longid(vnclass_id)] + for vnclass_id in vnclass_ids + ] + + def frames(self, vnclass): + """Given a VerbNet class, this method returns VerbNet frames + + The members returned are: + 1) Example + 2) Description + 3) Syntax + 4) Semantics + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + :return: frames - a list of frame dictionaries + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + frames = [] + vnframes = vnclass.findall("FRAMES/FRAME") + for vnframe in vnframes: + frames.append( + { + "example": self._get_example_within_frame(vnframe), + "description": self._get_description_within_frame(vnframe), + "syntax": self._get_syntactic_list_within_frame(vnframe), + "semantics": self._get_semantics_within_frame(vnframe), + } + ) + return frames + + def subclasses(self, vnclass): + """Returns subclass ids, if any exist + + Given a VerbNet class, this method returns subclass ids (if they exist) + in a list of strings. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. 
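# A usage sketch, assuming the 'verbnet' data package is installed (e.g.
# nltk.download('verbnet')); class ids vary with the VerbNet version, but
# 'put-9.1' is the id the docstrings above already use as an example:
from nltk.corpus import verbnet
print(verbnet.classids(lemma="put"))   # e.g. ['put-9.1', ...]
frame = verbnet.frames("put-9.1")[0]
print(sorted(frame))                   # ['description', 'example', 'semantics', 'syntax']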
+ :return: list of subclasses + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + subclasses = [ + subclass.get("ID") for subclass in vnclass.findall("SUBCLASSES/VNSUBCLASS") + ] + return subclasses + + def themroles(self, vnclass): + """Returns thematic roles participating in a VerbNet class + + Members returned as part of roles are- + 1) Type + 2) Modifiers + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + :return: themroles: A list of thematic roles in the VerbNet class + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + themroles = [] + for trole in vnclass.findall("THEMROLES/THEMROLE"): + themroles.append( + { + "type": trole.get("type"), + "modifiers": [ + {"value": restr.get("Value"), "type": restr.get("type")} + for restr in trole.findall("SELRESTRS/SELRESTR") + ], + } + ) + return themroles + + ###################################################################### + # { Index Initialization + ###################################################################### + + def _index(self): + """ + Initialize the indexes ``_lemma_to_class``, + ``_wordnet_to_class``, and ``_class_to_fileid`` by scanning + through the corpus fileids. This is fast if ElementTree + uses the C implementation (<0.1 secs), but quite slow (>10 secs) + if only the python implementation is available. + """ + for fileid in self._fileids: + self._index_helper(self.xml(fileid), fileid) + + def _index_helper(self, xmltree, fileid): + """Helper for ``_index()``""" + vnclass = xmltree.get("ID") + self._class_to_fileid[vnclass] = fileid + self._shortid_to_longid[self.shortid(vnclass)] = vnclass + for member in xmltree.findall("MEMBERS/MEMBER"): + self._lemma_to_class[member.get("name")].append(vnclass) + for wn in member.get("wn", "").split(): + self._wordnet_to_class[wn].append(vnclass) + for subclass in xmltree.findall("SUBCLASSES/VNSUBCLASS"): + self._index_helper(subclass, fileid) + + def _quick_index(self): + """ + Initialize the indexes ``_lemma_to_class``, + ``_wordnet_to_class``, and ``_class_to_fileid`` by scanning + through the corpus fileids. This doesn't do proper xml parsing, + but is good enough to find everything in the standard VerbNet + corpus -- and it runs about 30 times faster than xml parsing + (with the python ElementTree; only 2-3 times faster + if ElementTree uses the C implementation). + """ + # nb: if we got rid of wordnet_to_class, this would run 2-3 + # times faster. + for fileid in self._fileids: + vnclass = fileid[:-4] # strip the '.xml' + self._class_to_fileid[vnclass] = fileid + self._shortid_to_longid[self.shortid(vnclass)] = vnclass + with self.open(fileid) as fp: + for m in self._INDEX_RE.finditer(fp.read()): + groups = m.groups() + if groups[0] is not None: + self._lemma_to_class[groups[0]].append(vnclass) + for wn in groups[1].split(): + self._wordnet_to_class[wn].append(vnclass) + elif groups[2] is not None: + self._class_to_fileid[groups[2]] = fileid + vnclass = groups[2] # for elts. + self._shortid_to_longid[self.shortid(vnclass)] = vnclass + else: + assert False, "unexpected match condition" + + ###################################################################### + # { Identifier conversion + ###################################################################### + + def longid(self, shortid): + """Returns longid of a VerbNet class + + Given a short VerbNet class identifier (eg '37.10'), map it + to a long id (eg 'confess-37.10'). 
If ``shortid`` is already a + long id, then return it as-is""" + if self._LONGID_RE.match(shortid): + return shortid # it's already a longid. + elif not self._SHORTID_RE.match(shortid): + raise ValueError("vnclass identifier %r not found" % shortid) + try: + return self._shortid_to_longid[shortid] + except KeyError as e: + raise ValueError("vnclass identifier %r not found" % shortid) from e + + def shortid(self, longid): + """Returns shortid of a VerbNet class + + Given a long VerbNet class identifier (eg 'confess-37.10'), + map it to a short id (eg '37.10'). If ``longid`` is already a + short id, then return it as-is.""" + if self._SHORTID_RE.match(longid): + return longid # it's already a shortid. + m = self._LONGID_RE.match(longid) + if m: + return m.group(2) + else: + raise ValueError("vnclass identifier %r not found" % longid) + + ###################################################################### + # { Frame access utility functions + ###################################################################### + + def _get_semantics_within_frame(self, vnframe): + """Returns semantics within a single frame + + A utility function to retrieve semantics within a frame in VerbNet + Members of the semantics dictionary: + 1) Predicate value + 2) Arguments + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + :return: semantics: semantics dictionary + """ + semantics_within_single_frame = [] + for pred in vnframe.findall("SEMANTICS/PRED"): + arguments = [ + {"type": arg.get("type"), "value": arg.get("value")} + for arg in pred.findall("ARGS/ARG") + ] + semantics_within_single_frame.append( + { + "predicate_value": pred.get("value"), + "arguments": arguments, + "negated": pred.get("bool") == "!", + } + ) + return semantics_within_single_frame + + def _get_example_within_frame(self, vnframe): + """Returns example within a frame + + A utility function to retrieve an example within a frame in VerbNet. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + :return: example_text: The example sentence for this particular frame + """ + example_element = vnframe.find("EXAMPLES/EXAMPLE") + if example_element is not None: + example_text = example_element.text + else: + example_text = "" + return example_text + + def _get_description_within_frame(self, vnframe): + """Returns member description within frame + + A utility function to retrieve a description of participating members + within a frame in VerbNet. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + :return: description: a description dictionary with members - primary and secondary + """ + description_element = vnframe.find("DESCRIPTION") + return { + "primary": description_element.attrib["primary"], + "secondary": description_element.get("secondary", ""), + } + + def _get_syntactic_list_within_frame(self, vnframe): + """Returns semantics within a frame + + A utility function to retrieve semantics within a frame in VerbNet. + Members of the syntactic dictionary: + 1) POS Tag + 2) Modifiers + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. 
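The identifier helpers above round-trip between the two id forms. A short sketch using the docstring's own example (assumes the VerbNet corpus is installed):

    from nltk.corpus import verbnet

    verbnet.longid("37.10")           # 'confess-37.10'
    verbnet.shortid("confess-37.10")  # '37.10'
    verbnet.longid("confess-37.10")   # already a long id, returned unchanged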
+ :return: syntax_within_single_frame + """ + syntax_within_single_frame = [] + for elt in vnframe.find("SYNTAX"): + pos_tag = elt.tag + modifiers = dict() + modifiers["value"] = elt.get("value") if "value" in elt.attrib else "" + modifiers["selrestrs"] = [ + {"value": restr.get("Value"), "type": restr.get("type")} + for restr in elt.findall("SELRESTRS/SELRESTR") + ] + modifiers["synrestrs"] = [ + {"value": restr.get("Value"), "type": restr.get("type")} + for restr in elt.findall("SYNRESTRS/SYNRESTR") + ] + syntax_within_single_frame.append( + {"pos_tag": pos_tag, "modifiers": modifiers} + ) + return syntax_within_single_frame + + ###################################################################### + # { Pretty Printing + ###################################################################### + + def pprint(self, vnclass): + """Returns pretty printed version of a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet class. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + s = vnclass.get("ID") + "\n" + s += self.pprint_subclasses(vnclass, indent=" ") + "\n" + s += self.pprint_members(vnclass, indent=" ") + "\n" + s += " Thematic roles:\n" + s += self.pprint_themroles(vnclass, indent=" ") + "\n" + s += " Frames:\n" + s += self.pprint_frames(vnclass, indent=" ") + return s + + def pprint_subclasses(self, vnclass, indent=""): + """Returns pretty printed version of subclasses of VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet class's subclasses. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + subclasses = self.subclasses(vnclass) + if not subclasses: + subclasses = ["(none)"] + s = "Subclasses: " + " ".join(subclasses) + return textwrap.fill( + s, 70, initial_indent=indent, subsequent_indent=indent + " " + ) + + def pprint_members(self, vnclass, indent=""): + """Returns pretty printed version of members in a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet class's member verbs. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + members = self.lemmas(vnclass) + if not members: + members = ["(none)"] + s = "Members: " + " ".join(members) + return textwrap.fill( + s, 70, initial_indent=indent, subsequent_indent=indent + " " + ) + + def pprint_themroles(self, vnclass, indent=""): + """Returns pretty printed version of thematic roles in a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet class's thematic roles. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. 
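A sketch of the pretty-printers defined above (assumes the VerbNet corpus is installed; the output is plain text, wrapped at 70 columns by the helpers):

    from nltk.corpus import verbnet

    print(verbnet.pprint("put-9.1"))          # subclasses, members, thematic roles and frames
    print(verbnet.pprint_members("put-9.1"))  # just the member verbs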
+ """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + + pieces = [] + for themrole in self.themroles(vnclass): + piece = indent + "* " + themrole.get("type") + modifiers = [ + modifier["value"] + modifier["type"] + for modifier in themrole["modifiers"] + ] + if modifiers: + piece += "[{}]".format(" ".join(modifiers)) + pieces.append(piece) + return "\n".join(pieces) + + def pprint_frames(self, vnclass, indent=""): + """Returns pretty version of all frames in a VerbNet class + + Return a string containing a pretty-printed representation of + the list of frames within the VerbNet class. + + :param vnclass: A VerbNet class identifier; or an ElementTree + containing the xml contents of a VerbNet class. + """ + if isinstance(vnclass, str): + vnclass = self.vnclass(vnclass) + pieces = [] + for vnframe in self.frames(vnclass): + pieces.append(self._pprint_single_frame(vnframe, indent)) + return "\n".join(pieces) + + def _pprint_single_frame(self, vnframe, indent=""): + """Returns pretty printed version of a single frame in a VerbNet class + + Returns a string containing a pretty-printed representation of + the given frame. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + """ + frame_string = self._pprint_description_within_frame(vnframe, indent) + "\n" + frame_string += self._pprint_example_within_frame(vnframe, indent + " ") + "\n" + frame_string += ( + self._pprint_syntax_within_frame(vnframe, indent + " Syntax: ") + "\n" + ) + frame_string += indent + " Semantics:\n" + frame_string += self._pprint_semantics_within_frame(vnframe, indent + " ") + return frame_string + + def _pprint_example_within_frame(self, vnframe, indent=""): + """Returns pretty printed version of example within frame in a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet frame example. + + :param vnframe: An ElementTree containing the xml contents of + a Verbnet frame. + """ + if vnframe["example"]: + return indent + " Example: " + vnframe["example"] + + def _pprint_description_within_frame(self, vnframe, indent=""): + """Returns pretty printed version of a VerbNet frame description + + Return a string containing a pretty-printed representation of + the given VerbNet frame description. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + """ + description = indent + vnframe["description"]["primary"] + if vnframe["description"]["secondary"]: + description += " ({})".format(vnframe["description"]["secondary"]) + return description + + def _pprint_syntax_within_frame(self, vnframe, indent=""): + """Returns pretty printed version of syntax within a frame in a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet frame syntax. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. 
+ """ + pieces = [] + for element in vnframe["syntax"]: + piece = element["pos_tag"] + modifier_list = [] + if "value" in element["modifiers"] and element["modifiers"]["value"]: + modifier_list.append(element["modifiers"]["value"]) + modifier_list += [ + "{}{}".format(restr["value"], restr["type"]) + for restr in ( + element["modifiers"]["selrestrs"] + + element["modifiers"]["synrestrs"] + ) + ] + if modifier_list: + piece += "[{}]".format(" ".join(modifier_list)) + pieces.append(piece) + + return indent + " ".join(pieces) + + def _pprint_semantics_within_frame(self, vnframe, indent=""): + """Returns a pretty printed version of semantics within frame in a VerbNet class + + Return a string containing a pretty-printed representation of + the given VerbNet frame semantics. + + :param vnframe: An ElementTree containing the xml contents of + a VerbNet frame. + """ + pieces = [] + for predicate in vnframe["semantics"]: + arguments = [argument["value"] for argument in predicate["arguments"]] + pieces.append( + f"{'¬' if predicate['negated'] else ''}{predicate['predicate_value']}({', '.join(arguments)})" + ) + return "\n".join(f"{indent}* {piece}" for piece in pieces) diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py new file mode 100644 index 0000000000000000000000000000000000000000..aced7e83fc7c48027d4d1eeb6aca46531ab57969 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py @@ -0,0 +1,166 @@ +# Natural Language Toolkit: Word List Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tokenize import line_tokenize + + +class WordListCorpusReader(CorpusReader): + """ + List of words, one per line. Blank lines are ignored. + """ + + def words(self, fileids=None, ignore_lines_startswith="\n"): + return [ + line + for line in line_tokenize(self.raw(fileids)) + if not line.startswith(ignore_lines_startswith) + ] + + +class SwadeshCorpusReader(WordListCorpusReader): + def entries(self, fileids=None): + """ + :return: a tuple of words for the specified fileids. + """ + if not fileids: + fileids = self.fileids() + + wordlists = [self.words(f) for f in fileids] + return list(zip(*wordlists)) + + +class NonbreakingPrefixesCorpusReader(WordListCorpusReader): + """ + This is a class to read the nonbreaking prefixes textfiles from the + Moses Machine Translation toolkit. These lists are used in the Python port + of the Moses' word tokenizer. + """ + + available_langs = { + "catalan": "ca", + "czech": "cs", + "german": "de", + "greek": "el", + "english": "en", + "spanish": "es", + "finnish": "fi", + "french": "fr", + "hungarian": "hu", + "icelandic": "is", + "italian": "it", + "latvian": "lv", + "dutch": "nl", + "polish": "pl", + "portuguese": "pt", + "romanian": "ro", + "russian": "ru", + "slovak": "sk", + "slovenian": "sl", + "swedish": "sv", + "tamil": "ta", + } + # Also, add the lang IDs as the keys. + available_langs.update({v: v for v in available_langs.values()}) + + def words(self, lang=None, fileids=None, ignore_lines_startswith="#"): + """ + This module returns a list of nonbreaking prefixes for the specified + language(s). 
+ + >>> from nltk.corpus import nonbreaking_prefixes as nbp + >>> nbp.words('en')[:10] == [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'J'] + True + >>> nbp.words('ta')[:5] == [u'\u0b85', u'\u0b86', u'\u0b87', u'\u0b88', u'\u0b89'] + True + + :return: a list words for the specified language(s). + """ + # If *lang* in list of languages available, allocate apt fileid. + # Otherwise, the function returns non-breaking prefixes for + # all languages when fileids==None. + if lang in self.available_langs: + lang = self.available_langs[lang] + fileids = ["nonbreaking_prefix." + lang] + return [ + line + for line in line_tokenize(self.raw(fileids)) + if not line.startswith(ignore_lines_startswith) + ] + + +class UnicharsCorpusReader(WordListCorpusReader): + """ + This class is used to read lists of characters from the Perl Unicode + Properties (see https://perldoc.perl.org/perluniprops.html). + The files in the perluniprop.zip are extracted using the Unicode::Tussle + module from https://search.cpan.org/~bdfoy/Unicode-Tussle-1.11/lib/Unicode/Tussle.pm + """ + + # These are categories similar to the Perl Unicode Properties + available_categories = [ + "Close_Punctuation", + "Currency_Symbol", + "IsAlnum", + "IsAlpha", + "IsLower", + "IsN", + "IsSc", + "IsSo", + "IsUpper", + "Line_Separator", + "Number", + "Open_Punctuation", + "Punctuation", + "Separator", + "Symbol", + ] + + def chars(self, category=None, fileids=None): + """ + This module returns a list of characters from the Perl Unicode Properties. + They are very useful when porting Perl tokenizers to Python. + + >>> from nltk.corpus import perluniprops as pup + >>> pup.chars('Open_Punctuation')[:5] == [u'(', u'[', u'{', u'\u0f3a', u'\u0f3c'] + True + >>> pup.chars('Currency_Symbol')[:5] == [u'$', u'\xa2', u'\xa3', u'\xa4', u'\xa5'] + True + >>> pup.available_categories + ['Close_Punctuation', 'Currency_Symbol', 'IsAlnum', 'IsAlpha', 'IsLower', 'IsN', 'IsSc', 'IsSo', 'IsUpper', 'Line_Separator', 'Number', 'Open_Punctuation', 'Punctuation', 'Separator', 'Symbol'] + + :return: a list of characters given the specific unicode character category + """ + if category in self.available_categories: + fileids = [category + ".txt"] + return list(self.raw(fileids).strip()) + + +class MWAPPDBCorpusReader(WordListCorpusReader): + """ + This class is used to read the list of word pairs from the subset of lexical + pairs of The Paraphrase Database (PPDB) XXXL used in the Monolingual Word + Alignment (MWA) algorithm described in Sultan et al. (2014a, 2014b, 2015): + + - http://acl2014.org/acl2014/Q14/pdf/Q14-1017 + - https://www.aclweb.org/anthology/S14-2039 + - https://www.aclweb.org/anthology/S15-2027 + + The original source of the full PPDB corpus can be found on + https://www.cis.upenn.edu/~ccb/ppdb/ + + :return: a list of tuples of similar lexical terms. + """ + + mwa_ppdb_xxxl_file = "ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs" + + def entries(self, fileids=mwa_ppdb_xxxl_file): + """ + :return: a tuple of synonym word pairs. 
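For context, a sketch of how these word-list readers are used in practice (not part of the diff; the stopwords and swadesh corpora must be downloaded first, and the path and file passed to WordListCorpusReader below are purely hypothetical):

    from nltk.corpus import stopwords, swadesh
    from nltk.corpus.reader import WordListCorpusReader

    stopwords.words("english")[:3]     # e.g. ['i', 'me', 'my']
    swadesh.entries(["en", "fr"])[:2]  # aligned tuples across the two language wordlists
    reader = WordListCorpusReader("/tmp/wordlists", ["custom.txt"])  # hypothetical corpus root/file
    reader.words()                     # one entry per line, blank lines ignored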
+ """ + return [tuple(line.split("\t")) for line in line_tokenize(self.raw(fileids))] diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/xmldocs.py b/venv/lib/python3.10/site-packages/nltk/corpus/reader/xmldocs.py new file mode 100644 index 0000000000000000000000000000000000000000..1a9b3d001e0e31120ff1a7df266bb4c82b8de360 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/corpus/reader/xmldocs.py @@ -0,0 +1,397 @@ +# Natural Language Toolkit: XML Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for corpora whose documents are xml files. + +(note -- not named 'xml' to avoid conflicting w/ standard xml package) +""" + +import codecs +from xml.etree import ElementTree + +from nltk.corpus.reader.api import CorpusReader +from nltk.corpus.reader.util import * +from nltk.data import SeekableUnicodeStreamReader +from nltk.internals import ElementWrapper +from nltk.tokenize import WordPunctTokenizer + + +class XMLCorpusReader(CorpusReader): + """ + Corpus reader for corpora whose documents are xml files. + + Note that the ``XMLCorpusReader`` constructor does not take an + ``encoding`` argument, because the unicode encoding is specified by + the XML files themselves. See the XML specs for more info. + """ + + def __init__(self, root, fileids, wrap_etree=False): + self._wrap_etree = wrap_etree + CorpusReader.__init__(self, root, fileids) + + def xml(self, fileid=None): + # Make sure we have exactly one file -- no concatenating XML. + if fileid is None and len(self._fileids) == 1: + fileid = self._fileids[0] + if not isinstance(fileid, str): + raise TypeError("Expected a single file identifier string") + # Read the XML in using ElementTree. + with self.abspath(fileid).open() as fp: + elt = ElementTree.parse(fp).getroot() + # If requested, wrap it. + if self._wrap_etree: + elt = ElementWrapper(elt) + # Return the ElementTree element. + return elt + + def words(self, fileid=None): + """ + Returns all of the words and punctuation symbols in the specified file + that were in text nodes -- ie, tags are ignored. Like the xml() method, + fileid can only specify one file. + + :return: the given file's text nodes as a list of words and punctuation symbols + :rtype: list(str) + """ + + elt = self.xml(fileid) + encoding = self.encoding(fileid) + word_tokenizer = WordPunctTokenizer() + try: + iterator = elt.getiterator() + except: + iterator = elt.iter() + out = [] + + for node in iterator: + text = node.text + if text is not None: + if isinstance(text, bytes): + text = text.decode(encoding) + toks = word_tokenizer.tokenize(text) + out.extend(toks) + return out + + +class XMLCorpusView(StreamBackedCorpusView): + """ + A corpus view that selects out specified elements from an XML + file, and provides a flat list-like interface for accessing them. + (Note: ``XMLCorpusView`` is not used by ``XMLCorpusReader`` itself, + but may be used by subclasses of ``XMLCorpusReader``.) + + Every XML corpus view has a "tag specification", indicating what + XML elements should be included in the view; and each (non-nested) + element that matches this specification corresponds to one item in + the view. Tag specifications are regular expressions over tag + paths, where a tag path is a list of element tag names, separated + by '/', indicating the ancestry of the element. Some examples: + + - ``'foo'``: A top-level element whose tag is ``foo``. 
+ - ``'foo/bar'``: An element whose tag is ``bar`` and whose parent + is a top-level element whose tag is ``foo``. + - ``'.*/foo'``: An element whose tag is ``foo``, appearing anywhere + in the xml tree. + - ``'.*/(foo|bar)'``: An wlement whose tag is ``foo`` or ``bar``, + appearing anywhere in the xml tree. + + The view items are generated from the selected XML elements via + the method ``handle_elt()``. By default, this method returns the + element as-is (i.e., as an ElementTree object); but it can be + overridden, either via subclassing or via the ``elt_handler`` + constructor parameter. + """ + + #: If true, then display debugging output to stdout when reading + #: blocks. + _DEBUG = False + + #: The number of characters read at a time by this corpus reader. + _BLOCK_SIZE = 1024 + + def __init__(self, fileid, tagspec, elt_handler=None): + """ + Create a new corpus view based on a specified XML file. + + Note that the ``XMLCorpusView`` constructor does not take an + ``encoding`` argument, because the unicode encoding is + specified by the XML files themselves. + + :type tagspec: str + :param tagspec: A tag specification, indicating what XML + elements should be included in the view. Each non-nested + element that matches this specification corresponds to one + item in the view. + + :param elt_handler: A function used to transform each element + to a value for the view. If no handler is specified, then + ``self.handle_elt()`` is called, which returns the element + as an ElementTree object. The signature of elt_handler is:: + + elt_handler(elt, tagspec) -> value + """ + if elt_handler: + self.handle_elt = elt_handler + + self._tagspec = re.compile(tagspec + r"\Z") + """The tag specification for this corpus view.""" + + self._tag_context = {0: ()} + """A dictionary mapping from file positions (as returned by + ``stream.seek()`` to XML contexts. An XML context is a + tuple of XML tag names, indicating which tags have not yet + been closed.""" + + encoding = self._detect_encoding(fileid) + StreamBackedCorpusView.__init__(self, fileid, encoding=encoding) + + def _detect_encoding(self, fileid): + if isinstance(fileid, PathPointer): + try: + infile = fileid.open() + s = infile.readline() + finally: + infile.close() + else: + with open(fileid, "rb") as infile: + s = infile.readline() + if s.startswith(codecs.BOM_UTF16_BE): + return "utf-16-be" + if s.startswith(codecs.BOM_UTF16_LE): + return "utf-16-le" + if s.startswith(codecs.BOM_UTF32_BE): + return "utf-32-be" + if s.startswith(codecs.BOM_UTF32_LE): + return "utf-32-le" + if s.startswith(codecs.BOM_UTF8): + return "utf-8" + m = re.match(rb'\s*<\?xml\b.*\bencoding="([^"]+)"', s) + if m: + return m.group(1).decode() + m = re.match(rb"\s*<\?xml\b.*\bencoding='([^']+)'", s) + if m: + return m.group(1).decode() + # No encoding found -- what should the default be? + return "utf-8" + + def handle_elt(self, elt, context): + """ + Convert an element into an appropriate value for inclusion in + the view. Unless overridden by a subclass or by the + ``elt_handler`` constructor argument, this method simply + returns ``elt``. + + :return: The view value corresponding to ``elt``. + + :type elt: ElementTree + :param elt: The element that should be converted. + + :type context: str + :param context: A string composed of element tags separated by + forward slashes, indicating the XML context of the given + element. 
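A sketch of XMLCorpusReader in use, via the Shakespeare corpus that NLTK distributes as XML (not part of the diff; assumes nltk.download('shakespeare') and that 'dream.xml' is present, as in the standard distribution):

    from nltk.corpus import shakespeare

    play = shakespeare.xml("dream.xml")  # root ElementTree element, e.g. tag 'PLAY'
    play.find("TITLE").text              # e.g. "A Midsummer Night's Dream"
    shakespeare.words("dream.xml")[:5]   # tokens from text nodes only; tags are ignored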
For example, the string ``'foo/bar/baz'`` + indicates that the element is a ``baz`` element whose + parent is a ``bar`` element and whose grandparent is a + top-level ``foo`` element. + """ + return elt + + #: A regular expression that matches XML fragments that do not + #: contain any un-closed tags. + _VALID_XML_RE = re.compile( + r""" + [^<]* + ( + (() | # comment + () | # doctype decl + (<[^!>][^>]*>)) # tag or PI + [^<]*)* + \Z""", + re.DOTALL | re.VERBOSE, + ) + + #: A regular expression used to extract the tag name from a start tag, + #: end tag, or empty-elt tag string. + _XML_TAG_NAME = re.compile(r"<\s*(?:/\s*)?([^\s>]+)") + + #: A regular expression used to find all start-tags, end-tags, and + #: empty-elt tags in an XML file. This regexp is more lenient than + #: the XML spec -- e.g., it allows spaces in some places where the + #: spec does not. + _XML_PIECE = re.compile( + r""" + # Include these so we can skip them: + (?P )| + (?P )| + (?P <\?.*?\?> )| + (?P ]*(\[[^\]]*])?\s*>)| + # These are the ones we actually care about: + (?P <\s*[^>/\?!\s][^>]*/\s*> )| + (?P <\s*[^>/\?!\s][^>]*> )| + (?P <\s*/[^>/\?!\s][^>]*> )""", + re.DOTALL | re.VERBOSE, + ) + + def _read_xml_fragment(self, stream): + """ + Read a string from the given stream that does not contain any + un-closed tags. In particular, this function first reads a + block from the stream of size ``self._BLOCK_SIZE``. It then + checks if that block contains an un-closed tag. If it does, + then this function either backtracks to the last '<', or reads + another block. + """ + fragment = "" + + if isinstance(stream, SeekableUnicodeStreamReader): + startpos = stream.tell() + while True: + # Read a block and add it to the fragment. + xml_block = stream.read(self._BLOCK_SIZE) + fragment += xml_block + + # Do we have a well-formed xml fragment? + if self._VALID_XML_RE.match(fragment): + return fragment + + # Do we have a fragment that will never be well-formed? + if re.search("[<>]", fragment).group(0) == ">": + pos = stream.tell() - ( + len(fragment) - re.search("[<>]", fragment).end() + ) + raise ValueError('Unexpected ">" near char %s' % pos) + + # End of file? + if not xml_block: + raise ValueError("Unexpected end of file: tag not closed") + + # If not, then we must be in the middle of a <..tag..>. + # If appropriate, backtrack to the most recent '<' + # character. + last_open_bracket = fragment.rfind("<") + if last_open_bracket > 0: + if self._VALID_XML_RE.match(fragment[:last_open_bracket]): + if isinstance(stream, SeekableUnicodeStreamReader): + stream.seek(startpos) + stream.char_seek_forward(last_open_bracket) + else: + stream.seek(-(len(fragment) - last_open_bracket), 1) + return fragment[:last_open_bracket] + + # Otherwise, read another block. (i.e., return to the + # top of the loop.) + + def read_block(self, stream, tagspec=None, elt_handler=None): + """ + Read from ``stream`` until we find at least one element that + matches ``tagspec``, and return the result of applying + ``elt_handler`` to each element found. + """ + if tagspec is None: + tagspec = self._tagspec + if elt_handler is None: + elt_handler = self.handle_elt + + # Use a stack of strings to keep track of our context: + context = list(self._tag_context.get(stream.tell())) + assert context is not None # check this -- could it ever happen? 
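    # Overview of the loop that follows: the stream is consumed in well-formed
    # fragments (see _read_xml_fragment). Every start-tag pushes its name onto
    # `context` and every end-tag pops it, so "/".join(context) is the tag path
    # matched against `tagspec`. A matching element's text is captured once its
    # end-tag is seen (or carried over across fragments). If a matching element
    # is still open when a fragment ends, the loop either keeps reading (when
    # nothing has been collected yet) or backtracks the stream to that element's
    # start so the next read_block() call re-reads it from a clean position.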
+ + elts = [] + + elt_start = None # where does the elt start + elt_depth = None # what context depth + elt_text = "" + + while elts == [] or elt_start is not None: + if isinstance(stream, SeekableUnicodeStreamReader): + startpos = stream.tell() + xml_fragment = self._read_xml_fragment(stream) + + # End of file. + if not xml_fragment: + if elt_start is None: + break + else: + raise ValueError("Unexpected end of file") + + # Process each in the xml fragment. + for piece in self._XML_PIECE.finditer(xml_fragment): + if self._DEBUG: + print("{:>25} {}".format("/".join(context)[-20:], piece.group())) + + if piece.group("START_TAG"): + name = self._XML_TAG_NAME.match(piece.group()).group(1) + # Keep context up-to-date. + context.append(name) + # Is this one of the elts we're looking for? + if elt_start is None: + if re.match(tagspec, "/".join(context)): + elt_start = piece.start() + elt_depth = len(context) + + elif piece.group("END_TAG"): + name = self._XML_TAG_NAME.match(piece.group()).group(1) + # sanity checks: + if not context: + raise ValueError("Unmatched tag " % name) + if name != context[-1]: + raise ValueError(f"Unmatched tag <{context[-1]}>...") + # Is this the end of an element? + if elt_start is not None and elt_depth == len(context): + elt_text += xml_fragment[elt_start : piece.end()] + elts.append((elt_text, "/".join(context))) + elt_start = elt_depth = None + elt_text = "" + # Keep context up-to-date + context.pop() + + elif piece.group("EMPTY_ELT_TAG"): + name = self._XML_TAG_NAME.match(piece.group()).group(1) + if elt_start is None: + if re.match(tagspec, "/".join(context) + "/" + name): + elts.append((piece.group(), "/".join(context) + "/" + name)) + + if elt_start is not None: + # If we haven't found any elements yet, then keep + # looping until we do. + if elts == []: + elt_text += xml_fragment[elt_start:] + elt_start = 0 + + # If we've found at least one element, then try + # backtracking to the start of the element that we're + # inside of. + else: + # take back the last start-tag, and return what + # we've gotten so far (elts is non-empty). + if self._DEBUG: + print(" " * 36 + "(backtrack)") + if isinstance(stream, SeekableUnicodeStreamReader): + stream.seek(startpos) + stream.char_seek_forward(elt_start) + else: + stream.seek(-(len(xml_fragment) - elt_start), 1) + context = context[: elt_depth - 1] + elt_start = elt_depth = None + elt_text = "" + + # Update the _tag_context dict. + pos = stream.tell() + if pos in self._tag_context: + assert tuple(context) == self._tag_context[pos] + else: + self._tag_context[pos] = tuple(context) + + return [ + elt_handler( + ElementTree.fromstring(elt.encode("ascii", "xmlcharrefreplace")), + context, + ) + for (elt, context) in elts + ] diff --git a/venv/lib/python3.10/site-packages/nltk/lm/__init__.py b/venv/lib/python3.10/site-packages/nltk/lm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11d31b9a6aeded7e96f7db4395801af082a25737 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/lm/__init__.py @@ -0,0 +1,235 @@ +# Natural Language Toolkit: Language Models +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Ilia Kurenkov +# URL: >> text = [['a', 'b', 'c'], ['a', 'c', 'd', 'c', 'e', 'f']] + +If we want to train a bigram model, we need to turn this text into bigrams. +Here's what the first sentence of our text would look like if we use a function +from NLTK for this. 
+ + >>> from nltk.util import bigrams + >>> list(bigrams(text[0])) + [('a', 'b'), ('b', 'c')] + +Notice how "b" occurs both as the first and second member of different bigrams +but "a" and "c" don't? Wouldn't it be nice to somehow indicate how often sentences +start with "a" and end with "c"? +A standard way to deal with this is to add special "padding" symbols to the +sentence before splitting it into ngrams. +Fortunately, NLTK also has a function for that, let's see what it does to the +first sentence. + + >>> from nltk.util import pad_sequence + >>> list(pad_sequence(text[0], + ... pad_left=True, + ... left_pad_symbol="", + ... pad_right=True, + ... right_pad_symbol="", + ... n=2)) + ['', 'a', 'b', 'c', ''] + +Note the `n` argument, that tells the function we need padding for bigrams. +Now, passing all these parameters every time is tedious and in most cases they +can be safely assumed as defaults anyway. +Thus our module provides a convenience function that has all these arguments +already set while the other arguments remain the same as for `pad_sequence`. + + >>> from nltk.lm.preprocessing import pad_both_ends + >>> list(pad_both_ends(text[0], n=2)) + ['', 'a', 'b', 'c', ''] + +Combining the two parts discussed so far we get the following preparation steps +for one sentence. + + >>> list(bigrams(pad_both_ends(text[0], n=2))) + [('', 'a'), ('a', 'b'), ('b', 'c'), ('c', '')] + +To make our model more robust we could also train it on unigrams (single words) +as well as bigrams, its main source of information. +NLTK once again helpfully provides a function called `everygrams`. +While not the most efficient, it is conceptually simple. + + + >>> from nltk.util import everygrams + >>> padded_bigrams = list(pad_both_ends(text[0], n=2)) + >>> list(everygrams(padded_bigrams, max_len=2)) + [('',), ('', 'a'), ('a',), ('a', 'b'), ('b',), ('b', 'c'), ('c',), ('c', ''), ('',)] + +We are almost ready to start counting ngrams, just one more step left. +During training and evaluation our model will rely on a vocabulary that +defines which words are "known" to the model. +To create this vocabulary we need to pad our sentences (just like for counting +ngrams) and then combine the sentences into one flat stream of words. + + >>> from nltk.lm.preprocessing import flatten + >>> list(flatten(pad_both_ends(sent, n=2) for sent in text)) + ['', 'a', 'b', 'c', '', '', 'a', 'c', 'd', 'c', 'e', 'f', ''] + +In most cases we want to use the same text as the source for both vocabulary +and ngram counts. +Now that we understand what this means for our preprocessing, we can simply import +a function that does everything for us. + + >>> from nltk.lm.preprocessing import padded_everygram_pipeline + >>> train, vocab = padded_everygram_pipeline(2, text) + +So as to avoid re-creating the text in memory, both `train` and `vocab` are lazy +iterators. They are evaluated on demand at training time. + + +Training +======== +Having prepared our data we are ready to start training a model. +As a simple example, let us train a Maximum Likelihood Estimator (MLE). +We only need to specify the highest ngram order to instantiate it. + + >>> from nltk.lm import MLE + >>> lm = MLE(2) + +This automatically creates an empty vocabulary... + + >>> len(lm.vocab) + 0 + +... which gets filled as we fit the model. + + >>> lm.fit(train, vocab) + >>> print(lm.vocab) + + >>> len(lm.vocab) + 9 + +The vocabulary helps us handle words that have not occurred during training. 
+ + >>> lm.vocab.lookup(text[0]) + ('a', 'b', 'c') + >>> lm.vocab.lookup(["aliens", "from", "Mars"]) + ('', '', '') + +Moreover, in some cases we want to ignore words that we did see during training +but that didn't occur frequently enough, to provide us useful information. +You can tell the vocabulary to ignore such words. +To find out how that works, check out the docs for the `Vocabulary` class. + + +Using a Trained Model +===================== +When it comes to ngram models the training boils down to counting up the ngrams +from the training corpus. + + >>> print(lm.counts) + + +This provides a convenient interface to access counts for unigrams... + + >>> lm.counts['a'] + 2 + +...and bigrams (in this case "a b") + + >>> lm.counts[['a']]['b'] + 1 + +And so on. However, the real purpose of training a language model is to have it +score how probable words are in certain contexts. +This being MLE, the model returns the item's relative frequency as its score. + + >>> lm.score("a") + 0.15384615384615385 + +Items that are not seen during training are mapped to the vocabulary's +"unknown label" token. This is "" by default. + + >>> lm.score("") == lm.score("aliens") + True + +Here's how you get the score for a word given some preceding context. +For example we want to know what is the chance that "b" is preceded by "a". + + >>> lm.score("b", ["a"]) + 0.5 + +To avoid underflow when working with many small score values it makes sense to +take their logarithm. +For convenience this can be done with the `logscore` method. + + >>> lm.logscore("a") + -2.700439718141092 + +Building on this method, we can also evaluate our model's cross-entropy and +perplexity with respect to sequences of ngrams. + + >>> test = [('a', 'b'), ('c', 'd')] + >>> lm.entropy(test) + 1.292481250360578 + >>> lm.perplexity(test) + 2.449489742783178 + +It is advisable to preprocess your test text exactly the same way as you did +the training text. + +One cool feature of ngram models is that they can be used to generate text. + + >>> lm.generate(1, random_seed=3) + '' + >>> lm.generate(5, random_seed=3) + ['', 'a', 'b', 'c', 'd'] + +Provide `random_seed` if you want to consistently reproduce the same text all +other things being equal. Here we are using it to test the examples. + +You can also condition your generation on some preceding text with the `context` +argument. + + >>> lm.generate(5, text_seed=['c'], random_seed=3) + ['', 'c', 'd', 'c', 'd'] + +Note that an ngram model is restricted in how much preceding context it can +take into account. For example, a trigram model can only condition its output +on 2 preceding words. If you pass in a 4-word context, the first two words +will be ignored. 
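The tutorial above sticks to MLE, but the other estimators this module exports (Lidstone, Laplace, KneserNeyInterpolated, ...) share the same fit/score/perplexity interface. A sketch with add-one (Laplace) smoothing, reusing the toy `text` from the tutorial:

    from nltk.lm import Laplace
    from nltk.lm.preprocessing import padded_everygram_pipeline

    text = [['a', 'b', 'c'], ['a', 'c', 'd', 'c', 'e', 'f']]
    train, vocab = padded_everygram_pipeline(2, text)
    lm = Laplace(2)          # add-one smoothing over the same bigram setup
    lm.fit(train, vocab)
    lm.score("b", ["c"])     # > 0 even though the bigram ('c', 'b') never occurs in training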
+""" + +from nltk.lm.counter import NgramCounter +from nltk.lm.models import ( + MLE, + AbsoluteDiscountingInterpolated, + KneserNeyInterpolated, + Laplace, + Lidstone, + StupidBackoff, + WittenBellInterpolated, +) +from nltk.lm.vocabulary import Vocabulary + +__all__ = [ + "Vocabulary", + "NgramCounter", + "MLE", + "Lidstone", + "Laplace", + "WittenBellInterpolated", + "KneserNeyInterpolated", + "AbsoluteDiscountingInterpolated", + "StupidBackoff", +] diff --git a/venv/lib/python3.10/site-packages/nltk/lm/preprocessing.py b/venv/lib/python3.10/site-packages/nltk/lm/preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..9ba6d5bd2cfb59d479b203ebf99878024b2a0f76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/lm/preprocessing.py @@ -0,0 +1,51 @@ +# Natural Language Toolkit: Language Model Unit Tests +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +from functools import partial +from itertools import chain + +from nltk.util import everygrams, pad_sequence + +flatten = chain.from_iterable +pad_both_ends = partial( + pad_sequence, + pad_left=True, + left_pad_symbol="", + pad_right=True, + right_pad_symbol="", +) +pad_both_ends.__doc__ = """Pads both ends of a sentence to length specified by ngram order. + + Following convention pads the start of sentence pads its end. + """ + + +def padded_everygrams(order, sentence): + """Helper with some useful defaults. + + Applies pad_both_ends to sentence and follows it up with everygrams. + """ + return everygrams(list(pad_both_ends(sentence, n=order)), max_len=order) + + +def padded_everygram_pipeline(order, text): + """Default preprocessing for a sequence of sentences. + + Creates two iterators: + + - sentences padded and turned into sequences of `nltk.util.everygrams` + - sentences padded as above and chained together for a flat stream of words + + :param order: Largest ngram length produced by `everygrams`. + :param text: Text to iterate over. Expected to be an iterable of sentences. + :type text: Iterable[Iterable[str]] + :return: iterator over text as ngrams, iterator over text as vocabulary data + """ + padding_fn = partial(pad_both_ends, n=order) + return ( + (everygrams(list(padding_fn(sent)), max_len=order) for sent in text), + flatten(map(padding_fn, text)), + )