diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..238522fd6f7cedce69faf8bfb3384b22cc509cbb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__init__.py @@ -0,0 +1,101 @@ +# Natural Language Toolkit: Classifiers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Classes and interfaces for labeling tokens with category labels (or +"class labels"). Typically, labels are represented with strings +(such as ``'health'`` or ``'sports'``). Classifiers can be used to +perform a wide range of classification tasks. For example, +classifiers can be used... + +- to classify documents by topic +- to classify ambiguous words by which word sense is intended +- to classify acoustic signals by which phoneme they represent +- to classify sentences by their author + +Features +======== +In order to decide which category label is appropriate for a given +token, classifiers examine one or more 'features' of the token. These +"features" are typically chosen by hand, and indicate which aspects +of the token are relevant to the classification decision. For +example, a document classifier might use a separate feature for each +word, recording how often that word occurred in the document. + +Featuresets +=========== +The features describing a token are encoded using a "featureset", +which is a dictionary that maps from "feature names" to "feature +values". Feature names are unique strings that indicate what aspect +of the token is encoded by the feature. Examples include +``'prevword'``, for a feature whose value is the previous word; and +``'contains-word(library)'`` for a feature that is true when a document +contains the word ``'library'``. Feature values are typically +booleans, numbers, or strings, depending on which feature they +describe. + +Featuresets are typically constructed using a "feature detector" +(also known as a "feature extractor"). A feature detector is a +function that takes a token (and sometimes information about its +context) as its input, and returns a featureset describing that token. +For example, the following feature detector converts a document +(stored as a list of words) to a featureset describing the set of +words included in the document: + + >>> # Define a feature detector function. + >>> def document_features(document): + ... return dict([('contains-word(%s)' % w, True) for w in document]) + +Feature detectors are typically applied to each token before it is fed +to the classifier: + + >>> # Classify each Gutenberg document. + >>> from nltk.corpus import gutenberg + >>> for fileid in gutenberg.fileids(): # doctest: +SKIP + ... doc = gutenberg.words(fileid) # doctest: +SKIP + ... print(fileid, classifier.classify(document_features(doc))) # doctest: +SKIP + +The parameters that a feature detector expects will vary, depending on +the task and the needs of the feature detector. For example, a +feature detector for word sense disambiguation (WSD) might take as its +input a sentence, and the index of a word that should be classified, +and return a featureset for that word. The following feature detector +for WSD includes features describing the left and right contexts of +the target word: + + >>> def wsd_features(sentence, index): + ... featureset = {} + ... for i in range(max(0, index-3), index): + ... 
featureset['left-context(%s)' % sentence[i]] = True + ... for i in range(index, max(index+3, len(sentence))): + ... featureset['right-context(%s)' % sentence[i]] = True + ... return featureset + +Training Classifiers +==================== +Most classifiers are built by training them on a list of hand-labeled +examples, known as the "training set". Training sets are represented +as lists of ``(featuredict, label)`` tuples. +""" + +from nltk.classify.api import ClassifierI, MultiClassifierI +from nltk.classify.decisiontree import DecisionTreeClassifier +from nltk.classify.maxent import ( + BinaryMaxentFeatureEncoding, + ConditionalExponentialClassifier, + MaxentClassifier, + TypedMaxentFeatureEncoding, +) +from nltk.classify.megam import call_megam, config_megam +from nltk.classify.naivebayes import NaiveBayesClassifier +from nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier +from nltk.classify.rte_classify import RTEFeatureExtractor, rte_classifier, rte_features +from nltk.classify.scikitlearn import SklearnClassifier +from nltk.classify.senna import Senna +from nltk.classify.textcat import TextCat +from nltk.classify.util import accuracy, apply_features, log_likelihood +from nltk.classify.weka import WekaClassifier, config_weka diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daedff581eabcaac043efe56a84748dbff595ab4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99506cd29d25dde7a213b8656b552a481466d39d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/decisiontree.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/decisiontree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03d670d0ccee127b1aab2bdc97e7cab2e0890e77 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/decisiontree.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/maxent.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/maxent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40f025a951736ef56a73f892f46469f167119382 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/maxent.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/megam.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/megam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e23e9880cb5045ba008bbed2faaa3e8ce4e0eb3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/megam.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/naivebayes.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/naivebayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b055ecca750d41e8f5d707c1f9f224a338e3b70b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/naivebayes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebf187f17576cb3c7b3b5d9dfb9210a550b56fff Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/rte_classify.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/rte_classify.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..220bc81d08c1cd6ec4d8c41be3c024e369cb3c71 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/rte_classify.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/scikitlearn.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/scikitlearn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa4d4f2a33461c658d8c66e12e2eee5485f68672 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/scikitlearn.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/senna.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/senna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..882ac3969fbfb78b38c2916c744ed18f6129035d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/senna.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/svm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/svm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4acdfcba73399155933baaa05aabb2592a252e3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/svm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/tadm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/tadm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aade5fc2da1bba7fe7a59fde7e246a5315e66a66 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/tadm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/textcat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/textcat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cfac54b77892fa6152492f8bce8a1970cd136f9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/textcat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/util.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6fb647ad41bd19efab5c956085a480e0c2961d6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/weka.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/weka.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b7d883c7bacfdc3f44b55b757febc2e860b1422 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/weka.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/classify/tadm.py b/env-llmeval/lib/python3.10/site-packages/nltk/classify/tadm.py new file mode 100644 index 0000000000000000000000000000000000000000..f8eb4b3daa2b7e904856b3fa2b4f16378427715f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/classify/tadm.py @@ -0,0 +1,122 @@ +# Natural Language Toolkit: Interface to TADM Classifier +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Joseph Frazee +# URL: +# For license information, see LICENSE.TXT + +import subprocess +import sys + +from nltk.internals import find_binary + +try: + import numpy +except ImportError: + pass + +_tadm_bin = None + + +def config_tadm(bin=None): + global _tadm_bin + _tadm_bin = find_binary( + "tadm", bin, env_vars=["TADM"], binary_names=["tadm"], url="http://tadm.sf.net" + ) + + +def write_tadm_file(train_toks, encoding, stream): + """ + Generate an input file for ``tadm`` based on the given corpus of + classified tokens. + + :type train_toks: list(tuple(dict, str)) + :param train_toks: Training data, represented as a list of + pairs, the first member of which is a feature dictionary, + and the second of which is a classification label. + :type encoding: TadmEventMaxentFeatureEncoding + :param encoding: A feature encoding, used to convert featuresets + into feature vectors. + :type stream: stream + :param stream: The stream to which the ``tadm`` input file should be + written. + """ + # See the following for a file format description: + # + # https://sf.net/forum/forum.php?thread_id=1391502&forum_id=473054 + # https://sf.net/forum/forum.php?thread_id=1675097&forum_id=473054 + labels = encoding.labels() + for featureset, label in train_toks: + length_line = "%d\n" % len(labels) + stream.write(length_line) + for known_label in labels: + v = encoding.encode(featureset, known_label) + line = "%d %d %s\n" % ( + int(label == known_label), + len(v), + " ".join("%d %d" % u for u in v), + ) + stream.write(line) + + +def parse_tadm_weights(paramfile): + """ + Given the stdout output generated by ``tadm`` when training a + model, return a ``numpy`` array containing the corresponding weight + vector. + """ + weights = [] + for line in paramfile: + weights.append(float(line.strip())) + return numpy.array(weights, "d") + + +def call_tadm(args): + """ + Call the ``tadm`` binary with the given arguments. + """ + if isinstance(args, str): + raise TypeError("args should be a list of strings") + if _tadm_bin is None: + config_tadm() + + # Call tadm via a subprocess + cmd = [_tadm_bin] + args + p = subprocess.Popen(cmd, stdout=sys.stdout) + (stdout, stderr) = p.communicate() + + # Check the return code. 
+ if p.returncode != 0: + print() + print(stderr) + raise OSError("tadm command failed!") + + +def names_demo(): + from nltk.classify.maxent import TadmMaxentClassifier + from nltk.classify.util import names_demo + + classifier = names_demo(TadmMaxentClassifier.train) + + +def encoding_demo(): + import sys + + from nltk.classify.maxent import TadmEventMaxentFeatureEncoding + + tokens = [ + ({"f0": 1, "f1": 1, "f3": 1}, "A"), + ({"f0": 1, "f2": 1, "f4": 1}, "B"), + ({"f0": 2, "f2": 1, "f3": 1, "f4": 1}, "A"), + ] + encoding = TadmEventMaxentFeatureEncoding.train(tokens) + write_tadm_file(tokens, encoding, sys.stdout) + print() + for i in range(encoding.length()): + print("%s --> %d" % (encoding.describe(i), i)) + print() + + +if __name__ == "__main__": + encoding_demo() + names_demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..67f565aaa85618c0268c75cd4b1524829712909c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/__init__.py @@ -0,0 +1,529 @@ +# Natural Language Toolkit: Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +# TODO this docstring isn't up-to-date! +""" +NLTK corpus readers. The modules in this package provide functions +that can be used to read corpus files in a variety of formats. These +functions can be used to read both the corpus files that are +distributed in the NLTK corpus package, and corpus files that are part +of external corpora. + +Available Corpora +================= + +Please see https://www.nltk.org/nltk_data/ for a complete list. +Install corpora using nltk.download(). + +Corpus Reader Functions +======================= +Each corpus module defines one or more "corpus reader functions", +which can be used to read documents from that corpus. These functions +take an argument, ``item``, which is used to indicate which document +should be read from the corpus: + +- If ``item`` is one of the unique identifiers listed in the corpus + module's ``items`` variable, then the corresponding document will + be loaded from the NLTK corpus package. +- If ``item`` is a filename, then that file will be read. + +Additionally, corpus reader functions can be given lists of item +names; in which case, they will return a concatenation of the +corresponding documents. + +Corpus reader functions are named based on the type of information +they return. Some common examples, and their return types, are: + +- words(): list of str +- sents(): list of (list of str) +- paras(): list of (list of (list of str)) +- tagged_words(): list of (str,str) tuple +- tagged_sents(): list of (list of (str,str)) +- tagged_paras(): list of (list of (list of (str,str))) +- chunked_sents(): list of (Tree w/ (str,str) leaves) +- parsed_sents(): list of (Tree with str leaves) +- parsed_paras(): list of (list of (Tree with str leaves)) +- xml(): A single xml ElementTree +- raw(): unprocessed corpus contents + +For example, to read a list of the words in the Brown Corpus, use +``nltk.corpus.brown.words()``: + + >>> from nltk.corpus import brown + >>> print(", ".join(brown.words())) # doctest: +ELLIPSIS + The, Fulton, County, Grand, Jury, said, ... 
+ +""" + +import re + +from nltk.corpus.reader import * +from nltk.corpus.util import LazyCorpusLoader +from nltk.tokenize import RegexpTokenizer + +abc: PlaintextCorpusReader = LazyCorpusLoader( + "abc", + PlaintextCorpusReader, + r"(?!\.).*\.txt", + encoding=[("science", "latin_1"), ("rural", "utf8")], +) +alpino: AlpinoCorpusReader = LazyCorpusLoader( + "alpino", AlpinoCorpusReader, tagset="alpino" +) +bcp47: BCP47CorpusReader = LazyCorpusLoader( + "bcp47", BCP47CorpusReader, r"(cldr|iana)/*" +) +brown: CategorizedTaggedCorpusReader = LazyCorpusLoader( + "brown", + CategorizedTaggedCorpusReader, + r"c[a-z]\d\d", + cat_file="cats.txt", + tagset="brown", + encoding="ascii", +) +cess_cat: BracketParseCorpusReader = LazyCorpusLoader( + "cess_cat", + BracketParseCorpusReader, + r"(?!\.).*\.tbf", + tagset="unknown", + encoding="ISO-8859-15", +) +cess_esp: BracketParseCorpusReader = LazyCorpusLoader( + "cess_esp", + BracketParseCorpusReader, + r"(?!\.).*\.tbf", + tagset="unknown", + encoding="ISO-8859-15", +) +cmudict: CMUDictCorpusReader = LazyCorpusLoader( + "cmudict", CMUDictCorpusReader, ["cmudict"] +) +comtrans: AlignedCorpusReader = LazyCorpusLoader( + "comtrans", AlignedCorpusReader, r"(?!\.).*\.txt" +) +comparative_sentences: ComparativeSentencesCorpusReader = LazyCorpusLoader( + "comparative_sentences", + ComparativeSentencesCorpusReader, + r"labeledSentences\.txt", + encoding="latin-1", +) +conll2000: ConllChunkCorpusReader = LazyCorpusLoader( + "conll2000", + ConllChunkCorpusReader, + ["train.txt", "test.txt"], + ("NP", "VP", "PP"), + tagset="wsj", + encoding="ascii", +) +conll2002: ConllChunkCorpusReader = LazyCorpusLoader( + "conll2002", + ConllChunkCorpusReader, + r".*\.(test|train).*", + ("LOC", "PER", "ORG", "MISC"), + encoding="utf-8", +) +conll2007: DependencyCorpusReader = LazyCorpusLoader( + "conll2007", + DependencyCorpusReader, + r".*\.(test|train).*", + encoding=[("eus", "ISO-8859-2"), ("esp", "utf8")], +) +crubadan: CrubadanCorpusReader = LazyCorpusLoader( + "crubadan", CrubadanCorpusReader, r".*\.txt" +) +dependency_treebank: DependencyCorpusReader = LazyCorpusLoader( + "dependency_treebank", DependencyCorpusReader, r".*\.dp", encoding="ascii" +) +extended_omw: CorpusReader = LazyCorpusLoader( + "extended_omw", CorpusReader, r".*/wn-[a-z\-]*\.tab", encoding="utf8" +) +floresta: BracketParseCorpusReader = LazyCorpusLoader( + "floresta", + BracketParseCorpusReader, + r"(?!\.).*\.ptb", + "#", + tagset="unknown", + encoding="ISO-8859-15", +) +framenet15: FramenetCorpusReader = LazyCorpusLoader( + "framenet_v15", + FramenetCorpusReader, + [ + "frRelation.xml", + "frameIndex.xml", + "fulltextIndex.xml", + "luIndex.xml", + "semTypes.xml", + ], +) +framenet: FramenetCorpusReader = LazyCorpusLoader( + "framenet_v17", + FramenetCorpusReader, + [ + "frRelation.xml", + "frameIndex.xml", + "fulltextIndex.xml", + "luIndex.xml", + "semTypes.xml", + ], +) +gazetteers: WordListCorpusReader = LazyCorpusLoader( + "gazetteers", WordListCorpusReader, r"(?!LICENSE|\.).*\.txt", encoding="ISO-8859-2" +) +genesis: PlaintextCorpusReader = LazyCorpusLoader( + "genesis", + PlaintextCorpusReader, + r"(?!\.).*\.txt", + encoding=[ + ("finnish|french|german", "latin_1"), + ("swedish", "cp865"), + (".*", "utf_8"), + ], +) +gutenberg: PlaintextCorpusReader = LazyCorpusLoader( + "gutenberg", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1" +) +ieer: IEERCorpusReader = LazyCorpusLoader("ieer", IEERCorpusReader, r"(?!README|\.).*") +inaugural: PlaintextCorpusReader = LazyCorpusLoader( + 
"inaugural", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1" +) +# [XX] This should probably just use TaggedCorpusReader: +indian: IndianCorpusReader = LazyCorpusLoader( + "indian", IndianCorpusReader, r"(?!\.).*\.pos", tagset="unknown", encoding="utf8" +) + +jeita: ChasenCorpusReader = LazyCorpusLoader( + "jeita", ChasenCorpusReader, r".*\.chasen", encoding="utf-8" +) +knbc: KNBCorpusReader = LazyCorpusLoader( + "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp" +) +lin_thesaurus: LinThesaurusCorpusReader = LazyCorpusLoader( + "lin_thesaurus", LinThesaurusCorpusReader, r".*\.lsp" +) +mac_morpho: MacMorphoCorpusReader = LazyCorpusLoader( + "mac_morpho", + MacMorphoCorpusReader, + r"(?!\.).*\.txt", + tagset="unknown", + encoding="latin-1", +) +machado: PortugueseCategorizedPlaintextCorpusReader = LazyCorpusLoader( + "machado", + PortugueseCategorizedPlaintextCorpusReader, + r"(?!\.).*\.txt", + cat_pattern=r"([a-z]*)/.*", + encoding="latin-1", +) +masc_tagged: CategorizedTaggedCorpusReader = LazyCorpusLoader( + "masc_tagged", + CategorizedTaggedCorpusReader, + r"(spoken|written)/.*\.txt", + cat_file="categories.txt", + tagset="wsj", + encoding="utf-8", + sep="_", +) +movie_reviews: CategorizedPlaintextCorpusReader = LazyCorpusLoader( + "movie_reviews", + CategorizedPlaintextCorpusReader, + r"(?!\.).*\.txt", + cat_pattern=r"(neg|pos)/.*", + encoding="ascii", +) +multext_east: MTECorpusReader = LazyCorpusLoader( + "mte_teip5", MTECorpusReader, r"(oana).*\.xml", encoding="utf-8" +) +names: WordListCorpusReader = LazyCorpusLoader( + "names", WordListCorpusReader, r"(?!\.).*\.txt", encoding="ascii" +) +nps_chat: NPSChatCorpusReader = LazyCorpusLoader( + "nps_chat", NPSChatCorpusReader, r"(?!README|\.).*\.xml", tagset="wsj" +) +opinion_lexicon: OpinionLexiconCorpusReader = LazyCorpusLoader( + "opinion_lexicon", + OpinionLexiconCorpusReader, + r"(\w+)\-words\.txt", + encoding="ISO-8859-2", +) +ppattach: PPAttachmentCorpusReader = LazyCorpusLoader( + "ppattach", PPAttachmentCorpusReader, ["training", "test", "devset"] +) +product_reviews_1: ReviewsCorpusReader = LazyCorpusLoader( + "product_reviews_1", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8" +) +product_reviews_2: ReviewsCorpusReader = LazyCorpusLoader( + "product_reviews_2", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8" +) +pros_cons: ProsConsCorpusReader = LazyCorpusLoader( + "pros_cons", + ProsConsCorpusReader, + r"Integrated(Cons|Pros)\.txt", + cat_pattern=r"Integrated(Cons|Pros)\.txt", + encoding="ISO-8859-2", +) +ptb: CategorizedBracketParseCorpusReader = ( + LazyCorpusLoader( # Penn Treebank v3: WSJ and Brown portions + "ptb", + CategorizedBracketParseCorpusReader, + r"(WSJ/\d\d/WSJ_\d\d|BROWN/C[A-Z]/C[A-Z])\d\d.MRG", + cat_file="allcats.txt", + tagset="wsj", + ) +) +qc: StringCategoryCorpusReader = LazyCorpusLoader( + "qc", StringCategoryCorpusReader, ["train.txt", "test.txt"], encoding="ISO-8859-2" +) +reuters: CategorizedPlaintextCorpusReader = LazyCorpusLoader( + "reuters", + CategorizedPlaintextCorpusReader, + "(training|test).*", + cat_file="cats.txt", + encoding="ISO-8859-2", +) +rte: RTECorpusReader = LazyCorpusLoader("rte", RTECorpusReader, r"(?!\.).*\.xml") +senseval: SensevalCorpusReader = LazyCorpusLoader( + "senseval", SensevalCorpusReader, r"(?!\.).*\.pos" +) +sentence_polarity: CategorizedSentencesCorpusReader = LazyCorpusLoader( + "sentence_polarity", + CategorizedSentencesCorpusReader, + r"rt-polarity\.(neg|pos)", + cat_pattern=r"rt-polarity\.(neg|pos)", + 
encoding="utf-8", +) +sentiwordnet: SentiWordNetCorpusReader = LazyCorpusLoader( + "sentiwordnet", SentiWordNetCorpusReader, "SentiWordNet_3.0.0.txt", encoding="utf-8" +) +shakespeare: XMLCorpusReader = LazyCorpusLoader( + "shakespeare", XMLCorpusReader, r"(?!\.).*\.xml" +) +sinica_treebank: SinicaTreebankCorpusReader = LazyCorpusLoader( + "sinica_treebank", + SinicaTreebankCorpusReader, + ["parsed"], + tagset="unknown", + encoding="utf-8", +) +state_union: PlaintextCorpusReader = LazyCorpusLoader( + "state_union", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="ISO-8859-2" +) +stopwords: WordListCorpusReader = LazyCorpusLoader( + "stopwords", WordListCorpusReader, r"(?!README|\.).*", encoding="utf8" +) +subjectivity: CategorizedSentencesCorpusReader = LazyCorpusLoader( + "subjectivity", + CategorizedSentencesCorpusReader, + r"(quote.tok.gt9|plot.tok.gt9)\.5000", + cat_map={"quote.tok.gt9.5000": ["subj"], "plot.tok.gt9.5000": ["obj"]}, + encoding="latin-1", +) +swadesh: SwadeshCorpusReader = LazyCorpusLoader( + "swadesh", SwadeshCorpusReader, r"(?!README|\.).*", encoding="utf8" +) +swadesh110: PanlexSwadeshCorpusReader = LazyCorpusLoader( + "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh110/.*\.txt", encoding="utf8" +) +swadesh207: PanlexSwadeshCorpusReader = LazyCorpusLoader( + "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh207/.*\.txt", encoding="utf8" +) +switchboard: SwitchboardCorpusReader = LazyCorpusLoader( + "switchboard", SwitchboardCorpusReader, tagset="wsj" +) +timit: TimitCorpusReader = LazyCorpusLoader("timit", TimitCorpusReader) +timit_tagged: TimitTaggedCorpusReader = LazyCorpusLoader( + "timit", TimitTaggedCorpusReader, r".+\.tags", tagset="wsj", encoding="ascii" +) +toolbox: ToolboxCorpusReader = LazyCorpusLoader( + "toolbox", ToolboxCorpusReader, r"(?!.*(README|\.)).*\.(dic|txt)" +) +treebank: BracketParseCorpusReader = LazyCorpusLoader( + "treebank/combined", + BracketParseCorpusReader, + r"wsj_.*\.mrg", + tagset="wsj", + encoding="ascii", +) +treebank_chunk: ChunkedCorpusReader = LazyCorpusLoader( + "treebank/tagged", + ChunkedCorpusReader, + r"wsj_.*\.pos", + sent_tokenizer=RegexpTokenizer(r"(?<=/\.)\s*(?![^\[]*\])", gaps=True), + para_block_reader=tagged_treebank_para_block_reader, + tagset="wsj", + encoding="ascii", +) +treebank_raw: PlaintextCorpusReader = LazyCorpusLoader( + "treebank/raw", PlaintextCorpusReader, r"wsj_.*", encoding="ISO-8859-2" +) +twitter_samples: TwitterCorpusReader = LazyCorpusLoader( + "twitter_samples", TwitterCorpusReader, r".*\.json" +) +udhr: UdhrCorpusReader = LazyCorpusLoader("udhr", UdhrCorpusReader) +udhr2: PlaintextCorpusReader = LazyCorpusLoader( + "udhr2", PlaintextCorpusReader, r".*\.txt", encoding="utf8" +) +universal_treebanks: ConllCorpusReader = LazyCorpusLoader( + "universal_treebanks_v20", + ConllCorpusReader, + r".*\.conll", + columntypes=( + "ignore", + "words", + "ignore", + "ignore", + "pos", + "ignore", + "ignore", + "ignore", + "ignore", + "ignore", + ), +) +verbnet: VerbnetCorpusReader = LazyCorpusLoader( + "verbnet", VerbnetCorpusReader, r"(?!\.).*\.xml" +) +webtext: PlaintextCorpusReader = LazyCorpusLoader( + "webtext", PlaintextCorpusReader, r"(?!README|\.).*\.txt", encoding="ISO-8859-2" +) +wordnet: WordNetCorpusReader = LazyCorpusLoader( + "wordnet", + WordNetCorpusReader, + LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"), +) +wordnet31: WordNetCorpusReader = LazyCorpusLoader( + "wordnet31", + WordNetCorpusReader, + LazyCorpusLoader("omw-1.4", CorpusReader, 
r".*/wn-data-.*\.tab", encoding="utf8"), +) +wordnet2021: WordNetCorpusReader = LazyCorpusLoader( + "wordnet2021", + WordNetCorpusReader, + LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"), +) +wordnet_ic: WordNetICCorpusReader = LazyCorpusLoader( + "wordnet_ic", WordNetICCorpusReader, r".*\.dat" +) +words: WordListCorpusReader = LazyCorpusLoader( + "words", WordListCorpusReader, r"(?!README|\.).*", encoding="ascii" +) + +# defined after treebank +propbank: PropbankCorpusReader = LazyCorpusLoader( + "propbank", + PropbankCorpusReader, + "prop.txt", + r"frames/.*\.xml", + "verbs.txt", + lambda filename: re.sub(r"^wsj/\d\d/", "", filename), + treebank, +) # Must be defined *after* treebank corpus. +nombank: NombankCorpusReader = LazyCorpusLoader( + "nombank.1.0", + NombankCorpusReader, + "nombank.1.0", + r"frames/.*\.xml", + "nombank.1.0.words", + lambda filename: re.sub(r"^wsj/\d\d/", "", filename), + treebank, +) # Must be defined *after* treebank corpus. +propbank_ptb: PropbankCorpusReader = LazyCorpusLoader( + "propbank", + PropbankCorpusReader, + "prop.txt", + r"frames/.*\.xml", + "verbs.txt", + lambda filename: filename.upper(), + ptb, +) # Must be defined *after* ptb corpus. +nombank_ptb: NombankCorpusReader = LazyCorpusLoader( + "nombank.1.0", + NombankCorpusReader, + "nombank.1.0", + r"frames/.*\.xml", + "nombank.1.0.words", + lambda filename: filename.upper(), + ptb, +) # Must be defined *after* ptb corpus. +semcor: SemcorCorpusReader = LazyCorpusLoader( + "semcor", SemcorCorpusReader, r"brown./tagfiles/br-.*\.xml", wordnet +) # Must be defined *after* wordnet corpus. + +nonbreaking_prefixes: NonbreakingPrefixesCorpusReader = LazyCorpusLoader( + "nonbreaking_prefixes", + NonbreakingPrefixesCorpusReader, + r"(?!README|\.).*", + encoding="utf8", +) +perluniprops: UnicharsCorpusReader = LazyCorpusLoader( + "perluniprops", + UnicharsCorpusReader, + r"(?!README|\.).*", + nltk_data_subdir="misc", + encoding="utf8", +) + +# mwa_ppdb = LazyCorpusLoader( +# 'mwa_ppdb', MWAPPDBCorpusReader, r'(?!README|\.).*', nltk_data_subdir='misc', encoding='utf8') + +# See https://github.com/nltk/nltk/issues/1579 +# and https://github.com/nltk/nltk/issues/1716 +# +# pl196x = LazyCorpusLoader( +# 'pl196x', Pl196xCorpusReader, r'[a-z]-.*\.xml', +# cat_file='cats.txt', textid_file='textids.txt', encoding='utf8') +# +# ipipan = LazyCorpusLoader( +# 'ipipan', IPIPANCorpusReader, r'(?!\.).*morph\.xml') +# +# nkjp = LazyCorpusLoader( +# 'nkjp', NKJPCorpusReader, r'', encoding='utf8') +# +# panlex_lite = LazyCorpusLoader( +# 'panlex_lite', PanLexLiteCorpusReader) +# +# ycoe = LazyCorpusLoader( +# 'ycoe', YCOECorpusReader) +# +# corpus not available with NLTK; these lines caused help(nltk.corpus) to break +# hebrew_treebank = LazyCorpusLoader( +# 'hebrew_treebank', BracketParseCorpusReader, r'.*\.txt') + +# FIXME: override any imported demo from various corpora, see https://github.com/nltk/nltk/issues/2116 +def demo(): + # This is out-of-date: + abc.demo() + brown.demo() + # chat80.demo() + cmudict.demo() + conll2000.demo() + conll2002.demo() + genesis.demo() + gutenberg.demo() + ieer.demo() + inaugural.demo() + indian.demo() + names.demo() + ppattach.demo() + senseval.demo() + shakespeare.demo() + sinica_treebank.demo() + state_union.demo() + stopwords.demo() + timit.demo() + toolbox.demo() + treebank.demo() + udhr.demo() + webtext.demo() + words.demo() + + +# ycoe.demo() + +if __name__ == "__main__": + # demo() + pass diff --git 
a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py new file mode 100644 index 0000000000000000000000000000000000000000..2a32ecc86f7b7671445effc2801870c3fc10f295 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: Europarl Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nitin Madnani +# URL: +# For license information, see LICENSE.TXT + +import re + +from nltk.corpus.reader import * +from nltk.corpus.util import LazyCorpusLoader + +# Create a new corpus reader instance for each European language +danish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/danish", EuroparlCorpusReader, r"ep-.*\.da", encoding="utf-8" +) + +dutch: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/dutch", EuroparlCorpusReader, r"ep-.*\.nl", encoding="utf-8" +) + +english: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/english", EuroparlCorpusReader, r"ep-.*\.en", encoding="utf-8" +) + +finnish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/finnish", EuroparlCorpusReader, r"ep-.*\.fi", encoding="utf-8" +) + +french: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/french", EuroparlCorpusReader, r"ep-.*\.fr", encoding="utf-8" +) + +german: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/german", EuroparlCorpusReader, r"ep-.*\.de", encoding="utf-8" +) + +greek: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/greek", EuroparlCorpusReader, r"ep-.*\.el", encoding="utf-8" +) + +italian: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/italian", EuroparlCorpusReader, r"ep-.*\.it", encoding="utf-8" +) + +portuguese: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/portuguese", EuroparlCorpusReader, r"ep-.*\.pt", encoding="utf-8" +) + +spanish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/spanish", EuroparlCorpusReader, r"ep-.*\.es", encoding="utf-8" +) + +swedish: EuroparlCorpusReader = LazyCorpusLoader( + "europarl_raw/swedish", EuroparlCorpusReader, r"ep-.*\.sv", encoding="utf-8" +) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a5274f09dde2db30aa213800647e19a7d8201981 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py @@ -0,0 +1,186 @@ +# Natural Language Toolkit: Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK corpus readers. The modules in this package provide functions +that can be used to read corpus fileids in a variety of formats. These +functions can be used to read both the corpus fileids that are +distributed in the NLTK corpus package, and corpus fileids that are part +of external corpora. + +Corpus Reader Functions +======================= +Each corpus module defines one or more "corpus reader functions", +which can be used to read documents from that corpus. These functions +take an argument, ``item``, which is used to indicate which document +should be read from the corpus: + +- If ``item`` is one of the unique identifiers listed in the corpus + module's ``items`` variable, then the corresponding document will + be loaded from the NLTK corpus package. 
+- If ``item`` is a fileid, then that file will be read. + +Additionally, corpus reader functions can be given lists of item +names; in which case, they will return a concatenation of the +corresponding documents. + +Corpus reader functions are named based on the type of information +they return. Some common examples, and their return types, are: + +- words(): list of str +- sents(): list of (list of str) +- paras(): list of (list of (list of str)) +- tagged_words(): list of (str,str) tuple +- tagged_sents(): list of (list of (str,str)) +- tagged_paras(): list of (list of (list of (str,str))) +- chunked_sents(): list of (Tree w/ (str,str) leaves) +- parsed_sents(): list of (Tree with str leaves) +- parsed_paras(): list of (list of (Tree with str leaves)) +- xml(): A single xml ElementTree +- raw(): unprocessed corpus contents + +For example, to read a list of the words in the Brown Corpus, use +``nltk.corpus.brown.words()``: + + >>> from nltk.corpus import brown + >>> print(", ".join(brown.words()[:6])) # only first 6 words + The, Fulton, County, Grand, Jury, said + +isort:skip_file +""" + +from nltk.corpus.reader.plaintext import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.api import * +from nltk.corpus.reader.tagged import * +from nltk.corpus.reader.cmudict import * +from nltk.corpus.reader.conll import * +from nltk.corpus.reader.chunked import * +from nltk.corpus.reader.wordlist import * +from nltk.corpus.reader.xmldocs import * +from nltk.corpus.reader.ppattach import * +from nltk.corpus.reader.senseval import * +from nltk.corpus.reader.ieer import * +from nltk.corpus.reader.sinica_treebank import * +from nltk.corpus.reader.bracket_parse import * +from nltk.corpus.reader.indian import * +from nltk.corpus.reader.toolbox import * +from nltk.corpus.reader.timit import * +from nltk.corpus.reader.ycoe import * +from nltk.corpus.reader.rte import * +from nltk.corpus.reader.string_category import * +from nltk.corpus.reader.propbank import * +from nltk.corpus.reader.verbnet import * +from nltk.corpus.reader.bnc import * +from nltk.corpus.reader.nps_chat import * +from nltk.corpus.reader.wordnet import * +from nltk.corpus.reader.switchboard import * +from nltk.corpus.reader.dependency import * +from nltk.corpus.reader.nombank import * +from nltk.corpus.reader.ipipan import * +from nltk.corpus.reader.pl196x import * +from nltk.corpus.reader.knbc import * +from nltk.corpus.reader.chasen import * +from nltk.corpus.reader.childes import * +from nltk.corpus.reader.aligned import * +from nltk.corpus.reader.lin import * +from nltk.corpus.reader.semcor import * +from nltk.corpus.reader.framenet import * +from nltk.corpus.reader.udhr import * +from nltk.corpus.reader.bnc import * +from nltk.corpus.reader.sentiwordnet import * +from nltk.corpus.reader.twitter import * +from nltk.corpus.reader.nkjp import * +from nltk.corpus.reader.crubadan import * +from nltk.corpus.reader.mte import * +from nltk.corpus.reader.reviews import * +from nltk.corpus.reader.opinion_lexicon import * +from nltk.corpus.reader.pros_cons import * +from nltk.corpus.reader.categorized_sents import * +from nltk.corpus.reader.comparative_sents import * +from nltk.corpus.reader.panlex_lite import * +from nltk.corpus.reader.panlex_swadesh import * +from nltk.corpus.reader.bcp47 import * + +# Make sure that nltk.corpus.reader.bracket_parse gives the module, not +# the function bracket_parse() defined in nltk.tree: +from nltk.corpus.reader import bracket_parse + +__all__ = [ + "CorpusReader", + 
"CategorizedCorpusReader", + "PlaintextCorpusReader", + "find_corpus_fileids", + "TaggedCorpusReader", + "CMUDictCorpusReader", + "ConllChunkCorpusReader", + "WordListCorpusReader", + "PPAttachmentCorpusReader", + "SensevalCorpusReader", + "IEERCorpusReader", + "ChunkedCorpusReader", + "SinicaTreebankCorpusReader", + "BracketParseCorpusReader", + "IndianCorpusReader", + "ToolboxCorpusReader", + "TimitCorpusReader", + "YCOECorpusReader", + "MacMorphoCorpusReader", + "SyntaxCorpusReader", + "AlpinoCorpusReader", + "RTECorpusReader", + "StringCategoryCorpusReader", + "EuroparlCorpusReader", + "CategorizedBracketParseCorpusReader", + "CategorizedTaggedCorpusReader", + "CategorizedPlaintextCorpusReader", + "PortugueseCategorizedPlaintextCorpusReader", + "tagged_treebank_para_block_reader", + "PropbankCorpusReader", + "VerbnetCorpusReader", + "BNCCorpusReader", + "ConllCorpusReader", + "XMLCorpusReader", + "NPSChatCorpusReader", + "SwadeshCorpusReader", + "WordNetCorpusReader", + "WordNetICCorpusReader", + "SwitchboardCorpusReader", + "DependencyCorpusReader", + "NombankCorpusReader", + "IPIPANCorpusReader", + "Pl196xCorpusReader", + "TEICorpusView", + "KNBCorpusReader", + "ChasenCorpusReader", + "CHILDESCorpusReader", + "AlignedCorpusReader", + "TimitTaggedCorpusReader", + "LinThesaurusCorpusReader", + "SemcorCorpusReader", + "FramenetCorpusReader", + "UdhrCorpusReader", + "BNCCorpusReader", + "SentiWordNetCorpusReader", + "SentiSynset", + "TwitterCorpusReader", + "NKJPCorpusReader", + "CrubadanCorpusReader", + "MTECorpusReader", + "ReviewsCorpusReader", + "OpinionLexiconCorpusReader", + "ProsConsCorpusReader", + "CategorizedSentencesCorpusReader", + "ComparativeSentencesCorpusReader", + "PanLexLiteCorpusReader", + "NonbreakingPrefixesCorpusReader", + "UnicharsCorpusReader", + "MWAPPDBCorpusReader", + "PanlexSwadeshCorpusReader", + "BCP47CorpusReader", +] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py new file mode 100644 index 0000000000000000000000000000000000000000..93caf6233b5d1ee4d66eff0009a0d73fceb67904 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py @@ -0,0 +1,154 @@ +# Natural Language Toolkit: Aligned Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# Author: Steven Bird +# For license information, see LICENSE.TXT + +from nltk.corpus.reader.api import CorpusReader +from nltk.corpus.reader.util import ( + StreamBackedCorpusView, + concat, + read_alignedsent_block, +) +from nltk.tokenize import RegexpTokenizer, WhitespaceTokenizer +from nltk.translate import AlignedSent, Alignment + + +class AlignedCorpusReader(CorpusReader): + """ + Reader for corpora of word-aligned sentences. Tokens are assumed + to be separated by whitespace. Sentences begin on separate lines. + """ + + def __init__( + self, + root, + fileids, + sep="/", + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + alignedsent_block_reader=read_alignedsent_block, + encoding="latin1", + ): + """ + Construct a new Aligned Corpus reader for a set of documents + located at the given root directory. Example usage: + + >>> root = '/...path to corpus.../' + >>> reader = AlignedCorpusReader(root, '.*', '.txt') # doctest: +SKIP + + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. 
+ """ + CorpusReader.__init__(self, root, fileids, encoding) + self._sep = sep + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._alignedsent_block_reader = alignedsent_block_reader + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + AlignedSentCorpusView( + fileid, + enc, + False, + False, + self._word_tokenizer, + self._sent_tokenizer, + self._alignedsent_block_reader, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + """ + return concat( + [ + AlignedSentCorpusView( + fileid, + enc, + False, + True, + self._word_tokenizer, + self._sent_tokenizer, + self._alignedsent_block_reader, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def aligned_sents(self, fileids=None): + """ + :return: the given file(s) as a list of AlignedSent objects. + :rtype: list(AlignedSent) + """ + return concat( + [ + AlignedSentCorpusView( + fileid, + enc, + True, + True, + self._word_tokenizer, + self._sent_tokenizer, + self._alignedsent_block_reader, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class AlignedSentCorpusView(StreamBackedCorpusView): + """ + A specialized corpus view for aligned sentences. + ``AlignedSentCorpusView`` objects are typically created by + ``AlignedCorpusReader`` (not directly by nltk users). + """ + + def __init__( + self, + corpus_file, + encoding, + aligned, + group_by_sent, + word_tokenizer, + sent_tokenizer, + alignedsent_block_reader, + ): + self._aligned = aligned + self._group_by_sent = group_by_sent + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._alignedsent_block_reader = alignedsent_block_reader + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + block = [ + self._word_tokenizer.tokenize(sent_str) + for alignedsent_str in self._alignedsent_block_reader(stream) + for sent_str in self._sent_tokenizer.tokenize(alignedsent_str) + ] + if self._aligned: + block[2] = Alignment.fromstring( + " ".join(block[2]) + ) # kludge; we shouldn't have tokenized the alignment string + block = [AlignedSent(*block)] + elif self._group_by_sent: + block = [block[0]] + else: + block = block[0] + + return block diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/api.py new file mode 100644 index 0000000000000000000000000000000000000000..cbe80d902ff8daa5b2a94fcccbc5b050f2d36324 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/api.py @@ -0,0 +1,516 @@ +# Natural Language Toolkit: API for Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +API for corpus readers. +""" + +import os +import re +from collections import defaultdict +from itertools import chain + +from nltk.corpus.reader.util import * +from nltk.data import FileSystemPathPointer, PathPointer, ZipFilePathPointer + + +class CorpusReader: + """ + A base class for "corpus reader" classes, each of which can be + used to read a specific corpus format. 
Each individual corpus + reader instance is used to read a specific corpus, consisting of + one or more files under a common root directory. Each file is + identified by its ``file identifier``, which is the relative path + to the file from the root directory. + + A separate subclass is defined for each corpus format. These + subclasses define one or more methods that provide 'views' on the + corpus contents, such as ``words()`` (for a list of words) and + ``parsed_sents()`` (for a list of parsed sentences). Called with + no arguments, these methods will return the contents of the entire + corpus. For most corpora, these methods define one or more + selection arguments, such as ``fileids`` or ``categories``, which can + be used to select which portion of the corpus should be returned. + """ + + def __init__(self, root, fileids, encoding="utf8", tagset=None): + """ + :type root: PathPointer or str + :param root: A path pointer identifying the root directory for + this corpus. If a string is specified, then it will be + converted to a ``PathPointer`` automatically. + :param fileids: A list of the files that make up this corpus. + This list can either be specified explicitly, as a list of + strings; or implicitly, as a regular expression over file + paths. The absolute path for each file will be constructed + by joining the reader's root to each file name. + :param encoding: The default unicode encoding for the files + that make up the corpus. The value of ``encoding`` can be any + of the following: + + - A string: ``encoding`` is the encoding name for all files. + - A dictionary: ``encoding[file_id]`` is the encoding + name for the file whose identifier is ``file_id``. If + ``file_id`` is not in ``encoding``, then the file + contents will be processed using non-unicode byte strings. + - A list: ``encoding`` should be a list of ``(regexp, encoding)`` + tuples. The encoding for a file whose identifier is ``file_id`` + will be the ``encoding`` value for the first tuple whose + ``regexp`` matches the ``file_id``. If no tuple's ``regexp`` + matches the ``file_id``, the file contents will be processed + using non-unicode byte strings. + - None: the file contents of all files will be + processed using non-unicode byte strings. + :param tagset: The name of the tagset used by this corpus, to be used + for normalizing or converting the POS tags returned by the + ``tagged_...()`` methods. + """ + # Convert the root to a path pointer, if necessary. + if isinstance(root, str) and not isinstance(root, PathPointer): + m = re.match(r"(.*\.zip)/?(.*)$|", root) + zipfile, zipentry = m.groups() + if zipfile: + root = ZipFilePathPointer(zipfile, zipentry) + else: + root = FileSystemPathPointer(root) + elif not isinstance(root, PathPointer): + raise TypeError("CorpusReader: expected a string or a PathPointer") + + # If `fileids` is a regexp, then expand it. + if isinstance(fileids, str): + fileids = find_corpus_fileids(root, fileids) + + self._fileids = fileids + """A list of the relative paths for the fileids that make up + this corpus.""" + + self._root = root + """The root directory for this corpus.""" + + self._readme = "README" + self._license = "LICENSE" + self._citation = "citation.bib" + + # If encoding was specified as a list of regexps, then convert + # it to a dictionary. 
+ if isinstance(encoding, list): + encoding_dict = {} + for fileid in self._fileids: + for x in encoding: + (regexp, enc) = x + if re.match(regexp, fileid): + encoding_dict[fileid] = enc + break + encoding = encoding_dict + + self._encoding = encoding + """The default unicode encoding for the fileids that make up + this corpus. If ``encoding`` is None, then the file + contents are processed using byte strings.""" + self._tagset = tagset + + def __repr__(self): + if isinstance(self._root, ZipFilePathPointer): + path = f"{self._root.zipfile.filename}/{self._root.entry}" + else: + path = "%s" % self._root.path + return f"<{self.__class__.__name__} in {path!r}>" + + def ensure_loaded(self): + """ + Load this corpus (if it has not already been loaded). This is + used by LazyCorpusLoader as a simple method that can be used to + make sure a corpus is loaded -- e.g., in case a user wants to + do help(some_corpus). + """ + pass # no need to actually do anything. + + def readme(self): + """ + Return the contents of the corpus README file, if it exists. + """ + with self.open(self._readme) as f: + return f.read() + + def license(self): + """ + Return the contents of the corpus LICENSE file, if it exists. + """ + with self.open(self._license) as f: + return f.read() + + def citation(self): + """ + Return the contents of the corpus citation.bib file, if it exists. + """ + with self.open(self._citation) as f: + return f.read() + + def fileids(self): + """ + Return a list of file identifiers for the fileids that make up + this corpus. + """ + return self._fileids + + def abspath(self, fileid): + """ + Return the absolute path for the given file. + + :type fileid: str + :param fileid: The file identifier for the file whose path + should be returned. + :rtype: PathPointer + """ + return self._root.join(fileid) + + def abspaths(self, fileids=None, include_encoding=False, include_fileid=False): + """ + Return a list of the absolute paths for all fileids in this corpus; + or for the given list of fileids, if specified. + + :type fileids: None or str or list + :param fileids: Specifies the set of fileids for which paths should + be returned. Can be None, for all fileids; a list of + file identifiers, for a specified set of fileids; or a single + file identifier, for a single file. Note that the return + value is always a list of paths, even if ``fileids`` is a + single file identifier. + + :param include_encoding: If true, then return a list of + ``(path_pointer, encoding)`` tuples. + + :rtype: list(PathPointer) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + paths = [self._root.join(f) for f in fileids] + + if include_encoding and include_fileid: + return list(zip(paths, [self.encoding(f) for f in fileids], fileids)) + elif include_fileid: + return list(zip(paths, fileids)) + elif include_encoding: + return list(zip(paths, [self.encoding(f) for f in fileids])) + else: + return paths + + def raw(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a single string. + :rtype: str + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + contents = [] + for f in fileids: + with self.open(f) as fp: + contents.append(fp.read()) + return concat(contents) + + def open(self, file): + """ + Return an open stream that can be used to read the given file. 
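For example, a minimal sketch (assuming the ``gutenberg`` corpus has been downloaded; the line printed is illustrative):

    >>> from nltk.corpus import gutenberg  # doctest: +SKIP
    >>> with gutenberg.open('austen-emma.txt') as stream:  # doctest: +SKIP
    ...     print(stream.readline().strip())
    [Emma by Jane Austen 1816]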
+ If the file's encoding is not None, then the stream will + automatically decode the file's contents into unicode. + + :param file: The file identifier of the file to read. + """ + encoding = self.encoding(file) + stream = self._root.join(file).open(encoding) + return stream + + def encoding(self, file): + """ + Return the unicode encoding for the given corpus file, if known. + If the encoding is unknown, or if the given file should be + processed using byte strings (str), then return None. + """ + if isinstance(self._encoding, dict): + return self._encoding.get(file) + else: + return self._encoding + + def _get_root(self): + return self._root + + root = property( + _get_root, + doc=""" + The directory where this corpus is stored. + + :type: PathPointer""", + ) + + +###################################################################### +# { Corpora containing categorized items +###################################################################### + + +class CategorizedCorpusReader: + """ + A mixin class used to aid in the implementation of corpus readers + for categorized corpora. This class defines the method + ``categories()``, which returns a list of the categories for the + corpus or for a specified set of fileids; and overrides ``fileids()`` + to take a ``categories`` argument, restricting the set of fileids to + be returned. + + Subclasses are expected to: + + - Call ``__init__()`` to set up the mapping. + + - Override all view methods to accept a ``categories`` parameter, + which can be used *instead* of the ``fileids`` parameter, to + select which fileids should be included in the returned view. + """ + + def __init__(self, kwargs): + """ + Initialize this mapping based on keyword arguments, as + follows: + + - cat_pattern: A regular expression pattern used to find the + category for each file identifier. The pattern will be + applied to each file identifier, and the first matching + group will be used as the category label for that file. + + - cat_map: A dictionary, mapping from file identifiers to + category labels. + + - cat_file: The name of a file that contains the mapping + from file identifiers to categories. The argument + ``cat_delimiter`` can be used to specify a delimiter. + + The corresponding argument will be deleted from ``kwargs``. If + more than one argument is specified, an exception will be + raised. + """ + self._f2c = None #: file-to-category mapping + self._c2f = None #: category-to-file mapping + + self._pattern = None #: regexp specifying the mapping + self._map = None #: dict specifying the mapping + self._file = None #: fileid of file containing the mapping + self._delimiter = None #: delimiter for ``self._file`` + + if "cat_pattern" in kwargs: + self._pattern = kwargs["cat_pattern"] + del kwargs["cat_pattern"] + elif "cat_map" in kwargs: + self._map = kwargs["cat_map"] + del kwargs["cat_map"] + elif "cat_file" in kwargs: + self._file = kwargs["cat_file"] + del kwargs["cat_file"] + if "cat_delimiter" in kwargs: + self._delimiter = kwargs["cat_delimiter"] + del kwargs["cat_delimiter"] + else: + raise ValueError( + "Expected keyword argument cat_pattern or " "cat_map or cat_file." + ) + + if "cat_pattern" in kwargs or "cat_map" in kwargs or "cat_file" in kwargs: + raise ValueError( + "Specify exactly one of: cat_pattern, " "cat_map, cat_file." 
+ ) + + def _init(self): + self._f2c = defaultdict(set) + self._c2f = defaultdict(set) + + if self._pattern is not None: + for file_id in self._fileids: + category = re.match(self._pattern, file_id).group(1) + self._add(file_id, category) + + elif self._map is not None: + for (file_id, categories) in self._map.items(): + for category in categories: + self._add(file_id, category) + + elif self._file is not None: + with self.open(self._file) as f: + for line in f.readlines(): + line = line.strip() + file_id, categories = line.split(self._delimiter, 1) + if file_id not in self.fileids(): + raise ValueError( + "In category mapping file %s: %s " + "not found" % (self._file, file_id) + ) + for category in categories.split(self._delimiter): + self._add(file_id, category) + + def _add(self, file_id, category): + self._f2c[file_id].add(category) + self._c2f[category].add(file_id) + + def categories(self, fileids=None): + """ + Return a list of the categories that are defined for this corpus, + or for the file(s) if it is given. + """ + if self._f2c is None: + self._init() + if fileids is None: + return sorted(self._c2f) + if isinstance(fileids, str): + fileids = [fileids] + return sorted(set.union(*(self._f2c[d] for d in fileids))) + + def fileids(self, categories=None): + """ + Return a list of file identifiers for the files that make up + this corpus, or that make up the given category(s) if specified. + """ + if categories is None: + return super().fileids() + elif isinstance(categories, str): + if self._f2c is None: + self._init() + if categories in self._c2f: + return sorted(self._c2f[categories]) + else: + raise ValueError("Category %s not found" % categories) + else: + if self._f2c is None: + self._init() + return sorted(set.union(*(self._c2f[c] for c in categories))) + + def _resolve(self, fileids, categories): + if fileids is not None and categories is not None: + raise ValueError("Specify fileids or categories, not both") + if categories is not None: + return self.fileids(categories) + else: + return fileids + + def raw(self, fileids=None, categories=None): + return super().raw(self._resolve(fileids, categories)) + + def words(self, fileids=None, categories=None): + return super().words(self._resolve(fileids, categories)) + + def sents(self, fileids=None, categories=None): + return super().sents(self._resolve(fileids, categories)) + + def paras(self, fileids=None, categories=None): + return super().paras(self._resolve(fileids, categories)) + + +###################################################################### +# { Treebank readers +###################################################################### + +# [xx] is it worth it to factor this out? +class SyntaxCorpusReader(CorpusReader): + """ + An abstract base class for reading corpora consisting of + syntactically parsed text. Subclasses should define: + + - ``__init__``, which specifies the location of the corpus + and a method for detecting the sentence blocks in corpus files. + - ``_read_block``, which reads a block from the input stream. + - ``_word``, which takes a block and returns a list of list of words. + - ``_tag``, which takes a block and returns a list of list of tagged + words. + - ``_parse``, which takes a block and returns a list of parsed + sentences. 
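The methods listed above are all that a concrete subclass has to supply. As a rough, hypothetical sketch (not a reader shipped with NLTK; the corpus path and file pattern are made up, and it assumes the ``read_line_block`` helper from ``nltk.corpus.reader.util``), a reader for a toy format with one bracketed parse per non-empty line might look like:

    from nltk.corpus.reader.api import SyntaxCorpusReader
    from nltk.corpus.reader.util import read_line_block
    from nltk.tree import Tree

    class OneParsePerLineReader(SyntaxCorpusReader):
        """Toy reader: every non-empty line holds one bracketed parse."""

        def _read_block(self, stream):
            # Reuse the line-block helper and drop blank lines, so that
            # _parse()/_word()/_tag() only ever see a bracketed string.
            return [line for line in read_line_block(stream) if line.strip()]

        def _parse(self, block):
            return Tree.fromstring(block)

        def _word(self, block):
            return self._parse(block).leaves()

        def _tag(self, block, tagset=None):
            return self._parse(block).pos()

    # Hypothetical usage:
    #   reader = OneParsePerLineReader('/path/to/corpus', r'.*\.psd')
    # after which parsed_sents(), sents(), words() and tagged_words()
    # come for free from SyntaxCorpusReader.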
+ """ + + def _parse(self, s): + raise NotImplementedError() + + def _word(self, s): + raise NotImplementedError() + + def _tag(self, s): + raise NotImplementedError() + + def _read_block(self, stream): + raise NotImplementedError() + + def parsed_sents(self, fileids=None): + reader = self._read_parsed_sent_block + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + def reader(stream): + return self._read_tagged_sent_block(stream, tagset) + + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + reader = self._read_sent_block + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + def reader(stream): + return self._read_tagged_word_block(stream, tagset) + + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def words(self, fileids=None): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_word_block, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + # ------------------------------------------------------------ + # { Block Readers + + def _read_word_block(self, stream): + return list(chain.from_iterable(self._read_sent_block(stream))) + + def _read_tagged_word_block(self, stream, tagset=None): + return list(chain.from_iterable(self._read_tagged_sent_block(stream, tagset))) + + def _read_sent_block(self, stream): + return list(filter(None, [self._word(t) for t in self._read_block(stream)])) + + def _read_tagged_sent_block(self, stream, tagset=None): + return list( + filter(None, [self._tag(t, tagset) for t in self._read_block(stream)]) + ) + + def _read_parsed_sent_block(self, stream): + return list(filter(None, [self._parse(t) for t in self._read_block(stream)])) + + # } End of Block Readers + # ------------------------------------------------------------ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py new file mode 100644 index 0000000000000000000000000000000000000000..429f52a65034f6faee531430a4b1d08aabe20103 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py @@ -0,0 +1,218 @@ +# Natural Language Toolkit: BCP-47 language tags +# +# Copyright (C) 2022-2023 NLTK Project +# Author: Eric Kafe +# URL: +# For license information, see LICENSE.TXT + +import re +from warnings import warn +from xml.etree import ElementTree as et + +from nltk.corpus.reader import CorpusReader + + +class BCP47CorpusReader(CorpusReader): + """ + Parse BCP-47 composite language tags + + Supports all the main subtags, and the 'u-sd' extension: + + >>> from nltk.corpus import bcp47 + >>> bcp47.name('oc-gascon-u-sd-fr64') + 'Occitan (post 1500): Gascon: Pyrénées-Atlantiques' + + Can load a conversion table to Wikidata Q-codes: + >>> bcp47.load_wiki_q() + >>> bcp47.wiki_q['en-GI-spanglis'] + 'Q79388' + + """ + + def __init__(self, root, fileids): + """Read the BCP-47 database""" + super().__init__(root, fileids) + self.langcode = {} + with self.open("iana/language-subtag-registry.txt") as fp: + self.db = self.data_dict(fp.read().split("%%\n")) + with 
self.open("cldr/common-subdivisions-en.xml") as fp: + self.subdiv = self.subdiv_dict( + et.parse(fp).iterfind("localeDisplayNames/subdivisions/subdivision") + ) + self.morphology() + + def load_wiki_q(self): + """Load conversion table to Wikidata Q-codes (only if needed)""" + with self.open("cldr/tools-cldr-rdf-external-entityToCode.tsv") as fp: + self.wiki_q = self.wiki_dict(fp.read().strip().split("\n")[1:]) + + def wiki_dict(self, lines): + """Convert Wikidata list of Q-codes to a BCP-47 dictionary""" + return { + pair[1]: pair[0].split("/")[-1] + for pair in [line.strip().split("\t") for line in lines] + } + + def subdiv_dict(self, subdivs): + """Convert the CLDR subdivisions list to a dictionary""" + return {sub.attrib["type"]: sub.text for sub in subdivs} + + def morphology(self): + self.casing = { + "language": str.lower, + "extlang": str.lower, + "script": str.title, + "region": str.upper, + "variant": str.lower, + } + dig = "[0-9]" + low = "[a-z]" + up = "[A-Z]" + alnum = "[a-zA-Z0-9]" + self.format = { + "language": re.compile(f"{low*3}?"), + "extlang": re.compile(f"{low*3}"), + "script": re.compile(f"{up}{low*3}"), + "region": re.compile(f"({up*2})|({dig*3})"), + "variant": re.compile(f"{alnum*4}{(alnum+'?')*4}"), + "singleton": re.compile(f"{low}"), + } + + def data_dict(self, records): + """Convert the BCP-47 language subtag registry to a dictionary""" + self.version = records[0].replace("File-Date:", "").strip() + dic = {} + dic["deprecated"] = {} + for label in [ + "language", + "extlang", + "script", + "region", + "variant", + "redundant", + "grandfathered", + ]: + dic["deprecated"][label] = {} + for record in records[1:]: + fields = [field.split(": ") for field in record.strip().split("\n")] + typ = fields[0][1] + tag = fields[1][1] + if typ not in dic: + dic[typ] = {} + subfields = {} + for field in fields[2:]: + if len(field) == 2: + [key, val] = field + if key not in subfields: + subfields[key] = [val] + else: # multiple value + subfields[key].append(val) + else: # multiline field + subfields[key][-1] += " " + field[0].strip() + if ( + "Deprecated" not in record + and typ == "language" + and key == "Description" + ): + self.langcode[subfields[key][-1]] = tag + for key in subfields: + if len(subfields[key]) == 1: # single value + subfields[key] = subfields[key][0] + if "Deprecated" in record: + dic["deprecated"][typ][tag] = subfields + else: + dic[typ][tag] = subfields + return dic + + def val2str(self, val): + """Return only first value""" + if type(val) == list: + # val = "/".join(val) # Concatenate all values + val = val[0] + return val + + def lang2str(self, lg_record): + """Concatenate subtag values""" + name = f"{lg_record['language']}" + for label in ["extlang", "script", "region", "variant", "extension"]: + if label in lg_record: + name += f": {lg_record[label]}" + return name + + def parse_tag(self, tag): + """Convert a BCP-47 tag to a dictionary of labelled subtags""" + subtags = tag.split("-") + lang = {} + labels = ["language", "extlang", "script", "region", "variant", "variant"] + while subtags and labels: + subtag = subtags.pop(0) + found = False + while labels: + label = labels.pop(0) + subtag = self.casing[label](subtag) + if self.format[label].fullmatch(subtag): + if subtag in self.db[label]: + found = True + valstr = self.val2str(self.db[label][subtag]["Description"]) + if label == "variant" and label in lang: + lang[label] += ": " + valstr + else: + lang[label] = valstr + break + elif subtag in self.db["deprecated"][label]: + found = True + note = 
f"The {subtag!r} {label} code is deprecated" + if "Preferred-Value" in self.db["deprecated"][label][subtag]: + prefer = self.db["deprecated"][label][subtag][ + "Preferred-Value" + ] + note += f"', prefer '{self.val2str(prefer)}'" + lang[label] = self.val2str( + self.db["deprecated"][label][subtag]["Description"] + ) + warn(note) + break + if not found: + if subtag == "u" and subtags[0] == "sd": # CLDR regional subdivisions + sd = subtags[1] + if sd in self.subdiv: + ext = self.subdiv[sd] + else: + ext = f"" + else: # other extension subtags are not supported yet + ext = f"{subtag}{''.join(['-'+ext for ext in subtags])}".lower() + if not self.format["singleton"].fullmatch(subtag): + ext = f"" + warn(ext) + lang["extension"] = ext + subtags = [] + return lang + + def name(self, tag): + """ + Convert a BCP-47 tag to a colon-separated string of subtag names + + >>> from nltk.corpus import bcp47 + >>> bcp47.name('ca-Latn-ES-valencia') + 'Catalan: Latin: Spain: Valencian' + + """ + for label in ["redundant", "grandfathered"]: + val = None + if tag in self.db[label]: + val = f"{self.db[label][tag]['Description']}" + note = f"The {tag!r} code is {label}" + elif tag in self.db["deprecated"][label]: + val = f"{self.db['deprecated'][label][tag]['Description']}" + note = f"The {tag!r} code is {label} and deprecated" + if "Preferred-Value" in self.db["deprecated"][label][tag]: + prefer = self.db["deprecated"][label][tag]["Preferred-Value"] + note += f", prefer {self.val2str(prefer)!r}" + if val: + warn(note) + return val + try: + return self.lang2str(self.parse_tag(tag)) + except: + warn(f"Tag {tag!r} was not recognized") + return None diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py new file mode 100644 index 0000000000000000000000000000000000000000..e7128bf843b5c24a59b10d8a0cf1f689592bae52 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py @@ -0,0 +1,265 @@ +# Natural Language Toolkit: Plaintext Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +"""Corpus reader for the XML version of the British National Corpus.""" + +from nltk.corpus.reader.util import concat +from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader, XMLCorpusView + + +class BNCCorpusReader(XMLCorpusReader): + r"""Corpus reader for the XML version of the British National Corpus. + + For access to the complete XML data structure, use the ``xml()`` + method. For access to simple word lists and tagged word lists, use + ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``. + + You can obtain the full version of the BNC corpus at + https://www.ota.ox.ac.uk/desc/2554 + + If you extracted the archive to a directory called `BNC`, then you can + instantiate the reader as:: + + BNCCorpusReader(root='BNC/Texts/', fileids=r'[A-K]/\w*/\w*\.xml') + + """ + + def __init__(self, root, fileids, lazy=True): + XMLCorpusReader.__init__(self, root, fileids) + self._lazy = lazy + + def words(self, fileids=None, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. 
+ """ + return self._views(fileids, False, None, strip_space, stem) + + def tagged_words(self, fileids=None, c5=False, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. + :rtype: list(tuple(str,str)) + + :param c5: If true, then the tags used will be the more detailed + c5 tags. Otherwise, the simplified tags will be used. + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. + """ + tag = "c5" if c5 else "pos" + return self._views(fileids, False, tag, strip_space, stem) + + def sents(self, fileids=None, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. + """ + return self._views(fileids, True, None, strip_space, stem) + + def tagged_sents(self, fileids=None, c5=False, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. + :rtype: list(list(tuple(str,str))) + + :param c5: If true, then the tags used will be the more detailed + c5 tags. Otherwise, the simplified tags will be used. + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. + """ + tag = "c5" if c5 else "pos" + return self._views( + fileids, sent=True, tag=tag, strip_space=strip_space, stem=stem + ) + + def _views(self, fileids=None, sent=False, tag=False, strip_space=True, stem=False): + """A helper function that instantiates BNCWordViews or the list of words/sentences.""" + f = BNCWordView if self._lazy else self._words + return concat( + [ + f(fileid, sent, tag, strip_space, stem) + for fileid in self.abspaths(fileids) + ] + ) + + def _words(self, fileid, bracket_sent, tag, strip_space, stem): + """ + Helper used to implement the view methods -- returns a list of + words or a list of sentences, optionally tagged. + + :param fileid: The name of the underlying file. + :param bracket_sent: If true, include sentence bracketing. + :param tag: The name of the tagset to use, or None for no tags. + :param strip_space: If true, strip spaces from word tokens. + :param stem: If true, then substitute stems for words. + """ + result = [] + + xmldoc = ElementTree.parse(fileid).getroot() + for xmlsent in xmldoc.findall(".//s"): + sent = [] + for xmlword in _all_xmlwords_in(xmlsent): + word = xmlword.text + if not word: + word = "" # fixes issue 337? 
+ if strip_space or stem: + word = word.strip() + if stem: + word = xmlword.get("hw", word) + if tag == "c5": + word = (word, xmlword.get("c5")) + elif tag == "pos": + word = (word, xmlword.get("pos", xmlword.get("c5"))) + sent.append(word) + if bracket_sent: + result.append(BNCSentence(xmlsent.attrib["n"], sent)) + else: + result.extend(sent) + + assert None not in result + return result + + +def _all_xmlwords_in(elt, result=None): + if result is None: + result = [] + for child in elt: + if child.tag in ("c", "w"): + result.append(child) + else: + _all_xmlwords_in(child, result) + return result + + +class BNCSentence(list): + """ + A list of words, augmented by an attribute ``num`` used to record + the sentence identifier (the ``n`` attribute from the XML). + """ + + def __init__(self, num, items): + self.num = num + list.__init__(self, items) + + +class BNCWordView(XMLCorpusView): + """ + A stream backed corpus view specialized for use with the BNC corpus. + """ + + tags_to_ignore = { + "pb", + "gap", + "vocal", + "event", + "unclear", + "shift", + "pause", + "align", + } + """These tags are ignored. For their description refer to the + technical documentation, for example, + http://www.natcorp.ox.ac.uk/docs/URG/ref-vocal.html + + """ + + def __init__(self, fileid, sent, tag, strip_space, stem): + """ + :param fileid: The name of the underlying file. + :param sent: If true, include sentence bracketing. + :param tag: The name of the tagset to use, or None for no tags. + :param strip_space: If true, strip spaces from word tokens. + :param stem: If true, then substitute stems for words. + """ + if sent: + tagspec = ".*/s" + else: + tagspec = ".*/s/(.*/)?(c|w)" + self._sent = sent + self._tag = tag + self._strip_space = strip_space + self._stem = stem + + self.title = None #: Title of the document. + self.author = None #: Author of the document. + self.editor = None #: Editor + self.resps = None #: Statement of responsibility + + XMLCorpusView.__init__(self, fileid, tagspec) + + # Read in a tasty header. + self._open() + self.read_block(self._stream, ".*/teiHeader$", self.handle_header) + self.close() + + # Reset tag context. + self._tag_context = {0: ()} + + def handle_header(self, elt, context): + # Set up some metadata! + titles = elt.findall("titleStmt/title") + if titles: + self.title = "\n".join(title.text.strip() for title in titles) + + authors = elt.findall("titleStmt/author") + if authors: + self.author = "\n".join(author.text.strip() for author in authors) + + editors = elt.findall("titleStmt/editor") + if editors: + self.editor = "\n".join(editor.text.strip() for editor in editors) + + resps = elt.findall("titleStmt/respStmt") + if resps: + self.resps = "\n\n".join( + "\n".join(resp_elt.text.strip() for resp_elt in resp) for resp in resps + ) + + def handle_elt(self, elt, context): + if self._sent: + return self.handle_sent(elt) + else: + return self.handle_word(elt) + + def handle_word(self, elt): + word = elt.text + if not word: + word = "" # fixes issue 337? 
+ if self._strip_space or self._stem: + word = word.strip() + if self._stem: + word = elt.get("hw", word) + if self._tag == "c5": + word = (word, elt.get("c5")) + elif self._tag == "pos": + word = (word, elt.get("pos", elt.get("c5"))) + return word + + def handle_sent(self, elt): + sent = [] + for child in elt: + if child.tag in ("mw", "hi", "corr", "trunc"): + sent += [self.handle_word(w) for w in child] + elif child.tag in ("w", "c"): + sent.append(self.handle_word(child)) + elif child.tag not in self.tags_to_ignore: + raise ValueError("Unexpected element %s" % child.tag) + return BNCSentence(elt.attrib["n"], sent) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py new file mode 100644 index 0000000000000000000000000000000000000000..c5d3ff67b94dcc6b476e7125c62bbe41e03603f1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py @@ -0,0 +1,237 @@ +# Natural Language Toolkit: Penn Treebank Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +""" +Corpus reader for corpora that consist of parenthesis-delineated parse trees. +""" + +import sys + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag +from nltk.tree import Tree + +# we use [^\s()]+ instead of \S+? to avoid matching () +SORTTAGWRD = re.compile(r"\((\d+) ([^\s()]+) ([^\s()]+)\)") +TAGWORD = re.compile(r"\(([^\s()]+) ([^\s()]+)\)") +WORD = re.compile(r"\([^\s()]+ ([^\s()]+)\)") +EMPTY_BRACKETS = re.compile(r"\s*\(\s*\(") + + +class BracketParseCorpusReader(SyntaxCorpusReader): + """ + Reader for corpora that consist of parenthesis-delineated parse trees, + like those found in the "combined" section of the Penn Treebank, + e.g. "(S (NP (DT the) (JJ little) (NN dog)) (VP (VBD barked)))". + + """ + + def __init__( + self, + root, + fileids, + comment_char=None, + detect_blocks="unindented_paren", + encoding="utf8", + tagset=None, + ): + """ + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + :param comment_char: The character which can appear at the start of + a line to indicate that the rest of the line is a comment. + :param detect_blocks: The method that is used to find blocks + in the corpus; can be 'unindented_paren' (every unindented + parenthesis starts a new parse) or 'sexpr' (brackets are + matched). + :param tagset: The name of the tagset used by this corpus, to be used + for normalizing or converting the POS tags returned by the + ``tagged_...()`` methods. + """ + SyntaxCorpusReader.__init__(self, root, fileids, encoding) + self._comment_char = comment_char + self._detect_blocks = detect_blocks + self._tagset = tagset + + def _read_block(self, stream): + if self._detect_blocks == "sexpr": + return read_sexpr_block(stream, comment_char=self._comment_char) + elif self._detect_blocks == "blankline": + return read_blankline_block(stream) + elif self._detect_blocks == "unindented_paren": + # Tokens start with unindented left parens. + toks = read_regexp_block(stream, start_re=r"^\(") + # Strip any comments out of the tokens. 
+ if self._comment_char: + toks = [ + re.sub("(?m)^%s.*" % re.escape(self._comment_char), "", tok) + for tok in toks + ] + return toks + else: + assert 0, "bad block type" + + def _normalize(self, t): + # Replace leaves of the form (!), (,), with (! !), (, ,) + t = re.sub(r"\((.)\)", r"(\1 \1)", t) + # Replace leaves of the form (tag word root) with (tag word) + t = re.sub(r"\(([^\s()]+) ([^\s()]+) [^\s()]+\)", r"(\1 \2)", t) + return t + + def _parse(self, t): + try: + tree = Tree.fromstring(self._normalize(t)) + # If there's an empty node at the top, strip it off + if tree.label() == "" and len(tree) == 1: + return tree[0] + else: + return tree + + except ValueError as e: + sys.stderr.write("Bad tree detected; trying to recover...\n") + # Try to recover, if we can: + if e.args == ("mismatched parens",): + for n in range(1, 5): + try: + v = Tree(self._normalize(t + ")" * n)) + sys.stderr.write( + " Recovered by adding %d close " "paren(s)\n" % n + ) + return v + except ValueError: + pass + # Try something else: + sys.stderr.write(" Recovered by returning a flat parse.\n") + # sys.stderr.write(' '.join(t.split())+'\n') + return Tree("S", self._tag(t)) + + def _tag(self, t, tagset=None): + tagged_sent = [(w, p) for (p, w) in TAGWORD.findall(self._normalize(t))] + if tagset and tagset != self._tagset: + tagged_sent = [ + (w, map_tag(self._tagset, tagset, p)) for (w, p) in tagged_sent + ] + return tagged_sent + + def _word(self, t): + return WORD.findall(self._normalize(t)) + + +class CategorizedBracketParseCorpusReader( + CategorizedCorpusReader, BracketParseCorpusReader +): + """ + A reader for parsed corpora whose documents are + divided into categories based on their file identifiers. + @author: Nathan Schneider + """ + + def __init__(self, *args, **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (C{cat_pattern}, C{cat_map}, and C{cat_file}) are passed to + the L{CategorizedCorpusReader constructor + }. The remaining arguments + are passed to the L{BracketParseCorpusReader constructor + }. + """ + CategorizedCorpusReader.__init__(self, kwargs) + BracketParseCorpusReader.__init__(self, *args, **kwargs) + + def tagged_words(self, fileids=None, categories=None, tagset=None): + return super().tagged_words(self._resolve(fileids, categories), tagset) + + def tagged_sents(self, fileids=None, categories=None, tagset=None): + return super().tagged_sents(self._resolve(fileids, categories), tagset) + + def tagged_paras(self, fileids=None, categories=None, tagset=None): + return super().tagged_paras(self._resolve(fileids, categories), tagset) + + def parsed_words(self, fileids=None, categories=None): + return super().parsed_words(self._resolve(fileids, categories)) + + def parsed_sents(self, fileids=None, categories=None): + return super().parsed_sents(self._resolve(fileids, categories)) + + def parsed_paras(self, fileids=None, categories=None): + return super().parsed_paras(self._resolve(fileids, categories)) + + +class AlpinoCorpusReader(BracketParseCorpusReader): + """ + Reader for the Alpino Dutch Treebank. + This corpus has a lexical breakdown structure embedded, as read by `_parse` + Unfortunately this puts punctuation and some other words out of the sentence + order in the xml element tree. This is no good for `tag_` and `word_` + `_tag` and `_word` will be overridden to use a non-default new parameter 'ordered' + to the overridden _normalize function. The _parse function can then remain + untouched. 
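+
+    A usage sketch (assumes the ``alpino`` corpus has been installed via
+    ``nltk.download('alpino')``; output not shown):
+
+        >>> from nltk.corpus import alpino  # doctest: +SKIP
+        >>> alpino.tagged_words()[:2]  # doctest: +SKIP
+        >>> alpino.parsed_sents()[0]  # doctest: +SKIP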
+ """ + + def __init__(self, root, encoding="ISO-8859-1", tagset=None): + BracketParseCorpusReader.__init__( + self, + root, + r"alpino\.xml", + detect_blocks="blankline", + encoding=encoding, + tagset=tagset, + ) + + def _normalize(self, t, ordered=False): + """Normalize the xml sentence element in t. + The sentence elements , although embedded in a few overall + xml elements, are separated by blank lines. That's how the reader can + deliver them one at a time. + Each sentence has a few category subnodes that are of no use to us. + The remaining word nodes may or may not appear in the proper order. + Each word node has attributes, among which: + - begin : the position of the word in the sentence + - pos : Part of Speech: the Tag + - word : the actual word + The return value is a string with all xml elementes replaced by + clauses: either a cat clause with nested clauses, or a word clause. + The order of the bracket clauses closely follows the xml. + If ordered == True, the word clauses include an order sequence number. + If ordered == False, the word clauses only have pos and word parts. + """ + if t[:10] != "', r"(\1", t) + if ordered: + t = re.sub( + r' ', + r"(\1 \2 \3)", + t, + ) + else: + t = re.sub(r' ', r"(\1 \2)", t) + t = re.sub(r" ", r")", t) + t = re.sub(r".*", r"", t) + t = re.sub(r"", r"", t) + return t + + def _tag(self, t, tagset=None): + tagged_sent = [ + (int(o), w, p) + for (o, p, w) in SORTTAGWRD.findall(self._normalize(t, ordered=True)) + ] + tagged_sent.sort() + if tagset and tagset != self._tagset: + tagged_sent = [ + (w, map_tag(self._tagset, tagset, p)) for (o, w, p) in tagged_sent + ] + else: + tagged_sent = [(w, p) for (o, w, p) in tagged_sent] + return tagged_sent + + def _word(self, t): + """Return a correctly ordered list if words""" + tagged_sent = self._tag(t) + return [w for (w, p) in tagged_sent] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py new file mode 100644 index 0000000000000000000000000000000000000000..ef6ab8146619bbdc0f448f9771269ab7d3ee5451 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py @@ -0,0 +1,158 @@ +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Masato Hagiwara +# URL: +# For license information, see LICENSE.TXT + +import sys + +from nltk.corpus.reader import util +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * + + +class ChasenCorpusReader(CorpusReader): + def __init__(self, root, fileids, encoding="utf8", sent_splitter=None): + self._sent_splitter = sent_splitter + CorpusReader.__init__(self, root, fileids, encoding) + + def words(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, False, False, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, True, False, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, False, True, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, True, True, False, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, 
enc, False, True, True, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_paras(self, fileids=None): + return concat( + [ + ChasenCorpusView(fileid, enc, True, True, True, self._sent_splitter) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class ChasenCorpusView(StreamBackedCorpusView): + """ + A specialized corpus view for ChasenReader. Similar to ``TaggedCorpusView``, + but this'll use fixed sets of word and sentence tokenizer. + """ + + def __init__( + self, + corpus_file, + encoding, + tagged, + group_by_sent, + group_by_para, + sent_splitter=None, + ): + self._tagged = tagged + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + self._sent_splitter = sent_splitter + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + """Reads one paragraph at a time.""" + block = [] + for para_str in read_regexp_block(stream, r".", r"^EOS\n"): + + para = [] + + sent = [] + for line in para_str.splitlines(): + + _eos = line.strip() == "EOS" + _cells = line.split("\t") + w = (_cells[0], "\t".join(_cells[1:])) + if not _eos: + sent.append(w) + + if _eos or (self._sent_splitter and self._sent_splitter(w)): + if not self._tagged: + sent = [w for (w, t) in sent] + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + sent = [] + + if len(sent) > 0: + if not self._tagged: + sent = [w for (w, t) in sent] + + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + + if self._group_by_para: + block.append(para) + else: + block.extend(para) + + return block + + +def demo(): + + import nltk + from nltk.corpus.util import LazyCorpusLoader + + jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8") + print("/".join(jeita.words()[22100:22140])) + + print( + "\nEOS\n".join( + "\n".join("{}/{}".format(w[0], w[1].split("\t")[2]) for w in sent) + for sent in jeita.tagged_sents()[2170:2173] + ) + ) + + +def test(): + + from nltk.corpus.util import LazyCorpusLoader + + jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8") + + assert isinstance(jeita.tagged_words()[0][1], str) + + +if __name__ == "__main__": + demo() + test() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/childes.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/childes.py new file mode 100644 index 0000000000000000000000000000000000000000..115ccfb927f7bb4d217670f0cd52a55d64563e9c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/childes.py @@ -0,0 +1,630 @@ +# CHILDES XML Corpus Reader + +# Copyright (C) 2001-2023 NLTK Project +# Author: Tomonori Nagano +# Alexis Dimitriadis +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the XML version of the CHILDES corpus. +""" + +__docformat__ = "epytext en" + +import re +from collections import defaultdict + +from nltk.corpus.reader.util import concat +from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader +from nltk.util import LazyConcatenation, LazyMap, flatten + +# to resolve the namespace issue +NS = "http://www.talkbank.org/ns/talkbank" + + +class CHILDESCorpusReader(XMLCorpusReader): + """ + Corpus reader for the XML version of the CHILDES corpus. + The CHILDES corpus is available at ``https://childes.talkbank.org/``. The XML + version of CHILDES is located at ``https://childes.talkbank.org/data-xml/``. 
+ Copy the needed parts of the CHILDES XML corpus into the NLTK data directory + (``nltk_data/corpora/CHILDES/``). + + For access to the file text use the usual nltk functions, + ``words()``, ``sents()``, ``tagged_words()`` and ``tagged_sents()``. + """ + + def __init__(self, root, fileids, lazy=True): + XMLCorpusReader.__init__(self, root, fileids) + self._lazy = lazy + + def words( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=False, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of words + :rtype: list(str) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of (stem, index, + dependent_index) + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. + :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = None + pos = False + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def tagged_words( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=False, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. + :rtype: list(tuple(str,str)) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of (stem, index, + dependent_index) + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. + :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = None + pos = True + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def sents( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=None, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of sentences or utterances, each + encoded as a list of word strings. + :rtype: list(list(str)) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of ``(str,pos,relation_list)``. 
+ If there is manually-annotated relation info, it will return + tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)`` + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. + :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = True + pos = False + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def tagged_sents( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=None, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. + :rtype: list(list(tuple(str,str))) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of ``(str,pos,relation_list)``. + If there is manually-annotated relation info, it will return + tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)`` + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. + :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = True + pos = True + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def corpus(self, fileids=None): + """ + :return: the given file(s) as a dict of ``(corpus_property_key, value)`` + :rtype: list(dict) + """ + if not self._lazy: + return [self._get_corpus(fileid) for fileid in self.abspaths(fileids)] + return LazyMap(self._get_corpus, self.abspaths(fileids)) + + def _get_corpus(self, fileid): + results = dict() + xmldoc = ElementTree.parse(fileid).getroot() + for key, value in xmldoc.items(): + results[key] = value + return results + + def participants(self, fileids=None): + """ + :return: the given file(s) as a dict of + ``(participant_property_key, value)`` + :rtype: list(dict) + """ + if not self._lazy: + return [self._get_participants(fileid) for fileid in self.abspaths(fileids)] + return LazyMap(self._get_participants, self.abspaths(fileids)) + + def _get_participants(self, fileid): + # multidimensional dicts + def dictOfDicts(): + return defaultdict(dictOfDicts) + + xmldoc = ElementTree.parse(fileid).getroot() + # getting participants' data + pat = dictOfDicts() + for participant in xmldoc.findall( + f".//{{{NS}}}Participants/{{{NS}}}participant" + ): + for (key, value) in participant.items(): + pat[participant.get("id")][key] = value + return pat + + def age(self, fileids=None, speaker="CHI", month=False): + """ + :return: the given file(s) as string or int + :rtype: list 
or int + + :param month: If true, return months instead of year-month-date + """ + if not self._lazy: + return [ + self._get_age(fileid, speaker, month) + for fileid in self.abspaths(fileids) + ] + get_age = lambda fileid: self._get_age(fileid, speaker, month) + return LazyMap(get_age, self.abspaths(fileids)) + + def _get_age(self, fileid, speaker, month): + xmldoc = ElementTree.parse(fileid).getroot() + for pat in xmldoc.findall(f".//{{{NS}}}Participants/{{{NS}}}participant"): + try: + if pat.get("id") == speaker: + age = pat.get("age") + if month: + age = self.convert_age(age) + return age + # some files don't have age data + except (TypeError, AttributeError) as e: + return None + + def convert_age(self, age_year): + "Caclculate age in months from a string in CHILDES format" + m = re.match(r"P(\d+)Y(\d+)M?(\d?\d?)D?", age_year) + age_month = int(m.group(1)) * 12 + int(m.group(2)) + try: + if int(m.group(3)) > 15: + age_month += 1 + # some corpora don't have age information? + except ValueError as e: + pass + return age_month + + def MLU(self, fileids=None, speaker="CHI"): + """ + :return: the given file(s) as a floating number + :rtype: list(float) + """ + if not self._lazy: + return [ + self._getMLU(fileid, speaker=speaker) + for fileid in self.abspaths(fileids) + ] + get_MLU = lambda fileid: self._getMLU(fileid, speaker=speaker) + return LazyMap(get_MLU, self.abspaths(fileids)) + + def _getMLU(self, fileid, speaker): + sents = self._get_words( + fileid, + speaker=speaker, + sent=True, + stem=True, + relation=False, + pos=True, + strip_space=True, + replace=True, + ) + results = [] + lastSent = [] + numFillers = 0 + sentDiscount = 0 + for sent in sents: + posList = [pos for (word, pos) in sent] + # if any part of the sentence is intelligible + if any(pos == "unk" for pos in posList): + continue + # if the sentence is null + elif sent == []: + continue + # if the sentence is the same as the last sent + elif sent == lastSent: + continue + else: + results.append([word for (word, pos) in sent]) + # count number of fillers + if len({"co", None}.intersection(posList)) > 0: + numFillers += posList.count("co") + numFillers += posList.count(None) + sentDiscount += 1 + lastSent = sent + try: + thisWordList = flatten(results) + # count number of morphemes + # (e.g., 'read' = 1 morpheme but 'read-PAST' is 2 morphemes) + numWords = ( + len(flatten([word.split("-") for word in thisWordList])) - numFillers + ) + numSents = len(results) - sentDiscount + mlu = numWords / numSents + except ZeroDivisionError: + mlu = 0 + # return {'mlu':mlu,'wordNum':numWords,'sentNum':numSents} + return mlu + + def _get_words( + self, fileid, speaker, sent, stem, relation, pos, strip_space, replace + ): + if ( + isinstance(speaker, str) and speaker != "ALL" + ): # ensure we have a list of speakers + speaker = [speaker] + xmldoc = ElementTree.parse(fileid).getroot() + # processing each xml doc + results = [] + for xmlsent in xmldoc.findall(".//{%s}u" % NS): + sents = [] + # select speakers + if speaker == "ALL" or xmlsent.get("who") in speaker: + for xmlword in xmlsent.findall(".//{%s}w" % NS): + infl = None + suffixStem = None + suffixTag = None + # getting replaced words + if replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}replacement"): + xmlword = xmlsent.find( + f".//{{{NS}}}w/{{{NS}}}replacement/{{{NS}}}w" + ) + elif replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk"): + xmlword = xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk") + # get text + if xmlword.text: + word = xmlword.text + else: + word = "" + # strip tailing 
space + if strip_space: + word = word.strip() + # stem + if relation or stem: + try: + xmlstem = xmlword.find(".//{%s}stem" % NS) + word = xmlstem.text + except AttributeError as e: + pass + # if there is an inflection + try: + xmlinfl = xmlword.find( + f".//{{{NS}}}mor/{{{NS}}}mw/{{{NS}}}mk" + ) + word += "-" + xmlinfl.text + except: + pass + # if there is a suffix + try: + xmlsuffix = xmlword.find( + ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}stem" + % (NS, NS, NS, NS) + ) + suffixStem = xmlsuffix.text + except AttributeError: + suffixStem = "" + if suffixStem: + word += "~" + suffixStem + # pos + if relation or pos: + try: + xmlpos = xmlword.findall(".//{%s}c" % NS) + xmlpos2 = xmlword.findall(".//{%s}s" % NS) + if xmlpos2 != []: + tag = xmlpos[0].text + ":" + xmlpos2[0].text + else: + tag = xmlpos[0].text + except (AttributeError, IndexError) as e: + tag = "" + try: + xmlsuffixpos = xmlword.findall( + ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}c" + % (NS, NS, NS, NS, NS) + ) + xmlsuffixpos2 = xmlword.findall( + ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}s" + % (NS, NS, NS, NS, NS) + ) + if xmlsuffixpos2: + suffixTag = ( + xmlsuffixpos[0].text + ":" + xmlsuffixpos2[0].text + ) + else: + suffixTag = xmlsuffixpos[0].text + except: + pass + if suffixTag: + tag += "~" + suffixTag + word = (word, tag) + # relational + # the gold standard is stored in + # + if relation == True: + for xmlstem_rel in xmlword.findall( + f".//{{{NS}}}mor/{{{NS}}}gra" + ): + if not xmlstem_rel.get("type") == "grt": + word = ( + word[0], + word[1], + xmlstem_rel.get("index") + + "|" + + xmlstem_rel.get("head") + + "|" + + xmlstem_rel.get("relation"), + ) + else: + word = ( + word[0], + word[1], + word[2], + word[0], + word[1], + xmlstem_rel.get("index") + + "|" + + xmlstem_rel.get("head") + + "|" + + xmlstem_rel.get("relation"), + ) + try: + for xmlpost_rel in xmlword.findall( + f".//{{{NS}}}mor/{{{NS}}}mor-post/{{{NS}}}gra" + ): + if not xmlpost_rel.get("type") == "grt": + suffixStem = ( + suffixStem[0], + suffixStem[1], + xmlpost_rel.get("index") + + "|" + + xmlpost_rel.get("head") + + "|" + + xmlpost_rel.get("relation"), + ) + else: + suffixStem = ( + suffixStem[0], + suffixStem[1], + suffixStem[2], + suffixStem[0], + suffixStem[1], + xmlpost_rel.get("index") + + "|" + + xmlpost_rel.get("head") + + "|" + + xmlpost_rel.get("relation"), + ) + except: + pass + sents.append(word) + if sent or relation: + results.append(sents) + else: + results.extend(sents) + return LazyMap(lambda x: x, results) + + # Ready-to-use browser opener + + """ + The base URL for viewing files on the childes website. This + shouldn't need to be changed, unless CHILDES changes the configuration + of their server or unless the user sets up their own corpus webserver. + """ + childes_url_base = r"https://childes.talkbank.org/browser/index.php?url=" + + def webview_file(self, fileid, urlbase=None): + """Map a corpus file to its web version on the CHILDES website, + and open it in a web browser. + + The complete URL to be used is: + childes.childes_url_base + urlbase + fileid.replace('.xml', '.cha') + + If no urlbase is passed, we try to calculate it. This + requires that the childes corpus was set up to mirror the + folder hierarchy under childes.psy.cmu.edu/data-xml/, e.g.: + nltk_data/corpora/childes/Eng-USA/Cornell/??? or + nltk_data/corpora/childes/Romance/Spanish/Aguirre/??? + + The function first looks (as a special case) if "Eng-USA" is + on the path consisting of +fileid; then if + "childes", possibly followed by "data-xml", appears. 
If neither + one is found, we use the unmodified fileid and hope for the best. + If this is not right, specify urlbase explicitly, e.g., if the + corpus root points to the Cornell folder, urlbase='Eng-USA/Cornell'. + """ + + import webbrowser + + if urlbase: + path = urlbase + "/" + fileid + else: + full = self.root + "/" + fileid + full = re.sub(r"\\", "/", full) + if "/childes/" in full.lower(): + # Discard /data-xml/ if present + path = re.findall(r"(?i)/childes(?:/data-xml)?/(.*)\.xml", full)[0] + elif "eng-usa" in full.lower(): + path = "Eng-USA/" + re.findall(r"/(?i)Eng-USA/(.*)\.xml", full)[0] + else: + path = fileid + + # Strip ".xml" and add ".cha", as necessary: + if path.endswith(".xml"): + path = path[:-4] + + if not path.endswith(".cha"): + path = path + ".cha" + + url = self.childes_url_base + path + + webbrowser.open_new_tab(url) + print("Opening in browser:", url) + # Pausing is a good idea, but it's up to the user... + # raw_input("Hit Return to continue") + + +def demo(corpus_root=None): + """ + The CHILDES corpus should be manually downloaded and saved + to ``[NLTK_Data_Dir]/corpora/childes/`` + """ + if not corpus_root: + from nltk.data import find + + corpus_root = find("corpora/childes/data-xml/Eng-USA/") + + try: + childes = CHILDESCorpusReader(corpus_root, ".*.xml") + # describe all corpus + for file in childes.fileids()[:5]: + corpus = "" + corpus_id = "" + for (key, value) in childes.corpus(file)[0].items(): + if key == "Corpus": + corpus = value + if key == "Id": + corpus_id = value + print("Reading", corpus, corpus_id, " .....") + print("words:", childes.words(file)[:7], "...") + print( + "words with replaced words:", + childes.words(file, replace=True)[:7], + " ...", + ) + print("words with pos tags:", childes.tagged_words(file)[:7], " ...") + print("words (only MOT):", childes.words(file, speaker="MOT")[:7], "...") + print("words (only CHI):", childes.words(file, speaker="CHI")[:7], "...") + print("stemmed words:", childes.words(file, stem=True)[:7], " ...") + print( + "words with relations and pos-tag:", + childes.words(file, relation=True)[:5], + " ...", + ) + print("sentence:", childes.sents(file)[:2], " ...") + for (participant, values) in childes.participants(file)[0].items(): + for (key, value) in values.items(): + print("\tparticipant", participant, key, ":", value) + print("num of sent:", len(childes.sents(file))) + print("num of morphemes:", len(childes.words(file, stem=True))) + print("age:", childes.age(file)) + print("age in month:", childes.age(file, month=True)) + print("MLU:", childes.MLU(file)) + print() + + except LookupError as e: + print( + """The CHILDES corpus, or the parts you need, should be manually + downloaded from https://childes.talkbank.org/data-xml/ and saved at + [NLTK_Data_Dir]/corpora/childes/ + Alternately, you can call the demo with the path to a portion of the CHILDES corpus, e.g.: + demo('/path/to/childes/data-xml/Eng-USA/") + """ + ) + # corpus_root_http = urllib2.urlopen('https://childes.talkbank.org/data-xml/Eng-USA/Bates.zip') + # corpus_root_http_bates = zipfile.ZipFile(cStringIO.StringIO(corpus_root_http.read())) + ##this fails + # childes = CHILDESCorpusReader(corpus_root_http_bates,corpus_root_http_bates.namelist()) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py new file mode 100644 index 
0000000000000000000000000000000000000000..66b42e79ca134227357aba4cb493335196e05961 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py @@ -0,0 +1,273 @@ +# Natural Language Toolkit: Chunked Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A reader for corpora that contain chunked (and optionally tagged) +documents. +""" + +import codecs +import os.path + +import nltk +from nltk.chunk import tagstr2tree +from nltk.corpus.reader.api import * +from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader +from nltk.corpus.reader.util import * +from nltk.tokenize import * +from nltk.tree import Tree + + +class ChunkedCorpusReader(CorpusReader): + """ + Reader for chunked (and optionally tagged) corpora. Paragraphs + are split using a block reader. They are then tokenized into + sentences using a sentence tokenizer. Finally, these sentences + are parsed into chunk trees using a string-to-chunktree conversion + function. Each of these steps can be performed using a default + function or a custom function. By default, paragraphs are split + on blank lines; sentences are listed one per line; and sentences + are parsed into chunk trees using ``nltk.chunk.tagstr2tree``. + """ + + def __init__( + self, + root, + fileids, + extension="", + str2chunktree=tagstr2tree, + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + para_block_reader=read_blankline_block, + encoding="utf8", + tagset=None, + ): + """ + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader, tagset) + """Arguments for corpus views generated by this corpus: a tuple + (str2chunktree, sent_tokenizer, para_block_tokenizer)""" + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + """ + return concat( + [ + ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of word strings. + :rtype: list(list(list(str))) + """ + return concat( + [ + ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. + :rtype: list(tuple(str,str)) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 0, 0, 0, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. 
+ + :rtype: list(list(tuple(str,str))) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 0, 0, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_paras(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of ``(word,tag)`` tuples. + :rtype: list(list(list(tuple(str,str)))) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 1, 0, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def chunked_words(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of tagged + words and chunks. Words are encoded as ``(word, tag)`` + tuples (if the corpus has tags) or word strings (if the + corpus has no tags). Chunks are encoded as depth-one + trees over ``(word,tag)`` tuples or word strings. + :rtype: list(tuple(str,str) and Tree) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 0, 0, 1, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def chunked_sents(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + sentences, each encoded as a shallow Tree. The leaves + of these trees are encoded as ``(word, tag)`` tuples (if + the corpus has tags) or word strings (if the corpus has no + tags). + :rtype: list(Tree) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 0, 1, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def chunked_paras(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as a shallow Tree. The leaves of these + trees are encoded as ``(word, tag)`` tuples (if the corpus + has tags) or word strings (if the corpus has no tags). + :rtype: list(list(Tree)) + """ + return concat( + [ + ChunkedCorpusView( + f, enc, 1, 1, 1, 1, *self._cv_args, target_tagset=tagset + ) + for (f, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_block(self, stream): + return [tagstr2tree(t) for t in read_blankline_block(stream)] + + +class ChunkedCorpusView(StreamBackedCorpusView): + def __init__( + self, + fileid, + encoding, + tagged, + group_by_sent, + group_by_para, + chunked, + str2chunktree, + sent_tokenizer, + para_block_reader, + source_tagset=None, + target_tagset=None, + ): + StreamBackedCorpusView.__init__(self, fileid, encoding=encoding) + self._tagged = tagged + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + self._chunked = chunked + self._str2chunktree = str2chunktree + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + self._source_tagset = source_tagset + self._target_tagset = target_tagset + + def read_block(self, stream): + block = [] + for para_str in self._para_block_reader(stream): + para = [] + for sent_str in self._sent_tokenizer.tokenize(para_str): + sent = self._str2chunktree( + sent_str, + source_tagset=self._source_tagset, + target_tagset=self._target_tagset, + ) + + # If requested, throw away the tags. + if not self._tagged: + sent = self._untag(sent) + + # If requested, throw away the chunks. + if not self._chunked: + sent = sent.leaves() + + # Add the sentence to `para`. + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + + # Add the paragraph to `block`. 
+            if self._group_by_para:
+                block.append(para)
+            else:
+                block.extend(para)
+
+        # Return the block
+        return block
+
+    def _untag(self, tree):
+        for i, child in enumerate(tree):
+            if isinstance(child, Tree):
+                self._untag(child)
+            elif isinstance(child, tuple):
+                tree[i] = child[0]
+            else:
+                raise ValueError("expected child to be Tree or tuple")
+        return tree
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py
new file mode 100644
index 0000000000000000000000000000000000000000..032ce82c3b2a6a4011c9b1637b882db2df1bcd55
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py
@@ -0,0 +1,309 @@
+# Natural Language Toolkit: Comparative Sentence Corpus Reader
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Pierpaolo Pantone <24alsecondo@gmail.com>
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+CorpusReader for the Comparative Sentence Dataset.
+
+- Comparative Sentence Dataset information -
+
+Annotated by: Nitin Jindal and Bing Liu, 2006.
+              Department of Computer Science
+              University of Illinois at Chicago
+
+Contact: Nitin Jindal, njindal@cs.uic.edu
+         Bing Liu, liub@cs.uic.edu (https://www.cs.uic.edu/~liub)
+
+Distributed with permission.
+
+Related papers:
+
+- Nitin Jindal and Bing Liu. "Identifying Comparative Sentences in Text Documents".
+  Proceedings of the ACM SIGIR International Conference on Information Retrieval
+  (SIGIR-06), 2006.
+
+- Nitin Jindal and Bing Liu. "Mining Comparative Sentences and Relations".
+  Proceedings of Twenty First National Conference on Artificial Intelligence
+  (AAAI-2006), 2006.
+
+- Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences".
+  Proceedings of the 22nd International Conference on Computational Linguistics
+  (Coling-2008), Manchester, 18-22 August, 2008.
+"""
+import re
+
+from nltk.corpus.reader.api import *
+from nltk.tokenize import *
+
+# Regular expressions for dataset components.  Comparative sentences are
+# annotated with tags of the form <cs-N>, where N (1-4) is the comparison type.
+STARS = re.compile(r"^\*+$")
+COMPARISON = re.compile(r"<cs-[1234]>")
+CLOSE_COMPARISON = re.compile(r"</cs-[1234]>")
+GRAD_COMPARISON = re.compile(r"<cs-[123]>")
+NON_GRAD_COMPARISON = re.compile(r"<cs-4>")
+ENTITIES_FEATS = re.compile(r"(\d)_((?:[\.\w\s/-](?!\d_))+)")
+KEYWORD = re.compile(r"\(([^\(]*)\)$")
+
+
+class Comparison:
+    """
+    A Comparison represents a comparative sentence and its constituents.
+    """
+
+    def __init__(
+        self,
+        text=None,
+        comp_type=None,
+        entity_1=None,
+        entity_2=None,
+        feature=None,
+        keyword=None,
+    ):
+        """
+        :param text: a string (optionally tokenized) containing a comparison.
+        :param comp_type: an integer defining the type of comparison expressed.
+            Values can be: 1 (Non-equal gradable), 2 (Equative), 3 (Superlative),
+            4 (Non-gradable).
+        :param entity_1: the first entity considered in the comparison relation.
+        :param entity_2: the second entity considered in the comparison relation.
+        :param feature: the feature considered in the comparison relation.
+        :param keyword: the word or phrase which is used for that comparative relation.
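+
+        An illustrative construction (the values below are invented for the
+        example and are not taken from the dataset):
+
+            >>> Comparison(text=['camera', 'X', 'has', 'better', 'zoom', 'than', 'camera', 'Y'],
+            ...            comp_type=1, entity_1='X', entity_2='Y',
+            ...            feature='zoom', keyword='better')  # doctest: +SKIP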
+ """ + self.text = text + self.comp_type = comp_type + self.entity_1 = entity_1 + self.entity_2 = entity_2 + self.feature = feature + self.keyword = keyword + + def __repr__(self): + return ( + 'Comparison(text="{}", comp_type={}, entity_1="{}", entity_2="{}", ' + 'feature="{}", keyword="{}")' + ).format( + self.text, + self.comp_type, + self.entity_1, + self.entity_2, + self.feature, + self.keyword, + ) + + +class ComparativeSentencesCorpusReader(CorpusReader): + """ + Reader for the Comparative Sentence Dataset by Jindal and Liu (2006). + + >>> from nltk.corpus import comparative_sentences + >>> comparison = comparative_sentences.comparisons()[0] + >>> comparison.text # doctest: +NORMALIZE_WHITESPACE + ['its', 'fast-forward', 'and', 'rewind', 'work', 'much', 'more', 'smoothly', + 'and', 'consistently', 'than', 'those', 'of', 'other', 'models', 'i', "'ve", + 'had', '.'] + >>> comparison.entity_2 + 'models' + >>> (comparison.feature, comparison.keyword) + ('rewind', 'more') + >>> len(comparative_sentences.comparisons()) + 853 + """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, + root, + fileids, + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=None, + encoding="utf8", + ): + """ + :param root: The root directory for this corpus. + :param fileids: a list or regexp specifying the fileids in this corpus. + :param word_tokenizer: tokenizer for breaking sentences or paragraphs + into words. Default: `WhitespaceTokenizer` + :param sent_tokenizer: tokenizer for breaking paragraphs into sentences. + :param encoding: the encoding that should be used to read the corpus. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._readme = "README.txt" + + def comparisons(self, fileids=None): + """ + Return all comparisons in the corpus. + + :param fileids: a list or regexp specifying the ids of the files whose + comparisons have to be returned. + :return: the given file(s) as a list of Comparison objects. + :rtype: list(Comparison) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_comparison_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def keywords(self, fileids=None): + """ + Return a set of all keywords used in the corpus. + + :param fileids: a list or regexp specifying the ids of the files whose + keywords have to be returned. + :return: the set of keywords and comparative phrases used in the corpus. + :rtype: set(str) + """ + all_keywords = concat( + [ + self.CorpusView(path, self._read_keyword_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + keywords_set = {keyword.lower() for keyword in all_keywords if keyword} + return keywords_set + + def keywords_readme(self): + """ + Return the list of words and constituents considered as clues of a + comparison (from listOfkeywords.txt). + """ + keywords = [] + with self.open("listOfkeywords.txt") as fp: + raw_text = fp.read() + for line in raw_text.split("\n"): + if not line or line.startswith("//"): + continue + keywords.append(line.strip()) + return keywords + + def sents(self, fileids=None): + """ + Return all sentences in the corpus. + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. 
+        :return: all sentences of the corpus as lists of tokens (or as plain
+            strings, if no word tokenizer is specified).
+        :rtype: list(list(str)) or list(str)
+        """
+        return concat(
+            [
+                self.CorpusView(path, self._read_sent_block, encoding=enc)
+                for (path, enc, fileid) in self.abspaths(fileids, True, True)
+            ]
+        )
+
+    def words(self, fileids=None):
+        """
+        Return all words and punctuation symbols in the corpus.
+
+        :param fileids: a list or regexp specifying the ids of the files whose
+            words have to be returned.
+        :return: the given file(s) as a list of words and punctuation symbols.
+        :rtype: list(str)
+        """
+        return concat(
+            [
+                self.CorpusView(path, self._read_word_block, encoding=enc)
+                for (path, enc, fileid) in self.abspaths(fileids, True, True)
+            ]
+        )
+
+    def _read_comparison_block(self, stream):
+        while True:
+            line = stream.readline()
+            if not line:
+                return []  # end of file.
+            comparison_tags = re.findall(COMPARISON, line)
+            if comparison_tags:
+                grad_comparisons = re.findall(GRAD_COMPARISON, line)
+                non_grad_comparisons = re.findall(NON_GRAD_COMPARISON, line)
+                # Advance to the next line (it contains the comparative sentence)
+                comparison_text = stream.readline().strip()
+                if self._word_tokenizer:
+                    comparison_text = self._word_tokenizer.tokenize(comparison_text)
+                # Skip the next line (it contains closing comparison tags)
+                stream.readline()
+                # If gradable comparisons are found, create Comparison instances
+                # and populate their fields
+                comparison_bundle = []
+                if grad_comparisons:
+                    # Each comparison tag has its own relations on a separate line
+                    for comp in grad_comparisons:
+                        comp_type = int(re.match(r"<cs-(\d)>", comp).group(1))
+                        comparison = Comparison(
+                            text=comparison_text, comp_type=comp_type
+                        )
+                        line = stream.readline()
+                        entities_feats = ENTITIES_FEATS.findall(line)
+                        if entities_feats:
+                            for (code, entity_feat) in entities_feats:
+                                if code == "1":
+                                    comparison.entity_1 = entity_feat.strip()
+                                elif code == "2":
+                                    comparison.entity_2 = entity_feat.strip()
+                                elif code == "3":
+                                    comparison.feature = entity_feat.strip()
+                        keyword = KEYWORD.findall(line)
+                        if keyword:
+                            comparison.keyword = keyword[0]
+                        comparison_bundle.append(comparison)
+                # If non-gradable comparisons are found, create a simple Comparison
+                # instance for each one
+                if non_grad_comparisons:
+                    for comp in non_grad_comparisons:
+                        # comp_type in this case should always be 4.
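+                        # (Each tag matched by NON_GRAD_COMPARISON above is the
+                        # literal string "<cs-4>", so the digit captured below is
+                        # always 4.)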
+                        comp_type = int(re.match(r"<cs-(\d)>", comp).group(1))
+                        comparison = Comparison(
+                            text=comparison_text, comp_type=comp_type
+                        )
+                        comparison_bundle.append(comparison)
+                # Flatten the list of comparisons before returning them
+                # return concat([comparison_bundle])
+                return comparison_bundle
+
+    def _read_keyword_block(self, stream):
+        keywords = []
+        for comparison in self._read_comparison_block(stream):
+            keywords.append(comparison.keyword)
+        return keywords
+
+    def _read_sent_block(self, stream):
+        while True:
+            line = stream.readline()
+            if re.match(STARS, line):
+                while True:
+                    line = stream.readline()
+                    if re.match(STARS, line):
+                        break
+                continue
+            if (
+                not re.findall(COMPARISON, line)
+                and not ENTITIES_FEATS.findall(line)
+                and not re.findall(CLOSE_COMPARISON, line)
+            ):
+                if self._sent_tokenizer:
+                    return [
+                        self._word_tokenizer.tokenize(sent)
+                        for sent in self._sent_tokenizer.tokenize(line)
+                    ]
+                else:
+                    return [self._word_tokenizer.tokenize(line)]
+
+    def _read_word_block(self, stream):
+        words = []
+        for sent in self._read_sent_block(stream):
+            words.extend(sent)
+        return words
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/conll.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/conll.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c3b30db900ee4eb4648b74d5904af04b60e1692
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/conll.py
@@ -0,0 +1,579 @@
+# Natural Language Toolkit: CONLL Corpus Reader
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Steven Bird
+#         Edward Loper
+# URL:
+# For license information, see LICENSE.TXT
+
+"""
+Read CoNLL-style chunk fileids.
+"""
+
+import textwrap
+
+from nltk.corpus.reader.api import *
+from nltk.corpus.reader.util import *
+from nltk.tag import map_tag
+from nltk.tree import Tree
+from nltk.util import LazyConcatenation, LazyMap
+
+
+class ConllCorpusReader(CorpusReader):
+    """
+    A corpus reader for CoNLL-style files. These files consist of a
+    series of sentences, separated by blank lines. Each sentence is
+    encoded using a table (or "grid") of values, where each line
+    corresponds to a single word, and each column corresponds to an
+    annotation type. The set of columns used by CoNLL-style files can
+    vary from corpus to corpus; the ``ConllCorpusReader`` constructor
+    therefore takes an argument, ``columntypes``, which is used to
+    specify the columns that are used by a given corpus. By default,
+    columns are split on consecutive whitespace; the ``separator``
+    argument lets you set an explicit string to split on instead
+    (e.g. ``\'\t\'``).
+
+
+    @todo: Add support for reading from corpora where different
+        parallel files contain different columns.
+    @todo: Possibly add caching of the grid corpus view? This would
+        allow the same grid view to be used by different data access
+        methods (eg words() and parsed_sents() could both share the
+        same grid corpus view object).
+    @todo: Better support for -DOCSTART-. Currently, we just ignore
+        it, but it could be used to define methods that retrieve a
+        document at a time (eg parsed_documents()).
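+
+    A minimal usage sketch (the root directory, file pattern and column layout
+    below are placeholders, not a corpus shipped with NLTK):
+
+        >>> from nltk.corpus.reader import ConllCorpusReader  # doctest: +SKIP
+        >>> reader = ConllCorpusReader(
+        ...     "/path/to/corpus", r".*\.conll",
+        ...     columntypes=("words", "pos", "chunk"),
+        ... )  # doctest: +SKIP
+        >>> reader.tagged_sents()  # doctest: +SKIP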
+ """ + + # ///////////////////////////////////////////////////////////////// + # Column Types + # ///////////////////////////////////////////////////////////////// + + WORDS = "words" #: column type for words + POS = "pos" #: column type for part-of-speech tags + TREE = "tree" #: column type for parse trees + CHUNK = "chunk" #: column type for chunk structures + NE = "ne" #: column type for named entities + SRL = "srl" #: column type for semantic role labels + IGNORE = "ignore" #: column type for column that should be ignored + + #: A list of all column types supported by the conll corpus reader. + COLUMN_TYPES = (WORDS, POS, TREE, CHUNK, NE, SRL, IGNORE) + + # ///////////////////////////////////////////////////////////////// + # Constructor + # ///////////////////////////////////////////////////////////////// + + def __init__( + self, + root, + fileids, + columntypes, + chunk_types=None, + root_label="S", + pos_in_tree=False, + srl_includes_roleset=True, + encoding="utf8", + tree_class=Tree, + tagset=None, + separator=None, + ): + for columntype in columntypes: + if columntype not in self.COLUMN_TYPES: + raise ValueError("Bad column type %r" % columntype) + if isinstance(chunk_types, str): + chunk_types = [chunk_types] + self._chunk_types = chunk_types + self._colmap = {c: i for (i, c) in enumerate(columntypes)} + self._pos_in_tree = pos_in_tree + self._root_label = root_label # for chunks + self._srl_includes_roleset = srl_includes_roleset + self._tree_class = tree_class + CorpusReader.__init__(self, root, fileids, encoding) + self._tagset = tagset + self.sep = separator + + # ///////////////////////////////////////////////////////////////// + # Data Access Methods + # ///////////////////////////////////////////////////////////////// + + def words(self, fileids=None): + self._require(self.WORDS) + return LazyConcatenation(LazyMap(self._get_words, self._grids(fileids))) + + def sents(self, fileids=None): + self._require(self.WORDS) + return LazyMap(self._get_words, self._grids(fileids)) + + def tagged_words(self, fileids=None, tagset=None): + self._require(self.WORDS, self.POS) + + def get_tagged_words(grid): + return self._get_tagged_words(grid, tagset) + + return LazyConcatenation(LazyMap(get_tagged_words, self._grids(fileids))) + + def tagged_sents(self, fileids=None, tagset=None): + self._require(self.WORDS, self.POS) + + def get_tagged_words(grid): + return self._get_tagged_words(grid, tagset) + + return LazyMap(get_tagged_words, self._grids(fileids)) + + def chunked_words(self, fileids=None, chunk_types=None, tagset=None): + self._require(self.WORDS, self.POS, self.CHUNK) + if chunk_types is None: + chunk_types = self._chunk_types + + def get_chunked_words(grid): # capture chunk_types as local var + return self._get_chunked_words(grid, chunk_types, tagset) + + return LazyConcatenation(LazyMap(get_chunked_words, self._grids(fileids))) + + def chunked_sents(self, fileids=None, chunk_types=None, tagset=None): + self._require(self.WORDS, self.POS, self.CHUNK) + if chunk_types is None: + chunk_types = self._chunk_types + + def get_chunked_words(grid): # capture chunk_types as local var + return self._get_chunked_words(grid, chunk_types, tagset) + + return LazyMap(get_chunked_words, self._grids(fileids)) + + def parsed_sents(self, fileids=None, pos_in_tree=None, tagset=None): + self._require(self.WORDS, self.POS, self.TREE) + if pos_in_tree is None: + pos_in_tree = self._pos_in_tree + + def get_parsed_sent(grid): # capture pos_in_tree as local var + return self._get_parsed_sent(grid, 
pos_in_tree, tagset) + + return LazyMap(get_parsed_sent, self._grids(fileids)) + + def srl_spans(self, fileids=None): + self._require(self.SRL) + return LazyMap(self._get_srl_spans, self._grids(fileids)) + + def srl_instances(self, fileids=None, pos_in_tree=None, flatten=True): + self._require(self.WORDS, self.POS, self.TREE, self.SRL) + if pos_in_tree is None: + pos_in_tree = self._pos_in_tree + + def get_srl_instances(grid): # capture pos_in_tree as local var + return self._get_srl_instances(grid, pos_in_tree) + + result = LazyMap(get_srl_instances, self._grids(fileids)) + if flatten: + result = LazyConcatenation(result) + return result + + def iob_words(self, fileids=None, tagset=None): + """ + :return: a list of word/tag/IOB tuples + :rtype: list(tuple) + :param fileids: the list of fileids that make up this corpus + :type fileids: None or str or list + """ + self._require(self.WORDS, self.POS, self.CHUNK) + + def get_iob_words(grid): + return self._get_iob_words(grid, tagset) + + return LazyConcatenation(LazyMap(get_iob_words, self._grids(fileids))) + + def iob_sents(self, fileids=None, tagset=None): + """ + :return: a list of lists of word/tag/IOB tuples + :rtype: list(list) + :param fileids: the list of fileids that make up this corpus + :type fileids: None or str or list + """ + self._require(self.WORDS, self.POS, self.CHUNK) + + def get_iob_words(grid): + return self._get_iob_words(grid, tagset) + + return LazyMap(get_iob_words, self._grids(fileids)) + + # ///////////////////////////////////////////////////////////////// + # Grid Reading + # ///////////////////////////////////////////////////////////////// + + def _grids(self, fileids=None): + # n.b.: we could cache the object returned here (keyed on + # fileids), which would let us reuse the same corpus view for + # different things (eg srl and parse trees). + return concat( + [ + StreamBackedCorpusView(fileid, self._read_grid_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_grid_block(self, stream): + grids = [] + for block in read_blankline_block(stream): + block = block.strip() + if not block: + continue + + grid = [line.split(self.sep) for line in block.split("\n")] + + # If there's a docstart row, then discard. ([xx] eventually it + # would be good to actually use it) + if grid[0][self._colmap.get("words", 0)] == "-DOCSTART-": + del grid[0] + + # Check that the grid is consistent. + for row in grid: + if len(row) != len(grid[0]): + raise ValueError("Inconsistent number of columns:\n%s" % block) + grids.append(grid) + return grids + + # ///////////////////////////////////////////////////////////////// + # Transforms + # ///////////////////////////////////////////////////////////////// + # given a grid, transform it into some representation (e.g., + # a list of words or a parse tree). 
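+    #
+    # As an illustration (not from any particular corpus file), a three-column
+    # "words pos chunk" corpus yields grids such as
+    #
+    #     [['He', 'PRP', 'B-NP'],
+    #      ['runs', 'VBZ', 'B-VP'],
+    #      ['.', '.', 'O']]
+    #
+    # from which _get_words() extracts ['He', 'runs', '.'] and
+    # _get_chunked_words() builds
+    # Tree('S', [Tree('NP', [('He', 'PRP')]), Tree('VP', [('runs', 'VBZ')]), ('.', '.')]).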
+ + def _get_words(self, grid): + return self._get_column(grid, self._colmap["words"]) + + def _get_tagged_words(self, grid, tagset=None): + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + return list(zip(self._get_column(grid, self._colmap["words"]), pos_tags)) + + def _get_iob_words(self, grid, tagset=None): + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + return list( + zip( + self._get_column(grid, self._colmap["words"]), + pos_tags, + self._get_column(grid, self._colmap["chunk"]), + ) + ) + + def _get_chunked_words(self, grid, chunk_types, tagset=None): + # n.b.: this method is very similar to conllstr2tree. + words = self._get_column(grid, self._colmap["words"]) + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + chunk_tags = self._get_column(grid, self._colmap["chunk"]) + + stack = [Tree(self._root_label, [])] + + for (word, pos_tag, chunk_tag) in zip(words, pos_tags, chunk_tags): + if chunk_tag == "O": + state, chunk_type = "O", "" + else: + (state, chunk_type) = chunk_tag.split("-") + # If it's a chunk we don't care about, treat it as O. + if chunk_types is not None and chunk_type not in chunk_types: + state = "O" + # Treat a mismatching I like a B. + if state == "I" and chunk_type != stack[-1].label(): + state = "B" + # For B or I: close any open chunks + if state in "BO" and len(stack) == 2: + stack.pop() + # For B: start a new chunk. + if state == "B": + new_chunk = Tree(chunk_type, []) + stack[-1].append(new_chunk) + stack.append(new_chunk) + # Add the word token. + stack[-1].append((word, pos_tag)) + + return stack[0] + + def _get_parsed_sent(self, grid, pos_in_tree, tagset=None): + words = self._get_column(grid, self._colmap["words"]) + pos_tags = self._get_column(grid, self._colmap["pos"]) + if tagset and tagset != self._tagset: + pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags] + parse_tags = self._get_column(grid, self._colmap["tree"]) + + treestr = "" + for (word, pos_tag, parse_tag) in zip(words, pos_tags, parse_tags): + if word == "(": + word = "-LRB-" + if word == ")": + word = "-RRB-" + if pos_tag == "(": + pos_tag = "-LRB-" + if pos_tag == ")": + pos_tag = "-RRB-" + (left, right) = parse_tag.split("*") + right = right.count(")") * ")" # only keep ')'. + treestr += f"{left} ({pos_tag} {word}) {right}" + try: + tree = self._tree_class.fromstring(treestr) + except (ValueError, IndexError): + tree = self._tree_class.fromstring(f"({self._root_label} {treestr})") + + if not pos_in_tree: + for subtree in tree.subtrees(): + for i, child in enumerate(subtree): + if ( + isinstance(child, Tree) + and len(child) == 1 + and isinstance(child[0], str) + ): + subtree[i] = (child[0], child.label()) + + return tree + + def _get_srl_spans(self, grid): + """ + list of list of (start, end), tag) tuples + """ + if self._srl_includes_roleset: + predicates = self._get_column(grid, self._colmap["srl"] + 1) + start_col = self._colmap["srl"] + 2 + else: + predicates = self._get_column(grid, self._colmap["srl"]) + start_col = self._colmap["srl"] + 1 + + # Count how many predicates there are. This tells us how many + # columns to expect for SRL data. 
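+        # (Note: each SRL column uses the CoNLL bracketed-star notation, e.g.
+        # "(A0*", "*", "*)" for a three-word A0 span and "(V*)" for a one-word
+        # predicate; the loop below recovers (start, end) spans by pushing a tag
+        # on "(" and popping it on ")".)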
+ num_preds = len([p for p in predicates if p != "-"]) + + spanlists = [] + for i in range(num_preds): + col = self._get_column(grid, start_col + i) + spanlist = [] + stack = [] + for wordnum, srl_tag in enumerate(col): + (left, right) = srl_tag.split("*") + for tag in left.split("("): + if tag: + stack.append((tag, wordnum)) + for i in range(right.count(")")): + (tag, start) = stack.pop() + spanlist.append(((start, wordnum + 1), tag)) + spanlists.append(spanlist) + + return spanlists + + def _get_srl_instances(self, grid, pos_in_tree): + tree = self._get_parsed_sent(grid, pos_in_tree) + spanlists = self._get_srl_spans(grid) + if self._srl_includes_roleset: + predicates = self._get_column(grid, self._colmap["srl"] + 1) + rolesets = self._get_column(grid, self._colmap["srl"]) + else: + predicates = self._get_column(grid, self._colmap["srl"]) + rolesets = [None] * len(predicates) + + instances = ConllSRLInstanceList(tree) + for wordnum, predicate in enumerate(predicates): + if predicate == "-": + continue + # Decide which spanlist to use. Don't assume that they're + # sorted in the same order as the predicates (even though + # they usually are). + for spanlist in spanlists: + for (start, end), tag in spanlist: + if wordnum in range(start, end) and tag in ("V", "C-V"): + break + else: + continue + break + else: + raise ValueError("No srl column found for %r" % predicate) + instances.append( + ConllSRLInstance(tree, wordnum, predicate, rolesets[wordnum], spanlist) + ) + + return instances + + # ///////////////////////////////////////////////////////////////// + # Helper Methods + # ///////////////////////////////////////////////////////////////// + + def _require(self, *columntypes): + for columntype in columntypes: + if columntype not in self._colmap: + raise ValueError( + "This corpus does not contain a %s " "column." % columntype + ) + + @staticmethod + def _get_column(grid, column_index): + return [grid[i][column_index] for i in range(len(grid))] + + +class ConllSRLInstance: + """ + An SRL instance from a CoNLL corpus, which identifies and + providing labels for the arguments of a single verb. + """ + + # [xx] add inst.core_arguments, inst.argm_arguments? + + def __init__(self, tree, verb_head, verb_stem, roleset, tagged_spans): + self.verb = [] + """A list of the word indices of the words that compose the + verb whose arguments are identified by this instance. + This will contain multiple word indices when multi-word + verbs are used (e.g. 'turn on').""" + + self.verb_head = verb_head + """The word index of the head word of the verb whose arguments + are identified by this instance. E.g., for a sentence that + uses the verb 'turn on,' ``verb_head`` will be the word index + of the word 'turn'.""" + + self.verb_stem = verb_stem + + self.roleset = roleset + + self.arguments = [] + """A list of ``(argspan, argid)`` tuples, specifying the location + and type for each of the arguments identified by this + instance. ``argspan`` is a tuple ``start, end``, indicating + that the argument consists of the ``words[start:end]``.""" + + self.tagged_spans = tagged_spans + """A list of ``(span, id)`` tuples, specifying the location and + type for each of the arguments, as well as the verb pieces, + that make up this instance.""" + + self.tree = tree + """The parse tree for the sentence containing this instance.""" + + self.words = tree.leaves() + """A list of the words in the sentence containing this + instance.""" + + # Fill in the self.verb and self.arguments values. 
+ for (start, end), tag in tagged_spans: + if tag in ("V", "C-V"): + self.verb += list(range(start, end)) + else: + self.arguments.append(((start, end), tag)) + + def __repr__(self): + # Originally, its: + ##plural = 's' if len(self.arguments) != 1 else '' + plural = "s" if len(self.arguments) != 1 else "" + return "" % ( + (self.verb_stem, len(self.arguments), plural) + ) + + def pprint(self): + verbstr = " ".join(self.words[i][0] for i in self.verb) + hdr = f"SRL for {verbstr!r} (stem={self.verb_stem!r}):\n" + s = "" + for i, word in enumerate(self.words): + if isinstance(word, tuple): + word = word[0] + for (start, end), argid in self.arguments: + if i == start: + s += "[%s " % argid + if i == end: + s += "] " + if i in self.verb: + word = "<<%s>>" % word + s += word + " " + return hdr + textwrap.fill( + s.replace(" ]", "]"), initial_indent=" ", subsequent_indent=" " + ) + + +class ConllSRLInstanceList(list): + """ + Set of instances for a single sentence + """ + + def __init__(self, tree, instances=()): + self.tree = tree + list.__init__(self, instances) + + def __str__(self): + return self.pprint() + + def pprint(self, include_tree=False): + # Sanity check: trees should be the same + for inst in self: + if inst.tree != self.tree: + raise ValueError("Tree mismatch!") + + # If desired, add trees: + if include_tree: + words = self.tree.leaves() + pos = [None] * len(words) + synt = ["*"] * len(words) + self._tree2conll(self.tree, 0, words, pos, synt) + + s = "" + for i in range(len(words)): + # optional tree columns + if include_tree: + s += "%-20s " % words[i] + s += "%-8s " % pos[i] + s += "%15s*%-8s " % tuple(synt[i].split("*")) + + # verb head column + for inst in self: + if i == inst.verb_head: + s += "%-20s " % inst.verb_stem + break + else: + s += "%-20s " % "-" + # Remaining columns: self + for inst in self: + argstr = "*" + for (start, end), argid in inst.tagged_spans: + if i == start: + argstr = f"({argid}{argstr}" + if i == (end - 1): + argstr += ")" + s += "%-12s " % argstr + s += "\n" + return s + + def _tree2conll(self, tree, wordnum, words, pos, synt): + assert isinstance(tree, Tree) + if len(tree) == 1 and isinstance(tree[0], str): + pos[wordnum] = tree.label() + assert words[wordnum] == tree[0] + return wordnum + 1 + elif len(tree) == 1 and isinstance(tree[0], tuple): + assert len(tree[0]) == 2 + pos[wordnum], pos[wordnum] = tree[0] + return wordnum + 1 + else: + synt[wordnum] = f"({tree.label()}{synt[wordnum]}" + for child in tree: + wordnum = self._tree2conll(child, wordnum, words, pos, synt) + synt[wordnum - 1] += ")" + return wordnum + + +class ConllChunkCorpusReader(ConllCorpusReader): + """ + A ConllCorpusReader whose data file contains three columns: words, + pos, and chunk. 
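+
+    A usage sketch (the root path and file pattern are placeholders; CoNLL-2000
+    style data with NP/VP/PP chunks is assumed):
+
+        >>> from nltk.corpus.reader import ConllChunkCorpusReader  # doctest: +SKIP
+        >>> reader = ConllChunkCorpusReader(
+        ...     "/path/to/conll2000", r".*\.txt", chunk_types=("NP", "VP", "PP")
+        ... )  # doctest: +SKIP
+        >>> reader.chunked_sents()  # doctest: +SKIP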
+ """ + + def __init__( + self, root, fileids, chunk_types, encoding="utf8", tagset=None, separator=None + ): + ConllCorpusReader.__init__( + self, + root, + fileids, + ("words", "pos", "chunk"), + chunk_types=chunk_types, + encoding=encoding, + tagset=tagset, + separator=separator, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..87f56d4b5410a6dc419cd58538d3f4499478a205 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py @@ -0,0 +1,115 @@ +# Natural Language Toolkit: Dependency Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Kepa Sarasola +# Iker Manterola +# +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.parse import DependencyGraph +from nltk.tokenize import * + + +class DependencyCorpusReader(SyntaxCorpusReader): + def __init__( + self, + root, + fileids, + encoding="utf8", + word_tokenizer=TabTokenizer(), + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + para_block_reader=read_blankline_block, + ): + SyntaxCorpusReader.__init__(self, root, fileids, encoding) + + ######################################################### + + def words(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, False, False, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def tagged_words(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, True, False, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def sents(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, False, True, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def tagged_sents(self, fileids=None): + return concat( + [ + DependencyCorpusView(fileid, True, True, False, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + + def parsed_sents(self, fileids=None): + sents = concat( + [ + DependencyCorpusView(fileid, False, True, True, encoding=enc) + for fileid, enc in self.abspaths(fileids, include_encoding=True) + ] + ) + return [DependencyGraph(sent) for sent in sents] + + +class DependencyCorpusView(StreamBackedCorpusView): + _DOCSTART = "-DOCSTART- -DOCSTART- O\n" # dokumentu hasiera definitzen da + + def __init__( + self, + corpus_file, + tagged, + group_by_sent, + dependencies, + chunk_types=None, + encoding="utf8", + ): + self._tagged = tagged + self._dependencies = dependencies + self._group_by_sent = group_by_sent + self._chunk_types = chunk_types + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + # Read the next sentence. + sent = read_blankline_block(stream)[0].strip() + # Strip off the docstart marker, if present. 
+ if sent.startswith(self._DOCSTART): + sent = sent[len(self._DOCSTART) :].lstrip() + + # extract word and tag from any of the formats + if not self._dependencies: + lines = [line.split("\t") for line in sent.split("\n")] + if len(lines[0]) == 3 or len(lines[0]) == 4: + sent = [(line[0], line[1]) for line in lines] + elif len(lines[0]) == 10: + sent = [(line[1], line[4]) for line in lines] + else: + raise ValueError("Unexpected number of fields in dependency tree file") + + # discard tags if they weren't requested + if not self._tagged: + sent = [word for (word, tag) in sent] + + # Return the result. + if self._group_by_sent: + return [sent] + else: + return list(sent) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py new file mode 100644 index 0000000000000000000000000000000000000000..24f83cfaebcf9a583a33806136f8788b112aaf95 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py @@ -0,0 +1,116 @@ +# Natural Language Toolkit: IEER Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the Information Extraction and Entity Recognition Corpus. + +NIST 1999 Information Extraction: Entity Recognition Evaluation +https://www.itl.nist.gov/iad/894.01/tests/ie-er/er_99/er_99.htm + +This corpus contains the NEWSWIRE development test data for the +NIST 1999 IE-ER Evaluation. The files were taken from the +subdirectory: ``/ie_er_99/english/devtest/newswire/*.ref.nwt`` +and filenames were shortened. + +The corpus contains the following files: APW_19980314, APW_19980424, +APW_19980429, NYT_19980315, NYT_19980403, and NYT_19980407. +""" + +import nltk +from nltk.corpus.reader.api import * + +#: A dictionary whose keys are the names of documents in this corpus; +#: and whose values are descriptions of those documents' contents. +titles = { + "APW_19980314": "Associated Press Weekly, 14 March 1998", + "APW_19980424": "Associated Press Weekly, 24 April 1998", + "APW_19980429": "Associated Press Weekly, 29 April 1998", + "NYT_19980315": "New York Times, 15 March 1998", + "NYT_19980403": "New York Times, 3 April 1998", + "NYT_19980407": "New York Times, 7 April 1998", +} + +#: A list of all documents in this corpus. +documents = sorted(titles) + + +class IEERDocument: + def __init__(self, text, docno=None, doctype=None, date_time=None, headline=""): + self.text = text + self.docno = docno + self.doctype = doctype + self.date_time = date_time + self.headline = headline + + def __repr__(self): + if self.headline: + headline = " ".join(self.headline.leaves()) + else: + headline = ( + " ".join([w for w in self.text.leaves() if w[:1] != "<"][:12]) + "..." 
+            )
+        if self.docno is not None:
+            return f"<IEERDocument {self.docno}: {headline!r}>"
+        else:
+            return "<IEERDocument: %r>" % headline
+
+
+class IEERCorpusReader(CorpusReader):
+    """ """
+
+    def docs(self, fileids=None):
+        return concat(
+            [
+                StreamBackedCorpusView(fileid, self._read_block, encoding=enc)
+                for (fileid, enc) in self.abspaths(fileids, True)
+            ]
+        )
+
+    def parsed_docs(self, fileids=None):
+        return concat(
+            [
+                StreamBackedCorpusView(fileid, self._read_parsed_block, encoding=enc)
+                for (fileid, enc) in self.abspaths(fileids, True)
+            ]
+        )
+
+    def _read_parsed_block(self, stream):
+        # TODO: figure out why empty documents are being returned
+        return [
+            self._parse(doc)
+            for doc in self._read_block(stream)
+            if self._parse(doc).docno is not None
+        ]
+
+    def _parse(self, doc):
+        val = nltk.chunk.ieerstr2tree(doc, root_label="DOCUMENT")
+        if isinstance(val, dict):
+            return IEERDocument(**val)
+        else:
+            return IEERDocument(val)
+
+    def _read_block(self, stream):
+        out = []
+        # Skip any preamble.
+        while True:
+            line = stream.readline()
+            if not line:
+                break
+            if line.strip() == "<DOC>":
+                break
+        out.append(line)
+        # Read the document
+        while True:
+            line = stream.readline()
+            if not line:
+                break
+            out.append(line)
+            if line.strip() == "</DOC>":
+                break
+        # Return the document
+        return ["\n".join(out)]
diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/indian.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/indian.py
new file mode 100644
index 0000000000000000000000000000000000000000..23c6434c34b38dcb4e0227851afb2aefde2fd090
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/indian.py
@@ -0,0 +1,93 @@
+# Natural Language Toolkit: Indian Language POS-Tagged Corpus Reader
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Steven Bird
+#         Edward Loper
+# URL:
+# For license information, see LICENSE.TXT
+
+"""
+Indian Language POS-Tagged Corpus
+Collected by A Kumaran, Microsoft Research, India
+Distributed with permission
+
+Contents:
+  - Bangla: IIT Kharagpur
+  - Hindi: Microsoft Research India
+  - Marathi: IIT Bombay
+  - Telugu: IIIT Hyderabad
+"""
+
+from nltk.corpus.reader.api import *
+from nltk.corpus.reader.util import *
+from nltk.tag import map_tag, str2tuple
+
+
+class IndianCorpusReader(CorpusReader):
+    """
+    List of words, one per line. Blank lines are ignored.
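+
+    A usage sketch via the bundled corpus loader (the fileid is illustrative;
+    ``nltk.corpus.indian.fileids()`` lists what is actually installed):
+
+        >>> from nltk.corpus import indian
+        >>> indian.tagged_words(indian.fileids()[0])  # doctest: +SKIP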
+ """ + + def words(self, fileids=None): + return concat( + [ + IndianCorpusView(fileid, enc, False, False) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + IndianCorpusView(fileid, enc, True, False, tag_mapping_function) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + return concat( + [ + IndianCorpusView(fileid, enc, False, True) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + IndianCorpusView(fileid, enc, True, True, tag_mapping_function) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class IndianCorpusView(StreamBackedCorpusView): + def __init__( + self, corpus_file, encoding, tagged, group_by_sent, tag_mapping_function=None + ): + self._tagged = tagged + self._group_by_sent = group_by_sent + self._tag_mapping_function = tag_mapping_function + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + line = stream.readline() + if line.startswith("<"): + return [] + sent = [str2tuple(word, sep="_") for word in line.split()] + if self._tag_mapping_function: + sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent] + if not self._tagged: + sent = [w for (w, t) in sent] + if self._group_by_sent: + return [sent] + else: + return sent diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py new file mode 100644 index 0000000000000000000000000000000000000000..d2d16c90f4edf380658af969a0488c28d5f1b24a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py @@ -0,0 +1,356 @@ +# Natural Language Toolkit: IPI PAN Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Konrad Goluchowski +# URL: +# For license information, see LICENSE.TXT + +import functools + +from nltk.corpus.reader.api import CorpusReader +from nltk.corpus.reader.util import StreamBackedCorpusView, concat + + +def _parse_args(fun): + @functools.wraps(fun) + def decorator(self, fileids=None, **kwargs): + kwargs.pop("tags", None) + if not fileids: + fileids = self.fileids() + return fun(self, fileids, **kwargs) + + return decorator + + +class IPIPANCorpusReader(CorpusReader): + """ + Corpus reader designed to work with corpus created by IPI PAN. + See http://korpus.pl/en/ for more details about IPI PAN corpus. + + The corpus includes information about text domain, channel and categories. + You can access possible values using ``domains()``, ``channels()`` and + ``categories()``. You can use also this metadata to filter files, e.g.: + ``fileids(channel='prasa')``, ``fileids(categories='publicystyczny')``. + + The reader supports methods: words, sents, paras and their tagged versions. + You can get part of speech instead of full tag by giving "simplify_tags=True" + parameter, e.g.: ``tagged_sents(simplify_tags=True)``. + + Also you can get all tags disambiguated tags specifying parameter + "one_tag=False", e.g.: ``tagged_paras(one_tag=False)``. 
+ + You can get all tags that were assigned by a morphological analyzer specifying + parameter "disamb_only=False", e.g. ``tagged_words(disamb_only=False)``. + + The IPIPAN Corpus contains tags indicating if there is a space between two + tokens. To add special "no space" markers, you should specify parameter + "append_no_space=True", e.g. ``tagged_words(append_no_space=True)``. + As a result in place where there should be no space between two tokens new + pair ('', 'no-space') will be inserted (for tagged data) and just '' for + methods without tags. + + The corpus reader can also try to append spaces between words. To enable this + option, specify parameter "append_space=True", e.g. ``words(append_space=True)``. + As a result either ' ' or (' ', 'space') will be inserted between tokens. + + By default, xml entities like " and & are replaced by corresponding + characters. You can turn off this feature, specifying parameter + "replace_xmlentities=False", e.g. ``words(replace_xmlentities=False)``. + """ + + def __init__(self, root, fileids): + CorpusReader.__init__(self, root, fileids, None, None) + + def channels(self, fileids=None): + if not fileids: + fileids = self.fileids() + return self._parse_header(fileids, "channel") + + def domains(self, fileids=None): + if not fileids: + fileids = self.fileids() + return self._parse_header(fileids, "domain") + + def categories(self, fileids=None): + if not fileids: + fileids = self.fileids() + return [ + self._map_category(cat) for cat in self._parse_header(fileids, "keyTerm") + ] + + def fileids(self, channels=None, domains=None, categories=None): + if channels is not None and domains is not None and categories is not None: + raise ValueError( + "You can specify only one of channels, domains " + "and categories parameter at once" + ) + if channels is None and domains is None and categories is None: + return CorpusReader.fileids(self) + if isinstance(channels, str): + channels = [channels] + if isinstance(domains, str): + domains = [domains] + if isinstance(categories, str): + categories = [categories] + if channels: + return self._list_morph_files_by("channel", channels) + elif domains: + return self._list_morph_files_by("domain", domains) + else: + return self._list_morph_files_by( + "keyTerm", categories, map=self._map_category + ) + + @_parse_args + def sents(self, fileids=None, **kwargs): + return concat( + [ + self._view( + fileid, mode=IPIPANCorpusView.SENTS_MODE, tags=False, **kwargs + ) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def paras(self, fileids=None, **kwargs): + return concat( + [ + self._view( + fileid, mode=IPIPANCorpusView.PARAS_MODE, tags=False, **kwargs + ) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def words(self, fileids=None, **kwargs): + return concat( + [ + self._view(fileid, tags=False, **kwargs) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def tagged_sents(self, fileids=None, **kwargs): + return concat( + [ + self._view(fileid, mode=IPIPANCorpusView.SENTS_MODE, **kwargs) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def tagged_paras(self, fileids=None, **kwargs): + return concat( + [ + self._view(fileid, mode=IPIPANCorpusView.PARAS_MODE, **kwargs) + for fileid in self._list_morph_files(fileids) + ] + ) + + @_parse_args + def tagged_words(self, fileids=None, **kwargs): + return concat( + [self._view(fileid, **kwargs) for fileid in self._list_morph_files(fileids)] + ) + + def 
_list_morph_files(self, fileids): + return [f for f in self.abspaths(fileids)] + + def _list_header_files(self, fileids): + return [ + f.replace("morph.xml", "header.xml") + for f in self._list_morph_files(fileids) + ] + + def _parse_header(self, fileids, tag): + values = set() + for f in self._list_header_files(fileids): + values_list = self._get_tag(f, tag) + for v in values_list: + values.add(v) + return list(values) + + def _list_morph_files_by(self, tag, values, map=None): + fileids = self.fileids() + ret_fileids = set() + for f in fileids: + fp = self.abspath(f).replace("morph.xml", "header.xml") + values_list = self._get_tag(fp, tag) + for value in values_list: + if map is not None: + value = map(value) + if value in values: + ret_fileids.add(f) + return list(ret_fileids) + + def _get_tag(self, f, tag): + tags = [] + with open(f) as infile: + header = infile.read() + tag_end = 0 + while True: + tag_pos = header.find("<" + tag, tag_end) + if tag_pos < 0: + return tags + tag_end = header.find("", tag_pos) + tags.append(header[tag_pos + len(tag) + 2 : tag_end]) + + def _map_category(self, cat): + pos = cat.find(">") + if pos == -1: + return cat + else: + return cat[pos + 1 :] + + def _view(self, filename, **kwargs): + tags = kwargs.pop("tags", True) + mode = kwargs.pop("mode", 0) + simplify_tags = kwargs.pop("simplify_tags", False) + one_tag = kwargs.pop("one_tag", True) + disamb_only = kwargs.pop("disamb_only", True) + append_no_space = kwargs.pop("append_no_space", False) + append_space = kwargs.pop("append_space", False) + replace_xmlentities = kwargs.pop("replace_xmlentities", True) + + if len(kwargs) > 0: + raise ValueError("Unexpected arguments: %s" % kwargs.keys()) + if not one_tag and not disamb_only: + raise ValueError( + "You cannot specify both one_tag=False and " "disamb_only=False" + ) + if not tags and (simplify_tags or not one_tag or not disamb_only): + raise ValueError( + "You cannot specify simplify_tags, one_tag or " + "disamb_only with functions other than tagged_*" + ) + + return IPIPANCorpusView( + filename, + tags=tags, + mode=mode, + simplify_tags=simplify_tags, + one_tag=one_tag, + disamb_only=disamb_only, + append_no_space=append_no_space, + append_space=append_space, + replace_xmlentities=replace_xmlentities, + ) + + +class IPIPANCorpusView(StreamBackedCorpusView): + + WORDS_MODE = 0 + SENTS_MODE = 1 + PARAS_MODE = 2 + + def __init__(self, filename, startpos=0, **kwargs): + StreamBackedCorpusView.__init__(self, filename, None, startpos, None) + self.in_sentence = False + self.position = 0 + + self.show_tags = kwargs.pop("tags", True) + self.disamb_only = kwargs.pop("disamb_only", True) + self.mode = kwargs.pop("mode", IPIPANCorpusView.WORDS_MODE) + self.simplify_tags = kwargs.pop("simplify_tags", False) + self.one_tag = kwargs.pop("one_tag", True) + self.append_no_space = kwargs.pop("append_no_space", False) + self.append_space = kwargs.pop("append_space", False) + self.replace_xmlentities = kwargs.pop("replace_xmlentities", True) + + def read_block(self, stream): + sentence = [] + sentences = [] + space = False + no_space = False + + tags = set() + + lines = self._read_data(stream) + + while True: + + # we may have only part of last line + if len(lines) <= 1: + self._seek(stream) + lines = self._read_data(stream) + + if lines == [""]: + assert not sentences + return [] + + line = lines.pop() + self.position += len(line) + 1 + + if line.startswith('"): + if self.append_space: + no_space = True + if self.append_no_space: + if self.show_tags: + 
sentence.append(("", "no-space")) + else: + sentence.append("") + elif line.startswith(" +# URL: +# For license information, see LICENSE.txt + +import re +from collections import defaultdict +from functools import reduce + +from nltk.corpus.reader import CorpusReader + + +class LinThesaurusCorpusReader(CorpusReader): + """Wrapper for the LISP-formatted thesauruses distributed by Dekang Lin.""" + + # Compiled regular expression for extracting the key from the first line of each + # thesaurus entry + _key_re = re.compile(r'\("?([^"]+)"? \(desc [0-9.]+\).+') + + @staticmethod + def __defaultdict_factory(): + """Factory for creating defaultdict of defaultdict(dict)s""" + return defaultdict(dict) + + def __init__(self, root, badscore=0.0): + """ + Initialize the thesaurus. + + :param root: root directory containing thesaurus LISP files + :type root: C{string} + :param badscore: the score to give to words which do not appear in each other's sets of synonyms + :type badscore: C{float} + """ + + super().__init__(root, r"sim[A-Z]\.lsp") + self._thesaurus = defaultdict(LinThesaurusCorpusReader.__defaultdict_factory) + self._badscore = badscore + for path, encoding, fileid in self.abspaths( + include_encoding=True, include_fileid=True + ): + with open(path) as lin_file: + first = True + for line in lin_file: + line = line.strip() + # Start of entry + if first: + key = LinThesaurusCorpusReader._key_re.sub(r"\1", line) + first = False + # End of entry + elif line == "))": + first = True + # Lines with pairs of ngrams and scores + else: + split_line = line.split("\t") + if len(split_line) == 2: + ngram, score = split_line + self._thesaurus[fileid][key][ngram.strip('"')] = float( + score + ) + + def similarity(self, ngram1, ngram2, fileid=None): + """ + Returns the similarity score for two ngrams. + + :param ngram1: first ngram to compare + :type ngram1: C{string} + :param ngram2: second ngram to compare + :type ngram2: C{string} + :param fileid: thesaurus fileid to search in. If None, search all fileids. + :type fileid: C{string} + :return: If fileid is specified, just the score for the two ngrams; otherwise, + list of tuples of fileids and scores. + """ + # Entries don't contain themselves, so make sure similarity between item and itself is 1.0 + if ngram1 == ngram2: + if fileid: + return 1.0 + else: + return [(fid, 1.0) for fid in self._fileids] + else: + if fileid: + return ( + self._thesaurus[fileid][ngram1][ngram2] + if ngram2 in self._thesaurus[fileid][ngram1] + else self._badscore + ) + else: + return [ + ( + fid, + ( + self._thesaurus[fid][ngram1][ngram2] + if ngram2 in self._thesaurus[fid][ngram1] + else self._badscore + ), + ) + for fid in self._fileids + ] + + def scored_synonyms(self, ngram, fileid=None): + """ + Returns a list of scored synonyms (tuples of synonyms and scores) for the current ngram + + :param ngram: ngram to lookup + :type ngram: C{string} + :param fileid: thesaurus fileid to search in. If None, search all fileids. + :type fileid: C{string} + :return: If fileid is specified, list of tuples of scores and synonyms; otherwise, + list of tuples of fileids and lists, where inner lists consist of tuples of + scores and synonyms. + """ + if fileid: + return self._thesaurus[fileid][ngram].items() + else: + return [ + (fileid, self._thesaurus[fileid][ngram].items()) + for fileid in self._fileids + ] + + def synonyms(self, ngram, fileid=None): + """ + Returns a list of synonyms for the current ngram. 
+ + :param ngram: ngram to lookup + :type ngram: C{string} + :param fileid: thesaurus fileid to search in. If None, search all fileids. + :type fileid: C{string} + :return: If fileid is specified, list of synonyms; otherwise, list of tuples of fileids and + lists, where inner lists contain synonyms. + """ + if fileid: + return self._thesaurus[fileid][ngram].keys() + else: + return [ + (fileid, self._thesaurus[fileid][ngram].keys()) + for fileid in self._fileids + ] + + def __contains__(self, ngram): + """ + Determines whether or not the given ngram is in the thesaurus. + + :param ngram: ngram to lookup + :type ngram: C{string} + :return: whether the given ngram is in the thesaurus. + """ + return reduce( + lambda accum, fileid: accum or (ngram in self._thesaurus[fileid]), + self._fileids, + False, + ) + + +###################################################################### +# Demo +###################################################################### + + +def demo(): + from nltk.corpus import lin_thesaurus as thes + + word1 = "business" + word2 = "enterprise" + print("Getting synonyms for " + word1) + print(thes.synonyms(word1)) + + print("Getting scored synonyms for " + word1) + print(thes.scored_synonyms(word1)) + + print("Getting synonyms from simN.lsp (noun subsection) for " + word1) + print(thes.synonyms(word1, fileid="simN.lsp")) + + print("Getting synonyms from simN.lsp (noun subsection) for " + word1) + print(thes.synonyms(word1, fileid="simN.lsp")) + + print(f"Similarity score for {word1} and {word2}:") + print(thes.similarity(word1, word2)) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/markdown.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/markdown.py new file mode 100644 index 0000000000000000000000000000000000000000..8df4f924e25426dbe30ef2484f3a0cb4cb1a1740 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/markdown.py @@ -0,0 +1,342 @@ +from collections import namedtuple +from functools import partial, wraps + +from nltk.corpus.reader.api import CategorizedCorpusReader +from nltk.corpus.reader.plaintext import PlaintextCorpusReader +from nltk.corpus.reader.util import concat, read_blankline_block +from nltk.tokenize import blankline_tokenize, sent_tokenize, word_tokenize + + +def comma_separated_string_args(func): + """ + A decorator that allows a function to be called with + a single string of comma-separated values which become + individual function arguments. + """ + + @wraps(func) + def wrapper(*args, **kwargs): + _args = list() + for arg in args: + if isinstance(arg, str): + _args.append({part.strip() for part in arg.split(",")}) + elif isinstance(arg, list): + _args.append(set(arg)) + else: + _args.append(arg) + for name, value in kwargs.items(): + if isinstance(value, str): + kwargs[name] = {part.strip() for part in value.split(",")} + return func(*_args, **kwargs) + + return wrapper + + +def read_parse_blankline_block(stream, parser): + block = read_blankline_block(stream) + if block: + return [parser.render(block[0])] + return block + + +class MarkdownBlock: + def __init__(self, content): + self.content = content + self.truncate_at = 16 + + def __repr__(self): + return f"{self.__class__.__name__}(content={repr(str(self))})" + + def __str__(self): + return ( + f"{self.content[:self.truncate_at]}" + f"{'...' 
if len(self.content) > self.truncate_at else ''}" + ) + + @property + def raw(self): + return self.content + + @property + def words(self): + return word_tokenize(self.content) + + @property + def sents(self): + return [word_tokenize(sent) for sent in sent_tokenize(self.content)] + + @property + def paras(self): + return [ + [word_tokenize(sent) for sent in sent_tokenize(para)] + for para in blankline_tokenize(self.content) + ] + + +class CodeBlock(MarkdownBlock): + def __init__(self, language, *args): + self.language = language + super().__init__(*args) + + @property + def sents(self): + return [word_tokenize(line) for line in self.content.splitlines()] + + @property + def lines(self): + return self.content.splitlines() + + @property + def paras(self): + return [ + [word_tokenize(line) for line in para.splitlines()] + for para in blankline_tokenize(self.content) + ] + + +class MarkdownSection(MarkdownBlock): + def __init__(self, heading, level, *args): + self.heading = heading + self.level = level + super().__init__(*args) + + +Image = namedtuple("Image", "label, src, title") +Link = namedtuple("Link", "label, href, title") +List = namedtuple("List", "is_ordered, items") + + +class MarkdownCorpusReader(PlaintextCorpusReader): + def __init__(self, *args, parser=None, **kwargs): + from markdown_it import MarkdownIt + from mdit_plain.renderer import RendererPlain + from mdit_py_plugins.front_matter import front_matter_plugin + + self.parser = parser + if self.parser is None: + self.parser = MarkdownIt("commonmark", renderer_cls=RendererPlain) + self.parser.use(front_matter_plugin) + + kwargs.setdefault( + "para_block_reader", partial(read_parse_blankline_block, parser=self.parser) + ) + super().__init__(*args, **kwargs) + + # This override takes care of removing markup. + def _read_word_block(self, stream): + words = list() + for para in self._para_block_reader(stream): + words.extend(self._word_tokenizer.tokenize(para)) + return words + + +class CategorizedMarkdownCorpusReader(CategorizedCorpusReader, MarkdownCorpusReader): + """ + A reader for markdown corpora whose documents are divided into + categories based on their file identifiers. + + Based on nltk.corpus.reader.plaintext.CategorizedPlaintextCorpusReader: + https://www.nltk.org/_modules/nltk/corpus/reader/api.html#CategorizedCorpusReader + """ + + def __init__(self, *args, cat_field="tags", **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to + the ``CategorizedCorpusReader`` constructor. The remaining arguments + are passed to the ``MarkdownCorpusReader`` constructor. + """ + cat_args = ["cat_pattern", "cat_map", "cat_file"] + if not any(arg in kwargs for arg in cat_args): + # Initialize with a blank map now, + # and try to build categories from document metadata later. 
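+            # For example (illustrative), a document whose YAML front matter is
+            #
+            #     ---
+            #     tags: [python, nlp]
+            #     ---
+            #
+            # is later mapped to the categories 'python' and 'nlp', because
+            # ``metadata()`` parses the front matter and ``cat_field`` defaults
+            # to "tags".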
+ kwargs["cat_map"] = dict() + CategorizedCorpusReader.__init__(self, kwargs) + MarkdownCorpusReader.__init__(self, *args, **kwargs) + + # Map file IDs to categories if self._map exists but is still empty: + if self._map is not None and not self._map: + for file_id in self._fileids: + metadata = self.metadata(file_id) + if metadata: + self._map[file_id] = metadata[0].get(cat_field, []) + + ### Begin CategorizedCorpusReader Overrides + @comma_separated_string_args + def categories(self, fileids=None): + return super().categories(fileids) + + @comma_separated_string_args + def fileids(self, categories=None): + if categories is None: + return self._fileids + return super().fileids(categories) + + ### End CategorizedCorpusReader Overrides + + ### Begin MarkdownCorpusReader Overrides + @comma_separated_string_args + def raw(self, fileids=None, categories=None): + return super().raw(self._resolve(fileids, categories)) + + @comma_separated_string_args + def words(self, fileids=None, categories=None): + return super().words(self._resolve(fileids, categories)) + + @comma_separated_string_args + def sents(self, fileids=None, categories=None): + return super().sents(self._resolve(fileids, categories)) + + @comma_separated_string_args + def paras(self, fileids=None, categories=None): + return super().paras(self._resolve(fileids, categories)) + + ### End MarkdownCorpusReader Overrides + + def concatenated_view(self, reader, fileids, categories): + return concat( + [ + self.CorpusView(path, reader, encoding=enc) + for (path, enc) in self.abspaths( + self._resolve(fileids, categories), include_encoding=True + ) + ] + ) + + def metadata_reader(self, stream): + from yaml import safe_load + + return [ + safe_load(t.content) + for t in self.parser.parse(stream.read()) + if t.type == "front_matter" + ] + + @comma_separated_string_args + def metadata(self, fileids=None, categories=None): + return self.concatenated_view(self.metadata_reader, fileids, categories) + + def blockquote_reader(self, stream): + tokens = self.parser.parse(stream.read()) + opening_tokens = filter( + lambda t: t.level == 0 and t.type == "blockquote_open", tokens + ) + closing_tokens = filter( + lambda t: t.level == 0 and t.type == "blockquote_close", tokens + ) + blockquotes = list() + for o, c in zip(opening_tokens, closing_tokens): + opening_index = tokens.index(o) + closing_index = tokens.index(c, opening_index) + blockquotes.append(tokens[opening_index : closing_index + 1]) + return [ + MarkdownBlock( + self.parser.renderer.render(block, self.parser.options, env=None) + ) + for block in blockquotes + ] + + @comma_separated_string_args + def blockquotes(self, fileids=None, categories=None): + return self.concatenated_view(self.blockquote_reader, fileids, categories) + + def code_block_reader(self, stream): + return [ + CodeBlock( + t.info, + t.content, + ) + for t in self.parser.parse(stream.read()) + if t.level == 0 and t.type in ("fence", "code_block") + ] + + @comma_separated_string_args + def code_blocks(self, fileids=None, categories=None): + return self.concatenated_view(self.code_block_reader, fileids, categories) + + def image_reader(self, stream): + return [ + Image( + child_token.content, + child_token.attrGet("src"), + child_token.attrGet("title"), + ) + for inline_token in filter( + lambda t: t.type == "inline", self.parser.parse(stream.read()) + ) + for child_token in inline_token.children + if child_token.type == "image" + ] + + @comma_separated_string_args + def images(self, fileids=None, categories=None): + return 
self.concatenated_view(self.image_reader, fileids, categories) + + def link_reader(self, stream): + return [ + Link( + inline_token.children[i + 1].content, + child_token.attrGet("href"), + child_token.attrGet("title"), + ) + for inline_token in filter( + lambda t: t.type == "inline", self.parser.parse(stream.read()) + ) + for i, child_token in enumerate(inline_token.children) + if child_token.type == "link_open" + ] + + @comma_separated_string_args + def links(self, fileids=None, categories=None): + return self.concatenated_view(self.link_reader, fileids, categories) + + def list_reader(self, stream): + tokens = self.parser.parse(stream.read()) + opening_types = ("bullet_list_open", "ordered_list_open") + opening_tokens = filter( + lambda t: t.level == 0 and t.type in opening_types, tokens + ) + closing_types = ("bullet_list_close", "ordered_list_close") + closing_tokens = filter( + lambda t: t.level == 0 and t.type in closing_types, tokens + ) + list_blocks = list() + for o, c in zip(opening_tokens, closing_tokens): + opening_index = tokens.index(o) + closing_index = tokens.index(c, opening_index) + list_blocks.append(tokens[opening_index : closing_index + 1]) + return [ + List( + tokens[0].type == "ordered_list_open", + [t.content for t in tokens if t.content], + ) + for tokens in list_blocks + ] + + @comma_separated_string_args + def lists(self, fileids=None, categories=None): + return self.concatenated_view(self.list_reader, fileids, categories) + + def section_reader(self, stream): + section_blocks, block = list(), list() + in_heading = False + for t in self.parser.parse(stream.read()): + if t.level == 0 and t.type == "heading_open": + if block: + section_blocks.append(block) + block = list() + in_heading = True + if in_heading: + block.append(t) + return [ + MarkdownSection( + block[1].content, + block[0].markup.count("#"), + self.parser.renderer.render(block, self.parser.options, env=None), + ) + for block in section_blocks + ] + + @comma_separated_string_args + def sections(self, fileids=None, categories=None): + return self.concatenated_view(self.section_reader, fileids, categories) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py new file mode 100644 index 0000000000000000000000000000000000000000..0bcf51dc66954866ad665a54ba926fc9c8a33116 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py @@ -0,0 +1,90 @@ +# Natural Language Toolkit: NPS Chat Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +import re +import textwrap + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.xmldocs import * +from nltk.internals import ElementWrapper +from nltk.tag import map_tag +from nltk.util import LazyConcatenation + + +class NPSChatCorpusReader(XMLCorpusReader): + def __init__(self, root, fileids, wrap_etree=False, tagset=None): + XMLCorpusReader.__init__(self, root, fileids, wrap_etree) + self._tagset = tagset + + def xml_posts(self, fileids=None): + if self._wrap_etree: + return concat( + [ + XMLCorpusView(fileid, "Session/Posts/Post", self._wrap_elt) + for fileid in self.abspaths(fileids) + ] + ) + else: + return concat( + [ + XMLCorpusView(fileid, "Session/Posts/Post") + for fileid in self.abspaths(fileids) + ] + ) + + def posts(self, fileids=None): + return concat( + [ + XMLCorpusView( + fileid, 
"Session/Posts/Post/terminals", self._elt_to_words + ) + for fileid in self.abspaths(fileids) + ] + ) + + def tagged_posts(self, fileids=None, tagset=None): + def reader(elt, handler): + return self._elt_to_tagged_words(elt, handler, tagset) + + return concat( + [ + XMLCorpusView(fileid, "Session/Posts/Post/terminals", reader) + for fileid in self.abspaths(fileids) + ] + ) + + def words(self, fileids=None): + return LazyConcatenation(self.posts(fileids)) + + def tagged_words(self, fileids=None, tagset=None): + return LazyConcatenation(self.tagged_posts(fileids, tagset)) + + def _wrap_elt(self, elt, handler): + return ElementWrapper(elt) + + def _elt_to_words(self, elt, handler): + return [self._simplify_username(t.attrib["word"]) for t in elt.findall("t")] + + def _elt_to_tagged_words(self, elt, handler, tagset=None): + tagged_post = [ + (self._simplify_username(t.attrib["word"]), t.attrib["pos"]) + for t in elt.findall("t") + ] + if tagset and tagset != self._tagset: + tagged_post = [ + (w, map_tag(self._tagset, tagset, t)) for (w, t) in tagged_post + ] + return tagged_post + + @staticmethod + def _simplify_username(word): + if "User" in word: + word = "U" + word.split("User", 1)[1] + elif isinstance(word, bytes): + word = word.decode("ascii") + return word diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py new file mode 100644 index 0000000000000000000000000000000000000000..87be7c97e6151c8ce19e64e2f8ac6683918e3aad --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py @@ -0,0 +1,125 @@ +# Natural Language Toolkit: Opinion Lexicon Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for the Opinion Lexicon. + +Opinion Lexicon information +=========================== + +Authors: Minqing Hu and Bing Liu, 2004. + Department of Computer Science + University of Illinois at Chicago + +Contact: Bing Liu, liub@cs.uic.edu + https://www.cs.uic.edu/~liub + +Distributed with permission. + +Related papers: + +- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews". + Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery + & Data Mining (KDD-04), Aug 22-25, 2004, Seattle, Washington, USA. + +- Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and + Comparing Opinions on the Web". Proceedings of the 14th International World + Wide Web conference (WWW-2005), May 10-14, 2005, Chiba, Japan. +""" + +from nltk.corpus.reader import WordListCorpusReader +from nltk.corpus.reader.api import * + + +class IgnoreReadmeCorpusView(StreamBackedCorpusView): + """ + This CorpusView is used to skip the initial readme block of the corpus. + """ + + def __init__(self, *args, **kwargs): + StreamBackedCorpusView.__init__(self, *args, **kwargs) + # open self._stream + self._open() + # skip the readme block + read_blankline_block(self._stream) + # Set the initial position to the current stream position + self._filepos = [self._stream.tell()] + + +class OpinionLexiconCorpusReader(WordListCorpusReader): + """ + Reader for Liu and Hu opinion lexicon. Blank lines and readme are ignored. + + >>> from nltk.corpus import opinion_lexicon + >>> opinion_lexicon.words() + ['2-faced', '2-faces', 'abnormal', 'abolish', ...] 
+ + The OpinionLexiconCorpusReader provides shortcuts to retrieve positive/negative + words: + + >>> opinion_lexicon.negative() + ['2-faced', '2-faces', 'abnormal', 'abolish', ...] + + Note that words from `words()` method are sorted by file id, not alphabetically: + + >>> opinion_lexicon.words()[0:10] # doctest: +NORMALIZE_WHITESPACE + ['2-faced', '2-faces', 'abnormal', 'abolish', 'abominable', 'abominably', + 'abominate', 'abomination', 'abort', 'aborted'] + >>> sorted(opinion_lexicon.words())[0:10] # doctest: +NORMALIZE_WHITESPACE + ['2-faced', '2-faces', 'a+', 'abnormal', 'abolish', 'abominable', 'abominably', + 'abominate', 'abomination', 'abort'] + """ + + CorpusView = IgnoreReadmeCorpusView + + def words(self, fileids=None): + """ + Return all words in the opinion lexicon. Note that these words are not + sorted in alphabetical order. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def positive(self): + """ + Return all positive words in alphabetical order. + + :return: a list of positive words. + :rtype: list(str) + """ + return self.words("positive-words.txt") + + def negative(self): + """ + Return all negative words in alphabetical order. + + :return: a list of negative words. + :rtype: list(str) + """ + return self.words("negative-words.txt") + + def _read_word_block(self, stream): + words = [] + for i in range(20): # Read 20 lines at a time. + line = stream.readline() + if not line: + continue + words.append(line.strip()) + return words diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py new file mode 100644 index 0000000000000000000000000000000000000000..59492992353ca876eea00f63e3759f14ec5b0e02 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py @@ -0,0 +1,174 @@ +# Natural Language Toolkit: PanLex Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: David Kamholz +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for PanLex Lite, a stripped down version of PanLex distributed +as an SQLite database. See the README.txt in the panlex_lite corpus directory +for more information on PanLex Lite. +""" + +import os +import sqlite3 + +from nltk.corpus.reader.api import CorpusReader + + +class PanLexLiteCorpusReader(CorpusReader): + MEANING_Q = """ + SELECT dnx2.mn, dnx2.uq, dnx2.ap, dnx2.ui, ex2.tt, ex2.lv + FROM dnx + JOIN ex ON (ex.ex = dnx.ex) + JOIN dnx dnx2 ON (dnx2.mn = dnx.mn) + JOIN ex ex2 ON (ex2.ex = dnx2.ex) + WHERE dnx.ex != dnx2.ex AND ex.tt = ? AND ex.lv = ? + ORDER BY dnx2.uq DESC + """ + + TRANSLATION_Q = """ + SELECT s.tt, sum(s.uq) AS trq FROM ( + SELECT ex2.tt, max(dnx.uq) AS uq + FROM dnx + JOIN ex ON (ex.ex = dnx.ex) + JOIN dnx dnx2 ON (dnx2.mn = dnx.mn) + JOIN ex ex2 ON (ex2.ex = dnx2.ex) + WHERE dnx.ex != dnx2.ex AND ex.lv = ? AND ex.tt = ? AND ex2.lv = ? 
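+            -- The inner query keeps, for each candidate translation (ex2.tt) and
+            -- each source (dnx.ui), the best quality score; the outer query then
+            -- sums those scores per translation and sorts by the total.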
+ GROUP BY ex2.tt, dnx.ui + ) s + GROUP BY s.tt + ORDER BY trq DESC, s.tt + """ + + def __init__(self, root): + self._c = sqlite3.connect(os.path.join(root, "db.sqlite")).cursor() + + self._uid_lv = {} + self._lv_uid = {} + + for row in self._c.execute("SELECT uid, lv FROM lv"): + self._uid_lv[row[0]] = row[1] + self._lv_uid[row[1]] = row[0] + + def language_varieties(self, lc=None): + """ + Return a list of PanLex language varieties. + + :param lc: ISO 639 alpha-3 code. If specified, filters returned varieties + by this code. If unspecified, all varieties are returned. + :return: the specified language varieties as a list of tuples. The first + element is the language variety's seven-character uniform identifier, + and the second element is its default name. + :rtype: list(tuple) + """ + + if lc is None: + return self._c.execute("SELECT uid, tt FROM lv ORDER BY uid").fetchall() + else: + return self._c.execute( + "SELECT uid, tt FROM lv WHERE lc = ? ORDER BY uid", (lc,) + ).fetchall() + + def meanings(self, expr_uid, expr_tt): + """ + Return a list of meanings for an expression. + + :param expr_uid: the expression's language variety, as a seven-character + uniform identifier. + :param expr_tt: the expression's text. + :return: a list of Meaning objects. + :rtype: list(Meaning) + """ + + expr_lv = self._uid_lv[expr_uid] + + mn_info = {} + + for i in self._c.execute(self.MEANING_Q, (expr_tt, expr_lv)): + mn = i[0] + uid = self._lv_uid[i[5]] + + if not mn in mn_info: + mn_info[mn] = { + "uq": i[1], + "ap": i[2], + "ui": i[3], + "ex": {expr_uid: [expr_tt]}, + } + + if not uid in mn_info[mn]["ex"]: + mn_info[mn]["ex"][uid] = [] + + mn_info[mn]["ex"][uid].append(i[4]) + + return [Meaning(mn, mn_info[mn]) for mn in mn_info] + + def translations(self, from_uid, from_tt, to_uid): + """ + Return a list of translations for an expression into a single language + variety. + + :param from_uid: the source expression's language variety, as a + seven-character uniform identifier. + :param from_tt: the source expression's text. + :param to_uid: the target language variety, as a seven-character + uniform identifier. + :return: a list of translation tuples. The first element is the expression + text and the second element is the translation quality. + :rtype: list(tuple) + """ + + from_lv = self._uid_lv[from_uid] + to_lv = self._uid_lv[to_uid] + + return self._c.execute(self.TRANSLATION_Q, (from_lv, from_tt, to_lv)).fetchall() + + +class Meaning(dict): + """ + Represents a single PanLex meaning. A meaning is a translation set derived + from a single source. + """ + + def __init__(self, mn, attr): + super().__init__(**attr) + self["mn"] = mn + + def id(self): + """ + :return: the meaning's id. + :rtype: int + """ + return self["mn"] + + def quality(self): + """ + :return: the meaning's source's quality (0=worst, 9=best). + :rtype: int + """ + return self["uq"] + + def source(self): + """ + :return: the meaning's source id. + :rtype: int + """ + return self["ap"] + + def source_group(self): + """ + :return: the meaning's source group id. + :rtype: int + """ + return self["ui"] + + def expressions(self): + """ + :return: the meaning's expressions as a dictionary whose keys are language + variety uniform identifiers and whose values are lists of expression + texts. 
+ :rtype: dict + """ + return self["ex"] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py new file mode 100644 index 0000000000000000000000000000000000000000..182960f2ebc4b3e2411e3980ce4e445412af9bcc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py @@ -0,0 +1,95 @@ +# Natural Language Toolkit: Word List Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + + +import re +from collections import defaultdict, namedtuple + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.wordlist import WordListCorpusReader +from nltk.tokenize import line_tokenize + +PanlexLanguage = namedtuple( + "PanlexLanguage", + [ + "panlex_uid", # (1) PanLex UID + "iso639", # (2) ISO 639 language code + "iso639_type", # (3) ISO 639 language type, see README + "script", # (4) normal scripts of expressions + "name", # (5) PanLex default name + "langvar_uid", # (6) UID of the language variety in which the default name is an expression + ], +) + + +class PanlexSwadeshCorpusReader(WordListCorpusReader): + """ + This is a class to read the PanLex Swadesh list from + + David Kamholz, Jonathan Pool, and Susan M. Colowick (2014). + PanLex: Building a Resource for Panlingual Lexical Translation. + In LREC. http://www.lrec-conf.org/proceedings/lrec2014/pdf/1029_Paper.pdf + + License: CC0 1.0 Universal + https://creativecommons.org/publicdomain/zero/1.0/legalcode + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Find the swadesh size using the fileids' path. + self.swadesh_size = re.match(r"swadesh([0-9].*)\/", self.fileids()[0]).group(1) + self._languages = {lang.panlex_uid: lang for lang in self.get_languages()} + self._macro_langauges = self.get_macrolanguages() + + def license(self): + return "CC0 1.0 Universal" + + def language_codes(self): + return self._languages.keys() + + def get_languages(self): + for line in self.raw(f"langs{self.swadesh_size}.txt").split("\n"): + if not line.strip(): # Skip empty lines. + continue + yield PanlexLanguage(*line.strip().split("\t")) + + def get_macrolanguages(self): + macro_langauges = defaultdict(list) + for lang in self._languages.values(): + macro_langauges[lang.iso639].append(lang.panlex_uid) + return macro_langauges + + def words_by_lang(self, lang_code): + """ + :return: a list of list(str) + """ + fileid = f"swadesh{self.swadesh_size}/{lang_code}.txt" + return [concept.split("\t") for concept in self.words(fileid)] + + def words_by_iso639(self, iso63_code): + """ + :return: a list of list(str) + """ + fileids = [ + f"swadesh{self.swadesh_size}/{lang_code}.txt" + for lang_code in self._macro_langauges[iso63_code] + ] + return [ + concept.split("\t") for fileid in fileids for concept in self.words(fileid) + ] + + def entries(self, fileids=None): + """ + :return: a tuple of words for the specified fileids. 
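+
+        In practice the result is a list of such tuples, one per Swadesh
+        concept, with the words aligned across the requested fileids. A rough
+        sketch (the fileids shown are hypothetical examples, and the doctests
+        are skipped because they need the ``panlex_swadesh`` data package):
+
+        >>> from nltk.corpus import panlex_swadesh          # doctest: +SKIP
+        >>> fids = ['swadesh110/eng-000.txt', 'swadesh110/fra-000.txt']
+        >>> len(panlex_swadesh.entries(fids)[0])            # doctest: +SKIP
+        2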
+ """ + if not fileids: + fileids = self.fileids() + + wordlists = [self.words(f) for f in fileids] + return list(zip(*wordlists)) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py new file mode 100644 index 0000000000000000000000000000000000000000..e59d297c0100f46b484b02bfc125532e4ca9d8ad --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py @@ -0,0 +1,375 @@ +# Natural Language Toolkit: +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Piotr Kasprzyk +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.xmldocs import XMLCorpusReader + +PARA = re.compile(r"]*){0,1}>(.*?)

") +SENT = re.compile(r"]*){0,1}>(.*?)") + +TAGGEDWORD = re.compile(r"<([wc](?: [^>]*){0,1}>)(.*?)") +WORD = re.compile(r"<[wc](?: [^>]*){0,1}>(.*?)") + +TYPE = re.compile(r'type="(.*?)"') +ANA = re.compile(r'ana="(.*?)"') + +TEXTID = re.compile(r'text id="(.*?)"') + + +class TEICorpusView(StreamBackedCorpusView): + def __init__( + self, + corpus_file, + tagged, + group_by_sent, + group_by_para, + tagset=None, + head_len=0, + textids=None, + ): + + self._tagged = tagged + self._textids = textids + + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + # WARNING -- skip header + StreamBackedCorpusView.__init__(self, corpus_file, startpos=head_len) + + _pagesize = 4096 + + def read_block(self, stream): + block = stream.readlines(self._pagesize) + block = concat(block) + while (block.count(" block.count("")) or block.count( + "") + len("") + block = block[:beg] + block[beg + end :] + + output = [] + for para_str in PARA.findall(block): + para = [] + for sent_str in SENT.findall(para_str): + if not self._tagged: + sent = WORD.findall(sent_str) + else: + sent = list(map(self._parse_tag, TAGGEDWORD.findall(sent_str))) + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + if self._group_by_para: + output.append(para) + else: + output.extend(para) + return output + + def _parse_tag(self, tag_word_tuple): + (tag, word) = tag_word_tuple + if tag.startswith("w"): + tag = ANA.search(tag).group(1) + else: # tag.startswith('c') + tag = TYPE.search(tag).group(1) + return word, tag + + +class Pl196xCorpusReader(CategorizedCorpusReader, XMLCorpusReader): + head_len = 2770 + + def __init__(self, *args, **kwargs): + if "textid_file" in kwargs: + self._textids = kwargs["textid_file"] + else: + self._textids = None + + XMLCorpusReader.__init__(self, *args) + CategorizedCorpusReader.__init__(self, kwargs) + + self._init_textids() + + def _init_textids(self): + self._f2t = defaultdict(list) + self._t2f = defaultdict(list) + if self._textids is not None: + with open(self._textids) as fp: + for line in fp: + line = line.strip() + file_id, text_ids = line.split(" ", 1) + if file_id not in self.fileids(): + raise ValueError( + "In text_id mapping file %s: %s not found" + % (self._textids, file_id) + ) + for text_id in text_ids.split(self._delimiter): + self._add_textids(file_id, text_id) + + def _add_textids(self, file_id, text_id): + self._f2t[file_id].append(text_id) + self._t2f[text_id].append(file_id) + + def _resolve(self, fileids, categories, textids=None): + tmp = None + if ( + len( + list( + filter( + lambda accessor: accessor is None, + (fileids, categories, textids), + ) + ) + ) + != 1 + ): + + raise ValueError( + "Specify exactly one of: fileids, " "categories or textids" + ) + + if fileids is not None: + return fileids, None + + if categories is not None: + return self.fileids(categories), None + + if textids is not None: + if isinstance(textids, str): + textids = [textids] + files = sum((self._t2f[t] for t in textids), []) + tdict = dict() + for f in files: + tdict[f] = set(self._f2t[f]) & set(textids) + return files, tdict + + def decode_tag(self, tag): + # to be implemented + return tag + + def textids(self, fileids=None, categories=None): + """ + In the pl196x corpus each category is stored in single + file and thus both methods provide identical functionality. In order + to accommodate finer granularity, a non-standard textids() method was + implemented. 
All the main functions can be supplied with a list + of required chunks---giving much more control to the user. + """ + fileids, _ = self._resolve(fileids, categories) + if fileids is None: + return sorted(self._t2f) + + if isinstance(fileids, str): + fileids = [fileids] + return sorted(sum((self._f2t[d] for d in fileids), [])) + + def words(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + False, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + False, + False, + head_len=self.head_len, + ) + for fileid in fileids + ] + ) + + def sents(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + True, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), False, True, False, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def paras(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + True, + True, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), False, True, True, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def tagged_words(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + True, + False, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), True, False, False, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def tagged_sents(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + True, + True, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), True, True, False, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def tagged_paras(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + 
True, + True, + True, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), True, True, True, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def xml(self, fileids=None, categories=None): + fileids, _ = self._resolve(fileids, categories) + if len(fileids) == 1: + return XMLCorpusReader.xml(self, fileids[0]) + else: + raise TypeError("Expected a single file") diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py new file mode 100644 index 0000000000000000000000000000000000000000..c254a8416f2c1bb38f684819e43bae76a4308eeb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py @@ -0,0 +1,520 @@ +# Natural Language Toolkit: PropBank Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +import re +from functools import total_ordering +from xml.etree import ElementTree + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.internals import raise_unorderable_types +from nltk.tree import Tree + + +class PropbankCorpusReader(CorpusReader): + """ + Corpus reader for the propbank corpus, which augments the Penn + Treebank with information about the predicate argument structure + of every verb instance. The corpus consists of two parts: the + predicate-argument annotations themselves, and a set of "frameset + files" which define the argument labels used by the annotations, + on a per-verb basis. Each "frameset file" contains one or more + predicates, such as ``'turn'`` or ``'turn_on'``, each of which is + divided into coarse-grained word senses called "rolesets". For + each "roleset", the frameset file provides descriptions of the + argument roles, along with examples. + """ + + def __init__( + self, + root, + propfile, + framefiles="", + verbsfile=None, + parse_fileid_xform=None, + parse_corpus=None, + encoding="utf8", + ): + """ + :param root: The root directory for this corpus. + :param propfile: The name of the file containing the predicate- + argument annotations (relative to ``root``). + :param framefiles: A list or regexp specifying the frameset + fileids for this corpus. + :param parse_fileid_xform: A transform that should be applied + to the fileids in this corpus. This should be a function + of one argument (a fileid) that returns a string (the new + fileid). + :param parse_corpus: The corpus containing the parse trees + corresponding to this corpus. These parse trees are + necessary to resolve the tree pointers used by propbank. + """ + # If framefiles is specified as a regexp, expand it. + if isinstance(framefiles, str): + framefiles = find_corpus_fileids(root, framefiles) + framefiles = list(framefiles) + # Initialize the corpus reader. + CorpusReader.__init__(self, root, [propfile, verbsfile] + framefiles, encoding) + + # Record our frame fileids & prop file. + self._propfile = propfile + self._framefiles = framefiles + self._verbsfile = verbsfile + self._parse_fileid_xform = parse_fileid_xform + self._parse_corpus = parse_corpus + + def instances(self, baseform=None): + """ + :return: a corpus view that acts as a list of + ``PropBankInstance`` objects, one for each noun in the corpus. 
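+
+        A rough usage sketch (skipped: it needs the ``propbank`` data package;
+        the attribute names are those of ``PropbankInstance`` defined below):
+
+        >>> from nltk.corpus import propbank                     # doctest: +SKIP
+        >>> inst = propbank.instances(baseform='turn')[0]        # doctest: +SKIP
+        >>> where = (inst.fileid, inst.sentnum, inst.wordnum)    # doctest: +SKIP
+        >>> labels = [label for (_, label) in inst.arguments]    # doctest: +SKIP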
+ """ + kwargs = {} + if baseform is not None: + kwargs["instance_filter"] = lambda inst: inst.baseform == baseform + return StreamBackedCorpusView( + self.abspath(self._propfile), + lambda stream: self._read_instance_block(stream, **kwargs), + encoding=self.encoding(self._propfile), + ) + + def lines(self): + """ + :return: a corpus view that acts as a list of strings, one for + each line in the predicate-argument annotation file. + """ + return StreamBackedCorpusView( + self.abspath(self._propfile), + read_line_block, + encoding=self.encoding(self._propfile), + ) + + def roleset(self, roleset_id): + """ + :return: the xml description for the given roleset. + """ + baseform = roleset_id.split(".")[0] + framefile = "frames/%s.xml" % baseform + if framefile not in self._framefiles: + raise ValueError("Frameset file for %s not found" % roleset_id) + + # n.b.: The encoding for XML fileids is specified by the file + # itself; so we ignore self._encoding here. + with self.abspath(framefile).open() as fp: + etree = ElementTree.parse(fp).getroot() + for roleset in etree.findall("predicate/roleset"): + if roleset.attrib["id"] == roleset_id: + return roleset + raise ValueError(f"Roleset {roleset_id} not found in {framefile}") + + def rolesets(self, baseform=None): + """ + :return: list of xml descriptions for rolesets. + """ + if baseform is not None: + framefile = "frames/%s.xml" % baseform + if framefile not in self._framefiles: + raise ValueError("Frameset file for %s not found" % baseform) + framefiles = [framefile] + else: + framefiles = self._framefiles + + rsets = [] + for framefile in framefiles: + # n.b.: The encoding for XML fileids is specified by the file + # itself; so we ignore self._encoding here. + with self.abspath(framefile).open() as fp: + etree = ElementTree.parse(fp).getroot() + rsets.append(etree.findall("predicate/roleset")) + return LazyConcatenation(rsets) + + def verbs(self): + """ + :return: a corpus view that acts as a list of all verb lemmas + in this corpus (from the verbs.txt file). + """ + return StreamBackedCorpusView( + self.abspath(self._verbsfile), + read_line_block, + encoding=self.encoding(self._verbsfile), + ) + + def _read_instance_block(self, stream, instance_filter=lambda inst: True): + block = [] + + # Read 100 at a time. + for i in range(100): + line = stream.readline().strip() + if line: + inst = PropbankInstance.parse( + line, self._parse_fileid_xform, self._parse_corpus + ) + if instance_filter(inst): + block.append(inst) + + return block + + +###################################################################### +# { Propbank Instance & related datatypes +###################################################################### + + +class PropbankInstance: + def __init__( + self, + fileid, + sentnum, + wordnum, + tagger, + roleset, + inflection, + predicate, + arguments, + parse_corpus=None, + ): + + self.fileid = fileid + """The name of the file containing the parse tree for this + instance's sentence.""" + + self.sentnum = sentnum + """The sentence number of this sentence within ``fileid``. + Indexing starts from zero.""" + + self.wordnum = wordnum + """The word number of this instance's predicate within its + containing sentence. 
Word numbers are indexed starting from + zero, and include traces and other empty parse elements.""" + + self.tagger = tagger + """An identifier for the tagger who tagged this instance; or + ``'gold'`` if this is an adjuticated instance.""" + + self.roleset = roleset + """The name of the roleset used by this instance's predicate. + Use ``propbank.roleset() `` to + look up information about the roleset.""" + + self.inflection = inflection + """A ``PropbankInflection`` object describing the inflection of + this instance's predicate.""" + + self.predicate = predicate + """A ``PropbankTreePointer`` indicating the position of this + instance's predicate within its containing sentence.""" + + self.arguments = tuple(arguments) + """A list of tuples (argloc, argid), specifying the location + and identifier for each of the predicate's argument in the + containing sentence. Argument identifiers are strings such as + ``'ARG0'`` or ``'ARGM-TMP'``. This list does *not* contain + the predicate.""" + + self.parse_corpus = parse_corpus + """A corpus reader for the parse trees corresponding to the + instances in this propbank corpus.""" + + @property + def baseform(self): + """The baseform of the predicate.""" + return self.roleset.split(".")[0] + + @property + def sensenumber(self): + """The sense number of the predicate.""" + return self.roleset.split(".")[1] + + @property + def predid(self): + """Identifier of the predicate.""" + return "rel" + + def __repr__(self): + return "".format( + self.fileid, + self.sentnum, + self.wordnum, + ) + + def __str__(self): + s = "{} {} {} {} {} {}".format( + self.fileid, + self.sentnum, + self.wordnum, + self.tagger, + self.roleset, + self.inflection, + ) + items = self.arguments + ((self.predicate, "rel"),) + for (argloc, argid) in sorted(items): + s += f" {argloc}-{argid}" + return s + + def _get_tree(self): + if self.parse_corpus is None: + return None + if self.fileid not in self.parse_corpus.fileids(): + return None + return self.parse_corpus.parsed_sents(self.fileid)[self.sentnum] + + tree = property( + _get_tree, + doc=""" + The parse tree corresponding to this instance, or None if + the corresponding tree is not available.""", + ) + + @staticmethod + def parse(s, parse_fileid_xform=None, parse_corpus=None): + pieces = s.split() + if len(pieces) < 7: + raise ValueError("Badly formatted propbank line: %r" % s) + + # Divide the line into its basic pieces. + (fileid, sentnum, wordnum, tagger, roleset, inflection) = pieces[:6] + rel = [p for p in pieces[6:] if p.endswith("-rel")] + args = [p for p in pieces[6:] if not p.endswith("-rel")] + if len(rel) != 1: + raise ValueError("Badly formatted propbank line: %r" % s) + + # Apply the fileid selector, if any. + if parse_fileid_xform is not None: + fileid = parse_fileid_xform(fileid) + + # Convert sentence & word numbers to ints. + sentnum = int(sentnum) + wordnum = int(wordnum) + + # Parse the inflection + inflection = PropbankInflection.parse(inflection) + + # Parse the predicate location. + predicate = PropbankTreePointer.parse(rel[0][:-4]) + + # Parse the arguments. + arguments = [] + for arg in args: + argloc, argid = arg.split("-", 1) + arguments.append((PropbankTreePointer.parse(argloc), argid)) + + # Put it all together. + return PropbankInstance( + fileid, + sentnum, + wordnum, + tagger, + roleset, + inflection, + predicate, + arguments, + parse_corpus, + ) + + +class PropbankPointer: + """ + A pointer used by propbank to identify one or more constituents in + a parse tree. 
``PropbankPointer`` is an abstract base class with + three concrete subclasses: + + - ``PropbankTreePointer`` is used to point to single constituents. + - ``PropbankSplitTreePointer`` is used to point to 'split' + constituents, which consist of a sequence of two or more + ``PropbankTreePointer`` pointers. + - ``PropbankChainTreePointer`` is used to point to entire trace + chains in a tree. It consists of a sequence of pieces, which + can be ``PropbankTreePointer`` or ``PropbankSplitTreePointer`` pointers. + """ + + def __init__(self): + if self.__class__ == PropbankPointer: + raise NotImplementedError() + + +class PropbankChainTreePointer(PropbankPointer): + def __init__(self, pieces): + self.pieces = pieces + """A list of the pieces that make up this chain. Elements may + be either ``PropbankSplitTreePointer`` or + ``PropbankTreePointer`` pointers.""" + + def __str__(self): + return "*".join("%s" % p for p in self.pieces) + + def __repr__(self): + return "" % self + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return Tree("*CHAIN*", [p.select(tree) for p in self.pieces]) + + +class PropbankSplitTreePointer(PropbankPointer): + def __init__(self, pieces): + self.pieces = pieces + """A list of the pieces that make up this chain. Elements are + all ``PropbankTreePointer`` pointers.""" + + def __str__(self): + return ",".join("%s" % p for p in self.pieces) + + def __repr__(self): + return "" % self + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return Tree("*SPLIT*", [p.select(tree) for p in self.pieces]) + + +@total_ordering +class PropbankTreePointer(PropbankPointer): + """ + wordnum:height*wordnum:height*... + wordnum:height, + + """ + + def __init__(self, wordnum, height): + self.wordnum = wordnum + self.height = height + + @staticmethod + def parse(s): + # Deal with chains (xx*yy*zz) + pieces = s.split("*") + if len(pieces) > 1: + return PropbankChainTreePointer( + [PropbankTreePointer.parse(elt) for elt in pieces] + ) + + # Deal with split args (xx,yy,zz) + pieces = s.split(",") + if len(pieces) > 1: + return PropbankSplitTreePointer( + [PropbankTreePointer.parse(elt) for elt in pieces] + ) + + # Deal with normal pointers. + pieces = s.split(":") + if len(pieces) != 2: + raise ValueError("bad propbank pointer %r" % s) + return PropbankTreePointer(int(pieces[0]), int(pieces[1])) + + def __str__(self): + return f"{self.wordnum}:{self.height}" + + def __repr__(self): + return "PropbankTreePointer(%d, %d)" % (self.wordnum, self.height) + + def __eq__(self, other): + while isinstance(other, (PropbankChainTreePointer, PropbankSplitTreePointer)): + other = other.pieces[0] + + if not isinstance(other, PropbankTreePointer): + return self is other + + return self.wordnum == other.wordnum and self.height == other.height + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + while isinstance(other, (PropbankChainTreePointer, PropbankSplitTreePointer)): + other = other.pieces[0] + + if not isinstance(other, PropbankTreePointer): + return id(self) < id(other) + + return (self.wordnum, -self.height) < (other.wordnum, -other.height) + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return tree[self.treepos(tree)] + + def treepos(self, tree): + """ + Convert this pointer to a standard 'tree position' pointer, + given that it points to the given tree. 
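+
+        Pointers are written ``wordnum:height``, so parsing one is enough to
+        see the two components (no corpus data is needed for this step):
+
+        >>> ptr = PropbankTreePointer.parse('2:1')
+        >>> ptr.wordnum, ptr.height
+        (2, 1)
+
+        ``treepos(tree)`` then walks ``tree``, counting leaf nodes until
+        ``wordnum`` is reached, and returns that leaf's tree position
+        shortened according to ``height``.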
+ """ + if tree is None: + raise ValueError("Parse tree not available") + stack = [tree] + treepos = [] + + wordnum = 0 + while True: + # tree node: + if isinstance(stack[-1], Tree): + # Select the next child. + if len(treepos) < len(stack): + treepos.append(0) + else: + treepos[-1] += 1 + # Update the stack. + if treepos[-1] < len(stack[-1]): + stack.append(stack[-1][treepos[-1]]) + else: + # End of node's child list: pop up a level. + stack.pop() + treepos.pop() + # word node: + else: + if wordnum == self.wordnum: + return tuple(treepos[: len(treepos) - self.height - 1]) + else: + wordnum += 1 + stack.pop() + + +class PropbankInflection: + # { Inflection Form + INFINITIVE = "i" + GERUND = "g" + PARTICIPLE = "p" + FINITE = "v" + # { Inflection Tense + FUTURE = "f" + PAST = "p" + PRESENT = "n" + # { Inflection Aspect + PERFECT = "p" + PROGRESSIVE = "o" + PERFECT_AND_PROGRESSIVE = "b" + # { Inflection Person + THIRD_PERSON = "3" + # { Inflection Voice + ACTIVE = "a" + PASSIVE = "p" + # { Inflection + NONE = "-" + # } + + def __init__(self, form="-", tense="-", aspect="-", person="-", voice="-"): + self.form = form + self.tense = tense + self.aspect = aspect + self.person = person + self.voice = voice + + def __str__(self): + return self.form + self.tense + self.aspect + self.person + self.voice + + def __repr__(self): + return "" % self + + _VALIDATE = re.compile(r"[igpv\-][fpn\-][pob\-][3\-][ap\-]$") + + @staticmethod + def parse(s): + if not isinstance(s, str): + raise TypeError("expected a string") + if len(s) != 5 or not PropbankInflection._VALIDATE.match(s): + raise ValueError("Bad propbank inflection string %r" % s) + return PropbankInflection(*s) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py new file mode 100644 index 0000000000000000000000000000000000000000..31f1b02f701bc68a652af9617751d78b1c04d56d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py @@ -0,0 +1,133 @@ +# Natural Language Toolkit: Pros and Cons Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for the Pros and Cons dataset. + +- Pros and Cons dataset information - + +Contact: Bing Liu, liub@cs.uic.edu + https://www.cs.uic.edu/~liub + +Distributed with permission. + +Related papers: + +- Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences". + Proceedings of the 22nd International Conference on Computational Linguistics + (Coling-2008), Manchester, 18-22 August, 2008. + +- Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and Comparing + Opinions on the Web". Proceedings of the 14th international World Wide Web + conference (WWW-2005), May 10-14, 2005, in Chiba, Japan. +""" +import re + +from nltk.corpus.reader.api import * +from nltk.tokenize import * + + +class ProsConsCorpusReader(CategorizedCorpusReader, CorpusReader): + """ + Reader for the Pros and Cons sentence dataset. + + >>> from nltk.corpus import pros_cons + >>> pros_cons.sents(categories='Cons') # doctest: +NORMALIZE_WHITESPACE + [['East', 'batteries', '!', 'On', '-', 'off', 'switch', 'too', 'easy', + 'to', 'maneuver', '.'], ['Eats', '...', 'no', ',', 'GULPS', 'batteries'], + ...] + >>> pros_cons.words('IntegratedPros.txt') + ['Easy', 'to', 'use', ',', 'economical', '!', ...] 
+ """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, + root, + fileids, + word_tokenizer=WordPunctTokenizer(), + encoding="utf8", + **kwargs + ): + """ + :param root: The root directory for the corpus. + :param fileids: a list or regexp specifying the fileids in the corpus. + :param word_tokenizer: a tokenizer for breaking sentences or paragraphs + into words. Default: `WhitespaceTokenizer` + :param encoding: the encoding that should be used to read the corpus. + :param kwargs: additional parameters passed to CategorizedCorpusReader. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + CategorizedCorpusReader.__init__(self, kwargs) + self._word_tokenizer = word_tokenizer + + def sents(self, fileids=None, categories=None): + """ + Return all sentences in the corpus or in the specified files/categories. + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. + :param categories: a list specifying the categories whose sentences + have to be returned. + :return: the given file(s) as a list of sentences. Each sentence is + tokenized using the specified word_tokenizer. + :rtype: list(list(str)) + """ + fileids = self._resolve(fileids, categories) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def words(self, fileids=None, categories=None): + """ + Return all words and punctuation symbols in the corpus or in the specified + files/categories. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :param categories: a list specifying the categories whose words have + to be returned. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + fileids = self._resolve(fileids, categories) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_sent_block(self, stream): + sents = [] + for i in range(20): # Read 20 lines at a time. + line = stream.readline() + if not line: + continue + sent = re.match(r"^(?!\n)\s*<(Pros|Cons)>(.*)", line) + if sent: + sents.append(self._word_tokenizer.tokenize(sent.group(2).strip())) + return sents + + def _read_word_block(self, stream): + words = [] + for sent in self._read_sent_block(stream): + words.extend(sent) + return words diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py new file mode 100644 index 0000000000000000000000000000000000000000..5f52425c0f7c260f62d7d953b90d241a6c00a2b8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py @@ -0,0 +1,331 @@ +# Natural Language Toolkit: Product Reviews Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for reviews corpora (syntax based on Customer Review Corpus). + +Customer Review Corpus information +================================== + +Annotated by: Minqing Hu and Bing Liu, 2004. 
+ Department of Computer Science + University of Illinois at Chicago + +Contact: Bing Liu, liub@cs.uic.edu + https://www.cs.uic.edu/~liub + +Distributed with permission. + +The "product_reviews_1" and "product_reviews_2" datasets respectively contain +annotated customer reviews of 5 and 9 products from amazon.com. + +Related papers: + +- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews". + Proceedings of the ACM SIGKDD International Conference on Knowledge + Discovery & Data Mining (KDD-04), 2004. + +- Minqing Hu and Bing Liu. "Mining Opinion Features in Customer Reviews". + Proceedings of Nineteeth National Conference on Artificial Intelligence + (AAAI-2004), 2004. + +- Xiaowen Ding, Bing Liu and Philip S. Yu. "A Holistic Lexicon-Based Appraoch to + Opinion Mining." Proceedings of First ACM International Conference on Web + Search and Data Mining (WSDM-2008), Feb 11-12, 2008, Stanford University, + Stanford, California, USA. + +Symbols used in the annotated reviews: + + :[t]: the title of the review: Each [t] tag starts a review. + :xxxx[+|-n]: xxxx is a product feature. + :[+n]: Positive opinion, n is the opinion strength: 3 strongest, and 1 weakest. + Note that the strength is quite subjective. + You may want ignore it, but only considering + and - + :[-n]: Negative opinion + :##: start of each sentence. Each line is a sentence. + :[u]: feature not appeared in the sentence. + :[p]: feature not appeared in the sentence. Pronoun resolution is needed. + :[s]: suggestion or recommendation. + :[cc]: comparison with a competing product from a different brand. + :[cs]: comparison with a competing product from the same brand. + +Note: Some of the files (e.g. "ipod.txt", "Canon PowerShot SD500.txt") do not + provide separation between different reviews. This is due to the fact that + the dataset was specifically designed for aspect/feature-based sentiment + analysis, for which sentence-level annotation is sufficient. For document- + level classification and analysis, this peculiarity should be taken into + consideration. +""" + +import re + +from nltk.corpus.reader.api import * +from nltk.tokenize import * + +TITLE = re.compile(r"^\[t\](.*)$") # [t] Title +FEATURES = re.compile( + r"((?:(?:\w+\s)+)?\w+)\[((?:\+|\-)\d)\]" +) # find 'feature' in feature[+3] +NOTES = re.compile(r"\[(?!t)(p|u|s|cc|cs)\]") # find 'p' in camera[+2][p] +SENT = re.compile(r"##(.*)$") # find tokenized sentence + + +class Review: + """ + A Review is the main block of a ReviewsCorpusReader. + """ + + def __init__(self, title=None, review_lines=None): + """ + :param title: the title of the review. + :param review_lines: the list of the ReviewLines that belong to the Review. + """ + self.title = title + if review_lines is None: + self.review_lines = [] + else: + self.review_lines = review_lines + + def add_line(self, review_line): + """ + Add a line (ReviewLine) to the review. + + :param review_line: a ReviewLine instance that belongs to the Review. + """ + assert isinstance(review_line, ReviewLine) + self.review_lines.append(review_line) + + def features(self): + """ + Return a list of features in the review. Each feature is a tuple made of + the specific item feature and the opinion strength about that feature. + + :return: all features of the review as a list of tuples (feat, score). + :rtype: list(tuple) + """ + features = [] + for review_line in self.review_lines: + features.extend(review_line.features) + return features + + def sents(self): + """ + Return all tokenized sentences in the review. 
+ + :return: all sentences of the review as lists of tokens. + :rtype: list(list(str)) + """ + return [review_line.sent for review_line in self.review_lines] + + def __repr__(self): + return 'Review(title="{}", review_lines={})'.format( + self.title, self.review_lines + ) + + +class ReviewLine: + """ + A ReviewLine represents a sentence of the review, together with (optional) + annotations of its features and notes about the reviewed item. + """ + + def __init__(self, sent, features=None, notes=None): + self.sent = sent + if features is None: + self.features = [] + else: + self.features = features + + if notes is None: + self.notes = [] + else: + self.notes = notes + + def __repr__(self): + return "ReviewLine(features={}, notes={}, sent={})".format( + self.features, self.notes, self.sent + ) + + +class ReviewsCorpusReader(CorpusReader): + """ + Reader for the Customer Review Data dataset by Hu, Liu (2004). + Note: we are not applying any sentence tokenization at the moment, just word + tokenization. + + >>> from nltk.corpus import product_reviews_1 + >>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt') + >>> review = camera_reviews[0] + >>> review.sents()[0] # doctest: +NORMALIZE_WHITESPACE + ['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am', + 'extremely', 'satisfied', 'with', 'the', 'purchase', '.'] + >>> review.features() # doctest: +NORMALIZE_WHITESPACE + [('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'), + ('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'), + ('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'), + ('option', '+1')] + + We can also reach the same information directly from the stream: + + >>> product_reviews_1.features('Canon_G3.txt') + [('canon powershot g3', '+3'), ('use', '+2'), ...] + + We can compute stats for specific product features: + + >>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture']) + >>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture']) + >>> mean = tot / n_reviews + >>> print(n_reviews, tot, mean) + 15 24 1.6 + """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, root, fileids, word_tokenizer=WordPunctTokenizer(), encoding="utf8" + ): + """ + :param root: The root directory for the corpus. + :param fileids: a list or regexp specifying the fileids in the corpus. + :param word_tokenizer: a tokenizer for breaking sentences or paragraphs + into words. Default: `WordPunctTokenizer` + :param encoding: the encoding that should be used to read the corpus. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + self._word_tokenizer = word_tokenizer + self._readme = "README.txt" + + def features(self, fileids=None): + """ + Return a list of features. Each feature is a tuple made of the specific + item feature and the opinion strength about that feature. + + :param fileids: a list or regexp specifying the ids of the files whose + features have to be returned. + :return: all features for the item(s) in the given file(s). + :rtype: list(tuple) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(fileid, self._read_features, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def reviews(self, fileids=None): + """ + Return all the reviews as a list of Review objects. 
If `fileids` is + specified, return all the reviews from each of the specified files. + + :param fileids: a list or regexp specifying the ids of the files whose + reviews have to be returned. + :return: the given file(s) as a list of reviews. + """ + if fileids is None: + fileids = self._fileids + return concat( + [ + self.CorpusView(fileid, self._read_review_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + Return all sentences in the corpus or in the specified files. + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. + :return: the given file(s) as a list of sentences, each encoded as a + list of word strings. + :rtype: list(list(str)) + """ + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def words(self, fileids=None): + """ + Return all words and punctuation symbols in the corpus or in the specified + files. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_features(self, stream): + features = [] + for i in range(20): + line = stream.readline() + if not line: + return features + features.extend(re.findall(FEATURES, line)) + return features + + def _read_review_block(self, stream): + while True: + line = stream.readline() + if not line: + return [] # end of file. + title_match = re.match(TITLE, line) + if title_match: + review = Review( + title=title_match.group(1).strip() + ) # We create a new review + break + + # Scan until we find another line matching the regexp, or EOF. + while True: + oldpos = stream.tell() + line = stream.readline() + # End of file: + if not line: + return [review] + # Start of a new review: backup to just before it starts, and + # return the review we've already collected. + if re.match(TITLE, line): + stream.seek(oldpos) + return [review] + # Anything else is part of the review line. + feats = re.findall(FEATURES, line) + notes = re.findall(NOTES, line) + sent = re.findall(SENT, line) + if sent: + sent = self._word_tokenizer.tokenize(sent[0]) + review_line = ReviewLine(sent=sent, features=feats, notes=notes) + review.add_line(review_line) + + def _read_sent_block(self, stream): + sents = [] + for review in self._read_review_block(stream): + sents.extend([sent for sent in review.sents()]) + return sents + + def _read_word_block(self, stream): + words = [] + for i in range(20): # Read 20 lines at a time. 
+ line = stream.readline() + sent = re.findall(SENT, line) + if sent: + words.extend(self._word_tokenizer.tokenize(sent[0])) + return words diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/rte.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/rte.py new file mode 100644 index 0000000000000000000000000000000000000000..98261fae9adf04ecf6938c966ec3cae4fcc775a2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/rte.py @@ -0,0 +1,146 @@ +# Natural Language Toolkit: RTE Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora. + +The files were taken from the RTE1, RTE2 and RTE3 datasets and the files +were regularized. + +Filenames are of the form rte*_dev.xml and rte*_test.xml. The latter are the +gold standard annotated files. + +Each entailment corpus is a list of 'text'/'hypothesis' pairs. The following +example is taken from RTE3:: + + + + The sale was made to pay Yukos' US$ 27.5 billion tax bill, + Yuganskneftegaz was originally sold for US$ 9.4 billion to a little known + company Baikalfinansgroup which was later bought by the Russian + state-owned oil company Rosneft . + + Baikalfinansgroup was sold to Rosneft. + + +In order to provide globally unique IDs for each pair, a new attribute +``challenge`` has been added to the root element ``entailment-corpus`` of each +file, taking values 1, 2 or 3. The GID is formatted 'm-n', where 'm' is the +challenge number and 'n' is the pair ID. +""" +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.xmldocs import * + + +def norm(value_string): + """ + Normalize the string value in an RTE pair's ``value`` or ``entailment`` + attribute as an integer (1, 0). + + :param value_string: the label used to classify a text/hypothesis pair + :type value_string: str + :rtype: int + """ + + valdict = {"TRUE": 1, "FALSE": 0, "YES": 1, "NO": 0} + return valdict[value_string.upper()] + + +class RTEPair: + """ + Container for RTE text-hypothesis pairs. + + The entailment relation is signalled by the ``value`` attribute in RTE1, and by + ``entailment`` in RTE2 and RTE3. These both get mapped on to the ``entailment`` + attribute of this class. 
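+
+    A short access sketch (skipped: it needs the ``rte`` data package; the
+    fileid follows the ``rte*_dev.xml`` naming described in the module
+    docstring):
+
+    >>> from nltk.corpus import rte                      # doctest: +SKIP
+    >>> pair = rte.pairs('rte3_dev.xml')[0]              # doctest: +SKIP
+    >>> summary = (pair.gid, pair.value, pair.hyp)       # doctest: +SKIP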
+ """ + + def __init__( + self, + pair, + challenge=None, + id=None, + text=None, + hyp=None, + value=None, + task=None, + length=None, + ): + """ + :param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3) + :param id: identifier for the pair + :param text: the text component of the pair + :param hyp: the hypothesis component of the pair + :param value: classification label for the pair + :param task: attribute for the particular NLP task that the data was drawn from + :param length: attribute for the length of the text of the pair + """ + self.challenge = challenge + self.id = pair.attrib["id"] + self.gid = f"{self.challenge}-{self.id}" + self.text = pair[0].text + self.hyp = pair[1].text + + if "value" in pair.attrib: + self.value = norm(pair.attrib["value"]) + elif "entailment" in pair.attrib: + self.value = norm(pair.attrib["entailment"]) + else: + self.value = value + if "task" in pair.attrib: + self.task = pair.attrib["task"] + else: + self.task = task + if "length" in pair.attrib: + self.length = pair.attrib["length"] + else: + self.length = length + + def __repr__(self): + if self.challenge: + return f"" + else: + return "" % self.id + + +class RTECorpusReader(XMLCorpusReader): + """ + Corpus reader for corpora in RTE challenges. + + This is just a wrapper around the XMLCorpusReader. See module docstring above for the expected + structure of input documents. + """ + + def _read_etree(self, doc): + """ + Map the XML input into an RTEPair. + + This uses the ``getiterator()`` method from the ElementTree package to + find all the ```` elements. + + :param doc: a parsed XML document + :rtype: list(RTEPair) + """ + try: + challenge = doc.attrib["challenge"] + except KeyError: + challenge = None + pairiter = doc.iter("pair") + return [RTEPair(pair, challenge=challenge) for pair in pairiter] + + def pairs(self, fileids): + """ + Build a list of RTEPairs from a RTE corpus. + + :param fileids: a list of RTE corpus fileids + :type: list + :rtype: list(RTEPair) + """ + if isinstance(fileids, str): + fileids = [fileids] + return concat([self._read_etree(self.xml(fileid)) for fileid in fileids]) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py new file mode 100644 index 0000000000000000000000000000000000000000..b4ae423eb920d6d86c0fce8a43881f7bdeaf5b35 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: String Category Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Read tuples from a corpus consisting of categorized strings. +For example, from the question classification corpus: + +NUM:dist How far is it from Denver to Aspen ? +LOC:city What county is Modesto , California in ? +HUM:desc Who was Galileo ? +DESC:def What is an atom ? +NUM:date When did Hawaii become a state ? +""" + +from nltk.corpus.reader.api import * + +# based on PPAttachmentCorpusReader +from nltk.corpus.reader.util import * + + +# [xx] Should the order of the tuple be reversed -- in most other places +# in nltk, we use the form (data, tag) -- e.g., tagged words and +# labeled texts for classifiers. +class StringCategoryCorpusReader(CorpusReader): + def __init__(self, root, fileids, delimiter=" ", encoding="utf8"): + """ + :param root: The root directory for this corpus. 
+ :param fileids: A list or regexp specifying the fileids in this corpus. + :param delimiter: Field delimiter + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._delimiter = delimiter + + def tuples(self, fileids=None): + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _read_tuple_block(self, stream): + line = stream.readline().strip() + if line: + return [tuple(line.split(self._delimiter, 1))] + else: + return [] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py new file mode 100644 index 0000000000000000000000000000000000000000..2dcfe1b6ff5487a57da8b5e8a9b919eba8b3b6e3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py @@ -0,0 +1,354 @@ +# Natural Language Toolkit: Tagged Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Jacob Perkins +# URL: +# For license information, see LICENSE.TXT + +""" +A reader for corpora whose documents contain part-of-speech-tagged words. +""" + +import os + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.timit import read_timit_block +from nltk.corpus.reader.util import * +from nltk.tag import map_tag, str2tuple +from nltk.tokenize import * + + +class TaggedCorpusReader(CorpusReader): + """ + Reader for simple part-of-speech tagged corpora. Paragraphs are + assumed to be split using blank lines. Sentences and words can be + tokenized using the default tokenizers, or by custom tokenizers + specified as parameters to the constructor. Words are parsed + using ``nltk.tag.str2tuple``. By default, ``'/'`` is used as the + separator. I.e., words should have the form:: + + word1/tag1 word2/tag2 word3/tag3 ... + + But custom separators may be specified as parameters to the + constructor. Part of speech tags are case-normalized to upper + case. + """ + + def __init__( + self, + root, + fileids, + sep="/", + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=RegexpTokenizer("\n", gaps=True), + para_block_reader=read_blankline_block, + encoding="utf8", + tagset=None, + ): + """ + Construct a new Tagged Corpus reader for a set of documents + located at the given root directory. Example usage: + + >>> root = '/...path to corpus.../' + >>> reader = TaggedCorpusReader(root, '.*', '.txt') # doctest: +SKIP + + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + """ + CorpusReader.__init__(self, root, fileids, encoding) + self._sep = sep + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + self._tagset = tagset + + def words(self, fileids=None): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + TaggedCorpusView( + fileid, + enc, + False, + False, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + None, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. 
+ :rtype: list(list(str)) + """ + return concat( + [ + TaggedCorpusView( + fileid, + enc, + False, + True, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + None, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def paras(self, fileids=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of word strings. + :rtype: list(list(list(str))) + """ + return concat( + [ + TaggedCorpusView( + fileid, + enc, + False, + True, + True, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + None, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. + :rtype: list(tuple(str,str)) + """ + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + TaggedCorpusView( + fileid, + enc, + True, + False, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + tag_mapping_function, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. + + :rtype: list(list(tuple(str,str))) + """ + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + TaggedCorpusView( + fileid, + enc, + True, + True, + False, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + tag_mapping_function, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_paras(self, fileids=None, tagset=None): + """ + :return: the given file(s) as a list of + paragraphs, each encoded as a list of sentences, which are + in turn encoded as lists of ``(word,tag)`` tuples. + :rtype: list(list(list(tuple(str,str)))) + """ + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + TaggedCorpusView( + fileid, + enc, + True, + True, + True, + self._sep, + self._word_tokenizer, + self._sent_tokenizer, + self._para_block_reader, + tag_mapping_function, + ) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class CategorizedTaggedCorpusReader(CategorizedCorpusReader, TaggedCorpusReader): + """ + A reader for part-of-speech tagged corpora whose documents are + divided into categories based on their file identifiers. + """ + + def __init__(self, *args, **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to + the ``CategorizedCorpusReader`` constructor. The remaining arguments + are passed to the ``TaggedCorpusReader``. 
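+
+        A minimal sketch (the file layout and ``cat_pattern`` below are only
+        illustrative assumptions)::
+
+            >>> reader = CategorizedTaggedCorpusReader(
+            ...     '/path/to/corpus', r'.*\.pos',
+            ...     cat_pattern=r'(\w+)/.*')                   # doctest: +SKIP
+            >>> reader.tagged_words(categories='news')[:5]     # doctest: +SKIP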
+ """ + CategorizedCorpusReader.__init__(self, kwargs) + TaggedCorpusReader.__init__(self, *args, **kwargs) + + def tagged_words(self, fileids=None, categories=None, tagset=None): + return super().tagged_words(self._resolve(fileids, categories), tagset) + + def tagged_sents(self, fileids=None, categories=None, tagset=None): + return super().tagged_sents(self._resolve(fileids, categories), tagset) + + def tagged_paras(self, fileids=None, categories=None, tagset=None): + return super().tagged_paras(self._resolve(fileids, categories), tagset) + + +class TaggedCorpusView(StreamBackedCorpusView): + """ + A specialized corpus view for tagged documents. It can be + customized via flags to divide the tagged corpus documents up by + sentence or paragraph, and to include or omit part of speech tags. + ``TaggedCorpusView`` objects are typically created by + ``TaggedCorpusReader`` (not directly by nltk users). + """ + + def __init__( + self, + corpus_file, + encoding, + tagged, + group_by_sent, + group_by_para, + sep, + word_tokenizer, + sent_tokenizer, + para_block_reader, + tag_mapping_function=None, + ): + self._tagged = tagged + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + self._sep = sep + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._para_block_reader = para_block_reader + self._tag_mapping_function = tag_mapping_function + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + """Reads one paragraph at a time.""" + block = [] + for para_str in self._para_block_reader(stream): + para = [] + for sent_str in self._sent_tokenizer.tokenize(para_str): + sent = [ + str2tuple(s, self._sep) + for s in self._word_tokenizer.tokenize(sent_str) + ] + if self._tag_mapping_function: + sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent] + if not self._tagged: + sent = [w for (w, t) in sent] + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + if self._group_by_para: + block.append(para) + else: + block.extend(para) + return block + + +# needs to implement simplified tags +class MacMorphoCorpusReader(TaggedCorpusReader): + """ + A corpus reader for the MAC_MORPHO corpus. Each line contains a + single tagged word, using '_' as a separator. Sentence boundaries + are based on the end-sentence tag ('_.'). Paragraph information + is not included in the corpus, so each paragraph returned by + ``self.paras()`` and ``self.tagged_paras()`` contains a single + sentence. + """ + + def __init__(self, root, fileids, encoding="utf8", tagset=None): + TaggedCorpusReader.__init__( + self, + root, + fileids, + sep="_", + word_tokenizer=LineTokenizer(), + sent_tokenizer=RegexpTokenizer(".*\n"), + para_block_reader=self._read_block, + encoding=encoding, + tagset=tagset, + ) + + def _read_block(self, stream): + return read_regexp_block(stream, r".*", r".*_\.") + + +class TimitTaggedCorpusReader(TaggedCorpusReader): + """ + A corpus reader for tagged sentences that are included in the TIMIT corpus. 
+ """ + + def __init__(self, *args, **kwargs): + TaggedCorpusReader.__init__( + self, para_block_reader=read_timit_block, *args, **kwargs + ) + + def paras(self): + raise NotImplementedError("use sents() instead") + + def tagged_paras(self): + raise NotImplementedError("use tagged_sents() instead") diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/timit.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/timit.py new file mode 100644 index 0000000000000000000000000000000000000000..e399ac2ff31fd39c5dfc9ac9e9de0bc29d1f1842 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/timit.py @@ -0,0 +1,510 @@ +# Natural Language Toolkit: TIMIT Corpus Reader +# +# Copyright (C) 2001-2007 NLTK Project +# Author: Haejoong Lee +# Steven Bird +# Jacob Perkins +# URL: +# For license information, see LICENSE.TXT + +# [xx] this docstring is out-of-date: +""" +Read tokens, phonemes and audio data from the NLTK TIMIT Corpus. + +This corpus contains selected portion of the TIMIT corpus. + + - 16 speakers from 8 dialect regions + - 1 male and 1 female from each dialect region + - total 130 sentences (10 sentences per speaker. Note that some + sentences are shared among other speakers, especially sa1 and sa2 + are spoken by all speakers.) + - total 160 recording of sentences (10 recordings per speaker) + - audio format: NIST Sphere, single channel, 16kHz sampling, + 16 bit sample, PCM encoding + + +Module contents +=============== + +The timit corpus reader provides 4 functions and 4 data items. + + - utterances + + List of utterances in the corpus. There are total 160 utterances, + each of which corresponds to a unique utterance of a speaker. + Here's an example of an utterance identifier in the list:: + + dr1-fvmh0/sx206 + - _---- _--- + | | | | | + | | | | | + | | | | `--- sentence number + | | | `----- sentence type (a:all, i:shared, x:exclusive) + | | `--------- speaker ID + | `------------ sex (m:male, f:female) + `-------------- dialect region (1..8) + + - speakers + + List of speaker IDs. An example of speaker ID:: + + dr1-fvmh0 + + Note that if you split an item ID with colon and take the first element of + the result, you will get a speaker ID. + + >>> itemid = 'dr1-fvmh0/sx206' + >>> spkrid , sentid = itemid.split('/') + >>> spkrid + 'dr1-fvmh0' + + The second element of the result is a sentence ID. + + - dictionary() + + Phonetic dictionary of words contained in this corpus. This is a Python + dictionary from words to phoneme lists. + + - spkrinfo() + + Speaker information table. It's a Python dictionary from speaker IDs to + records of 10 fields. Speaker IDs the same as the ones in timie.speakers. 
+ Each record is a dictionary from field names to values, and the fields are + as follows:: + + id speaker ID as defined in the original TIMIT speaker info table + sex speaker gender (M:male, F:female) + dr speaker dialect region (1:new england, 2:northern, + 3:north midland, 4:south midland, 5:southern, 6:new york city, + 7:western, 8:army brat (moved around)) + use corpus type (TRN:training, TST:test) + in this sample corpus only TRN is available + recdate recording date + birthdate speaker birth date + ht speaker height + race speaker race (WHT:white, BLK:black, AMR:american indian, + SPN:spanish-american, ORN:oriental,???:unknown) + edu speaker education level (HS:high school, AS:associate degree, + BS:bachelor's degree (BS or BA), MS:master's degree (MS or MA), + PHD:doctorate degree (PhD,JD,MD), ??:unknown) + comments comments by the recorder + +The 4 functions are as follows. + + - tokenized(sentences=items, offset=False) + + Given a list of items, returns an iterator of a list of word lists, + each of which corresponds to an item (sentence). If offset is set to True, + each element of the word list is a tuple of word(string), start offset and + end offset, where offset is represented as a number of 16kHz samples. + + - phonetic(sentences=items, offset=False) + + Given a list of items, returns an iterator of a list of phoneme lists, + each of which corresponds to an item (sentence). If offset is set to True, + each element of the phoneme list is a tuple of word(string), start offset + and end offset, where offset is represented as a number of 16kHz samples. + + - audiodata(item, start=0, end=None) + + Given an item, returns a chunk of audio samples formatted into a string. + When the function is called, if start and end are omitted, the entire + samples of the recording will be returned. If only end is omitted, + samples from the start offset to the end of the recording will be returned. + + - play(data) + + Play the given audio samples. The audio samples can be obtained from the + timit.audiodata function. + +""" +import sys +import time + +from nltk.corpus.reader.api import * +from nltk.internals import import_from_stdlib +from nltk.tree import Tree + + +class TimitCorpusReader(CorpusReader): + """ + Reader for the TIMIT corpus (or any other corpus with the same + file layout and use of file formats). The corpus root directory + should contain the following files: + + - timitdic.txt: dictionary of standard transcriptions + - spkrinfo.txt: table of speaker information + + In addition, the root directory should contain one subdirectory + for each speaker, containing three files for each utterance: + + - .txt: text content of utterances + - .wrd: tokenized text content of utterances + - .phn: phonetic transcription of utterances + - .wav: utterance sound file + """ + + _FILE_RE = r"(\w+-\w+/\w+\.(phn|txt|wav|wrd))|" + r"timitdic\.txt|spkrinfo\.txt" + """A regexp matching fileids that are used by this corpus reader.""" + _UTTERANCE_RE = r"\w+-\w+/\w+\.txt" + + def __init__(self, root, encoding="utf8"): + """ + Construct a new TIMIT corpus reader in the given directory. + :param root: The root directory for this corpus. 
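+
+        A small sketch (assuming the ``timit`` sample corpus is installed)::
+
+            >>> from nltk.corpus import timit          # doctest: +SKIP
+            >>> timit.spkrid('dr1-fvmh0/sx206')        # doctest: +SKIP
+            'dr1-fvmh0'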
+ """ + # Ensure that wave files don't get treated as unicode data: + if isinstance(encoding, str): + encoding = [(r".*\.wav", None), (".*", encoding)] + + CorpusReader.__init__( + self, root, find_corpus_fileids(root, self._FILE_RE), encoding=encoding + ) + + self._utterances = [ + name[:-4] for name in find_corpus_fileids(root, self._UTTERANCE_RE) + ] + """A list of the utterance identifiers for all utterances in + this corpus.""" + + self._speakerinfo = None + self._root = root + self.speakers = sorted({u.split("/")[0] for u in self._utterances}) + + def fileids(self, filetype=None): + """ + Return a list of file identifiers for the files that make up + this corpus. + + :param filetype: If specified, then ``filetype`` indicates that + only the files that have the given type should be + returned. Accepted values are: ``txt``, ``wrd``, ``phn``, + ``wav``, or ``metadata``, + """ + if filetype is None: + return CorpusReader.fileids(self) + elif filetype in ("txt", "wrd", "phn", "wav"): + return [f"{u}.{filetype}" for u in self._utterances] + elif filetype == "metadata": + return ["timitdic.txt", "spkrinfo.txt"] + else: + raise ValueError("Bad value for filetype: %r" % filetype) + + def utteranceids( + self, dialect=None, sex=None, spkrid=None, sent_type=None, sentid=None + ): + """ + :return: A list of the utterance identifiers for all + utterances in this corpus, or for the given speaker, dialect + region, gender, sentence type, or sentence number, if + specified. + """ + if isinstance(dialect, str): + dialect = [dialect] + if isinstance(sex, str): + sex = [sex] + if isinstance(spkrid, str): + spkrid = [spkrid] + if isinstance(sent_type, str): + sent_type = [sent_type] + if isinstance(sentid, str): + sentid = [sentid] + + utterances = self._utterances[:] + if dialect is not None: + utterances = [u for u in utterances if u[2] in dialect] + if sex is not None: + utterances = [u for u in utterances if u[4] in sex] + if spkrid is not None: + utterances = [u for u in utterances if u[:9] in spkrid] + if sent_type is not None: + utterances = [u for u in utterances if u[11] in sent_type] + if sentid is not None: + utterances = [u for u in utterances if u[10:] in spkrid] + return utterances + + def transcription_dict(self): + """ + :return: A dictionary giving the 'standard' transcription for + each word. + """ + _transcriptions = {} + with self.open("timitdic.txt") as fp: + for line in fp: + if not line.strip() or line[0] == ";": + continue + m = re.match(r"\s*(\S+)\s+/(.*)/\s*$", line) + if not m: + raise ValueError("Bad line: %r" % line) + _transcriptions[m.group(1)] = m.group(2).split() + return _transcriptions + + def spkrid(self, utterance): + return utterance.split("/")[0] + + def sentid(self, utterance): + return utterance.split("/")[1] + + def utterance(self, spkrid, sentid): + return f"{spkrid}/{sentid}" + + def spkrutteranceids(self, speaker): + """ + :return: A list of all utterances associated with a given + speaker. + """ + return [ + utterance + for utterance in self._utterances + if utterance.startswith(speaker + "/") + ] + + def spkrinfo(self, speaker): + """ + :return: A dictionary mapping .. something. 
+ """ + if speaker in self._utterances: + speaker = self.spkrid(speaker) + + if self._speakerinfo is None: + self._speakerinfo = {} + with self.open("spkrinfo.txt") as fp: + for line in fp: + if not line.strip() or line[0] == ";": + continue + rec = line.strip().split(None, 9) + key = f"dr{rec[2]}-{rec[1].lower()}{rec[0].lower()}" + self._speakerinfo[key] = SpeakerInfo(*rec) + + return self._speakerinfo[speaker] + + def phones(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".phn"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append(line.split()[-1]) + return results + + def phone_times(self, utterances=None): + """ + offset is represented as a number of 16kHz samples! + """ + results = [] + for fileid in self._utterance_fileids(utterances, ".phn"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append( + ( + line.split()[2], + int(line.split()[0]), + int(line.split()[1]), + ) + ) + return results + + def words(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".wrd"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append(line.split()[-1]) + return results + + def word_times(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".wrd"): + with self.open(fileid) as fp: + for line in fp: + if line.strip(): + results.append( + ( + line.split()[2], + int(line.split()[0]), + int(line.split()[1]), + ) + ) + return results + + def sents(self, utterances=None): + results = [] + for fileid in self._utterance_fileids(utterances, ".wrd"): + with self.open(fileid) as fp: + results.append([line.split()[-1] for line in fp if line.strip()]) + return results + + def sent_times(self, utterances=None): + # TODO: Check this + return [ + ( + line.split(None, 2)[-1].strip(), + int(line.split()[0]), + int(line.split()[1]), + ) + for fileid in self._utterance_fileids(utterances, ".txt") + for line in self.open(fileid) + if line.strip() + ] + + def phone_trees(self, utterances=None): + if utterances is None: + utterances = self._utterances + if isinstance(utterances, str): + utterances = [utterances] + + trees = [] + for utterance in utterances: + word_times = self.word_times(utterance) + phone_times = self.phone_times(utterance) + sent_times = self.sent_times(utterance) + + while sent_times: + (sent, sent_start, sent_end) = sent_times.pop(0) + trees.append(Tree("S", [])) + while ( + word_times and phone_times and phone_times[0][2] <= word_times[0][1] + ): + trees[-1].append(phone_times.pop(0)[0]) + while word_times and word_times[0][2] <= sent_end: + (word, word_start, word_end) = word_times.pop(0) + trees[-1].append(Tree(word, [])) + while phone_times and phone_times[0][2] <= word_end: + trees[-1][-1].append(phone_times.pop(0)[0]) + while phone_times and phone_times[0][2] <= sent_end: + trees[-1].append(phone_times.pop(0)[0]) + return trees + + # [xx] NOTE: This is currently broken -- we're assuming that the + # fileids are WAV fileids (aka RIFF), but they're actually NIST SPHERE + # fileids. 
+ def wav(self, utterance, start=0, end=None): + # nltk.chunk conflicts with the stdlib module 'chunk' + wave = import_from_stdlib("wave") + + w = wave.open(self.open(utterance + ".wav"), "rb") + + if end is None: + end = w.getnframes() + + # Skip past frames before start, then read the frames we want + w.readframes(start) + frames = w.readframes(end - start) + + # Open a new temporary file -- the wave module requires + # an actual file, and won't work w/ stringio. :( + tf = tempfile.TemporaryFile() + out = wave.open(tf, "w") + + # Write the parameters & data to the new file. + out.setparams(w.getparams()) + out.writeframes(frames) + out.close() + + # Read the data back from the file, and return it. The + # file will automatically be deleted when we return. + tf.seek(0) + return tf.read() + + def audiodata(self, utterance, start=0, end=None): + assert end is None or end > start + headersize = 44 + with self.open(utterance + ".wav") as fp: + if end is None: + data = fp.read() + else: + data = fp.read(headersize + end * 2) + return data[headersize + start * 2 :] + + def _utterance_fileids(self, utterances, extension): + if utterances is None: + utterances = self._utterances + if isinstance(utterances, str): + utterances = [utterances] + return [f"{u}{extension}" for u in utterances] + + def play(self, utterance, start=0, end=None): + """ + Play the given audio sample. + + :param utterance: The utterance id of the sample to play + """ + # Method 1: os audio dev. + try: + import ossaudiodev + + try: + dsp = ossaudiodev.open("w") + dsp.setfmt(ossaudiodev.AFMT_S16_LE) + dsp.channels(1) + dsp.speed(16000) + dsp.write(self.audiodata(utterance, start, end)) + dsp.close() + except OSError as e: + print( + ( + "can't acquire the audio device; please " + "activate your audio device." + ), + file=sys.stderr, + ) + print("system error message:", str(e), file=sys.stderr) + return + except ImportError: + pass + + # Method 2: pygame + try: + # FIXME: this won't work under python 3 + import pygame.mixer + import StringIO + + pygame.mixer.init(16000) + f = StringIO.StringIO(self.wav(utterance, start, end)) + pygame.mixer.Sound(f).play() + while pygame.mixer.get_busy(): + time.sleep(0.01) + return + except ImportError: + pass + + # Method 3: complain. :) + print( + ("you must install pygame or ossaudiodev " "for audio playback."), + file=sys.stderr, + ) + + +class SpeakerInfo: + def __init__( + self, id, sex, dr, use, recdate, birthdate, ht, race, edu, comments=None + ): + self.id = id + self.sex = sex + self.dr = dr + self.use = use + self.recdate = recdate + self.birthdate = birthdate + self.ht = ht + self.race = race + self.edu = edu + self.comments = comments + + def __repr__(self): + attribs = "id sex dr use recdate birthdate ht race edu comments" + args = [f"{attr}={getattr(self, attr)!r}" for attr in attribs.split()] + return "SpeakerInfo(%s)" % (", ".join(args)) + + +def read_timit_block(stream): + """ + Block reader for timit tagged sentences, which are preceded by a sentence + number that will be ignored. 
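+    For example, a line such as ``0 she had your dark suit ...`` yields the
+    sentence text with the leading number stripped.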
+ """ + line = stream.readline() + if not line: + return [] + n, sent = line.split(" ", 1) + return [sent] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py new file mode 100644 index 0000000000000000000000000000000000000000..5684ea0b90129223ada6e7dc62fd6a6708e90960 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py @@ -0,0 +1,76 @@ +# Natural Language Toolkit: Toolbox Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Greg Aumann +# Stuart Robinson +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Module for reading, writing and manipulating +Toolbox databases and settings fileids. +""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.toolbox import ToolboxData + + +class ToolboxCorpusReader(CorpusReader): + def xml(self, fileids, key=None): + return concat( + [ + ToolboxData(path, enc).parse(key=key) + for (path, enc) in self.abspaths(fileids, True) + ] + ) + + def fields( + self, + fileids, + strip=True, + unwrap=True, + encoding="utf8", + errors="strict", + unicode_fields=None, + ): + return concat( + [ + list( + ToolboxData(fileid, enc).fields( + strip, unwrap, encoding, errors, unicode_fields + ) + ) + for (fileid, enc) in self.abspaths(fileids, include_encoding=True) + ] + ) + + # should probably be done lazily: + def entries(self, fileids, **kwargs): + if "key" in kwargs: + key = kwargs["key"] + del kwargs["key"] + else: + key = "lx" # the default key in MDF + entries = [] + for marker, contents in self.fields(fileids, **kwargs): + if marker == key: + entries.append((contents, [])) + else: + try: + entries[-1][-1].append((marker, contents)) + except IndexError: + pass + return entries + + def words(self, fileids, key="lx"): + return [contents for marker, contents in self.fields(fileids) if marker == key] + + +def demo(): + pass + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py new file mode 100644 index 0000000000000000000000000000000000000000..aced7e83fc7c48027d4d1eeb6aca46531ab57969 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py @@ -0,0 +1,166 @@ +# Natural Language Toolkit: Word List Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tokenize import line_tokenize + + +class WordListCorpusReader(CorpusReader): + """ + List of words, one per line. Blank lines are ignored. + """ + + def words(self, fileids=None, ignore_lines_startswith="\n"): + return [ + line + for line in line_tokenize(self.raw(fileids)) + if not line.startswith(ignore_lines_startswith) + ] + + +class SwadeshCorpusReader(WordListCorpusReader): + def entries(self, fileids=None): + """ + :return: a tuple of words for the specified fileids. + """ + if not fileids: + fileids = self.fileids() + + wordlists = [self.words(f) for f in fileids] + return list(zip(*wordlists)) + + +class NonbreakingPrefixesCorpusReader(WordListCorpusReader): + """ + This is a class to read the nonbreaking prefixes textfiles from the + Moses Machine Translation toolkit. These lists are used in the Python port + of the Moses' word tokenizer. 
+ """ + + available_langs = { + "catalan": "ca", + "czech": "cs", + "german": "de", + "greek": "el", + "english": "en", + "spanish": "es", + "finnish": "fi", + "french": "fr", + "hungarian": "hu", + "icelandic": "is", + "italian": "it", + "latvian": "lv", + "dutch": "nl", + "polish": "pl", + "portuguese": "pt", + "romanian": "ro", + "russian": "ru", + "slovak": "sk", + "slovenian": "sl", + "swedish": "sv", + "tamil": "ta", + } + # Also, add the lang IDs as the keys. + available_langs.update({v: v for v in available_langs.values()}) + + def words(self, lang=None, fileids=None, ignore_lines_startswith="#"): + """ + This module returns a list of nonbreaking prefixes for the specified + language(s). + + >>> from nltk.corpus import nonbreaking_prefixes as nbp + >>> nbp.words('en')[:10] == [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'J'] + True + >>> nbp.words('ta')[:5] == [u'\u0b85', u'\u0b86', u'\u0b87', u'\u0b88', u'\u0b89'] + True + + :return: a list words for the specified language(s). + """ + # If *lang* in list of languages available, allocate apt fileid. + # Otherwise, the function returns non-breaking prefixes for + # all languages when fileids==None. + if lang in self.available_langs: + lang = self.available_langs[lang] + fileids = ["nonbreaking_prefix." + lang] + return [ + line + for line in line_tokenize(self.raw(fileids)) + if not line.startswith(ignore_lines_startswith) + ] + + +class UnicharsCorpusReader(WordListCorpusReader): + """ + This class is used to read lists of characters from the Perl Unicode + Properties (see https://perldoc.perl.org/perluniprops.html). + The files in the perluniprop.zip are extracted using the Unicode::Tussle + module from https://search.cpan.org/~bdfoy/Unicode-Tussle-1.11/lib/Unicode/Tussle.pm + """ + + # These are categories similar to the Perl Unicode Properties + available_categories = [ + "Close_Punctuation", + "Currency_Symbol", + "IsAlnum", + "IsAlpha", + "IsLower", + "IsN", + "IsSc", + "IsSo", + "IsUpper", + "Line_Separator", + "Number", + "Open_Punctuation", + "Punctuation", + "Separator", + "Symbol", + ] + + def chars(self, category=None, fileids=None): + """ + This module returns a list of characters from the Perl Unicode Properties. + They are very useful when porting Perl tokenizers to Python. + + >>> from nltk.corpus import perluniprops as pup + >>> pup.chars('Open_Punctuation')[:5] == [u'(', u'[', u'{', u'\u0f3a', u'\u0f3c'] + True + >>> pup.chars('Currency_Symbol')[:5] == [u'$', u'\xa2', u'\xa3', u'\xa4', u'\xa5'] + True + >>> pup.available_categories + ['Close_Punctuation', 'Currency_Symbol', 'IsAlnum', 'IsAlpha', 'IsLower', 'IsN', 'IsSc', 'IsSo', 'IsUpper', 'Line_Separator', 'Number', 'Open_Punctuation', 'Punctuation', 'Separator', 'Symbol'] + + :return: a list of characters given the specific unicode character category + """ + if category in self.available_categories: + fileids = [category + ".txt"] + return list(self.raw(fileids).strip()) + + +class MWAPPDBCorpusReader(WordListCorpusReader): + """ + This class is used to read the list of word pairs from the subset of lexical + pairs of The Paraphrase Database (PPDB) XXXL used in the Monolingual Word + Alignment (MWA) algorithm described in Sultan et al. (2014a, 2014b, 2015): + + - http://acl2014.org/acl2014/Q14/pdf/Q14-1017 + - https://www.aclweb.org/anthology/S14-2039 + - https://www.aclweb.org/anthology/S15-2027 + + The original source of the full PPDB corpus can be found on + https://www.cis.upenn.edu/~ccb/ppdb/ + + :return: a list of tuples of similar lexical terms. 
+ """ + + mwa_ppdb_xxxl_file = "ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs" + + def entries(self, fileids=mwa_ppdb_xxxl_file): + """ + :return: a tuple of synonym word pairs. + """ + return [tuple(line.split("\t")) for line in line_tokenize(self.raw(fileids))] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f10c3436dde87850528529b4ab1b4cf6413a1bce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/wordnet.py @@ -0,0 +1,2489 @@ +# Natural Language Toolkit: WordNet +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bethard +# Steven Bird +# Edward Loper +# Nitin Madnani +# Nasruddin A’aidil Shari +# Sim Wei Ying Geraldine +# Soe Lynn +# Francis Bond +# Eric Kafe + +# URL: +# For license information, see LICENSE.TXT + +""" +An NLTK interface for WordNet + +WordNet is a lexical database of English. +Using synsets, helps find conceptual relationships between words +such as hypernyms, hyponyms, synonyms, antonyms etc. + +For details about WordNet see: +https://wordnet.princeton.edu/ + +This module also allows you to find lemmas in languages +other than English from the Open Multilingual Wordnet +https://omwn.org/ + +""" + +import math +import os +import re +import warnings +from collections import defaultdict, deque +from functools import total_ordering +from itertools import chain, islice +from operator import itemgetter + +from nltk.corpus.reader import CorpusReader +from nltk.internals import deprecated +from nltk.probability import FreqDist +from nltk.util import binary_search_file as _binary_search_file + +###################################################################### +# Table of Contents +###################################################################### +# - Constants +# - Data Classes +# - WordNetError +# - Lemma +# - Synset +# - WordNet Corpus Reader +# - WordNet Information Content Corpus Reader +# - Similarity Metrics +# - Demo + +###################################################################### +# Constants +###################################################################### + +#: Positive infinity (for similarity functions) +_INF = 1e300 + +# { Part-of-speech constants +ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v" +# } + +POS_LIST = [NOUN, VERB, ADJ, ADV] + +# A table of strings that are used to express verb frames. 
+VERB_FRAME_STRINGS = ( + None, + "Something %s", + "Somebody %s", + "It is %sing", + "Something is %sing PP", + "Something %s something Adjective/Noun", + "Something %s Adjective/Noun", + "Somebody %s Adjective", + "Somebody %s something", + "Somebody %s somebody", + "Something %s somebody", + "Something %s something", + "Something %s to somebody", + "Somebody %s on something", + "Somebody %s somebody something", + "Somebody %s something to somebody", + "Somebody %s something from somebody", + "Somebody %s somebody with something", + "Somebody %s somebody of something", + "Somebody %s something on somebody", + "Somebody %s somebody PP", + "Somebody %s something PP", + "Somebody %s PP", + "Somebody's (body part) %s", + "Somebody %s somebody to INFINITIVE", + "Somebody %s somebody INFINITIVE", + "Somebody %s that CLAUSE", + "Somebody %s to somebody", + "Somebody %s to INFINITIVE", + "Somebody %s whether INFINITIVE", + "Somebody %s somebody into V-ing something", + "Somebody %s something with something", + "Somebody %s INFINITIVE", + "Somebody %s VERB-ing", + "It %s that CLAUSE", + "Something %s INFINITIVE", + # OEWN additions: + "Somebody %s at something", + "Somebody %s for something", + "Somebody %s on somebody", + "Somebody %s out of somebody", +) + +SENSENUM_RE = re.compile(r"\.[\d]+\.") + + +###################################################################### +# Data Classes +###################################################################### + + +class WordNetError(Exception): + """An exception class for wordnet-related errors.""" + + +@total_ordering +class _WordNetObject: + """A common base class for lemmas and synsets.""" + + def hypernyms(self): + return self._related("@") + + def _hypernyms(self): + return self._related("@") + + def instance_hypernyms(self): + return self._related("@i") + + def _instance_hypernyms(self): + return self._related("@i") + + def hyponyms(self): + return self._related("~") + + def instance_hyponyms(self): + return self._related("~i") + + def member_holonyms(self): + return self._related("#m") + + def substance_holonyms(self): + return self._related("#s") + + def part_holonyms(self): + return self._related("#p") + + def member_meronyms(self): + return self._related("%m") + + def substance_meronyms(self): + return self._related("%s") + + def part_meronyms(self): + return self._related("%p") + + def topic_domains(self): + return self._related(";c") + + def in_topic_domains(self): + return self._related("-c") + + def region_domains(self): + return self._related(";r") + + def in_region_domains(self): + return self._related("-r") + + def usage_domains(self): + return self._related(";u") + + def in_usage_domains(self): + return self._related("-u") + + def attributes(self): + return self._related("=") + + def entailments(self): + return self._related("*") + + def causes(self): + return self._related(">") + + def also_sees(self): + return self._related("^") + + def verb_groups(self): + return self._related("$") + + def similar_tos(self): + return self._related("&") + + def __hash__(self): + return hash(self._name) + + def __eq__(self, other): + return self._name == other._name + + def __ne__(self, other): + return self._name != other._name + + def __lt__(self, other): + return self._name < other._name + + +class Lemma(_WordNetObject): + """ + The lexical entry for a single morphological form of a + sense-disambiguated word. + + Create a Lemma from a "..." 
string where: + is the morphological stem identifying the synset + is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB + is the sense number, counting from 0. + is the morphological form of interest + + Note that and can be different, e.g. the Synset + 'salt.n.03' has the Lemmas 'salt.n.03.salt', 'salt.n.03.saltiness' and + 'salt.n.03.salinity'. + + Lemma attributes, accessible via methods with the same name: + + - name: The canonical name of this lemma. + - synset: The synset that this lemma belongs to. + - syntactic_marker: For adjectives, the WordNet string identifying the + syntactic position relative modified noun. See: + https://wordnet.princeton.edu/documentation/wninput5wn + For all other parts of speech, this attribute is None. + - count: The frequency of this lemma in wordnet. + + Lemma methods: + + Lemmas have the following methods for retrieving related Lemmas. They + correspond to the names for the pointer symbols defined here: + https://wordnet.princeton.edu/documentation/wninput5wn + These methods all return lists of Lemmas: + + - antonyms + - hypernyms, instance_hypernyms + - hyponyms, instance_hyponyms + - member_holonyms, substance_holonyms, part_holonyms + - member_meronyms, substance_meronyms, part_meronyms + - topic_domains, region_domains, usage_domains + - attributes + - derivationally_related_forms + - entailments + - causes + - also_sees + - verb_groups + - similar_tos + - pertainyms + """ + + __slots__ = [ + "_wordnet_corpus_reader", + "_name", + "_syntactic_marker", + "_synset", + "_frame_strings", + "_frame_ids", + "_lexname_index", + "_lex_id", + "_lang", + "_key", + ] + + def __init__( + self, + wordnet_corpus_reader, + synset, + name, + lexname_index, + lex_id, + syntactic_marker, + ): + self._wordnet_corpus_reader = wordnet_corpus_reader + self._name = name + self._syntactic_marker = syntactic_marker + self._synset = synset + self._frame_strings = [] + self._frame_ids = [] + self._lexname_index = lexname_index + self._lex_id = lex_id + self._lang = "eng" + + self._key = None # gets set later. + + def name(self): + return self._name + + def syntactic_marker(self): + return self._syntactic_marker + + def synset(self): + return self._synset + + def frame_strings(self): + return self._frame_strings + + def frame_ids(self): + return self._frame_ids + + def lang(self): + return self._lang + + def key(self): + return self._key + + def __repr__(self): + tup = type(self).__name__, self._synset._name, self._name + return "%s('%s.%s')" % tup + + def _related(self, relation_symbol): + get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset + if (self._name, relation_symbol) not in self._synset._lemma_pointers: + return [] + return [ + get_synset(pos, offset)._lemmas[lemma_index] + for pos, offset, lemma_index in self._synset._lemma_pointers[ + self._name, relation_symbol + ] + ] + + def count(self): + """Return the frequency count for this Lemma""" + return self._wordnet_corpus_reader.lemma_count(self) + + def antonyms(self): + return self._related("!") + + def derivationally_related_forms(self): + return self._related("+") + + def pertainyms(self): + return self._related("\\") + + +class Synset(_WordNetObject): + """Create a Synset from a ".." string where: + is the word's morphological stem + is one of the module attributes ADJ, ADJ_SAT, ADV, NOUN or VERB + is the sense number, counting from 0. 
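+    (In other words, synset names have the dotted form ``word.pos.nn``,
+    e.g. ``'dog.n.01'``.)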
+ + Synset attributes, accessible via methods with the same name: + + - name: The canonical name of this synset, formed using the first lemma + of this synset. Note that this may be different from the name + passed to the constructor if that string used a different lemma to + identify the synset. + - pos: The synset's part of speech, matching one of the module level + attributes ADJ, ADJ_SAT, ADV, NOUN or VERB. + - lemmas: A list of the Lemma objects for this synset. + - definition: The definition for this synset. + - examples: A list of example strings for this synset. + - offset: The offset in the WordNet dict file of this synset. + - lexname: The name of the lexicographer file containing this synset. + + Synset methods: + + Synsets have the following methods for retrieving related Synsets. + They correspond to the names for the pointer symbols defined here: + https://wordnet.princeton.edu/documentation/wninput5wn + These methods all return lists of Synsets. + + - hypernyms, instance_hypernyms + - hyponyms, instance_hyponyms + - member_holonyms, substance_holonyms, part_holonyms + - member_meronyms, substance_meronyms, part_meronyms + - attributes + - entailments + - causes + - also_sees + - verb_groups + - similar_tos + + Additionally, Synsets support the following methods specific to the + hypernym relation: + + - root_hypernyms + - common_hypernyms + - lowest_common_hypernyms + + Note that Synsets do not support the following relations because + these are defined by WordNet as lexical relations: + + - antonyms + - derivationally_related_forms + - pertainyms + """ + + __slots__ = [ + "_pos", + "_offset", + "_name", + "_frame_ids", + "_lemmas", + "_lemma_names", + "_definition", + "_examples", + "_lexname", + "_pointers", + "_lemma_pointers", + "_max_depth", + "_min_depth", + ] + + def __init__(self, wordnet_corpus_reader): + self._wordnet_corpus_reader = wordnet_corpus_reader + # All of these attributes get initialized by + # WordNetCorpusReader._synset_from_pos_and_line() + + self._pos = None + self._offset = None + self._name = None + self._frame_ids = [] + self._lemmas = [] + self._lemma_names = [] + self._definition = None + self._examples = [] + self._lexname = None # lexicographer name + self._all_hypernyms = None + + self._pointers = defaultdict(set) + self._lemma_pointers = defaultdict(list) + + def pos(self): + return self._pos + + def offset(self): + return self._offset + + def name(self): + return self._name + + def frame_ids(self): + return self._frame_ids + + def _doc(self, doc_type, default, lang="eng"): + """Helper method for Synset.definition and Synset.examples""" + corpus = self._wordnet_corpus_reader + if lang not in corpus.langs(): + return None + elif lang == "eng": + return default + else: + corpus._load_lang_data(lang) + of = corpus.ss2of(self) + i = corpus.lg_attrs.index(doc_type) + if of in corpus._lang_data[lang][i]: + return corpus._lang_data[lang][i][of] + else: + return None + + def definition(self, lang="eng"): + """Return definition in specified language""" + return self._doc("def", self._definition, lang=lang) + + def examples(self, lang="eng"): + """Return examples in specified language""" + return self._doc("exe", self._examples, lang=lang) + + def lexname(self): + return self._lexname + + def _needs_root(self): + if self._pos == NOUN and self._wordnet_corpus_reader.get_version() != "1.6": + return False + else: + return True + + def lemma_names(self, lang="eng"): + """Return all the lemma_names associated with the synset""" + if lang == "eng": + 
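+            # English lemma names are stored on the synset itself; other languages
+            # are looked up in the Open Multilingual Wordnet data loaded on demand.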
return self._lemma_names + else: + reader = self._wordnet_corpus_reader + reader._load_lang_data(lang) + i = reader.ss2of(self) + if i in reader._lang_data[lang][0]: + return reader._lang_data[lang][0][i] + else: + return [] + + def lemmas(self, lang="eng"): + """Return all the lemma objects associated with the synset""" + if lang == "eng": + return self._lemmas + elif self._name: + self._wordnet_corpus_reader._load_lang_data(lang) + lemmark = [] + lemmy = self.lemma_names(lang) + for lem in lemmy: + temp = Lemma( + self._wordnet_corpus_reader, + self, + lem, + self._wordnet_corpus_reader._lexnames.index(self.lexname()), + 0, + None, + ) + temp._lang = lang + lemmark.append(temp) + return lemmark + + def root_hypernyms(self): + """Get the topmost hypernyms of this synset in WordNet.""" + + result = [] + seen = set() + todo = [self] + while todo: + next_synset = todo.pop() + if next_synset not in seen: + seen.add(next_synset) + next_hypernyms = ( + next_synset.hypernyms() + next_synset.instance_hypernyms() + ) + if not next_hypernyms: + result.append(next_synset) + else: + todo.extend(next_hypernyms) + return result + + # Simpler implementation which makes incorrect assumption that + # hypernym hierarchy is acyclic: + # + # if not self.hypernyms(): + # return [self] + # else: + # return list(set(root for h in self.hypernyms() + # for root in h.root_hypernyms())) + def max_depth(self): + """ + :return: The length of the longest hypernym path from this + synset to the root. + """ + + if "_max_depth" not in self.__dict__: + hypernyms = self.hypernyms() + self.instance_hypernyms() + if not hypernyms: + self._max_depth = 0 + else: + self._max_depth = 1 + max(h.max_depth() for h in hypernyms) + return self._max_depth + + def min_depth(self): + """ + :return: The length of the shortest hypernym path from this + synset to the root. + """ + + if "_min_depth" not in self.__dict__: + hypernyms = self.hypernyms() + self.instance_hypernyms() + if not hypernyms: + self._min_depth = 0 + else: + self._min_depth = 1 + min(h.min_depth() for h in hypernyms) + return self._min_depth + + def closure(self, rel, depth=-1): + """ + Return the transitive closure of source under the rel + relationship, breadth-first, discarding cycles: + + >>> from nltk.corpus import wordnet as wn + >>> computer = wn.synset('computer.n.01') + >>> topic = lambda s:s.topic_domains() + >>> print(list(computer.closure(topic))) + [Synset('computer_science.n.01')] + + UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2 + + + Include redundant paths (but only once), avoiding duplicate searches + (from 'animal.n.01' to 'entity.n.01'): + + >>> dog = wn.synset('dog.n.01') + >>> hyp = lambda s:s.hypernyms() + >>> print(list(dog.closure(hyp))) + [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\ + Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\ + Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\ + Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\ + Synset('physical_entity.n.01'), Synset('entity.n.01')] + + UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7 + """ + + from nltk.util import acyclic_breadth_first + + for synset in acyclic_breadth_first(self, rel, depth): + if synset != self: + yield synset + + from nltk.util import acyclic_depth_first as acyclic_tree + from nltk.util import unweighted_minimum_spanning_tree as mst + + # Also add this shortcut? 
+ # from nltk.util import unweighted_minimum_spanning_digraph as umsd + + def tree(self, rel, depth=-1, cut_mark=None): + """ + Return the full relation tree, including self, + discarding cycles: + + >>> from nltk.corpus import wordnet as wn + >>> from pprint import pprint + >>> computer = wn.synset('computer.n.01') + >>> topic = lambda s:s.topic_domains() + >>> pprint(computer.tree(topic)) + [Synset('computer.n.01'), [Synset('computer_science.n.01')]] + + UserWarning: Discarded redundant search for Synset('computer.n.01') at depth -3 + + + But keep duplicate branches (from 'animal.n.01' to 'entity.n.01'): + + >>> dog = wn.synset('dog.n.01') + >>> hyp = lambda s:s.hypernyms() + >>> pprint(dog.tree(hyp)) + [Synset('dog.n.01'), + [Synset('canine.n.02'), + [Synset('carnivore.n.01'), + [Synset('placental.n.01'), + [Synset('mammal.n.01'), + [Synset('vertebrate.n.01'), + [Synset('chordate.n.01'), + [Synset('animal.n.01'), + [Synset('organism.n.01'), + [Synset('living_thing.n.01'), + [Synset('whole.n.02'), + [Synset('object.n.01'), + [Synset('physical_entity.n.01'), + [Synset('entity.n.01')]]]]]]]]]]]]], + [Synset('domestic_animal.n.01'), + [Synset('animal.n.01'), + [Synset('organism.n.01'), + [Synset('living_thing.n.01'), + [Synset('whole.n.02'), + [Synset('object.n.01'), + [Synset('physical_entity.n.01'), [Synset('entity.n.01')]]]]]]]]] + """ + + from nltk.util import acyclic_branches_depth_first + + return acyclic_branches_depth_first(self, rel, depth, cut_mark) + + def hypernym_paths(self): + """ + Get the path(s) from this synset to the root, where each path is a + list of the synset nodes traversed on the way to the root. + + :return: A list of lists, where each list gives the node sequence + connecting the initial ``Synset`` node and a root node. + """ + paths = [] + + hypernyms = self.hypernyms() + self.instance_hypernyms() + if len(hypernyms) == 0: + paths = [[self]] + + for hypernym in hypernyms: + for ancestor_list in hypernym.hypernym_paths(): + ancestor_list.append(self) + paths.append(ancestor_list) + return paths + + def common_hypernyms(self, other): + """ + Find all synsets that are hypernyms of this synset and the + other synset. + + :type other: Synset + :param other: other input synset. + :return: The synsets that are hypernyms of both synsets. + """ + if not self._all_hypernyms: + self._all_hypernyms = { + self_synset + for self_synsets in self._iter_hypernym_lists() + for self_synset in self_synsets + } + if not other._all_hypernyms: + other._all_hypernyms = { + other_synset + for other_synsets in other._iter_hypernym_lists() + for other_synset in other_synsets + } + return list(self._all_hypernyms.intersection(other._all_hypernyms)) + + def lowest_common_hypernyms(self, other, simulate_root=False, use_min_depth=False): + """ + Get a list of lowest synset(s) that both synsets have as a hypernym. + When `use_min_depth == False` this means that the synset which appears + as a hypernym of both `self` and `other` with the lowest maximum depth + is returned or if there are multiple such synsets at the same depth + they are all returned + + However, if `use_min_depth == True` then the synset(s) which has/have + the lowest minimum depth and appear(s) in both paths is/are returned. + + By setting the use_min_depth flag to True, the behavior of NLTK2 can be + preserved. This was changed in NLTK3 to give more accurate results in a + small set of cases, generally with synsets concerning people. (eg: + 'chef.n.01', 'fireman.n.01', etc.) 
+ + This method is an implementation of Ted Pedersen's "Lowest Common + Subsumer" method from the Perl Wordnet module. It can return either + "self" or "other" if they are a hypernym of the other. + + :type other: Synset + :param other: other input synset + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (False by default) + creates a fake root that connects all the taxonomies. Set it + to True to enable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will need to be added + for nouns as well. + :type use_min_depth: bool + :param use_min_depth: This setting mimics older (v2) behavior of NLTK + wordnet If True, will use the min_depth function to calculate the + lowest common hypernyms. This is known to give strange results for + some synset pairs (eg: 'chef.n.01', 'fireman.n.01') but is retained + for backwards compatibility + :return: The synsets that are the lowest common hypernyms of both + synsets + """ + synsets = self.common_hypernyms(other) + if simulate_root: + fake_synset = Synset(None) + fake_synset._name = "*ROOT*" + fake_synset.hypernyms = lambda: [] + fake_synset.instance_hypernyms = lambda: [] + synsets.append(fake_synset) + + try: + if use_min_depth: + max_depth = max(s.min_depth() for s in synsets) + unsorted_lch = [s for s in synsets if s.min_depth() == max_depth] + else: + max_depth = max(s.max_depth() for s in synsets) + unsorted_lch = [s for s in synsets if s.max_depth() == max_depth] + return sorted(unsorted_lch) + except ValueError: + return [] + + def hypernym_distances(self, distance=0, simulate_root=False): + """ + Get the path(s) from this synset to the root, counting the distance + of each node from the initial node on the way. A set of + (synset, distance) tuples is returned. + + :type distance: int + :param distance: the distance (number of edges) from this hypernym to + the original hypernym ``Synset`` on which this method was called. + :return: A set of ``(Synset, int)`` tuples where each ``Synset`` is + a hypernym of the first ``Synset``. + """ + distances = {(self, distance)} + for hypernym in self._hypernyms() + self._instance_hypernyms(): + distances |= hypernym.hypernym_distances(distance + 1, simulate_root=False) + if simulate_root: + fake_synset = Synset(None) + fake_synset._name = "*ROOT*" + fake_synset_distance = max(distances, key=itemgetter(1))[1] + distances.add((fake_synset, fake_synset_distance + 1)) + return distances + + def _shortest_hypernym_paths(self, simulate_root): + if self._name == "*ROOT*": + return {self: 0} + + queue = deque([(self, 0)]) + path = {} + + while queue: + s, depth = queue.popleft() + if s in path: + continue + path[s] = depth + + depth += 1 + queue.extend((hyp, depth) for hyp in s._hypernyms()) + queue.extend((hyp, depth) for hyp in s._instance_hypernyms()) + + if simulate_root: + fake_synset = Synset(None) + fake_synset._name = "*ROOT*" + path[fake_synset] = max(path.values()) + 1 + + return path + + def shortest_path_distance(self, other, simulate_root=False): + """ + Returns the distance of the shortest path linking the two synsets (if + one exists). For each synset, all the ancestor nodes and their + distances are recorded and compared. The ancestor node common to both + synsets that can be reached with the minimum number of traversals is + used. 
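+        (Equivalently, the value returned is the minimum of ``d1 + d2`` over all
+        hypernym ancestors shared by the two synsets, where ``d1`` and ``d2`` are
+        the respective distances to that ancestor.)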
If no ancestor nodes are common, None is returned. If a node is + compared with itself 0 is returned. + + :type other: Synset + :param other: The Synset to which the shortest path will be found. + :return: The number of edges in the shortest path connecting the two + nodes, or None if no path exists. + """ + + if self == other: + return 0 + + dist_dict1 = self._shortest_hypernym_paths(simulate_root) + dist_dict2 = other._shortest_hypernym_paths(simulate_root) + + # For each ancestor synset common to both subject synsets, find the + # connecting path length. Return the shortest of these. + + inf = float("inf") + path_distance = inf + for synset, d1 in dist_dict1.items(): + d2 = dist_dict2.get(synset, inf) + path_distance = min(path_distance, d1 + d2) + + return None if math.isinf(path_distance) else path_distance + + # interface to similarity methods + def path_similarity(self, other, verbose=False, simulate_root=True): + """ + Path Distance Similarity: + Return a score denoting how similar two word senses are, based on the + shortest path that connects the senses in the is-a (hypernym/hypnoym) + taxonomy. The score is in the range 0 to 1, except in those cases where + a path cannot be found (will only be true for verbs as there are many + distinct verb taxonomies), in which case None is returned. A score of + 1 represents identity i.e. comparing a sense with itself will return 1. + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (True by default) + creates a fake root that connects all the taxonomies. Set it + to false to disable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will be added for nouns + as well. + :return: A score denoting the similarity of the two ``Synset`` objects, + normally between 0 and 1. None is returned if no connecting path + could be found. 1 is returned if a ``Synset`` is compared with + itself. + """ + + distance = self.shortest_path_distance( + other, + simulate_root=simulate_root and (self._needs_root() or other._needs_root()), + ) + if distance is None or distance < 0: + return None + return 1.0 / (distance + 1) + + def lch_similarity(self, other, verbose=False, simulate_root=True): + """ + Leacock Chodorow Similarity: + Return a score denoting how similar two word senses are, based on the + shortest path that connects the senses (as above) and the maximum depth + of the taxonomy in which the senses occur. The relationship is given as + -log(p/2d) where p is the shortest path length and d is the taxonomy + depth. + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (True by default) + creates a fake root that connects all the taxonomies. Set it + to false to disable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will be added for nouns + as well. + :return: A score denoting the similarity of the two ``Synset`` objects, + normally greater than 0. 
None is returned if no connecting path + could be found. If a ``Synset`` is compared with itself, the + maximum score is returned, which varies depending on the taxonomy + depth. + """ + + if self._pos != other._pos: + raise WordNetError( + "Computing the lch similarity requires " + "%s and %s to have the same part of speech." % (self, other) + ) + + need_root = self._needs_root() + + if self._pos not in self._wordnet_corpus_reader._max_depth: + self._wordnet_corpus_reader._compute_max_depth(self._pos, need_root) + + depth = self._wordnet_corpus_reader._max_depth[self._pos] + + distance = self.shortest_path_distance( + other, simulate_root=simulate_root and need_root + ) + + if distance is None or distance < 0 or depth == 0: + return None + return -math.log((distance + 1) / (2.0 * depth)) + + def wup_similarity(self, other, verbose=False, simulate_root=True): + """ + Wu-Palmer Similarity: + Return a score denoting how similar two word senses are, based on the + depth of the two senses in the taxonomy and that of their Least Common + Subsumer (most specific ancestor node). Previously, the scores computed + by this implementation did _not_ always agree with those given by + Pedersen's Perl implementation of WordNet Similarity. However, with + the addition of the simulate_root flag (see below), the score for + verbs now almost always agree but not always for nouns. + + The LCS does not necessarily feature in the shortest path connecting + the two senses, as it is by definition the common ancestor deepest in + the taxonomy, not closest to the two senses. Typically, however, it + will so feature. Where multiple candidates for the LCS exist, that + whose shortest path to the root node is the longest will be selected. + Where the LCS has multiple paths to the root, the longer path is used + for the purposes of the calculation. + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type simulate_root: bool + :param simulate_root: The various verb taxonomies do not + share a single root which disallows this metric from working for + synsets that are not connected. This flag (True by default) + creates a fake root that connects all the taxonomies. Set it + to false to disable this behavior. For the noun taxonomy, + there is usually a default root except for WordNet version 1.6. + If you are using wordnet 1.6, a fake root will be added for nouns + as well. + :return: A float score denoting the similarity of the two ``Synset`` + objects, normally greater than zero. If no connecting path between + the two senses can be found, None is returned. 
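+
+        An illustrative sketch (skipped by default since it needs the
+        WordNet data; the exact score can vary slightly between WordNet
+        versions):
+
+            >>> from nltk.corpus import wordnet as wn
+            >>> dog = wn.synset('dog.n.01')
+            >>> cat = wn.synset('cat.n.01')
+            >>> round(dog.wup_similarity(cat), 2) # doctest: +SKIP
+            0.86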
+ + """ + need_root = self._needs_root() or other._needs_root() + + # Note that to preserve behavior from NLTK2 we set use_min_depth=True + # It is possible that more accurate results could be obtained by + # removing this setting and it should be tested later on + subsumers = self.lowest_common_hypernyms( + other, simulate_root=simulate_root and need_root, use_min_depth=True + ) + + # If no LCS was found return None + if len(subsumers) == 0: + return None + + subsumer = self if self in subsumers else subsumers[0] + + # Get the longest path from the LCS to the root, + # including a correction: + # - add one because the calculations include both the start and end + # nodes + depth = subsumer.max_depth() + 1 + + # Note: No need for an additional add-one correction for non-nouns + # to account for an imaginary root node because that is now + # automatically handled by simulate_root + # if subsumer._pos != NOUN: + # depth += 1 + + # Get the shortest path from the LCS to each of the synsets it is + # subsuming. Add this to the LCS path length to get the path + # length from each synset to the root. + len1 = self.shortest_path_distance( + subsumer, simulate_root=simulate_root and need_root + ) + len2 = other.shortest_path_distance( + subsumer, simulate_root=simulate_root and need_root + ) + if len1 is None or len2 is None: + return None + len1 += depth + len2 += depth + return (2.0 * depth) / (len1 + len2) + + def res_similarity(self, other, ic, verbose=False): + """ + Resnik Similarity: + Return a score denoting how similar two word senses are, based on the + Information Content (IC) of the Least Common Subsumer (most specific + ancestor node). + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type ic: dict + :param ic: an information content object (as returned by + ``nltk.corpus.wordnet_ic.ic()``). + :return: A float score denoting the similarity of the two ``Synset`` + objects. Synsets whose LCS is the root node of the taxonomy will + have a score of 0 (e.g. N['dog'][0] and N['table'][0]). + """ + + ic1, ic2, lcs_ic = _lcs_ic(self, other, ic) + return lcs_ic + + def jcn_similarity(self, other, ic, verbose=False): + """ + Jiang-Conrath Similarity: + Return a score denoting how similar two word senses are, based on the + Information Content (IC) of the Least Common Subsumer (most specific + ancestor node) and that of the two input Synsets. The relationship is + given by the equation 1 / (IC(s1) + IC(s2) - 2 * IC(lcs)). + + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type ic: dict + :param ic: an information content object (as returned by + ``nltk.corpus.wordnet_ic.ic()``). + :return: A float score denoting the similarity of the two ``Synset`` + objects. + """ + + if self == other: + return _INF + + ic1, ic2, lcs_ic = _lcs_ic(self, other, ic) + + # If either of the input synsets are the root synset, or have a + # frequency of 0 (sparse data problem), return 0. + if ic1 == 0 or ic2 == 0: + return 0 + + ic_difference = ic1 + ic2 - 2 * lcs_ic + + if ic_difference == 0: + return _INF + + return 1 / ic_difference + + def lin_similarity(self, other, ic, verbose=False): + """ + Lin Similarity: + Return a score denoting how similar two word senses are, based on the + Information Content (IC) of the Least Common Subsumer (most specific + ancestor node) and that of the two input Synsets. The relationship is + given by the equation 2 * IC(lcs) / (IC(s1) + IC(s2)). 
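+
+        For example (skipped by default: it needs both the WordNet data and
+        the ``wordnet_ic`` corpus, and the score depends on which
+        information content file is used):
+
+            >>> from nltk.corpus import wordnet as wn
+            >>> from nltk.corpus import wordnet_ic
+            >>> brown_ic = wordnet_ic.ic('ic-brown.dat') # doctest: +SKIP
+            >>> wn.synset('dog.n.01').lin_similarity(wn.synset('cat.n.01'), brown_ic) # doctest: +SKIP
+            0.8768...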
+ + :type other: Synset + :param other: The ``Synset`` that this ``Synset`` is being compared to. + :type ic: dict + :param ic: an information content object (as returned by + ``nltk.corpus.wordnet_ic.ic()``). + :return: A float score denoting the similarity of the two ``Synset`` + objects, in the range 0 to 1. + """ + + ic1, ic2, lcs_ic = _lcs_ic(self, other, ic) + return (2.0 * lcs_ic) / (ic1 + ic2) + + def _iter_hypernym_lists(self): + """ + :return: An iterator over ``Synset`` objects that are either proper + hypernyms or instance of hypernyms of the synset. + """ + todo = [self] + seen = set() + while todo: + for synset in todo: + seen.add(synset) + yield todo + todo = [ + hypernym + for synset in todo + for hypernym in (synset.hypernyms() + synset.instance_hypernyms()) + if hypernym not in seen + ] + + def __repr__(self): + return f"{type(self).__name__}('{self._name}')" + + def _related(self, relation_symbol, sort=True): + get_synset = self._wordnet_corpus_reader.synset_from_pos_and_offset + if relation_symbol not in self._pointers: + return [] + pointer_tuples = self._pointers[relation_symbol] + r = [get_synset(pos, offset) for pos, offset in pointer_tuples] + if sort: + r.sort() + return r + + +###################################################################### +# WordNet Corpus Reader +###################################################################### + + +class WordNetCorpusReader(CorpusReader): + """ + A corpus reader used to access wordnet or its variants. + """ + + _ENCODING = "utf8" + + # { Part-of-speech constants + ADJ, ADJ_SAT, ADV, NOUN, VERB = "a", "s", "r", "n", "v" + # } + + # { Filename constants + _FILEMAP = {ADJ: "adj", ADV: "adv", NOUN: "noun", VERB: "verb"} + # } + + # { Part of speech constants + _pos_numbers = {NOUN: 1, VERB: 2, ADJ: 3, ADV: 4, ADJ_SAT: 5} + _pos_names = dict(tup[::-1] for tup in _pos_numbers.items()) + # } + + #: A list of file identifiers for all the fileids used by this + #: corpus reader. + _FILES = ( + "cntlist.rev", + "lexnames", + "index.sense", + "index.adj", + "index.adv", + "index.noun", + "index.verb", + "data.adj", + "data.adv", + "data.noun", + "data.verb", + "adj.exc", + "adv.exc", + "noun.exc", + "verb.exc", + ) + + def __init__(self, root, omw_reader): + """ + Construct a new wordnet corpus reader, with the given root + directory. + """ + + super().__init__(root, self._FILES, encoding=self._ENCODING) + + # A index that provides the file offset + # Map from lemma -> pos -> synset_index -> offset + self._lemma_pos_offset_map = defaultdict(dict) + + # A cache so we don't have to reconstruct synsets + # Map from pos -> offset -> synset + self._synset_offset_cache = defaultdict(dict) + + # A lookup for the maximum depth of each part of speech. Useful for + # the lch similarity metric. + self._max_depth = defaultdict(dict) + + # Corpus reader containing omw data. + self._omw_reader = omw_reader + + # Corpus reader containing extended_omw data. 
+ self._exomw_reader = None + + self.provenances = defaultdict(str) + self.provenances["eng"] = "" + + if self._omw_reader is None: + warnings.warn( + "The multilingual functions are not available with this Wordnet version" + ) + + self.omw_langs = set() + + # A cache to store the wordnet data of multiple languages + self._lang_data = defaultdict(list) + + self._data_file_map = {} + self._exception_map = {} + self._lexnames = [] + self._key_count_file = None + self._key_synset_file = None + + # Load the lexnames + with self.open("lexnames") as fp: + for i, line in enumerate(fp): + index, lexname, _ = line.split() + assert int(index) == i + self._lexnames.append(lexname) + + # Load the indices for lemmas and synset offsets + self._load_lemma_pos_offset_map() + + # load the exception file data into memory + self._load_exception_map() + + self.nomap = [] + self.splits = {} + + # map from WordNet 3.0 for OMW data + self.map30 = self.map_wn30() + + # Language data attributes + self.lg_attrs = ["lemma", "none", "def", "exe"] + + def index_sense(self, version=None): + """Read sense key to synset id mapping from index.sense file in corpus directory""" + fn = "index.sense" + if version: + from nltk.corpus import CorpusReader, LazyCorpusLoader + + ixreader = LazyCorpusLoader(version, CorpusReader, r".*/" + fn) + else: + ixreader = self + with ixreader.open(fn) as fp: + sensekey_map = {} + for line in fp: + fields = line.strip().split() + sensekey = fields[0] + pos = self._pos_names[int(sensekey.split("%")[1].split(":")[0])] + sensekey_map[sensekey] = f"{fields[1]}-{pos}" + return sensekey_map + + def map_to_many(self): + sensekey_map1 = self.index_sense("wordnet") + sensekey_map2 = self.index_sense() + synset_to_many = {} + for synsetid in set(sensekey_map1.values()): + synset_to_many[synsetid] = [] + for sensekey in set(sensekey_map1.keys()).intersection( + set(sensekey_map2.keys()) + ): + source = sensekey_map1[sensekey] + target = sensekey_map2[sensekey] + synset_to_many[source].append(target) + return synset_to_many + + def map_to_one(self): + synset_to_many = self.map_to_many() + synset_to_one = {} + for source in synset_to_many: + candidates_bag = synset_to_many[source] + if candidates_bag: + candidates_set = set(candidates_bag) + if len(candidates_set) == 1: + target = candidates_bag[0] + else: + counts = [] + for candidate in candidates_set: + counts.append((candidates_bag.count(candidate), candidate)) + self.splits[source] = counts + target = max(counts)[1] + synset_to_one[source] = target + if source[-1] == "s": + # Add a mapping from "a" to target for applications like omw, + # where only Lithuanian and Slovak use the "s" ss_type. 
+ synset_to_one[f"{source[:-1]}a"] = target + else: + self.nomap.append(source) + return synset_to_one + + def map_wn30(self): + """Mapping from Wordnet 3.0 to currently loaded Wordnet version""" + if self.get_version() == "3.0": + return None + else: + return self.map_to_one() + + # Open Multilingual WordNet functions, contributed by + # Nasruddin A’aidil Shari, Sim Wei Ying Geraldine, and Soe Lynn + + def of2ss(self, of): + """take an id and return the synsets""" + return self.synset_from_pos_and_offset(of[-1], int(of[:8])) + + def ss2of(self, ss): + """return the ID of the synset""" + if ss: + return f"{ss.offset():08d}-{ss.pos()}" + + def _load_lang_data(self, lang): + """load the wordnet data of the requested language from the file to + the cache, _lang_data""" + + if lang in self._lang_data: + return + + if self._omw_reader and not self.omw_langs: + self.add_omw() + + if lang not in self.langs(): + raise WordNetError("Language is not supported.") + + if self._exomw_reader and lang not in self.omw_langs: + reader = self._exomw_reader + else: + reader = self._omw_reader + + prov = self.provenances[lang] + if prov in ["cldr", "wikt"]: + prov2 = prov + else: + prov2 = "data" + + with reader.open(f"{prov}/wn-{prov2}-{lang.split('_')[0]}.tab") as fp: + self.custom_lemmas(fp, lang) + self.disable_custom_lemmas(lang) + + def add_provs(self, reader): + """Add languages from Multilingual Wordnet to the provenance dictionary""" + fileids = reader.fileids() + for fileid in fileids: + prov, langfile = os.path.split(fileid) + file_name, file_extension = os.path.splitext(langfile) + if file_extension == ".tab": + lang = file_name.split("-")[-1] + if lang in self.provenances or prov in ["cldr", "wikt"]: + # We already have another resource for this lang, + # so we need to further specify the lang id: + lang = f"{lang}_{prov}" + self.provenances[lang] = prov + + def add_omw(self): + self.add_provs(self._omw_reader) + self.omw_langs = set(self.provenances.keys()) + + def add_exomw(self): + """ + Add languages from Extended OMW + + >>> import nltk + >>> from nltk.corpus import wordnet as wn + >>> wn.add_exomw() + >>> print(wn.synset('intrinsically.r.01').lemmas(lang="eng_wikt")) + [Lemma('intrinsically.r.01.per_se'), Lemma('intrinsically.r.01.as_such')] + """ + from nltk.corpus import extended_omw + + self.add_omw() + self._exomw_reader = extended_omw + self.add_provs(self._exomw_reader) + + def langs(self): + """return a list of languages supported by Multilingual Wordnet""" + return list(self.provenances.keys()) + + def _load_lemma_pos_offset_map(self): + for suffix in self._FILEMAP.values(): + + # parse each line of the file (ignoring comment lines) + with self.open("index.%s" % suffix) as fp: + for i, line in enumerate(fp): + if line.startswith(" "): + continue + + _iter = iter(line.split()) + + def _next_token(): + return next(_iter) + + try: + + # get the lemma and part-of-speech + lemma = _next_token() + pos = _next_token() + + # get the number of synsets for this lemma + n_synsets = int(_next_token()) + assert n_synsets > 0 + + # get and ignore the pointer symbols for all synsets of + # this lemma + n_pointers = int(_next_token()) + [_next_token() for _ in range(n_pointers)] + + # same as number of synsets + n_senses = int(_next_token()) + assert n_synsets == n_senses + + # get and ignore number of senses ranked according to + # frequency + _next_token() + + # get synset offsets + synset_offsets = [int(_next_token()) for _ in range(n_synsets)] + + # raise more informative error with file name 
and line number + except (AssertionError, ValueError) as e: + tup = ("index.%s" % suffix), (i + 1), e + raise WordNetError("file %s, line %i: %s" % tup) from e + + # map lemmas and parts of speech to synsets + self._lemma_pos_offset_map[lemma][pos] = synset_offsets + if pos == ADJ: + self._lemma_pos_offset_map[lemma][ADJ_SAT] = synset_offsets + + def _load_exception_map(self): + # load the exception file data into memory + for pos, suffix in self._FILEMAP.items(): + self._exception_map[pos] = {} + with self.open("%s.exc" % suffix) as fp: + for line in fp: + terms = line.split() + self._exception_map[pos][terms[0]] = terms[1:] + self._exception_map[ADJ_SAT] = self._exception_map[ADJ] + + def _compute_max_depth(self, pos, simulate_root): + """ + Compute the max depth for the given part of speech. This is + used by the lch similarity metric. + """ + depth = 0 + for ii in self.all_synsets(pos): + try: + depth = max(depth, ii.max_depth()) + except RuntimeError: + print(ii) + if simulate_root: + depth += 1 + self._max_depth[pos] = depth + + def get_version(self): + fh = self._data_file(ADJ) + fh.seek(0) + for line in fh: + match = re.search(r"Word[nN]et (\d+|\d+\.\d+) Copyright", line) + if match is not None: + version = match.group(1) + fh.seek(0) + return version + + ############################################################# + # Loading Lemmas + ############################################################# + + def lemma(self, name, lang="eng"): + """Return lemma object that matches the name""" + # cannot simply split on first '.', + # e.g.: '.45_caliber.a.01..45_caliber' + separator = SENSENUM_RE.search(name).end() + + synset_name, lemma_name = name[: separator - 1], name[separator:] + + synset = self.synset(synset_name) + for lemma in synset.lemmas(lang): + if lemma._name == lemma_name: + return lemma + raise WordNetError(f"No lemma {lemma_name!r} in {synset_name!r}") + + def lemma_from_key(self, key): + # Keys are case sensitive and always lower-case + key = key.lower() + + lemma_name, lex_sense = key.split("%") + pos_number, lexname_index, lex_id, _, _ = lex_sense.split(":") + pos = self._pos_names[int(pos_number)] + + # open the key -> synset file if necessary + if self._key_synset_file is None: + self._key_synset_file = self.open("index.sense") + + # Find the synset for the lemma. 
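+        # (index.sense is sorted by sense key, so a binary search over the
+        # open file is enough to locate the matching line.)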
+ synset_line = _binary_search_file(self._key_synset_file, key) + if not synset_line: + raise WordNetError("No synset found for key %r" % key) + offset = int(synset_line.split()[1]) + synset = self.synset_from_pos_and_offset(pos, offset) + # return the corresponding lemma + for lemma in synset._lemmas: + if lemma._key == key: + return lemma + raise WordNetError("No lemma found for for key %r" % key) + + ############################################################# + # Loading Synsets + ############################################################# + def synset(self, name): + # split name into lemma, part of speech and synset number + lemma, pos, synset_index_str = name.lower().rsplit(".", 2) + synset_index = int(synset_index_str) - 1 + + # get the offset for this synset + try: + offset = self._lemma_pos_offset_map[lemma][pos][synset_index] + except KeyError as e: + raise WordNetError(f"No lemma {lemma!r} with part of speech {pos!r}") from e + except IndexError as e: + n_senses = len(self._lemma_pos_offset_map[lemma][pos]) + raise WordNetError( + f"Lemma {lemma!r} with part of speech {pos!r} only " + f"has {n_senses} {'sense' if n_senses == 1 else 'senses'}" + ) from e + + # load synset information from the appropriate file + synset = self.synset_from_pos_and_offset(pos, offset) + + # some basic sanity checks on loaded attributes + if pos == "s" and synset._pos == "a": + message = ( + "Adjective satellite requested but only plain " + "adjective found for lemma %r" + ) + raise WordNetError(message % lemma) + assert synset._pos == pos or (pos == "a" and synset._pos == "s") + + # Return the synset object. + return synset + + def _data_file(self, pos): + """ + Return an open file pointer for the data file for the given + part of speech. + """ + if pos == ADJ_SAT: + pos = ADJ + if self._data_file_map.get(pos) is None: + fileid = "data.%s" % self._FILEMAP[pos] + self._data_file_map[pos] = self.open(fileid) + return self._data_file_map[pos] + + def synset_from_pos_and_offset(self, pos, offset): + """ + - pos: The synset's part of speech, matching one of the module level + attributes ADJ, ADJ_SAT, ADV, NOUN or VERB ('a', 's', 'r', 'n', or 'v'). + - offset: The byte offset of this synset in the WordNet dict file + for this pos. 
+ + >>> from nltk.corpus import wordnet as wn + >>> print(wn.synset_from_pos_and_offset('n', 1740)) + Synset('entity.n.01') + """ + # Check to see if the synset is in the cache + if offset in self._synset_offset_cache[pos]: + return self._synset_offset_cache[pos][offset] + + data_file = self._data_file(pos) + data_file.seek(offset) + data_file_line = data_file.readline() + # If valid, the offset equals the 8-digit 0-padded integer found at the start of the line: + line_offset = data_file_line[:8] + if ( + line_offset.isalnum() + and line_offset == f"{'0'*(8-len(str(offset)))}{str(offset)}" + ): + synset = self._synset_from_pos_and_line(pos, data_file_line) + assert synset._offset == offset + self._synset_offset_cache[pos][offset] = synset + else: + synset = None + warnings.warn(f"No WordNet synset found for pos={pos} at offset={offset}.") + data_file.seek(0) + return synset + + @deprecated("Use public method synset_from_pos_and_offset() instead") + def _synset_from_pos_and_offset(self, *args, **kwargs): + """ + Hack to help people like the readers of + https://stackoverflow.com/a/27145655/1709587 + who were using this function before it was officially a public method + """ + return self.synset_from_pos_and_offset(*args, **kwargs) + + def _synset_from_pos_and_line(self, pos, data_file_line): + # Construct a new (empty) synset. + synset = Synset(self) + + # parse the entry for this synset + try: + + # parse out the definitions and examples from the gloss + columns_str, gloss = data_file_line.strip().split("|") + definition = re.sub(r"[\"].*?[\"]", "", gloss).strip() + examples = re.findall(r'"([^"]*)"', gloss) + for example in examples: + synset._examples.append(example) + + synset._definition = definition.strip("; ") + + # split the other info into fields + _iter = iter(columns_str.split()) + + def _next_token(): + return next(_iter) + + # get the offset + synset._offset = int(_next_token()) + + # determine the lexicographer file name + lexname_index = int(_next_token()) + synset._lexname = self._lexnames[lexname_index] + + # get the part of speech + synset._pos = _next_token() + + # create Lemma objects for each lemma + n_lemmas = int(_next_token(), 16) + for _ in range(n_lemmas): + # get the lemma name + lemma_name = _next_token() + # get the lex_id (used for sense_keys) + lex_id = int(_next_token(), 16) + # If the lemma has a syntactic marker, extract it. 
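+                # (Such markers, e.g. "(a)", "(p)" or "(ip)", record the
+                # syntactic position of adjective senses in the WordNet
+                # data files.)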
+ m = re.match(r"(.*?)(\(.*\))?$", lemma_name) + lemma_name, syn_mark = m.groups() + # create the lemma object + lemma = Lemma(self, synset, lemma_name, lexname_index, lex_id, syn_mark) + synset._lemmas.append(lemma) + synset._lemma_names.append(lemma._name) + + # collect the pointer tuples + n_pointers = int(_next_token()) + for _ in range(n_pointers): + symbol = _next_token() + offset = int(_next_token()) + pos = _next_token() + lemma_ids_str = _next_token() + if lemma_ids_str == "0000": + synset._pointers[symbol].add((pos, offset)) + else: + source_index = int(lemma_ids_str[:2], 16) - 1 + target_index = int(lemma_ids_str[2:], 16) - 1 + source_lemma_name = synset._lemmas[source_index]._name + lemma_pointers = synset._lemma_pointers + tups = lemma_pointers[source_lemma_name, symbol] + tups.append((pos, offset, target_index)) + + # read the verb frames + try: + frame_count = int(_next_token()) + except StopIteration: + pass + else: + for _ in range(frame_count): + # read the plus sign + plus = _next_token() + assert plus == "+" + # read the frame and lemma number + frame_number = int(_next_token()) + frame_string_fmt = VERB_FRAME_STRINGS[frame_number] + lemma_number = int(_next_token(), 16) + # lemma number of 00 means all words in the synset + if lemma_number == 0: + synset._frame_ids.append(frame_number) + for lemma in synset._lemmas: + lemma._frame_ids.append(frame_number) + lemma._frame_strings.append(frame_string_fmt % lemma._name) + # only a specific word in the synset + else: + lemma = synset._lemmas[lemma_number - 1] + lemma._frame_ids.append(frame_number) + lemma._frame_strings.append(frame_string_fmt % lemma._name) + + # raise a more informative error with line text + except ValueError as e: + raise WordNetError(f"line {data_file_line!r}: {e}") from e + + # set sense keys for Lemma objects - note that this has to be + # done afterwards so that the relations are available + for lemma in synset._lemmas: + if synset._pos == ADJ_SAT: + head_lemma = synset.similar_tos()[0]._lemmas[0] + head_name = head_lemma._name + head_id = "%02d" % head_lemma._lex_id + else: + head_name = head_id = "" + tup = ( + lemma._name, + WordNetCorpusReader._pos_numbers[synset._pos], + lemma._lexname_index, + lemma._lex_id, + head_name, + head_id, + ) + lemma._key = ("%s%%%d:%02d:%02d:%s:%s" % tup).lower() + + # the canonical name is based on the first lemma + lemma_name = synset._lemmas[0]._name.lower() + offsets = self._lemma_pos_offset_map[lemma_name][synset._pos] + sense_index = offsets.index(synset._offset) + tup = lemma_name, synset._pos, sense_index + 1 + synset._name = "%s.%s.%02i" % tup + + return synset + + def synset_from_sense_key(self, sense_key): + """ + Retrieves synset based on a given sense_key. Sense keys can be + obtained from lemma.key() + + From https://wordnet.princeton.edu/documentation/senseidx5wn: + A sense_key is represented as:: + + lemma % lex_sense (e.g. 
'dog%1:18:01::') + + where lex_sense is encoded as:: + + ss_type:lex_filenum:lex_id:head_word:head_id + + :lemma: ASCII text of word/collocation, in lower case + :ss_type: synset type for the sense (1 digit int) + The synset type is encoded as follows:: + + 1 NOUN + 2 VERB + 3 ADJECTIVE + 4 ADVERB + 5 ADJECTIVE SATELLITE + :lex_filenum: name of lexicographer file containing the synset for the sense (2 digit int) + :lex_id: when paired with lemma, uniquely identifies a sense in the lexicographer file (2 digit int) + :head_word: lemma of the first word in satellite's head synset + Only used if sense is in an adjective satellite synset + :head_id: uniquely identifies sense in a lexicographer file when paired with head_word + Only used if head_word is present (2 digit int) + + >>> import nltk + >>> from nltk.corpus import wordnet as wn + >>> print(wn.synset_from_sense_key("drive%1:04:03::")) + Synset('drive.n.06') + + >>> print(wn.synset_from_sense_key("driving%1:04:03::")) + Synset('drive.n.06') + """ + return self.lemma_from_key(sense_key).synset() + + ############################################################# + # Retrieve synsets and lemmas. + ############################################################# + + def synsets(self, lemma, pos=None, lang="eng", check_exceptions=True): + """Load all synsets with a given lemma and part of speech tag. + If no pos is specified, all synsets for all parts of speech + will be loaded. + If lang is specified, all the synsets associated with the lemma name + of that language will be returned. + """ + lemma = lemma.lower() + + if lang == "eng": + get_synset = self.synset_from_pos_and_offset + index = self._lemma_pos_offset_map + if pos is None: + pos = POS_LIST + return [ + get_synset(p, offset) + for p in pos + for form in self._morphy(lemma, p, check_exceptions) + for offset in index[form].get(p, []) + ] + + else: + self._load_lang_data(lang) + synset_list = [] + if lemma in self._lang_data[lang][1]: + for l in self._lang_data[lang][1][lemma]: + if pos is not None and l[-1] != pos: + continue + synset_list.append(self.of2ss(l)) + return synset_list + + def lemmas(self, lemma, pos=None, lang="eng"): + """Return all Lemma objects with a name matching the specified lemma + name and part of speech tag. Matches any part of speech tag if none is + specified.""" + + lemma = lemma.lower() + if lang == "eng": + return [ + lemma_obj + for synset in self.synsets(lemma, pos) + for lemma_obj in synset.lemmas() + if lemma_obj.name().lower() == lemma + ] + + else: + self._load_lang_data(lang) + lemmas = [] + syn = self.synsets(lemma, lang=lang) + for s in syn: + if pos is not None and s.pos() != pos: + continue + for lemma_obj in s.lemmas(lang=lang): + if lemma_obj.name().lower() == lemma: + lemmas.append(lemma_obj) + return lemmas + + def all_lemma_names(self, pos=None, lang="eng"): + """Return all lemma names for all synsets for the given + part of speech tag and language or languages. 
If pos is + not specified, all synsets for all parts of speech will + be used.""" + + if lang == "eng": + if pos is None: + return iter(self._lemma_pos_offset_map) + else: + return ( + lemma + for lemma in self._lemma_pos_offset_map + if pos in self._lemma_pos_offset_map[lemma] + ) + else: + self._load_lang_data(lang) + lemma = [] + for i in self._lang_data[lang][0]: + if pos is not None and i[-1] != pos: + continue + lemma.extend(self._lang_data[lang][0][i]) + + lemma = iter(set(lemma)) + return lemma + + def all_omw_synsets(self, pos=None, lang=None): + if lang not in self.langs(): + return None + self._load_lang_data(lang) + for of in self._lang_data[lang][0]: + if not pos or of[-1] == pos: + ss = self.of2ss(of) + if ss: + yield ss + + # else: + # A few OMW offsets don't exist in Wordnet 3.0. + # warnings.warn(f"Language {lang}: no synset found for {of}") + + def all_synsets(self, pos=None, lang="eng"): + """Iterate over all synsets with a given part of speech tag. + If no pos is specified, all synsets for all parts of speech + will be loaded. + """ + if lang == "eng": + return self.all_eng_synsets(pos=pos) + else: + return self.all_omw_synsets(pos=pos, lang=lang) + + def all_eng_synsets(self, pos=None): + if pos is None: + pos_tags = self._FILEMAP.keys() + else: + pos_tags = [pos] + + cache = self._synset_offset_cache + from_pos_and_line = self._synset_from_pos_and_line + + # generate all synsets for each part of speech + for pos_tag in pos_tags: + # Open the file for reading. Note that we can not re-use + # the file pointers from self._data_file_map here, because + # we're defining an iterator, and those file pointers might + # be moved while we're not looking. + if pos_tag == ADJ_SAT: + pos_file = ADJ + else: + pos_file = pos_tag + fileid = "data.%s" % self._FILEMAP[pos_file] + data_file = self.open(fileid) + + try: + # generate synsets for each line in the POS file + offset = data_file.tell() + line = data_file.readline() + while line: + if not line[0].isspace(): + if offset in cache[pos_tag]: + # See if the synset is cached + synset = cache[pos_tag][offset] + else: + # Otherwise, parse the line + synset = from_pos_and_line(pos_tag, line) + cache[pos_tag][offset] = synset + + # adjective satellites are in the same file as + # adjectives so only yield the synset if it's actually + # a satellite + if pos_tag == ADJ_SAT and synset._pos == ADJ_SAT: + yield synset + # for all other POS tags, yield all synsets (this means + # that adjectives also include adjective satellites) + elif pos_tag != ADJ_SAT: + yield synset + offset = data_file.tell() + line = data_file.readline() + + # close the extra file handle we opened + except: + data_file.close() + raise + else: + data_file.close() + + def words(self, lang="eng"): + """return lemmas of the given language as list of words""" + return self.all_lemma_names(lang=lang) + + def synonyms(self, word, lang="eng"): + """return nested list with the synonyms of the different senses of word in the given language""" + return [ + sorted(list(set(ss.lemma_names(lang=lang)) - {word})) + for ss in self.synsets(word, lang=lang) + ] + + def doc(self, file="README", lang="eng"): + """Return the contents of readme, license or citation file + use lang=lang to get the file for an individual language""" + if lang == "eng": + reader = self + else: + reader = self._omw_reader + if lang in self.langs(): + file = f"{os.path.join(self.provenances[lang],file)}" + try: + with reader.open(file) as fp: + return fp.read() + except: + if lang in self._lang_data: + return 
f"Cannot determine {file} for {lang}" + else: + return f"Language {lang} is not supported." + + def license(self, lang="eng"): + """Return the contents of LICENSE (for omw) + use lang=lang to get the license for an individual language""" + return self.doc(file="LICENSE", lang=lang) + + def readme(self, lang="eng"): + """Return the contents of README (for omw) + use lang=lang to get the readme for an individual language""" + return self.doc(file="README", lang=lang) + + def citation(self, lang="eng"): + """Return the contents of citation.bib file (for omw) + use lang=lang to get the citation for an individual language""" + return self.doc(file="citation.bib", lang=lang) + + ############################################################# + # Misc + ############################################################# + def lemma_count(self, lemma): + """Return the frequency count for this Lemma""" + # Currently, count is only work for English + if lemma._lang != "eng": + return 0 + # open the count file if we haven't already + if self._key_count_file is None: + self._key_count_file = self.open("cntlist.rev") + # find the key in the counts file and return the count + line = _binary_search_file(self._key_count_file, lemma._key) + if line: + return int(line.rsplit(" ", 1)[-1]) + else: + return 0 + + def path_similarity(self, synset1, synset2, verbose=False, simulate_root=True): + return synset1.path_similarity(synset2, verbose, simulate_root) + + path_similarity.__doc__ = Synset.path_similarity.__doc__ + + def lch_similarity(self, synset1, synset2, verbose=False, simulate_root=True): + return synset1.lch_similarity(synset2, verbose, simulate_root) + + lch_similarity.__doc__ = Synset.lch_similarity.__doc__ + + def wup_similarity(self, synset1, synset2, verbose=False, simulate_root=True): + return synset1.wup_similarity(synset2, verbose, simulate_root) + + wup_similarity.__doc__ = Synset.wup_similarity.__doc__ + + def res_similarity(self, synset1, synset2, ic, verbose=False): + return synset1.res_similarity(synset2, ic, verbose) + + res_similarity.__doc__ = Synset.res_similarity.__doc__ + + def jcn_similarity(self, synset1, synset2, ic, verbose=False): + return synset1.jcn_similarity(synset2, ic, verbose) + + jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__ + + def lin_similarity(self, synset1, synset2, ic, verbose=False): + return synset1.lin_similarity(synset2, ic, verbose) + + lin_similarity.__doc__ = Synset.lin_similarity.__doc__ + + ############################################################# + # Morphy + ############################################################# + # Morphy, adapted from Oliver Steele's pywordnet + def morphy(self, form, pos=None, check_exceptions=True): + """ + Find a possible base form for the given form, with the given + part of speech, by checking WordNet's list of exceptional + forms, and by recursively stripping affixes for this part of + speech until a form in WordNet is found. 
+ + >>> from nltk.corpus import wordnet as wn + >>> print(wn.morphy('dogs')) + dog + >>> print(wn.morphy('churches')) + church + >>> print(wn.morphy('aardwolves')) + aardwolf + >>> print(wn.morphy('abaci')) + abacus + >>> wn.morphy('hardrock', wn.ADV) + >>> print(wn.morphy('book', wn.NOUN)) + book + >>> wn.morphy('book', wn.ADJ) + """ + + if pos is None: + morphy = self._morphy + analyses = chain(a for p in POS_LIST for a in morphy(form, p)) + else: + analyses = self._morphy(form, pos, check_exceptions) + + # get the first one we find + first = list(islice(analyses, 1)) + if len(first) == 1: + return first[0] + else: + return None + + MORPHOLOGICAL_SUBSTITUTIONS = { + NOUN: [ + ("s", ""), + ("ses", "s"), + ("ves", "f"), + ("xes", "x"), + ("zes", "z"), + ("ches", "ch"), + ("shes", "sh"), + ("men", "man"), + ("ies", "y"), + ], + VERB: [ + ("s", ""), + ("ies", "y"), + ("es", "e"), + ("es", ""), + ("ed", "e"), + ("ed", ""), + ("ing", "e"), + ("ing", ""), + ], + ADJ: [("er", ""), ("est", ""), ("er", "e"), ("est", "e")], + ADV: [], + } + + MORPHOLOGICAL_SUBSTITUTIONS[ADJ_SAT] = MORPHOLOGICAL_SUBSTITUTIONS[ADJ] + + def _morphy(self, form, pos, check_exceptions=True): + # from jordanbg: + # Given an original string x + # 1. Apply rules once to the input to get y1, y2, y3, etc. + # 2. Return all that are in the database + # 3. If there are no matches, keep applying rules until you either + # find a match or you can't go any further + + exceptions = self._exception_map[pos] + substitutions = self.MORPHOLOGICAL_SUBSTITUTIONS[pos] + + def apply_rules(forms): + return [ + form[: -len(old)] + new + for form in forms + for old, new in substitutions + if form.endswith(old) + ] + + def filter_forms(forms): + result = [] + seen = set() + for form in forms: + if form in self._lemma_pos_offset_map: + if pos in self._lemma_pos_offset_map[form]: + if form not in seen: + result.append(form) + seen.add(form) + return result + + # 0. Check the exception lists + if check_exceptions: + if form in exceptions: + return filter_forms([form] + exceptions[form]) + + # 1. Apply rules once to the input to get y1, y2, y3, etc. + forms = apply_rules([form]) + + # 2. Return all that are in the database (and check the original too) + results = filter_forms([form] + forms) + if results: + return results + + # 3. If there are no matches, keep applying rules until we find a match + while forms: + forms = apply_rules(forms) + results = filter_forms(forms) + if results: + return results + + # Return an empty list if we can't find anything + return [] + + ############################################################# + # Create information content from corpus + ############################################################# + def ic(self, corpus, weight_senses_equally=False, smoothing=1.0): + """ + Creates an information content lookup dictionary from a corpus. + + :type corpus: CorpusReader + :param corpus: The corpus from which we create an information + content dictionary. + :type weight_senses_equally: bool + :param weight_senses_equally: If this is True, gives all + possible senses equal weight rather than dividing by the + number of possible senses. (If a word has 3 synses, each + sense gets 0.3333 per appearance when this is False, 1.0 when + it is true.) 
+ :param smoothing: How much do we smooth synset counts (default is 1.0) + :type smoothing: float + :return: An information content dictionary + """ + counts = FreqDist() + for ww in corpus.words(): + counts[ww] += 1 + + ic = {} + for pp in POS_LIST: + ic[pp] = defaultdict(float) + + # Initialize the counts with the smoothing value + if smoothing > 0.0: + for pp in POS_LIST: + ic[pp][0] = smoothing + for ss in self.all_synsets(): + pos = ss._pos + if pos == ADJ_SAT: + pos = ADJ + ic[pos][ss._offset] = smoothing + + for ww in counts: + possible_synsets = self.synsets(ww) + if len(possible_synsets) == 0: + continue + + # Distribute weight among possible synsets + weight = float(counts[ww]) + if not weight_senses_equally: + weight /= float(len(possible_synsets)) + + for ss in possible_synsets: + pos = ss._pos + if pos == ADJ_SAT: + pos = ADJ + for level in ss._iter_hypernym_lists(): + for hh in level: + ic[pos][hh._offset] += weight + # Add the weight to the root + ic[pos][0] += weight + return ic + + def custom_lemmas(self, tab_file, lang): + """ + Reads a custom tab file containing mappings of lemmas in the given + language to Princeton WordNet 3.0 synset offsets, allowing NLTK's + WordNet functions to then be used with that language. + + See the "Tab files" section at https://omwn.org/omw1.html for + documentation on the Multilingual WordNet tab file format. + + :param tab_file: Tab file as a file or file-like object + :type: lang str + :param: lang ISO 639-3 code of the language of the tab file + """ + lg = lang.split("_")[0] + if len(lg) != 3: + raise ValueError("lang should be a (3 character) ISO 639-3 code") + self._lang_data[lang] = [ + defaultdict(list), + defaultdict(list), + defaultdict(list), + defaultdict(list), + ] + for line in tab_file.readlines(): + if isinstance(line, bytes): + # Support byte-stream files (e.g. 
as returned by Python 2's + # open() function) as well as text-stream ones + line = line.decode("utf-8") + if not line.startswith("#"): + triple = line.strip().split("\t") + if len(triple) < 3: + continue + offset_pos, label = triple[:2] + val = triple[-1] + if self.map30: + if offset_pos in self.map30: + # Map offset_pos to current Wordnet version: + offset_pos = self.map30[offset_pos] + else: + # Some OMW offsets were never in Wordnet: + if ( + offset_pos not in self.nomap + and offset_pos.replace("a", "s") not in self.nomap + ): + warnings.warn( + f"{lang}: invalid offset {offset_pos} in '{line}'" + ) + continue + elif offset_pos[-1] == "a": + wnss = self.of2ss(offset_pos) + if wnss and wnss.pos() == "s": # Wordnet pos is "s" + # Label OMW adjective satellites back to their Wordnet pos ("s") + offset_pos = self.ss2of(wnss) + pair = label.split(":") + attr = pair[-1] + if len(pair) == 1 or pair[0] == lg: + if attr == "lemma": + val = val.strip().replace(" ", "_") + self._lang_data[lang][1][val.lower()].append(offset_pos) + if attr in self.lg_attrs: + self._lang_data[lang][self.lg_attrs.index(attr)][ + offset_pos + ].append(val) + + def disable_custom_lemmas(self, lang): + """prevent synsets from being mistakenly added""" + for n in range(len(self.lg_attrs)): + self._lang_data[lang][n].default_factory = None + + ###################################################################### + # Visualize WordNet relation graphs using Graphviz + ###################################################################### + + def digraph( + self, + inputs, + rel=lambda s: s.hypernyms(), + pos=None, + maxdepth=-1, + shapes=None, + attr=None, + verbose=False, + ): + """ + Produce a graphical representation from 'inputs' (a list of + start nodes, which can be a mix of Synsets, Lemmas and/or words), + and a synset relation, for drawing with the 'dot' graph visualisation + program from the Graphviz package. + + Return a string in the DOT graph file language, which can then be + converted to an image by nltk.parse.dependencygraph.dot2img(dot_string). 
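+
+        (Alternatively, the returned DOT string can be written to a file
+        and rendered with the ``dot`` command from Graphviz, e.g.
+        ``dot -Tpng wn.dot -o wn.png``, assuming Graphviz is installed.)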
+ + Optional Parameters: + :rel: Wordnet synset relation + :pos: for words, restricts Part of Speech to 'n', 'v', 'a' or 'r' + :maxdepth: limit the longest path + :shapes: dictionary of strings that trigger a specified shape + :attr: dictionary with global graph attributes + :verbose: warn about cycles + + >>> from nltk.corpus import wordnet as wn + >>> print(wn.digraph([wn.synset('dog.n.01')])) + digraph G { + "Synset('animal.n.01')" -> "Synset('organism.n.01')"; + "Synset('canine.n.02')" -> "Synset('carnivore.n.01')"; + "Synset('carnivore.n.01')" -> "Synset('placental.n.01')"; + "Synset('chordate.n.01')" -> "Synset('animal.n.01')"; + "Synset('dog.n.01')" -> "Synset('canine.n.02')"; + "Synset('dog.n.01')" -> "Synset('domestic_animal.n.01')"; + "Synset('domestic_animal.n.01')" -> "Synset('animal.n.01')"; + "Synset('living_thing.n.01')" -> "Synset('whole.n.02')"; + "Synset('mammal.n.01')" -> "Synset('vertebrate.n.01')"; + "Synset('object.n.01')" -> "Synset('physical_entity.n.01')"; + "Synset('organism.n.01')" -> "Synset('living_thing.n.01')"; + "Synset('physical_entity.n.01')" -> "Synset('entity.n.01')"; + "Synset('placental.n.01')" -> "Synset('mammal.n.01')"; + "Synset('vertebrate.n.01')" -> "Synset('chordate.n.01')"; + "Synset('whole.n.02')" -> "Synset('object.n.01')"; + } + + """ + from nltk.util import edge_closure, edges2dot + + synsets = set() + edges = set() + if not shapes: + shapes = dict() + if not attr: + attr = dict() + + def add_lemma(lem): + ss = lem.synset() + synsets.add(ss) + edges.add((lem, ss)) + + for node in inputs: + typ = type(node) + if typ == Synset: + synsets.add(node) + elif typ == Lemma: + add_lemma(node) + elif typ == str: + for lemma in self.lemmas(node, pos): + add_lemma(lemma) + + for ss in synsets: + edges = edges.union(edge_closure(ss, rel, maxdepth, verbose)) + dot_string = edges2dot(sorted(list(edges)), shapes=shapes, attr=attr) + return dot_string + + +###################################################################### +# WordNet Information Content Corpus Reader +###################################################################### + + +class WordNetICCorpusReader(CorpusReader): + """ + A corpus reader for the WordNet information content corpus. + """ + + def __init__(self, root, fileids): + CorpusReader.__init__(self, root, fileids, encoding="utf8") + + # this load function would be more efficient if the data was pickled + # Note that we can't use NLTK's frequency distributions because + # synsets are overlapping (each instance of a synset also counts + # as an instance of its hypernyms) + def ic(self, icfile): + """ + Load an information content file from the wordnet_ic corpus + and return a dictionary. This dictionary has just two keys, + NOUN and VERB, whose values are dictionaries that map from + synsets to information content values. + + :type icfile: str + :param icfile: The name of the wordnet_ic file (e.g. "ic-brown.dat") + :return: An information content dictionary + """ + ic = {} + ic[NOUN] = defaultdict(float) + ic[VERB] = defaultdict(float) + with self.open(icfile) as fp: + for num, line in enumerate(fp): + if num == 0: # skip the header + continue + fields = line.split() + offset = int(fields[0][:-1]) + value = float(fields[1]) + pos = _get_pos(fields[0]) + if len(fields) == 3 and fields[2] == "ROOT": + # Store root count. 
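+                    # (The pseudo-offset 0 accumulates the total count for
+                    # the root of this part of speech; information_content()
+                    # divides individual synset counts by it.)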
+ ic[pos][0] += value + if value != 0: + ic[pos][offset] = value + return ic + + +###################################################################### +# Similarity metrics +###################################################################### + +# TODO: Add in the option to manually add a new root node; this will be +# useful for verb similarity as there exist multiple verb taxonomies. + +# More information about the metrics is available at +# http://marimba.d.umn.edu/similarity/measures.html + + +def path_similarity(synset1, synset2, verbose=False, simulate_root=True): + return synset1.path_similarity( + synset2, verbose=verbose, simulate_root=simulate_root + ) + + +def lch_similarity(synset1, synset2, verbose=False, simulate_root=True): + return synset1.lch_similarity(synset2, verbose=verbose, simulate_root=simulate_root) + + +def wup_similarity(synset1, synset2, verbose=False, simulate_root=True): + return synset1.wup_similarity(synset2, verbose=verbose, simulate_root=simulate_root) + + +def res_similarity(synset1, synset2, ic, verbose=False): + return synset1.res_similarity(synset2, ic, verbose=verbose) + + +def jcn_similarity(synset1, synset2, ic, verbose=False): + return synset1.jcn_similarity(synset2, ic, verbose=verbose) + + +def lin_similarity(synset1, synset2, ic, verbose=False): + return synset1.lin_similarity(synset2, ic, verbose=verbose) + + +path_similarity.__doc__ = Synset.path_similarity.__doc__ +lch_similarity.__doc__ = Synset.lch_similarity.__doc__ +wup_similarity.__doc__ = Synset.wup_similarity.__doc__ +res_similarity.__doc__ = Synset.res_similarity.__doc__ +jcn_similarity.__doc__ = Synset.jcn_similarity.__doc__ +lin_similarity.__doc__ = Synset.lin_similarity.__doc__ + + +def _lcs_ic(synset1, synset2, ic, verbose=False): + """ + Get the information content of the least common subsumer that has + the highest information content value. If two nodes have no + explicit common subsumer, assume that they share an artificial + root node that is the hypernym of all explicit roots. + + :type synset1: Synset + :param synset1: First input synset. + :type synset2: Synset + :param synset2: Second input synset. Must be the same part of + speech as the first synset. + :type ic: dict + :param ic: an information content object (as returned by ``load_ic()``). + :return: The information content of the two synsets and their most + informative subsumer + """ + if synset1._pos != synset2._pos: + raise WordNetError( + "Computing the least common subsumer requires " + "%s and %s to have the same part of speech." 
% (synset1, synset2) + ) + + ic1 = information_content(synset1, ic) + ic2 = information_content(synset2, ic) + subsumers = synset1.common_hypernyms(synset2) + if len(subsumers) == 0: + subsumer_ic = 0 + else: + subsumer_ic = max(information_content(s, ic) for s in subsumers) + + if verbose: + print("> LCS Subsumer by content:", subsumer_ic) + + return ic1, ic2, subsumer_ic + + +# Utility functions + + +def information_content(synset, ic): + pos = synset._pos + if pos == ADJ_SAT: + pos = ADJ + try: + icpos = ic[pos] + except KeyError as e: + msg = "Information content file has no entries for part-of-speech: %s" + raise WordNetError(msg % pos) from e + + counts = icpos[synset._offset] + if counts == 0: + return _INF + else: + return -math.log(counts / icpos[0]) + + +# get the part of speech (NOUN or VERB) from the information content record +# (each identifier has a 'n' or 'v' suffix) + + +def _get_pos(field): + if field[-1] == "n": + return NOUN + elif field[-1] == "v": + return VERB + else: + msg = ( + "Unidentified part of speech in WordNet Information Content file " + "for field %s" % field + ) + raise ValueError(msg) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py new file mode 100644 index 0000000000000000000000000000000000000000..35bafdfef4f12f934de8e5e4617341fb2ba7b7a8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ycoe.py @@ -0,0 +1,256 @@ +# Natural Language Toolkit: York-Toronto-Helsinki Parsed Corpus of Old English Prose (YCOE) +# +# Copyright (C) 2001-2015 NLTK Project +# Author: Selina Dennis +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old +English Prose (YCOE), a 1.5 million word syntactically-annotated +corpus of Old English prose texts. The corpus is distributed by the +Oxford Text Archive: http://www.ota.ahds.ac.uk/ It is not included +with NLTK. + +The YCOE corpus is divided into 100 files, each representing +an Old English prose text. Tags used within each text complies +to the YCOE standard: https://www-users.york.ac.uk/~lang22/YCOE/YcoeHome.htm +""" + +import os +import re + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader +from nltk.corpus.reader.tagged import TaggedCorpusReader +from nltk.corpus.reader.util import * +from nltk.tokenize import RegexpTokenizer + + +class YCOECorpusReader(CorpusReader): + """ + Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old + English Prose (YCOE), a 1.5 million word syntactically-annotated + corpus of Old English prose texts. 
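+
+    A minimal usage sketch (assuming the YCOE data has been obtained from
+    the Oxford Text Archive and installed where NLTK can find it, e.g.
+    under ``nltk_data/corpora/ycoe``, and accessed through the usual lazy
+    loader ``nltk.corpus.ycoe``):
+
+        >>> from nltk.corpus import ycoe # doctest: +SKIP
+        >>> ycoe.documents()[:3] # doctest: +SKIP
+        >>> ycoe.tagged_words('coadrian.o34')[:5] # doctest: +SKIP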
+ """ + + def __init__(self, root, encoding="utf8"): + CorpusReader.__init__(self, root, [], encoding) + + self._psd_reader = YCOEParseCorpusReader( + self.root.join("psd"), ".*", ".psd", encoding=encoding + ) + self._pos_reader = YCOETaggedCorpusReader(self.root.join("pos"), ".*", ".pos") + + # Make sure we have a consistent set of items: + documents = {f[:-4] for f in self._psd_reader.fileids()} + if {f[:-4] for f in self._pos_reader.fileids()} != documents: + raise ValueError('Items in "psd" and "pos" ' "subdirectories do not match.") + + fileids = sorted( + ["%s.psd" % doc for doc in documents] + + ["%s.pos" % doc for doc in documents] + ) + CorpusReader.__init__(self, root, fileids, encoding) + self._documents = sorted(documents) + + def documents(self, fileids=None): + """ + Return a list of document identifiers for all documents in + this corpus, or for the documents with the given file(s) if + specified. + """ + if fileids is None: + return self._documents + if isinstance(fileids, str): + fileids = [fileids] + for f in fileids: + if f not in self._fileids: + raise KeyError("File id %s not found" % fileids) + # Strip off the '.pos' and '.psd' extensions. + return sorted({f[:-4] for f in fileids}) + + def fileids(self, documents=None): + """ + Return a list of file identifiers for the files that make up + this corpus, or that store the given document(s) if specified. + """ + if documents is None: + return self._fileids + elif isinstance(documents, str): + documents = [documents] + return sorted( + set( + ["%s.pos" % doc for doc in documents] + + ["%s.psd" % doc for doc in documents] + ) + ) + + def _getfileids(self, documents, subcorpus): + """ + Helper that selects the appropriate fileids for a given set of + documents from a given subcorpus (pos or psd). + """ + if documents is None: + documents = self._documents + else: + if isinstance(documents, str): + documents = [documents] + for document in documents: + if document not in self._documents: + if document[-4:] in (".pos", ".psd"): + raise ValueError( + "Expected a document identifier, not a file " + "identifier. (Use corpus.documents() to get " + "a list of document identifiers." + ) + else: + raise ValueError("Document identifier %s not found" % document) + return [f"{d}.{subcorpus}" for d in documents] + + # Delegate to one of our two sub-readers: + def words(self, documents=None): + return self._pos_reader.words(self._getfileids(documents, "pos")) + + def sents(self, documents=None): + return self._pos_reader.sents(self._getfileids(documents, "pos")) + + def paras(self, documents=None): + return self._pos_reader.paras(self._getfileids(documents, "pos")) + + def tagged_words(self, documents=None): + return self._pos_reader.tagged_words(self._getfileids(documents, "pos")) + + def tagged_sents(self, documents=None): + return self._pos_reader.tagged_sents(self._getfileids(documents, "pos")) + + def tagged_paras(self, documents=None): + return self._pos_reader.tagged_paras(self._getfileids(documents, "pos")) + + def parsed_sents(self, documents=None): + return self._psd_reader.parsed_sents(self._getfileids(documents, "psd")) + + +class YCOEParseCorpusReader(BracketParseCorpusReader): + """Specialized version of the standard bracket parse corpus reader + that strips out (CODE ...) and (ID ...) 
nodes.""" + + def _parse(self, t): + t = re.sub(r"(?u)\((CODE|ID)[^\)]*\)", "", t) + if re.match(r"\s*\(\s*\)\s*$", t): + return None + return BracketParseCorpusReader._parse(self, t) + + +class YCOETaggedCorpusReader(TaggedCorpusReader): + def __init__(self, root, items, encoding="utf8"): + gaps_re = r"(?u)(?<=/\.)\s+|\s*\S*_CODE\s*|\s*\S*_ID\s*" + sent_tokenizer = RegexpTokenizer(gaps_re, gaps=True) + TaggedCorpusReader.__init__( + self, root, items, sep="_", sent_tokenizer=sent_tokenizer + ) + + +#: A list of all documents and their titles in ycoe. +documents = { + "coadrian.o34": "Adrian and Ritheus", + "coaelhom.o3": "Ælfric, Supplemental Homilies", + "coaelive.o3": "Ælfric's Lives of Saints", + "coalcuin": "Alcuin De virtutibus et vitiis", + "coalex.o23": "Alexander's Letter to Aristotle", + "coapollo.o3": "Apollonius of Tyre", + "coaugust": "Augustine", + "cobede.o2": "Bede's History of the English Church", + "cobenrul.o3": "Benedictine Rule", + "coblick.o23": "Blickling Homilies", + "coboeth.o2": "Boethius' Consolation of Philosophy", + "cobyrhtf.o3": "Byrhtferth's Manual", + "cocanedgD": "Canons of Edgar (D)", + "cocanedgX": "Canons of Edgar (X)", + "cocathom1.o3": "Ælfric's Catholic Homilies I", + "cocathom2.o3": "Ælfric's Catholic Homilies II", + "cochad.o24": "Saint Chad", + "cochdrul": "Chrodegang of Metz, Rule", + "cochristoph": "Saint Christopher", + "cochronA.o23": "Anglo-Saxon Chronicle A", + "cochronC": "Anglo-Saxon Chronicle C", + "cochronD": "Anglo-Saxon Chronicle D", + "cochronE.o34": "Anglo-Saxon Chronicle E", + "cocura.o2": "Cura Pastoralis", + "cocuraC": "Cura Pastoralis (Cotton)", + "codicts.o34": "Dicts of Cato", + "codocu1.o1": "Documents 1 (O1)", + "codocu2.o12": "Documents 2 (O1/O2)", + "codocu2.o2": "Documents 2 (O2)", + "codocu3.o23": "Documents 3 (O2/O3)", + "codocu3.o3": "Documents 3 (O3)", + "codocu4.o24": "Documents 4 (O2/O4)", + "coeluc1": "Honorius of Autun, Elucidarium 1", + "coeluc2": "Honorius of Autun, Elucidarium 1", + "coepigen.o3": "Ælfric's Epilogue to Genesis", + "coeuphr": "Saint Euphrosyne", + "coeust": "Saint Eustace and his companions", + "coexodusP": "Exodus (P)", + "cogenesiC": "Genesis (C)", + "cogregdC.o24": "Gregory's Dialogues (C)", + "cogregdH.o23": "Gregory's Dialogues (H)", + "coherbar": "Pseudo-Apuleius, Herbarium", + "coinspolD.o34": "Wulfstan's Institute of Polity (D)", + "coinspolX": "Wulfstan's Institute of Polity (X)", + "cojames": "Saint James", + "colacnu.o23": "Lacnunga", + "colaece.o2": "Leechdoms", + "colaw1cn.o3": "Laws, Cnut I", + "colaw2cn.o3": "Laws, Cnut II", + "colaw5atr.o3": "Laws, Æthelred V", + "colaw6atr.o3": "Laws, Æthelred VI", + "colawaf.o2": "Laws, Alfred", + "colawafint.o2": "Alfred's Introduction to Laws", + "colawger.o34": "Laws, Gerefa", + "colawine.ox2": "Laws, Ine", + "colawnorthu.o3": "Northumbra Preosta Lagu", + "colawwllad.o4": "Laws, William I, Lad", + "coleofri.o4": "Leofric", + "colsigef.o3": "Ælfric's Letter to Sigefyrth", + "colsigewB": "Ælfric's Letter to Sigeweard (B)", + "colsigewZ.o34": "Ælfric's Letter to Sigeweard (Z)", + "colwgeat": "Ælfric's Letter to Wulfgeat", + "colwsigeT": "Ælfric's Letter to Wulfsige (T)", + "colwsigeXa.o34": "Ælfric's Letter to Wulfsige (Xa)", + "colwstan1.o3": "Ælfric's Letter to Wulfstan I", + "colwstan2.o3": "Ælfric's Letter to Wulfstan II", + "comargaC.o34": "Saint Margaret (C)", + "comargaT": "Saint Margaret (T)", + "comart1": "Martyrology, I", + "comart2": "Martyrology, II", + "comart3.o23": "Martyrology, III", + "comarvel.o23": "Marvels of the East", + 
"comary": "Mary of Egypt", + "coneot": "Saint Neot", + "conicodA": "Gospel of Nicodemus (A)", + "conicodC": "Gospel of Nicodemus (C)", + "conicodD": "Gospel of Nicodemus (D)", + "conicodE": "Gospel of Nicodemus (E)", + "coorosiu.o2": "Orosius", + "cootest.o3": "Heptateuch", + "coprefcath1.o3": "Ælfric's Preface to Catholic Homilies I", + "coprefcath2.o3": "Ælfric's Preface to Catholic Homilies II", + "coprefcura.o2": "Preface to the Cura Pastoralis", + "coprefgen.o3": "Ælfric's Preface to Genesis", + "copreflives.o3": "Ælfric's Preface to Lives of Saints", + "coprefsolilo": "Preface to Augustine's Soliloquies", + "coquadru.o23": "Pseudo-Apuleius, Medicina de quadrupedibus", + "corood": "History of the Holy Rood-Tree", + "cosevensl": "Seven Sleepers", + "cosolilo": "St. Augustine's Soliloquies", + "cosolsat1.o4": "Solomon and Saturn I", + "cosolsat2": "Solomon and Saturn II", + "cotempo.o3": "Ælfric's De Temporibus Anni", + "coverhom": "Vercelli Homilies", + "coverhomE": "Vercelli Homilies (E)", + "coverhomL": "Vercelli Homilies (L)", + "covinceB": "Saint Vincent (Bodley 343)", + "covinsal": "Vindicta Salvatoris", + "cowsgosp.o3": "West-Saxon Gospels", + "cowulf.o34": "Wulfstan's Homilies", +} diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/util.py b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/util.py new file mode 100644 index 0000000000000000000000000000000000000000..29a63574264c4859081ef8e36e26d9382f5b087f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/util.py @@ -0,0 +1,154 @@ +# Natural Language Toolkit: Corpus Reader Utility Functions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +###################################################################### +# { Lazy Corpus Loader +###################################################################### + +import gc +import re + +import nltk + +TRY_ZIPFILE_FIRST = False + + +class LazyCorpusLoader: + """ + To see the API documentation for this lazily loaded corpus, first + run corpus.ensure_loaded(), and then run help(this_corpus). + + LazyCorpusLoader is a proxy object which is used to stand in for a + corpus object before the corpus is loaded. This allows NLTK to + create an object for each corpus, but defer the costs associated + with loading those corpora until the first time that they're + actually accessed. + + The first time this object is accessed in any way, it will load + the corresponding corpus, and transform itself into that corpus + (by modifying its own ``__class__`` and ``__dict__`` attributes). + + If the corpus can not be found, then accessing this object will + raise an exception, displaying installation instructions for the + NLTK data package. Once they've properly installed the data + package (or modified ``nltk.data.path`` to point to its location), + they can then use the corpus object without restarting python. + + :param name: The name of the corpus + :type name: str + :param reader_cls: The specific CorpusReader class, e.g. PlaintextCorpusReader, WordListCorpusReader + :type reader: nltk.corpus.reader.api.CorpusReader + :param nltk_data_subdir: The subdirectory where the corpus is stored. + :type nltk_data_subdir: str + :param `*args`: Any other non-keywords arguments that `reader_cls` might need. + :param `**kwargs`: Any other keywords arguments that `reader_cls` might need. 
+ """ + + def __init__(self, name, reader_cls, *args, **kwargs): + from nltk.corpus.reader.api import CorpusReader + + assert issubclass(reader_cls, CorpusReader) + self.__name = self.__name__ = name + self.__reader_cls = reader_cls + # If nltk_data_subdir is set explicitly + if "nltk_data_subdir" in kwargs: + # Use the specified subdirectory path + self.subdir = kwargs["nltk_data_subdir"] + # Pops the `nltk_data_subdir` argument, we don't need it anymore. + kwargs.pop("nltk_data_subdir", None) + else: # Otherwise use 'nltk_data/corpora' + self.subdir = "corpora" + self.__args = args + self.__kwargs = kwargs + + def __load(self): + # Find the corpus root directory. + zip_name = re.sub(r"(([^/]+)(/.*)?)", r"\2.zip/\1/", self.__name) + if TRY_ZIPFILE_FIRST: + try: + root = nltk.data.find(f"{self.subdir}/{zip_name}") + except LookupError as e: + try: + root = nltk.data.find(f"{self.subdir}/{self.__name}") + except LookupError: + raise e + else: + try: + root = nltk.data.find(f"{self.subdir}/{self.__name}") + except LookupError as e: + try: + root = nltk.data.find(f"{self.subdir}/{zip_name}") + except LookupError: + raise e + + # Load the corpus. + corpus = self.__reader_cls(root, *self.__args, **self.__kwargs) + + # This is where the magic happens! Transform ourselves into + # the corpus by modifying our own __dict__ and __class__ to + # match that of the corpus. + + args, kwargs = self.__args, self.__kwargs + name, reader_cls = self.__name, self.__reader_cls + + self.__dict__ = corpus.__dict__ + self.__class__ = corpus.__class__ + + # _unload support: assign __dict__ and __class__ back, then do GC. + # after reassigning __dict__ there shouldn't be any references to + # corpus data so the memory should be deallocated after gc.collect() + def _unload(self): + lazy_reader = LazyCorpusLoader(name, reader_cls, *args, **kwargs) + self.__dict__ = lazy_reader.__dict__ + self.__class__ = lazy_reader.__class__ + gc.collect() + + self._unload = _make_bound_method(_unload, self) + + def __getattr__(self, attr): + + # Fix for inspect.isclass under Python 2.6 + # (see https://bugs.python.org/issue1225107). + # Without this fix tests may take extra 1.5GB RAM + # because all corpora gets loaded during test collection. + if attr == "__bases__": + raise AttributeError("LazyCorpusLoader object has no attribute '__bases__'") + + self.__load() + # This looks circular, but its not, since __load() changes our + # __class__ to something new: + return getattr(self, attr) + + def __repr__(self): + return "<{} in {!r} (not loaded yet)>".format( + self.__reader_cls.__name__, + ".../corpora/" + self.__name, + ) + + def _unload(self): + # If an exception occurs during corpus loading then + # '_unload' method may be unattached, so __getattr__ can be called; + # we shouldn't trigger corpus loading again in this case. + pass + + +def _make_bound_method(func, self): + """ + Magic for creating bound methods (used for _unload). 
+ """ + + class Foo: + def meth(self): + pass + + f = Foo() + bound_method = type(f.meth) + + try: + return bound_method(func, self, self.__class__) + except TypeError: # python3 + return bound_method(func, self) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..754b3d2d78286799b229700bb19bd21cb683b855 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__init__.py @@ -0,0 +1,24 @@ +# Natural Language Toolkit: Inference +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dan Garrette +# Ewan Klein +# +# URL: +# For license information, see LICENSE.TXT + +""" +Classes and interfaces for theorem proving and model building. +""" + +from nltk.inference.api import ParallelProverBuilder, ParallelProverBuilderCommand +from nltk.inference.discourse import ( + CfgReadingCommand, + DiscourseTester, + DrtGlueReadingCommand, + ReadingCommand, +) +from nltk.inference.mace import Mace, MaceCommand +from nltk.inference.prover9 import Prover9, Prover9Command +from nltk.inference.resolution import ResolutionProver, ResolutionProverCommand +from nltk.inference.tableau import TableauProver, TableauProverCommand diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fe07b7f10b1f4d31064f42f6ec7db09f8bcf05b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4920c289af107ebb301efaa1ce39801562417a32 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/discourse.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/discourse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50a52608b8160bc98df5a5e39618e5b341d3affd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/discourse.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/mace.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/mace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f781b64ceced20bba0c81c969880fb3dd53dacd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/mace.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36e64fc0af08a07ccb237483be237051dd2c0efe Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ebb333639b8fb3156c29e6898105186d48dc08c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/resolution.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/resolution.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bce0291a7cdbad75d0ae6be0137a1cb778a19a29 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/resolution.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/tableau.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/tableau.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..201317a4e3d56585847066e17b65d2590bc79201 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/inference/__pycache__/tableau.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/inference/api.py new file mode 100644 index 0000000000000000000000000000000000000000..12f1c099941280c1a72f40f957330dc5497a1b27 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/inference/api.py @@ -0,0 +1,614 @@ +# Natural Language Toolkit: Classifier Interface +# +# Author: Ewan Klein +# Dan Garrette +# +# URL: +# For license information, see LICENSE.TXT + +""" +Interfaces and base classes for theorem provers and model builders. + +``Prover`` is a standard interface for a theorem prover which tries to prove a goal from a +list of assumptions. + +``ModelBuilder`` is a standard interface for a model builder. Given just a set of assumptions. +the model builder tries to build a model for the assumptions. Given a set of assumptions and a +goal *G*, the model builder tries to find a counter-model, in the sense of a model that will satisfy +the assumptions plus the negation of *G*. +""" + +import threading +import time +from abc import ABCMeta, abstractmethod + + +class Prover(metaclass=ABCMeta): + """ + Interface for trying to prove a goal from assumptions. Both the goal and + the assumptions are constrained to be formulas of ``logic.Expression``. + """ + + def prove(self, goal=None, assumptions=None, verbose=False): + """ + :return: Whether the proof was successful or not. + :rtype: bool + """ + return self._prove(goal, assumptions, verbose)[0] + + @abstractmethod + def _prove(self, goal=None, assumptions=None, verbose=False): + """ + :return: Whether the proof was successful or not, along with the proof + :rtype: tuple: (bool, str) + """ + + +class ModelBuilder(metaclass=ABCMeta): + """ + Interface for trying to build a model of set of formulas. + Open formulas are assumed to be universally quantified. + Both the goal and the assumptions are constrained to be formulas + of ``logic.Expression``. + """ + + def build_model(self, goal=None, assumptions=None, verbose=False): + """ + Perform the actual model building. 
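For orientation, a sketch of the Prover/ModelBuilder duality (an editor's illustration; the skipped lines assume the external Prover9 and Mace4 binaries are installed):

    >>> from nltk.sem import Expression
    >>> from nltk.inference import Prover9, Mace     # doctest: +SKIP
    >>> a = Expression.fromstring('all x.(man(x) -> mortal(x))')
    >>> b = Expression.fromstring('man(socrates)')
    >>> g = Expression.fromstring('mortal(socrates)')
    >>> Prover9().prove(g, [a, b])                   # doctest: +SKIP
    True
    >>> Mace().build_model(g, [a, b])   # no countermodel exists  # doctest: +SKIP
    False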
+ :return: Whether a model was generated + :rtype: bool + """ + return self._build_model(goal, assumptions, verbose)[0] + + @abstractmethod + def _build_model(self, goal=None, assumptions=None, verbose=False): + """ + Perform the actual model building. + :return: Whether a model was generated, and the model itself + :rtype: tuple(bool, sem.Valuation) + """ + + +class TheoremToolCommand(metaclass=ABCMeta): + """ + This class holds a goal and a list of assumptions to be used in proving + or model building. + """ + + @abstractmethod + def add_assumptions(self, new_assumptions): + """ + Add new assumptions to the assumption list. + + :param new_assumptions: new assumptions + :type new_assumptions: list(sem.Expression) + """ + + @abstractmethod + def retract_assumptions(self, retracted, debug=False): + """ + Retract assumptions from the assumption list. + + :param debug: If True, give warning when ``retracted`` is not present on + assumptions list. + :type debug: bool + :param retracted: assumptions to be retracted + :type retracted: list(sem.Expression) + """ + + @abstractmethod + def assumptions(self): + """ + List the current assumptions. + + :return: list of ``Expression`` + """ + + @abstractmethod + def goal(self): + """ + Return the goal + + :return: ``Expression`` + """ + + @abstractmethod + def print_assumptions(self): + """ + Print the list of the current assumptions. + """ + + +class ProverCommand(TheoremToolCommand): + """ + This class holds a ``Prover``, a goal, and a list of assumptions. When + prove() is called, the ``Prover`` is executed with the goal and assumptions. + """ + + @abstractmethod + def prove(self, verbose=False): + """ + Perform the actual proof. + """ + + @abstractmethod + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + + @abstractmethod + def get_prover(self): + """ + Return the prover object + :return: ``Prover`` + """ + + +class ModelBuilderCommand(TheoremToolCommand): + """ + This class holds a ``ModelBuilder``, a goal, and a list of assumptions. + When build_model() is called, the ``ModelBuilder`` is executed with the goal + and assumptions. + """ + + @abstractmethod + def build_model(self, verbose=False): + """ + Perform the actual model building. + :return: A model if one is generated; None otherwise. + :rtype: sem.Valuation + """ + + @abstractmethod + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? + :return: str + """ + + @abstractmethod + def get_model_builder(self): + """ + Return the model builder object + :return: ``ModelBuilder`` + """ + + +class BaseTheoremToolCommand(TheoremToolCommand): + """ + This class holds a goal and a list of assumptions to be used in proving + or model building. + """ + + def __init__(self, goal=None, assumptions=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + """ + self._goal = goal + + if not assumptions: + self._assumptions = [] + else: + self._assumptions = list(assumptions) + + self._result = None + """A holder for the result, to prevent unnecessary re-proving""" + + def add_assumptions(self, new_assumptions): + """ + Add new assumptions to the assumption list. 
+ + :param new_assumptions: new assumptions + :type new_assumptions: list(sem.Expression) + """ + self._assumptions.extend(new_assumptions) + self._result = None + + def retract_assumptions(self, retracted, debug=False): + """ + Retract assumptions from the assumption list. + + :param debug: If True, give warning when ``retracted`` is not present on + assumptions list. + :type debug: bool + :param retracted: assumptions to be retracted + :type retracted: list(sem.Expression) + """ + retracted = set(retracted) + result_list = list(filter(lambda a: a not in retracted, self._assumptions)) + if debug and result_list == self._assumptions: + print(Warning("Assumptions list has not been changed:")) + self.print_assumptions() + + self._assumptions = result_list + + self._result = None + + def assumptions(self): + """ + List the current assumptions. + + :return: list of ``Expression`` + """ + return self._assumptions + + def goal(self): + """ + Return the goal + + :return: ``Expression`` + """ + return self._goal + + def print_assumptions(self): + """ + Print the list of the current assumptions. + """ + for a in self.assumptions(): + print(a) + + +class BaseProverCommand(BaseTheoremToolCommand, ProverCommand): + """ + This class holds a ``Prover``, a goal, and a list of assumptions. When + prove() is called, the ``Prover`` is executed with the goal and assumptions. + """ + + def __init__(self, prover, goal=None, assumptions=None): + """ + :param prover: The theorem tool to execute with the assumptions + :type prover: Prover + :see: ``BaseTheoremToolCommand`` + """ + self._prover = prover + """The theorem tool to execute with the assumptions""" + + BaseTheoremToolCommand.__init__(self, goal, assumptions) + + self._proof = None + + def prove(self, verbose=False): + """ + Perform the actual proof. Store the result to prevent unnecessary + re-proving. + """ + if self._result is None: + self._result, self._proof = self._prover._prove( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call prove() first to get a proof!") + else: + return self.decorate_proof(self._proof, simplify) + + def decorate_proof(self, proof_string, simplify=True): + """ + Modify and return the proof string + :param proof_string: str the proof to decorate + :param simplify: bool simplify the proof? + :return: str + """ + return proof_string + + def get_prover(self): + return self._prover + + +class BaseModelBuilderCommand(BaseTheoremToolCommand, ModelBuilderCommand): + """ + This class holds a ``ModelBuilder``, a goal, and a list of assumptions. When + build_model() is called, the ``ModelBuilder`` is executed with the goal and + assumptions. + """ + + def __init__(self, modelbuilder, goal=None, assumptions=None): + """ + :param modelbuilder: The theorem tool to execute with the assumptions + :type modelbuilder: ModelBuilder + :see: ``BaseTheoremToolCommand`` + """ + self._modelbuilder = modelbuilder + """The theorem tool to execute with the assumptions""" + + BaseTheoremToolCommand.__init__(self, goal, assumptions) + + self._model = None + + def build_model(self, verbose=False): + """ + Attempt to build a model. Store the result to prevent unnecessary + re-building. 
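An illustrative sketch (editor's addition; assumes the external Mace4 binary is installed):

    >>> from nltk.sem import Expression
    >>> from nltk.inference import MaceCommand                                     # doctest: +SKIP
    >>> mc = MaceCommand(assumptions=[Expression.fromstring('exists x.man(x)')])   # doctest: +SKIP
    >>> mc.build_model()                                                           # doctest: +SKIP
    True
    >>> print(mc.model(format='cooked'))                                           # doctest: +SKIP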
+ """ + if self._result is None: + self._result, self._model = self._modelbuilder._build_model( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call build_model() first to " "get a model!") + else: + return self._decorate_model(self._model, format) + + def _decorate_model(self, valuation_str, format=None): + """ + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + :return: str + """ + return valuation_str + + def get_model_builder(self): + return self._modelbuilder + + +class TheoremToolCommandDecorator(TheoremToolCommand): + """ + A base decorator for the ``ProverCommandDecorator`` and + ``ModelBuilderCommandDecorator`` classes from which decorators can extend. + """ + + def __init__(self, command): + """ + :param command: ``TheoremToolCommand`` to decorate + """ + self._command = command + + # The decorator has its own versions of 'result' different from the + # underlying command + self._result = None + + def assumptions(self): + return self._command.assumptions() + + def goal(self): + return self._command.goal() + + def add_assumptions(self, new_assumptions): + self._command.add_assumptions(new_assumptions) + self._result = None + + def retract_assumptions(self, retracted, debug=False): + self._command.retract_assumptions(retracted, debug) + self._result = None + + def print_assumptions(self): + self._command.print_assumptions() + + +class ProverCommandDecorator(TheoremToolCommandDecorator, ProverCommand): + """ + A base decorator for the ``ProverCommand`` class from which other + prover command decorators can extend. + """ + + def __init__(self, proverCommand): + """ + :param proverCommand: ``ProverCommand`` to decorate + """ + TheoremToolCommandDecorator.__init__(self, proverCommand) + + # The decorator has its own versions of 'result' and 'proof' + # because they may be different from the underlying command + self._proof = None + + def prove(self, verbose=False): + if self._result is None: + prover = self.get_prover() + self._result, self._proof = prover._prove( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call prove() first to get a proof!") + else: + return self.decorate_proof(self._proof, simplify) + + def decorate_proof(self, proof_string, simplify=True): + """ + Modify and return the proof string + :param proof_string: str the proof to decorate + :param simplify: bool simplify the proof? + :return: str + """ + return self._command.decorate_proof(proof_string, simplify) + + def get_prover(self): + return self._command.get_prover() + + +class ModelBuilderCommandDecorator(TheoremToolCommandDecorator, ModelBuilderCommand): + """ + A base decorator for the ``ModelBuilderCommand`` class from which other + prover command decorators can extend. 
+ """ + + def __init__(self, modelBuilderCommand): + """ + :param modelBuilderCommand: ``ModelBuilderCommand`` to decorate + """ + TheoremToolCommandDecorator.__init__(self, modelBuilderCommand) + + # The decorator has its own versions of 'result' and 'valuation' + # because they may be different from the underlying command + self._model = None + + def build_model(self, verbose=False): + """ + Attempt to build a model. Store the result to prevent unnecessary + re-building. + """ + if self._result is None: + modelbuilder = self.get_model_builder() + self._result, self._model = modelbuilder._build_model( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call build_model() first to " "get a model!") + else: + return self._decorate_model(self._model, format) + + def _decorate_model(self, valuation_str, format=None): + """ + Modify and return the proof string + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + :return: str + """ + return self._command._decorate_model(valuation_str, format) + + def get_model_builder(self): + return self._command.get_prover() + + +class ParallelProverBuilder(Prover, ModelBuilder): + """ + This class stores both a prover and a model builder and when either + prove() or build_model() is called, then both theorem tools are run in + parallel. Whichever finishes first, the prover or the model builder, is the + result that will be used. + """ + + def __init__(self, prover, modelbuilder): + self._prover = prover + self._modelbuilder = modelbuilder + + def _prove(self, goal=None, assumptions=None, verbose=False): + return self._run(goal, assumptions, verbose), "" + + def _build_model(self, goal=None, assumptions=None, verbose=False): + return not self._run(goal, assumptions, verbose), "" + + def _run(self, goal, assumptions, verbose): + # Set up two thread, Prover and ModelBuilder to run in parallel + tp_thread = TheoremToolThread( + lambda: self._prover.prove(goal, assumptions, verbose), verbose, "TP" + ) + mb_thread = TheoremToolThread( + lambda: self._modelbuilder.build_model(goal, assumptions, verbose), + verbose, + "MB", + ) + + tp_thread.start() + mb_thread.start() + + while tp_thread.is_alive() and mb_thread.is_alive(): + # wait until either the prover or the model builder is done + pass + + if tp_thread.result is not None: + return tp_thread.result + elif mb_thread.result is not None: + return not mb_thread.result + else: + return None + + +class ParallelProverBuilderCommand(BaseProverCommand, BaseModelBuilderCommand): + """ + This command stores both a prover and a model builder and when either + prove() or build_model() is called, then both theorem tools are run in + parallel. Whichever finishes first, the prover or the model builder, is the + result that will be used. + + Because the theorem prover result is the opposite of the model builder + result, we will treat self._result as meaning "proof found/no model found". 
+ """ + + def __init__(self, prover, modelbuilder, goal=None, assumptions=None): + BaseProverCommand.__init__(self, prover, goal, assumptions) + BaseModelBuilderCommand.__init__(self, modelbuilder, goal, assumptions) + + def prove(self, verbose=False): + return self._run(verbose) + + def build_model(self, verbose=False): + return not self._run(verbose) + + def _run(self, verbose): + # Set up two thread, Prover and ModelBuilder to run in parallel + tp_thread = TheoremToolThread( + lambda: BaseProverCommand.prove(self, verbose), verbose, "TP" + ) + mb_thread = TheoremToolThread( + lambda: BaseModelBuilderCommand.build_model(self, verbose), verbose, "MB" + ) + + tp_thread.start() + mb_thread.start() + + while tp_thread.is_alive() and mb_thread.is_alive(): + # wait until either the prover or the model builder is done + pass + + if tp_thread.result is not None: + self._result = tp_thread.result + elif mb_thread.result is not None: + self._result = not mb_thread.result + return self._result + + +class TheoremToolThread(threading.Thread): + def __init__(self, command, verbose, name=None): + threading.Thread.__init__(self) + self._command = command + self._result = None + self._verbose = verbose + self._name = name + + def run(self): + try: + self._result = self._command() + if self._verbose: + print( + "Thread %s finished with result %s at %s" + % (self._name, self._result, time.localtime(time.time())) + ) + except Exception as e: + print(e) + print("Thread %s completed abnormally" % (self._name)) + + @property + def result(self): + return self._result diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/discourse.py b/env-llmeval/lib/python3.10/site-packages/nltk/inference/discourse.py new file mode 100644 index 0000000000000000000000000000000000000000..9630234dcf3837d9da2b4213fe26d22491899932 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/inference/discourse.py @@ -0,0 +1,651 @@ +# Natural Language Toolkit: Discourse Processing +# +# Author: Ewan Klein +# Dan Garrette +# +# URL: +# For license information, see LICENSE.TXT + +r""" +Module for incrementally developing simple discourses, and checking for semantic ambiguity, +consistency and informativeness. + +Many of the ideas are based on the CURT family of programs of Blackburn and Bos +(see http://homepages.inf.ed.ac.uk/jbos/comsem/book1.html). + +Consistency checking is carried out by using the ``mace`` module to call the Mace4 model builder. +Informativeness checking is carried out with a call to ``Prover.prove()`` from +the ``inference`` module. + +``DiscourseTester`` is a constructor for discourses. +The basic data structure is a list of sentences, stored as ``self._sentences``. Each sentence in the list +is assigned a "sentence ID" (``sid``) of the form ``s``\ *i*. For example:: + + s0: A boxer walks + s1: Every boxer chases a girl + +Each sentence can be ambiguous between a number of readings, each of which receives a +"reading ID" (``rid``) of the form ``s``\ *i* -``r``\ *j*. For example:: + + s0 readings: + + s0-r1: some x.(boxer(x) & walk(x)) + s0-r0: some x.(boxerdog(x) & walk(x)) + +A "thread" is a list of readings, represented as a list of ``rid``\ s. +Each thread receives a "thread ID" (``tid``) of the form ``d``\ *i*. +For example:: + + d0: ['s0-r0', 's1-r0'] + +The set of all threads for a discourse is the Cartesian product of all the readings of the sequences of sentences. +(This is not intended to scale beyond very short discourses!) 
The method ``readings(filter=True)`` will only show +those threads which are consistent (taking into account any background assumptions). +""" + +import os +from abc import ABCMeta, abstractmethod +from functools import reduce +from operator import add, and_ + +from nltk.data import show_cfg +from nltk.inference.mace import MaceCommand +from nltk.inference.prover9 import Prover9Command +from nltk.parse import load_parser +from nltk.parse.malt import MaltParser +from nltk.sem.drt import AnaphoraResolutionException, resolve_anaphora +from nltk.sem.glue import DrtGlue +from nltk.sem.logic import Expression +from nltk.tag import RegexpTagger + + +class ReadingCommand(metaclass=ABCMeta): + @abstractmethod + def parse_to_readings(self, sentence): + """ + :param sentence: the sentence to read + :type sentence: str + """ + + def process_thread(self, sentence_readings): + """ + This method should be used to handle dependencies between readings such + as resolving anaphora. + + :param sentence_readings: readings to process + :type sentence_readings: list(Expression) + :return: the list of readings after processing + :rtype: list(Expression) + """ + return sentence_readings + + @abstractmethod + def combine_readings(self, readings): + """ + :param readings: readings to combine + :type readings: list(Expression) + :return: one combined reading + :rtype: Expression + """ + + @abstractmethod + def to_fol(self, expression): + """ + Convert this expression into a First-Order Logic expression. + + :param expression: an expression + :type expression: Expression + :return: a FOL version of the input expression + :rtype: Expression + """ + + +class CfgReadingCommand(ReadingCommand): + def __init__(self, gramfile=None): + """ + :param gramfile: name of file where grammar can be loaded + :type gramfile: str + """ + self._gramfile = ( + gramfile if gramfile else "grammars/book_grammars/discourse.fcfg" + ) + self._parser = load_parser(self._gramfile) + + def parse_to_readings(self, sentence): + """:see: ReadingCommand.parse_to_readings()""" + from nltk.sem import root_semrep + + tokens = sentence.split() + trees = self._parser.parse(tokens) + return [root_semrep(tree) for tree in trees] + + def combine_readings(self, readings): + """:see: ReadingCommand.combine_readings()""" + return reduce(and_, readings) + + def to_fol(self, expression): + """:see: ReadingCommand.to_fol()""" + return expression + + +class DrtGlueReadingCommand(ReadingCommand): + def __init__(self, semtype_file=None, remove_duplicates=False, depparser=None): + """ + :param semtype_file: name of file where grammar can be loaded + :param remove_duplicates: should duplicates be removed? 
+ :param depparser: the dependency parser + """ + if semtype_file is None: + semtype_file = os.path.join( + "grammars", "sample_grammars", "drt_glue.semtype" + ) + self._glue = DrtGlue( + semtype_file=semtype_file, + remove_duplicates=remove_duplicates, + depparser=depparser, + ) + + def parse_to_readings(self, sentence): + """:see: ReadingCommand.parse_to_readings()""" + return self._glue.parse_to_meaning(sentence) + + def process_thread(self, sentence_readings): + """:see: ReadingCommand.process_thread()""" + try: + return [self.combine_readings(sentence_readings)] + except AnaphoraResolutionException: + return [] + + def combine_readings(self, readings): + """:see: ReadingCommand.combine_readings()""" + thread_reading = reduce(add, readings) + return resolve_anaphora(thread_reading.simplify()) + + def to_fol(self, expression): + """:see: ReadingCommand.to_fol()""" + return expression.fol() + + +class DiscourseTester: + """ + Check properties of an ongoing discourse. + """ + + def __init__(self, input, reading_command=None, background=None): + """ + Initialize a ``DiscourseTester``. + + :param input: the discourse sentences + :type input: list of str + :param background: Formulas which express background assumptions + :type background: list(Expression) + """ + self._input = input + self._sentences = {"s%s" % i: sent for i, sent in enumerate(input)} + self._models = None + self._readings = {} + self._reading_command = ( + reading_command if reading_command else CfgReadingCommand() + ) + self._threads = {} + self._filtered_threads = {} + if background is not None: + from nltk.sem.logic import Expression + + for e in background: + assert isinstance(e, Expression) + self._background = background + else: + self._background = [] + + ############################### + # Sentences + ############################### + + def sentences(self): + """ + Display the list of sentences in the current discourse. + """ + for id in sorted(self._sentences): + print(f"{id}: {self._sentences[id]}") + + def add_sentence(self, sentence, informchk=False, consistchk=False): + """ + Add a sentence to the current discourse. + + Updates ``self._input`` and ``self._sentences``. + :param sentence: An input sentence + :type sentence: str + :param informchk: if ``True``, check that the result of adding the sentence is thread-informative. Updates ``self._readings``. + :param consistchk: if ``True``, check that the result of adding the sentence is thread-consistent. Updates ``self._readings``. + + """ + # check whether the new sentence is informative (i.e. not entailed by the previous discourse) + if informchk: + self.readings(verbose=False) + for tid in sorted(self._threads): + assumptions = [reading for (rid, reading) in self.expand_threads(tid)] + assumptions += self._background + for sent_reading in self._get_readings(sentence): + tp = Prover9Command(goal=sent_reading, assumptions=assumptions) + if tp.prove(): + print( + "Sentence '%s' under reading '%s':" + % (sentence, str(sent_reading)) + ) + print("Not informative relative to thread '%s'" % tid) + + self._input.append(sentence) + self._sentences = {"s%s" % i: sent for i, sent in enumerate(self._input)} + # check whether adding the new sentence to the discourse preserves consistency (i.e. a model can be found for the combined set of + # of assumptions + if consistchk: + self.readings(verbose=False) + self.models(show=False) + + def retract_sentence(self, sentence, verbose=True): + """ + Remove a sentence from the current discourse. 
+ + Updates ``self._input``, ``self._sentences`` and ``self._readings``. + :param sentence: An input sentence + :type sentence: str + :param verbose: If ``True``, report on the updated list of sentences. + """ + try: + self._input.remove(sentence) + except ValueError: + print( + "Retraction failed. The sentence '%s' is not part of the current discourse:" + % sentence + ) + self.sentences() + return None + self._sentences = {"s%s" % i: sent for i, sent in enumerate(self._input)} + self.readings(verbose=False) + if verbose: + print("Current sentences are ") + self.sentences() + + def grammar(self): + """ + Print out the grammar in use for parsing input sentences + """ + show_cfg(self._reading_command._gramfile) + + ############################### + # Readings and Threads + ############################### + + def _get_readings(self, sentence): + """ + Build a list of semantic readings for a sentence. + + :rtype: list(Expression) + """ + return self._reading_command.parse_to_readings(sentence) + + def _construct_readings(self): + """ + Use ``self._sentences`` to construct a value for ``self._readings``. + """ + # re-initialize self._readings in case we have retracted a sentence + self._readings = {} + for sid in sorted(self._sentences): + sentence = self._sentences[sid] + readings = self._get_readings(sentence) + self._readings[sid] = { + f"{sid}-r{rid}": reading.simplify() + for rid, reading in enumerate(sorted(readings, key=str)) + } + + def _construct_threads(self): + """ + Use ``self._readings`` to construct a value for ``self._threads`` + and use the model builder to construct a value for ``self._filtered_threads`` + """ + thread_list = [[]] + for sid in sorted(self._readings): + thread_list = self.multiply(thread_list, sorted(self._readings[sid])) + self._threads = {"d%s" % tid: thread for tid, thread in enumerate(thread_list)} + # re-initialize the filtered threads + self._filtered_threads = {} + # keep the same ids, but only include threads which get models + consistency_checked = self._check_consistency(self._threads) + for (tid, thread) in self._threads.items(): + if (tid, True) in consistency_checked: + self._filtered_threads[tid] = thread + + def _show_readings(self, sentence=None): + """ + Print out the readings for the discourse (or a single sentence). 
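A usage sketch (editor's addition; it assumes the ``book_grammars`` data package is installed, since the default ``CfgReadingCommand`` loads ``discourse.fcfg``, and the calls print their results rather than returning them):

    >>> dt = DiscourseTester(['A boxer walks', 'Every boxer chases a girl'])  # doctest: +SKIP
    >>> dt.readings()                 # doctest: +SKIP
    >>> dt.readings(threaded=True)    # doctest: +SKIP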
+ """ + if sentence is not None: + print("The sentence '%s' has these readings:" % sentence) + for r in [str(reading) for reading in (self._get_readings(sentence))]: + print(" %s" % r) + else: + for sid in sorted(self._readings): + print() + print("%s readings:" % sid) + print() #'-' * 30 + for rid in sorted(self._readings[sid]): + lf = self._readings[sid][rid] + print(f"{rid}: {lf.normalize()}") + + def _show_threads(self, filter=False, show_thread_readings=False): + """ + Print out the value of ``self._threads`` or ``self._filtered_hreads`` + """ + threads = self._filtered_threads if filter else self._threads + for tid in sorted(threads): + if show_thread_readings: + readings = [ + self._readings[rid.split("-")[0]][rid] for rid in self._threads[tid] + ] + try: + thread_reading = ( + ": %s" + % self._reading_command.combine_readings(readings).normalize() + ) + except Exception as e: + thread_reading = ": INVALID: %s" % e.__class__.__name__ + else: + thread_reading = "" + + print("%s:" % tid, self._threads[tid], thread_reading) + + def readings( + self, + sentence=None, + threaded=False, + verbose=True, + filter=False, + show_thread_readings=False, + ): + """ + Construct and show the readings of the discourse (or of a single sentence). + + :param sentence: test just this sentence + :type sentence: str + :param threaded: if ``True``, print out each thread ID and the corresponding thread. + :param filter: if ``True``, only print out consistent thread IDs and threads. + """ + self._construct_readings() + self._construct_threads() + + # if we are filtering or showing thread readings, show threads + if filter or show_thread_readings: + threaded = True + + if verbose: + if not threaded: + self._show_readings(sentence=sentence) + else: + self._show_threads( + filter=filter, show_thread_readings=show_thread_readings + ) + + def expand_threads(self, thread_id, threads=None): + """ + Given a thread ID, find the list of ``logic.Expression`` objects corresponding to the reading IDs in that thread. + + :param thread_id: thread ID + :type thread_id: str + :param threads: a mapping from thread IDs to lists of reading IDs + :type threads: dict + :return: A list of pairs ``(rid, reading)`` where reading is the ``logic.Expression`` associated with a reading ID + :rtype: list of tuple + """ + if threads is None: + threads = self._threads + return [ + (rid, self._readings[sid][rid]) + for rid in threads[thread_id] + for sid in rid.split("-")[:1] + ] + + ############################### + # Models and Background + ############################### + + def _check_consistency(self, threads, show=False, verbose=False): + results = [] + for tid in sorted(threads): + assumptions = [ + reading for (rid, reading) in self.expand_threads(tid, threads=threads) + ] + assumptions = list( + map( + self._reading_command.to_fol, + self._reading_command.process_thread(assumptions), + ) + ) + if assumptions: + assumptions += self._background + # if Mace4 finds a model, it always seems to find it quickly + mb = MaceCommand(None, assumptions, max_models=20) + modelfound = mb.build_model() + else: + modelfound = False + results.append((tid, modelfound)) + if show: + spacer(80) + print("Model for Discourse Thread %s" % tid) + spacer(80) + if verbose: + for a in assumptions: + print(a) + spacer(80) + if modelfound: + print(mb.model(format="cooked")) + else: + print("No model found!\n") + return results + + def models(self, thread_id=None, show=True, verbose=False): + """ + Call Mace4 to build a model for each current discourse thread. 
+ + :param thread_id: thread ID + :type thread_id: str + :param show: If ``True``, display the model that has been found. + """ + self._construct_readings() + self._construct_threads() + threads = {thread_id: self._threads[thread_id]} if thread_id else self._threads + + for (tid, modelfound) in self._check_consistency( + threads, show=show, verbose=verbose + ): + idlist = [rid for rid in threads[tid]] + + if not modelfound: + print(f"Inconsistent discourse: {tid} {idlist}:") + for rid, reading in self.expand_threads(tid): + print(f" {rid}: {reading.normalize()}") + print() + else: + print(f"Consistent discourse: {tid} {idlist}:") + for rid, reading in self.expand_threads(tid): + print(f" {rid}: {reading.normalize()}") + print() + + def add_background(self, background, verbose=False): + """ + Add a list of background assumptions for reasoning about the discourse. + + When called, this method also updates the discourse model's set of readings and threads. + :param background: Formulas which contain background information + :type background: list(Expression) + """ + from nltk.sem.logic import Expression + + for (count, e) in enumerate(background): + assert isinstance(e, Expression) + if verbose: + print("Adding assumption %s to background" % count) + self._background.append(e) + + # update the state + self._construct_readings() + self._construct_threads() + + def background(self): + """ + Show the current background assumptions. + """ + for e in self._background: + print(str(e)) + + ############################### + # Misc + ############################### + + @staticmethod + def multiply(discourse, readings): + """ + Multiply every thread in ``discourse`` by every reading in ``readings``. + + Given discourse = [['A'], ['B']], readings = ['a', 'b', 'c'] , returns + [['A', 'a'], ['A', 'b'], ['A', 'c'], ['B', 'a'], ['B', 'b'], ['B', 'c']] + + :param discourse: the current list of readings + :type discourse: list of lists + :param readings: an additional list of readings + :type readings: list(Expression) + :rtype: A list of lists + """ + result = [] + for sublist in discourse: + for r in readings: + new = [] + new += sublist + new.append(r) + result.append(new) + return result + + +def load_fol(s): + """ + Temporarily duplicated from ``nltk.sem.util``. + Convert a file of first order formulas into a list of ``Expression`` objects. + + :param s: the contents of the file + :type s: str + :return: a list of parsed formulas. 
+ :rtype: list(Expression) + """ + statements = [] + for linenum, line in enumerate(s.splitlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + statements.append(Expression.fromstring(line)) + except Exception as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + return statements + + +############################### +# Demo +############################### +def discourse_demo(reading_command=None): + """ + Illustrate the various methods of ``DiscourseTester`` + """ + dt = DiscourseTester( + ["A boxer walks", "Every boxer chases a girl"], reading_command + ) + dt.models() + print() + # dt.grammar() + print() + dt.sentences() + print() + dt.readings() + print() + dt.readings(threaded=True) + print() + dt.models("d1") + dt.add_sentence("John is a boxer") + print() + dt.sentences() + print() + dt.readings(threaded=True) + print() + dt = DiscourseTester( + ["A student dances", "Every student is a person"], reading_command + ) + print() + dt.add_sentence("No person dances", consistchk=True) + print() + dt.readings() + print() + dt.retract_sentence("No person dances", verbose=True) + print() + dt.models() + print() + dt.readings("A person dances") + print() + dt.add_sentence("A person dances", informchk=True) + dt = DiscourseTester( + ["Vincent is a boxer", "Fido is a boxer", "Vincent is married", "Fido barks"], + reading_command, + ) + dt.readings(filter=True) + import nltk.data + + background_file = os.path.join("grammars", "book_grammars", "background.fol") + background = nltk.data.load(background_file) + + print() + dt.add_background(background, verbose=False) + dt.background() + print() + dt.readings(filter=True) + print() + dt.models() + + +def drt_discourse_demo(reading_command=None): + """ + Illustrate the various methods of ``DiscourseTester`` + """ + dt = DiscourseTester(["every dog chases a boy", "he runs"], reading_command) + dt.models() + print() + dt.sentences() + print() + dt.readings() + print() + dt.readings(show_thread_readings=True) + print() + dt.readings(filter=True, show_thread_readings=True) + + +def spacer(num=30): + print("-" * num) + + +def demo(): + discourse_demo() + + tagger = RegexpTagger( + [ + ("^(chases|runs)$", "VB"), + ("^(a)$", "ex_quant"), + ("^(every)$", "univ_quant"), + ("^(dog|boy)$", "NN"), + ("^(he)$", "PRP"), + ] + ) + depparser = MaltParser(tagger=tagger) + drt_discourse_demo( + DrtGlueReadingCommand(remove_duplicates=False, depparser=depparser) + ) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/mace.py b/env-llmeval/lib/python3.10/site-packages/nltk/inference/mace.py new file mode 100644 index 0000000000000000000000000000000000000000..ee4d9e8e38d7db34c4b58f9c37dee330d397e123 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/inference/mace.py @@ -0,0 +1,383 @@ +# Natural Language Toolkit: Interface to the Mace4 Model Builder +# +# Author: Dan Garrette +# Ewan Klein + +# URL: +# For license information, see LICENSE.TXT + +""" +A model builder that makes use of the external 'Mace4' package. +""" + +import os +import tempfile + +from nltk.inference.api import BaseModelBuilderCommand, ModelBuilder +from nltk.inference.prover9 import Prover9CommandParent, Prover9Parent +from nltk.sem import Expression, Valuation +from nltk.sem.logic import is_indvar + + +class MaceCommand(Prover9CommandParent, BaseModelBuilderCommand): + """ + A ``MaceCommand`` specific to the ``Mace`` model builder. 
It contains + a print_assumptions() method that is used to print the list + of assumptions in multiple formats. + """ + + _interpformat_bin = None + + def __init__(self, goal=None, assumptions=None, max_models=500, model_builder=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + :param max_models: The maximum number of models that Mace will try before + simply returning false. (Use 0 for no maximum.) + :type max_models: int + """ + if model_builder is not None: + assert isinstance(model_builder, Mace) + else: + model_builder = Mace(max_models) + + BaseModelBuilderCommand.__init__(self, model_builder, goal, assumptions) + + @property + def valuation(mbc): + return mbc.model("valuation") + + def _convert2val(self, valuation_str): + """ + Transform the output file into an NLTK-style Valuation. + + :return: A model if one is generated; None otherwise. + :rtype: sem.Valuation + """ + valuation_standard_format = self._transform_output(valuation_str, "standard") + + val = [] + for line in valuation_standard_format.splitlines(False): + l = line.strip() + + if l.startswith("interpretation"): + # find the number of entities in the model + num_entities = int(l[l.index("(") + 1 : l.index(",")].strip()) + + elif l.startswith("function") and l.find("_") == -1: + # replace the integer identifier with a corresponding alphabetic character + name = l[l.index("(") + 1 : l.index(",")].strip() + if is_indvar(name): + name = name.upper() + value = int(l[l.index("[") + 1 : l.index("]")].strip()) + val.append((name, MaceCommand._make_model_var(value))) + + elif l.startswith("relation"): + l = l[l.index("(") + 1 :] + if "(" in l: + # relation is not nullary + name = l[: l.index("(")].strip() + values = [ + int(v.strip()) + for v in l[l.index("[") + 1 : l.index("]")].split(",") + ] + val.append( + (name, MaceCommand._make_relation_set(num_entities, values)) + ) + else: + # relation is nullary + name = l[: l.index(",")].strip() + value = int(l[l.index("[") + 1 : l.index("]")].strip()) + val.append((name, value == 1)) + + return Valuation(val) + + @staticmethod + def _make_relation_set(num_entities, values): + """ + Convert a Mace4-style relation table into a dictionary. + + :param num_entities: the number of entities in the model; determines the row length in the table. + :type num_entities: int + :param values: a list of 1's and 0's that represent whether a relation holds in a Mace4 model. + :type values: list of int + """ + r = set() + for position in [pos for (pos, v) in enumerate(values) if v == 1]: + r.add( + tuple(MaceCommand._make_relation_tuple(position, values, num_entities)) + ) + return r + + @staticmethod + def _make_relation_tuple(position, values, num_entities): + if len(values) == 1: + return [] + else: + sublist_size = len(values) // num_entities + sublist_start = position // sublist_size + sublist_position = int(position % sublist_size) + + sublist = values[ + sublist_start * sublist_size : (sublist_start + 1) * sublist_size + ] + return [ + MaceCommand._make_model_var(sublist_start) + ] + MaceCommand._make_relation_tuple( + sublist_position, sublist, num_entities + ) + + @staticmethod + def _make_model_var(value): + """ + Pick an alphabetic character as identifier for an entity in the model. 
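For example (editor's illustration):

    >>> MaceCommand._make_model_var(0)
    'a'
    >>> MaceCommand._make_model_var(3)
    'd'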
+ + :param value: where to index into the list of characters + :type value: int + """ + letter = [ + "a", + "b", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + ][value] + num = value // 26 + return letter + str(num) if num > 0 else letter + + def _decorate_model(self, valuation_str, format): + """ + Print out a Mace4 model using any Mace4 ``interpformat`` format. + See https://www.cs.unm.edu/~mccune/mace4/manual/ for details. + + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + models. Defaults to 'standard' format. + :return: str + """ + if not format: + return valuation_str + elif format == "valuation": + return self._convert2val(valuation_str) + else: + return self._transform_output(valuation_str, format) + + def _transform_output(self, valuation_str, format): + """ + Transform the output file into any Mace4 ``interpformat`` format. + + :param format: Output format for displaying models. + :type format: str + """ + if format in [ + "standard", + "standard2", + "portable", + "tabular", + "raw", + "cooked", + "xml", + "tex", + ]: + return self._call_interpformat(valuation_str, [format])[0] + else: + raise LookupError("The specified format does not exist") + + def _call_interpformat(self, input_str, args=[], verbose=False): + """ + Call the ``interpformat`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._interpformat_bin is None: + self._interpformat_bin = self._modelbuilder._find_binary( + "interpformat", verbose + ) + + return self._modelbuilder._call( + input_str, self._interpformat_bin, args, verbose + ) + + +class Mace(Prover9Parent, ModelBuilder): + _mace4_bin = None + + def __init__(self, end_size=500): + self._end_size = end_size + """The maximum model size that Mace will try before + simply returning false. (Use -1 for no maximum.)""" + + def _build_model(self, goal=None, assumptions=None, verbose=False): + """ + Use Mace4 to build a first order model. + + :return: ``True`` if a model was found (i.e. Mace returns value of 0), + else ``False`` + """ + if not assumptions: + assumptions = [] + + stdout, returncode = self._call_mace4( + self.prover9_input(goal, assumptions), verbose=verbose + ) + return (returncode == 0, stdout) + + def _call_mace4(self, input_str, args=[], verbose=False): + """ + Call the ``mace4`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._mace4_bin is None: + self._mace4_bin = self._find_binary("mace4", verbose) + + updated_input_str = "" + if self._end_size > 0: + updated_input_str += "assign(end_size, %d).\n\n" % self._end_size + updated_input_str += input_str + + return self._call(updated_input_str, self._mace4_bin, args, verbose) + + +def spacer(num=30): + print("-" * num) + + +def decode_result(found): + """ + Decode the result of model_found() + + :param found: The output of model_found() + :type found: bool + """ + return {True: "Countermodel found", False: "No countermodel found", None: "None"}[ + found + ] + + +def test_model_found(arguments): + """ + Try some proofs and exhibit the results. 
+ """ + for (goal, assumptions) in arguments: + g = Expression.fromstring(goal) + alist = [lp.parse(a) for a in assumptions] + m = MaceCommand(g, assumptions=alist, max_models=50) + found = m.build_model() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {decode_result(found)}\n") + + +def test_build_model(arguments): + """ + Try to build a ``nltk.sem.Valuation``. + """ + g = Expression.fromstring("all x.man(x)") + alist = [ + Expression.fromstring(a) + for a in [ + "man(John)", + "man(Socrates)", + "man(Bill)", + "some x.(-(x = John) & man(x) & sees(John,x))", + "some x.(-(x = Bill) & man(x))", + "all x.some y.(man(x) -> gives(Socrates,x,y))", + ] + ] + + m = MaceCommand(g, assumptions=alist) + m.build_model() + spacer() + print("Assumptions and Goal") + spacer() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {decode_result(m.build_model())}\n") + spacer() + # print(m.model('standard')) + # print(m.model('cooked')) + print("Valuation") + spacer() + print(m.valuation, "\n") + + +def test_transform_output(argument_pair): + """ + Transform the model into various Mace4 ``interpformat`` formats. + """ + g = Expression.fromstring(argument_pair[0]) + alist = [lp.parse(a) for a in argument_pair[1]] + m = MaceCommand(g, assumptions=alist) + m.build_model() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {m.build_model()}\n") + for format in ["standard", "portable", "xml", "cooked"]: + spacer() + print("Using '%s' format" % format) + spacer() + print(m.model(format=format)) + + +def test_make_relation_set(): + print( + MaceCommand._make_relation_set(num_entities=3, values=[1, 0, 1]) + == {("c",), ("a",)} + ) + print( + MaceCommand._make_relation_set( + num_entities=3, values=[0, 0, 0, 0, 0, 0, 1, 0, 0] + ) + == {("c", "a")} + ) + print( + MaceCommand._make_relation_set(num_entities=2, values=[0, 0, 1, 0, 0, 0, 1, 0]) + == {("a", "b", "a"), ("b", "b", "a")} + ) + + +arguments = [ + ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), + ("(not mortal(Socrates))", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), +] + + +def demo(): + test_model_found(arguments) + test_build_model(arguments) + test_transform_output(arguments[1]) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py b/env-llmeval/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7075ed11e7833201ad98c6fc80406d1ef646db --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py @@ -0,0 +1,561 @@ +# Natural Language Toolkit: Nonmonotonic Reasoning +# +# Author: Daniel H. Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +A module to perform nonmonotonic reasoning. The ideas and demonstrations in +this module are based on "Logical Foundations of Artificial Intelligence" by +Michael R. Genesereth and Nils J. Nilsson. 
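For illustration (an editorial sketch, not part of the module): the decorators below all start from the same domain, which ``get_domain`` computes as the constants occurring in the assumptions plus the negated goal:

    >>> from nltk.sem import Expression
    >>> from nltk.inference.nonmonotonic import get_domain
    >>> read_expr = Expression.fromstring
    >>> sorted(str(c) for c in get_domain(read_expr('walk(Socrates)'),
    ...                                   [read_expr('see(Bill, Socrates)')]))
    ['Bill', 'Socrates']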
+""" + +from collections import defaultdict +from functools import reduce + +from nltk.inference.api import Prover, ProverCommandDecorator +from nltk.inference.prover9 import Prover9, Prover9Command +from nltk.sem.logic import ( + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + BooleanExpression, + EqualityExpression, + ExistsExpression, + Expression, + ImpExpression, + NegatedExpression, + Variable, + VariableExpression, + operator, + unique_variable, +) + + +class ProverParseError(Exception): + pass + + +def get_domain(goal, assumptions): + if goal is None: + all_expressions = assumptions + else: + all_expressions = assumptions + [-goal] + return reduce(operator.or_, (a.constants() for a in all_expressions), set()) + + +class ClosedDomainProver(ProverCommandDecorator): + """ + This is a prover decorator that adds domain closure assumptions before + proving. + """ + + def assumptions(self): + assumptions = [a for a in self._command.assumptions()] + goal = self._command.goal() + domain = get_domain(goal, assumptions) + return [self.replace_quants(ex, domain) for ex in assumptions] + + def goal(self): + goal = self._command.goal() + domain = get_domain(goal, self._command.assumptions()) + return self.replace_quants(goal, domain) + + def replace_quants(self, ex, domain): + """ + Apply the closed domain assumption to the expression + + - Domain = union([e.free()|e.constants() for e in all_expressions]) + - translate "exists x.P" to "(z=d1 | z=d2 | ... ) & P.replace(x,z)" OR + "P.replace(x, d1) | P.replace(x, d2) | ..." + - translate "all x.P" to "P.replace(x, d1) & P.replace(x, d2) & ..." + + :param ex: ``Expression`` + :param domain: set of {Variable}s + :return: ``Expression`` + """ + if isinstance(ex, AllExpression): + conjuncts = [ + ex.term.replace(ex.variable, VariableExpression(d)) for d in domain + ] + conjuncts = [self.replace_quants(c, domain) for c in conjuncts] + return reduce(lambda x, y: x & y, conjuncts) + elif isinstance(ex, BooleanExpression): + return ex.__class__( + self.replace_quants(ex.first, domain), + self.replace_quants(ex.second, domain), + ) + elif isinstance(ex, NegatedExpression): + return -self.replace_quants(ex.term, domain) + elif isinstance(ex, ExistsExpression): + disjuncts = [ + ex.term.replace(ex.variable, VariableExpression(d)) for d in domain + ] + disjuncts = [self.replace_quants(d, domain) for d in disjuncts] + return reduce(lambda x, y: x | y, disjuncts) + else: + return ex + + +class UniqueNamesProver(ProverCommandDecorator): + """ + This is a prover decorator that adds unique names assumptions before + proving. + """ + + def assumptions(self): + """ + - Domain = union([e.free()|e.constants() for e in all_expressions]) + - if "d1 = d2" cannot be proven from the premises, then add "d1 != d2" + """ + assumptions = self._command.assumptions() + + domain = list(get_domain(self._command.goal(), assumptions)) + + # build a dictionary of obvious equalities + eq_sets = SetHolder() + for a in assumptions: + if isinstance(a, EqualityExpression): + av = a.first.variable + bv = a.second.variable + # put 'a' and 'b' in the same set + eq_sets[av].add(bv) + + new_assumptions = [] + for i, a in enumerate(domain): + for b in domain[i + 1 :]: + # if a and b are not already in the same equality set + if b not in eq_sets[a]: + newEqEx = EqualityExpression( + VariableExpression(a), VariableExpression(b) + ) + if Prover9().prove(newEqEx, assumptions): + # we can prove that the names are the same entity. 
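A small sketch of what ``ClosedDomainProver`` does to a quantified assumption; constructing the command does not invoke the external Prover9 binary (only ``prove()`` would). The domain here is just {Socrates}, so the existential collapses to a single disjunct:

    >>> from nltk.sem import Expression
    >>> from nltk.inference.prover9 import Prover9Command
    >>> from nltk.inference.nonmonotonic import ClosedDomainProver
    >>> read_expr = Expression.fromstring
    >>> cmd = Prover9Command(read_expr('walk(Socrates)'),
    ...                      [read_expr('exists x.walk(x)'),
    ...                       read_expr('man(Socrates)')])
    >>> for a in ClosedDomainProver(cmd).assumptions():
    ...     print(a)
    walk(Socrates)
    man(Socrates)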
+ # remember that they are equal so we don't re-check. + eq_sets[a].add(b) + else: + # we can't prove it, so assume unique names + new_assumptions.append(-newEqEx) + + return assumptions + new_assumptions + + +class SetHolder(list): + """ + A list of sets of Variables. + """ + + def __getitem__(self, item): + """ + :param item: ``Variable`` + :return: the set containing 'item' + """ + assert isinstance(item, Variable) + for s in self: + if item in s: + return s + # item is not found in any existing set. so create a new set + new = {item} + self.append(new) + return new + + +class ClosedWorldProver(ProverCommandDecorator): + """ + This is a prover decorator that completes predicates before proving. + + If the assumptions contain "P(A)", then "all x.(P(x) -> (x=A))" is the completion of "P". + If the assumptions contain "all x.(ostrich(x) -> bird(x))", then "all x.(bird(x) -> ostrich(x))" is the completion of "bird". + If the assumptions don't contain anything that are "P", then "all x.-P(x)" is the completion of "P". + + walk(Socrates) + Socrates != Bill + + all x.(walk(x) -> (x=Socrates)) + ---------------- + -walk(Bill) + + see(Socrates, John) + see(John, Mary) + Socrates != John + John != Mary + + all x.all y.(see(x,y) -> ((x=Socrates & y=John) | (x=John & y=Mary))) + ---------------- + -see(Socrates, Mary) + + all x.(ostrich(x) -> bird(x)) + bird(Tweety) + -ostrich(Sam) + Sam != Tweety + + all x.(bird(x) -> (ostrich(x) | x=Tweety)) + + all x.-ostrich(x) + ------------------- + -bird(Sam) + """ + + def assumptions(self): + assumptions = self._command.assumptions() + + predicates = self._make_predicate_dict(assumptions) + + new_assumptions = [] + for p in predicates: + predHolder = predicates[p] + new_sig = self._make_unique_signature(predHolder) + new_sig_exs = [VariableExpression(v) for v in new_sig] + + disjuncts = [] + + # Turn the signatures into disjuncts + for sig in predHolder.signatures: + equality_exs = [] + for v1, v2 in zip(new_sig_exs, sig): + equality_exs.append(EqualityExpression(v1, v2)) + disjuncts.append(reduce(lambda x, y: x & y, equality_exs)) + + # Turn the properties into disjuncts + for prop in predHolder.properties: + # replace variables from the signature with new sig variables + bindings = {} + for v1, v2 in zip(new_sig_exs, prop[0]): + bindings[v2] = v1 + disjuncts.append(prop[1].substitute_bindings(bindings)) + + # make the assumption + if disjuncts: + # disjuncts exist, so make an implication + antecedent = self._make_antecedent(p, new_sig) + consequent = reduce(lambda x, y: x | y, disjuncts) + accum = ImpExpression(antecedent, consequent) + else: + # nothing has property 'p' + accum = NegatedExpression(self._make_antecedent(p, new_sig)) + + # quantify the implication + for new_sig_var in new_sig[::-1]: + accum = AllExpression(new_sig_var, accum) + new_assumptions.append(accum) + + return assumptions + new_assumptions + + def _make_unique_signature(self, predHolder): + """ + This method figures out how many arguments the predicate takes and + returns a tuple containing that number of unique variables. + """ + return tuple(unique_variable() for i in range(predHolder.signature_len)) + + def _make_antecedent(self, predicate, signature): + """ + Return an application expression with 'predicate' as the predicate + and 'signature' as the list of arguments. 
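A sketch of ``UniqueNamesProver``: for the pair of constants below it cannot prove ``Bill = Socrates``, so it assumes they are distinct. Note that computing these assumptions calls the external Prover9 binary, and the exact form of the added inequality depends on set iteration order, hence the skips:

    >>> from nltk.sem import Expression
    >>> from nltk.inference.prover9 import Prover9Command
    >>> from nltk.inference.nonmonotonic import UniqueNamesProver
    >>> read_expr = Expression.fromstring
    >>> cmd = Prover9Command(read_expr('exists x.exists y.(x != y)'),
    ...                      [read_expr('man(Socrates)'), read_expr('man(Bill)')])
    >>> unp = UniqueNamesProver(cmd)
    >>> for a in unp.assumptions():    # doctest: +SKIP
    ...     print(a)
    man(Socrates)
    man(Bill)
    -(Bill = Socrates)
    >>> unp.prove()                    # doctest: +SKIP
    True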
+ """ + antecedent = predicate + for v in signature: + antecedent = antecedent(VariableExpression(v)) + return antecedent + + def _make_predicate_dict(self, assumptions): + """ + Create a dictionary of predicates from the assumptions. + + :param assumptions: a list of ``Expression``s + :return: dict mapping ``AbstractVariableExpression`` to ``PredHolder`` + """ + predicates = defaultdict(PredHolder) + for a in assumptions: + self._map_predicates(a, predicates) + return predicates + + def _map_predicates(self, expression, predDict): + if isinstance(expression, ApplicationExpression): + func, args = expression.uncurry() + if isinstance(func, AbstractVariableExpression): + predDict[func].append_sig(tuple(args)) + elif isinstance(expression, AndExpression): + self._map_predicates(expression.first, predDict) + self._map_predicates(expression.second, predDict) + elif isinstance(expression, AllExpression): + # collect all the universally quantified variables + sig = [expression.variable] + term = expression.term + while isinstance(term, AllExpression): + sig.append(term.variable) + term = term.term + if isinstance(term, ImpExpression): + if isinstance(term.first, ApplicationExpression) and isinstance( + term.second, ApplicationExpression + ): + func1, args1 = term.first.uncurry() + func2, args2 = term.second.uncurry() + if ( + isinstance(func1, AbstractVariableExpression) + and isinstance(func2, AbstractVariableExpression) + and sig == [v.variable for v in args1] + and sig == [v.variable for v in args2] + ): + predDict[func2].append_prop((tuple(sig), term.first)) + predDict[func1].validate_sig_len(sig) + + +class PredHolder: + """ + This class will be used by a dictionary that will store information + about predicates to be used by the ``ClosedWorldProver``. + + The 'signatures' property is a list of tuples defining signatures for + which the predicate is true. For instance, 'see(john, mary)' would be + result in the signature '(john,mary)' for 'see'. + + The second element of the pair is a list of pairs such that the first + element of the pair is a tuple of variables and the second element is an + expression of those variables that makes the predicate true. For instance, + 'all x.all y.(see(x,y) -> know(x,y))' would result in "((x,y),('see(x,y)'))" + for 'know'. 
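A sketch of the completion ``ClosedWorldProver`` generates for the ``walk(Socrates)`` case from its docstring. No external binary is needed to list the assumptions, but the bound variable it introduces comes from ``unique_variable()``, so the exact ``z``-number varies from run to run (hence the skip):

    >>> from nltk.sem import Expression
    >>> from nltk.inference.prover9 import Prover9Command
    >>> from nltk.inference.nonmonotonic import ClosedWorldProver
    >>> read_expr = Expression.fromstring
    >>> cmd = Prover9Command(read_expr('-walk(Bill)'),
    ...                      [read_expr('walk(Socrates)'),
    ...                       read_expr('(Socrates != Bill)')])
    >>> for a in ClosedWorldProver(cmd).assumptions():   # doctest: +SKIP
    ...     print(a)
    walk(Socrates)
    -(Socrates = Bill)
    all z1.(walk(z1) -> (z1 = Socrates))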
+ """ + + def __init__(self): + self.signatures = [] + self.properties = [] + self.signature_len = None + + def append_sig(self, new_sig): + self.validate_sig_len(new_sig) + self.signatures.append(new_sig) + + def append_prop(self, new_prop): + self.validate_sig_len(new_prop[0]) + self.properties.append(new_prop) + + def validate_sig_len(self, new_sig): + if self.signature_len is None: + self.signature_len = len(new_sig) + elif self.signature_len != len(new_sig): + raise Exception("Signature lengths do not match") + + def __str__(self): + return f"({self.signatures},{self.properties},{self.signature_len})" + + def __repr__(self): + return "%s" % self + + +def closed_domain_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + p3 = lexpr(r"-walk(Bill)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + p3 = lexpr(r"-walk(Bill)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"walk(Socrates)") + p2 = lexpr(r"walk(Bill)") + c = lexpr(r"all x.walk(x)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"girl(mary)") + p2 = lexpr(r"dog(rover)") + p3 = lexpr(r"all x.(girl(x) -> -dog(x))") + p4 = lexpr(r"all x.(dog(x) -> -girl(x))") + p5 = lexpr(r"chase(mary, rover)") + c = lexpr(r"exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))") + prover = Prover9Command(c, [p1, p2, p3, p4, p5]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + +def unique_names_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"man(Socrates)") + p2 = lexpr(r"man(Bill)") + c = lexpr(r"exists x.exists y.(x != y)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + unp = UniqueNamesProver(prover) + print("assumptions:") + for a in unp.assumptions(): + print(" ", a) + print("goal:", unp.goal()) + print(unp.prove()) + + p1 = lexpr(r"all x.(walk(x) -> (x = Socrates))") + p2 = lexpr(r"Bill = William") + p3 = lexpr(r"Bill = Billy") + c = lexpr(r"-walk(William)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + unp = UniqueNamesProver(prover) + print("assumptions:") + for a in unp.assumptions(): + print(" ", a) + print("goal:", unp.goal()) + print(unp.prove()) + + +def closed_world_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"walk(Socrates)") + p2 = lexpr(r"(Socrates != Bill)") + c = lexpr(r"-walk(Bill)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") 
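As an aside, ``PredHolder`` (defined above) is pure bookkeeping and easy to poke at directly. Real callers pass tuples of argument expressions; plain strings are used here only to show the length check:

    >>> from nltk.inference.nonmonotonic import PredHolder
    >>> h = PredHolder()
    >>> h.append_sig(('john', 'mary'))
    >>> h.signature_len
    2
    >>> h.append_sig(('sue',))
    Traceback (most recent call last):
        ...
    Exception: Signature lengths do not match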
+ for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + p1 = lexpr(r"see(Socrates, John)") + p2 = lexpr(r"see(John, Mary)") + p3 = lexpr(r"(Socrates != John)") + p4 = lexpr(r"(John != Mary)") + c = lexpr(r"-see(Socrates, Mary)") + prover = Prover9Command(c, [p1, p2, p3, p4]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") + for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + p1 = lexpr(r"all x.(ostrich(x) -> bird(x))") + p2 = lexpr(r"bird(Tweety)") + p3 = lexpr(r"-ostrich(Sam)") + p4 = lexpr(r"Sam != Tweety") + c = lexpr(r"-bird(Sam)") + prover = Prover9Command(c, [p1, p2, p3, p4]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") + for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + +def combination_prover_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"see(Socrates, John)") + p2 = lexpr(r"see(John, Mary)") + c = lexpr(r"-see(Socrates, Mary)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + command = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover))) + for a in command.assumptions(): + print(a) + print(command.prove()) + + +def default_reasoning_demo(): + lexpr = Expression.fromstring + + premises = [] + + # define taxonomy + premises.append(lexpr(r"all x.(elephant(x) -> animal(x))")) + premises.append(lexpr(r"all x.(bird(x) -> animal(x))")) + premises.append(lexpr(r"all x.(dove(x) -> bird(x))")) + premises.append(lexpr(r"all x.(ostrich(x) -> bird(x))")) + premises.append(lexpr(r"all x.(flying_ostrich(x) -> ostrich(x))")) + + # default properties + premises.append( + lexpr(r"all x.((animal(x) & -Ab1(x)) -> -fly(x))") + ) # normal animals don't fly + premises.append( + lexpr(r"all x.((bird(x) & -Ab2(x)) -> fly(x))") + ) # normal birds fly + premises.append( + lexpr(r"all x.((ostrich(x) & -Ab3(x)) -> -fly(x))") + ) # normal ostriches don't fly + + # specify abnormal entities + premises.append(lexpr(r"all x.(bird(x) -> Ab1(x))")) # flight + premises.append(lexpr(r"all x.(ostrich(x) -> Ab2(x))")) # non-flying bird + premises.append(lexpr(r"all x.(flying_ostrich(x) -> Ab3(x))")) # flying ostrich + + # define entities + premises.append(lexpr(r"elephant(E)")) + premises.append(lexpr(r"dove(D)")) + premises.append(lexpr(r"ostrich(O)")) + + # print the assumptions + prover = Prover9Command(None, premises) + command = UniqueNamesProver(ClosedWorldProver(prover)) + for a in command.assumptions(): + print(a) + + print_proof("-fly(E)", premises) + print_proof("fly(D)", premises) + print_proof("-fly(O)", premises) + + +def print_proof(goal, premises): + lexpr = Expression.fromstring + prover = Prover9Command(lexpr(goal), premises) + command = UniqueNamesProver(ClosedWorldProver(prover)) + print(goal, prover.prove(), command.prove()) + + +def demo(): + closed_domain_demo() + unique_names_demo() + closed_world_demo() + combination_prover_demo() + default_reasoning_demo() + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/prover9.py b/env-llmeval/lib/python3.10/site-packages/nltk/inference/prover9.py new file mode 100644 index 0000000000000000000000000000000000000000..73345f27473f011a7628c91834606f6e1f532044 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/inference/prover9.py @@ -0,0 +1,508 @@ +# Natural Language Toolkit: Interface to the Prover9 Theorem Prover +# +# Copyright (C) 2001-2023 
NLTK Project +# Author: Dan Garrette +# Ewan Klein +# +# URL: +# For license information, see LICENSE.TXT +""" +A theorem prover that makes use of the external 'Prover9' package. +""" + +import os +import subprocess + +import nltk +from nltk.inference.api import BaseProverCommand, Prover +from nltk.sem.logic import ( + AllExpression, + AndExpression, + EqualityExpression, + ExistsExpression, + Expression, + IffExpression, + ImpExpression, + NegatedExpression, + OrExpression, +) + +# +# Following is not yet used. Return code for 2 actually realized as 512. +# +p9_return_codes = { + 0: True, + 1: "(FATAL)", # A fatal error occurred (user's syntax error). + 2: False, # (SOS_EMPTY) Prover9 ran out of things to do + # (sos list exhausted). + 3: "(MAX_MEGS)", # The max_megs (memory limit) parameter was exceeded. + 4: "(MAX_SECONDS)", # The max_seconds parameter was exceeded. + 5: "(MAX_GIVEN)", # The max_given parameter was exceeded. + 6: "(MAX_KEPT)", # The max_kept parameter was exceeded. + 7: "(ACTION)", # A Prover9 action terminated the search. + 101: "(SIGSEGV)", # Prover9 crashed, most probably due to a bug. +} + + +class Prover9CommandParent: + """ + A common base class used by both ``Prover9Command`` and ``MaceCommand``, + which is responsible for maintaining a goal and a set of assumptions, + and generating prover9-style input files from them. + """ + + def print_assumptions(self, output_format="nltk"): + """ + Print the list of the current assumptions. + """ + if output_format.lower() == "nltk": + for a in self.assumptions(): + print(a) + elif output_format.lower() == "prover9": + for a in convert_to_prover9(self.assumptions()): + print(a) + else: + raise NameError( + "Unrecognized value for 'output_format': %s" % output_format + ) + + +class Prover9Command(Prover9CommandParent, BaseProverCommand): + """ + A ``ProverCommand`` specific to the ``Prover9`` prover. It contains + the a print_assumptions() method that is used to print the list + of assumptions in multiple formats. + """ + + def __init__(self, goal=None, assumptions=None, timeout=60, prover=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + :param timeout: number of seconds before timeout; set to 0 for + no timeout. + :type timeout: int + :param prover: a prover. If not set, one will be created. + :type prover: Prover9 + """ + if not assumptions: + assumptions = [] + + if prover is not None: + assert isinstance(prover, Prover9) + else: + prover = Prover9(timeout) + + BaseProverCommand.__init__(self, prover, goal, assumptions) + + def decorate_proof(self, proof_string, simplify=True): + """ + :see BaseProverCommand.decorate_proof() + """ + if simplify: + return self._prover._call_prooftrans(proof_string, ["striplabels"])[ + 0 + ].rstrip() + else: + return proof_string.rstrip() + + +class Prover9Parent: + """ + A common class extended by both ``Prover9`` and ``Mace ``. + It contains the functionality required to convert NLTK-style + expressions into Prover9-style expressions. 
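A sketch of the usual round trip with ``Prover9Command`` (illustrative, not part of the patched file): printing the assumptions in Prover9 syntax needs no binary, whereas ``prove()`` shells out to the Prover9 executable, hence the skip:

    >>> from nltk.sem import Expression
    >>> from nltk.inference.prover9 import Prover9Command
    >>> read_expr = Expression.fromstring
    >>> p = Prover9Command(read_expr('mortal(Socrates)'),
    ...                    [read_expr('all x.(man(x) -> mortal(x))'),
    ...                     read_expr('man(Socrates)')])
    >>> p.print_assumptions(output_format='prover9')
    all x (man(x) -> mortal(x))
    man(Socrates)
    >>> p.prove()    # doctest: +SKIP
    True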
+ """ + + _binary_location = None + + def config_prover9(self, binary_location, verbose=False): + if binary_location is None: + self._binary_location = None + self._prover9_bin = None + else: + name = "prover9" + self._prover9_bin = nltk.internals.find_binary( + name, + path_to_bin=binary_location, + env_vars=["PROVER9"], + url="https://www.cs.unm.edu/~mccune/prover9/", + binary_names=[name, name + ".exe"], + verbose=verbose, + ) + self._binary_location = self._prover9_bin.rsplit(os.path.sep, 1) + + def prover9_input(self, goal, assumptions): + """ + :return: The input string that should be provided to the + prover9 binary. This string is formed based on the goal, + assumptions, and timeout value of this object. + """ + s = "" + + if assumptions: + s += "formulas(assumptions).\n" + for p9_assumption in convert_to_prover9(assumptions): + s += " %s.\n" % p9_assumption + s += "end_of_list.\n\n" + + if goal: + s += "formulas(goals).\n" + s += " %s.\n" % convert_to_prover9(goal) + s += "end_of_list.\n\n" + + return s + + def binary_locations(self): + """ + A list of directories that should be searched for the prover9 + executables. This list is used by ``config_prover9`` when searching + for the prover9 executables. + """ + return [ + "/usr/local/bin/prover9", + "/usr/local/bin/prover9/bin", + "/usr/local/bin", + "/usr/bin", + "/usr/local/prover9", + "/usr/local/share/prover9", + ] + + def _find_binary(self, name, verbose=False): + binary_locations = self.binary_locations() + if self._binary_location is not None: + binary_locations += [self._binary_location] + return nltk.internals.find_binary( + name, + searchpath=binary_locations, + env_vars=["PROVER9"], + url="https://www.cs.unm.edu/~mccune/prover9/", + binary_names=[name, name + ".exe"], + verbose=verbose, + ) + + def _call(self, input_str, binary, args=[], verbose=False): + """ + Call the binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param binary: The location of the binary to call + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if verbose: + print("Calling:", binary) + print("Args:", args) + print("Input:\n", input_str, "\n") + + # Call prover9 via a subprocess + cmd = [binary] + args + try: + input_str = input_str.encode("utf8") + except AttributeError: + pass + p = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE + ) + (stdout, stderr) = p.communicate(input=input_str) + + if verbose: + print("Return code:", p.returncode) + if stdout: + print("stdout:\n", stdout, "\n") + if stderr: + print("stderr:\n", stderr, "\n") + + return (stdout.decode("utf-8"), p.returncode) + + +def convert_to_prover9(input): + """ + Convert a ``logic.Expression`` to Prover9 format. + """ + if isinstance(input, list): + result = [] + for s in input: + try: + result.append(_convert_to_prover9(s.simplify())) + except: + print("input %s cannot be converted to Prover9 input syntax" % input) + raise + return result + else: + try: + return _convert_to_prover9(input.simplify()) + except: + print("input %s cannot be converted to Prover9 input syntax" % input) + raise + + +def _convert_to_prover9(expression): + """ + Convert ``logic.Expression`` to Prover9 formatted string. 
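For illustration, a couple of conversions (pure string rewriting; no binary involved):

    >>> from nltk.sem import Expression
    >>> from nltk.inference.prover9 import convert_to_prover9
    >>> read_expr = Expression.fromstring
    >>> convert_to_prover9(read_expr('exists x.(man(x) & -walk(x))'))
    'exists x (man(x) & -(walk(x)))'
    >>> convert_to_prover9(read_expr('(P(x) <-> Q(x))'))
    '(P(x) <-> Q(x))'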
+ """ + if isinstance(expression, ExistsExpression): + return ( + "exists " + + str(expression.variable) + + " " + + _convert_to_prover9(expression.term) + ) + elif isinstance(expression, AllExpression): + return ( + "all " + + str(expression.variable) + + " " + + _convert_to_prover9(expression.term) + ) + elif isinstance(expression, NegatedExpression): + return "-(" + _convert_to_prover9(expression.term) + ")" + elif isinstance(expression, AndExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " & " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, OrExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " | " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, ImpExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " -> " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, IffExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " <-> " + + _convert_to_prover9(expression.second) + + ")" + ) + elif isinstance(expression, EqualityExpression): + return ( + "(" + + _convert_to_prover9(expression.first) + + " = " + + _convert_to_prover9(expression.second) + + ")" + ) + else: + return str(expression) + + +class Prover9(Prover9Parent, Prover): + _prover9_bin = None + _prooftrans_bin = None + + def __init__(self, timeout=60): + self._timeout = timeout + """The timeout value for prover9. If a proof can not be found + in this amount of time, then prover9 will return false. + (Use 0 for no timeout.)""" + + def _prove(self, goal=None, assumptions=None, verbose=False): + """ + Use Prover9 to prove a theorem. + :return: A pair whose first element is a boolean indicating if the + proof was successful (i.e. returns value of 0) and whose second element + is the output of the prover. + """ + if not assumptions: + assumptions = [] + + stdout, returncode = self._call_prover9( + self.prover9_input(goal, assumptions), verbose=verbose + ) + return (returncode == 0, stdout) + + def prover9_input(self, goal, assumptions): + """ + :see: Prover9Parent.prover9_input + """ + s = "clear(auto_denials).\n" # only one proof required + return s + Prover9Parent.prover9_input(self, goal, assumptions) + + def _call_prover9(self, input_str, args=[], verbose=False): + """ + Call the ``prover9`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._prover9_bin is None: + self._prover9_bin = self._find_binary("prover9", verbose) + + updated_input_str = "" + if self._timeout > 0: + updated_input_str += "assign(max_seconds, %d).\n\n" % self._timeout + updated_input_str += input_str + + stdout, returncode = self._call( + updated_input_str, self._prover9_bin, args, verbose + ) + + if returncode not in [0, 2]: + errormsgprefix = "%%ERROR:" + if errormsgprefix in stdout: + msgstart = stdout.index(errormsgprefix) + errormsg = stdout[msgstart:].strip() + else: + errormsg = None + if returncode in [3, 4, 5, 6]: + raise Prover9LimitExceededException(returncode, errormsg) + else: + raise Prover9FatalException(returncode, errormsg) + + return stdout, returncode + + def _call_prooftrans(self, input_str, args=[], verbose=False): + """ + Call the ``prooftrans`` binary with the given input. + + :param input_str: A string whose contents are used as stdin. 
+ :param args: A list of command-line arguments. + :return: A tuple (stdout, returncode) + :see: ``config_prover9`` + """ + if self._prooftrans_bin is None: + self._prooftrans_bin = self._find_binary("prooftrans", verbose) + + return self._call(input_str, self._prooftrans_bin, args, verbose) + + +class Prover9Exception(Exception): + def __init__(self, returncode, message): + msg = p9_return_codes[returncode] + if message: + msg += "\n%s" % message + Exception.__init__(self, msg) + + +class Prover9FatalException(Prover9Exception): + pass + + +class Prover9LimitExceededException(Prover9Exception): + pass + + +###################################################################### +# { Tests and Demos +###################################################################### + + +def test_config(): + + a = Expression.fromstring("(walk(j) & sing(j))") + g = Expression.fromstring("walk(j)") + p = Prover9Command(g, assumptions=[a]) + p._executable_path = None + p.prover9_search = [] + p.prove() + # config_prover9('/usr/local/bin') + print(p.prove()) + print(p.proof()) + + +def test_convert_to_prover9(expr): + """ + Test that parsing works OK. + """ + for t in expr: + e = Expression.fromstring(t) + print(convert_to_prover9(e)) + + +def test_prove(arguments): + """ + Try some proofs and exhibit the results. + """ + for (goal, assumptions) in arguments: + g = Expression.fromstring(goal) + alist = [Expression.fromstring(a) for a in assumptions] + p = Prover9Command(g, assumptions=alist).prove() + for a in alist: + print(" %s" % a) + print(f"|- {g}: {p}\n") + + +arguments = [ + ("(man(x) <-> (not (not man(x))))", []), + ("(not (man(x) & (not man(x))))", []), + ("(man(x) | (not man(x)))", []), + ("(man(x) & (not man(x)))", []), + ("(man(x) -> man(x))", []), + ("(not (man(x) & (not man(x))))", []), + ("(man(x) | (not man(x)))", []), + ("(man(x) -> man(x))", []), + ("(man(x) <-> man(x))", []), + ("(not (man(x) <-> (not man(x))))", []), + ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), + ("((all x.(man(x) -> walks(x)) & man(Socrates)) -> some y.walks(y))", []), + ("(all x.man(x) -> all x.man(x))", []), + ("some x.all y.sees(x,y)", []), + ( + "some e3.(walk(e3) & subj(e3, mary))", + [ + "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))" + ], + ), + ( + "some x e1.(see(e1) & subj(e1, x) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))", + [ + "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))" + ], + ), +] + +expressions = [ + r"some x y.sees(x,y)", + r"some x.(man(x) & walks(x))", + r"\x.(man(x) & walks(x))", + r"\x y.sees(x,y)", + r"walks(john)", + r"\x.big(x, \y.mouse(y))", + r"(walks(x) & (runs(x) & (threes(x) & fours(x))))", + r"(walks(x) -> runs(x))", + r"some x.(PRO(x) & sees(John, x))", + r"some x.(man(x) & (not walks(x)))", + r"all x.(man(x) -> walks(x))", +] + + +def spacer(num=45): + print("-" * num) + + +def demo(): + print("Testing configuration") + spacer() + test_config() + print() + print("Testing conversion to Prover9 format") + spacer() + test_convert_to_prover9(expressions) + print() + print("Testing proofs") + spacer() + test_prove(arguments) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/resolution.py b/env-llmeval/lib/python3.10/site-packages/nltk/inference/resolution.py new file mode 100644 index 0000000000000000000000000000000000000000..52428eb2c5d2bee410716b058165cbccbbb238a4 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/nltk/inference/resolution.py @@ -0,0 +1,759 @@ +# Natural Language Toolkit: First-order Resolution-based Theorem Prover +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +Module for a resolution-based First Order theorem prover. +""" + +import operator +from collections import defaultdict +from functools import reduce + +from nltk.inference.api import BaseProverCommand, Prover +from nltk.sem import skolemize +from nltk.sem.logic import ( + AndExpression, + ApplicationExpression, + EqualityExpression, + Expression, + IndividualVariableExpression, + NegatedExpression, + OrExpression, + Variable, + VariableExpression, + is_indvar, + unique_variable, +) + + +class ProverParseError(Exception): + pass + + +class ResolutionProver(Prover): + ANSWER_KEY = "ANSWER" + _assume_false = True + + def _prove(self, goal=None, assumptions=None, verbose=False): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in the proof + :type assumptions: list(sem.Expression) + """ + if not assumptions: + assumptions = [] + + result = None + try: + clauses = [] + if goal: + clauses.extend(clausify(-goal)) + for a in assumptions: + clauses.extend(clausify(a)) + result, clauses = self._attempt_proof(clauses) + if verbose: + print(ResolutionProverCommand._decorate_clauses(clauses)) + except RuntimeError as e: + if self._assume_false and str(e).startswith( + "maximum recursion depth exceeded" + ): + result = False + clauses = [] + else: + if verbose: + print(e) + else: + raise e + return (result, clauses) + + def _attempt_proof(self, clauses): + # map indices to lists of indices, to store attempted unifications + tried = defaultdict(list) + + i = 0 + while i < len(clauses): + if not clauses[i].is_tautology(): + # since we try clauses in order, we should start after the last + # index tried + if tried[i]: + j = tried[i][-1] + 1 + else: + j = i + 1 # nothing tried yet for 'i', so start with the next + + while j < len(clauses): + # don't: 1) unify a clause with itself, + # 2) use tautologies + if i != j and j and not clauses[j].is_tautology(): + tried[i].append(j) + newclauses = clauses[i].unify(clauses[j]) + if newclauses: + for newclause in newclauses: + newclause._parents = (i + 1, j + 1) + clauses.append(newclause) + if not len(newclause): # if there's an empty clause + return (True, clauses) + i = -1 # since we added a new clause, restart from the top + break + j += 1 + i += 1 + return (False, clauses) + + +class ResolutionProverCommand(BaseProverCommand): + def __init__(self, goal=None, assumptions=None, prover=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + """ + if prover is not None: + assert isinstance(prover, ResolutionProver) + else: + prover = ResolutionProver() + + BaseProverCommand.__init__(self, prover, goal, assumptions) + self._clauses = None + + def prove(self, verbose=False): + """ + Perform the actual proof. Store the result to prevent unnecessary + re-proving. 
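A sketch of direct use of the resolution prover. Unlike the Prover9 and Mace interfaces, it is pure Python and needs no external binaries:

    >>> from nltk.sem import Expression
    >>> from nltk.inference.resolution import ResolutionProver
    >>> read_expr = Expression.fromstring
    >>> p1 = read_expr('all x.(man(x) -> mortal(x))')
    >>> p2 = read_expr('man(Socrates)')
    >>> ResolutionProver().prove(read_expr('mortal(Socrates)'), [p1, p2])
    True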
+ """ + if self._result is None: + self._result, clauses = self._prover._prove( + self.goal(), self.assumptions(), verbose + ) + self._clauses = clauses + self._proof = ResolutionProverCommand._decorate_clauses(clauses) + return self._result + + def find_answers(self, verbose=False): + self.prove(verbose) + + answers = set() + answer_ex = VariableExpression(Variable(ResolutionProver.ANSWER_KEY)) + for clause in self._clauses: + for term in clause: + if ( + isinstance(term, ApplicationExpression) + and term.function == answer_ex + and not isinstance(term.argument, IndividualVariableExpression) + ): + answers.add(term.argument) + return answers + + @staticmethod + def _decorate_clauses(clauses): + """ + Decorate the proof output. + """ + out = "" + max_clause_len = max(len(str(clause)) for clause in clauses) + max_seq_len = len(str(len(clauses))) + for i in range(len(clauses)): + parents = "A" + taut = "" + if clauses[i].is_tautology(): + taut = "Tautology" + if clauses[i]._parents: + parents = str(clauses[i]._parents) + parents = " " * (max_clause_len - len(str(clauses[i])) + 1) + parents + seq = " " * (max_seq_len - len(str(i + 1))) + str(i + 1) + out += f"[{seq}] {clauses[i]} {parents} {taut}\n" + return out + + +class Clause(list): + def __init__(self, data): + list.__init__(self, data) + self._is_tautology = None + self._parents = None + + def unify(self, other, bindings=None, used=None, skipped=None, debug=False): + """ + Attempt to unify this Clause with the other, returning a list of + resulting, unified, Clauses. + + :param other: ``Clause`` with which to unify + :param bindings: ``BindingDict`` containing bindings that should be used + during the unification + :param used: tuple of two lists of atoms. The first lists the + atoms from 'self' that were successfully unified with atoms from + 'other'. The second lists the atoms from 'other' that were successfully + unified with atoms from 'self'. + :param skipped: tuple of two ``Clause`` objects. The first is a list of all + the atoms from the 'self' Clause that have not been unified with + anything on the path. The second is same thing for the 'other' Clause. + :param debug: bool indicating whether debug statements should print + :return: list containing all the resulting ``Clause`` objects that could be + obtained by unification + """ + if bindings is None: + bindings = BindingDict() + if used is None: + used = ([], []) + if skipped is None: + skipped = ([], []) + if isinstance(debug, bool): + debug = DebugObject(debug) + + newclauses = _iterate_first( + self, other, bindings, used, skipped, _complete_unify_path, debug + ) + + # remove subsumed clauses. make a list of all indices of subsumed + # clauses, and then remove them from the list + subsumed = [] + for i, c1 in enumerate(newclauses): + if i not in subsumed: + for j, c2 in enumerate(newclauses): + if i != j and j not in subsumed and c1.subsumes(c2): + subsumed.append(j) + result = [] + for i in range(len(newclauses)): + if i not in subsumed: + result.append(newclauses[i]) + + return result + + def isSubsetOf(self, other): + """ + Return True iff every term in 'self' is a term in 'other'. + + :param other: ``Clause`` + :return: bool + """ + for a in self: + if a not in other: + return False + return True + + def subsumes(self, other): + """ + Return True iff 'self' subsumes 'other', this is, if there is a + substitution such that every term in 'self' can be unified with a term + in 'other'. 
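As a sketch of the ``find_answers`` mechanism above: atoms built with the reserved ``ANSWER`` predicate survive in the derived clauses, and their non-variable arguments are collected as answers (pure Python, but this does run the whole resolution loop):

    >>> from nltk.sem import Expression
    >>> from nltk.inference.resolution import ResolutionProverCommand
    >>> read_expr = Expression.fromstring
    >>> cmd = ResolutionProverCommand(None,
    ...     [read_expr('all x.(man(x) -> ANSWER(x))'),
    ...      read_expr('man(Socrates)'),
    ...      read_expr('man(Bill)')])
    >>> sorted(str(a) for a in cmd.find_answers())
    ['Bill', 'Socrates']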
+ + :param other: ``Clause`` + :return: bool + """ + negatedother = [] + for atom in other: + if isinstance(atom, NegatedExpression): + negatedother.append(atom.term) + else: + negatedother.append(-atom) + + negatedotherClause = Clause(negatedother) + + bindings = BindingDict() + used = ([], []) + skipped = ([], []) + debug = DebugObject(False) + + return ( + len( + _iterate_first( + self, + negatedotherClause, + bindings, + used, + skipped, + _subsumes_finalize, + debug, + ) + ) + > 0 + ) + + def __getslice__(self, start, end): + return Clause(list.__getslice__(self, start, end)) + + def __sub__(self, other): + return Clause([a for a in self if a not in other]) + + def __add__(self, other): + return Clause(list.__add__(self, other)) + + def is_tautology(self): + """ + Self is a tautology if it contains ground terms P and -P. The ground + term, P, must be an exact match, ie, not using unification. + """ + if self._is_tautology is not None: + return self._is_tautology + for i, a in enumerate(self): + if not isinstance(a, EqualityExpression): + j = len(self) - 1 + while j > i: + b = self[j] + if isinstance(a, NegatedExpression): + if a.term == b: + self._is_tautology = True + return True + elif isinstance(b, NegatedExpression): + if a == b.term: + self._is_tautology = True + return True + j -= 1 + self._is_tautology = False + return False + + def free(self): + return reduce(operator.or_, ((atom.free() | atom.constants()) for atom in self)) + + def replace(self, variable, expression): + """ + Replace every instance of variable with expression across every atom + in the clause + + :param variable: ``Variable`` + :param expression: ``Expression`` + """ + return Clause([atom.replace(variable, expression) for atom in self]) + + def substitute_bindings(self, bindings): + """ + Replace every binding + + :param bindings: A list of tuples mapping Variable Expressions to the + Expressions to which they are bound. + :return: ``Clause`` + """ + return Clause([atom.substitute_bindings(bindings) for atom in self]) + + def __str__(self): + return "{" + ", ".join("%s" % item for item in self) + "}" + + def __repr__(self): + return "%s" % self + + +def _iterate_first(first, second, bindings, used, skipped, finalize_method, debug): + """ + This method facilitates movement through the terms of 'self' + """ + debug.line(f"unify({first},{second}) {bindings}") + + if not len(first) or not len(second): # if no more recursions can be performed + return finalize_method(first, second, bindings, used, skipped, debug) + else: + # explore this 'self' atom + result = _iterate_second( + first, second, bindings, used, skipped, finalize_method, debug + 1 + ) + + # skip this possible 'self' atom + newskipped = (skipped[0] + [first[0]], skipped[1]) + result += _iterate_first( + first[1:], second, bindings, used, newskipped, finalize_method, debug + 1 + ) + + try: + newbindings, newused, unused = _unify_terms( + first[0], second[0], bindings, used + ) + # Unification found, so progress with this line of unification + # put skipped and unused terms back into play for later unification. 
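For illustration, binary resolution at the ``Clause`` level (the traversal helpers below do the actual work); everything here is pure Python:

    >>> from nltk.sem import Expression
    >>> from nltk.inference.resolution import Clause
    >>> read_expr = Expression.fromstring
    >>> c1 = Clause([read_expr('-P(x)'), read_expr('Q(x)')])
    >>> c2 = Clause([read_expr('P(John)')])
    >>> c1.unify(c2)
    [{Q(John)}]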
+ newfirst = first[1:] + skipped[0] + unused[0] + newsecond = second[1:] + skipped[1] + unused[1] + result += _iterate_first( + newfirst, + newsecond, + newbindings, + newused, + ([], []), + finalize_method, + debug + 1, + ) + except BindingException: + # the atoms could not be unified, + pass + + return result + + +def _iterate_second(first, second, bindings, used, skipped, finalize_method, debug): + """ + This method facilitates movement through the terms of 'other' + """ + debug.line(f"unify({first},{second}) {bindings}") + + if not len(first) or not len(second): # if no more recursions can be performed + return finalize_method(first, second, bindings, used, skipped, debug) + else: + # skip this possible pairing and move to the next + newskipped = (skipped[0], skipped[1] + [second[0]]) + result = _iterate_second( + first, second[1:], bindings, used, newskipped, finalize_method, debug + 1 + ) + + try: + newbindings, newused, unused = _unify_terms( + first[0], second[0], bindings, used + ) + # Unification found, so progress with this line of unification + # put skipped and unused terms back into play for later unification. + newfirst = first[1:] + skipped[0] + unused[0] + newsecond = second[1:] + skipped[1] + unused[1] + result += _iterate_second( + newfirst, + newsecond, + newbindings, + newused, + ([], []), + finalize_method, + debug + 1, + ) + except BindingException: + # the atoms could not be unified, + pass + + return result + + +def _unify_terms(a, b, bindings=None, used=None): + """ + This method attempts to unify two terms. Two expressions are unifiable + if there exists a substitution function S such that S(a) == S(-b). + + :param a: ``Expression`` + :param b: ``Expression`` + :param bindings: ``BindingDict`` a starting set of bindings with which + the unification must be consistent + :return: ``BindingDict`` A dictionary of the bindings required to unify + :raise ``BindingException``: If the terms cannot be unified + """ + assert isinstance(a, Expression) + assert isinstance(b, Expression) + + if bindings is None: + bindings = BindingDict() + if used is None: + used = ([], []) + + # Use resolution + if isinstance(a, NegatedExpression) and isinstance(b, ApplicationExpression): + newbindings = most_general_unification(a.term, b, bindings) + newused = (used[0] + [a], used[1] + [b]) + unused = ([], []) + elif isinstance(a, ApplicationExpression) and isinstance(b, NegatedExpression): + newbindings = most_general_unification(a, b.term, bindings) + newused = (used[0] + [a], used[1] + [b]) + unused = ([], []) + + # Use demodulation + elif isinstance(a, EqualityExpression): + newbindings = BindingDict([(a.first.variable, a.second)]) + newused = (used[0] + [a], used[1]) + unused = ([], [b]) + elif isinstance(b, EqualityExpression): + newbindings = BindingDict([(b.first.variable, b.second)]) + newused = (used[0], used[1] + [b]) + unused = ([a], []) + + else: + raise BindingException((a, b)) + + return newbindings, newused, unused + + +def _complete_unify_path(first, second, bindings, used, skipped, debug): + if used[0] or used[1]: # if bindings were made along the path + newclause = Clause(skipped[0] + skipped[1] + first + second) + debug.line(" -> New Clause: %s" % newclause) + return [newclause.substitute_bindings(bindings)] + else: # no bindings made means no unification occurred. 
so no result + debug.line(" -> End") + return [] + + +def _subsumes_finalize(first, second, bindings, used, skipped, debug): + if not len(skipped[0]) and not len(first): + # If there are no skipped terms and no terms left in 'first', then + # all of the terms in the original 'self' were unified with terms + # in 'other'. Therefore, there exists a binding (this one) such that + # every term in self can be unified with a term in other, which + # is the definition of subsumption. + return [True] + else: + return [] + + +def clausify(expression): + """ + Skolemize, clausify, and standardize the variables apart. + """ + clause_list = [] + for clause in _clausify(skolemize(expression)): + for free in clause.free(): + if is_indvar(free.name): + newvar = VariableExpression(unique_variable()) + clause = clause.replace(free, newvar) + clause_list.append(clause) + return clause_list + + +def _clausify(expression): + """ + :param expression: a skolemized expression in CNF + """ + if isinstance(expression, AndExpression): + return _clausify(expression.first) + _clausify(expression.second) + elif isinstance(expression, OrExpression): + first = _clausify(expression.first) + second = _clausify(expression.second) + assert len(first) == 1 + assert len(second) == 1 + return [first[0] + second[0]] + elif isinstance(expression, EqualityExpression): + return [Clause([expression])] + elif isinstance(expression, ApplicationExpression): + return [Clause([expression])] + elif isinstance(expression, NegatedExpression): + if isinstance(expression.term, ApplicationExpression): + return [Clause([expression])] + elif isinstance(expression.term, EqualityExpression): + return [Clause([expression])] + raise ProverParseError() + + +class BindingDict: + def __init__(self, binding_list=None): + """ + :param binding_list: list of (``AbstractVariableExpression``, ``AtomicExpression``) to initialize the dictionary + """ + self.d = {} + + if binding_list: + for (v, b) in binding_list: + self[v] = b + + def __setitem__(self, variable, binding): + """ + A binding is consistent with the dict if its variable is not already bound, OR if its + variable is already bound to its argument. 
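A sketch of ``clausify`` (defined just above): it skolemizes, splits into CNF clauses, and standardizes the variables apart. The replacement variables come from ``unique_variable()``, so their numbering depends on global counter state, hence the skip:

    >>> from nltk.sem import Expression
    >>> from nltk.inference.resolution import clausify
    >>> read_expr = Expression.fromstring
    >>> clausify(read_expr('P(x) | Q(x)'))    # doctest: +SKIP
    [{P(z1), Q(z1)}]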
+ + :param variable: ``Variable`` The variable to bind + :param binding: ``Expression`` The atomic to which 'variable' should be bound + :raise BindingException: If the variable cannot be bound in this dictionary + """ + assert isinstance(variable, Variable) + assert isinstance(binding, Expression) + + try: + existing = self[variable] + except KeyError: + existing = None + + if not existing or binding == existing: + self.d[variable] = binding + elif isinstance(binding, IndividualVariableExpression): + # Since variable is already bound, try to bind binding to variable + try: + existing = self[binding.variable] + except KeyError: + existing = None + + binding2 = VariableExpression(variable) + + if not existing or binding2 == existing: + self.d[binding.variable] = binding2 + else: + raise BindingException( + "Variable %s already bound to another " "value" % (variable) + ) + else: + raise BindingException( + "Variable %s already bound to another " "value" % (variable) + ) + + def __getitem__(self, variable): + """ + Return the expression to which 'variable' is bound + """ + assert isinstance(variable, Variable) + + intermediate = self.d[variable] + while intermediate: + try: + intermediate = self.d[intermediate] + except KeyError: + return intermediate + + def __contains__(self, item): + return item in self.d + + def __add__(self, other): + """ + :param other: ``BindingDict`` The dict with which to combine self + :return: ``BindingDict`` A new dict containing all the elements of both parameters + :raise BindingException: If the parameter dictionaries are not consistent with each other + """ + try: + combined = BindingDict() + for v in self.d: + combined[v] = self.d[v] + for v in other.d: + combined[v] = other.d[v] + return combined + except BindingException as e: + raise BindingException( + "Attempting to add two contradicting " + "BindingDicts: '%s' and '%s'" % (self, other) + ) from e + + def __len__(self): + return len(self.d) + + def __str__(self): + data_str = ", ".join(f"{v}: {self.d[v]}" for v in sorted(self.d.keys())) + return "{" + data_str + "}" + + def __repr__(self): + return "%s" % self + + +def most_general_unification(a, b, bindings=None): + """ + Find the most general unification of the two given expressions + + :param a: ``Expression`` + :param b: ``Expression`` + :param bindings: ``BindingDict`` a starting set of bindings with which the + unification must be consistent + :return: a list of bindings + :raise BindingException: if the Expressions cannot be unified + """ + if bindings is None: + bindings = BindingDict() + + if a == b: + return bindings + elif isinstance(a, IndividualVariableExpression): + return _mgu_var(a, b, bindings) + elif isinstance(b, IndividualVariableExpression): + return _mgu_var(b, a, bindings) + elif isinstance(a, ApplicationExpression) and isinstance(b, ApplicationExpression): + return most_general_unification( + a.function, b.function, bindings + ) + most_general_unification(a.argument, b.argument, bindings) + raise BindingException((a, b)) + + +def _mgu_var(var, expression, bindings): + if var.variable in expression.free() | expression.constants(): + raise BindingException((var, expression)) + else: + return BindingDict([(var.variable, expression)]) + bindings + + +class BindingException(Exception): + def __init__(self, arg): + if isinstance(arg, tuple): + Exception.__init__(self, "'%s' cannot be bound to '%s'" % arg) + else: + Exception.__init__(self, arg) + + +class UnificationException(Exception): + def __init__(self, a, b): + 
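For illustration, the most general unifier of two atoms that differ in one argument each (no binaries involved):

    >>> from nltk.sem import Expression
    >>> from nltk.inference.resolution import most_general_unification
    >>> read_expr = Expression.fromstring
    >>> print(most_general_unification(read_expr('see(x, John)'),
    ...                                read_expr('see(Mary, y)')))
    {x: Mary, y: John}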
Exception.__init__(self, f"'{a}' cannot unify with '{b}'") + + +class DebugObject: + def __init__(self, enabled=True, indent=0): + self.enabled = enabled + self.indent = indent + + def __add__(self, i): + return DebugObject(self.enabled, self.indent + i) + + def line(self, line): + if self.enabled: + print(" " * self.indent + line) + + +def testResolutionProver(): + resolution_test(r"man(x)") + resolution_test(r"(man(x) -> man(x))") + resolution_test(r"(man(x) -> --man(x))") + resolution_test(r"-(man(x) and -man(x))") + resolution_test(r"(man(x) or -man(x))") + resolution_test(r"(man(x) -> man(x))") + resolution_test(r"-(man(x) and -man(x))") + resolution_test(r"(man(x) or -man(x))") + resolution_test(r"(man(x) -> man(x))") + resolution_test(r"(man(x) iff man(x))") + resolution_test(r"-(man(x) iff -man(x))") + resolution_test("all x.man(x)") + resolution_test("-all x.some y.F(x,y) & some x.all y.(-F(x,y))") + resolution_test("some x.all y.sees(x,y)") + + p1 = Expression.fromstring(r"all x.(man(x) -> mortal(x))") + p2 = Expression.fromstring(r"man(Socrates)") + c = Expression.fromstring(r"mortal(Socrates)") + print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}") + + p1 = Expression.fromstring(r"all x.(man(x) -> walks(x))") + p2 = Expression.fromstring(r"man(John)") + c = Expression.fromstring(r"some y.walks(y)") + print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}") + + p = Expression.fromstring(r"some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))") + c = Expression.fromstring(r"some e0.walk(e0,mary)") + print(f"{p} |- {c}: {ResolutionProver().prove(c, [p])}") + + +def resolution_test(e): + f = Expression.fromstring(e) + t = ResolutionProver().prove(f) + print(f"|- {f}: {t}") + + +def test_clausify(): + lexpr = Expression.fromstring + + print(clausify(lexpr("P(x) | Q(x)"))) + print(clausify(lexpr("(P(x) & Q(x)) | R(x)"))) + print(clausify(lexpr("P(x) | (Q(x) & R(x))"))) + print(clausify(lexpr("(P(x) & Q(x)) | (R(x) & S(x))"))) + + print(clausify(lexpr("P(x) | Q(x) | R(x)"))) + print(clausify(lexpr("P(x) | (Q(x) & R(x)) | S(x)"))) + + print(clausify(lexpr("exists x.P(x) | Q(x)"))) + + print(clausify(lexpr("-(-P(x) & Q(x))"))) + print(clausify(lexpr("P(x) <-> Q(x)"))) + print(clausify(lexpr("-(P(x) <-> Q(x))"))) + print(clausify(lexpr("-(all x.P(x))"))) + print(clausify(lexpr("-(some x.P(x))"))) + + print(clausify(lexpr("some x.P(x)"))) + print(clausify(lexpr("some x.all y.P(x,y)"))) + print(clausify(lexpr("all y.some x.P(x,y)"))) + print(clausify(lexpr("all z.all y.some x.P(x,y,z)"))) + print(clausify(lexpr("all x.(all y.P(x,y) -> -all y.(Q(x,y) -> R(x,y)))"))) + + +def demo(): + test_clausify() + print() + testResolutionProver() + print() + + p = Expression.fromstring("man(x)") + print(ResolutionProverCommand(p, [p]).prove()) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/inference/tableau.py b/env-llmeval/lib/python3.10/site-packages/nltk/inference/tableau.py new file mode 100644 index 0000000000000000000000000000000000000000..620f21b465225f3d8dc91a05414bfd9bbbe3e5c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/inference/tableau.py @@ -0,0 +1,712 @@ +# Natural Language Toolkit: First-Order Tableau Theorem Prover +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dan Garrette +# +# URL: +# For license information, see LICENSE.TXT + +""" +Module for a tableau-based First Order theorem prover. 
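A sketch of direct use; like the resolution prover, the tableau prover is pure Python and needs no external binaries:

    >>> from nltk.sem import Expression
    >>> from nltk.inference.tableau import TableauProver
    >>> read_expr = Expression.fromstring
    >>> TableauProver().prove(read_expr('mortal(Socrates)'),
    ...                       [read_expr('all x.(man(x) -> mortal(x))'),
    ...                        read_expr('man(Socrates)')])
    True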
+""" + +from nltk.inference.api import BaseProverCommand, Prover +from nltk.internals import Counter +from nltk.sem.logic import ( + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + EqualityExpression, + ExistsExpression, + Expression, + FunctionVariableExpression, + IffExpression, + ImpExpression, + LambdaExpression, + NegatedExpression, + OrExpression, + Variable, + VariableExpression, + unique_variable, +) + +_counter = Counter() + + +class ProverParseError(Exception): + pass + + +class TableauProver(Prover): + _assume_false = False + + def _prove(self, goal=None, assumptions=None, verbose=False): + if not assumptions: + assumptions = [] + + result = None + try: + agenda = Agenda() + if goal: + agenda.put(-goal) + agenda.put_all(assumptions) + debugger = Debug(verbose) + result = self._attempt_proof(agenda, set(), set(), debugger) + except RuntimeError as e: + if self._assume_false and str(e).startswith( + "maximum recursion depth exceeded" + ): + result = False + else: + if verbose: + print(e) + else: + raise e + return (result, "\n".join(debugger.lines)) + + def _attempt_proof(self, agenda, accessible_vars, atoms, debug): + (current, context), category = agenda.pop_first() + + # if there's nothing left in the agenda, and we haven't closed the path + if not current: + debug.line("AGENDA EMPTY") + return False + + proof_method = { + Categories.ATOM: self._attempt_proof_atom, + Categories.PROP: self._attempt_proof_prop, + Categories.N_ATOM: self._attempt_proof_n_atom, + Categories.N_PROP: self._attempt_proof_n_prop, + Categories.APP: self._attempt_proof_app, + Categories.N_APP: self._attempt_proof_n_app, + Categories.N_EQ: self._attempt_proof_n_eq, + Categories.D_NEG: self._attempt_proof_d_neg, + Categories.N_ALL: self._attempt_proof_n_all, + Categories.N_EXISTS: self._attempt_proof_n_some, + Categories.AND: self._attempt_proof_and, + Categories.N_OR: self._attempt_proof_n_or, + Categories.N_IMP: self._attempt_proof_n_imp, + Categories.OR: self._attempt_proof_or, + Categories.IMP: self._attempt_proof_imp, + Categories.N_AND: self._attempt_proof_n_and, + Categories.IFF: self._attempt_proof_iff, + Categories.N_IFF: self._attempt_proof_n_iff, + Categories.EQ: self._attempt_proof_eq, + Categories.EXISTS: self._attempt_proof_some, + Categories.ALL: self._attempt_proof_all, + }[category] + + debug.line((current, context)) + return proof_method(current, context, agenda, accessible_vars, atoms, debug) + + def _attempt_proof_atom( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. Return 'True' if it is + if (current, True) in atoms: + debug.line("CLOSED", 1) + return True + + if context: + if isinstance(context.term, NegatedExpression): + current = current.negate() + agenda.put(context(current).simplify()) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + else: + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, + accessible_vars | set(current.args), + atoms | {(current, False)}, + debug + 1, + ) + + def _attempt_proof_n_atom( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. 
Return 'True' if it is + if (current.term, False) in atoms: + debug.line("CLOSED", 1) + return True + + if context: + if isinstance(context.term, NegatedExpression): + current = current.negate() + agenda.put(context(current).simplify()) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + else: + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, + accessible_vars | set(current.term.args), + atoms | {(current.term, True)}, + debug + 1, + ) + + def _attempt_proof_prop( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. Return 'True' if it is + if (current, True) in atoms: + debug.line("CLOSED", 1) + return True + + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars, atoms | {(current, False)}, debug + 1 + ) + + def _attempt_proof_n_prop( + self, current, context, agenda, accessible_vars, atoms, debug + ): + # Check if the branch is closed. Return 'True' if it is + if (current.term, False) in atoms: + debug.line("CLOSED", 1) + return True + + # mark all AllExpressions as 'not exhausted' into the agenda since we are (potentially) adding new accessible vars + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars, atoms | {(current.term, True)}, debug + 1 + ) + + def _attempt_proof_app( + self, current, context, agenda, accessible_vars, atoms, debug + ): + f, args = current.uncurry() + for i, arg in enumerate(args): + if not TableauProver.is_atom(arg): + ctx = f + nv = Variable("X%s" % _counter.get()) + for j, a in enumerate(args): + ctx = ctx(VariableExpression(nv)) if i == j else ctx(a) + if context: + ctx = context(ctx).simplify() + ctx = LambdaExpression(nv, ctx) + agenda.put(arg, ctx) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + raise Exception("If this method is called, there must be a non-atomic argument") + + def _attempt_proof_n_app( + self, current, context, agenda, accessible_vars, atoms, debug + ): + f, args = current.term.uncurry() + for i, arg in enumerate(args): + if not TableauProver.is_atom(arg): + ctx = f + nv = Variable("X%s" % _counter.get()) + for j, a in enumerate(args): + ctx = ctx(VariableExpression(nv)) if i == j else ctx(a) + if context: + # combine new context with existing + ctx = context(ctx).simplify() + ctx = LambdaExpression(nv, -ctx) + agenda.put(-arg, ctx) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + raise Exception("If this method is called, there must be a non-atomic argument") + + def _attempt_proof_n_eq( + self, current, context, agenda, accessible_vars, atoms, debug + ): + ########################################################################### + # Since 'current' is of type '~(a=b)', the path is closed if 'a' == 'b' + ########################################################################### + if current.term.first == current.term.second: + debug.line("CLOSED", 1) + return True + + agenda[Categories.N_EQ].add((current, context)) + current._exhausted = True + return self._attempt_proof( + agenda, + accessible_vars | {current.term.first, current.term.second}, + atoms, + debug + 1, + ) + + def _attempt_proof_d_neg( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(current.term.term, context) + 
return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_all( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda[Categories.EXISTS].add( + (ExistsExpression(current.term.variable, -current.term.term), context) + ) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_some( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda[Categories.ALL].add( + (AllExpression(current.term.variable, -current.term.term), context) + ) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_and( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(current.first, context) + agenda.put(current.second, context) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_or( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(-current.term.first, context) + agenda.put(-current.term.second, context) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_imp( + self, current, context, agenda, accessible_vars, atoms, debug + ): + agenda.put(current.term.first, context) + agenda.put(-current.term.second, context) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_or( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(current.first, context) + new_agenda.put(current.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_imp( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(-current.first, context) + new_agenda.put(current.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_and( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(-current.term.first, context) + new_agenda.put(-current.term.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_iff( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(current.first, context) + agenda.put(current.second, context) + new_agenda.put(-current.first, context) + new_agenda.put(-current.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_n_iff( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_agenda = agenda.clone() + agenda.put(current.term.first, context) + agenda.put(-current.term.second, context) + new_agenda.put(-current.term.first, context) + new_agenda.put(current.term.second, context) + return self._attempt_proof( + agenda, accessible_vars, atoms, debug + 1 + ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) + + def _attempt_proof_eq( + self, current, context, agenda, accessible_vars, atoms, debug + ): + ######################################################################### + # Since 'current' is 
of the form '(a = b)', replace ALL free instances + # of 'a' with 'b' + ######################################################################### + agenda.put_atoms(atoms) + agenda.replace_all(current.first, current.second) + accessible_vars.discard(current.first) + agenda.mark_neqs_fresh() + return self._attempt_proof(agenda, accessible_vars, set(), debug + 1) + + def _attempt_proof_some( + self, current, context, agenda, accessible_vars, atoms, debug + ): + new_unique_variable = VariableExpression(unique_variable()) + agenda.put(current.term.replace(current.variable, new_unique_variable), context) + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1 + ) + + def _attempt_proof_all( + self, current, context, agenda, accessible_vars, atoms, debug + ): + try: + current._used_vars + except AttributeError: + current._used_vars = set() + + # if there are accessible_vars on the path + if accessible_vars: + # get the set of bound variables that have not be used by this AllExpression + bv_available = accessible_vars - current._used_vars + + if bv_available: + variable_to_use = list(bv_available)[0] + debug.line("--> Using '%s'" % variable_to_use, 2) + current._used_vars |= {variable_to_use} + agenda.put( + current.term.replace(current.variable, variable_to_use), context + ) + agenda[Categories.ALL].add((current, context)) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + else: + # no more available variables to substitute + debug.line("--> Variables Exhausted", 2) + current._exhausted = True + agenda[Categories.ALL].add((current, context)) + return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) + + else: + new_unique_variable = VariableExpression(unique_variable()) + debug.line("--> Using '%s'" % new_unique_variable, 2) + current._used_vars |= {new_unique_variable} + agenda.put( + current.term.replace(current.variable, new_unique_variable), context + ) + agenda[Categories.ALL].add((current, context)) + agenda.mark_alls_fresh() + return self._attempt_proof( + agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1 + ) + + @staticmethod + def is_atom(e): + if isinstance(e, NegatedExpression): + e = e.term + + if isinstance(e, ApplicationExpression): + for arg in e.args: + if not TableauProver.is_atom(arg): + return False + return True + elif isinstance(e, AbstractVariableExpression) or isinstance( + e, LambdaExpression + ): + return True + else: + return False + + +class TableauProverCommand(BaseProverCommand): + def __init__(self, goal=None, assumptions=None, prover=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. 
+ :type assumptions: list(sem.Expression) + """ + if prover is not None: + assert isinstance(prover, TableauProver) + else: + prover = TableauProver() + + BaseProverCommand.__init__(self, prover, goal, assumptions) + + +class Agenda: + def __init__(self): + self.sets = tuple(set() for i in range(21)) + + def clone(self): + new_agenda = Agenda() + set_list = [s.copy() for s in self.sets] + + new_allExs = set() + for allEx, _ in set_list[Categories.ALL]: + new_allEx = AllExpression(allEx.variable, allEx.term) + try: + new_allEx._used_vars = {used for used in allEx._used_vars} + except AttributeError: + new_allEx._used_vars = set() + new_allExs.add((new_allEx, None)) + set_list[Categories.ALL] = new_allExs + + set_list[Categories.N_EQ] = { + (NegatedExpression(n_eq.term), ctx) + for (n_eq, ctx) in set_list[Categories.N_EQ] + } + + new_agenda.sets = tuple(set_list) + return new_agenda + + def __getitem__(self, index): + return self.sets[index] + + def put(self, expression, context=None): + if isinstance(expression, AllExpression): + ex_to_add = AllExpression(expression.variable, expression.term) + try: + ex_to_add._used_vars = {used for used in expression._used_vars} + except AttributeError: + ex_to_add._used_vars = set() + else: + ex_to_add = expression + self.sets[self._categorize_expression(ex_to_add)].add((ex_to_add, context)) + + def put_all(self, expressions): + for expression in expressions: + self.put(expression) + + def put_atoms(self, atoms): + for atom, neg in atoms: + if neg: + self[Categories.N_ATOM].add((-atom, None)) + else: + self[Categories.ATOM].add((atom, None)) + + def pop_first(self): + """Pop the first expression that appears in the agenda""" + for i, s in enumerate(self.sets): + if s: + if i in [Categories.N_EQ, Categories.ALL]: + for ex in s: + try: + if not ex[0]._exhausted: + s.remove(ex) + return (ex, i) + except AttributeError: + s.remove(ex) + return (ex, i) + else: + return (s.pop(), i) + return ((None, None), None) + + def replace_all(self, old, new): + for s in self.sets: + for ex, ctx in s: + ex.replace(old.variable, new) + if ctx is not None: + ctx.replace(old.variable, new) + + def mark_alls_fresh(self): + for u, _ in self.sets[Categories.ALL]: + u._exhausted = False + + def mark_neqs_fresh(self): + for neq, _ in self.sets[Categories.N_EQ]: + neq._exhausted = False + + def _categorize_expression(self, current): + if isinstance(current, NegatedExpression): + return self._categorize_NegatedExpression(current) + elif isinstance(current, FunctionVariableExpression): + return Categories.PROP + elif TableauProver.is_atom(current): + return Categories.ATOM + elif isinstance(current, AllExpression): + return Categories.ALL + elif isinstance(current, AndExpression): + return Categories.AND + elif isinstance(current, OrExpression): + return Categories.OR + elif isinstance(current, ImpExpression): + return Categories.IMP + elif isinstance(current, IffExpression): + return Categories.IFF + elif isinstance(current, EqualityExpression): + return Categories.EQ + elif isinstance(current, ExistsExpression): + return Categories.EXISTS + elif isinstance(current, ApplicationExpression): + return Categories.APP + else: + raise ProverParseError("cannot categorize %s" % current.__class__.__name__) + + def _categorize_NegatedExpression(self, current): + negated = current.term + + if isinstance(negated, NegatedExpression): + return Categories.D_NEG + elif isinstance(negated, FunctionVariableExpression): + return Categories.N_PROP + elif TableauProver.is_atom(negated): + return 
Categories.N_ATOM + elif isinstance(negated, AllExpression): + return Categories.N_ALL + elif isinstance(negated, AndExpression): + return Categories.N_AND + elif isinstance(negated, OrExpression): + return Categories.N_OR + elif isinstance(negated, ImpExpression): + return Categories.N_IMP + elif isinstance(negated, IffExpression): + return Categories.N_IFF + elif isinstance(negated, EqualityExpression): + return Categories.N_EQ + elif isinstance(negated, ExistsExpression): + return Categories.N_EXISTS + elif isinstance(negated, ApplicationExpression): + return Categories.N_APP + else: + raise ProverParseError("cannot categorize %s" % negated.__class__.__name__) + + +class Debug: + def __init__(self, verbose, indent=0, lines=None): + self.verbose = verbose + self.indent = indent + + if not lines: + lines = [] + self.lines = lines + + def __add__(self, increment): + return Debug(self.verbose, self.indent + 1, self.lines) + + def line(self, data, indent=0): + if isinstance(data, tuple): + ex, ctx = data + if ctx: + data = f"{ex}, {ctx}" + else: + data = "%s" % ex + + if isinstance(ex, AllExpression): + try: + used_vars = "[%s]" % ( + ",".join("%s" % ve.variable.name for ve in ex._used_vars) + ) + data += ": %s" % used_vars + except AttributeError: + data += ": []" + + newline = "{}{}".format(" " * (self.indent + indent), data) + self.lines.append(newline) + + if self.verbose: + print(newline) + + +class Categories: + ATOM = 0 + PROP = 1 + N_ATOM = 2 + N_PROP = 3 + APP = 4 + N_APP = 5 + N_EQ = 6 + D_NEG = 7 + N_ALL = 8 + N_EXISTS = 9 + AND = 10 + N_OR = 11 + N_IMP = 12 + OR = 13 + IMP = 14 + N_AND = 15 + IFF = 16 + N_IFF = 17 + EQ = 18 + EXISTS = 19 + ALL = 20 + + +def testTableauProver(): + tableau_test("P | -P") + tableau_test("P & -P") + tableau_test("Q", ["P", "(P -> Q)"]) + tableau_test("man(x)") + tableau_test("(man(x) -> man(x))") + tableau_test("(man(x) -> --man(x))") + tableau_test("-(man(x) and -man(x))") + tableau_test("(man(x) or -man(x))") + tableau_test("(man(x) -> man(x))") + tableau_test("-(man(x) and -man(x))") + tableau_test("(man(x) or -man(x))") + tableau_test("(man(x) -> man(x))") + tableau_test("(man(x) iff man(x))") + tableau_test("-(man(x) iff -man(x))") + tableau_test("all x.man(x)") + tableau_test("all x.all y.((x = y) -> (y = x))") + tableau_test("all x.all y.all z.(((x = y) & (y = z)) -> (x = z))") + # tableau_test('-all x.some y.F(x,y) & some x.all y.(-F(x,y))') + # tableau_test('some x.all y.sees(x,y)') + + p1 = "all x.(man(x) -> mortal(x))" + p2 = "man(Socrates)" + c = "mortal(Socrates)" + tableau_test(c, [p1, p2]) + + p1 = "all x.(man(x) -> walks(x))" + p2 = "man(John)" + c = "some y.walks(y)" + tableau_test(c, [p1, p2]) + + p = "((x = y) & walks(y))" + c = "walks(x)" + tableau_test(c, [p]) + + p = "((x = y) & ((y = z) & (z = w)))" + c = "(x = w)" + tableau_test(c, [p]) + + p = "some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))" + c = "some e0.walk(e0,mary)" + tableau_test(c, [p]) + + c = "(exists x.exists z3.((x = Mary) & ((z3 = John) & sees(z3,x))) <-> exists x.exists z4.((x = John) & ((z4 = Mary) & sees(x,z4))))" + tableau_test(c) + + +# p = 'some e1.some e2.((believe e1 john e2) and (walk e2 mary))' +# c = 'some x.some e3.some e4.((believe e3 x e4) and (walk e4 mary))' +# tableau_test(c, [p]) + + +def testHigherOrderTableauProver(): + tableau_test("believe(j, -lie(b))", ["believe(j, -lie(b) & -cheat(b))"]) + tableau_test("believe(j, lie(b) & cheat(b))", ["believe(j, lie(b))"]) + tableau_test( + "believe(j, lie(b))", ["lie(b)"] + ) # how do we capture 
that John believes all things that are true + tableau_test( + "believe(j, know(b, cheat(b)))", + ["believe(j, know(b, lie(b)) & know(b, steals(b) & cheat(b)))"], + ) + tableau_test("P(Q(y), R(y) & R(z))", ["P(Q(x) & Q(y), R(y) & R(z))"]) + + tableau_test("believe(j, cheat(b) & lie(b))", ["believe(j, lie(b) & cheat(b))"]) + tableau_test("believe(j, -cheat(b) & -lie(b))", ["believe(j, -lie(b) & -cheat(b))"]) + + +def tableau_test(c, ps=None, verbose=False): + pc = Expression.fromstring(c) + pps = [Expression.fromstring(p) for p in ps] if ps else [] + if not ps: + ps = [] + print( + "%s |- %s: %s" + % (", ".join(ps), pc, TableauProver().prove(pc, pps, verbose=verbose)) + ) + + +def demo(): + testTableauProver() + testHigherOrderTableauProver() + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_boson.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_boson.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d9edf1b5a58dc2e3b8ce42f9aae97ad03d3c807 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_boson.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_dagger.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_dagger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0787f1c3cacbee9eb393560fc2fb520afe25390c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_dagger.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_operator.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a248da6874f8512d6eefa9f021e0317345ad330e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_operator.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_printing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_printing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b66099824c56e26ed99f44e23de4809e33f4dae9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_printing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_qft.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_qft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..773bcfb6ded7abe95fbd60d795000b96805d6713 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_qft.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_represent.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_represent.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d5ea462cd5faee07e27026b45ad31a1a95cab61e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_represent.cpython-310.pyc differ