diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/classify/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/classify/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..238522fd6f7cedce69faf8bfb3384b22cc509cbb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/classify/__init__.py @@ -0,0 +1,101 @@ +# Natural Language Toolkit: Classifiers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Classes and interfaces for labeling tokens with category labels (or +"class labels"). Typically, labels are represented with strings +(such as ``'health'`` or ``'sports'``). Classifiers can be used to +perform a wide range of classification tasks. For example, +classifiers can be used... + +- to classify documents by topic +- to classify ambiguous words by which word sense is intended +- to classify acoustic signals by which phoneme they represent +- to classify sentences by their author + +Features +======== +In order to decide which category label is appropriate for a given +token, classifiers examine one or more 'features' of the token. These +"features" are typically chosen by hand, and indicate which aspects +of the token are relevant to the classification decision. For +example, a document classifier might use a separate feature for each +word, recording how often that word occurred in the document. + +Featuresets +=========== +The features describing a token are encoded using a "featureset", +which is a dictionary that maps from "feature names" to "feature +values". Feature names are unique strings that indicate what aspect +of the token is encoded by the feature. Examples include +``'prevword'``, for a feature whose value is the previous word; and +``'contains-word(library)'`` for a feature that is true when a document +contains the word ``'library'``. Feature values are typically +booleans, numbers, or strings, depending on which feature they +describe. + +Featuresets are typically constructed using a "feature detector" +(also known as a "feature extractor"). A feature detector is a +function that takes a token (and sometimes information about its +context) as its input, and returns a featureset describing that token. +For example, the following feature detector converts a document +(stored as a list of words) to a featureset describing the set of +words included in the document: + + >>> # Define a feature detector function. + >>> def document_features(document): + ... return dict([('contains-word(%s)' % w, True) for w in document]) + +Feature detectors are typically applied to each token before it is fed +to the classifier: + + >>> # Classify each Gutenberg document. + >>> from nltk.corpus import gutenberg + >>> for fileid in gutenberg.fileids(): # doctest: +SKIP + ... doc = gutenberg.words(fileid) # doctest: +SKIP + ... print(fileid, classifier.classify(document_features(doc))) # doctest: +SKIP + +The parameters that a feature detector expects will vary, depending on +the task and the needs of the feature detector. For example, a +feature detector for word sense disambiguation (WSD) might take as its +input a sentence, and the index of a word that should be classified, +and return a featureset for that word. The following feature detector +for WSD includes features describing the left and right contexts of +the target word: + + >>> def wsd_features(sentence, index): + ... featureset = {} + ... for i in range(max(0, index-3), index): + ... 
featureset['left-context(%s)' % sentence[i]] = True + ... for i in range(index, min(index+3, len(sentence))): + ... featureset['right-context(%s)' % sentence[i]] = True + ... return featureset + +Training Classifiers +==================== +Most classifiers are built by training them on a list of hand-labeled +examples, known as the "training set". Training sets are represented +as lists of ``(featuredict, label)`` tuples. +""" + +from nltk.classify.api import ClassifierI, MultiClassifierI +from nltk.classify.decisiontree import DecisionTreeClassifier +from nltk.classify.maxent import ( + BinaryMaxentFeatureEncoding, + ConditionalExponentialClassifier, + MaxentClassifier, + TypedMaxentFeatureEncoding, +) +from nltk.classify.megam import call_megam, config_megam +from nltk.classify.naivebayes import NaiveBayesClassifier +from nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier +from nltk.classify.rte_classify import RTEFeatureExtractor, rte_classifier, rte_features +from nltk.classify.scikitlearn import SklearnClassifier +from nltk.classify.senna import Senna +from nltk.classify.textcat import TextCat +from nltk.classify.util import accuracy, apply_features, log_likelihood +from nltk.classify.weka import WekaClassifier, config_weka diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffac7e4a272199b26f0f3cd13826c2dd76d59c53 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38f3ba469574242e67be62b814715ffa387b000a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__pycache__/europarl_raw.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e702e5dbe29094d3e8da238c0692fa53052fe41a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac0699aadd56e4e1cb5a4d301fc078a92635a7d0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/aligned.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/aligned.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95ec2631ffde1ea0b6354f10640e2ec0b4369d2c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/aligned.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bee6b43e2ebfd505b78d333387764cb25097b34 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bcp47.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bcp47.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..112a113cddb073a506cb960b0a6eaab7a5490a93 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bcp47.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bnc.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bnc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..538693a25c987569a0394181bbc3b02c25e301dc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bnc.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e1bb3d4de51e6ddd66417ef61370562b7a178ed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bcbe03963b24c06384d495ffc0cfeb61164e0dc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chasen.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chasen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba19d02165e27dd918e7e71208cde2590e0ef58f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chasen.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/childes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/childes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9e536cd27784cb8a4673518183f2920077ec77f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/childes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chunked.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chunked.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35bfa99ade1cadd01515e00a69be17e49f8e7d02 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/chunked.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/cmudict.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/cmudict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..854677f1881228b5e76c7c3712749162b6ea4670 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/cmudict.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/comparative_sents.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/comparative_sents.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d091438a9a8d829fcb0b58928bbc2ecb1b00b923 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/comparative_sents.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/conll.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/conll.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..167549414378a32182d40aca927b4fbc767a2ee9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/conll.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/crubadan.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/crubadan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f637beb1d8e7e78b30ab1228ef69ab180de85aa8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/crubadan.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/dependency.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/dependency.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84f9ca410553dd3a1ddde44784a9310e3bc83cb1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/dependency.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5dd53be3575a15b36456face42bea02ffabce51 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/framenet.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ieer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ieer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c319110b4980af02b873f4b09424c6259dbaed0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ieer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/indian.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/indian.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a3b4c91a3ad747fe1b297aaa3fc6dfe8c7ba8da1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/indian.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ipipan.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ipipan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41d25443153c0acf41c06e138037a6a876eb50c2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ipipan.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/knbc.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/knbc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..047ead835682b5b0a45e92219c034940661b9793 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/knbc.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51816d1274cb51cb7faec12ec8c04f0b2dba0048 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/markdown.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/markdown.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a44c32ac4fb0a4ebd3564a15ba14744de168d704 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/markdown.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/mte.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/mte.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b124630b6656b1ccf92bc09d733701a6a4e66d51 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/mte.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nkjp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nkjp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3482096fd057def261bdc391497f206696426c99 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nkjp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nombank.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nombank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a568ac7710770a9f5aaf9e14f72bd27ccae4538d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nombank.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nps_chat.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nps_chat.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..89b2c02ffaf555bc6c4a7a5aa3334d6d6a764ea9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nps_chat.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/opinion_lexicon.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/opinion_lexicon.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00947e02a8deedb7b05001c244297d04c0246d13 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/opinion_lexicon.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8500a7b840a2cce966326cdf4f7b03d0ef648dc5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_swadesh.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_swadesh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40df6798e4eb66ff2668941cb80b1782c8326465 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_swadesh.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13d26f554b943c75f6c39002308673a69a06cce1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0d873d6a1774ad1f9144caa6b3be50442b0b518 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1ea2acf0bf869e5a4c06473865caba774f329ce Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..810346fa6463f22708d6429eeca1030ccbaf2d76 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pros_cons.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pros_cons.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..995eb50d6723db2d46a6f74de1a7d25178b2c1b0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pros_cons.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/reviews.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/reviews.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8169ba4d6bd061598b08feaaba0172879b1bb84e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/reviews.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d62d0fbad0c7ae8f0f4c5e2f439d29ebb76bef97 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/semcor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/semcor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..246a6f32b5a43ff40a1f168a8e7af920278b599e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/semcor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/senseval.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/senseval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b2f2b24e900febb34b397d666cd4c7aecebdafb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/senseval.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sentiwordnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sentiwordnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dc750469610c70b68fbb48cee8273d8662f0037 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sentiwordnet.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sinica_treebank.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sinica_treebank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..002e44ee53291ee27832f2d2b47f0a034fc50075 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/sinica_treebank.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/string_category.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/string_category.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..658e2955650a9581b7a51ef720ecb486f60d35c6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/string_category.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/switchboard.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/switchboard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52a489b4e6b4a8d526513a24b1a1f9db4ba5780f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/switchboard.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/tagged.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/tagged.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..393d847137d848f7fe904665b15eb78a8ba75334 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/tagged.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/timit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/timit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c3c6d4aece899fba1027a79d7e0279bd7b17c4f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/timit.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f43aab807b27d796d831ea42c7e3bfbae8ff2b76 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/twitter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/twitter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f5213def9144f0ed98eada151023ffc4f26e804 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/twitter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/udhr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/udhr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dcf397f69a740e86f6b2d8c20194940885beef1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/udhr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..295adb12480cf8a0b594c1405d255af72804e6ff Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/verbnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/verbnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0661d35ca0765acabd58aea3948260ad5ff2ce11 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/verbnet.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordlist.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordlist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9dbe4a199e5a8086f5d43cfa4d6bb601b9a026fd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordlist.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1f4e86ef80c200c255c55f824beb7a29cc1e96e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/wordnet.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/xmldocs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/xmldocs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb43bdbe73ca24a9e8b763c3f16bddecff7071cd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/xmldocs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ycoe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ycoe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26ffe47cf053bd152a8d30cba5ccc696d5bcdc6c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ycoe.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/api.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/api.py new file mode 100644 index 0000000000000000000000000000000000000000..cbe80d902ff8daa5b2a94fcccbc5b050f2d36324 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/api.py @@ -0,0 +1,516 @@ +# Natural Language Toolkit: API for Corpus Readers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +API for corpus readers. +""" + +import os +import re +from collections import defaultdict +from itertools import chain + +from nltk.corpus.reader.util import * +from nltk.data import FileSystemPathPointer, PathPointer, ZipFilePathPointer + + +class CorpusReader: + """ + A base class for "corpus reader" classes, each of which can be + used to read a specific corpus format. Each individual corpus + reader instance is used to read a specific corpus, consisting of + one or more files under a common root directory. Each file is + identified by its ``file identifier``, which is the relative path + to the file from the root directory. + + A separate subclass is defined for each corpus format. These + subclasses define one or more methods that provide 'views' on the + corpus contents, such as ``words()`` (for a list of words) and + ``parsed_sents()`` (for a list of parsed sentences). Called with + no arguments, these methods will return the contents of the entire + corpus. 
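+    For instance, with one of the corpora distributed with NLTK (an
+    illustrative sketch; it assumes the ``gutenberg`` data has been
+    installed with ``nltk.download('gutenberg')``, so the doctest is
+    skipped):
+
+        >>> from nltk.corpus import gutenberg  # doctest: +SKIP
+        >>> len(gutenberg.fileids())  # doctest: +SKIP
+        18
+        >>> gutenberg.words()[:4]  # doctest: +SKIP
+        ['[', 'Emma', 'by', 'Jane']
+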
For most corpora, these methods define one or more + selection arguments, such as ``fileids`` or ``categories``, which can + be used to select which portion of the corpus should be returned. + """ + + def __init__(self, root, fileids, encoding="utf8", tagset=None): + """ + :type root: PathPointer or str + :param root: A path pointer identifying the root directory for + this corpus. If a string is specified, then it will be + converted to a ``PathPointer`` automatically. + :param fileids: A list of the files that make up this corpus. + This list can either be specified explicitly, as a list of + strings; or implicitly, as a regular expression over file + paths. The absolute path for each file will be constructed + by joining the reader's root to each file name. + :param encoding: The default unicode encoding for the files + that make up the corpus. The value of ``encoding`` can be any + of the following: + + - A string: ``encoding`` is the encoding name for all files. + - A dictionary: ``encoding[file_id]`` is the encoding + name for the file whose identifier is ``file_id``. If + ``file_id`` is not in ``encoding``, then the file + contents will be processed using non-unicode byte strings. + - A list: ``encoding`` should be a list of ``(regexp, encoding)`` + tuples. The encoding for a file whose identifier is ``file_id`` + will be the ``encoding`` value for the first tuple whose + ``regexp`` matches the ``file_id``. If no tuple's ``regexp`` + matches the ``file_id``, the file contents will be processed + using non-unicode byte strings. + - None: the file contents of all files will be + processed using non-unicode byte strings. + :param tagset: The name of the tagset used by this corpus, to be used + for normalizing or converting the POS tags returned by the + ``tagged_...()`` methods. + """ + # Convert the root to a path pointer, if necessary. + if isinstance(root, str) and not isinstance(root, PathPointer): + m = re.match(r"(.*\.zip)/?(.*)$|", root) + zipfile, zipentry = m.groups() + if zipfile: + root = ZipFilePathPointer(zipfile, zipentry) + else: + root = FileSystemPathPointer(root) + elif not isinstance(root, PathPointer): + raise TypeError("CorpusReader: expected a string or a PathPointer") + + # If `fileids` is a regexp, then expand it. + if isinstance(fileids, str): + fileids = find_corpus_fileids(root, fileids) + + self._fileids = fileids + """A list of the relative paths for the fileids that make up + this corpus.""" + + self._root = root + """The root directory for this corpus.""" + + self._readme = "README" + self._license = "LICENSE" + self._citation = "citation.bib" + + # If encoding was specified as a list of regexps, then convert + # it to a dictionary. + if isinstance(encoding, list): + encoding_dict = {} + for fileid in self._fileids: + for x in encoding: + (regexp, enc) = x + if re.match(regexp, fileid): + encoding_dict[fileid] = enc + break + encoding = encoding_dict + + self._encoding = encoding + """The default unicode encoding for the fileids that make up + this corpus. If ``encoding`` is None, then the file + contents are processed using byte strings.""" + self._tagset = tagset + + def __repr__(self): + if isinstance(self._root, ZipFilePathPointer): + path = f"{self._root.zipfile.filename}/{self._root.entry}" + else: + path = "%s" % self._root.path + return f"<{self.__class__.__name__} in {path!r}>" + + def ensure_loaded(self): + """ + Load this corpus (if it has not already been loaded). 
This is + used by LazyCorpusLoader as a simple method that can be used to + make sure a corpus is loaded -- e.g., in case a user wants to + do help(some_corpus). + """ + pass # no need to actually do anything. + + def readme(self): + """ + Return the contents of the corpus README file, if it exists. + """ + with self.open(self._readme) as f: + return f.read() + + def license(self): + """ + Return the contents of the corpus LICENSE file, if it exists. + """ + with self.open(self._license) as f: + return f.read() + + def citation(self): + """ + Return the contents of the corpus citation.bib file, if it exists. + """ + with self.open(self._citation) as f: + return f.read() + + def fileids(self): + """ + Return a list of file identifiers for the fileids that make up + this corpus. + """ + return self._fileids + + def abspath(self, fileid): + """ + Return the absolute path for the given file. + + :type fileid: str + :param fileid: The file identifier for the file whose path + should be returned. + :rtype: PathPointer + """ + return self._root.join(fileid) + + def abspaths(self, fileids=None, include_encoding=False, include_fileid=False): + """ + Return a list of the absolute paths for all fileids in this corpus; + or for the given list of fileids, if specified. + + :type fileids: None or str or list + :param fileids: Specifies the set of fileids for which paths should + be returned. Can be None, for all fileids; a list of + file identifiers, for a specified set of fileids; or a single + file identifier, for a single file. Note that the return + value is always a list of paths, even if ``fileids`` is a + single file identifier. + + :param include_encoding: If true, then return a list of + ``(path_pointer, encoding)`` tuples. + + :rtype: list(PathPointer) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + paths = [self._root.join(f) for f in fileids] + + if include_encoding and include_fileid: + return list(zip(paths, [self.encoding(f) for f in fileids], fileids)) + elif include_fileid: + return list(zip(paths, fileids)) + elif include_encoding: + return list(zip(paths, [self.encoding(f) for f in fileids])) + else: + return paths + + def raw(self, fileids=None): + """ + :param fileids: A list specifying the fileids that should be used. + :return: the given file(s) as a single string. + :rtype: str + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + contents = [] + for f in fileids: + with self.open(f) as fp: + contents.append(fp.read()) + return concat(contents) + + def open(self, file): + """ + Return an open stream that can be used to read the given file. + If the file's encoding is not None, then the stream will + automatically decode the file's contents into unicode. + + :param file: The file identifier of the file to read. + """ + encoding = self.encoding(file) + stream = self._root.join(file).open(encoding) + return stream + + def encoding(self, file): + """ + Return the unicode encoding for the given corpus file, if known. + If the encoding is unknown, or if the given file should be + processed using byte strings (str), then return None. + """ + if isinstance(self._encoding, dict): + return self._encoding.get(file) + else: + return self._encoding + + def _get_root(self): + return self._root + + root = property( + _get_root, + doc=""" + The directory where this corpus is stored. 
+ + :type: PathPointer""", + ) + + +###################################################################### +# { Corpora containing categorized items +###################################################################### + + +class CategorizedCorpusReader: + """ + A mixin class used to aid in the implementation of corpus readers + for categorized corpora. This class defines the method + ``categories()``, which returns a list of the categories for the + corpus or for a specified set of fileids; and overrides ``fileids()`` + to take a ``categories`` argument, restricting the set of fileids to + be returned. + + Subclasses are expected to: + + - Call ``__init__()`` to set up the mapping. + + - Override all view methods to accept a ``categories`` parameter, + which can be used *instead* of the ``fileids`` parameter, to + select which fileids should be included in the returned view. + """ + + def __init__(self, kwargs): + """ + Initialize this mapping based on keyword arguments, as + follows: + + - cat_pattern: A regular expression pattern used to find the + category for each file identifier. The pattern will be + applied to each file identifier, and the first matching + group will be used as the category label for that file. + + - cat_map: A dictionary, mapping from file identifiers to + category labels. + + - cat_file: The name of a file that contains the mapping + from file identifiers to categories. The argument + ``cat_delimiter`` can be used to specify a delimiter. + + The corresponding argument will be deleted from ``kwargs``. If + more than one argument is specified, an exception will be + raised. + """ + self._f2c = None #: file-to-category mapping + self._c2f = None #: category-to-file mapping + + self._pattern = None #: regexp specifying the mapping + self._map = None #: dict specifying the mapping + self._file = None #: fileid of file containing the mapping + self._delimiter = None #: delimiter for ``self._file`` + + if "cat_pattern" in kwargs: + self._pattern = kwargs["cat_pattern"] + del kwargs["cat_pattern"] + elif "cat_map" in kwargs: + self._map = kwargs["cat_map"] + del kwargs["cat_map"] + elif "cat_file" in kwargs: + self._file = kwargs["cat_file"] + del kwargs["cat_file"] + if "cat_delimiter" in kwargs: + self._delimiter = kwargs["cat_delimiter"] + del kwargs["cat_delimiter"] + else: + raise ValueError( + "Expected keyword argument cat_pattern or " "cat_map or cat_file." + ) + + if "cat_pattern" in kwargs or "cat_map" in kwargs or "cat_file" in kwargs: + raise ValueError( + "Specify exactly one of: cat_pattern, " "cat_map, cat_file." 
+ ) + + def _init(self): + self._f2c = defaultdict(set) + self._c2f = defaultdict(set) + + if self._pattern is not None: + for file_id in self._fileids: + category = re.match(self._pattern, file_id).group(1) + self._add(file_id, category) + + elif self._map is not None: + for (file_id, categories) in self._map.items(): + for category in categories: + self._add(file_id, category) + + elif self._file is not None: + with self.open(self._file) as f: + for line in f.readlines(): + line = line.strip() + file_id, categories = line.split(self._delimiter, 1) + if file_id not in self.fileids(): + raise ValueError( + "In category mapping file %s: %s " + "not found" % (self._file, file_id) + ) + for category in categories.split(self._delimiter): + self._add(file_id, category) + + def _add(self, file_id, category): + self._f2c[file_id].add(category) + self._c2f[category].add(file_id) + + def categories(self, fileids=None): + """ + Return a list of the categories that are defined for this corpus, + or for the file(s) if it is given. + """ + if self._f2c is None: + self._init() + if fileids is None: + return sorted(self._c2f) + if isinstance(fileids, str): + fileids = [fileids] + return sorted(set.union(*(self._f2c[d] for d in fileids))) + + def fileids(self, categories=None): + """ + Return a list of file identifiers for the files that make up + this corpus, or that make up the given category(s) if specified. + """ + if categories is None: + return super().fileids() + elif isinstance(categories, str): + if self._f2c is None: + self._init() + if categories in self._c2f: + return sorted(self._c2f[categories]) + else: + raise ValueError("Category %s not found" % categories) + else: + if self._f2c is None: + self._init() + return sorted(set.union(*(self._c2f[c] for c in categories))) + + def _resolve(self, fileids, categories): + if fileids is not None and categories is not None: + raise ValueError("Specify fileids or categories, not both") + if categories is not None: + return self.fileids(categories) + else: + return fileids + + def raw(self, fileids=None, categories=None): + return super().raw(self._resolve(fileids, categories)) + + def words(self, fileids=None, categories=None): + return super().words(self._resolve(fileids, categories)) + + def sents(self, fileids=None, categories=None): + return super().sents(self._resolve(fileids, categories)) + + def paras(self, fileids=None, categories=None): + return super().paras(self._resolve(fileids, categories)) + + +###################################################################### +# { Treebank readers +###################################################################### + +# [xx] is it worth it to factor this out? +class SyntaxCorpusReader(CorpusReader): + """ + An abstract base class for reading corpora consisting of + syntactically parsed text. Subclasses should define: + + - ``__init__``, which specifies the location of the corpus + and a method for detecting the sentence blocks in corpus files. + - ``_read_block``, which reads a block from the input stream. + - ``_word``, which takes a block and returns a list of list of words. + - ``_tag``, which takes a block and returns a list of list of tagged + words. + - ``_parse``, which takes a block and returns a list of parsed + sentences. 
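+
+    A concrete subclass can then be used like any other corpus reader.
+    For example (an illustrative sketch; the root directory and the
+    ``.mrg`` file pattern are placeholders for a locally available
+    treebank in Penn-style bracketed format, so the doctest is skipped):
+
+        >>> from nltk.corpus.reader import BracketParseCorpusReader  # doctest: +SKIP
+        >>> reader = BracketParseCorpusReader('treebank/combined', r'.*\.mrg')  # doctest: +SKIP
+        >>> reader.words()[:5]  # doctest: +SKIP
+        >>> reader.parsed_sents()[0]  # first sentence as an nltk.tree.Tree  # doctest: +SKIP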
+ """ + + def _parse(self, s): + raise NotImplementedError() + + def _word(self, s): + raise NotImplementedError() + + def _tag(self, s): + raise NotImplementedError() + + def _read_block(self, stream): + raise NotImplementedError() + + def parsed_sents(self, fileids=None): + reader = self._read_parsed_sent_block + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + def reader(stream): + return self._read_tagged_sent_block(stream, tagset) + + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + reader = self._read_sent_block + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + def reader(stream): + return self._read_tagged_word_block(stream, tagset) + + return concat( + [ + StreamBackedCorpusView(fileid, reader, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + def words(self, fileids=None): + return concat( + [ + StreamBackedCorpusView(fileid, self._read_word_block, encoding=enc) + for fileid, enc in self.abspaths(fileids, True) + ] + ) + + # ------------------------------------------------------------ + # { Block Readers + + def _read_word_block(self, stream): + return list(chain.from_iterable(self._read_sent_block(stream))) + + def _read_tagged_word_block(self, stream, tagset=None): + return list(chain.from_iterable(self._read_tagged_sent_block(stream, tagset))) + + def _read_sent_block(self, stream): + return list(filter(None, [self._word(t) for t in self._read_block(stream)])) + + def _read_tagged_sent_block(self, stream, tagset=None): + return list( + filter(None, [self._tag(t, tagset) for t in self._read_block(stream)]) + ) + + def _read_parsed_sent_block(self, stream): + return list(filter(None, [self._parse(t) for t in self._read_block(stream)])) + + # } End of Block Readers + # ------------------------------------------------------------ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py new file mode 100644 index 0000000000000000000000000000000000000000..429f52a65034f6faee531430a4b1d08aabe20103 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py @@ -0,0 +1,218 @@ +# Natural Language Toolkit: BCP-47 language tags +# +# Copyright (C) 2022-2023 NLTK Project +# Author: Eric Kafe +# URL: +# For license information, see LICENSE.TXT + +import re +from warnings import warn +from xml.etree import ElementTree as et + +from nltk.corpus.reader import CorpusReader + + +class BCP47CorpusReader(CorpusReader): + """ + Parse BCP-47 composite language tags + + Supports all the main subtags, and the 'u-sd' extension: + + >>> from nltk.corpus import bcp47 + >>> bcp47.name('oc-gascon-u-sd-fr64') + 'Occitan (post 1500): Gascon: Pyrénées-Atlantiques' + + Can load a conversion table to Wikidata Q-codes: + >>> bcp47.load_wiki_q() + >>> bcp47.wiki_q['en-GI-spanglis'] + 'Q79388' + + """ + + def __init__(self, root, fileids): + """Read the BCP-47 database""" + super().__init__(root, fileids) + self.langcode = {} + with self.open("iana/language-subtag-registry.txt") as fp: + self.db = self.data_dict(fp.read().split("%%\n")) + with 
self.open("cldr/common-subdivisions-en.xml") as fp: + self.subdiv = self.subdiv_dict( + et.parse(fp).iterfind("localeDisplayNames/subdivisions/subdivision") + ) + self.morphology() + + def load_wiki_q(self): + """Load conversion table to Wikidata Q-codes (only if needed)""" + with self.open("cldr/tools-cldr-rdf-external-entityToCode.tsv") as fp: + self.wiki_q = self.wiki_dict(fp.read().strip().split("\n")[1:]) + + def wiki_dict(self, lines): + """Convert Wikidata list of Q-codes to a BCP-47 dictionary""" + return { + pair[1]: pair[0].split("/")[-1] + for pair in [line.strip().split("\t") for line in lines] + } + + def subdiv_dict(self, subdivs): + """Convert the CLDR subdivisions list to a dictionary""" + return {sub.attrib["type"]: sub.text for sub in subdivs} + + def morphology(self): + self.casing = { + "language": str.lower, + "extlang": str.lower, + "script": str.title, + "region": str.upper, + "variant": str.lower, + } + dig = "[0-9]" + low = "[a-z]" + up = "[A-Z]" + alnum = "[a-zA-Z0-9]" + self.format = { + "language": re.compile(f"{low*3}?"), + "extlang": re.compile(f"{low*3}"), + "script": re.compile(f"{up}{low*3}"), + "region": re.compile(f"({up*2})|({dig*3})"), + "variant": re.compile(f"{alnum*4}{(alnum+'?')*4}"), + "singleton": re.compile(f"{low}"), + } + + def data_dict(self, records): + """Convert the BCP-47 language subtag registry to a dictionary""" + self.version = records[0].replace("File-Date:", "").strip() + dic = {} + dic["deprecated"] = {} + for label in [ + "language", + "extlang", + "script", + "region", + "variant", + "redundant", + "grandfathered", + ]: + dic["deprecated"][label] = {} + for record in records[1:]: + fields = [field.split(": ") for field in record.strip().split("\n")] + typ = fields[0][1] + tag = fields[1][1] + if typ not in dic: + dic[typ] = {} + subfields = {} + for field in fields[2:]: + if len(field) == 2: + [key, val] = field + if key not in subfields: + subfields[key] = [val] + else: # multiple value + subfields[key].append(val) + else: # multiline field + subfields[key][-1] += " " + field[0].strip() + if ( + "Deprecated" not in record + and typ == "language" + and key == "Description" + ): + self.langcode[subfields[key][-1]] = tag + for key in subfields: + if len(subfields[key]) == 1: # single value + subfields[key] = subfields[key][0] + if "Deprecated" in record: + dic["deprecated"][typ][tag] = subfields + else: + dic[typ][tag] = subfields + return dic + + def val2str(self, val): + """Return only first value""" + if type(val) == list: + # val = "/".join(val) # Concatenate all values + val = val[0] + return val + + def lang2str(self, lg_record): + """Concatenate subtag values""" + name = f"{lg_record['language']}" + for label in ["extlang", "script", "region", "variant", "extension"]: + if label in lg_record: + name += f": {lg_record[label]}" + return name + + def parse_tag(self, tag): + """Convert a BCP-47 tag to a dictionary of labelled subtags""" + subtags = tag.split("-") + lang = {} + labels = ["language", "extlang", "script", "region", "variant", "variant"] + while subtags and labels: + subtag = subtags.pop(0) + found = False + while labels: + label = labels.pop(0) + subtag = self.casing[label](subtag) + if self.format[label].fullmatch(subtag): + if subtag in self.db[label]: + found = True + valstr = self.val2str(self.db[label][subtag]["Description"]) + if label == "variant" and label in lang: + lang[label] += ": " + valstr + else: + lang[label] = valstr + break + elif subtag in self.db["deprecated"][label]: + found = True + note = 
f"The {subtag!r} {label} code is deprecated" + if "Preferred-Value" in self.db["deprecated"][label][subtag]: + prefer = self.db["deprecated"][label][subtag][ + "Preferred-Value" + ] + note += f"', prefer '{self.val2str(prefer)}'" + lang[label] = self.val2str( + self.db["deprecated"][label][subtag]["Description"] + ) + warn(note) + break + if not found: + if subtag == "u" and subtags[0] == "sd": # CLDR regional subdivisions + sd = subtags[1] + if sd in self.subdiv: + ext = self.subdiv[sd] + else: + ext = f"" + else: # other extension subtags are not supported yet + ext = f"{subtag}{''.join(['-'+ext for ext in subtags])}".lower() + if not self.format["singleton"].fullmatch(subtag): + ext = f"" + warn(ext) + lang["extension"] = ext + subtags = [] + return lang + + def name(self, tag): + """ + Convert a BCP-47 tag to a colon-separated string of subtag names + + >>> from nltk.corpus import bcp47 + >>> bcp47.name('ca-Latn-ES-valencia') + 'Catalan: Latin: Spain: Valencian' + + """ + for label in ["redundant", "grandfathered"]: + val = None + if tag in self.db[label]: + val = f"{self.db[label][tag]['Description']}" + note = f"The {tag!r} code is {label}" + elif tag in self.db["deprecated"][label]: + val = f"{self.db['deprecated'][label][tag]['Description']}" + note = f"The {tag!r} code is {label} and deprecated" + if "Preferred-Value" in self.db["deprecated"][label][tag]: + prefer = self.db["deprecated"][label][tag]["Preferred-Value"] + note += f", prefer {self.val2str(prefer)!r}" + if val: + warn(note) + return val + try: + return self.lang2str(self.parse_tag(tag)) + except: + warn(f"Tag {tag!r} was not recognized") + return None diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py new file mode 100644 index 0000000000000000000000000000000000000000..e7128bf843b5c24a59b10d8a0cf1f689592bae52 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py @@ -0,0 +1,265 @@ +# Natural Language Toolkit: Plaintext Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +"""Corpus reader for the XML version of the British National Corpus.""" + +from nltk.corpus.reader.util import concat +from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader, XMLCorpusView + + +class BNCCorpusReader(XMLCorpusReader): + r"""Corpus reader for the XML version of the British National Corpus. + + For access to the complete XML data structure, use the ``xml()`` + method. For access to simple word lists and tagged word lists, use + ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``. + + You can obtain the full version of the BNC corpus at + https://www.ota.ox.ac.uk/desc/2554 + + If you extracted the archive to a directory called `BNC`, then you can + instantiate the reader as:: + + BNCCorpusReader(root='BNC/Texts/', fileids=r'[A-K]/\w*/\w*\.xml') + + """ + + def __init__(self, root, fileids, lazy=True): + XMLCorpusReader.__init__(self, root, fileids) + self._lazy = lazy + + def words(self, fileids=None, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of words + and punctuation symbols. + :rtype: list(str) + + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. 
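+
+        For example (an illustrative sketch; it assumes the BNC XML texts
+        have been extracted locally as described in the class docstring,
+        so the doctest is skipped):
+
+            >>> bnc = BNCCorpusReader(root='BNC/Texts/', fileids=r'[A-K]/\w*/\w*\.xml')  # doctest: +SKIP
+            >>> bnc.words(stem=True)[:5]  # doctest: +SKIP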
+ """ + return self._views(fileids, False, None, strip_space, stem) + + def tagged_words(self, fileids=None, c5=False, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. + :rtype: list(tuple(str,str)) + + :param c5: If true, then the tags used will be the more detailed + c5 tags. Otherwise, the simplified tags will be used. + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. + """ + tag = "c5" if c5 else "pos" + return self._views(fileids, False, tag, strip_space, stem) + + def sents(self, fileids=None, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of + sentences or utterances, each encoded as a list of word + strings. + :rtype: list(list(str)) + + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. + """ + return self._views(fileids, True, None, strip_space, stem) + + def tagged_sents(self, fileids=None, c5=False, strip_space=True, stem=False): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. + :rtype: list(list(tuple(str,str))) + + :param c5: If true, then the tags used will be the more detailed + c5 tags. Otherwise, the simplified tags will be used. + :param strip_space: If true, then strip trailing spaces from + word tokens. Otherwise, leave the spaces on the tokens. + :param stem: If true, then use word stems instead of word strings. + """ + tag = "c5" if c5 else "pos" + return self._views( + fileids, sent=True, tag=tag, strip_space=strip_space, stem=stem + ) + + def _views(self, fileids=None, sent=False, tag=False, strip_space=True, stem=False): + """A helper function that instantiates BNCWordViews or the list of words/sentences.""" + f = BNCWordView if self._lazy else self._words + return concat( + [ + f(fileid, sent, tag, strip_space, stem) + for fileid in self.abspaths(fileids) + ] + ) + + def _words(self, fileid, bracket_sent, tag, strip_space, stem): + """ + Helper used to implement the view methods -- returns a list of + words or a list of sentences, optionally tagged. + + :param fileid: The name of the underlying file. + :param bracket_sent: If true, include sentence bracketing. + :param tag: The name of the tagset to use, or None for no tags. + :param strip_space: If true, strip spaces from word tokens. + :param stem: If true, then substitute stems for words. + """ + result = [] + + xmldoc = ElementTree.parse(fileid).getroot() + for xmlsent in xmldoc.findall(".//s"): + sent = [] + for xmlword in _all_xmlwords_in(xmlsent): + word = xmlword.text + if not word: + word = "" # fixes issue 337? 
+ if strip_space or stem: + word = word.strip() + if stem: + word = xmlword.get("hw", word) + if tag == "c5": + word = (word, xmlword.get("c5")) + elif tag == "pos": + word = (word, xmlword.get("pos", xmlword.get("c5"))) + sent.append(word) + if bracket_sent: + result.append(BNCSentence(xmlsent.attrib["n"], sent)) + else: + result.extend(sent) + + assert None not in result + return result + + +def _all_xmlwords_in(elt, result=None): + if result is None: + result = [] + for child in elt: + if child.tag in ("c", "w"): + result.append(child) + else: + _all_xmlwords_in(child, result) + return result + + +class BNCSentence(list): + """ + A list of words, augmented by an attribute ``num`` used to record + the sentence identifier (the ``n`` attribute from the XML). + """ + + def __init__(self, num, items): + self.num = num + list.__init__(self, items) + + +class BNCWordView(XMLCorpusView): + """ + A stream backed corpus view specialized for use with the BNC corpus. + """ + + tags_to_ignore = { + "pb", + "gap", + "vocal", + "event", + "unclear", + "shift", + "pause", + "align", + } + """These tags are ignored. For their description refer to the + technical documentation, for example, + http://www.natcorp.ox.ac.uk/docs/URG/ref-vocal.html + + """ + + def __init__(self, fileid, sent, tag, strip_space, stem): + """ + :param fileid: The name of the underlying file. + :param sent: If true, include sentence bracketing. + :param tag: The name of the tagset to use, or None for no tags. + :param strip_space: If true, strip spaces from word tokens. + :param stem: If true, then substitute stems for words. + """ + if sent: + tagspec = ".*/s" + else: + tagspec = ".*/s/(.*/)?(c|w)" + self._sent = sent + self._tag = tag + self._strip_space = strip_space + self._stem = stem + + self.title = None #: Title of the document. + self.author = None #: Author of the document. + self.editor = None #: Editor + self.resps = None #: Statement of responsibility + + XMLCorpusView.__init__(self, fileid, tagspec) + + # Read in a tasty header. + self._open() + self.read_block(self._stream, ".*/teiHeader$", self.handle_header) + self.close() + + # Reset tag context. + self._tag_context = {0: ()} + + def handle_header(self, elt, context): + # Set up some metadata! + titles = elt.findall("titleStmt/title") + if titles: + self.title = "\n".join(title.text.strip() for title in titles) + + authors = elt.findall("titleStmt/author") + if authors: + self.author = "\n".join(author.text.strip() for author in authors) + + editors = elt.findall("titleStmt/editor") + if editors: + self.editor = "\n".join(editor.text.strip() for editor in editors) + + resps = elt.findall("titleStmt/respStmt") + if resps: + self.resps = "\n\n".join( + "\n".join(resp_elt.text.strip() for resp_elt in resp) for resp in resps + ) + + def handle_elt(self, elt, context): + if self._sent: + return self.handle_sent(elt) + else: + return self.handle_word(elt) + + def handle_word(self, elt): + word = elt.text + if not word: + word = "" # fixes issue 337? 
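        # Mirrors the per-word handling in BNCCorpusReader._words above, but is
        # applied lazily, one XML element at a time, by this streaming view.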
+ if self._strip_space or self._stem: + word = word.strip() + if self._stem: + word = elt.get("hw", word) + if self._tag == "c5": + word = (word, elt.get("c5")) + elif self._tag == "pos": + word = (word, elt.get("pos", elt.get("c5"))) + return word + + def handle_sent(self, elt): + sent = [] + for child in elt: + if child.tag in ("mw", "hi", "corr", "trunc"): + sent += [self.handle_word(w) for w in child] + elif child.tag in ("w", "c"): + sent.append(self.handle_word(child)) + elif child.tag not in self.tags_to_ignore: + raise ValueError("Unexpected element %s" % child.tag) + return BNCSentence(elt.attrib["n"], sent) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py new file mode 100644 index 0000000000000000000000000000000000000000..c5d3ff67b94dcc6b476e7125c62bbe41e03603f1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py @@ -0,0 +1,237 @@ +# Natural Language Toolkit: Penn Treebank Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +""" +Corpus reader for corpora that consist of parenthesis-delineated parse trees. +""" + +import sys + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag +from nltk.tree import Tree + +# we use [^\s()]+ instead of \S+? to avoid matching () +SORTTAGWRD = re.compile(r"\((\d+) ([^\s()]+) ([^\s()]+)\)") +TAGWORD = re.compile(r"\(([^\s()]+) ([^\s()]+)\)") +WORD = re.compile(r"\([^\s()]+ ([^\s()]+)\)") +EMPTY_BRACKETS = re.compile(r"\s*\(\s*\(") + + +class BracketParseCorpusReader(SyntaxCorpusReader): + """ + Reader for corpora that consist of parenthesis-delineated parse trees, + like those found in the "combined" section of the Penn Treebank, + e.g. "(S (NP (DT the) (JJ little) (NN dog)) (VP (VBD barked)))". + + """ + + def __init__( + self, + root, + fileids, + comment_char=None, + detect_blocks="unindented_paren", + encoding="utf8", + tagset=None, + ): + """ + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + :param comment_char: The character which can appear at the start of + a line to indicate that the rest of the line is a comment. + :param detect_blocks: The method that is used to find blocks + in the corpus; can be 'unindented_paren' (every unindented + parenthesis starts a new parse) or 'sexpr' (brackets are + matched). + :param tagset: The name of the tagset used by this corpus, to be used + for normalizing or converting the POS tags returned by the + ``tagged_...()`` methods. + """ + SyntaxCorpusReader.__init__(self, root, fileids, encoding) + self._comment_char = comment_char + self._detect_blocks = detect_blocks + self._tagset = tagset + + def _read_block(self, stream): + if self._detect_blocks == "sexpr": + return read_sexpr_block(stream, comment_char=self._comment_char) + elif self._detect_blocks == "blankline": + return read_blankline_block(stream) + elif self._detect_blocks == "unindented_paren": + # Tokens start with unindented left parens. + toks = read_regexp_block(stream, start_re=r"^\(") + # Strip any comments out of the tokens. 
+ if self._comment_char: + toks = [ + re.sub("(?m)^%s.*" % re.escape(self._comment_char), "", tok) + for tok in toks + ] + return toks + else: + assert 0, "bad block type" + + def _normalize(self, t): + # Replace leaves of the form (!), (,), with (! !), (, ,) + t = re.sub(r"\((.)\)", r"(\1 \1)", t) + # Replace leaves of the form (tag word root) with (tag word) + t = re.sub(r"\(([^\s()]+) ([^\s()]+) [^\s()]+\)", r"(\1 \2)", t) + return t + + def _parse(self, t): + try: + tree = Tree.fromstring(self._normalize(t)) + # If there's an empty node at the top, strip it off + if tree.label() == "" and len(tree) == 1: + return tree[0] + else: + return tree + + except ValueError as e: + sys.stderr.write("Bad tree detected; trying to recover...\n") + # Try to recover, if we can: + if e.args == ("mismatched parens",): + for n in range(1, 5): + try: + v = Tree(self._normalize(t + ")" * n)) + sys.stderr.write( + " Recovered by adding %d close " "paren(s)\n" % n + ) + return v + except ValueError: + pass + # Try something else: + sys.stderr.write(" Recovered by returning a flat parse.\n") + # sys.stderr.write(' '.join(t.split())+'\n') + return Tree("S", self._tag(t)) + + def _tag(self, t, tagset=None): + tagged_sent = [(w, p) for (p, w) in TAGWORD.findall(self._normalize(t))] + if tagset and tagset != self._tagset: + tagged_sent = [ + (w, map_tag(self._tagset, tagset, p)) for (w, p) in tagged_sent + ] + return tagged_sent + + def _word(self, t): + return WORD.findall(self._normalize(t)) + + +class CategorizedBracketParseCorpusReader( + CategorizedCorpusReader, BracketParseCorpusReader +): + """ + A reader for parsed corpora whose documents are + divided into categories based on their file identifiers. + @author: Nathan Schneider + """ + + def __init__(self, *args, **kwargs): + """ + Initialize the corpus reader. Categorization arguments + (C{cat_pattern}, C{cat_map}, and C{cat_file}) are passed to + the L{CategorizedCorpusReader constructor + }. The remaining arguments + are passed to the L{BracketParseCorpusReader constructor + }. + """ + CategorizedCorpusReader.__init__(self, kwargs) + BracketParseCorpusReader.__init__(self, *args, **kwargs) + + def tagged_words(self, fileids=None, categories=None, tagset=None): + return super().tagged_words(self._resolve(fileids, categories), tagset) + + def tagged_sents(self, fileids=None, categories=None, tagset=None): + return super().tagged_sents(self._resolve(fileids, categories), tagset) + + def tagged_paras(self, fileids=None, categories=None, tagset=None): + return super().tagged_paras(self._resolve(fileids, categories), tagset) + + def parsed_words(self, fileids=None, categories=None): + return super().parsed_words(self._resolve(fileids, categories)) + + def parsed_sents(self, fileids=None, categories=None): + return super().parsed_sents(self._resolve(fileids, categories)) + + def parsed_paras(self, fileids=None, categories=None): + return super().parsed_paras(self._resolve(fileids, categories)) + + +class AlpinoCorpusReader(BracketParseCorpusReader): + """ + Reader for the Alpino Dutch Treebank. + This corpus has a lexical breakdown structure embedded, as read by `_parse` + Unfortunately this puts punctuation and some other words out of the sentence + order in the xml element tree. This is no good for `tag_` and `word_` + `_tag` and `_word` will be overridden to use a non-default new parameter 'ordered' + to the overridden _normalize function. The _parse function can then remain + untouched. 
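    A minimal usage sketch (assumes the ``alpino`` corpus has been downloaded
    into ``nltk_data``; output omitted):

        >>> from nltk.corpus import alpino   # doctest: +SKIP
        >>> alpino.words()[:5]               # doctest: +SKIP
        >>> alpino.tagged_sents()[0][:3]     # doctest: +SKIP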
+ """ + + def __init__(self, root, encoding="ISO-8859-1", tagset=None): + BracketParseCorpusReader.__init__( + self, + root, + r"alpino\.xml", + detect_blocks="blankline", + encoding=encoding, + tagset=tagset, + ) + + def _normalize(self, t, ordered=False): + """Normalize the xml sentence element in t. + The sentence elements , although embedded in a few overall + xml elements, are separated by blank lines. That's how the reader can + deliver them one at a time. + Each sentence has a few category subnodes that are of no use to us. + The remaining word nodes may or may not appear in the proper order. + Each word node has attributes, among which: + - begin : the position of the word in the sentence + - pos : Part of Speech: the Tag + - word : the actual word + The return value is a string with all xml elementes replaced by + clauses: either a cat clause with nested clauses, or a word clause. + The order of the bracket clauses closely follows the xml. + If ordered == True, the word clauses include an order sequence number. + If ordered == False, the word clauses only have pos and word parts. + """ + if t[:10] != "', r"(\1", t) + if ordered: + t = re.sub( + r' ', + r"(\1 \2 \3)", + t, + ) + else: + t = re.sub(r' ', r"(\1 \2)", t) + t = re.sub(r" ", r")", t) + t = re.sub(r".*", r"", t) + t = re.sub(r"", r"", t) + return t + + def _tag(self, t, tagset=None): + tagged_sent = [ + (int(o), w, p) + for (o, p, w) in SORTTAGWRD.findall(self._normalize(t, ordered=True)) + ] + tagged_sent.sort() + if tagset and tagset != self._tagset: + tagged_sent = [ + (w, map_tag(self._tagset, tagset, p)) for (o, w, p) in tagged_sent + ] + else: + tagged_sent = [(w, p) for (o, w, p) in tagged_sent] + return tagged_sent + + def _word(self, t): + """Return a correctly ordered list if words""" + tagged_sent = self._tag(t) + return [w for (w, p) in tagged_sent] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/categorized_sents.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/categorized_sents.py new file mode 100644 index 0000000000000000000000000000000000000000..92bfe47210e9db56aa1cde4fe27a41f4133909c1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/categorized_sents.py @@ -0,0 +1,168 @@ +# Natural Language Toolkit: Categorized Sentences Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader structured for corpora that contain one instance on each row. +This CorpusReader is specifically used for the Subjectivity Dataset and the +Sentence Polarity Dataset. + +- Subjectivity Dataset information - + +Authors: Bo Pang and Lillian Lee. +Url: https://www.cs.cornell.edu/people/pabo/movie-review-data + +Distributed with permission. + +Related papers: + +- Bo Pang and Lillian Lee. "A Sentimental Education: Sentiment Analysis Using + Subjectivity Summarization Based on Minimum Cuts". Proceedings of the ACL, + 2004. + +- Sentence Polarity Dataset information - + +Authors: Bo Pang and Lillian Lee. +Url: https://www.cs.cornell.edu/people/pabo/movie-review-data + +Related papers: + +- Bo Pang and Lillian Lee. "Seeing stars: Exploiting class relationships for + sentiment categorization with respect to rating scales". Proceedings of the + ACL, 2005. 
+""" + +from nltk.corpus.reader.api import * +from nltk.tokenize import * + + +class CategorizedSentencesCorpusReader(CategorizedCorpusReader, CorpusReader): + """ + A reader for corpora in which each row represents a single instance, mainly + a sentence. Istances are divided into categories based on their file identifiers + (see CategorizedCorpusReader). + Since many corpora allow rows that contain more than one sentence, it is + possible to specify a sentence tokenizer to retrieve all sentences instead + than all rows. + + Examples using the Subjectivity Dataset: + + >>> from nltk.corpus import subjectivity + >>> subjectivity.sents()[23] # doctest: +NORMALIZE_WHITESPACE + ['television', 'made', 'him', 'famous', ',', 'but', 'his', 'biggest', 'hits', + 'happened', 'off', 'screen', '.'] + >>> subjectivity.categories() + ['obj', 'subj'] + >>> subjectivity.words(categories='subj') + ['smart', 'and', 'alert', ',', 'thirteen', ...] + + Examples using the Sentence Polarity Dataset: + + >>> from nltk.corpus import sentence_polarity + >>> sentence_polarity.sents() # doctest: +NORMALIZE_WHITESPACE + [['simplistic', ',', 'silly', 'and', 'tedious', '.'], ["it's", 'so', 'laddish', + 'and', 'juvenile', ',', 'only', 'teenage', 'boys', 'could', 'possibly', 'find', + 'it', 'funny', '.'], ...] + >>> sentence_polarity.categories() + ['neg', 'pos'] + """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, + root, + fileids, + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=None, + encoding="utf8", + **kwargs + ): + """ + :param root: The root directory for the corpus. + :param fileids: a list or regexp specifying the fileids in the corpus. + :param word_tokenizer: a tokenizer for breaking sentences or paragraphs + into words. Default: `WhitespaceTokenizer` + :param sent_tokenizer: a tokenizer for breaking paragraphs into sentences. + :param encoding: the encoding that should be used to read the corpus. + :param kwargs: additional parameters passed to CategorizedCorpusReader. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + CategorizedCorpusReader.__init__(self, kwargs) + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + + def sents(self, fileids=None, categories=None): + """ + Return all sentences in the corpus or in the specified file(s). + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. + :param categories: a list specifying the categories whose sentences have + to be returned. + :return: the given file(s) as a list of sentences. + Each sentence is tokenized using the specified word_tokenizer. + :rtype: list(list(str)) + """ + fileids = self._resolve(fileids, categories) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def words(self, fileids=None, categories=None): + """ + Return all words and punctuation symbols in the corpus or in the specified + file(s). + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :param categories: a list specifying the categories whose words have to + be returned. + :return: the given file(s) as a list of words and punctuation symbols. 
+ :rtype: list(str) + """ + fileids = self._resolve(fileids, categories) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_sent_block(self, stream): + sents = [] + for i in range(20): # Read 20 lines at a time. + line = stream.readline() + if not line: + continue + if self._sent_tokenizer: + sents.extend( + [ + self._word_tokenizer.tokenize(sent) + for sent in self._sent_tokenizer.tokenize(line) + ] + ) + else: + sents.append(self._word_tokenizer.tokenize(line)) + return sents + + def _read_word_block(self, stream): + words = [] + for sent in self._read_sent_block(stream): + words.extend(sent) + return words diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/childes.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/childes.py new file mode 100644 index 0000000000000000000000000000000000000000..115ccfb927f7bb4d217670f0cd52a55d64563e9c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/childes.py @@ -0,0 +1,630 @@ +# CHILDES XML Corpus Reader + +# Copyright (C) 2001-2023 NLTK Project +# Author: Tomonori Nagano +# Alexis Dimitriadis +# URL: +# For license information, see LICENSE.TXT + +""" +Corpus reader for the XML version of the CHILDES corpus. +""" + +__docformat__ = "epytext en" + +import re +from collections import defaultdict + +from nltk.corpus.reader.util import concat +from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader +from nltk.util import LazyConcatenation, LazyMap, flatten + +# to resolve the namespace issue +NS = "http://www.talkbank.org/ns/talkbank" + + +class CHILDESCorpusReader(XMLCorpusReader): + """ + Corpus reader for the XML version of the CHILDES corpus. + The CHILDES corpus is available at ``https://childes.talkbank.org/``. The XML + version of CHILDES is located at ``https://childes.talkbank.org/data-xml/``. + Copy the needed parts of the CHILDES XML corpus into the NLTK data directory + (``nltk_data/corpora/CHILDES/``). + + For access to the file text use the usual nltk functions, + ``words()``, ``sents()``, ``tagged_words()`` and ``tagged_sents()``. + """ + + def __init__(self, root, fileids, lazy=True): + XMLCorpusReader.__init__(self, root, fileids) + self._lazy = lazy + + def words( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=False, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of words + :rtype: list(str) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of (stem, index, + dependent_index) + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. 
+ :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = None + pos = False + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def tagged_words( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=False, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of tagged + words and punctuation symbols, encoded as tuples + ``(word,tag)``. + :rtype: list(tuple(str,str)) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of (stem, index, + dependent_index) + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. + :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = None + pos = True + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def sents( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=None, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of sentences or utterances, each + encoded as a list of word strings. + :rtype: list(list(str)) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of ``(str,pos,relation_list)``. + If there is manually-annotated relation info, it will return + tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)`` + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. + :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = True + pos = False + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def tagged_sents( + self, + fileids=None, + speaker="ALL", + stem=False, + relation=None, + strip_space=True, + replace=False, + ): + """ + :return: the given file(s) as a list of + sentences, each encoded as a list of ``(word,tag)`` tuples. 
+ :rtype: list(list(tuple(str,str))) + + :param speaker: If specified, select specific speaker(s) defined + in the corpus. Default is 'ALL' (all participants). Common choices + are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude + researchers) + :param stem: If true, then use word stems instead of word strings. + :param relation: If true, then return tuples of ``(str,pos,relation_list)``. + If there is manually-annotated relation info, it will return + tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)`` + :param strip_space: If true, then strip trailing spaces from word + tokens. Otherwise, leave the spaces on the tokens. + :param replace: If true, then use the replaced (intended) word instead + of the original word (e.g., 'wat' will be replaced with 'watch') + """ + sent = True + pos = True + if not self._lazy: + return [ + self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + for fileid in self.abspaths(fileids) + ] + + get_words = lambda fileid: self._get_words( + fileid, speaker, sent, stem, relation, pos, strip_space, replace + ) + return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids))) + + def corpus(self, fileids=None): + """ + :return: the given file(s) as a dict of ``(corpus_property_key, value)`` + :rtype: list(dict) + """ + if not self._lazy: + return [self._get_corpus(fileid) for fileid in self.abspaths(fileids)] + return LazyMap(self._get_corpus, self.abspaths(fileids)) + + def _get_corpus(self, fileid): + results = dict() + xmldoc = ElementTree.parse(fileid).getroot() + for key, value in xmldoc.items(): + results[key] = value + return results + + def participants(self, fileids=None): + """ + :return: the given file(s) as a dict of + ``(participant_property_key, value)`` + :rtype: list(dict) + """ + if not self._lazy: + return [self._get_participants(fileid) for fileid in self.abspaths(fileids)] + return LazyMap(self._get_participants, self.abspaths(fileids)) + + def _get_participants(self, fileid): + # multidimensional dicts + def dictOfDicts(): + return defaultdict(dictOfDicts) + + xmldoc = ElementTree.parse(fileid).getroot() + # getting participants' data + pat = dictOfDicts() + for participant in xmldoc.findall( + f".//{{{NS}}}Participants/{{{NS}}}participant" + ): + for (key, value) in participant.items(): + pat[participant.get("id")][key] = value + return pat + + def age(self, fileids=None, speaker="CHI", month=False): + """ + :return: the given file(s) as string or int + :rtype: list or int + + :param month: If true, return months instead of year-month-date + """ + if not self._lazy: + return [ + self._get_age(fileid, speaker, month) + for fileid in self.abspaths(fileids) + ] + get_age = lambda fileid: self._get_age(fileid, speaker, month) + return LazyMap(get_age, self.abspaths(fileids)) + + def _get_age(self, fileid, speaker, month): + xmldoc = ElementTree.parse(fileid).getroot() + for pat in xmldoc.findall(f".//{{{NS}}}Participants/{{{NS}}}participant"): + try: + if pat.get("id") == speaker: + age = pat.get("age") + if month: + age = self.convert_age(age) + return age + # some files don't have age data + except (TypeError, AttributeError) as e: + return None + + def convert_age(self, age_year): + "Caclculate age in months from a string in CHILDES format" + m = re.match(r"P(\d+)Y(\d+)M?(\d?\d?)D?", age_year) + age_month = int(m.group(1)) * 12 + int(m.group(2)) + try: + if int(m.group(3)) > 15: + age_month += 1 + # some corpora don't have age information? 
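            # CHILDES encodes ages as ISO-8601-style durations such as "P2Y6M14D"
            # (2 years, 6 months, 14 days). Group 3 is the day count: more than
            # 15 days rounds the month total up, and an empty day field makes
            # int() raise the ValueError handled below.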
+ except ValueError as e: + pass + return age_month + + def MLU(self, fileids=None, speaker="CHI"): + """ + :return: the given file(s) as a floating number + :rtype: list(float) + """ + if not self._lazy: + return [ + self._getMLU(fileid, speaker=speaker) + for fileid in self.abspaths(fileids) + ] + get_MLU = lambda fileid: self._getMLU(fileid, speaker=speaker) + return LazyMap(get_MLU, self.abspaths(fileids)) + + def _getMLU(self, fileid, speaker): + sents = self._get_words( + fileid, + speaker=speaker, + sent=True, + stem=True, + relation=False, + pos=True, + strip_space=True, + replace=True, + ) + results = [] + lastSent = [] + numFillers = 0 + sentDiscount = 0 + for sent in sents: + posList = [pos for (word, pos) in sent] + # if any part of the sentence is intelligible + if any(pos == "unk" for pos in posList): + continue + # if the sentence is null + elif sent == []: + continue + # if the sentence is the same as the last sent + elif sent == lastSent: + continue + else: + results.append([word for (word, pos) in sent]) + # count number of fillers + if len({"co", None}.intersection(posList)) > 0: + numFillers += posList.count("co") + numFillers += posList.count(None) + sentDiscount += 1 + lastSent = sent + try: + thisWordList = flatten(results) + # count number of morphemes + # (e.g., 'read' = 1 morpheme but 'read-PAST' is 2 morphemes) + numWords = ( + len(flatten([word.split("-") for word in thisWordList])) - numFillers + ) + numSents = len(results) - sentDiscount + mlu = numWords / numSents + except ZeroDivisionError: + mlu = 0 + # return {'mlu':mlu,'wordNum':numWords,'sentNum':numSents} + return mlu + + def _get_words( + self, fileid, speaker, sent, stem, relation, pos, strip_space, replace + ): + if ( + isinstance(speaker, str) and speaker != "ALL" + ): # ensure we have a list of speakers + speaker = [speaker] + xmldoc = ElementTree.parse(fileid).getroot() + # processing each xml doc + results = [] + for xmlsent in xmldoc.findall(".//{%s}u" % NS): + sents = [] + # select speakers + if speaker == "ALL" or xmlsent.get("who") in speaker: + for xmlword in xmlsent.findall(".//{%s}w" % NS): + infl = None + suffixStem = None + suffixTag = None + # getting replaced words + if replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}replacement"): + xmlword = xmlsent.find( + f".//{{{NS}}}w/{{{NS}}}replacement/{{{NS}}}w" + ) + elif replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk"): + xmlword = xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk") + # get text + if xmlword.text: + word = xmlword.text + else: + word = "" + # strip tailing space + if strip_space: + word = word.strip() + # stem + if relation or stem: + try: + xmlstem = xmlword.find(".//{%s}stem" % NS) + word = xmlstem.text + except AttributeError as e: + pass + # if there is an inflection + try: + xmlinfl = xmlword.find( + f".//{{{NS}}}mor/{{{NS}}}mw/{{{NS}}}mk" + ) + word += "-" + xmlinfl.text + except: + pass + # if there is a suffix + try: + xmlsuffix = xmlword.find( + ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}stem" + % (NS, NS, NS, NS) + ) + suffixStem = xmlsuffix.text + except AttributeError: + suffixStem = "" + if suffixStem: + word += "~" + suffixStem + # pos + if relation or pos: + try: + xmlpos = xmlword.findall(".//{%s}c" % NS) + xmlpos2 = xmlword.findall(".//{%s}s" % NS) + if xmlpos2 != []: + tag = xmlpos[0].text + ":" + xmlpos2[0].text + else: + tag = xmlpos[0].text + except (AttributeError, IndexError) as e: + tag = "" + try: + xmlsuffixpos = xmlword.findall( + ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}c" + % (NS, NS, NS, NS, NS) + ) + 
xmlsuffixpos2 = xmlword.findall( + ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}s" + % (NS, NS, NS, NS, NS) + ) + if xmlsuffixpos2: + suffixTag = ( + xmlsuffixpos[0].text + ":" + xmlsuffixpos2[0].text + ) + else: + suffixTag = xmlsuffixpos[0].text + except: + pass + if suffixTag: + tag += "~" + suffixTag + word = (word, tag) + # relational + # the gold standard is stored in + # + if relation == True: + for xmlstem_rel in xmlword.findall( + f".//{{{NS}}}mor/{{{NS}}}gra" + ): + if not xmlstem_rel.get("type") == "grt": + word = ( + word[0], + word[1], + xmlstem_rel.get("index") + + "|" + + xmlstem_rel.get("head") + + "|" + + xmlstem_rel.get("relation"), + ) + else: + word = ( + word[0], + word[1], + word[2], + word[0], + word[1], + xmlstem_rel.get("index") + + "|" + + xmlstem_rel.get("head") + + "|" + + xmlstem_rel.get("relation"), + ) + try: + for xmlpost_rel in xmlword.findall( + f".//{{{NS}}}mor/{{{NS}}}mor-post/{{{NS}}}gra" + ): + if not xmlpost_rel.get("type") == "grt": + suffixStem = ( + suffixStem[0], + suffixStem[1], + xmlpost_rel.get("index") + + "|" + + xmlpost_rel.get("head") + + "|" + + xmlpost_rel.get("relation"), + ) + else: + suffixStem = ( + suffixStem[0], + suffixStem[1], + suffixStem[2], + suffixStem[0], + suffixStem[1], + xmlpost_rel.get("index") + + "|" + + xmlpost_rel.get("head") + + "|" + + xmlpost_rel.get("relation"), + ) + except: + pass + sents.append(word) + if sent or relation: + results.append(sents) + else: + results.extend(sents) + return LazyMap(lambda x: x, results) + + # Ready-to-use browser opener + + """ + The base URL for viewing files on the childes website. This + shouldn't need to be changed, unless CHILDES changes the configuration + of their server or unless the user sets up their own corpus webserver. + """ + childes_url_base = r"https://childes.talkbank.org/browser/index.php?url=" + + def webview_file(self, fileid, urlbase=None): + """Map a corpus file to its web version on the CHILDES website, + and open it in a web browser. + + The complete URL to be used is: + childes.childes_url_base + urlbase + fileid.replace('.xml', '.cha') + + If no urlbase is passed, we try to calculate it. This + requires that the childes corpus was set up to mirror the + folder hierarchy under childes.psy.cmu.edu/data-xml/, e.g.: + nltk_data/corpora/childes/Eng-USA/Cornell/??? or + nltk_data/corpora/childes/Romance/Spanish/Aguirre/??? + + The function first looks (as a special case) if "Eng-USA" is + on the path consisting of +fileid; then if + "childes", possibly followed by "data-xml", appears. If neither + one is found, we use the unmodified fileid and hope for the best. + If this is not right, specify urlbase explicitly, e.g., if the + corpus root points to the Cornell folder, urlbase='Eng-USA/Cornell'. + """ + + import webbrowser + + if urlbase: + path = urlbase + "/" + fileid + else: + full = self.root + "/" + fileid + full = re.sub(r"\\", "/", full) + if "/childes/" in full.lower(): + # Discard /data-xml/ if present + path = re.findall(r"(?i)/childes(?:/data-xml)?/(.*)\.xml", full)[0] + elif "eng-usa" in full.lower(): + path = "Eng-USA/" + re.findall(r"/(?i)Eng-USA/(.*)\.xml", full)[0] + else: + path = fileid + + # Strip ".xml" and add ".cha", as necessary: + if path.endswith(".xml"): + path = path[:-4] + + if not path.endswith(".cha"): + path = path + ".cha" + + url = self.childes_url_base + path + + webbrowser.open_new_tab(url) + print("Opening in browser:", url) + # Pausing is a good idea, but it's up to the user... 
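        # Example of the mapping performed above (hypothetical fileid, shown
        # only for illustration):
        #   webview_file('Eng-USA/Cornell/sample01.xml')
        #   -> https://childes.talkbank.org/browser/index.php?url=Eng-USA/Cornell/sample01.cha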
+ # raw_input("Hit Return to continue") + + +def demo(corpus_root=None): + """ + The CHILDES corpus should be manually downloaded and saved + to ``[NLTK_Data_Dir]/corpora/childes/`` + """ + if not corpus_root: + from nltk.data import find + + corpus_root = find("corpora/childes/data-xml/Eng-USA/") + + try: + childes = CHILDESCorpusReader(corpus_root, ".*.xml") + # describe all corpus + for file in childes.fileids()[:5]: + corpus = "" + corpus_id = "" + for (key, value) in childes.corpus(file)[0].items(): + if key == "Corpus": + corpus = value + if key == "Id": + corpus_id = value + print("Reading", corpus, corpus_id, " .....") + print("words:", childes.words(file)[:7], "...") + print( + "words with replaced words:", + childes.words(file, replace=True)[:7], + " ...", + ) + print("words with pos tags:", childes.tagged_words(file)[:7], " ...") + print("words (only MOT):", childes.words(file, speaker="MOT")[:7], "...") + print("words (only CHI):", childes.words(file, speaker="CHI")[:7], "...") + print("stemmed words:", childes.words(file, stem=True)[:7], " ...") + print( + "words with relations and pos-tag:", + childes.words(file, relation=True)[:5], + " ...", + ) + print("sentence:", childes.sents(file)[:2], " ...") + for (participant, values) in childes.participants(file)[0].items(): + for (key, value) in values.items(): + print("\tparticipant", participant, key, ":", value) + print("num of sent:", len(childes.sents(file))) + print("num of morphemes:", len(childes.words(file, stem=True))) + print("age:", childes.age(file)) + print("age in month:", childes.age(file, month=True)) + print("MLU:", childes.MLU(file)) + print() + + except LookupError as e: + print( + """The CHILDES corpus, or the parts you need, should be manually + downloaded from https://childes.talkbank.org/data-xml/ and saved at + [NLTK_Data_Dir]/corpora/childes/ + Alternately, you can call the demo with the path to a portion of the CHILDES corpus, e.g.: + demo('/path/to/childes/data-xml/Eng-USA/") + """ + ) + # corpus_root_http = urllib2.urlopen('https://childes.talkbank.org/data-xml/Eng-USA/Bates.zip') + # corpus_root_http_bates = zipfile.ZipFile(cStringIO.StringIO(corpus_root_http.read())) + ##this fails + # childes = CHILDESCorpusReader(corpus_root_http_bates,corpus_root_http_bates.namelist()) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py new file mode 100644 index 0000000000000000000000000000000000000000..032ce82c3b2a6a4011c9b1637b882db2df1bcd55 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py @@ -0,0 +1,309 @@ +# Natural Language Toolkit: Comparative Sentence Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for the Comparative Sentence Dataset. + +- Comparative Sentence Dataset information - + +Annotated by: Nitin Jindal and Bing Liu, 2006. + Department of Computer Sicence + University of Illinois at Chicago + +Contact: Nitin Jindal, njindal@cs.uic.edu + Bing Liu, liub@cs.uic.edu (https://www.cs.uic.edu/~liub) + +Distributed with permission. + +Related papers: + +- Nitin Jindal and Bing Liu. "Identifying Comparative Sentences in Text Documents". + Proceedings of the ACM SIGIR International Conference on Information Retrieval + (SIGIR-06), 2006. 
+ +- Nitin Jindal and Bing Liu. "Mining Comprative Sentences and Relations". + Proceedings of Twenty First National Conference on Artificial Intelligence + (AAAI-2006), 2006. + +- Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences". + Proceedings of the 22nd International Conference on Computational Linguistics + (Coling-2008), Manchester, 18-22 August, 2008. +""" +import re + +from nltk.corpus.reader.api import * +from nltk.tokenize import * + +# Regular expressions for dataset components +STARS = re.compile(r"^\*+$") +COMPARISON = re.compile(r"") +CLOSE_COMPARISON = re.compile(r"") +GRAD_COMPARISON = re.compile(r"") +NON_GRAD_COMPARISON = re.compile(r"") +ENTITIES_FEATS = re.compile(r"(\d)_((?:[\.\w\s/-](?!\d_))+)") +KEYWORD = re.compile(r"\(([^\(]*)\)$") + + +class Comparison: + """ + A Comparison represents a comparative sentence and its constituents. + """ + + def __init__( + self, + text=None, + comp_type=None, + entity_1=None, + entity_2=None, + feature=None, + keyword=None, + ): + """ + :param text: a string (optionally tokenized) containing a comparison. + :param comp_type: an integer defining the type of comparison expressed. + Values can be: 1 (Non-equal gradable), 2 (Equative), 3 (Superlative), + 4 (Non-gradable). + :param entity_1: the first entity considered in the comparison relation. + :param entity_2: the second entity considered in the comparison relation. + :param feature: the feature considered in the comparison relation. + :param keyword: the word or phrase which is used for that comparative relation. + """ + self.text = text + self.comp_type = comp_type + self.entity_1 = entity_1 + self.entity_2 = entity_2 + self.feature = feature + self.keyword = keyword + + def __repr__(self): + return ( + 'Comparison(text="{}", comp_type={}, entity_1="{}", entity_2="{}", ' + 'feature="{}", keyword="{}")' + ).format( + self.text, + self.comp_type, + self.entity_1, + self.entity_2, + self.feature, + self.keyword, + ) + + +class ComparativeSentencesCorpusReader(CorpusReader): + """ + Reader for the Comparative Sentence Dataset by Jindal and Liu (2006). + + >>> from nltk.corpus import comparative_sentences + >>> comparison = comparative_sentences.comparisons()[0] + >>> comparison.text # doctest: +NORMALIZE_WHITESPACE + ['its', 'fast-forward', 'and', 'rewind', 'work', 'much', 'more', 'smoothly', + 'and', 'consistently', 'than', 'those', 'of', 'other', 'models', 'i', "'ve", + 'had', '.'] + >>> comparison.entity_2 + 'models' + >>> (comparison.feature, comparison.keyword) + ('rewind', 'more') + >>> len(comparative_sentences.comparisons()) + 853 + """ + + CorpusView = StreamBackedCorpusView + + def __init__( + self, + root, + fileids, + word_tokenizer=WhitespaceTokenizer(), + sent_tokenizer=None, + encoding="utf8", + ): + """ + :param root: The root directory for this corpus. + :param fileids: a list or regexp specifying the fileids in this corpus. + :param word_tokenizer: tokenizer for breaking sentences or paragraphs + into words. Default: `WhitespaceTokenizer` + :param sent_tokenizer: tokenizer for breaking paragraphs into sentences. + :param encoding: the encoding that should be used to read the corpus. + """ + + CorpusReader.__init__(self, root, fileids, encoding) + self._word_tokenizer = word_tokenizer + self._sent_tokenizer = sent_tokenizer + self._readme = "README.txt" + + def comparisons(self, fileids=None): + """ + Return all comparisons in the corpus. 
+ + :param fileids: a list or regexp specifying the ids of the files whose + comparisons have to be returned. + :return: the given file(s) as a list of Comparison objects. + :rtype: list(Comparison) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_comparison_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def keywords(self, fileids=None): + """ + Return a set of all keywords used in the corpus. + + :param fileids: a list or regexp specifying the ids of the files whose + keywords have to be returned. + :return: the set of keywords and comparative phrases used in the corpus. + :rtype: set(str) + """ + all_keywords = concat( + [ + self.CorpusView(path, self._read_keyword_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + keywords_set = {keyword.lower() for keyword in all_keywords if keyword} + return keywords_set + + def keywords_readme(self): + """ + Return the list of words and constituents considered as clues of a + comparison (from listOfkeywords.txt). + """ + keywords = [] + with self.open("listOfkeywords.txt") as fp: + raw_text = fp.read() + for line in raw_text.split("\n"): + if not line or line.startswith("//"): + continue + keywords.append(line.strip()) + return keywords + + def sents(self, fileids=None): + """ + Return all sentences in the corpus. + + :param fileids: a list or regexp specifying the ids of the files whose + sentences have to be returned. + :return: all sentences of the corpus as lists of tokens (or as plain + strings, if no word tokenizer is specified). + :rtype: list(list(str)) or list(str) + """ + return concat( + [ + self.CorpusView(path, self._read_sent_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def words(self, fileids=None): + """ + Return all words and punctuation symbols in the corpus. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def _read_comparison_block(self, stream): + while True: + line = stream.readline() + if not line: + return [] # end of file. 
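            # Sketch of the annotation layout, as implied by the parsing steps
            # below: a line carrying an opening comparison tag announces a
            # comparative sentence; the sentence text follows on the next line,
            # then a line of closing tags; for each gradable comparison one
            # further line lists the numbered entities/features and, in
            # parentheses, the keyword.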
+ comparison_tags = re.findall(COMPARISON, line) + if comparison_tags: + grad_comparisons = re.findall(GRAD_COMPARISON, line) + non_grad_comparisons = re.findall(NON_GRAD_COMPARISON, line) + # Advance to the next line (it contains the comparative sentence) + comparison_text = stream.readline().strip() + if self._word_tokenizer: + comparison_text = self._word_tokenizer.tokenize(comparison_text) + # Skip the next line (it contains closing comparison tags) + stream.readline() + # If gradable comparisons are found, create Comparison instances + # and populate their fields + comparison_bundle = [] + if grad_comparisons: + # Each comparison tag has its own relations on a separate line + for comp in grad_comparisons: + comp_type = int(re.match(r"", comp).group(1)) + comparison = Comparison( + text=comparison_text, comp_type=comp_type + ) + line = stream.readline() + entities_feats = ENTITIES_FEATS.findall(line) + if entities_feats: + for (code, entity_feat) in entities_feats: + if code == "1": + comparison.entity_1 = entity_feat.strip() + elif code == "2": + comparison.entity_2 = entity_feat.strip() + elif code == "3": + comparison.feature = entity_feat.strip() + keyword = KEYWORD.findall(line) + if keyword: + comparison.keyword = keyword[0] + comparison_bundle.append(comparison) + # If non-gradable comparisons are found, create a simple Comparison + # instance for each one + if non_grad_comparisons: + for comp in non_grad_comparisons: + # comp_type in this case should always be 4. + comp_type = int(re.match(r"", comp).group(1)) + comparison = Comparison( + text=comparison_text, comp_type=comp_type + ) + comparison_bundle.append(comparison) + # Flatten the list of comparisons before returning them + # return concat([comparison_bundle]) + return comparison_bundle + + def _read_keyword_block(self, stream): + keywords = [] + for comparison in self._read_comparison_block(stream): + keywords.append(comparison.keyword) + return keywords + + def _read_sent_block(self, stream): + while True: + line = stream.readline() + if re.match(STARS, line): + while True: + line = stream.readline() + if re.match(STARS, line): + break + continue + if ( + not re.findall(COMPARISON, line) + and not ENTITIES_FEATS.findall(line) + and not re.findall(CLOSE_COMPARISON, line) + ): + if self._sent_tokenizer: + return [ + self._word_tokenizer.tokenize(sent) + for sent in self._sent_tokenizer.tokenize(line) + ] + else: + return [self._word_tokenizer.tokenize(line)] + + def _read_word_block(self, stream): + words = [] + for sent in self._read_sent_block(stream): + words.extend(sent) + return words diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/indian.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/indian.py new file mode 100644 index 0000000000000000000000000000000000000000..23c6434c34b38dcb4e0227851afb2aefde2fd090 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/indian.py @@ -0,0 +1,93 @@ +# Natural Language Toolkit: Indian Language POS-Tagged Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Indian Language POS-Tagged Corpus +Collected by A Kumaran, Microsoft Research, India +Distributed with permission + +Contents: + - Bangla: IIT Kharagpur + - Hindi: Microsoft Research India + - Marathi: IIT Bombay + - Telugu: IIIT Hyderabad +""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tag import map_tag, str2tuple 
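# Illustrative usage sketch (not part of this module). It assumes the "indian"
# corpus has been installed, e.g. via nltk.download('indian'); 'hindi.pos' is
# one of its distributed fileids.
#
#     from nltk.corpus import indian
#     print(indian.tagged_words('hindi.pos')[:3])
#     print(indian.sents('hindi.pos')[0])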
+ + +class IndianCorpusReader(CorpusReader): + """ + List of words, one per line. Blank lines are ignored. + """ + + def words(self, fileids=None): + return concat( + [ + IndianCorpusView(fileid, enc, False, False) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_words(self, fileids=None, tagset=None): + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + IndianCorpusView(fileid, enc, True, False, tag_mapping_function) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def sents(self, fileids=None): + return concat( + [ + IndianCorpusView(fileid, enc, False, True) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def tagged_sents(self, fileids=None, tagset=None): + if tagset and tagset != self._tagset: + tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) + else: + tag_mapping_function = None + return concat( + [ + IndianCorpusView(fileid, enc, True, True, tag_mapping_function) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + +class IndianCorpusView(StreamBackedCorpusView): + def __init__( + self, corpus_file, encoding, tagged, group_by_sent, tag_mapping_function=None + ): + self._tagged = tagged + self._group_by_sent = group_by_sent + self._tag_mapping_function = tag_mapping_function + StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) + + def read_block(self, stream): + line = stream.readline() + if line.startswith("<"): + return [] + sent = [str2tuple(word, sep="_") for word in line.split()] + if self._tag_mapping_function: + sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent] + if not self._tagged: + sent = [w for (w, t) in sent] + if self._group_by_sent: + return [sent] + else: + return sent diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nombank.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nombank.py new file mode 100644 index 0000000000000000000000000000000000000000..ddee6206019c644968058e7cb6cac83f5076ade6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nombank.py @@ -0,0 +1,466 @@ +# Natural Language Toolkit: NomBank Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Paul Bedaride +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +from functools import total_ordering +from xml.etree import ElementTree + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.internals import raise_unorderable_types +from nltk.tree import Tree + + +class NombankCorpusReader(CorpusReader): + """ + Corpus reader for the nombank corpus, which augments the Penn + Treebank with information about the predicate argument structure + of every noun instance. The corpus consists of two parts: the + predicate-argument annotations themselves, and a set of "frameset + files" which define the argument labels used by the annotations, + on a per-noun basis. Each "frameset file" contains one or more + predicates, such as ``'turn'`` or ``'turn_on'``, each of which is + divided into coarse-grained word senses called "rolesets". For + each "roleset", the frameset file provides descriptions of the + argument roles, along with examples. + """ + + def __init__( + self, + root, + nomfile, + framefiles="", + nounsfile=None, + parse_fileid_xform=None, + parse_corpus=None, + encoding="utf8", + ): + """ + :param root: The root directory for this corpus. 
+ :param nomfile: The name of the file containing the predicate- + argument annotations (relative to ``root``). + :param framefiles: A list or regexp specifying the frameset + fileids for this corpus. + :param parse_fileid_xform: A transform that should be applied + to the fileids in this corpus. This should be a function + of one argument (a fileid) that returns a string (the new + fileid). + :param parse_corpus: The corpus containing the parse trees + corresponding to this corpus. These parse trees are + necessary to resolve the tree pointers used by nombank. + """ + + # If framefiles is specified as a regexp, expand it. + if isinstance(framefiles, str): + self._fileids = find_corpus_fileids(root, framefiles) + self._fileids = list(framefiles) + # Initialize the corpus reader. + CorpusReader.__init__(self, root, framefiles, encoding) + + # Record our nom file & nouns file. + self._nomfile = nomfile + self._nounsfile = nounsfile + self._parse_fileid_xform = parse_fileid_xform + self._parse_corpus = parse_corpus + + def instances(self, baseform=None): + """ + :return: a corpus view that acts as a list of + ``NombankInstance`` objects, one for each noun in the corpus. + """ + kwargs = {} + if baseform is not None: + kwargs["instance_filter"] = lambda inst: inst.baseform == baseform + return StreamBackedCorpusView( + self.abspath(self._nomfile), + lambda stream: self._read_instance_block(stream, **kwargs), + encoding=self.encoding(self._nomfile), + ) + + def lines(self): + """ + :return: a corpus view that acts as a list of strings, one for + each line in the predicate-argument annotation file. + """ + return StreamBackedCorpusView( + self.abspath(self._nomfile), + read_line_block, + encoding=self.encoding(self._nomfile), + ) + + def roleset(self, roleset_id): + """ + :return: the xml description for the given roleset. + """ + baseform = roleset_id.split(".")[0] + baseform = baseform.replace("perc-sign", "%") + baseform = baseform.replace("oneslashonezero", "1/10").replace( + "1/10", "1-slash-10" + ) + framefile = "frames/%s.xml" % baseform + if framefile not in self.fileids(): + raise ValueError("Frameset file for %s not found" % roleset_id) + + # n.b.: The encoding for XML fileids is specified by the file + # itself; so we ignore self._encoding here. + with self.abspath(framefile).open() as fp: + etree = ElementTree.parse(fp).getroot() + for roleset in etree.findall("predicate/roleset"): + if roleset.attrib["id"] == roleset_id: + return roleset + raise ValueError(f"Roleset {roleset_id} not found in {framefile}") + + def rolesets(self, baseform=None): + """ + :return: list of xml descriptions for rolesets. + """ + if baseform is not None: + framefile = "frames/%s.xml" % baseform + if framefile not in self.fileids(): + raise ValueError("Frameset file for %s not found" % baseform) + framefiles = [framefile] + else: + framefiles = self.fileids() + + rsets = [] + for framefile in framefiles: + # n.b.: The encoding for XML fileids is specified by the file + # itself; so we ignore self._encoding here. + with self.abspath(framefile).open() as fp: + etree = ElementTree.parse(fp).getroot() + rsets.append(etree.findall("predicate/roleset")) + return LazyConcatenation(rsets) + + def nouns(self): + """ + :return: a corpus view that acts as a list of all noun lemmas + in this corpus (from the nombank.1.0.words file). 
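        A usage sketch (assumes the nombank corpus data is installed; output
        omitted here):

            >>> from nltk.corpus import nombank   # doctest: +SKIP
            >>> nombank.nouns()[:5]               # doctest: +SKIP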
+ """ + return StreamBackedCorpusView( + self.abspath(self._nounsfile), + read_line_block, + encoding=self.encoding(self._nounsfile), + ) + + def _read_instance_block(self, stream, instance_filter=lambda inst: True): + block = [] + + # Read 100 at a time. + for i in range(100): + line = stream.readline().strip() + if line: + inst = NombankInstance.parse( + line, self._parse_fileid_xform, self._parse_corpus + ) + if instance_filter(inst): + block.append(inst) + + return block + + +###################################################################### +# { Nombank Instance & related datatypes +###################################################################### + + +class NombankInstance: + def __init__( + self, + fileid, + sentnum, + wordnum, + baseform, + sensenumber, + predicate, + predid, + arguments, + parse_corpus=None, + ): + + self.fileid = fileid + """The name of the file containing the parse tree for this + instance's sentence.""" + + self.sentnum = sentnum + """The sentence number of this sentence within ``fileid``. + Indexing starts from zero.""" + + self.wordnum = wordnum + """The word number of this instance's predicate within its + containing sentence. Word numbers are indexed starting from + zero, and include traces and other empty parse elements.""" + + self.baseform = baseform + """The baseform of the predicate.""" + + self.sensenumber = sensenumber + """The sense number of the predicate.""" + + self.predicate = predicate + """A ``NombankTreePointer`` indicating the position of this + instance's predicate within its containing sentence.""" + + self.predid = predid + """Identifier of the predicate.""" + + self.arguments = tuple(arguments) + """A list of tuples (argloc, argid), specifying the location + and identifier for each of the predicate's argument in the + containing sentence. Argument identifiers are strings such as + ``'ARG0'`` or ``'ARGM-TMP'``. This list does *not* contain + the predicate.""" + + self.parse_corpus = parse_corpus + """A corpus reader for the parse trees corresponding to the + instances in this nombank corpus.""" + + @property + def roleset(self): + """The name of the roleset used by this instance's predicate. + Use ``nombank.roleset() `` to + look up information about the roleset.""" + r = self.baseform.replace("%", "perc-sign") + r = r.replace("1/10", "1-slash-10").replace("1-slash-10", "oneslashonezero") + return f"{r}.{self.sensenumber}" + + def __repr__(self): + return "".format( + self.fileid, + self.sentnum, + self.wordnum, + ) + + def __str__(self): + s = "{} {} {} {} {}".format( + self.fileid, + self.sentnum, + self.wordnum, + self.baseform, + self.sensenumber, + ) + items = self.arguments + ((self.predicate, "rel"),) + for (argloc, argid) in sorted(items): + s += f" {argloc}-{argid}" + return s + + def _get_tree(self): + if self.parse_corpus is None: + return None + if self.fileid not in self.parse_corpus.fileids(): + return None + return self.parse_corpus.parsed_sents(self.fileid)[self.sentnum] + + tree = property( + _get_tree, + doc=""" + The parse tree corresponding to this instance, or None if + the corresponding tree is not available.""", + ) + + @staticmethod + def parse(s, parse_fileid_xform=None, parse_corpus=None): + pieces = s.split() + if len(pieces) < 6: + raise ValueError("Badly formatted nombank line: %r" % s) + + # Divide the line into its basic pieces. 
+ (fileid, sentnum, wordnum, baseform, sensenumber) = pieces[:5] + + args = pieces[5:] + rel = [args.pop(i) for i, p in enumerate(args) if "-rel" in p] + if len(rel) != 1: + raise ValueError("Badly formatted nombank line: %r" % s) + + # Apply the fileid selector, if any. + if parse_fileid_xform is not None: + fileid = parse_fileid_xform(fileid) + + # Convert sentence & word numbers to ints. + sentnum = int(sentnum) + wordnum = int(wordnum) + + # Parse the predicate location. + + predloc, predid = rel[0].split("-", 1) + predicate = NombankTreePointer.parse(predloc) + + # Parse the arguments. + arguments = [] + for arg in args: + argloc, argid = arg.split("-", 1) + arguments.append((NombankTreePointer.parse(argloc), argid)) + + # Put it all together. + return NombankInstance( + fileid, + sentnum, + wordnum, + baseform, + sensenumber, + predicate, + predid, + arguments, + parse_corpus, + ) + + +class NombankPointer: + """ + A pointer used by nombank to identify one or more constituents in + a parse tree. ``NombankPointer`` is an abstract base class with + three concrete subclasses: + + - ``NombankTreePointer`` is used to point to single constituents. + - ``NombankSplitTreePointer`` is used to point to 'split' + constituents, which consist of a sequence of two or more + ``NombankTreePointer`` pointers. + - ``NombankChainTreePointer`` is used to point to entire trace + chains in a tree. It consists of a sequence of pieces, which + can be ``NombankTreePointer`` or ``NombankSplitTreePointer`` pointers. + """ + + def __init__(self): + if self.__class__ == NombankPointer: + raise NotImplementedError() + + +class NombankChainTreePointer(NombankPointer): + def __init__(self, pieces): + self.pieces = pieces + """A list of the pieces that make up this chain. Elements may + be either ``NombankSplitTreePointer`` or + ``NombankTreePointer`` pointers.""" + + def __str__(self): + return "*".join("%s" % p for p in self.pieces) + + def __repr__(self): + return "" % self + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return Tree("*CHAIN*", [p.select(tree) for p in self.pieces]) + + +class NombankSplitTreePointer(NombankPointer): + def __init__(self, pieces): + self.pieces = pieces + """A list of the pieces that make up this chain. Elements are + all ``NombankTreePointer`` pointers.""" + + def __str__(self): + return ",".join("%s" % p for p in self.pieces) + + def __repr__(self): + return "" % self + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return Tree("*SPLIT*", [p.select(tree) for p in self.pieces]) + + +@total_ordering +class NombankTreePointer(NombankPointer): + """ + wordnum:height*wordnum:height*... + wordnum:height, + + """ + + def __init__(self, wordnum, height): + self.wordnum = wordnum + self.height = height + + @staticmethod + def parse(s): + # Deal with chains (xx*yy*zz) + pieces = s.split("*") + if len(pieces) > 1: + return NombankChainTreePointer( + [NombankTreePointer.parse(elt) for elt in pieces] + ) + + # Deal with split args (xx,yy,zz) + pieces = s.split(",") + if len(pieces) > 1: + return NombankSplitTreePointer( + [NombankTreePointer.parse(elt) for elt in pieces] + ) + + # Deal with normal pointers. 
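+        # A plain pointer is a single "wordnum:height" pair such as "10:1";
+        # the chain ("10:1*12:0") and split ("10:1,12:0") forms were handled
+        # above by recursing on each piece.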
+ pieces = s.split(":") + if len(pieces) != 2: + raise ValueError("bad nombank pointer %r" % s) + return NombankTreePointer(int(pieces[0]), int(pieces[1])) + + def __str__(self): + return f"{self.wordnum}:{self.height}" + + def __repr__(self): + return "NombankTreePointer(%d, %d)" % (self.wordnum, self.height) + + def __eq__(self, other): + while isinstance(other, (NombankChainTreePointer, NombankSplitTreePointer)): + other = other.pieces[0] + + if not isinstance(other, NombankTreePointer): + return self is other + + return self.wordnum == other.wordnum and self.height == other.height + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + while isinstance(other, (NombankChainTreePointer, NombankSplitTreePointer)): + other = other.pieces[0] + + if not isinstance(other, NombankTreePointer): + return id(self) < id(other) + + return (self.wordnum, -self.height) < (other.wordnum, -other.height) + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return tree[self.treepos(tree)] + + def treepos(self, tree): + """ + Convert this pointer to a standard 'tree position' pointer, + given that it points to the given tree. + """ + if tree is None: + raise ValueError("Parse tree not available") + stack = [tree] + treepos = [] + + wordnum = 0 + while True: + # tree node: + if isinstance(stack[-1], Tree): + # Select the next child. + if len(treepos) < len(stack): + treepos.append(0) + else: + treepos[-1] += 1 + # Update the stack. + if treepos[-1] < len(stack[-1]): + stack.append(stack[-1][treepos[-1]]) + else: + # End of node's child list: pop up a level. + stack.pop() + treepos.pop() + # word node: + else: + if wordnum == self.wordnum: + return tuple(treepos[: len(treepos) - self.height - 1]) + else: + wordnum += 1 + stack.pop() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py new file mode 100644 index 0000000000000000000000000000000000000000..0bcf51dc66954866ad665a54ba926fc9c8a33116 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py @@ -0,0 +1,90 @@ +# Natural Language Toolkit: NPS Chat Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +import re +import textwrap + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.corpus.reader.xmldocs import * +from nltk.internals import ElementWrapper +from nltk.tag import map_tag +from nltk.util import LazyConcatenation + + +class NPSChatCorpusReader(XMLCorpusReader): + def __init__(self, root, fileids, wrap_etree=False, tagset=None): + XMLCorpusReader.__init__(self, root, fileids, wrap_etree) + self._tagset = tagset + + def xml_posts(self, fileids=None): + if self._wrap_etree: + return concat( + [ + XMLCorpusView(fileid, "Session/Posts/Post", self._wrap_elt) + for fileid in self.abspaths(fileids) + ] + ) + else: + return concat( + [ + XMLCorpusView(fileid, "Session/Posts/Post") + for fileid in self.abspaths(fileids) + ] + ) + + def posts(self, fileids=None): + return concat( + [ + XMLCorpusView( + fileid, "Session/Posts/Post/terminals", self._elt_to_words + ) + for fileid in self.abspaths(fileids) + ] + ) + + def tagged_posts(self, fileids=None, tagset=None): + def reader(elt, handler): + return self._elt_to_tagged_words(elt, handler, tagset) + + return concat( + [ + XMLCorpusView(fileid, "Session/Posts/Post/terminals", 
reader) + for fileid in self.abspaths(fileids) + ] + ) + + def words(self, fileids=None): + return LazyConcatenation(self.posts(fileids)) + + def tagged_words(self, fileids=None, tagset=None): + return LazyConcatenation(self.tagged_posts(fileids, tagset)) + + def _wrap_elt(self, elt, handler): + return ElementWrapper(elt) + + def _elt_to_words(self, elt, handler): + return [self._simplify_username(t.attrib["word"]) for t in elt.findall("t")] + + def _elt_to_tagged_words(self, elt, handler, tagset=None): + tagged_post = [ + (self._simplify_username(t.attrib["word"]), t.attrib["pos"]) + for t in elt.findall("t") + ] + if tagset and tagset != self._tagset: + tagged_post = [ + (w, map_tag(self._tagset, tagset, t)) for (w, t) in tagged_post + ] + return tagged_post + + @staticmethod + def _simplify_username(word): + if "User" in word: + word = "U" + word.split("User", 1)[1] + elif isinstance(word, bytes): + word = word.decode("ascii") + return word diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py new file mode 100644 index 0000000000000000000000000000000000000000..87be7c97e6151c8ce19e64e2f8ac6683918e3aad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py @@ -0,0 +1,125 @@ +# Natural Language Toolkit: Opinion Lexicon Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +CorpusReader for the Opinion Lexicon. + +Opinion Lexicon information +=========================== + +Authors: Minqing Hu and Bing Liu, 2004. + Department of Computer Science + University of Illinois at Chicago + +Contact: Bing Liu, liub@cs.uic.edu + https://www.cs.uic.edu/~liub + +Distributed with permission. + +Related papers: + +- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews". + Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery + & Data Mining (KDD-04), Aug 22-25, 2004, Seattle, Washington, USA. + +- Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and + Comparing Opinions on the Web". Proceedings of the 14th International World + Wide Web conference (WWW-2005), May 10-14, 2005, Chiba, Japan. +""" + +from nltk.corpus.reader import WordListCorpusReader +from nltk.corpus.reader.api import * + + +class IgnoreReadmeCorpusView(StreamBackedCorpusView): + """ + This CorpusView is used to skip the initial readme block of the corpus. + """ + + def __init__(self, *args, **kwargs): + StreamBackedCorpusView.__init__(self, *args, **kwargs) + # open self._stream + self._open() + # skip the readme block + read_blankline_block(self._stream) + # Set the initial position to the current stream position + self._filepos = [self._stream.tell()] + + +class OpinionLexiconCorpusReader(WordListCorpusReader): + """ + Reader for Liu and Hu opinion lexicon. Blank lines and readme are ignored. + + >>> from nltk.corpus import opinion_lexicon + >>> opinion_lexicon.words() + ['2-faced', '2-faces', 'abnormal', 'abolish', ...] + + The OpinionLexiconCorpusReader provides shortcuts to retrieve positive/negative + words: + + >>> opinion_lexicon.negative() + ['2-faced', '2-faces', 'abnormal', 'abolish', ...] 
+ + Note that words from `words()` method are sorted by file id, not alphabetically: + + >>> opinion_lexicon.words()[0:10] # doctest: +NORMALIZE_WHITESPACE + ['2-faced', '2-faces', 'abnormal', 'abolish', 'abominable', 'abominably', + 'abominate', 'abomination', 'abort', 'aborted'] + >>> sorted(opinion_lexicon.words())[0:10] # doctest: +NORMALIZE_WHITESPACE + ['2-faced', '2-faces', 'a+', 'abnormal', 'abolish', 'abominable', 'abominably', + 'abominate', 'abomination', 'abort'] + """ + + CorpusView = IgnoreReadmeCorpusView + + def words(self, fileids=None): + """ + Return all words in the opinion lexicon. Note that these words are not + sorted in alphabetical order. + + :param fileids: a list or regexp specifying the ids of the files whose + words have to be returned. + :return: the given file(s) as a list of words and punctuation symbols. + :rtype: list(str) + """ + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + return concat( + [ + self.CorpusView(path, self._read_word_block, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def positive(self): + """ + Return all positive words in alphabetical order. + + :return: a list of positive words. + :rtype: list(str) + """ + return self.words("positive-words.txt") + + def negative(self): + """ + Return all negative words in alphabetical order. + + :return: a list of negative words. + :rtype: list(str) + """ + return self.words("negative-words.txt") + + def _read_word_block(self, stream): + words = [] + for i in range(20): # Read 20 lines at a time. + line = stream.readline() + if not line: + continue + words.append(line.strip()) + return words diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py new file mode 100644 index 0000000000000000000000000000000000000000..e59d297c0100f46b484b02bfc125532e4ca9d8ad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py @@ -0,0 +1,375 @@ +# Natural Language Toolkit: +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Piotr Kasprzyk +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.xmldocs import XMLCorpusReader + +PARA = re.compile(r"]*){0,1}>(.*?)

") +SENT = re.compile(r"]*){0,1}>(.*?)") + +TAGGEDWORD = re.compile(r"<([wc](?: [^>]*){0,1}>)(.*?)") +WORD = re.compile(r"<[wc](?: [^>]*){0,1}>(.*?)") + +TYPE = re.compile(r'type="(.*?)"') +ANA = re.compile(r'ana="(.*?)"') + +TEXTID = re.compile(r'text id="(.*?)"') + + +class TEICorpusView(StreamBackedCorpusView): + def __init__( + self, + corpus_file, + tagged, + group_by_sent, + group_by_para, + tagset=None, + head_len=0, + textids=None, + ): + + self._tagged = tagged + self._textids = textids + + self._group_by_sent = group_by_sent + self._group_by_para = group_by_para + # WARNING -- skip header + StreamBackedCorpusView.__init__(self, corpus_file, startpos=head_len) + + _pagesize = 4096 + + def read_block(self, stream): + block = stream.readlines(self._pagesize) + block = concat(block) + while (block.count(" block.count("")) or block.count( + "") + len("") + block = block[:beg] + block[beg + end :] + + output = [] + for para_str in PARA.findall(block): + para = [] + for sent_str in SENT.findall(para_str): + if not self._tagged: + sent = WORD.findall(sent_str) + else: + sent = list(map(self._parse_tag, TAGGEDWORD.findall(sent_str))) + if self._group_by_sent: + para.append(sent) + else: + para.extend(sent) + if self._group_by_para: + output.append(para) + else: + output.extend(para) + return output + + def _parse_tag(self, tag_word_tuple): + (tag, word) = tag_word_tuple + if tag.startswith("w"): + tag = ANA.search(tag).group(1) + else: # tag.startswith('c') + tag = TYPE.search(tag).group(1) + return word, tag + + +class Pl196xCorpusReader(CategorizedCorpusReader, XMLCorpusReader): + head_len = 2770 + + def __init__(self, *args, **kwargs): + if "textid_file" in kwargs: + self._textids = kwargs["textid_file"] + else: + self._textids = None + + XMLCorpusReader.__init__(self, *args) + CategorizedCorpusReader.__init__(self, kwargs) + + self._init_textids() + + def _init_textids(self): + self._f2t = defaultdict(list) + self._t2f = defaultdict(list) + if self._textids is not None: + with open(self._textids) as fp: + for line in fp: + line = line.strip() + file_id, text_ids = line.split(" ", 1) + if file_id not in self.fileids(): + raise ValueError( + "In text_id mapping file %s: %s not found" + % (self._textids, file_id) + ) + for text_id in text_ids.split(self._delimiter): + self._add_textids(file_id, text_id) + + def _add_textids(self, file_id, text_id): + self._f2t[file_id].append(text_id) + self._t2f[text_id].append(file_id) + + def _resolve(self, fileids, categories, textids=None): + tmp = None + if ( + len( + list( + filter( + lambda accessor: accessor is None, + (fileids, categories, textids), + ) + ) + ) + != 1 + ): + + raise ValueError( + "Specify exactly one of: fileids, " "categories or textids" + ) + + if fileids is not None: + return fileids, None + + if categories is not None: + return self.fileids(categories), None + + if textids is not None: + if isinstance(textids, str): + textids = [textids] + files = sum((self._t2f[t] for t in textids), []) + tdict = dict() + for f in files: + tdict[f] = set(self._f2t[f]) & set(textids) + return files, tdict + + def decode_tag(self, tag): + # to be implemented + return tag + + def textids(self, fileids=None, categories=None): + """ + In the pl196x corpus each category is stored in single + file and thus both methods provide identical functionality. In order + to accommodate finer granularity, a non-standard textids() method was + implemented. 
All the main functions can be supplied with a list + of required chunks---giving much more control to the user. + """ + fileids, _ = self._resolve(fileids, categories) + if fileids is None: + return sorted(self._t2f) + + if isinstance(fileids, str): + fileids = [fileids] + return sorted(sum((self._f2t[d] for d in fileids), [])) + + def words(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + False, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + False, + False, + head_len=self.head_len, + ) + for fileid in fileids + ] + ) + + def sents(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + True, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), False, True, False, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def paras(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + False, + True, + True, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), False, True, True, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def tagged_words(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + True, + False, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), True, False, False, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def tagged_sents(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + True, + True, + False, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), True, True, False, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def tagged_paras(self, fileids=None, categories=None, textids=None): + fileids, textids = self._resolve(fileids, categories, textids) + if fileids is None: + fileids = self._fileids + elif isinstance(fileids, str): + fileids = [fileids] + + if textids: + return concat( + [ + TEICorpusView( + self.abspath(fileid), + 
True, + True, + True, + head_len=self.head_len, + textids=textids[fileid], + ) + for fileid in fileids + ] + ) + else: + return concat( + [ + TEICorpusView( + self.abspath(fileid), True, True, True, head_len=self.head_len + ) + for fileid in fileids + ] + ) + + def xml(self, fileids=None, categories=None): + fileids, _ = self._resolve(fileids, categories) + if len(fileids) == 1: + return XMLCorpusReader.xml(self, fileids[0]) + else: + raise TypeError("Expected a single file") diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py new file mode 100644 index 0000000000000000000000000000000000000000..c254a8416f2c1bb38f684819e43bae76a4308eeb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py @@ -0,0 +1,520 @@ +# Natural Language Toolkit: PropBank Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +import re +from functools import total_ordering +from xml.etree import ElementTree + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.internals import raise_unorderable_types +from nltk.tree import Tree + + +class PropbankCorpusReader(CorpusReader): + """ + Corpus reader for the propbank corpus, which augments the Penn + Treebank with information about the predicate argument structure + of every verb instance. The corpus consists of two parts: the + predicate-argument annotations themselves, and a set of "frameset + files" which define the argument labels used by the annotations, + on a per-verb basis. Each "frameset file" contains one or more + predicates, such as ``'turn'`` or ``'turn_on'``, each of which is + divided into coarse-grained word senses called "rolesets". For + each "roleset", the frameset file provides descriptions of the + argument roles, along with examples. + """ + + def __init__( + self, + root, + propfile, + framefiles="", + verbsfile=None, + parse_fileid_xform=None, + parse_corpus=None, + encoding="utf8", + ): + """ + :param root: The root directory for this corpus. + :param propfile: The name of the file containing the predicate- + argument annotations (relative to ``root``). + :param framefiles: A list or regexp specifying the frameset + fileids for this corpus. + :param parse_fileid_xform: A transform that should be applied + to the fileids in this corpus. This should be a function + of one argument (a fileid) that returns a string (the new + fileid). + :param parse_corpus: The corpus containing the parse trees + corresponding to this corpus. These parse trees are + necessary to resolve the tree pointers used by propbank. + """ + # If framefiles is specified as a regexp, expand it. + if isinstance(framefiles, str): + framefiles = find_corpus_fileids(root, framefiles) + framefiles = list(framefiles) + # Initialize the corpus reader. + CorpusReader.__init__(self, root, [propfile, verbsfile] + framefiles, encoding) + + # Record our frame fileids & prop file. + self._propfile = propfile + self._framefiles = framefiles + self._verbsfile = verbsfile + self._parse_fileid_xform = parse_fileid_xform + self._parse_corpus = parse_corpus + + def instances(self, baseform=None): + """ + :return: a corpus view that acts as a list of + ``PropBankInstance`` objects, one for each noun in the corpus. 
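+
+        Illustrative usage (requires the ``propbank`` corpus together with
+        its treebank parses; the values shown are indicative only):
+
+            >>> from nltk.corpus import propbank            # doctest: +SKIP
+            >>> inst = propbank.instances()[0]              # doctest: +SKIP
+            >>> inst.fileid, inst.sentnum, inst.wordnum     # doctest: +SKIP
+            ('wsj_0001.mrg', 0, 8)
+            >>> inst.roleset                                # doctest: +SKIP
+            'join.01'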
+ """ + kwargs = {} + if baseform is not None: + kwargs["instance_filter"] = lambda inst: inst.baseform == baseform + return StreamBackedCorpusView( + self.abspath(self._propfile), + lambda stream: self._read_instance_block(stream, **kwargs), + encoding=self.encoding(self._propfile), + ) + + def lines(self): + """ + :return: a corpus view that acts as a list of strings, one for + each line in the predicate-argument annotation file. + """ + return StreamBackedCorpusView( + self.abspath(self._propfile), + read_line_block, + encoding=self.encoding(self._propfile), + ) + + def roleset(self, roleset_id): + """ + :return: the xml description for the given roleset. + """ + baseform = roleset_id.split(".")[0] + framefile = "frames/%s.xml" % baseform + if framefile not in self._framefiles: + raise ValueError("Frameset file for %s not found" % roleset_id) + + # n.b.: The encoding for XML fileids is specified by the file + # itself; so we ignore self._encoding here. + with self.abspath(framefile).open() as fp: + etree = ElementTree.parse(fp).getroot() + for roleset in etree.findall("predicate/roleset"): + if roleset.attrib["id"] == roleset_id: + return roleset + raise ValueError(f"Roleset {roleset_id} not found in {framefile}") + + def rolesets(self, baseform=None): + """ + :return: list of xml descriptions for rolesets. + """ + if baseform is not None: + framefile = "frames/%s.xml" % baseform + if framefile not in self._framefiles: + raise ValueError("Frameset file for %s not found" % baseform) + framefiles = [framefile] + else: + framefiles = self._framefiles + + rsets = [] + for framefile in framefiles: + # n.b.: The encoding for XML fileids is specified by the file + # itself; so we ignore self._encoding here. + with self.abspath(framefile).open() as fp: + etree = ElementTree.parse(fp).getroot() + rsets.append(etree.findall("predicate/roleset")) + return LazyConcatenation(rsets) + + def verbs(self): + """ + :return: a corpus view that acts as a list of all verb lemmas + in this corpus (from the verbs.txt file). + """ + return StreamBackedCorpusView( + self.abspath(self._verbsfile), + read_line_block, + encoding=self.encoding(self._verbsfile), + ) + + def _read_instance_block(self, stream, instance_filter=lambda inst: True): + block = [] + + # Read 100 at a time. + for i in range(100): + line = stream.readline().strip() + if line: + inst = PropbankInstance.parse( + line, self._parse_fileid_xform, self._parse_corpus + ) + if instance_filter(inst): + block.append(inst) + + return block + + +###################################################################### +# { Propbank Instance & related datatypes +###################################################################### + + +class PropbankInstance: + def __init__( + self, + fileid, + sentnum, + wordnum, + tagger, + roleset, + inflection, + predicate, + arguments, + parse_corpus=None, + ): + + self.fileid = fileid + """The name of the file containing the parse tree for this + instance's sentence.""" + + self.sentnum = sentnum + """The sentence number of this sentence within ``fileid``. + Indexing starts from zero.""" + + self.wordnum = wordnum + """The word number of this instance's predicate within its + containing sentence. 
Word numbers are indexed starting from + zero, and include traces and other empty parse elements.""" + + self.tagger = tagger + """An identifier for the tagger who tagged this instance; or + ``'gold'`` if this is an adjuticated instance.""" + + self.roleset = roleset + """The name of the roleset used by this instance's predicate. + Use ``propbank.roleset() `` to + look up information about the roleset.""" + + self.inflection = inflection + """A ``PropbankInflection`` object describing the inflection of + this instance's predicate.""" + + self.predicate = predicate + """A ``PropbankTreePointer`` indicating the position of this + instance's predicate within its containing sentence.""" + + self.arguments = tuple(arguments) + """A list of tuples (argloc, argid), specifying the location + and identifier for each of the predicate's argument in the + containing sentence. Argument identifiers are strings such as + ``'ARG0'`` or ``'ARGM-TMP'``. This list does *not* contain + the predicate.""" + + self.parse_corpus = parse_corpus + """A corpus reader for the parse trees corresponding to the + instances in this propbank corpus.""" + + @property + def baseform(self): + """The baseform of the predicate.""" + return self.roleset.split(".")[0] + + @property + def sensenumber(self): + """The sense number of the predicate.""" + return self.roleset.split(".")[1] + + @property + def predid(self): + """Identifier of the predicate.""" + return "rel" + + def __repr__(self): + return "".format( + self.fileid, + self.sentnum, + self.wordnum, + ) + + def __str__(self): + s = "{} {} {} {} {} {}".format( + self.fileid, + self.sentnum, + self.wordnum, + self.tagger, + self.roleset, + self.inflection, + ) + items = self.arguments + ((self.predicate, "rel"),) + for (argloc, argid) in sorted(items): + s += f" {argloc}-{argid}" + return s + + def _get_tree(self): + if self.parse_corpus is None: + return None + if self.fileid not in self.parse_corpus.fileids(): + return None + return self.parse_corpus.parsed_sents(self.fileid)[self.sentnum] + + tree = property( + _get_tree, + doc=""" + The parse tree corresponding to this instance, or None if + the corresponding tree is not available.""", + ) + + @staticmethod + def parse(s, parse_fileid_xform=None, parse_corpus=None): + pieces = s.split() + if len(pieces) < 7: + raise ValueError("Badly formatted propbank line: %r" % s) + + # Divide the line into its basic pieces. + (fileid, sentnum, wordnum, tagger, roleset, inflection) = pieces[:6] + rel = [p for p in pieces[6:] if p.endswith("-rel")] + args = [p for p in pieces[6:] if not p.endswith("-rel")] + if len(rel) != 1: + raise ValueError("Badly formatted propbank line: %r" % s) + + # Apply the fileid selector, if any. + if parse_fileid_xform is not None: + fileid = parse_fileid_xform(fileid) + + # Convert sentence & word numbers to ints. + sentnum = int(sentnum) + wordnum = int(wordnum) + + # Parse the inflection + inflection = PropbankInflection.parse(inflection) + + # Parse the predicate location. + predicate = PropbankTreePointer.parse(rel[0][:-4]) + + # Parse the arguments. + arguments = [] + for arg in args: + argloc, argid = arg.split("-", 1) + arguments.append((PropbankTreePointer.parse(argloc), argid)) + + # Put it all together. + return PropbankInstance( + fileid, + sentnum, + wordnum, + tagger, + roleset, + inflection, + predicate, + arguments, + parse_corpus, + ) + + +class PropbankPointer: + """ + A pointer used by propbank to identify one or more constituents in + a parse tree. 
``PropbankPointer`` is an abstract base class with + three concrete subclasses: + + - ``PropbankTreePointer`` is used to point to single constituents. + - ``PropbankSplitTreePointer`` is used to point to 'split' + constituents, which consist of a sequence of two or more + ``PropbankTreePointer`` pointers. + - ``PropbankChainTreePointer`` is used to point to entire trace + chains in a tree. It consists of a sequence of pieces, which + can be ``PropbankTreePointer`` or ``PropbankSplitTreePointer`` pointers. + """ + + def __init__(self): + if self.__class__ == PropbankPointer: + raise NotImplementedError() + + +class PropbankChainTreePointer(PropbankPointer): + def __init__(self, pieces): + self.pieces = pieces + """A list of the pieces that make up this chain. Elements may + be either ``PropbankSplitTreePointer`` or + ``PropbankTreePointer`` pointers.""" + + def __str__(self): + return "*".join("%s" % p for p in self.pieces) + + def __repr__(self): + return "" % self + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return Tree("*CHAIN*", [p.select(tree) for p in self.pieces]) + + +class PropbankSplitTreePointer(PropbankPointer): + def __init__(self, pieces): + self.pieces = pieces + """A list of the pieces that make up this chain. Elements are + all ``PropbankTreePointer`` pointers.""" + + def __str__(self): + return ",".join("%s" % p for p in self.pieces) + + def __repr__(self): + return "" % self + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return Tree("*SPLIT*", [p.select(tree) for p in self.pieces]) + + +@total_ordering +class PropbankTreePointer(PropbankPointer): + """ + wordnum:height*wordnum:height*... + wordnum:height, + + """ + + def __init__(self, wordnum, height): + self.wordnum = wordnum + self.height = height + + @staticmethod + def parse(s): + # Deal with chains (xx*yy*zz) + pieces = s.split("*") + if len(pieces) > 1: + return PropbankChainTreePointer( + [PropbankTreePointer.parse(elt) for elt in pieces] + ) + + # Deal with split args (xx,yy,zz) + pieces = s.split(",") + if len(pieces) > 1: + return PropbankSplitTreePointer( + [PropbankTreePointer.parse(elt) for elt in pieces] + ) + + # Deal with normal pointers. + pieces = s.split(":") + if len(pieces) != 2: + raise ValueError("bad propbank pointer %r" % s) + return PropbankTreePointer(int(pieces[0]), int(pieces[1])) + + def __str__(self): + return f"{self.wordnum}:{self.height}" + + def __repr__(self): + return "PropbankTreePointer(%d, %d)" % (self.wordnum, self.height) + + def __eq__(self, other): + while isinstance(other, (PropbankChainTreePointer, PropbankSplitTreePointer)): + other = other.pieces[0] + + if not isinstance(other, PropbankTreePointer): + return self is other + + return self.wordnum == other.wordnum and self.height == other.height + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + while isinstance(other, (PropbankChainTreePointer, PropbankSplitTreePointer)): + other = other.pieces[0] + + if not isinstance(other, PropbankTreePointer): + return id(self) < id(other) + + return (self.wordnum, -self.height) < (other.wordnum, -other.height) + + def select(self, tree): + if tree is None: + raise ValueError("Parse tree not available") + return tree[self.treepos(tree)] + + def treepos(self, tree): + """ + Convert this pointer to a standard 'tree position' pointer, + given that it points to the given tree. 
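+
+        A small self-contained illustration (the toy tree below exists
+        only for this example):
+
+            >>> from nltk.tree import Tree
+            >>> t = Tree.fromstring("(S (NP (DT the) (NN dog)) (VP (VBD barked)))")
+            >>> PropbankTreePointer(2, 1).treepos(t)
+            (1,)
+            >>> PropbankTreePointer(2, 1).select(t)
+            Tree('VP', [Tree('VBD', ['barked'])])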
+ """ + if tree is None: + raise ValueError("Parse tree not available") + stack = [tree] + treepos = [] + + wordnum = 0 + while True: + # tree node: + if isinstance(stack[-1], Tree): + # Select the next child. + if len(treepos) < len(stack): + treepos.append(0) + else: + treepos[-1] += 1 + # Update the stack. + if treepos[-1] < len(stack[-1]): + stack.append(stack[-1][treepos[-1]]) + else: + # End of node's child list: pop up a level. + stack.pop() + treepos.pop() + # word node: + else: + if wordnum == self.wordnum: + return tuple(treepos[: len(treepos) - self.height - 1]) + else: + wordnum += 1 + stack.pop() + + +class PropbankInflection: + # { Inflection Form + INFINITIVE = "i" + GERUND = "g" + PARTICIPLE = "p" + FINITE = "v" + # { Inflection Tense + FUTURE = "f" + PAST = "p" + PRESENT = "n" + # { Inflection Aspect + PERFECT = "p" + PROGRESSIVE = "o" + PERFECT_AND_PROGRESSIVE = "b" + # { Inflection Person + THIRD_PERSON = "3" + # { Inflection Voice + ACTIVE = "a" + PASSIVE = "p" + # { Inflection + NONE = "-" + # } + + def __init__(self, form="-", tense="-", aspect="-", person="-", voice="-"): + self.form = form + self.tense = tense + self.aspect = aspect + self.person = person + self.voice = voice + + def __str__(self): + return self.form + self.tense + self.aspect + self.person + self.voice + + def __repr__(self): + return "" % self + + _VALIDATE = re.compile(r"[igpv\-][fpn\-][pob\-][3\-][ap\-]$") + + @staticmethod + def parse(s): + if not isinstance(s, str): + raise TypeError("expected a string") + if len(s) != 5 or not PropbankInflection._VALIDATE.match(s): + raise ValueError("Bad propbank inflection string %r" % s) + return PropbankInflection(*s) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/senseval.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/senseval.py new file mode 100644 index 0000000000000000000000000000000000000000..99f09fe9f486f7770bddb290550f844898aef966 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/senseval.py @@ -0,0 +1,196 @@ +# Natural Language Toolkit: Senseval 2 Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Steven Bird (modifications) +# URL: +# For license information, see LICENSE.TXT + +""" +Read from the Senseval 2 Corpus. + +SENSEVAL [http://www.senseval.org/] +Evaluation exercises for Word Sense Disambiguation. +Organized by ACL-SIGLEX [https://www.siglex.org/] + +Prepared by Ted Pedersen , University of Minnesota, +https://www.d.umn.edu/~tpederse/data.html +Distributed with permission. + +The NLTK version of the Senseval 2 files uses well-formed XML. +Each instance of the ambiguous words "hard", "interest", "line", and "serve" +is tagged with a sense identifier, and supplied with context. 
+""" + +import re +from xml.etree import ElementTree + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tokenize import * + + +class SensevalInstance: + def __init__(self, word, position, context, senses): + self.word = word + self.senses = tuple(senses) + self.position = position + self.context = context + + def __repr__(self): + return "SensevalInstance(word=%r, position=%r, " "context=%r, senses=%r)" % ( + self.word, + self.position, + self.context, + self.senses, + ) + + +class SensevalCorpusReader(CorpusReader): + def instances(self, fileids=None): + return concat( + [ + SensevalCorpusView(fileid, enc) + for (fileid, enc) in self.abspaths(fileids, True) + ] + ) + + def _entry(self, tree): + elts = [] + for lexelt in tree.findall("lexelt"): + for inst in lexelt.findall("instance"): + sense = inst[0].attrib["senseid"] + context = [(w.text, w.attrib["pos"]) for w in inst[1]] + elts.append((sense, context)) + return elts + + +class SensevalCorpusView(StreamBackedCorpusView): + def __init__(self, fileid, encoding): + StreamBackedCorpusView.__init__(self, fileid, encoding=encoding) + + self._word_tokenizer = WhitespaceTokenizer() + self._lexelt_starts = [0] # list of streampos + self._lexelts = [None] # list of lexelt names + + def read_block(self, stream): + # Decide which lexical element we're in. + lexelt_num = bisect.bisect_right(self._lexelt_starts, stream.tell()) - 1 + lexelt = self._lexelts[lexelt_num] + + instance_lines = [] + in_instance = False + while True: + line = stream.readline() + if line == "": + assert instance_lines == [] + return [] + + # Start of a lexical element? + if line.lstrip().startswith(" has no 'item=...' + lexelt = m.group(1)[1:-1] + if lexelt_num < len(self._lexelts): + assert lexelt == self._lexelts[lexelt_num] + else: + self._lexelts.append(lexelt) + self._lexelt_starts.append(stream.tell()) + + # Start of an instance? + if line.lstrip().startswith("" + elif cword.tag == "wf": + context.append((cword.text, cword.attrib["pos"])) + elif cword.tag == "s": + pass # Sentence boundary marker. + + else: + print("ACK", cword.tag) + assert False, "expected CDATA or or " + if cword.tail: + context += self._word_tokenizer.tokenize(cword.tail) + else: + assert False, "unexpected tag %s" % child.tag + return SensevalInstance(lexelt, position, context, senses) + + +def _fixXML(text): + """ + Fix the various issues with Senseval pseudo-XML. + """ + # <~> or <^> => ~ or ^ + text = re.sub(r"<([~\^])>", r"\1", text) + # fix lone & + text = re.sub(r"(\s+)\&(\s+)", r"\1&\2", text) + # fix """ + text = re.sub(r'"""', "'\"'", text) + # fix => + text = re.sub(r'(<[^<]*snum=)([^">]+)>', r'\1"\2"/>', text) + # fix foreign word tag + text = re.sub(r"<\&frasl>\s*]*>", "FRASL", text) + # remove <&I .> + text = re.sub(r"<\&I[^>]*>", "", text) + # fix <{word}> + text = re.sub(r"<{([^}]+)}>", r"\1", text) + # remove <@>,

<p>, </p>

+ text = re.sub(r"<(@|/?p)>", r"", text) + # remove <&M .> and <&T .> and <&Ms .> + text = re.sub(r"<&\w+ \.>", r"", text) + # remove lines + text = re.sub(r"]*>", r"", text) + # remove <[hi]> and <[/p]> etc + text = re.sub(r"<\[\/?[^>]+\]*>", r"", text) + # take the thing out of the brackets: <…> + text = re.sub(r"<(\&\w+;)>", r"\1", text) + # and remove the & for those patterns that aren't regular XML + text = re.sub(r"&(?!amp|gt|lt|apos|quot)", r"", text) + # fix 'abc ' style tags - now abc + text = re.sub( + r'[ \t]*([^<>\s]+?)[ \t]*', r' \1', text + ) + text = re.sub(r'\s*"\s*', " \"", text) + return text diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py new file mode 100644 index 0000000000000000000000000000000000000000..5684ea0b90129223ada6e7dc62fd6a6708e90960 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py @@ -0,0 +1,76 @@ +# Natural Language Toolkit: Toolbox Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Greg Aumann +# Stuart Robinson +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Module for reading, writing and manipulating +Toolbox databases and settings fileids. +""" + +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.toolbox import ToolboxData + + +class ToolboxCorpusReader(CorpusReader): + def xml(self, fileids, key=None): + return concat( + [ + ToolboxData(path, enc).parse(key=key) + for (path, enc) in self.abspaths(fileids, True) + ] + ) + + def fields( + self, + fileids, + strip=True, + unwrap=True, + encoding="utf8", + errors="strict", + unicode_fields=None, + ): + return concat( + [ + list( + ToolboxData(fileid, enc).fields( + strip, unwrap, encoding, errors, unicode_fields + ) + ) + for (fileid, enc) in self.abspaths(fileids, include_encoding=True) + ] + ) + + # should probably be done lazily: + def entries(self, fileids, **kwargs): + if "key" in kwargs: + key = kwargs["key"] + del kwargs["key"] + else: + key = "lx" # the default key in MDF + entries = [] + for marker, contents in self.fields(fileids, **kwargs): + if marker == key: + entries.append((contents, [])) + else: + try: + entries[-1][-1].append((marker, contents)) + except IndexError: + pass + return entries + + def words(self, fileids, key="lx"): + return [contents for marker, contents in self.fields(fileids) if marker == key] + + +def demo(): + pass + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/twitter.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/twitter.py new file mode 100644 index 0000000000000000000000000000000000000000..a54c6654f0d95aefa3e1bfb55402be505981607e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/twitter.py @@ -0,0 +1,136 @@ +# Natural Language Toolkit: Twitter Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +A reader for corpora that consist of Tweets. It is assumed that the Tweets +have been serialised into line-delimited JSON. 
+""" + +import json +import os + +from nltk.corpus.reader.api import CorpusReader +from nltk.corpus.reader.util import StreamBackedCorpusView, ZipFilePathPointer, concat +from nltk.tokenize import TweetTokenizer + + +class TwitterCorpusReader(CorpusReader): + r""" + Reader for corpora that consist of Tweets represented as a list of line-delimited JSON. + + Individual Tweets can be tokenized using the default tokenizer, or by a + custom tokenizer specified as a parameter to the constructor. + + Construct a new Tweet corpus reader for a set of documents + located at the given root directory. + + If you made your own tweet collection in a directory called + `twitter-files`, then you can initialise the reader as:: + + from nltk.corpus import TwitterCorpusReader + reader = TwitterCorpusReader(root='/path/to/twitter-files', '.*\.json') + + However, the recommended approach is to set the relevant directory as the + value of the environmental variable `TWITTER`, and then invoke the reader + as follows:: + + root = os.environ['TWITTER'] + reader = TwitterCorpusReader(root, '.*\.json') + + If you want to work directly with the raw Tweets, the `json` library can + be used:: + + import json + for tweet in reader.docs(): + print(json.dumps(tweet, indent=1, sort_keys=True)) + + """ + + CorpusView = StreamBackedCorpusView + """ + The corpus view class used by this reader. + """ + + def __init__( + self, root, fileids=None, word_tokenizer=TweetTokenizer(), encoding="utf8" + ): + """ + :param root: The root directory for this corpus. + :param fileids: A list or regexp specifying the fileids in this corpus. + :param word_tokenizer: Tokenizer for breaking the text of Tweets into + smaller units, including but not limited to words. + """ + CorpusReader.__init__(self, root, fileids, encoding) + + for path in self.abspaths(self._fileids): + if isinstance(path, ZipFilePathPointer): + pass + elif os.path.getsize(path) == 0: + raise ValueError(f"File {path} is empty") + """Check that all user-created corpus files are non-empty.""" + + self._word_tokenizer = word_tokenizer + + def docs(self, fileids=None): + """ + Returns the full Tweet objects, as specified by `Twitter + documentation on Tweets + `_ + + :return: the given file(s) as a list of dictionaries deserialised + from JSON. + :rtype: list(dict) + """ + return concat( + [ + self.CorpusView(path, self._read_tweets, encoding=enc) + for (path, enc, fileid) in self.abspaths(fileids, True, True) + ] + ) + + def strings(self, fileids=None): + """ + Returns only the text content of Tweets in the file(s) + + :return: the given file(s) as a list of Tweets. + :rtype: list(str) + """ + fulltweets = self.docs(fileids) + tweets = [] + for jsono in fulltweets: + try: + text = jsono["text"] + if isinstance(text, bytes): + text = text.decode(self.encoding) + tweets.append(text) + except KeyError: + pass + return tweets + + def tokenized(self, fileids=None): + """ + :return: the given file(s) as a list of the text content of Tweets as + as a list of words, screenanames, hashtags, URLs and punctuation symbols. + + :rtype: list(list(str)) + """ + tweets = self.strings(fileids) + tokenizer = self._word_tokenizer + return [tokenizer.tokenize(t) for t in tweets] + + def _read_tweets(self, stream): + """ + Assumes that each line in ``stream`` is a JSON-serialised object. 
+ """ + tweets = [] + for i in range(10): + line = stream.readline() + if not line: + return tweets + tweet = json.loads(line) + tweets.append(tweet) + return tweets diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/udhr.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/udhr.py new file mode 100644 index 0000000000000000000000000000000000000000..e6309ff4559659ff9b97bf679b563bcb957d18f1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/udhr.py @@ -0,0 +1,75 @@ +""" +UDHR corpus reader. It mostly deals with encodings. +""" + +from nltk.corpus.reader.plaintext import PlaintextCorpusReader +from nltk.corpus.reader.util import find_corpus_fileids + + +class UdhrCorpusReader(PlaintextCorpusReader): + + ENCODINGS = [ + (".*-Latin1$", "latin-1"), + (".*-Hebrew$", "hebrew"), + (".*-Arabic$", "cp1256"), + ("Czech_Cesky-UTF8", "cp1250"), # yeah + ("Polish-Latin2", "cp1250"), + ("Polish_Polski-Latin2", "cp1250"), + (".*-Cyrillic$", "cyrillic"), + (".*-SJIS$", "SJIS"), + (".*-GB2312$", "GB2312"), + (".*-Latin2$", "ISO-8859-2"), + (".*-Greek$", "greek"), + (".*-UTF8$", "utf-8"), + ("Hungarian_Magyar-Unicode", "utf-16-le"), + ("Amahuaca", "latin1"), + ("Turkish_Turkce-Turkish", "latin5"), + ("Lithuanian_Lietuviskai-Baltic", "latin4"), + ("Japanese_Nihongo-EUC", "EUC-JP"), + ("Japanese_Nihongo-JIS", "iso2022_jp"), + ("Chinese_Mandarin-HZ", "hz"), + (r"Abkhaz\-Cyrillic\+Abkh", "cp1251"), + ] + + SKIP = { + # The following files are not fully decodable because they + # were truncated at wrong bytes: + "Burmese_Myanmar-UTF8", + "Japanese_Nihongo-JIS", + "Chinese_Mandarin-HZ", + "Chinese_Mandarin-UTF8", + "Gujarati-UTF8", + "Hungarian_Magyar-Unicode", + "Lao-UTF8", + "Magahi-UTF8", + "Marathi-UTF8", + "Tamil-UTF8", + # Unfortunately, encodings required for reading + # the following files are not supported by Python: + "Vietnamese-VPS", + "Vietnamese-VIQR", + "Vietnamese-TCVN", + "Magahi-Agra", + "Bhojpuri-Agra", + "Esperanto-T61", # latin3 raises an exception + # The following files are encoded for specific fonts: + "Burmese_Myanmar-WinResearcher", + "Armenian-DallakHelv", + "Tigrinya_Tigrigna-VG2Main", + "Amharic-Afenegus6..60375", # ? + "Navaho_Dine-Navajo-Navaho-font", + # What are these? + "Azeri_Azerbaijani_Cyrillic-Az.Times.Cyr.Normal0117", + "Azeri_Azerbaijani_Latin-Az.Times.Lat0117", + # The following files are unintended: + "Czech-Latin2-err", + "Russian_Russky-UTF8~", + } + + def __init__(self, root="udhr"): + fileids = find_corpus_fileids(root, r"(?!README|\.).*") + super().__init__( + root, + [fileid for fileid in fileids if fileid not in self.SKIP], + encoding=self.ENCODINGS, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py new file mode 100644 index 0000000000000000000000000000000000000000..aced7e83fc7c48027d4d1eeb6aca46531ab57969 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py @@ -0,0 +1,166 @@ +# Natural Language Toolkit: Word List Corpus Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +from nltk.corpus.reader.api import * +from nltk.corpus.reader.util import * +from nltk.tokenize import line_tokenize + + +class WordListCorpusReader(CorpusReader): + """ + List of words, one per line. Blank lines are ignored. 
+ """ + + def words(self, fileids=None, ignore_lines_startswith="\n"): + return [ + line + for line in line_tokenize(self.raw(fileids)) + if not line.startswith(ignore_lines_startswith) + ] + + +class SwadeshCorpusReader(WordListCorpusReader): + def entries(self, fileids=None): + """ + :return: a tuple of words for the specified fileids. + """ + if not fileids: + fileids = self.fileids() + + wordlists = [self.words(f) for f in fileids] + return list(zip(*wordlists)) + + +class NonbreakingPrefixesCorpusReader(WordListCorpusReader): + """ + This is a class to read the nonbreaking prefixes textfiles from the + Moses Machine Translation toolkit. These lists are used in the Python port + of the Moses' word tokenizer. + """ + + available_langs = { + "catalan": "ca", + "czech": "cs", + "german": "de", + "greek": "el", + "english": "en", + "spanish": "es", + "finnish": "fi", + "french": "fr", + "hungarian": "hu", + "icelandic": "is", + "italian": "it", + "latvian": "lv", + "dutch": "nl", + "polish": "pl", + "portuguese": "pt", + "romanian": "ro", + "russian": "ru", + "slovak": "sk", + "slovenian": "sl", + "swedish": "sv", + "tamil": "ta", + } + # Also, add the lang IDs as the keys. + available_langs.update({v: v for v in available_langs.values()}) + + def words(self, lang=None, fileids=None, ignore_lines_startswith="#"): + """ + This module returns a list of nonbreaking prefixes for the specified + language(s). + + >>> from nltk.corpus import nonbreaking_prefixes as nbp + >>> nbp.words('en')[:10] == [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'J'] + True + >>> nbp.words('ta')[:5] == [u'\u0b85', u'\u0b86', u'\u0b87', u'\u0b88', u'\u0b89'] + True + + :return: a list words for the specified language(s). + """ + # If *lang* in list of languages available, allocate apt fileid. + # Otherwise, the function returns non-breaking prefixes for + # all languages when fileids==None. + if lang in self.available_langs: + lang = self.available_langs[lang] + fileids = ["nonbreaking_prefix." + lang] + return [ + line + for line in line_tokenize(self.raw(fileids)) + if not line.startswith(ignore_lines_startswith) + ] + + +class UnicharsCorpusReader(WordListCorpusReader): + """ + This class is used to read lists of characters from the Perl Unicode + Properties (see https://perldoc.perl.org/perluniprops.html). + The files in the perluniprop.zip are extracted using the Unicode::Tussle + module from https://search.cpan.org/~bdfoy/Unicode-Tussle-1.11/lib/Unicode/Tussle.pm + """ + + # These are categories similar to the Perl Unicode Properties + available_categories = [ + "Close_Punctuation", + "Currency_Symbol", + "IsAlnum", + "IsAlpha", + "IsLower", + "IsN", + "IsSc", + "IsSo", + "IsUpper", + "Line_Separator", + "Number", + "Open_Punctuation", + "Punctuation", + "Separator", + "Symbol", + ] + + def chars(self, category=None, fileids=None): + """ + This module returns a list of characters from the Perl Unicode Properties. + They are very useful when porting Perl tokenizers to Python. 
+ + >>> from nltk.corpus import perluniprops as pup + >>> pup.chars('Open_Punctuation')[:5] == [u'(', u'[', u'{', u'\u0f3a', u'\u0f3c'] + True + >>> pup.chars('Currency_Symbol')[:5] == [u'$', u'\xa2', u'\xa3', u'\xa4', u'\xa5'] + True + >>> pup.available_categories + ['Close_Punctuation', 'Currency_Symbol', 'IsAlnum', 'IsAlpha', 'IsLower', 'IsN', 'IsSc', 'IsSo', 'IsUpper', 'Line_Separator', 'Number', 'Open_Punctuation', 'Punctuation', 'Separator', 'Symbol'] + + :return: a list of characters given the specific unicode character category + """ + if category in self.available_categories: + fileids = [category + ".txt"] + return list(self.raw(fileids).strip()) + + +class MWAPPDBCorpusReader(WordListCorpusReader): + """ + This class is used to read the list of word pairs from the subset of lexical + pairs of The Paraphrase Database (PPDB) XXXL used in the Monolingual Word + Alignment (MWA) algorithm described in Sultan et al. (2014a, 2014b, 2015): + + - http://acl2014.org/acl2014/Q14/pdf/Q14-1017 + - https://www.aclweb.org/anthology/S14-2039 + - https://www.aclweb.org/anthology/S15-2027 + + The original source of the full PPDB corpus can be found on + https://www.cis.upenn.edu/~ccb/ppdb/ + + :return: a list of tuples of similar lexical terms. + """ + + mwa_ppdb_xxxl_file = "ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs" + + def entries(self, fileids=mwa_ppdb_xxxl_file): + """ + :return: a tuple of synonym word pairs. + """ + return [tuple(line.split("\t")) for line in line_tokenize(self.raw(fileids))]