applied-ai-018 committed on
Commit eb8294f · verified · 1 parent: eabccd4

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/nltk/classify/__init__.py +101 -0
  2. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/api.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/decisiontree.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/maxent.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/megam.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/naivebayes.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/rte_classify.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/scikitlearn.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/senna.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/svm.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/tadm.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/textcat.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/util.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/weka.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/nltk/classify/tadm.py +122 -0
  18. env-llmeval/lib/python3.10/site-packages/nltk/corpus/__init__.py +529 -0
  19. env-llmeval/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py +56 -0
  20. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py +186 -0
  21. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py +154 -0
  22. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/api.py +516 -0
  23. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py +218 -0
  24. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py +265 -0
  25. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py +237 -0
  26. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py +158 -0
  27. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/childes.py +630 -0
  28. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py +273 -0
  29. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py +309 -0
  30. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/conll.py +579 -0
  31. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py +115 -0
  32. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py +116 -0
  33. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/indian.py +93 -0
  34. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py +356 -0
  35. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/lin.py +183 -0
  36. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/markdown.py +342 -0
  37. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py +90 -0
  38. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py +125 -0
  39. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py +174 -0
  40. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py +95 -0
  41. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py +375 -0
  42. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py +520 -0
  43. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py +133 -0
  44. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py +331 -0
  45. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/rte.py +146 -0
  46. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py +56 -0
  47. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py +354 -0
  48. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/timit.py +510 -0
  49. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py +76 -0
  50. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py +166 -0
env-llmeval/lib/python3.10/site-packages/nltk/classify/__init__.py ADDED
@@ -0,0 +1,101 @@
1
+ # Natural Language Toolkit: Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Classes and interfaces for labeling tokens with category labels (or
10
+ "class labels"). Typically, labels are represented with strings
11
+ (such as ``'health'`` or ``'sports'``). Classifiers can be used to
12
+ perform a wide range of classification tasks. For example,
13
+ classifiers can be used...
14
+
15
+ - to classify documents by topic
16
+ - to classify ambiguous words by which word sense is intended
17
+ - to classify acoustic signals by which phoneme they represent
18
+ - to classify sentences by their author
19
+
20
+ Features
21
+ ========
22
+ In order to decide which category label is appropriate for a given
23
+ token, classifiers examine one or more 'features' of the token. These
24
+ "features" are typically chosen by hand, and indicate which aspects
25
+ of the token are relevant to the classification decision. For
26
+ example, a document classifier might use a separate feature for each
27
+ word, recording how often that word occurred in the document.
28
+
29
+ Featuresets
30
+ ===========
31
+ The features describing a token are encoded using a "featureset",
32
+ which is a dictionary that maps from "feature names" to "feature
33
+ values". Feature names are unique strings that indicate what aspect
34
+ of the token is encoded by the feature. Examples include
35
+ ``'prevword'``, for a feature whose value is the previous word; and
36
+ ``'contains-word(library)'`` for a feature that is true when a document
37
+ contains the word ``'library'``. Feature values are typically
38
+ booleans, numbers, or strings, depending on which feature they
39
+ describe.
40
+
41
+ Featuresets are typically constructed using a "feature detector"
42
+ (also known as a "feature extractor"). A feature detector is a
43
+ function that takes a token (and sometimes information about its
44
+ context) as its input, and returns a featureset describing that token.
45
+ For example, the following feature detector converts a document
46
+ (stored as a list of words) to a featureset describing the set of
47
+ words included in the document:
48
+
49
+ >>> # Define a feature detector function.
50
+ >>> def document_features(document):
51
+ ... return dict([('contains-word(%s)' % w, True) for w in document])
52
+
53
+ Feature detectors are typically applied to each token before it is fed
54
+ to the classifier:
55
+
56
+ >>> # Classify each Gutenberg document.
57
+ >>> from nltk.corpus import gutenberg
58
+ >>> for fileid in gutenberg.fileids(): # doctest: +SKIP
59
+ ... doc = gutenberg.words(fileid) # doctest: +SKIP
60
+ ... print(fileid, classifier.classify(document_features(doc))) # doctest: +SKIP
61
+
62
+ The parameters that a feature detector expects will vary, depending on
63
+ the task and the needs of the feature detector. For example, a
64
+ feature detector for word sense disambiguation (WSD) might take as its
65
+ input a sentence, and the index of a word that should be classified,
66
+ and return a featureset for that word. The following feature detector
67
+ for WSD includes features describing the left and right contexts of
68
+ the target word:
69
+
70
+ >>> def wsd_features(sentence, index):
71
+ ... featureset = {}
72
+ ... for i in range(max(0, index-3), index):
73
+ ... featureset['left-context(%s)' % sentence[i]] = True
74
+ ... for i in range(index, max(index+3, len(sentence))):
75
+ ... featureset['right-context(%s)' % sentence[i]] = True
76
+ ... return featureset
77
+
78
+ Training Classifiers
79
+ ====================
80
+ Most classifiers are built by training them on a list of hand-labeled
81
+ examples, known as the "training set". Training sets are represented
82
+ as lists of ``(featuredict, label)`` tuples.
83
+ """
84
+
85
+ from nltk.classify.api import ClassifierI, MultiClassifierI
86
+ from nltk.classify.decisiontree import DecisionTreeClassifier
87
+ from nltk.classify.maxent import (
88
+ BinaryMaxentFeatureEncoding,
89
+ ConditionalExponentialClassifier,
90
+ MaxentClassifier,
91
+ TypedMaxentFeatureEncoding,
92
+ )
93
+ from nltk.classify.megam import call_megam, config_megam
94
+ from nltk.classify.naivebayes import NaiveBayesClassifier
95
+ from nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier
96
+ from nltk.classify.rte_classify import RTEFeatureExtractor, rte_classifier, rte_features
97
+ from nltk.classify.scikitlearn import SklearnClassifier
98
+ from nltk.classify.senna import Senna
99
+ from nltk.classify.textcat import TextCat
100
+ from nltk.classify.util import accuracy, apply_features, log_likelihood
101
+ from nltk.classify.weka import WekaClassifier, config_weka
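The module docstring above states that training sets are represented as lists of ``(featuredict, label)`` tuples. As a minimal end-to-end sketch (the documents and labels below are invented for illustration and are not part of this commit), the ``NaiveBayesClassifier`` exported by this package can be trained and used like this:

    # Sketch: train on hand-labeled (featuredict, label) tuples, as described above.
    from nltk.classify import NaiveBayesClassifier, accuracy

    def document_features(document):
        # Same shape as the feature detector shown in the docstring.
        return {"contains-word(%s)" % w: True for w in document}

    train_set = [
        (document_features(["the", "striker", "scored", "twice"]), "sports"),
        (document_features(["new", "vaccine", "trial", "results"]), "health"),
    ]
    test_set = [(document_features(["the", "goalkeeper", "scored"]), "sports")]

    classifier = NaiveBayesClassifier.train(train_set)
    print(classifier.classify(document_features(["vaccine", "approved"])))
    print(accuracy(classifier, test_set))

``NaiveBayesClassifier.train`` accepts exactly the ``(featuredict, label)`` list described in the docstring, and ``accuracy`` compares the classifier's predictions against the held-out labels.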
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.69 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/api.cpython-310.pyc ADDED
Binary file (4.93 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/decisiontree.cpython-310.pyc ADDED
Binary file (9.58 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/maxent.cpython-310.pyc ADDED
Binary file (46 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/megam.cpython-310.pyc ADDED
Binary file (5.21 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/naivebayes.cpython-310.pyc ADDED
Binary file (7.99 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc ADDED
Binary file (5.31 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/rte_classify.cpython-310.pyc ADDED
Binary file (5.55 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/scikitlearn.cpython-310.pyc ADDED
Binary file (5.94 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/senna.cpython-310.pyc ADDED
Binary file (5.63 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/svm.cpython-310.pyc ADDED
Binary file (691 Bytes).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/tadm.cpython-310.pyc ADDED
Binary file (3.33 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/textcat.cpython-310.pyc ADDED
Binary file (4.7 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/util.cpython-310.pyc ADDED
Binary file (10.5 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/__pycache__/weka.cpython-310.pyc ADDED
Binary file (10.2 kB).
 
env-llmeval/lib/python3.10/site-packages/nltk/classify/tadm.py ADDED
@@ -0,0 +1,122 @@
1
+ # Natural Language Toolkit: Interface to TADM Classifier
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Joseph Frazee <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import subprocess
9
+ import sys
10
+
11
+ from nltk.internals import find_binary
12
+
13
+ try:
14
+ import numpy
15
+ except ImportError:
16
+ pass
17
+
18
+ _tadm_bin = None
19
+
20
+
21
+ def config_tadm(bin=None):
22
+ global _tadm_bin
23
+ _tadm_bin = find_binary(
24
+ "tadm", bin, env_vars=["TADM"], binary_names=["tadm"], url="http://tadm.sf.net"
25
+ )
26
+
27
+
28
+ def write_tadm_file(train_toks, encoding, stream):
29
+ """
30
+ Generate an input file for ``tadm`` based on the given corpus of
31
+ classified tokens.
32
+
33
+ :type train_toks: list(tuple(dict, str))
34
+ :param train_toks: Training data, represented as a list of
35
+ pairs, the first member of which is a feature dictionary,
36
+ and the second of which is a classification label.
37
+ :type encoding: TadmEventMaxentFeatureEncoding
38
+ :param encoding: A feature encoding, used to convert featuresets
39
+ into feature vectors.
40
+ :type stream: stream
41
+ :param stream: The stream to which the ``tadm`` input file should be
42
+ written.
43
+ """
44
+ # See the following for a file format description:
45
+ #
46
+ # https://sf.net/forum/forum.php?thread_id=1391502&forum_id=473054
47
+ # https://sf.net/forum/forum.php?thread_id=1675097&forum_id=473054
48
+ labels = encoding.labels()
49
+ for featureset, label in train_toks:
50
+ length_line = "%d\n" % len(labels)
51
+ stream.write(length_line)
52
+ for known_label in labels:
53
+ v = encoding.encode(featureset, known_label)
54
+ line = "%d %d %s\n" % (
55
+ int(label == known_label),
56
+ len(v),
57
+ " ".join("%d %d" % u for u in v),
58
+ )
59
+ stream.write(line)
60
+
61
+
62
+ def parse_tadm_weights(paramfile):
63
+ """
64
+ Given the stdout output generated by ``tadm`` when training a
65
+ model, return a ``numpy`` array containing the corresponding weight
66
+ vector.
67
+ """
68
+ weights = []
69
+ for line in paramfile:
70
+ weights.append(float(line.strip()))
71
+ return numpy.array(weights, "d")
72
+
73
+
74
+ def call_tadm(args):
75
+ """
76
+ Call the ``tadm`` binary with the given arguments.
77
+ """
78
+ if isinstance(args, str):
79
+ raise TypeError("args should be a list of strings")
80
+ if _tadm_bin is None:
81
+ config_tadm()
82
+
83
+ # Call tadm via a subprocess
84
+ cmd = [_tadm_bin] + args
85
+ p = subprocess.Popen(cmd, stdout=sys.stdout)
86
+ (stdout, stderr) = p.communicate()
87
+
88
+ # Check the return code.
89
+ if p.returncode != 0:
90
+ print()
91
+ print(stderr)
92
+ raise OSError("tadm command failed!")
93
+
94
+
95
+ def names_demo():
96
+ from nltk.classify.maxent import TadmMaxentClassifier
97
+ from nltk.classify.util import names_demo
98
+
99
+ classifier = names_demo(TadmMaxentClassifier.train)
100
+
101
+
102
+ def encoding_demo():
103
+ import sys
104
+
105
+ from nltk.classify.maxent import TadmEventMaxentFeatureEncoding
106
+
107
+ tokens = [
108
+ ({"f0": 1, "f1": 1, "f3": 1}, "A"),
109
+ ({"f0": 1, "f2": 1, "f4": 1}, "B"),
110
+ ({"f0": 2, "f2": 1, "f3": 1, "f4": 1}, "A"),
111
+ ]
112
+ encoding = TadmEventMaxentFeatureEncoding.train(tokens)
113
+ write_tadm_file(tokens, encoding, sys.stdout)
114
+ print()
115
+ for i in range(encoding.length()):
116
+ print("%s --> %d" % (encoding.describe(i), i))
117
+ print()
118
+
119
+
120
+ if __name__ == "__main__":
121
+ encoding_demo()
122
+ names_demo()
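``parse_tadm_weights`` above simply reads one floating-point weight per line from whatever stream ``tadm`` wrote its parameters to, so it can be exercised without the ``tadm`` binary installed. A small self-contained sketch (the weight values are invented; ``numpy`` must be available):

    import io

    from nltk.classify.tadm import parse_tadm_weights

    # Stand-in for tadm's parameter output: one weight per line.
    fake_params = io.StringIO("0.25\n-1.5\n3.0\n")
    weights = parse_tadm_weights(fake_params)
    print(weights)       # numpy float array, e.g. [ 0.25 -1.5   3.  ]
    print(len(weights))  # 3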
env-llmeval/lib/python3.10/site-packages/nltk/corpus/__init__.py ADDED
@@ -0,0 +1,529 @@
1
+ # Natural Language Toolkit: Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # TODO this docstring isn't up-to-date!
9
+ """
10
+ NLTK corpus readers. The modules in this package provide functions
11
+ that can be used to read corpus files in a variety of formats. These
12
+ functions can be used to read both the corpus files that are
13
+ distributed in the NLTK corpus package, and corpus files that are part
14
+ of external corpora.
15
+
16
+ Available Corpora
17
+ =================
18
+
19
+ Please see https://www.nltk.org/nltk_data/ for a complete list.
20
+ Install corpora using nltk.download().
21
+
22
+ Corpus Reader Functions
23
+ =======================
24
+ Each corpus module defines one or more "corpus reader functions",
25
+ which can be used to read documents from that corpus. These functions
26
+ take an argument, ``item``, which is used to indicate which document
27
+ should be read from the corpus:
28
+
29
+ - If ``item`` is one of the unique identifiers listed in the corpus
30
+ module's ``items`` variable, then the corresponding document will
31
+ be loaded from the NLTK corpus package.
32
+ - If ``item`` is a filename, then that file will be read.
33
+
34
+ Additionally, corpus reader functions can be given lists of item
35
+ names; in which case, they will return a concatenation of the
36
+ corresponding documents.
37
+
38
+ Corpus reader functions are named based on the type of information
39
+ they return. Some common examples, and their return types, are:
40
+
41
+ - words(): list of str
42
+ - sents(): list of (list of str)
43
+ - paras(): list of (list of (list of str))
44
+ - tagged_words(): list of (str,str) tuple
45
+ - tagged_sents(): list of (list of (str,str))
46
+ - tagged_paras(): list of (list of (list of (str,str)))
47
+ - chunked_sents(): list of (Tree w/ (str,str) leaves)
48
+ - parsed_sents(): list of (Tree with str leaves)
49
+ - parsed_paras(): list of (list of (Tree with str leaves))
50
+ - xml(): A single xml ElementTree
51
+ - raw(): unprocessed corpus contents
52
+
53
+ For example, to read a list of the words in the Brown Corpus, use
54
+ ``nltk.corpus.brown.words()``:
55
+
56
+ >>> from nltk.corpus import brown
57
+ >>> print(", ".join(brown.words())) # doctest: +ELLIPSIS
58
+ The, Fulton, County, Grand, Jury, said, ...
59
+
60
+ """
61
+
62
+ import re
63
+
64
+ from nltk.corpus.reader import *
65
+ from nltk.corpus.util import LazyCorpusLoader
66
+ from nltk.tokenize import RegexpTokenizer
67
+
68
+ abc: PlaintextCorpusReader = LazyCorpusLoader(
69
+ "abc",
70
+ PlaintextCorpusReader,
71
+ r"(?!\.).*\.txt",
72
+ encoding=[("science", "latin_1"), ("rural", "utf8")],
73
+ )
74
+ alpino: AlpinoCorpusReader = LazyCorpusLoader(
75
+ "alpino", AlpinoCorpusReader, tagset="alpino"
76
+ )
77
+ bcp47: BCP47CorpusReader = LazyCorpusLoader(
78
+ "bcp47", BCP47CorpusReader, r"(cldr|iana)/*"
79
+ )
80
+ brown: CategorizedTaggedCorpusReader = LazyCorpusLoader(
81
+ "brown",
82
+ CategorizedTaggedCorpusReader,
83
+ r"c[a-z]\d\d",
84
+ cat_file="cats.txt",
85
+ tagset="brown",
86
+ encoding="ascii",
87
+ )
88
+ cess_cat: BracketParseCorpusReader = LazyCorpusLoader(
89
+ "cess_cat",
90
+ BracketParseCorpusReader,
91
+ r"(?!\.).*\.tbf",
92
+ tagset="unknown",
93
+ encoding="ISO-8859-15",
94
+ )
95
+ cess_esp: BracketParseCorpusReader = LazyCorpusLoader(
96
+ "cess_esp",
97
+ BracketParseCorpusReader,
98
+ r"(?!\.).*\.tbf",
99
+ tagset="unknown",
100
+ encoding="ISO-8859-15",
101
+ )
102
+ cmudict: CMUDictCorpusReader = LazyCorpusLoader(
103
+ "cmudict", CMUDictCorpusReader, ["cmudict"]
104
+ )
105
+ comtrans: AlignedCorpusReader = LazyCorpusLoader(
106
+ "comtrans", AlignedCorpusReader, r"(?!\.).*\.txt"
107
+ )
108
+ comparative_sentences: ComparativeSentencesCorpusReader = LazyCorpusLoader(
109
+ "comparative_sentences",
110
+ ComparativeSentencesCorpusReader,
111
+ r"labeledSentences\.txt",
112
+ encoding="latin-1",
113
+ )
114
+ conll2000: ConllChunkCorpusReader = LazyCorpusLoader(
115
+ "conll2000",
116
+ ConllChunkCorpusReader,
117
+ ["train.txt", "test.txt"],
118
+ ("NP", "VP", "PP"),
119
+ tagset="wsj",
120
+ encoding="ascii",
121
+ )
122
+ conll2002: ConllChunkCorpusReader = LazyCorpusLoader(
123
+ "conll2002",
124
+ ConllChunkCorpusReader,
125
+ r".*\.(test|train).*",
126
+ ("LOC", "PER", "ORG", "MISC"),
127
+ encoding="utf-8",
128
+ )
129
+ conll2007: DependencyCorpusReader = LazyCorpusLoader(
130
+ "conll2007",
131
+ DependencyCorpusReader,
132
+ r".*\.(test|train).*",
133
+ encoding=[("eus", "ISO-8859-2"), ("esp", "utf8")],
134
+ )
135
+ crubadan: CrubadanCorpusReader = LazyCorpusLoader(
136
+ "crubadan", CrubadanCorpusReader, r".*\.txt"
137
+ )
138
+ dependency_treebank: DependencyCorpusReader = LazyCorpusLoader(
139
+ "dependency_treebank", DependencyCorpusReader, r".*\.dp", encoding="ascii"
140
+ )
141
+ extended_omw: CorpusReader = LazyCorpusLoader(
142
+ "extended_omw", CorpusReader, r".*/wn-[a-z\-]*\.tab", encoding="utf8"
143
+ )
144
+ floresta: BracketParseCorpusReader = LazyCorpusLoader(
145
+ "floresta",
146
+ BracketParseCorpusReader,
147
+ r"(?!\.).*\.ptb",
148
+ "#",
149
+ tagset="unknown",
150
+ encoding="ISO-8859-15",
151
+ )
152
+ framenet15: FramenetCorpusReader = LazyCorpusLoader(
153
+ "framenet_v15",
154
+ FramenetCorpusReader,
155
+ [
156
+ "frRelation.xml",
157
+ "frameIndex.xml",
158
+ "fulltextIndex.xml",
159
+ "luIndex.xml",
160
+ "semTypes.xml",
161
+ ],
162
+ )
163
+ framenet: FramenetCorpusReader = LazyCorpusLoader(
164
+ "framenet_v17",
165
+ FramenetCorpusReader,
166
+ [
167
+ "frRelation.xml",
168
+ "frameIndex.xml",
169
+ "fulltextIndex.xml",
170
+ "luIndex.xml",
171
+ "semTypes.xml",
172
+ ],
173
+ )
174
+ gazetteers: WordListCorpusReader = LazyCorpusLoader(
175
+ "gazetteers", WordListCorpusReader, r"(?!LICENSE|\.).*\.txt", encoding="ISO-8859-2"
176
+ )
177
+ genesis: PlaintextCorpusReader = LazyCorpusLoader(
178
+ "genesis",
179
+ PlaintextCorpusReader,
180
+ r"(?!\.).*\.txt",
181
+ encoding=[
182
+ ("finnish|french|german", "latin_1"),
183
+ ("swedish", "cp865"),
184
+ (".*", "utf_8"),
185
+ ],
186
+ )
187
+ gutenberg: PlaintextCorpusReader = LazyCorpusLoader(
188
+ "gutenberg", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1"
189
+ )
190
+ ieer: IEERCorpusReader = LazyCorpusLoader("ieer", IEERCorpusReader, r"(?!README|\.).*")
191
+ inaugural: PlaintextCorpusReader = LazyCorpusLoader(
192
+ "inaugural", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="latin1"
193
+ )
194
+ # [XX] This should probably just use TaggedCorpusReader:
195
+ indian: IndianCorpusReader = LazyCorpusLoader(
196
+ "indian", IndianCorpusReader, r"(?!\.).*\.pos", tagset="unknown", encoding="utf8"
197
+ )
198
+
199
+ jeita: ChasenCorpusReader = LazyCorpusLoader(
200
+ "jeita", ChasenCorpusReader, r".*\.chasen", encoding="utf-8"
201
+ )
202
+ knbc: KNBCorpusReader = LazyCorpusLoader(
203
+ "knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp"
204
+ )
205
+ lin_thesaurus: LinThesaurusCorpusReader = LazyCorpusLoader(
206
+ "lin_thesaurus", LinThesaurusCorpusReader, r".*\.lsp"
207
+ )
208
+ mac_morpho: MacMorphoCorpusReader = LazyCorpusLoader(
209
+ "mac_morpho",
210
+ MacMorphoCorpusReader,
211
+ r"(?!\.).*\.txt",
212
+ tagset="unknown",
213
+ encoding="latin-1",
214
+ )
215
+ machado: PortugueseCategorizedPlaintextCorpusReader = LazyCorpusLoader(
216
+ "machado",
217
+ PortugueseCategorizedPlaintextCorpusReader,
218
+ r"(?!\.).*\.txt",
219
+ cat_pattern=r"([a-z]*)/.*",
220
+ encoding="latin-1",
221
+ )
222
+ masc_tagged: CategorizedTaggedCorpusReader = LazyCorpusLoader(
223
+ "masc_tagged",
224
+ CategorizedTaggedCorpusReader,
225
+ r"(spoken|written)/.*\.txt",
226
+ cat_file="categories.txt",
227
+ tagset="wsj",
228
+ encoding="utf-8",
229
+ sep="_",
230
+ )
231
+ movie_reviews: CategorizedPlaintextCorpusReader = LazyCorpusLoader(
232
+ "movie_reviews",
233
+ CategorizedPlaintextCorpusReader,
234
+ r"(?!\.).*\.txt",
235
+ cat_pattern=r"(neg|pos)/.*",
236
+ encoding="ascii",
237
+ )
238
+ multext_east: MTECorpusReader = LazyCorpusLoader(
239
+ "mte_teip5", MTECorpusReader, r"(oana).*\.xml", encoding="utf-8"
240
+ )
241
+ names: WordListCorpusReader = LazyCorpusLoader(
242
+ "names", WordListCorpusReader, r"(?!\.).*\.txt", encoding="ascii"
243
+ )
244
+ nps_chat: NPSChatCorpusReader = LazyCorpusLoader(
245
+ "nps_chat", NPSChatCorpusReader, r"(?!README|\.).*\.xml", tagset="wsj"
246
+ )
247
+ opinion_lexicon: OpinionLexiconCorpusReader = LazyCorpusLoader(
248
+ "opinion_lexicon",
249
+ OpinionLexiconCorpusReader,
250
+ r"(\w+)\-words\.txt",
251
+ encoding="ISO-8859-2",
252
+ )
253
+ ppattach: PPAttachmentCorpusReader = LazyCorpusLoader(
254
+ "ppattach", PPAttachmentCorpusReader, ["training", "test", "devset"]
255
+ )
256
+ product_reviews_1: ReviewsCorpusReader = LazyCorpusLoader(
257
+ "product_reviews_1", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8"
258
+ )
259
+ product_reviews_2: ReviewsCorpusReader = LazyCorpusLoader(
260
+ "product_reviews_2", ReviewsCorpusReader, r"^(?!Readme).*\.txt", encoding="utf8"
261
+ )
262
+ pros_cons: ProsConsCorpusReader = LazyCorpusLoader(
263
+ "pros_cons",
264
+ ProsConsCorpusReader,
265
+ r"Integrated(Cons|Pros)\.txt",
266
+ cat_pattern=r"Integrated(Cons|Pros)\.txt",
267
+ encoding="ISO-8859-2",
268
+ )
269
+ ptb: CategorizedBracketParseCorpusReader = (
270
+ LazyCorpusLoader( # Penn Treebank v3: WSJ and Brown portions
271
+ "ptb",
272
+ CategorizedBracketParseCorpusReader,
273
+ r"(WSJ/\d\d/WSJ_\d\d|BROWN/C[A-Z]/C[A-Z])\d\d.MRG",
274
+ cat_file="allcats.txt",
275
+ tagset="wsj",
276
+ )
277
+ )
278
+ qc: StringCategoryCorpusReader = LazyCorpusLoader(
279
+ "qc", StringCategoryCorpusReader, ["train.txt", "test.txt"], encoding="ISO-8859-2"
280
+ )
281
+ reuters: CategorizedPlaintextCorpusReader = LazyCorpusLoader(
282
+ "reuters",
283
+ CategorizedPlaintextCorpusReader,
284
+ "(training|test).*",
285
+ cat_file="cats.txt",
286
+ encoding="ISO-8859-2",
287
+ )
288
+ rte: RTECorpusReader = LazyCorpusLoader("rte", RTECorpusReader, r"(?!\.).*\.xml")
289
+ senseval: SensevalCorpusReader = LazyCorpusLoader(
290
+ "senseval", SensevalCorpusReader, r"(?!\.).*\.pos"
291
+ )
292
+ sentence_polarity: CategorizedSentencesCorpusReader = LazyCorpusLoader(
293
+ "sentence_polarity",
294
+ CategorizedSentencesCorpusReader,
295
+ r"rt-polarity\.(neg|pos)",
296
+ cat_pattern=r"rt-polarity\.(neg|pos)",
297
+ encoding="utf-8",
298
+ )
299
+ sentiwordnet: SentiWordNetCorpusReader = LazyCorpusLoader(
300
+ "sentiwordnet", SentiWordNetCorpusReader, "SentiWordNet_3.0.0.txt", encoding="utf-8"
301
+ )
302
+ shakespeare: XMLCorpusReader = LazyCorpusLoader(
303
+ "shakespeare", XMLCorpusReader, r"(?!\.).*\.xml"
304
+ )
305
+ sinica_treebank: SinicaTreebankCorpusReader = LazyCorpusLoader(
306
+ "sinica_treebank",
307
+ SinicaTreebankCorpusReader,
308
+ ["parsed"],
309
+ tagset="unknown",
310
+ encoding="utf-8",
311
+ )
312
+ state_union: PlaintextCorpusReader = LazyCorpusLoader(
313
+ "state_union", PlaintextCorpusReader, r"(?!\.).*\.txt", encoding="ISO-8859-2"
314
+ )
315
+ stopwords: WordListCorpusReader = LazyCorpusLoader(
316
+ "stopwords", WordListCorpusReader, r"(?!README|\.).*", encoding="utf8"
317
+ )
318
+ subjectivity: CategorizedSentencesCorpusReader = LazyCorpusLoader(
319
+ "subjectivity",
320
+ CategorizedSentencesCorpusReader,
321
+ r"(quote.tok.gt9|plot.tok.gt9)\.5000",
322
+ cat_map={"quote.tok.gt9.5000": ["subj"], "plot.tok.gt9.5000": ["obj"]},
323
+ encoding="latin-1",
324
+ )
325
+ swadesh: SwadeshCorpusReader = LazyCorpusLoader(
326
+ "swadesh", SwadeshCorpusReader, r"(?!README|\.).*", encoding="utf8"
327
+ )
328
+ swadesh110: PanlexSwadeshCorpusReader = LazyCorpusLoader(
329
+ "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh110/.*\.txt", encoding="utf8"
330
+ )
331
+ swadesh207: PanlexSwadeshCorpusReader = LazyCorpusLoader(
332
+ "panlex_swadesh", PanlexSwadeshCorpusReader, r"swadesh207/.*\.txt", encoding="utf8"
333
+ )
334
+ switchboard: SwitchboardCorpusReader = LazyCorpusLoader(
335
+ "switchboard", SwitchboardCorpusReader, tagset="wsj"
336
+ )
337
+ timit: TimitCorpusReader = LazyCorpusLoader("timit", TimitCorpusReader)
338
+ timit_tagged: TimitTaggedCorpusReader = LazyCorpusLoader(
339
+ "timit", TimitTaggedCorpusReader, r".+\.tags", tagset="wsj", encoding="ascii"
340
+ )
341
+ toolbox: ToolboxCorpusReader = LazyCorpusLoader(
342
+ "toolbox", ToolboxCorpusReader, r"(?!.*(README|\.)).*\.(dic|txt)"
343
+ )
344
+ treebank: BracketParseCorpusReader = LazyCorpusLoader(
345
+ "treebank/combined",
346
+ BracketParseCorpusReader,
347
+ r"wsj_.*\.mrg",
348
+ tagset="wsj",
349
+ encoding="ascii",
350
+ )
351
+ treebank_chunk: ChunkedCorpusReader = LazyCorpusLoader(
352
+ "treebank/tagged",
353
+ ChunkedCorpusReader,
354
+ r"wsj_.*\.pos",
355
+ sent_tokenizer=RegexpTokenizer(r"(?<=/\.)\s*(?![^\[]*\])", gaps=True),
356
+ para_block_reader=tagged_treebank_para_block_reader,
357
+ tagset="wsj",
358
+ encoding="ascii",
359
+ )
360
+ treebank_raw: PlaintextCorpusReader = LazyCorpusLoader(
361
+ "treebank/raw", PlaintextCorpusReader, r"wsj_.*", encoding="ISO-8859-2"
362
+ )
363
+ twitter_samples: TwitterCorpusReader = LazyCorpusLoader(
364
+ "twitter_samples", TwitterCorpusReader, r".*\.json"
365
+ )
366
+ udhr: UdhrCorpusReader = LazyCorpusLoader("udhr", UdhrCorpusReader)
367
+ udhr2: PlaintextCorpusReader = LazyCorpusLoader(
368
+ "udhr2", PlaintextCorpusReader, r".*\.txt", encoding="utf8"
369
+ )
370
+ universal_treebanks: ConllCorpusReader = LazyCorpusLoader(
371
+ "universal_treebanks_v20",
372
+ ConllCorpusReader,
373
+ r".*\.conll",
374
+ columntypes=(
375
+ "ignore",
376
+ "words",
377
+ "ignore",
378
+ "ignore",
379
+ "pos",
380
+ "ignore",
381
+ "ignore",
382
+ "ignore",
383
+ "ignore",
384
+ "ignore",
385
+ ),
386
+ )
387
+ verbnet: VerbnetCorpusReader = LazyCorpusLoader(
388
+ "verbnet", VerbnetCorpusReader, r"(?!\.).*\.xml"
389
+ )
390
+ webtext: PlaintextCorpusReader = LazyCorpusLoader(
391
+ "webtext", PlaintextCorpusReader, r"(?!README|\.).*\.txt", encoding="ISO-8859-2"
392
+ )
393
+ wordnet: WordNetCorpusReader = LazyCorpusLoader(
394
+ "wordnet",
395
+ WordNetCorpusReader,
396
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
397
+ )
398
+ wordnet31: WordNetCorpusReader = LazyCorpusLoader(
399
+ "wordnet31",
400
+ WordNetCorpusReader,
401
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
402
+ )
403
+ wordnet2021: WordNetCorpusReader = LazyCorpusLoader(
404
+ "wordnet2021",
405
+ WordNetCorpusReader,
406
+ LazyCorpusLoader("omw-1.4", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
407
+ )
408
+ wordnet_ic: WordNetICCorpusReader = LazyCorpusLoader(
409
+ "wordnet_ic", WordNetICCorpusReader, r".*\.dat"
410
+ )
411
+ words: WordListCorpusReader = LazyCorpusLoader(
412
+ "words", WordListCorpusReader, r"(?!README|\.).*", encoding="ascii"
413
+ )
414
+
415
+ # defined after treebank
416
+ propbank: PropbankCorpusReader = LazyCorpusLoader(
417
+ "propbank",
418
+ PropbankCorpusReader,
419
+ "prop.txt",
420
+ r"frames/.*\.xml",
421
+ "verbs.txt",
422
+ lambda filename: re.sub(r"^wsj/\d\d/", "", filename),
423
+ treebank,
424
+ ) # Must be defined *after* treebank corpus.
425
+ nombank: NombankCorpusReader = LazyCorpusLoader(
426
+ "nombank.1.0",
427
+ NombankCorpusReader,
428
+ "nombank.1.0",
429
+ r"frames/.*\.xml",
430
+ "nombank.1.0.words",
431
+ lambda filename: re.sub(r"^wsj/\d\d/", "", filename),
432
+ treebank,
433
+ ) # Must be defined *after* treebank corpus.
434
+ propbank_ptb: PropbankCorpusReader = LazyCorpusLoader(
435
+ "propbank",
436
+ PropbankCorpusReader,
437
+ "prop.txt",
438
+ r"frames/.*\.xml",
439
+ "verbs.txt",
440
+ lambda filename: filename.upper(),
441
+ ptb,
442
+ ) # Must be defined *after* ptb corpus.
443
+ nombank_ptb: NombankCorpusReader = LazyCorpusLoader(
444
+ "nombank.1.0",
445
+ NombankCorpusReader,
446
+ "nombank.1.0",
447
+ r"frames/.*\.xml",
448
+ "nombank.1.0.words",
449
+ lambda filename: filename.upper(),
450
+ ptb,
451
+ ) # Must be defined *after* ptb corpus.
452
+ semcor: SemcorCorpusReader = LazyCorpusLoader(
453
+ "semcor", SemcorCorpusReader, r"brown./tagfiles/br-.*\.xml", wordnet
454
+ ) # Must be defined *after* wordnet corpus.
455
+
456
+ nonbreaking_prefixes: NonbreakingPrefixesCorpusReader = LazyCorpusLoader(
457
+ "nonbreaking_prefixes",
458
+ NonbreakingPrefixesCorpusReader,
459
+ r"(?!README|\.).*",
460
+ encoding="utf8",
461
+ )
462
+ perluniprops: UnicharsCorpusReader = LazyCorpusLoader(
463
+ "perluniprops",
464
+ UnicharsCorpusReader,
465
+ r"(?!README|\.).*",
466
+ nltk_data_subdir="misc",
467
+ encoding="utf8",
468
+ )
469
+
470
+ # mwa_ppdb = LazyCorpusLoader(
471
+ # 'mwa_ppdb', MWAPPDBCorpusReader, r'(?!README|\.).*', nltk_data_subdir='misc', encoding='utf8')
472
+
473
+ # See https://github.com/nltk/nltk/issues/1579
474
+ # and https://github.com/nltk/nltk/issues/1716
475
+ #
476
+ # pl196x = LazyCorpusLoader(
477
+ # 'pl196x', Pl196xCorpusReader, r'[a-z]-.*\.xml',
478
+ # cat_file='cats.txt', textid_file='textids.txt', encoding='utf8')
479
+ #
480
+ # ipipan = LazyCorpusLoader(
481
+ # 'ipipan', IPIPANCorpusReader, r'(?!\.).*morph\.xml')
482
+ #
483
+ # nkjp = LazyCorpusLoader(
484
+ # 'nkjp', NKJPCorpusReader, r'', encoding='utf8')
485
+ #
486
+ # panlex_lite = LazyCorpusLoader(
487
+ # 'panlex_lite', PanLexLiteCorpusReader)
488
+ #
489
+ # ycoe = LazyCorpusLoader(
490
+ # 'ycoe', YCOECorpusReader)
491
+ #
492
+ # corpus not available with NLTK; these lines caused help(nltk.corpus) to break
493
+ # hebrew_treebank = LazyCorpusLoader(
494
+ # 'hebrew_treebank', BracketParseCorpusReader, r'.*\.txt')
495
+
496
+ # FIXME: override any imported demo from various corpora, see https://github.com/nltk/nltk/issues/2116
497
+ def demo():
498
+ # This is out-of-date:
499
+ abc.demo()
500
+ brown.demo()
501
+ # chat80.demo()
502
+ cmudict.demo()
503
+ conll2000.demo()
504
+ conll2002.demo()
505
+ genesis.demo()
506
+ gutenberg.demo()
507
+ ieer.demo()
508
+ inaugural.demo()
509
+ indian.demo()
510
+ names.demo()
511
+ ppattach.demo()
512
+ senseval.demo()
513
+ shakespeare.demo()
514
+ sinica_treebank.demo()
515
+ state_union.demo()
516
+ stopwords.demo()
517
+ timit.demo()
518
+ toolbox.demo()
519
+ treebank.demo()
520
+ udhr.demo()
521
+ webtext.demo()
522
+ words.demo()
523
+
524
+
525
+ # ycoe.demo()
526
+
527
+ if __name__ == "__main__":
528
+ # demo()
529
+ pass
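Every reader declared above is a ``LazyCorpusLoader``, so nothing is read from disk until the reader is first used. A sketch of typical access, assuming the relevant data packages have already been fetched with ``nltk.download()``:

    # Illustrative usage only; requires nltk.download("brown") and
    # nltk.download("movie_reviews") to have been run beforehand.
    from nltk.corpus import brown, movie_reviews

    print(brown.categories()[:5])               # category labels from cats.txt
    print(brown.words(categories="news")[:10])  # tokens restricted to one category
    print(movie_reviews.fileids("pos")[:2])     # file ids in the 'pos' category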
env-llmeval/lib/python3.10/site-packages/nltk/corpus/europarl_raw.py ADDED
@@ -0,0 +1,56 @@
1
+ # Natural Language Toolkit: Europarl Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Nitin Madnani <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import re
9
+
10
+ from nltk.corpus.reader import *
11
+ from nltk.corpus.util import LazyCorpusLoader
12
+
13
+ # Create a new corpus reader instance for each European language
14
+ danish: EuroparlCorpusReader = LazyCorpusLoader(
15
+ "europarl_raw/danish", EuroparlCorpusReader, r"ep-.*\.da", encoding="utf-8"
16
+ )
17
+
18
+ dutch: EuroparlCorpusReader = LazyCorpusLoader(
19
+ "europarl_raw/dutch", EuroparlCorpusReader, r"ep-.*\.nl", encoding="utf-8"
20
+ )
21
+
22
+ english: EuroparlCorpusReader = LazyCorpusLoader(
23
+ "europarl_raw/english", EuroparlCorpusReader, r"ep-.*\.en", encoding="utf-8"
24
+ )
25
+
26
+ finnish: EuroparlCorpusReader = LazyCorpusLoader(
27
+ "europarl_raw/finnish", EuroparlCorpusReader, r"ep-.*\.fi", encoding="utf-8"
28
+ )
29
+
30
+ french: EuroparlCorpusReader = LazyCorpusLoader(
31
+ "europarl_raw/french", EuroparlCorpusReader, r"ep-.*\.fr", encoding="utf-8"
32
+ )
33
+
34
+ german: EuroparlCorpusReader = LazyCorpusLoader(
35
+ "europarl_raw/german", EuroparlCorpusReader, r"ep-.*\.de", encoding="utf-8"
36
+ )
37
+
38
+ greek: EuroparlCorpusReader = LazyCorpusLoader(
39
+ "europarl_raw/greek", EuroparlCorpusReader, r"ep-.*\.el", encoding="utf-8"
40
+ )
41
+
42
+ italian: EuroparlCorpusReader = LazyCorpusLoader(
43
+ "europarl_raw/italian", EuroparlCorpusReader, r"ep-.*\.it", encoding="utf-8"
44
+ )
45
+
46
+ portuguese: EuroparlCorpusReader = LazyCorpusLoader(
47
+ "europarl_raw/portuguese", EuroparlCorpusReader, r"ep-.*\.pt", encoding="utf-8"
48
+ )
49
+
50
+ spanish: EuroparlCorpusReader = LazyCorpusLoader(
51
+ "europarl_raw/spanish", EuroparlCorpusReader, r"ep-.*\.es", encoding="utf-8"
52
+ )
53
+
54
+ swedish: EuroparlCorpusReader = LazyCorpusLoader(
55
+ "europarl_raw/swedish", EuroparlCorpusReader, r"ep-.*\.sv", encoding="utf-8"
56
+ )
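Each loader above is a language-specific ``EuroparlCorpusReader`` over the ``europarl_raw`` data package, so the usual plaintext-reader methods are available once that package is installed. A brief sketch:

    # Illustrative usage; needs nltk.download("europarl_raw") first.
    from nltk.corpus.europarl_raw import english

    print(english.fileids()[:3])  # file ids matching r"ep-.*\.en"
    print(english.words()[:10])   # token access, as with any plaintext reader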
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__init__.py ADDED
@@ -0,0 +1,186 @@
1
+ # Natural Language Toolkit: Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ NLTK corpus readers. The modules in this package provide functions
11
+ that can be used to read corpus fileids in a variety of formats. These
12
+ functions can be used to read both the corpus fileids that are
13
+ distributed in the NLTK corpus package, and corpus fileids that are part
14
+ of external corpora.
15
+
16
+ Corpus Reader Functions
17
+ =======================
18
+ Each corpus module defines one or more "corpus reader functions",
19
+ which can be used to read documents from that corpus. These functions
20
+ take an argument, ``item``, which is used to indicate which document
21
+ should be read from the corpus:
22
+
23
+ - If ``item`` is one of the unique identifiers listed in the corpus
24
+ module's ``items`` variable, then the corresponding document will
25
+ be loaded from the NLTK corpus package.
26
+ - If ``item`` is a fileid, then that file will be read.
27
+
28
+ Additionally, corpus reader functions can be given lists of item
29
+ names; in which case, they will return a concatenation of the
30
+ corresponding documents.
31
+
32
+ Corpus reader functions are named based on the type of information
33
+ they return. Some common examples, and their return types, are:
34
+
35
+ - words(): list of str
36
+ - sents(): list of (list of str)
37
+ - paras(): list of (list of (list of str))
38
+ - tagged_words(): list of (str,str) tuple
39
+ - tagged_sents(): list of (list of (str,str))
40
+ - tagged_paras(): list of (list of (list of (str,str)))
41
+ - chunked_sents(): list of (Tree w/ (str,str) leaves)
42
+ - parsed_sents(): list of (Tree with str leaves)
43
+ - parsed_paras(): list of (list of (Tree with str leaves))
44
+ - xml(): A single xml ElementTree
45
+ - raw(): unprocessed corpus contents
46
+
47
+ For example, to read a list of the words in the Brown Corpus, use
48
+ ``nltk.corpus.brown.words()``:
49
+
50
+ >>> from nltk.corpus import brown
51
+ >>> print(", ".join(brown.words()[:6])) # only first 6 words
52
+ The, Fulton, County, Grand, Jury, said
53
+
54
+ isort:skip_file
55
+ """
56
+
57
+ from nltk.corpus.reader.plaintext import *
58
+ from nltk.corpus.reader.util import *
59
+ from nltk.corpus.reader.api import *
60
+ from nltk.corpus.reader.tagged import *
61
+ from nltk.corpus.reader.cmudict import *
62
+ from nltk.corpus.reader.conll import *
63
+ from nltk.corpus.reader.chunked import *
64
+ from nltk.corpus.reader.wordlist import *
65
+ from nltk.corpus.reader.xmldocs import *
66
+ from nltk.corpus.reader.ppattach import *
67
+ from nltk.corpus.reader.senseval import *
68
+ from nltk.corpus.reader.ieer import *
69
+ from nltk.corpus.reader.sinica_treebank import *
70
+ from nltk.corpus.reader.bracket_parse import *
71
+ from nltk.corpus.reader.indian import *
72
+ from nltk.corpus.reader.toolbox import *
73
+ from nltk.corpus.reader.timit import *
74
+ from nltk.corpus.reader.ycoe import *
75
+ from nltk.corpus.reader.rte import *
76
+ from nltk.corpus.reader.string_category import *
77
+ from nltk.corpus.reader.propbank import *
78
+ from nltk.corpus.reader.verbnet import *
79
+ from nltk.corpus.reader.bnc import *
80
+ from nltk.corpus.reader.nps_chat import *
81
+ from nltk.corpus.reader.wordnet import *
82
+ from nltk.corpus.reader.switchboard import *
83
+ from nltk.corpus.reader.dependency import *
84
+ from nltk.corpus.reader.nombank import *
85
+ from nltk.corpus.reader.ipipan import *
86
+ from nltk.corpus.reader.pl196x import *
87
+ from nltk.corpus.reader.knbc import *
88
+ from nltk.corpus.reader.chasen import *
89
+ from nltk.corpus.reader.childes import *
90
+ from nltk.corpus.reader.aligned import *
91
+ from nltk.corpus.reader.lin import *
92
+ from nltk.corpus.reader.semcor import *
93
+ from nltk.corpus.reader.framenet import *
94
+ from nltk.corpus.reader.udhr import *
95
+ from nltk.corpus.reader.bnc import *
96
+ from nltk.corpus.reader.sentiwordnet import *
97
+ from nltk.corpus.reader.twitter import *
98
+ from nltk.corpus.reader.nkjp import *
99
+ from nltk.corpus.reader.crubadan import *
100
+ from nltk.corpus.reader.mte import *
101
+ from nltk.corpus.reader.reviews import *
102
+ from nltk.corpus.reader.opinion_lexicon import *
103
+ from nltk.corpus.reader.pros_cons import *
104
+ from nltk.corpus.reader.categorized_sents import *
105
+ from nltk.corpus.reader.comparative_sents import *
106
+ from nltk.corpus.reader.panlex_lite import *
107
+ from nltk.corpus.reader.panlex_swadesh import *
108
+ from nltk.corpus.reader.bcp47 import *
109
+
110
+ # Make sure that nltk.corpus.reader.bracket_parse gives the module, not
111
+ # the function bracket_parse() defined in nltk.tree:
112
+ from nltk.corpus.reader import bracket_parse
113
+
114
+ __all__ = [
115
+ "CorpusReader",
116
+ "CategorizedCorpusReader",
117
+ "PlaintextCorpusReader",
118
+ "find_corpus_fileids",
119
+ "TaggedCorpusReader",
120
+ "CMUDictCorpusReader",
121
+ "ConllChunkCorpusReader",
122
+ "WordListCorpusReader",
123
+ "PPAttachmentCorpusReader",
124
+ "SensevalCorpusReader",
125
+ "IEERCorpusReader",
126
+ "ChunkedCorpusReader",
127
+ "SinicaTreebankCorpusReader",
128
+ "BracketParseCorpusReader",
129
+ "IndianCorpusReader",
130
+ "ToolboxCorpusReader",
131
+ "TimitCorpusReader",
132
+ "YCOECorpusReader",
133
+ "MacMorphoCorpusReader",
134
+ "SyntaxCorpusReader",
135
+ "AlpinoCorpusReader",
136
+ "RTECorpusReader",
137
+ "StringCategoryCorpusReader",
138
+ "EuroparlCorpusReader",
139
+ "CategorizedBracketParseCorpusReader",
140
+ "CategorizedTaggedCorpusReader",
141
+ "CategorizedPlaintextCorpusReader",
142
+ "PortugueseCategorizedPlaintextCorpusReader",
143
+ "tagged_treebank_para_block_reader",
144
+ "PropbankCorpusReader",
145
+ "VerbnetCorpusReader",
146
+ "BNCCorpusReader",
147
+ "ConllCorpusReader",
148
+ "XMLCorpusReader",
149
+ "NPSChatCorpusReader",
150
+ "SwadeshCorpusReader",
151
+ "WordNetCorpusReader",
152
+ "WordNetICCorpusReader",
153
+ "SwitchboardCorpusReader",
154
+ "DependencyCorpusReader",
155
+ "NombankCorpusReader",
156
+ "IPIPANCorpusReader",
157
+ "Pl196xCorpusReader",
158
+ "TEICorpusView",
159
+ "KNBCorpusReader",
160
+ "ChasenCorpusReader",
161
+ "CHILDESCorpusReader",
162
+ "AlignedCorpusReader",
163
+ "TimitTaggedCorpusReader",
164
+ "LinThesaurusCorpusReader",
165
+ "SemcorCorpusReader",
166
+ "FramenetCorpusReader",
167
+ "UdhrCorpusReader",
168
+ "BNCCorpusReader",
169
+ "SentiWordNetCorpusReader",
170
+ "SentiSynset",
171
+ "TwitterCorpusReader",
172
+ "NKJPCorpusReader",
173
+ "CrubadanCorpusReader",
174
+ "MTECorpusReader",
175
+ "ReviewsCorpusReader",
176
+ "OpinionLexiconCorpusReader",
177
+ "ProsConsCorpusReader",
178
+ "CategorizedSentencesCorpusReader",
179
+ "ComparativeSentencesCorpusReader",
180
+ "PanLexLiteCorpusReader",
181
+ "NonbreakingPrefixesCorpusReader",
182
+ "UnicharsCorpusReader",
183
+ "MWAPPDBCorpusReader",
184
+ "PanlexSwadeshCorpusReader",
185
+ "BCP47CorpusReader",
186
+ ]
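The return-type conventions listed in the docstring above (``words()``, ``sents()``, ``tagged_words()``, and so on) can be checked directly on any tagged corpus. A sketch using the Brown reader, assuming its data package is installed:

    # Illustrative usage; requires nltk.download("brown").
    from nltk.corpus import brown

    print(brown.words()[:5])            # list of str
    print(brown.sents()[0])             # first element of a list of (list of str)
    print(brown.tagged_words()[:3])     # list of (str, str) tuples
    print(brown.tagged_sents()[0][:3])  # list of (list of (str, str))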
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/aligned.py ADDED
@@ -0,0 +1,154 @@
1
+ # Natural Language Toolkit: Aligned Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # URL: <https://www.nltk.org/>
5
+ # Author: Steven Bird <[email protected]>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from nltk.corpus.reader.api import CorpusReader
9
+ from nltk.corpus.reader.util import (
10
+ StreamBackedCorpusView,
11
+ concat,
12
+ read_alignedsent_block,
13
+ )
14
+ from nltk.tokenize import RegexpTokenizer, WhitespaceTokenizer
15
+ from nltk.translate import AlignedSent, Alignment
16
+
17
+
18
+ class AlignedCorpusReader(CorpusReader):
19
+ """
20
+ Reader for corpora of word-aligned sentences. Tokens are assumed
21
+ to be separated by whitespace. Sentences begin on separate lines.
22
+ """
23
+
24
+ def __init__(
25
+ self,
26
+ root,
27
+ fileids,
28
+ sep="/",
29
+ word_tokenizer=WhitespaceTokenizer(),
30
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
31
+ alignedsent_block_reader=read_alignedsent_block,
32
+ encoding="latin1",
33
+ ):
34
+ """
35
+ Construct a new Aligned Corpus reader for a set of documents
36
+ located at the given root directory. Example usage:
37
+
38
+ >>> root = '/...path to corpus.../'
39
+ >>> reader = AlignedCorpusReader(root, '.*', '.txt') # doctest: +SKIP
40
+
41
+ :param root: The root directory for this corpus.
42
+ :param fileids: A list or regexp specifying the fileids in this corpus.
43
+ """
44
+ CorpusReader.__init__(self, root, fileids, encoding)
45
+ self._sep = sep
46
+ self._word_tokenizer = word_tokenizer
47
+ self._sent_tokenizer = sent_tokenizer
48
+ self._alignedsent_block_reader = alignedsent_block_reader
49
+
50
+ def words(self, fileids=None):
51
+ """
52
+ :return: the given file(s) as a list of words
53
+ and punctuation symbols.
54
+ :rtype: list(str)
55
+ """
56
+ return concat(
57
+ [
58
+ AlignedSentCorpusView(
59
+ fileid,
60
+ enc,
61
+ False,
62
+ False,
63
+ self._word_tokenizer,
64
+ self._sent_tokenizer,
65
+ self._alignedsent_block_reader,
66
+ )
67
+ for (fileid, enc) in self.abspaths(fileids, True)
68
+ ]
69
+ )
70
+
71
+ def sents(self, fileids=None):
72
+ """
73
+ :return: the given file(s) as a list of
74
+ sentences or utterances, each encoded as a list of word
75
+ strings.
76
+ :rtype: list(list(str))
77
+ """
78
+ return concat(
79
+ [
80
+ AlignedSentCorpusView(
81
+ fileid,
82
+ enc,
83
+ False,
84
+ True,
85
+ self._word_tokenizer,
86
+ self._sent_tokenizer,
87
+ self._alignedsent_block_reader,
88
+ )
89
+ for (fileid, enc) in self.abspaths(fileids, True)
90
+ ]
91
+ )
92
+
93
+ def aligned_sents(self, fileids=None):
94
+ """
95
+ :return: the given file(s) as a list of AlignedSent objects.
96
+ :rtype: list(AlignedSent)
97
+ """
98
+ return concat(
99
+ [
100
+ AlignedSentCorpusView(
101
+ fileid,
102
+ enc,
103
+ True,
104
+ True,
105
+ self._word_tokenizer,
106
+ self._sent_tokenizer,
107
+ self._alignedsent_block_reader,
108
+ )
109
+ for (fileid, enc) in self.abspaths(fileids, True)
110
+ ]
111
+ )
112
+
113
+
114
+ class AlignedSentCorpusView(StreamBackedCorpusView):
115
+ """
116
+ A specialized corpus view for aligned sentences.
117
+ ``AlignedSentCorpusView`` objects are typically created by
118
+ ``AlignedCorpusReader`` (not directly by nltk users).
119
+ """
120
+
121
+ def __init__(
122
+ self,
123
+ corpus_file,
124
+ encoding,
125
+ aligned,
126
+ group_by_sent,
127
+ word_tokenizer,
128
+ sent_tokenizer,
129
+ alignedsent_block_reader,
130
+ ):
131
+ self._aligned = aligned
132
+ self._group_by_sent = group_by_sent
133
+ self._word_tokenizer = word_tokenizer
134
+ self._sent_tokenizer = sent_tokenizer
135
+ self._alignedsent_block_reader = alignedsent_block_reader
136
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
137
+
138
+ def read_block(self, stream):
139
+ block = [
140
+ self._word_tokenizer.tokenize(sent_str)
141
+ for alignedsent_str in self._alignedsent_block_reader(stream)
142
+ for sent_str in self._sent_tokenizer.tokenize(alignedsent_str)
143
+ ]
144
+ if self._aligned:
145
+ block[2] = Alignment.fromstring(
146
+ " ".join(block[2])
147
+ ) # kludge; we shouldn't have tokenized the alignment string
148
+ block = [AlignedSent(*block)]
149
+ elif self._group_by_sent:
150
+ block = [block[0]]
151
+ else:
152
+ block = block[0]
153
+
154
+ return block
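``aligned_sents()`` above returns ``AlignedSent`` objects whose third block has been re-parsed into an ``Alignment`` by ``read_block``. A sketch of consuming them through the ``comtrans`` loader declared in ``nltk/corpus/__init__.py``, assuming that data package is installed:

    # Illustrative usage; needs nltk.download("comtrans") first.
    from nltk.corpus import comtrans

    als = comtrans.aligned_sents()[0]
    print(als.words)      # tokens of the source-language sentence
    print(als.mots)       # tokens of the target-language sentence
    print(als.alignment)  # Alignment of (source_index, target_index) pairs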
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/api.py ADDED
@@ -0,0 +1,516 @@
1
+ # Natural Language Toolkit: API for Corpus Readers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ API for corpus readers.
11
+ """
12
+
13
+ import os
14
+ import re
15
+ from collections import defaultdict
16
+ from itertools import chain
17
+
18
+ from nltk.corpus.reader.util import *
19
+ from nltk.data import FileSystemPathPointer, PathPointer, ZipFilePathPointer
20
+
21
+
22
+ class CorpusReader:
23
+ """
24
+ A base class for "corpus reader" classes, each of which can be
25
+ used to read a specific corpus format. Each individual corpus
26
+ reader instance is used to read a specific corpus, consisting of
27
+ one or more files under a common root directory. Each file is
28
+ identified by its ``file identifier``, which is the relative path
29
+ to the file from the root directory.
30
+
31
+ A separate subclass is defined for each corpus format. These
32
+ subclasses define one or more methods that provide 'views' on the
33
+ corpus contents, such as ``words()`` (for a list of words) and
34
+ ``parsed_sents()`` (for a list of parsed sentences). Called with
35
+ no arguments, these methods will return the contents of the entire
36
+ corpus. For most corpora, these methods define one or more
37
+ selection arguments, such as ``fileids`` or ``categories``, which can
38
+ be used to select which portion of the corpus should be returned.
39
+ """
40
+
41
+ def __init__(self, root, fileids, encoding="utf8", tagset=None):
42
+ """
43
+ :type root: PathPointer or str
44
+ :param root: A path pointer identifying the root directory for
45
+ this corpus. If a string is specified, then it will be
46
+ converted to a ``PathPointer`` automatically.
47
+ :param fileids: A list of the files that make up this corpus.
48
+ This list can either be specified explicitly, as a list of
49
+ strings; or implicitly, as a regular expression over file
50
+ paths. The absolute path for each file will be constructed
51
+ by joining the reader's root to each file name.
52
+ :param encoding: The default unicode encoding for the files
53
+ that make up the corpus. The value of ``encoding`` can be any
54
+ of the following:
55
+
56
+ - A string: ``encoding`` is the encoding name for all files.
57
+ - A dictionary: ``encoding[file_id]`` is the encoding
58
+ name for the file whose identifier is ``file_id``. If
59
+ ``file_id`` is not in ``encoding``, then the file
60
+ contents will be processed using non-unicode byte strings.
61
+ - A list: ``encoding`` should be a list of ``(regexp, encoding)``
62
+ tuples. The encoding for a file whose identifier is ``file_id``
63
+ will be the ``encoding`` value for the first tuple whose
64
+ ``regexp`` matches the ``file_id``. If no tuple's ``regexp``
65
+ matches the ``file_id``, the file contents will be processed
66
+ using non-unicode byte strings.
67
+ - None: the file contents of all files will be
68
+ processed using non-unicode byte strings.
69
+ :param tagset: The name of the tagset used by this corpus, to be used
70
+ for normalizing or converting the POS tags returned by the
71
+ ``tagged_...()`` methods.
72
+ """
73
+ # Convert the root to a path pointer, if necessary.
74
+ if isinstance(root, str) and not isinstance(root, PathPointer):
75
+ m = re.match(r"(.*\.zip)/?(.*)$|", root)
76
+ zipfile, zipentry = m.groups()
77
+ if zipfile:
78
+ root = ZipFilePathPointer(zipfile, zipentry)
79
+ else:
80
+ root = FileSystemPathPointer(root)
81
+ elif not isinstance(root, PathPointer):
82
+ raise TypeError("CorpusReader: expected a string or a PathPointer")
83
+
84
+ # If `fileids` is a regexp, then expand it.
85
+ if isinstance(fileids, str):
86
+ fileids = find_corpus_fileids(root, fileids)
87
+
88
+ self._fileids = fileids
89
+ """A list of the relative paths for the fileids that make up
90
+ this corpus."""
91
+
92
+ self._root = root
93
+ """The root directory for this corpus."""
94
+
95
+ self._readme = "README"
96
+ self._license = "LICENSE"
97
+ self._citation = "citation.bib"
98
+
99
+ # If encoding was specified as a list of regexps, then convert
100
+ # it to a dictionary.
101
+ if isinstance(encoding, list):
102
+ encoding_dict = {}
103
+ for fileid in self._fileids:
104
+ for x in encoding:
105
+ (regexp, enc) = x
106
+ if re.match(regexp, fileid):
107
+ encoding_dict[fileid] = enc
108
+ break
109
+ encoding = encoding_dict
110
+
111
+ self._encoding = encoding
112
+ """The default unicode encoding for the fileids that make up
113
+ this corpus. If ``encoding`` is None, then the file
114
+ contents are processed using byte strings."""
115
+ self._tagset = tagset
116
+
117
+ def __repr__(self):
118
+ if isinstance(self._root, ZipFilePathPointer):
119
+ path = f"{self._root.zipfile.filename}/{self._root.entry}"
120
+ else:
121
+ path = "%s" % self._root.path
122
+ return f"<{self.__class__.__name__} in {path!r}>"
123
+
124
+ def ensure_loaded(self):
125
+ """
126
+ Load this corpus (if it has not already been loaded). This is
127
+ used by LazyCorpusLoader as a simple method that can be used to
128
+ make sure a corpus is loaded -- e.g., in case a user wants to
129
+ do help(some_corpus).
130
+ """
131
+ pass # no need to actually do anything.
132
+
133
+ def readme(self):
134
+ """
135
+ Return the contents of the corpus README file, if it exists.
136
+ """
137
+ with self.open(self._readme) as f:
138
+ return f.read()
139
+
140
+ def license(self):
141
+ """
142
+ Return the contents of the corpus LICENSE file, if it exists.
143
+ """
144
+ with self.open(self._license) as f:
145
+ return f.read()
146
+
147
+ def citation(self):
148
+ """
149
+ Return the contents of the corpus citation.bib file, if it exists.
150
+ """
151
+ with self.open(self._citation) as f:
152
+ return f.read()
153
+
154
+ def fileids(self):
155
+ """
156
+ Return a list of file identifiers for the fileids that make up
157
+ this corpus.
158
+ """
159
+ return self._fileids
160
+
161
+ def abspath(self, fileid):
162
+ """
163
+ Return the absolute path for the given file.
164
+
165
+ :type fileid: str
166
+ :param fileid: The file identifier for the file whose path
167
+ should be returned.
168
+ :rtype: PathPointer
169
+ """
170
+ return self._root.join(fileid)
171
+
172
+ def abspaths(self, fileids=None, include_encoding=False, include_fileid=False):
173
+ """
174
+ Return a list of the absolute paths for all fileids in this corpus;
175
+ or for the given list of fileids, if specified.
176
+
177
+ :type fileids: None or str or list
178
+ :param fileids: Specifies the set of fileids for which paths should
179
+ be returned. Can be None, for all fileids; a list of
180
+ file identifiers, for a specified set of fileids; or a single
181
+ file identifier, for a single file. Note that the return
182
+ value is always a list of paths, even if ``fileids`` is a
183
+ single file identifier.
184
+
185
+ :param include_encoding: If true, then return a list of
186
+ ``(path_pointer, encoding)`` tuples.
187
+
188
+ :rtype: list(PathPointer)
189
+ """
190
+ if fileids is None:
191
+ fileids = self._fileids
192
+ elif isinstance(fileids, str):
193
+ fileids = [fileids]
194
+
195
+ paths = [self._root.join(f) for f in fileids]
196
+
197
+ if include_encoding and include_fileid:
198
+ return list(zip(paths, [self.encoding(f) for f in fileids], fileids))
199
+ elif include_fileid:
200
+ return list(zip(paths, fileids))
201
+ elif include_encoding:
202
+ return list(zip(paths, [self.encoding(f) for f in fileids]))
203
+ else:
204
+ return paths
205
+
206
+ def raw(self, fileids=None):
207
+ """
208
+ :param fileids: A list specifying the fileids that should be used.
209
+ :return: the given file(s) as a single string.
210
+ :rtype: str
211
+ """
212
+ if fileids is None:
213
+ fileids = self._fileids
214
+ elif isinstance(fileids, str):
215
+ fileids = [fileids]
216
+ contents = []
217
+ for f in fileids:
218
+ with self.open(f) as fp:
219
+ contents.append(fp.read())
220
+ return concat(contents)
221
+
222
+ def open(self, file):
223
+ """
224
+ Return an open stream that can be used to read the given file.
225
+ If the file's encoding is not None, then the stream will
226
+ automatically decode the file's contents into unicode.
227
+
228
+ :param file: The file identifier of the file to read.
229
+ """
230
+ encoding = self.encoding(file)
231
+ stream = self._root.join(file).open(encoding)
232
+ return stream
233
+
234
+ def encoding(self, file):
235
+ """
236
+ Return the unicode encoding for the given corpus file, if known.
237
+ If the encoding is unknown, or if the given file should be
238
+ processed using byte strings (str), then return None.
239
+ """
240
+ if isinstance(self._encoding, dict):
241
+ return self._encoding.get(file)
242
+ else:
243
+ return self._encoding
244
+
245
+ def _get_root(self):
246
+ return self._root
247
+
248
+ root = property(
249
+ _get_root,
250
+ doc="""
251
+ The directory where this corpus is stored.
252
+
253
+ :type: PathPointer""",
254
+ )
255
+
256
+
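To make the ``encoding`` options documented in the constructor above concrete, here is a minimal sketch using ``PlaintextCorpusReader`` (a concrete ``CorpusReader`` subclass from this package); the ``my_corpus/`` directory and file names are hypothetical and must exist locally for the calls to succeed:

    from nltk.corpus.reader import PlaintextCorpusReader

    root = "my_corpus/"  # hypothetical corpus directory
    # A single string: one encoding for every file.
    r1 = PlaintextCorpusReader(root, r".*\.txt", encoding="utf8")
    # A dictionary: per-file encodings keyed by file identifier.
    r2 = PlaintextCorpusReader(root, r".*\.txt",
                               encoding={"old.txt": "latin-1", "new.txt": "utf8"})
    # A list of (regexp, encoding) pairs: the first matching regexp wins.
    r3 = PlaintextCorpusReader(root, r".*\.txt",
                               encoding=[(r"legacy/.*", "latin-1"), (r".*", "utf8")])
    # None: file contents are returned as raw byte strings.
    r4 = PlaintextCorpusReader(root, r".*\.txt", encoding=None)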
257
+ ######################################################################
258
+ # { Corpora containing categorized items
259
+ ######################################################################
260
+
261
+
262
+ class CategorizedCorpusReader:
263
+ """
264
+ A mixin class used to aid in the implementation of corpus readers
265
+ for categorized corpora. This class defines the method
266
+ ``categories()``, which returns a list of the categories for the
267
+ corpus or for a specified set of fileids; and overrides ``fileids()``
268
+ to take a ``categories`` argument, restricting the set of fileids to
269
+ be returned.
270
+
271
+ Subclasses are expected to:
272
+
273
+ - Call ``__init__()`` to set up the mapping.
274
+
275
+ - Override all view methods to accept a ``categories`` parameter,
276
+ which can be used *instead* of the ``fileids`` parameter, to
277
+ select which fileids should be included in the returned view.
278
+ """
279
+
280
+ def __init__(self, kwargs):
281
+ """
282
+ Initialize this mapping based on keyword arguments, as
283
+ follows:
284
+
285
+ - cat_pattern: A regular expression pattern used to find the
286
+ category for each file identifier. The pattern will be
287
+ applied to each file identifier, and the first matching
288
+ group will be used as the category label for that file.
289
+
290
+ - cat_map: A dictionary, mapping from file identifiers to
291
+ category labels.
292
+
293
+ - cat_file: The name of a file that contains the mapping
294
+ from file identifiers to categories. The argument
295
+ ``cat_delimiter`` can be used to specify a delimiter.
296
+
297
+ The corresponding argument will be deleted from ``kwargs``. If
298
+ more than one argument is specified, an exception will be
299
+ raised.
300
+ """
301
+ self._f2c = None #: file-to-category mapping
302
+ self._c2f = None #: category-to-file mapping
303
+
304
+ self._pattern = None #: regexp specifying the mapping
305
+ self._map = None #: dict specifying the mapping
306
+ self._file = None #: fileid of file containing the mapping
307
+ self._delimiter = None #: delimiter for ``self._file``
308
+
309
+ if "cat_pattern" in kwargs:
310
+ self._pattern = kwargs["cat_pattern"]
311
+ del kwargs["cat_pattern"]
312
+ elif "cat_map" in kwargs:
313
+ self._map = kwargs["cat_map"]
314
+ del kwargs["cat_map"]
315
+ elif "cat_file" in kwargs:
316
+ self._file = kwargs["cat_file"]
317
+ del kwargs["cat_file"]
318
+ if "cat_delimiter" in kwargs:
319
+ self._delimiter = kwargs["cat_delimiter"]
320
+ del kwargs["cat_delimiter"]
321
+ else:
322
+ raise ValueError(
323
+ "Expected keyword argument cat_pattern or " "cat_map or cat_file."
324
+ )
325
+
326
+ if "cat_pattern" in kwargs or "cat_map" in kwargs or "cat_file" in kwargs:
327
+ raise ValueError(
328
+ "Specify exactly one of: cat_pattern, " "cat_map, cat_file."
329
+ )
330
+
331
+ def _init(self):
332
+ self._f2c = defaultdict(set)
333
+ self._c2f = defaultdict(set)
334
+
335
+ if self._pattern is not None:
336
+ for file_id in self._fileids:
337
+ category = re.match(self._pattern, file_id).group(1)
338
+ self._add(file_id, category)
339
+
340
+ elif self._map is not None:
341
+ for (file_id, categories) in self._map.items():
342
+ for category in categories:
343
+ self._add(file_id, category)
344
+
345
+ elif self._file is not None:
346
+ with self.open(self._file) as f:
347
+ for line in f.readlines():
348
+ line = line.strip()
349
+ file_id, categories = line.split(self._delimiter, 1)
350
+ if file_id not in self.fileids():
351
+ raise ValueError(
352
+ "In category mapping file %s: %s "
353
+ "not found" % (self._file, file_id)
354
+ )
355
+ for category in categories.split(self._delimiter):
356
+ self._add(file_id, category)
357
+
358
+ def _add(self, file_id, category):
359
+ self._f2c[file_id].add(category)
360
+ self._c2f[category].add(file_id)
361
+
362
+ def categories(self, fileids=None):
363
+ """
364
+ Return a list of the categories that are defined for this corpus,
365
+ or for the given file(s) if specified.
366
+ """
367
+ if self._f2c is None:
368
+ self._init()
369
+ if fileids is None:
370
+ return sorted(self._c2f)
371
+ if isinstance(fileids, str):
372
+ fileids = [fileids]
373
+ return sorted(set.union(*(self._f2c[d] for d in fileids)))
374
+
375
+ def fileids(self, categories=None):
376
+ """
377
+ Return a list of file identifiers for the files that make up
378
+ this corpus, or that make up the given category(s) if specified.
379
+ """
380
+ if categories is None:
381
+ return super().fileids()
382
+ elif isinstance(categories, str):
383
+ if self._f2c is None:
384
+ self._init()
385
+ if categories in self._c2f:
386
+ return sorted(self._c2f[categories])
387
+ else:
388
+ raise ValueError("Category %s not found" % categories)
389
+ else:
390
+ if self._f2c is None:
391
+ self._init()
392
+ return sorted(set.union(*(self._c2f[c] for c in categories)))
393
+
394
+ def _resolve(self, fileids, categories):
395
+ if fileids is not None and categories is not None:
396
+ raise ValueError("Specify fileids or categories, not both")
397
+ if categories is not None:
398
+ return self.fileids(categories)
399
+ else:
400
+ return fileids
401
+
402
+ def raw(self, fileids=None, categories=None):
403
+ return super().raw(self._resolve(fileids, categories))
404
+
405
+ def words(self, fileids=None, categories=None):
406
+ return super().words(self._resolve(fileids, categories))
407
+
408
+ def sents(self, fileids=None, categories=None):
409
+ return super().sents(self._resolve(fileids, categories))
410
+
411
+ def paras(self, fileids=None, categories=None):
412
+ return super().paras(self._resolve(fileids, categories))
413
+
414
+
415
+ ######################################################################
416
+ # { Treebank readers
417
+ ######################################################################
418
+
419
+ # [xx] is it worth it to factor this out?
420
+ class SyntaxCorpusReader(CorpusReader):
421
+ """
422
+ An abstract base class for reading corpora consisting of
423
+ syntactically parsed text. Subclasses should define:
424
+
425
+ - ``__init__``, which specifies the location of the corpus
426
+ and a method for detecting the sentence blocks in corpus files.
427
+ - ``_read_block``, which reads a block from the input stream.
428
+ - ``_word``, which takes a block and returns a list of list of words.
429
+ - ``_tag``, which takes a block and returns a list of list of tagged
430
+ words.
431
+ - ``_parse``, which takes a block and returns a list of parsed
432
+ sentences.
433
+ """
434
+
435
+ def _parse(self, s):
436
+ raise NotImplementedError()
437
+
438
+ def _word(self, s):
439
+ raise NotImplementedError()
440
+
441
+ def _tag(self, s):
442
+ raise NotImplementedError()
443
+
444
+ def _read_block(self, stream):
445
+ raise NotImplementedError()
446
+
447
+ def parsed_sents(self, fileids=None):
448
+ reader = self._read_parsed_sent_block
449
+ return concat(
450
+ [
451
+ StreamBackedCorpusView(fileid, reader, encoding=enc)
452
+ for fileid, enc in self.abspaths(fileids, True)
453
+ ]
454
+ )
455
+
456
+ def tagged_sents(self, fileids=None, tagset=None):
457
+ def reader(stream):
458
+ return self._read_tagged_sent_block(stream, tagset)
459
+
460
+ return concat(
461
+ [
462
+ StreamBackedCorpusView(fileid, reader, encoding=enc)
463
+ for fileid, enc in self.abspaths(fileids, True)
464
+ ]
465
+ )
466
+
467
+ def sents(self, fileids=None):
468
+ reader = self._read_sent_block
469
+ return concat(
470
+ [
471
+ StreamBackedCorpusView(fileid, reader, encoding=enc)
472
+ for fileid, enc in self.abspaths(fileids, True)
473
+ ]
474
+ )
475
+
476
+ def tagged_words(self, fileids=None, tagset=None):
477
+ def reader(stream):
478
+ return self._read_tagged_word_block(stream, tagset)
479
+
480
+ return concat(
481
+ [
482
+ StreamBackedCorpusView(fileid, reader, encoding=enc)
483
+ for fileid, enc in self.abspaths(fileids, True)
484
+ ]
485
+ )
486
+
487
+ def words(self, fileids=None):
488
+ return concat(
489
+ [
490
+ StreamBackedCorpusView(fileid, self._read_word_block, encoding=enc)
491
+ for fileid, enc in self.abspaths(fileids, True)
492
+ ]
493
+ )
494
+
495
+ # ------------------------------------------------------------
496
+ # { Block Readers
497
+
498
+ def _read_word_block(self, stream):
499
+ return list(chain.from_iterable(self._read_sent_block(stream)))
500
+
501
+ def _read_tagged_word_block(self, stream, tagset=None):
502
+ return list(chain.from_iterable(self._read_tagged_sent_block(stream, tagset)))
503
+
504
+ def _read_sent_block(self, stream):
505
+ return list(filter(None, [self._word(t) for t in self._read_block(stream)]))
506
+
507
+ def _read_tagged_sent_block(self, stream, tagset=None):
508
+ return list(
509
+ filter(None, [self._tag(t, tagset) for t in self._read_block(stream)])
510
+ )
511
+
512
+ def _read_parsed_sent_block(self, stream):
513
+ return list(filter(None, [self._parse(t) for t in self._read_block(stream)]))
514
+
515
+ # } End of Block Readers
516
+ # ------------------------------------------------------------
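Before the next file, a hedged sketch of how the ``CategorizedCorpusReader`` keyword arguments above are typically wired up, using ``CategorizedPlaintextCorpusReader`` (which mixes this class into ``PlaintextCorpusReader``); the ``reviews/`` layout and file names are assumptions for illustration:

    from nltk.corpus.reader import CategorizedPlaintextCorpusReader

    # Files laid out as category/name.txt, e.g. "pos/review01.txt".
    reader = CategorizedPlaintextCorpusReader(
        "reviews/",                   # hypothetical root directory
        r"(pos|neg)/.*\.txt",         # fileids regexp
        cat_pattern=r"(pos|neg)/.*",  # first group becomes the category label
    )
    print(reader.categories())                    # e.g. ['neg', 'pos']
    print(reader.fileids(categories="pos")[:3])   # files in the 'pos' category
    print(reader.words(categories=["neg"])[:10])  # view restricted by category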
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bcp47.py ADDED
@@ -0,0 +1,218 @@
1
+ # Natural Language Toolkit: BCP-47 language tags
2
+ #
3
+ # Copyright (C) 2022-2023 NLTK Project
4
+ # Author: Eric Kafe <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import re
9
+ from warnings import warn
10
+ from xml.etree import ElementTree as et
11
+
12
+ from nltk.corpus.reader import CorpusReader
13
+
14
+
15
+ class BCP47CorpusReader(CorpusReader):
16
+ """
17
+ Parse BCP-47 composite language tags
18
+
19
+ Supports all the main subtags, and the 'u-sd' extension:
20
+
21
+ >>> from nltk.corpus import bcp47
22
+ >>> bcp47.name('oc-gascon-u-sd-fr64')
23
+ 'Occitan (post 1500): Gascon: Pyrénées-Atlantiques'
24
+
25
+ Can load a conversion table to Wikidata Q-codes:
26
+ >>> bcp47.load_wiki_q()
27
+ >>> bcp47.wiki_q['en-GI-spanglis']
28
+ 'Q79388'
29
+
30
+ """
31
+
32
+ def __init__(self, root, fileids):
33
+ """Read the BCP-47 database"""
34
+ super().__init__(root, fileids)
35
+ self.langcode = {}
36
+ with self.open("iana/language-subtag-registry.txt") as fp:
37
+ self.db = self.data_dict(fp.read().split("%%\n"))
38
+ with self.open("cldr/common-subdivisions-en.xml") as fp:
39
+ self.subdiv = self.subdiv_dict(
40
+ et.parse(fp).iterfind("localeDisplayNames/subdivisions/subdivision")
41
+ )
42
+ self.morphology()
43
+
44
+ def load_wiki_q(self):
45
+ """Load conversion table to Wikidata Q-codes (only if needed)"""
46
+ with self.open("cldr/tools-cldr-rdf-external-entityToCode.tsv") as fp:
47
+ self.wiki_q = self.wiki_dict(fp.read().strip().split("\n")[1:])
48
+
49
+ def wiki_dict(self, lines):
50
+ """Convert Wikidata list of Q-codes to a BCP-47 dictionary"""
51
+ return {
52
+ pair[1]: pair[0].split("/")[-1]
53
+ for pair in [line.strip().split("\t") for line in lines]
54
+ }
55
+
56
+ def subdiv_dict(self, subdivs):
57
+ """Convert the CLDR subdivisions list to a dictionary"""
58
+ return {sub.attrib["type"]: sub.text for sub in subdivs}
59
+
60
+ def morphology(self):
61
+ self.casing = {
62
+ "language": str.lower,
63
+ "extlang": str.lower,
64
+ "script": str.title,
65
+ "region": str.upper,
66
+ "variant": str.lower,
67
+ }
68
+ dig = "[0-9]"
69
+ low = "[a-z]"
70
+ up = "[A-Z]"
71
+ alnum = "[a-zA-Z0-9]"
72
+ self.format = {
73
+ "language": re.compile(f"{low*3}?"),
74
+ "extlang": re.compile(f"{low*3}"),
75
+ "script": re.compile(f"{up}{low*3}"),
76
+ "region": re.compile(f"({up*2})|({dig*3})"),
77
+ "variant": re.compile(f"{alnum*4}{(alnum+'?')*4}"),
78
+ "singleton": re.compile(f"{low}"),
79
+ }
80
+
81
+ def data_dict(self, records):
82
+ """Convert the BCP-47 language subtag registry to a dictionary"""
83
+ self.version = records[0].replace("File-Date:", "").strip()
84
+ dic = {}
85
+ dic["deprecated"] = {}
86
+ for label in [
87
+ "language",
88
+ "extlang",
89
+ "script",
90
+ "region",
91
+ "variant",
92
+ "redundant",
93
+ "grandfathered",
94
+ ]:
95
+ dic["deprecated"][label] = {}
96
+ for record in records[1:]:
97
+ fields = [field.split(": ") for field in record.strip().split("\n")]
98
+ typ = fields[0][1]
99
+ tag = fields[1][1]
100
+ if typ not in dic:
101
+ dic[typ] = {}
102
+ subfields = {}
103
+ for field in fields[2:]:
104
+ if len(field) == 2:
105
+ [key, val] = field
106
+ if key not in subfields:
107
+ subfields[key] = [val]
108
+ else: # multiple value
109
+ subfields[key].append(val)
110
+ else: # multiline field
111
+ subfields[key][-1] += " " + field[0].strip()
112
+ if (
113
+ "Deprecated" not in record
114
+ and typ == "language"
115
+ and key == "Description"
116
+ ):
117
+ self.langcode[subfields[key][-1]] = tag
118
+ for key in subfields:
119
+ if len(subfields[key]) == 1: # single value
120
+ subfields[key] = subfields[key][0]
121
+ if "Deprecated" in record:
122
+ dic["deprecated"][typ][tag] = subfields
123
+ else:
124
+ dic[typ][tag] = subfields
125
+ return dic
126
+
127
+ def val2str(self, val):
128
+ """Return only first value"""
129
+ if type(val) == list:
130
+ # val = "/".join(val) # Concatenate all values
131
+ val = val[0]
132
+ return val
133
+
134
+ def lang2str(self, lg_record):
135
+ """Concatenate subtag values"""
136
+ name = f"{lg_record['language']}"
137
+ for label in ["extlang", "script", "region", "variant", "extension"]:
138
+ if label in lg_record:
139
+ name += f": {lg_record[label]}"
140
+ return name
141
+
142
+ def parse_tag(self, tag):
143
+ """Convert a BCP-47 tag to a dictionary of labelled subtags"""
144
+ subtags = tag.split("-")
145
+ lang = {}
146
+ labels = ["language", "extlang", "script", "region", "variant", "variant"]
147
+ while subtags and labels:
148
+ subtag = subtags.pop(0)
149
+ found = False
150
+ while labels:
151
+ label = labels.pop(0)
152
+ subtag = self.casing[label](subtag)
153
+ if self.format[label].fullmatch(subtag):
154
+ if subtag in self.db[label]:
155
+ found = True
156
+ valstr = self.val2str(self.db[label][subtag]["Description"])
157
+ if label == "variant" and label in lang:
158
+ lang[label] += ": " + valstr
159
+ else:
160
+ lang[label] = valstr
161
+ break
162
+ elif subtag in self.db["deprecated"][label]:
163
+ found = True
164
+ note = f"The {subtag!r} {label} code is deprecated"
165
+ if "Preferred-Value" in self.db["deprecated"][label][subtag]:
166
+ prefer = self.db["deprecated"][label][subtag][
167
+ "Preferred-Value"
168
+ ]
169
+ note += f", prefer {self.val2str(prefer)!r}"
170
+ lang[label] = self.val2str(
171
+ self.db["deprecated"][label][subtag]["Description"]
172
+ )
173
+ warn(note)
174
+ break
175
+ if not found:
176
+ if subtag == "u" and subtags[0] == "sd": # CLDR regional subdivisions
177
+ sd = subtags[1]
178
+ if sd in self.subdiv:
179
+ ext = self.subdiv[sd]
180
+ else:
181
+ ext = f"<Unknown subdivision: {sd}>"
182
+ else: # other extension subtags are not supported yet
183
+ ext = f"{subtag}{''.join(['-'+ext for ext in subtags])}".lower()
184
+ if not self.format["singleton"].fullmatch(subtag):
185
+ ext = f"<Invalid extension: {ext}>"
186
+ warn(ext)
187
+ lang["extension"] = ext
188
+ subtags = []
189
+ return lang
190
+
191
+ def name(self, tag):
192
+ """
193
+ Convert a BCP-47 tag to a colon-separated string of subtag names
194
+
195
+ >>> from nltk.corpus import bcp47
196
+ >>> bcp47.name('ca-Latn-ES-valencia')
197
+ 'Catalan: Latin: Spain: Valencian'
198
+
199
+ """
200
+ for label in ["redundant", "grandfathered"]:
201
+ val = None
202
+ if tag in self.db[label]:
203
+ val = f"{self.db[label][tag]['Description']}"
204
+ note = f"The {tag!r} code is {label}"
205
+ elif tag in self.db["deprecated"][label]:
206
+ val = f"{self.db['deprecated'][label][tag]['Description']}"
207
+ note = f"The {tag!r} code is {label} and deprecated"
208
+ if "Preferred-Value" in self.db["deprecated"][label][tag]:
209
+ prefer = self.db["deprecated"][label][tag]["Preferred-Value"]
210
+ note += f", prefer {self.val2str(prefer)!r}"
211
+ if val:
212
+ warn(note)
213
+ return val
214
+ try:
215
+ return self.lang2str(self.parse_tag(tag))
216
+ except:
217
+ warn(f"Tag {tag!r} was not recognized")
218
+ return None
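As a complement to the doctest examples in the class docstring above, a small sketch of ``parse_tag()`` next to ``name()``, assuming the ``bcp47`` data package has been downloaded into ``nltk_data``; the commented output is indicative, not verbatim:

    from nltk.corpus import bcp47

    # name() joins the subtag descriptions into one colon-separated string ...
    print(bcp47.name("ca-Latn-ES-valencia"))
    # 'Catalan: Latin: Spain: Valencian'

    # ... while parse_tag() keeps them as a labelled dictionary, roughly:
    # {'language': 'Catalan', 'script': 'Latin', 'region': 'Spain', 'variant': 'Valencian'}
    print(bcp47.parse_tag("ca-Latn-ES-valencia"))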
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bnc.py ADDED
@@ -0,0 +1,265 @@
1
+ # Natural Language Toolkit: British National Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """Corpus reader for the XML version of the British National Corpus."""
9
+
10
+ from nltk.corpus.reader.util import concat
11
+ from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader, XMLCorpusView
12
+
13
+
14
+ class BNCCorpusReader(XMLCorpusReader):
15
+ r"""Corpus reader for the XML version of the British National Corpus.
16
+
17
+ For access to the complete XML data structure, use the ``xml()``
18
+ method. For access to simple word lists and tagged word lists, use
19
+ ``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``.
20
+
21
+ You can obtain the full version of the BNC corpus at
22
+ https://www.ota.ox.ac.uk/desc/2554
23
+
24
+ If you extracted the archive to a directory called `BNC`, then you can
25
+ instantiate the reader as::
26
+
27
+ BNCCorpusReader(root='BNC/Texts/', fileids=r'[A-K]/\w*/\w*\.xml')
28
+
29
+ """
30
+
31
+ def __init__(self, root, fileids, lazy=True):
32
+ XMLCorpusReader.__init__(self, root, fileids)
33
+ self._lazy = lazy
34
+
35
+ def words(self, fileids=None, strip_space=True, stem=False):
36
+ """
37
+ :return: the given file(s) as a list of words
38
+ and punctuation symbols.
39
+ :rtype: list(str)
40
+
41
+ :param strip_space: If true, then strip trailing spaces from
42
+ word tokens. Otherwise, leave the spaces on the tokens.
43
+ :param stem: If true, then use word stems instead of word strings.
44
+ """
45
+ return self._views(fileids, False, None, strip_space, stem)
46
+
47
+ def tagged_words(self, fileids=None, c5=False, strip_space=True, stem=False):
48
+ """
49
+ :return: the given file(s) as a list of tagged
50
+ words and punctuation symbols, encoded as tuples
51
+ ``(word,tag)``.
52
+ :rtype: list(tuple(str,str))
53
+
54
+ :param c5: If true, then the tags used will be the more detailed
55
+ c5 tags. Otherwise, the simplified tags will be used.
56
+ :param strip_space: If true, then strip trailing spaces from
57
+ word tokens. Otherwise, leave the spaces on the tokens.
58
+ :param stem: If true, then use word stems instead of word strings.
59
+ """
60
+ tag = "c5" if c5 else "pos"
61
+ return self._views(fileids, False, tag, strip_space, stem)
62
+
63
+ def sents(self, fileids=None, strip_space=True, stem=False):
64
+ """
65
+ :return: the given file(s) as a list of
66
+ sentences or utterances, each encoded as a list of word
67
+ strings.
68
+ :rtype: list(list(str))
69
+
70
+ :param strip_space: If true, then strip trailing spaces from
71
+ word tokens. Otherwise, leave the spaces on the tokens.
72
+ :param stem: If true, then use word stems instead of word strings.
73
+ """
74
+ return self._views(fileids, True, None, strip_space, stem)
75
+
76
+ def tagged_sents(self, fileids=None, c5=False, strip_space=True, stem=False):
77
+ """
78
+ :return: the given file(s) as a list of
79
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
80
+ :rtype: list(list(tuple(str,str)))
81
+
82
+ :param c5: If true, then the tags used will be the more detailed
83
+ c5 tags. Otherwise, the simplified tags will be used.
84
+ :param strip_space: If true, then strip trailing spaces from
85
+ word tokens. Otherwise, leave the spaces on the tokens.
86
+ :param stem: If true, then use word stems instead of word strings.
87
+ """
88
+ tag = "c5" if c5 else "pos"
89
+ return self._views(
90
+ fileids, sent=True, tag=tag, strip_space=strip_space, stem=stem
91
+ )
92
+
93
+ def _views(self, fileids=None, sent=False, tag=False, strip_space=True, stem=False):
94
+ """A helper function that instantiates BNCWordViews or the list of words/sentences."""
95
+ f = BNCWordView if self._lazy else self._words
96
+ return concat(
97
+ [
98
+ f(fileid, sent, tag, strip_space, stem)
99
+ for fileid in self.abspaths(fileids)
100
+ ]
101
+ )
102
+
103
+ def _words(self, fileid, bracket_sent, tag, strip_space, stem):
104
+ """
105
+ Helper used to implement the view methods -- returns a list of
106
+ words or a list of sentences, optionally tagged.
107
+
108
+ :param fileid: The name of the underlying file.
109
+ :param bracket_sent: If true, include sentence bracketing.
110
+ :param tag: The name of the tagset to use, or None for no tags.
111
+ :param strip_space: If true, strip spaces from word tokens.
112
+ :param stem: If true, then substitute stems for words.
113
+ """
114
+ result = []
115
+
116
+ xmldoc = ElementTree.parse(fileid).getroot()
117
+ for xmlsent in xmldoc.findall(".//s"):
118
+ sent = []
119
+ for xmlword in _all_xmlwords_in(xmlsent):
120
+ word = xmlword.text
121
+ if not word:
122
+ word = "" # fixes issue 337?
123
+ if strip_space or stem:
124
+ word = word.strip()
125
+ if stem:
126
+ word = xmlword.get("hw", word)
127
+ if tag == "c5":
128
+ word = (word, xmlword.get("c5"))
129
+ elif tag == "pos":
130
+ word = (word, xmlword.get("pos", xmlword.get("c5")))
131
+ sent.append(word)
132
+ if bracket_sent:
133
+ result.append(BNCSentence(xmlsent.attrib["n"], sent))
134
+ else:
135
+ result.extend(sent)
136
+
137
+ assert None not in result
138
+ return result
139
+
140
+
141
+ def _all_xmlwords_in(elt, result=None):
142
+ if result is None:
143
+ result = []
144
+ for child in elt:
145
+ if child.tag in ("c", "w"):
146
+ result.append(child)
147
+ else:
148
+ _all_xmlwords_in(child, result)
149
+ return result
150
+
151
+
152
+ class BNCSentence(list):
153
+ """
154
+ A list of words, augmented by an attribute ``num`` used to record
155
+ the sentence identifier (the ``n`` attribute from the XML).
156
+ """
157
+
158
+ def __init__(self, num, items):
159
+ self.num = num
160
+ list.__init__(self, items)
161
+
162
+
163
+ class BNCWordView(XMLCorpusView):
164
+ """
165
+ A stream backed corpus view specialized for use with the BNC corpus.
166
+ """
167
+
168
+ tags_to_ignore = {
169
+ "pb",
170
+ "gap",
171
+ "vocal",
172
+ "event",
173
+ "unclear",
174
+ "shift",
175
+ "pause",
176
+ "align",
177
+ }
178
+ """These tags are ignored. For their description refer to the
179
+ technical documentation, for example,
180
+ http://www.natcorp.ox.ac.uk/docs/URG/ref-vocal.html
181
+
182
+ """
183
+
184
+ def __init__(self, fileid, sent, tag, strip_space, stem):
185
+ """
186
+ :param fileid: The name of the underlying file.
187
+ :param sent: If true, include sentence bracketing.
188
+ :param tag: The name of the tagset to use, or None for no tags.
189
+ :param strip_space: If true, strip spaces from word tokens.
190
+ :param stem: If true, then substitute stems for words.
191
+ """
192
+ if sent:
193
+ tagspec = ".*/s"
194
+ else:
195
+ tagspec = ".*/s/(.*/)?(c|w)"
196
+ self._sent = sent
197
+ self._tag = tag
198
+ self._strip_space = strip_space
199
+ self._stem = stem
200
+
201
+ self.title = None #: Title of the document.
202
+ self.author = None #: Author of the document.
203
+ self.editor = None #: Editor
204
+ self.resps = None #: Statement of responsibility
205
+
206
+ XMLCorpusView.__init__(self, fileid, tagspec)
207
+
208
+ # Read in a tasty header.
209
+ self._open()
210
+ self.read_block(self._stream, ".*/teiHeader$", self.handle_header)
211
+ self.close()
212
+
213
+ # Reset tag context.
214
+ self._tag_context = {0: ()}
215
+
216
+ def handle_header(self, elt, context):
217
+ # Set up some metadata!
218
+ titles = elt.findall("titleStmt/title")
219
+ if titles:
220
+ self.title = "\n".join(title.text.strip() for title in titles)
221
+
222
+ authors = elt.findall("titleStmt/author")
223
+ if authors:
224
+ self.author = "\n".join(author.text.strip() for author in authors)
225
+
226
+ editors = elt.findall("titleStmt/editor")
227
+ if editors:
228
+ self.editor = "\n".join(editor.text.strip() for editor in editors)
229
+
230
+ resps = elt.findall("titleStmt/respStmt")
231
+ if resps:
232
+ self.resps = "\n\n".join(
233
+ "\n".join(resp_elt.text.strip() for resp_elt in resp) for resp in resps
234
+ )
235
+
236
+ def handle_elt(self, elt, context):
237
+ if self._sent:
238
+ return self.handle_sent(elt)
239
+ else:
240
+ return self.handle_word(elt)
241
+
242
+ def handle_word(self, elt):
243
+ word = elt.text
244
+ if not word:
245
+ word = "" # fixes issue 337?
246
+ if self._strip_space or self._stem:
247
+ word = word.strip()
248
+ if self._stem:
249
+ word = elt.get("hw", word)
250
+ if self._tag == "c5":
251
+ word = (word, elt.get("c5"))
252
+ elif self._tag == "pos":
253
+ word = (word, elt.get("pos", elt.get("c5")))
254
+ return word
255
+
256
+ def handle_sent(self, elt):
257
+ sent = []
258
+ for child in elt:
259
+ if child.tag in ("mw", "hi", "corr", "trunc"):
260
+ sent += [self.handle_word(w) for w in child]
261
+ elif child.tag in ("w", "c"):
262
+ sent.append(self.handle_word(child))
263
+ elif child.tag not in self.tags_to_ignore:
264
+ raise ValueError("Unexpected element %s" % child.tag)
265
+ return BNCSentence(elt.attrib["n"], sent)
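A usage sketch for the reader above, assuming the BNC XML distribution has been extracted to a local ``BNC/Texts/`` directory as described in the class docstring (the path is an assumption about your local setup):

    from nltk.corpus.reader.bnc import BNCCorpusReader

    bnc = BNCCorpusReader(root="BNC/Texts/", fileids=r"[A-K]/\w*/\w*\.xml")
    print(bnc.words()[:10])               # plain word tokens
    print(bnc.tagged_words(c5=True)[:5])  # (word, C5 tag) pairs
    print(bnc.sents(stem=True)[:1])       # first sentence, with stems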
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/bracket_parse.py ADDED
@@ -0,0 +1,237 @@
1
+ # Natural Language Toolkit: Penn Treebank Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ """
9
+ Corpus reader for corpora that consist of parenthesis-delineated parse trees.
10
+ """
11
+
12
+ import sys
13
+
14
+ from nltk.corpus.reader.api import *
15
+ from nltk.corpus.reader.util import *
16
+ from nltk.tag import map_tag
17
+ from nltk.tree import Tree
18
+
19
+ # we use [^\s()]+ instead of \S+? to avoid matching ()
20
+ SORTTAGWRD = re.compile(r"\((\d+) ([^\s()]+) ([^\s()]+)\)")
21
+ TAGWORD = re.compile(r"\(([^\s()]+) ([^\s()]+)\)")
22
+ WORD = re.compile(r"\([^\s()]+ ([^\s()]+)\)")
23
+ EMPTY_BRACKETS = re.compile(r"\s*\(\s*\(")
24
+
25
+
26
+ class BracketParseCorpusReader(SyntaxCorpusReader):
27
+ """
28
+ Reader for corpora that consist of parenthesis-delineated parse trees,
29
+ like those found in the "combined" section of the Penn Treebank,
30
+ e.g. "(S (NP (DT the) (JJ little) (NN dog)) (VP (VBD barked)))".
31
+
32
+ """
33
+
34
+ def __init__(
35
+ self,
36
+ root,
37
+ fileids,
38
+ comment_char=None,
39
+ detect_blocks="unindented_paren",
40
+ encoding="utf8",
41
+ tagset=None,
42
+ ):
43
+ """
44
+ :param root: The root directory for this corpus.
45
+ :param fileids: A list or regexp specifying the fileids in this corpus.
46
+ :param comment_char: The character which can appear at the start of
47
+ a line to indicate that the rest of the line is a comment.
48
+ :param detect_blocks: The method that is used to find blocks
49
+ in the corpus; can be 'unindented_paren' (every unindented
50
+ parenthesis starts a new parse) or 'sexpr' (brackets are
51
+ matched).
52
+ :param tagset: The name of the tagset used by this corpus, to be used
53
+ for normalizing or converting the POS tags returned by the
54
+ ``tagged_...()`` methods.
55
+ """
56
+ SyntaxCorpusReader.__init__(self, root, fileids, encoding)
57
+ self._comment_char = comment_char
58
+ self._detect_blocks = detect_blocks
59
+ self._tagset = tagset
60
+
61
+ def _read_block(self, stream):
62
+ if self._detect_blocks == "sexpr":
63
+ return read_sexpr_block(stream, comment_char=self._comment_char)
64
+ elif self._detect_blocks == "blankline":
65
+ return read_blankline_block(stream)
66
+ elif self._detect_blocks == "unindented_paren":
67
+ # Tokens start with unindented left parens.
68
+ toks = read_regexp_block(stream, start_re=r"^\(")
69
+ # Strip any comments out of the tokens.
70
+ if self._comment_char:
71
+ toks = [
72
+ re.sub("(?m)^%s.*" % re.escape(self._comment_char), "", tok)
73
+ for tok in toks
74
+ ]
75
+ return toks
76
+ else:
77
+ assert 0, "bad block type"
78
+
79
+ def _normalize(self, t):
80
+ # Replace leaves of the form (!), (,), with (! !), (, ,)
81
+ t = re.sub(r"\((.)\)", r"(\1 \1)", t)
82
+ # Replace leaves of the form (tag word root) with (tag word)
83
+ t = re.sub(r"\(([^\s()]+) ([^\s()]+) [^\s()]+\)", r"(\1 \2)", t)
84
+ return t
85
+
86
+ def _parse(self, t):
87
+ try:
88
+ tree = Tree.fromstring(self._normalize(t))
89
+ # If there's an empty node at the top, strip it off
90
+ if tree.label() == "" and len(tree) == 1:
91
+ return tree[0]
92
+ else:
93
+ return tree
94
+
95
+ except ValueError as e:
96
+ sys.stderr.write("Bad tree detected; trying to recover...\n")
97
+ # Try to recover, if we can:
98
+ if e.args == ("mismatched parens",):
99
+ for n in range(1, 5):
100
+ try:
101
+ v = Tree(self._normalize(t + ")" * n))
102
+ sys.stderr.write(
103
+ " Recovered by adding %d close " "paren(s)\n" % n
104
+ )
105
+ return v
106
+ except ValueError:
107
+ pass
108
+ # Try something else:
109
+ sys.stderr.write(" Recovered by returning a flat parse.\n")
110
+ # sys.stderr.write(' '.join(t.split())+'\n')
111
+ return Tree("S", self._tag(t))
112
+
113
+ def _tag(self, t, tagset=None):
114
+ tagged_sent = [(w, p) for (p, w) in TAGWORD.findall(self._normalize(t))]
115
+ if tagset and tagset != self._tagset:
116
+ tagged_sent = [
117
+ (w, map_tag(self._tagset, tagset, p)) for (w, p) in tagged_sent
118
+ ]
119
+ return tagged_sent
120
+
121
+ def _word(self, t):
122
+ return WORD.findall(self._normalize(t))
123
+
124
+
125
+ class CategorizedBracketParseCorpusReader(
126
+ CategorizedCorpusReader, BracketParseCorpusReader
127
+ ):
128
+ """
129
+ A reader for parsed corpora whose documents are
130
+ divided into categories based on their file identifiers.
131
+ @author: Nathan Schneider <[email protected]>
132
+ """
133
+
134
+ def __init__(self, *args, **kwargs):
135
+ """
136
+ Initialize the corpus reader. Categorization arguments
137
+ (C{cat_pattern}, C{cat_map}, and C{cat_file}) are passed to
138
+ the L{CategorizedCorpusReader constructor
139
+ <CategorizedCorpusReader.__init__>}. The remaining arguments
140
+ are passed to the L{BracketParseCorpusReader constructor
141
+ <BracketParseCorpusReader.__init__>}.
142
+ """
143
+ CategorizedCorpusReader.__init__(self, kwargs)
144
+ BracketParseCorpusReader.__init__(self, *args, **kwargs)
145
+
146
+ def tagged_words(self, fileids=None, categories=None, tagset=None):
147
+ return super().tagged_words(self._resolve(fileids, categories), tagset)
148
+
149
+ def tagged_sents(self, fileids=None, categories=None, tagset=None):
150
+ return super().tagged_sents(self._resolve(fileids, categories), tagset)
151
+
152
+ def tagged_paras(self, fileids=None, categories=None, tagset=None):
153
+ return super().tagged_paras(self._resolve(fileids, categories), tagset)
154
+
155
+ def parsed_words(self, fileids=None, categories=None):
156
+ return super().parsed_words(self._resolve(fileids, categories))
157
+
158
+ def parsed_sents(self, fileids=None, categories=None):
159
+ return super().parsed_sents(self._resolve(fileids, categories))
160
+
161
+ def parsed_paras(self, fileids=None, categories=None):
162
+ return super().parsed_paras(self._resolve(fileids, categories))
163
+
164
+
165
+ class AlpinoCorpusReader(BracketParseCorpusReader):
166
+ """
167
+ Reader for the Alpino Dutch Treebank.
168
+ This corpus has a lexical breakdown structure embedded, as read by `_parse`.
169
+ Unfortunately this puts punctuation and some other words out of the sentence
170
+ order in the xml element tree, which is no good for the word and tag views.
171
+ Therefore `_tag` and `_word` are overridden to pass a non-default parameter 'ordered'
172
+ to the overridden `_normalize` function; the `_parse` function can then remain
173
+ untouched.
174
+ """
175
+
176
+ def __init__(self, root, encoding="ISO-8859-1", tagset=None):
177
+ BracketParseCorpusReader.__init__(
178
+ self,
179
+ root,
180
+ r"alpino\.xml",
181
+ detect_blocks="blankline",
182
+ encoding=encoding,
183
+ tagset=tagset,
184
+ )
185
+
186
+ def _normalize(self, t, ordered=False):
187
+ """Normalize the xml sentence element in t.
188
+ The sentence elements <alpino_ds>, although embedded in a few overall
189
+ xml elements, are separated by blank lines. That's how the reader can
190
+ deliver them one at a time.
191
+ Each sentence has a few category subnodes that are of no use to us.
192
+ The remaining word nodes may or may not appear in the proper order.
193
+ Each word node has attributes, among which:
194
+ - begin : the position of the word in the sentence
195
+ - pos : Part of Speech: the Tag
196
+ - word : the actual word
197
+ The return value is a string with all xml elements replaced by
198
+ clauses: either a cat clause with nested clauses, or a word clause.
199
+ The order of the bracket clauses closely follows the xml.
200
+ If ordered == True, the word clauses include an order sequence number.
201
+ If ordered == False, the word clauses only have pos and word parts.
202
+ """
203
+ if t[:10] != "<alpino_ds":
204
+ return ""
205
+ # convert XML to sexpr notation
206
+ t = re.sub(r' <node .*? cat="(\w+)".*>', r"(\1", t)
207
+ if ordered:
208
+ t = re.sub(
209
+ r' <node .*?begin="(\d+)".*? pos="(\w+)".*? word="([^"]+)".*?/>',
210
+ r"(\1 \2 \3)",
211
+ t,
212
+ )
213
+ else:
214
+ t = re.sub(r' <node .*?pos="(\w+)".*? word="([^"]+)".*?/>', r"(\1 \2)", t)
215
+ t = re.sub(r" </node>", r")", t)
216
+ t = re.sub(r"<sentence>.*</sentence>", r"", t)
217
+ t = re.sub(r"</?alpino_ds.*>", r"", t)
218
+ return t
219
+
220
+ def _tag(self, t, tagset=None):
221
+ tagged_sent = [
222
+ (int(o), w, p)
223
+ for (o, p, w) in SORTTAGWRD.findall(self._normalize(t, ordered=True))
224
+ ]
225
+ tagged_sent.sort()
226
+ if tagset and tagset != self._tagset:
227
+ tagged_sent = [
228
+ (w, map_tag(self._tagset, tagset, p)) for (o, w, p) in tagged_sent
229
+ ]
230
+ else:
231
+ tagged_sent = [(w, p) for (o, w, p) in tagged_sent]
232
+ return tagged_sent
233
+
234
+ def _word(self, t):
235
+ """Return a correctly ordered list of words"""
236
+ tagged_sent = self._tag(t)
237
+ return [w for (w, p) in tagged_sent]
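A minimal sketch of pointing the reader above at a directory of bracketed parse files; the ``treebank/`` directory and ``*.mrg`` pattern are illustrative assumptions:

    from nltk.corpus.reader import BracketParseCorpusReader

    # Each file contains trees such as:
    # (S (NP (DT the) (JJ little) (NN dog)) (VP (VBD barked)))
    reader = BracketParseCorpusReader("treebank/", r".*\.mrg")  # hypothetical paths
    tree = reader.parsed_sents()[0]
    tree.pretty_print()                 # draw the first parse tree as ASCII art
    print(reader.tagged_words()[:5])    # e.g. [('the', 'DT'), ('little', 'JJ'), ...]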
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/chasen.py ADDED
@@ -0,0 +1,158 @@
1
+ #
2
+ # Copyright (C) 2001-2023 NLTK Project
3
+ # Author: Masato Hagiwara <[email protected]>
4
+ # URL: <https://www.nltk.org/>
5
+ # For license information, see LICENSE.TXT
6
+
7
+ import sys
8
+
9
+ from nltk.corpus.reader import util
10
+ from nltk.corpus.reader.api import *
11
+ from nltk.corpus.reader.util import *
12
+
13
+
14
+ class ChasenCorpusReader(CorpusReader):
15
+ def __init__(self, root, fileids, encoding="utf8", sent_splitter=None):
16
+ self._sent_splitter = sent_splitter
17
+ CorpusReader.__init__(self, root, fileids, encoding)
18
+
19
+ def words(self, fileids=None):
20
+ return concat(
21
+ [
22
+ ChasenCorpusView(fileid, enc, False, False, False, self._sent_splitter)
23
+ for (fileid, enc) in self.abspaths(fileids, True)
24
+ ]
25
+ )
26
+
27
+ def tagged_words(self, fileids=None):
28
+ return concat(
29
+ [
30
+ ChasenCorpusView(fileid, enc, True, False, False, self._sent_splitter)
31
+ for (fileid, enc) in self.abspaths(fileids, True)
32
+ ]
33
+ )
34
+
35
+ def sents(self, fileids=None):
36
+ return concat(
37
+ [
38
+ ChasenCorpusView(fileid, enc, False, True, False, self._sent_splitter)
39
+ for (fileid, enc) in self.abspaths(fileids, True)
40
+ ]
41
+ )
42
+
43
+ def tagged_sents(self, fileids=None):
44
+ return concat(
45
+ [
46
+ ChasenCorpusView(fileid, enc, True, True, False, self._sent_splitter)
47
+ for (fileid, enc) in self.abspaths(fileids, True)
48
+ ]
49
+ )
50
+
51
+ def paras(self, fileids=None):
52
+ return concat(
53
+ [
54
+ ChasenCorpusView(fileid, enc, False, True, True, self._sent_splitter)
55
+ for (fileid, enc) in self.abspaths(fileids, True)
56
+ ]
57
+ )
58
+
59
+ def tagged_paras(self, fileids=None):
60
+ return concat(
61
+ [
62
+ ChasenCorpusView(fileid, enc, True, True, True, self._sent_splitter)
63
+ for (fileid, enc) in self.abspaths(fileids, True)
64
+ ]
65
+ )
66
+
67
+
68
+ class ChasenCorpusView(StreamBackedCorpusView):
69
+ """
70
+ A specialized corpus view for ChasenReader. Similar to ``TaggedCorpusView``,
71
+ but this'll use fixed sets of word and sentence tokenizer.
72
+ """
73
+
74
+ def __init__(
75
+ self,
76
+ corpus_file,
77
+ encoding,
78
+ tagged,
79
+ group_by_sent,
80
+ group_by_para,
81
+ sent_splitter=None,
82
+ ):
83
+ self._tagged = tagged
84
+ self._group_by_sent = group_by_sent
85
+ self._group_by_para = group_by_para
86
+ self._sent_splitter = sent_splitter
87
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
88
+
89
+ def read_block(self, stream):
90
+ """Reads one paragraph at a time."""
91
+ block = []
92
+ for para_str in read_regexp_block(stream, r".", r"^EOS\n"):
93
+
94
+ para = []
95
+
96
+ sent = []
97
+ for line in para_str.splitlines():
98
+
99
+ _eos = line.strip() == "EOS"
100
+ _cells = line.split("\t")
101
+ w = (_cells[0], "\t".join(_cells[1:]))
102
+ if not _eos:
103
+ sent.append(w)
104
+
105
+ if _eos or (self._sent_splitter and self._sent_splitter(w)):
106
+ if not self._tagged:
107
+ sent = [w for (w, t) in sent]
108
+ if self._group_by_sent:
109
+ para.append(sent)
110
+ else:
111
+ para.extend(sent)
112
+ sent = []
113
+
114
+ if len(sent) > 0:
115
+ if not self._tagged:
116
+ sent = [w for (w, t) in sent]
117
+
118
+ if self._group_by_sent:
119
+ para.append(sent)
120
+ else:
121
+ para.extend(sent)
122
+
123
+ if self._group_by_para:
124
+ block.append(para)
125
+ else:
126
+ block.extend(para)
127
+
128
+ return block
129
+
130
+
131
+ def demo():
132
+
133
+ import nltk
134
+ from nltk.corpus.util import LazyCorpusLoader
135
+
136
+ jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
137
+ print("/".join(jeita.words()[22100:22140]))
138
+
139
+ print(
140
+ "\nEOS\n".join(
141
+ "\n".join("{}/{}".format(w[0], w[1].split("\t")[2]) for w in sent)
142
+ for sent in jeita.tagged_sents()[2170:2173]
143
+ )
144
+ )
145
+
146
+
147
+ def test():
148
+
149
+ from nltk.corpus.util import LazyCorpusLoader
150
+
151
+ jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
152
+
153
+ assert isinstance(jeita.tagged_words()[0][1], str)
154
+
155
+
156
+ if __name__ == "__main__":
157
+ demo()
158
+ test()
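The ``sent_splitter`` parameter is not documented above; judging from ``ChasenCorpusView.read_block``, it is called on each ``(surface, feature-string)`` pair and should return True on sentence-final tokens, splitting sentences in addition to the ``EOS`` markers. A hedged sketch (the root path and the predicate are assumptions, not part of the corpus distribution):

    from nltk.corpus.reader.chasen import ChasenCorpusReader

    # Treat the Japanese full stop as a sentence boundary as well as EOS lines.
    split_on_kuten = lambda w: w[0] == "。"   # w is a (word, features) tuple
    reader = ChasenCorpusReader("jeita/", r".*chasen", encoding="utf-8",
                                sent_splitter=split_on_kuten)
    print(len(reader.sents()[0]))             # length of the first sentence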
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/childes.py ADDED
@@ -0,0 +1,630 @@
1
+ # CHILDES XML Corpus Reader
2
+
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Tomonori Nagano <[email protected]>
5
+ # Alexis Dimitriadis <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Corpus reader for the XML version of the CHILDES corpus.
11
+ """
12
+
13
+ __docformat__ = "epytext en"
14
+
15
+ import re
16
+ from collections import defaultdict
17
+
18
+ from nltk.corpus.reader.util import concat
19
+ from nltk.corpus.reader.xmldocs import ElementTree, XMLCorpusReader
20
+ from nltk.util import LazyConcatenation, LazyMap, flatten
21
+
22
+ # to resolve the namespace issue
23
+ NS = "http://www.talkbank.org/ns/talkbank"
24
+
25
+
26
+ class CHILDESCorpusReader(XMLCorpusReader):
27
+ """
28
+ Corpus reader for the XML version of the CHILDES corpus.
29
+ The CHILDES corpus is available at ``https://childes.talkbank.org/``. The XML
30
+ version of CHILDES is located at ``https://childes.talkbank.org/data-xml/``.
31
+ Copy the needed parts of the CHILDES XML corpus into the NLTK data directory
32
+ (``nltk_data/corpora/CHILDES/``).
33
+
34
+ For access to the file text use the usual nltk functions,
35
+ ``words()``, ``sents()``, ``tagged_words()`` and ``tagged_sents()``.
36
+ """
37
+
38
+ def __init__(self, root, fileids, lazy=True):
39
+ XMLCorpusReader.__init__(self, root, fileids)
40
+ self._lazy = lazy
41
+
42
+ def words(
43
+ self,
44
+ fileids=None,
45
+ speaker="ALL",
46
+ stem=False,
47
+ relation=False,
48
+ strip_space=True,
49
+ replace=False,
50
+ ):
51
+ """
52
+ :return: the given file(s) as a list of words
53
+ :rtype: list(str)
54
+
55
+ :param speaker: If specified, select specific speaker(s) defined
56
+ in the corpus. Default is 'ALL' (all participants). Common choices
57
+ are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude
58
+ researchers)
59
+ :param stem: If true, then use word stems instead of word strings.
60
+ :param relation: If true, then return tuples of (stem, index,
61
+ dependent_index)
62
+ :param strip_space: If true, then strip trailing spaces from word
63
+ tokens. Otherwise, leave the spaces on the tokens.
64
+ :param replace: If true, then use the replaced (intended) word instead
65
+ of the original word (e.g., 'wat' will be replaced with 'watch')
66
+ """
67
+ sent = None
68
+ pos = False
69
+ if not self._lazy:
70
+ return [
71
+ self._get_words(
72
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
73
+ )
74
+ for fileid in self.abspaths(fileids)
75
+ ]
76
+
77
+ get_words = lambda fileid: self._get_words(
78
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
79
+ )
80
+ return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids)))
81
+
82
+ def tagged_words(
83
+ self,
84
+ fileids=None,
85
+ speaker="ALL",
86
+ stem=False,
87
+ relation=False,
88
+ strip_space=True,
89
+ replace=False,
90
+ ):
91
+ """
92
+ :return: the given file(s) as a list of tagged
93
+ words and punctuation symbols, encoded as tuples
94
+ ``(word,tag)``.
95
+ :rtype: list(tuple(str,str))
96
+
97
+ :param speaker: If specified, select specific speaker(s) defined
98
+ in the corpus. Default is 'ALL' (all participants). Common choices
99
+ are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude
100
+ researchers)
101
+ :param stem: If true, then use word stems instead of word strings.
102
+ :param relation: If true, then return tuples of (stem, index,
103
+ dependent_index)
104
+ :param strip_space: If true, then strip trailing spaces from word
105
+ tokens. Otherwise, leave the spaces on the tokens.
106
+ :param replace: If true, then use the replaced (intended) word instead
107
+ of the original word (e.g., 'wat' will be replaced with 'watch')
108
+ """
109
+ sent = None
110
+ pos = True
111
+ if not self._lazy:
112
+ return [
113
+ self._get_words(
114
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
115
+ )
116
+ for fileid in self.abspaths(fileids)
117
+ ]
118
+
119
+ get_words = lambda fileid: self._get_words(
120
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
121
+ )
122
+ return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids)))
123
+
124
+ def sents(
125
+ self,
126
+ fileids=None,
127
+ speaker="ALL",
128
+ stem=False,
129
+ relation=None,
130
+ strip_space=True,
131
+ replace=False,
132
+ ):
133
+ """
134
+ :return: the given file(s) as a list of sentences or utterances, each
135
+ encoded as a list of word strings.
136
+ :rtype: list(list(str))
137
+
138
+ :param speaker: If specified, select specific speaker(s) defined
139
+ in the corpus. Default is 'ALL' (all participants). Common choices
140
+ are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude
141
+ researchers)
142
+ :param stem: If true, then use word stems instead of word strings.
143
+ :param relation: If true, then return tuples of ``(str,pos,relation_list)``.
144
+ If there is manually-annotated relation info, it will return
145
+ tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)``
146
+ :param strip_space: If true, then strip trailing spaces from word
147
+ tokens. Otherwise, leave the spaces on the tokens.
148
+ :param replace: If true, then use the replaced (intended) word instead
149
+ of the original word (e.g., 'wat' will be replaced with 'watch')
150
+ """
151
+ sent = True
152
+ pos = False
153
+ if not self._lazy:
154
+ return [
155
+ self._get_words(
156
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
157
+ )
158
+ for fileid in self.abspaths(fileids)
159
+ ]
160
+
161
+ get_words = lambda fileid: self._get_words(
162
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
163
+ )
164
+ return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids)))
165
+
166
+ def tagged_sents(
167
+ self,
168
+ fileids=None,
169
+ speaker="ALL",
170
+ stem=False,
171
+ relation=None,
172
+ strip_space=True,
173
+ replace=False,
174
+ ):
175
+ """
176
+ :return: the given file(s) as a list of
177
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
178
+ :rtype: list(list(tuple(str,str)))
179
+
180
+ :param speaker: If specified, select specific speaker(s) defined
181
+ in the corpus. Default is 'ALL' (all participants). Common choices
182
+ are 'CHI' (the child), 'MOT' (mother), ['CHI','MOT'] (exclude
183
+ researchers)
184
+ :param stem: If true, then use word stems instead of word strings.
185
+ :param relation: If true, then return tuples of ``(str,pos,relation_list)``.
186
+ If there is manually-annotated relation info, it will return
187
+ tuples of ``(str,pos,test_relation_list,str,pos,gold_relation_list)``
188
+ :param strip_space: If true, then strip trailing spaces from word
189
+ tokens. Otherwise, leave the spaces on the tokens.
190
+ :param replace: If true, then use the replaced (intended) word instead
191
+ of the original word (e.g., 'wat' will be replaced with 'watch')
192
+ """
193
+ sent = True
194
+ pos = True
195
+ if not self._lazy:
196
+ return [
197
+ self._get_words(
198
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
199
+ )
200
+ for fileid in self.abspaths(fileids)
201
+ ]
202
+
203
+ get_words = lambda fileid: self._get_words(
204
+ fileid, speaker, sent, stem, relation, pos, strip_space, replace
205
+ )
206
+ return LazyConcatenation(LazyMap(get_words, self.abspaths(fileids)))
207
+
208
+ def corpus(self, fileids=None):
209
+ """
210
+ :return: the given file(s) as a dict of ``(corpus_property_key, value)``
211
+ :rtype: list(dict)
212
+ """
213
+ if not self._lazy:
214
+ return [self._get_corpus(fileid) for fileid in self.abspaths(fileids)]
215
+ return LazyMap(self._get_corpus, self.abspaths(fileids))
216
+
217
+ def _get_corpus(self, fileid):
218
+ results = dict()
219
+ xmldoc = ElementTree.parse(fileid).getroot()
220
+ for key, value in xmldoc.items():
221
+ results[key] = value
222
+ return results
223
+
224
+ def participants(self, fileids=None):
225
+ """
226
+ :return: the given file(s) as a dict of
227
+ ``(participant_property_key, value)``
228
+ :rtype: list(dict)
229
+ """
230
+ if not self._lazy:
231
+ return [self._get_participants(fileid) for fileid in self.abspaths(fileids)]
232
+ return LazyMap(self._get_participants, self.abspaths(fileids))
233
+
234
+ def _get_participants(self, fileid):
235
+ # multidimensional dicts
236
+ def dictOfDicts():
237
+ return defaultdict(dictOfDicts)
238
+
239
+ xmldoc = ElementTree.parse(fileid).getroot()
240
+ # getting participants' data
241
+ pat = dictOfDicts()
242
+ for participant in xmldoc.findall(
243
+ f".//{{{NS}}}Participants/{{{NS}}}participant"
244
+ ):
245
+ for (key, value) in participant.items():
246
+ pat[participant.get("id")][key] = value
247
+ return pat
248
+
249
+ def age(self, fileids=None, speaker="CHI", month=False):
250
+ """
251
+ :return: the given file(s) as string or int
252
+ :rtype: list or int
253
+
254
+ :param month: If true, return months instead of year-month-date
255
+ """
256
+ if not self._lazy:
257
+ return [
258
+ self._get_age(fileid, speaker, month)
259
+ for fileid in self.abspaths(fileids)
260
+ ]
261
+ get_age = lambda fileid: self._get_age(fileid, speaker, month)
262
+ return LazyMap(get_age, self.abspaths(fileids))
263
+
264
+ def _get_age(self, fileid, speaker, month):
265
+ xmldoc = ElementTree.parse(fileid).getroot()
266
+ for pat in xmldoc.findall(f".//{{{NS}}}Participants/{{{NS}}}participant"):
267
+ try:
268
+ if pat.get("id") == speaker:
269
+ age = pat.get("age")
270
+ if month:
271
+ age = self.convert_age(age)
272
+ return age
273
+ # some files don't have age data
274
+ except (TypeError, AttributeError) as e:
275
+ return None
276
+
277
+ def convert_age(self, age_year):
278
+ "Caclculate age in months from a string in CHILDES format"
279
+ m = re.match(r"P(\d+)Y(\d+)M?(\d?\d?)D?", age_year)
280
+ age_month = int(m.group(1)) * 12 + int(m.group(2))
281
+ try:
282
+ if int(m.group(3)) > 15:
283
+ age_month += 1
284
+ # some corpora don't have age information?
285
+ except ValueError as e:
286
+ pass
287
+ return age_month
288
+
289
+ def MLU(self, fileids=None, speaker="CHI"):
290
+ """
291
+ :return: the given file(s) as a floating number
292
+ :rtype: list(float)
293
+ """
294
+ if not self._lazy:
295
+ return [
296
+ self._getMLU(fileid, speaker=speaker)
297
+ for fileid in self.abspaths(fileids)
298
+ ]
299
+ get_MLU = lambda fileid: self._getMLU(fileid, speaker=speaker)
300
+ return LazyMap(get_MLU, self.abspaths(fileids))
301
+
302
+ def _getMLU(self, fileid, speaker):
303
+ sents = self._get_words(
304
+ fileid,
305
+ speaker=speaker,
306
+ sent=True,
307
+ stem=True,
308
+ relation=False,
309
+ pos=True,
310
+ strip_space=True,
311
+ replace=True,
312
+ )
313
+ results = []
314
+ lastSent = []
315
+ numFillers = 0
316
+ sentDiscount = 0
317
+ for sent in sents:
318
+ posList = [pos for (word, pos) in sent]
319
+ # if any part of the sentence is intelligible
320
+ if any(pos == "unk" for pos in posList):
321
+ continue
322
+ # if the sentence is null
323
+ elif sent == []:
324
+ continue
325
+ # if the sentence is the same as the last sent
326
+ elif sent == lastSent:
327
+ continue
328
+ else:
329
+ results.append([word for (word, pos) in sent])
330
+ # count number of fillers
331
+ if len({"co", None}.intersection(posList)) > 0:
332
+ numFillers += posList.count("co")
333
+ numFillers += posList.count(None)
334
+ sentDiscount += 1
335
+ lastSent = sent
336
+ try:
337
+ thisWordList = flatten(results)
338
+ # count number of morphemes
339
+ # (e.g., 'read' = 1 morpheme but 'read-PAST' is 2 morphemes)
340
+ numWords = (
341
+ len(flatten([word.split("-") for word in thisWordList])) - numFillers
342
+ )
343
+ numSents = len(results) - sentDiscount
344
+ mlu = numWords / numSents
345
+ except ZeroDivisionError:
346
+ mlu = 0
347
+ # return {'mlu':mlu,'wordNum':numWords,'sentNum':numSents}
348
+ return mlu
349
+
350
+ def _get_words(
351
+ self, fileid, speaker, sent, stem, relation, pos, strip_space, replace
352
+ ):
353
+ if (
354
+ isinstance(speaker, str) and speaker != "ALL"
355
+ ): # ensure we have a list of speakers
356
+ speaker = [speaker]
357
+ xmldoc = ElementTree.parse(fileid).getroot()
358
+ # processing each xml doc
359
+ results = []
360
+ for xmlsent in xmldoc.findall(".//{%s}u" % NS):
361
+ sents = []
362
+ # select speakers
363
+ if speaker == "ALL" or xmlsent.get("who") in speaker:
364
+ for xmlword in xmlsent.findall(".//{%s}w" % NS):
365
+ infl = None
366
+ suffixStem = None
367
+ suffixTag = None
368
+ # getting replaced words
369
+ if replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}replacement"):
370
+ xmlword = xmlsent.find(
371
+ f".//{{{NS}}}w/{{{NS}}}replacement/{{{NS}}}w"
372
+ )
373
+ elif replace and xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk"):
374
+ xmlword = xmlsent.find(f".//{{{NS}}}w/{{{NS}}}wk")
375
+ # get text
376
+ if xmlword.text:
377
+ word = xmlword.text
378
+ else:
379
+ word = ""
380
+ # strip tailing space
381
+ if strip_space:
382
+ word = word.strip()
383
+ # stem
384
+ if relation or stem:
385
+ try:
386
+ xmlstem = xmlword.find(".//{%s}stem" % NS)
387
+ word = xmlstem.text
388
+ except AttributeError as e:
389
+ pass
390
+ # if there is an inflection
391
+ try:
392
+ xmlinfl = xmlword.find(
393
+ f".//{{{NS}}}mor/{{{NS}}}mw/{{{NS}}}mk"
394
+ )
395
+ word += "-" + xmlinfl.text
396
+ except:
397
+ pass
398
+ # if there is a suffix
399
+ try:
400
+ xmlsuffix = xmlword.find(
401
+ ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}stem"
402
+ % (NS, NS, NS, NS)
403
+ )
404
+ suffixStem = xmlsuffix.text
405
+ except AttributeError:
406
+ suffixStem = ""
407
+ if suffixStem:
408
+ word += "~" + suffixStem
409
+ # pos
410
+ if relation or pos:
411
+ try:
412
+ xmlpos = xmlword.findall(".//{%s}c" % NS)
413
+ xmlpos2 = xmlword.findall(".//{%s}s" % NS)
414
+ if xmlpos2 != []:
415
+ tag = xmlpos[0].text + ":" + xmlpos2[0].text
416
+ else:
417
+ tag = xmlpos[0].text
418
+ except (AttributeError, IndexError) as e:
419
+ tag = ""
420
+ try:
421
+ xmlsuffixpos = xmlword.findall(
422
+ ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}c"
423
+ % (NS, NS, NS, NS, NS)
424
+ )
425
+ xmlsuffixpos2 = xmlword.findall(
426
+ ".//{%s}mor/{%s}mor-post/{%s}mw/{%s}pos/{%s}s"
427
+ % (NS, NS, NS, NS, NS)
428
+ )
429
+ if xmlsuffixpos2:
430
+ suffixTag = (
431
+ xmlsuffixpos[0].text + ":" + xmlsuffixpos2[0].text
432
+ )
433
+ else:
434
+ suffixTag = xmlsuffixpos[0].text
435
+ except:
436
+ pass
437
+ if suffixTag:
438
+ tag += "~" + suffixTag
439
+ word = (word, tag)
440
+ # relational
441
+ # the gold standard is stored in
442
+ # <mor></mor><mor type="trn"><gra type="grt">
443
+ if relation == True:
444
+ for xmlstem_rel in xmlword.findall(
445
+ f".//{{{NS}}}mor/{{{NS}}}gra"
446
+ ):
447
+ if not xmlstem_rel.get("type") == "grt":
448
+ word = (
449
+ word[0],
450
+ word[1],
451
+ xmlstem_rel.get("index")
452
+ + "|"
453
+ + xmlstem_rel.get("head")
454
+ + "|"
455
+ + xmlstem_rel.get("relation"),
456
+ )
457
+ else:
458
+ word = (
459
+ word[0],
460
+ word[1],
461
+ word[2],
462
+ word[0],
463
+ word[1],
464
+ xmlstem_rel.get("index")
465
+ + "|"
466
+ + xmlstem_rel.get("head")
467
+ + "|"
468
+ + xmlstem_rel.get("relation"),
469
+ )
470
+ try:
471
+ for xmlpost_rel in xmlword.findall(
472
+ f".//{{{NS}}}mor/{{{NS}}}mor-post/{{{NS}}}gra"
473
+ ):
474
+ if not xmlpost_rel.get("type") == "grt":
475
+ suffixStem = (
476
+ suffixStem[0],
477
+ suffixStem[1],
478
+ xmlpost_rel.get("index")
479
+ + "|"
480
+ + xmlpost_rel.get("head")
481
+ + "|"
482
+ + xmlpost_rel.get("relation"),
483
+ )
484
+ else:
485
+ suffixStem = (
486
+ suffixStem[0],
487
+ suffixStem[1],
488
+ suffixStem[2],
489
+ suffixStem[0],
490
+ suffixStem[1],
491
+ xmlpost_rel.get("index")
492
+ + "|"
493
+ + xmlpost_rel.get("head")
494
+ + "|"
495
+ + xmlpost_rel.get("relation"),
496
+ )
497
+ except:
498
+ pass
499
+ sents.append(word)
500
+ if sent or relation:
501
+ results.append(sents)
502
+ else:
503
+ results.extend(sents)
504
+ return LazyMap(lambda x: x, results)
505
+
506
+ # Ready-to-use browser opener
507
+
508
+ """
509
+ The base URL for viewing files on the childes website. This
510
+ shouldn't need to be changed, unless CHILDES changes the configuration
511
+ of their server or unless the user sets up their own corpus webserver.
512
+ """
513
+ childes_url_base = r"https://childes.talkbank.org/browser/index.php?url="
514
+
515
+ def webview_file(self, fileid, urlbase=None):
516
+ """Map a corpus file to its web version on the CHILDES website,
517
+ and open it in a web browser.
518
+
519
+ The complete URL to be used is:
520
+ childes.childes_url_base + urlbase + fileid.replace('.xml', '.cha')
521
+
522
+ If no urlbase is passed, we try to calculate it. This
523
+ requires that the childes corpus was set up to mirror the
524
+ folder hierarchy under childes.psy.cmu.edu/data-xml/, e.g.:
525
+ nltk_data/corpora/childes/Eng-USA/Cornell/??? or
526
+ nltk_data/corpora/childes/Romance/Spanish/Aguirre/???
527
+
528
+ The function first looks (as a special case) if "Eng-USA" is
529
+ on the path consisting of <corpus root>+fileid; then if
530
+ "childes", possibly followed by "data-xml", appears. If neither
531
+ one is found, we use the unmodified fileid and hope for the best.
532
+ If this is not right, specify urlbase explicitly, e.g., if the
533
+ corpus root points to the Cornell folder, urlbase='Eng-USA/Cornell'.
534
+ """
535
+
536
+ import webbrowser
537
+
538
+ if urlbase:
539
+ path = urlbase + "/" + fileid
540
+ else:
541
+ full = self.root + "/" + fileid
542
+ full = re.sub(r"\\", "/", full)
543
+ if "/childes/" in full.lower():
544
+ # Discard /data-xml/ if present
545
+ path = re.findall(r"(?i)/childes(?:/data-xml)?/(.*)\.xml", full)[0]
546
+ elif "eng-usa" in full.lower():
547
+ path = "Eng-USA/" + re.findall(r"(?i)/Eng-USA/(.*)\.xml", full)[0]
548
+ else:
549
+ path = fileid
550
+
551
+ # Strip ".xml" and add ".cha", as necessary:
552
+ if path.endswith(".xml"):
553
+ path = path[:-4]
554
+
555
+ if not path.endswith(".cha"):
556
+ path = path + ".cha"
557
+
558
+ url = self.childes_url_base + path
559
+
560
+ webbrowser.open_new_tab(url)
561
+ print("Opening in browser:", url)
562
+ # Pausing is a good idea, but it's up to the user...
563
+ # raw_input("Hit Return to continue")
564
+
565
+
566
+ def demo(corpus_root=None):
567
+ """
568
+ The CHILDES corpus should be manually downloaded and saved
569
+ to ``[NLTK_Data_Dir]/corpora/childes/``
570
+ """
571
+ if not corpus_root:
572
+ from nltk.data import find
573
+
574
+ corpus_root = find("corpora/childes/data-xml/Eng-USA/")
575
+
576
+ try:
577
+ childes = CHILDESCorpusReader(corpus_root, ".*.xml")
578
+ # describe all corpus
579
+ for file in childes.fileids()[:5]:
580
+ corpus = ""
581
+ corpus_id = ""
582
+ for (key, value) in childes.corpus(file)[0].items():
583
+ if key == "Corpus":
584
+ corpus = value
585
+ if key == "Id":
586
+ corpus_id = value
587
+ print("Reading", corpus, corpus_id, " .....")
588
+ print("words:", childes.words(file)[:7], "...")
589
+ print(
590
+ "words with replaced words:",
591
+ childes.words(file, replace=True)[:7],
592
+ " ...",
593
+ )
594
+ print("words with pos tags:", childes.tagged_words(file)[:7], " ...")
595
+ print("words (only MOT):", childes.words(file, speaker="MOT")[:7], "...")
596
+ print("words (only CHI):", childes.words(file, speaker="CHI")[:7], "...")
597
+ print("stemmed words:", childes.words(file, stem=True)[:7], " ...")
598
+ print(
599
+ "words with relations and pos-tag:",
600
+ childes.words(file, relation=True)[:5],
601
+ " ...",
602
+ )
603
+ print("sentence:", childes.sents(file)[:2], " ...")
604
+ for (participant, values) in childes.participants(file)[0].items():
605
+ for (key, value) in values.items():
606
+ print("\tparticipant", participant, key, ":", value)
607
+ print("num of sent:", len(childes.sents(file)))
608
+ print("num of morphemes:", len(childes.words(file, stem=True)))
609
+ print("age:", childes.age(file))
610
+ print("age in month:", childes.age(file, month=True))
611
+ print("MLU:", childes.MLU(file))
612
+ print()
613
+
614
+ except LookupError as e:
615
+ print(
616
+ """The CHILDES corpus, or the parts you need, should be manually
617
+ downloaded from https://childes.talkbank.org/data-xml/ and saved at
618
+ [NLTK_Data_Dir]/corpora/childes/
619
+ Alternately, you can call the demo with the path to a portion of the CHILDES corpus, e.g.:
620
+ demo('/path/to/childes/data-xml/Eng-USA/')
621
+ """
622
+ )
623
+ # corpus_root_http = urllib2.urlopen('https://childes.talkbank.org/data-xml/Eng-USA/Bates.zip')
624
+ # corpus_root_http_bates = zipfile.ZipFile(cStringIO.StringIO(corpus_root_http.read()))
625
+ ##this fails
626
+ # childes = CHILDESCorpusReader(corpus_root_http_bates,corpus_root_http_bates.namelist())
627
+
628
+
629
+ if __name__ == "__main__":
630
+ demo()
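
For quick reference, here is a standalone restatement of the age arithmetic that ``convert_age`` performs above. It is a minimal sketch: the helper name and the sample age strings are invented for illustration and are not part of the reader.

    import re

    def childes_age_to_months(age_str):
        # CHILDES encodes ages as strings such as "P2Y6M14D" (2 years, 6 months, 14 days).
        m = re.match(r"P(\d+)Y(\d+)M?(\d?\d?)D?", age_str)
        months = int(m.group(1)) * 12 + int(m.group(2))
        try:
            # round up when more than half a month of days remains
            if int(m.group(3)) > 15:
                months += 1
        except ValueError:  # no day component in the string
            pass
        return months

    print(childes_age_to_months("P2Y6M14D"))  # 30
    print(childes_age_to_months("P2Y6M20D"))  # 31
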
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/chunked.py ADDED
@@ -0,0 +1,273 @@
1
+ # Natural Language Toolkit: Chunked Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A reader for corpora that contain chunked (and optionally tagged)
11
+ documents.
12
+ """
13
+
14
+ import codecs
15
+ import os.path
16
+
17
+ import nltk
18
+ from nltk.chunk import tagstr2tree
19
+ from nltk.corpus.reader.api import *
20
+ from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
21
+ from nltk.corpus.reader.util import *
22
+ from nltk.tokenize import *
23
+ from nltk.tree import Tree
24
+
25
+
26
+ class ChunkedCorpusReader(CorpusReader):
27
+ """
28
+ Reader for chunked (and optionally tagged) corpora. Paragraphs
29
+ are split using a block reader. They are then tokenized into
30
+ sentences using a sentence tokenizer. Finally, these sentences
31
+ are parsed into chunk trees using a string-to-chunktree conversion
32
+ function. Each of these steps can be performed using a default
33
+ function or a custom function. By default, paragraphs are split
34
+ on blank lines; sentences are listed one per line; and sentences
35
+ are parsed into chunk trees using ``nltk.chunk.tagstr2tree``.
36
+ """
37
+
38
+ def __init__(
39
+ self,
40
+ root,
41
+ fileids,
42
+ extension="",
43
+ str2chunktree=tagstr2tree,
44
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
45
+ para_block_reader=read_blankline_block,
46
+ encoding="utf8",
47
+ tagset=None,
48
+ ):
49
+ """
50
+ :param root: The root directory for this corpus.
51
+ :param fileids: A list or regexp specifying the fileids in this corpus.
52
+ """
53
+ CorpusReader.__init__(self, root, fileids, encoding)
54
+ self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader, tagset)
55
+ """Arguments for corpus views generated by this corpus: a tuple
56
+ (str2chunktree, sent_tokenizer, para_block_tokenizer)"""
57
+
58
+ def words(self, fileids=None):
59
+ """
60
+ :return: the given file(s) as a list of words
61
+ and punctuation symbols.
62
+ :rtype: list(str)
63
+ """
64
+ return concat(
65
+ [
66
+ ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args)
67
+ for (f, enc) in self.abspaths(fileids, True)
68
+ ]
69
+ )
70
+
71
+ def sents(self, fileids=None):
72
+ """
73
+ :return: the given file(s) as a list of
74
+ sentences or utterances, each encoded as a list of word
75
+ strings.
76
+ :rtype: list(list(str))
77
+ """
78
+ return concat(
79
+ [
80
+ ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args)
81
+ for (f, enc) in self.abspaths(fileids, True)
82
+ ]
83
+ )
84
+
85
+ def paras(self, fileids=None):
86
+ """
87
+ :return: the given file(s) as a list of
88
+ paragraphs, each encoded as a list of sentences, which are
89
+ in turn encoded as lists of word strings.
90
+ :rtype: list(list(list(str)))
91
+ """
92
+ return concat(
93
+ [
94
+ ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args)
95
+ for (f, enc) in self.abspaths(fileids, True)
96
+ ]
97
+ )
98
+
99
+ def tagged_words(self, fileids=None, tagset=None):
100
+ """
101
+ :return: the given file(s) as a list of tagged
102
+ words and punctuation symbols, encoded as tuples
103
+ ``(word,tag)``.
104
+ :rtype: list(tuple(str,str))
105
+ """
106
+ return concat(
107
+ [
108
+ ChunkedCorpusView(
109
+ f, enc, 1, 0, 0, 0, *self._cv_args, target_tagset=tagset
110
+ )
111
+ for (f, enc) in self.abspaths(fileids, True)
112
+ ]
113
+ )
114
+
115
+ def tagged_sents(self, fileids=None, tagset=None):
116
+ """
117
+ :return: the given file(s) as a list of
118
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
119
+
120
+ :rtype: list(list(tuple(str,str)))
121
+ """
122
+ return concat(
123
+ [
124
+ ChunkedCorpusView(
125
+ f, enc, 1, 1, 0, 0, *self._cv_args, target_tagset=tagset
126
+ )
127
+ for (f, enc) in self.abspaths(fileids, True)
128
+ ]
129
+ )
130
+
131
+ def tagged_paras(self, fileids=None, tagset=None):
132
+ """
133
+ :return: the given file(s) as a list of
134
+ paragraphs, each encoded as a list of sentences, which are
135
+ in turn encoded as lists of ``(word,tag)`` tuples.
136
+ :rtype: list(list(list(tuple(str,str))))
137
+ """
138
+ return concat(
139
+ [
140
+ ChunkedCorpusView(
141
+ f, enc, 1, 1, 1, 0, *self._cv_args, target_tagset=tagset
142
+ )
143
+ for (f, enc) in self.abspaths(fileids, True)
144
+ ]
145
+ )
146
+
147
+ def chunked_words(self, fileids=None, tagset=None):
148
+ """
149
+ :return: the given file(s) as a list of tagged
150
+ words and chunks. Words are encoded as ``(word, tag)``
151
+ tuples (if the corpus has tags) or word strings (if the
152
+ corpus has no tags). Chunks are encoded as depth-one
153
+ trees over ``(word,tag)`` tuples or word strings.
154
+ :rtype: list(tuple(str,str) and Tree)
155
+ """
156
+ return concat(
157
+ [
158
+ ChunkedCorpusView(
159
+ f, enc, 1, 0, 0, 1, *self._cv_args, target_tagset=tagset
160
+ )
161
+ for (f, enc) in self.abspaths(fileids, True)
162
+ ]
163
+ )
164
+
165
+ def chunked_sents(self, fileids=None, tagset=None):
166
+ """
167
+ :return: the given file(s) as a list of
168
+ sentences, each encoded as a shallow Tree. The leaves
169
+ of these trees are encoded as ``(word, tag)`` tuples (if
170
+ the corpus has tags) or word strings (if the corpus has no
171
+ tags).
172
+ :rtype: list(Tree)
173
+ """
174
+ return concat(
175
+ [
176
+ ChunkedCorpusView(
177
+ f, enc, 1, 1, 0, 1, *self._cv_args, target_tagset=tagset
178
+ )
179
+ for (f, enc) in self.abspaths(fileids, True)
180
+ ]
181
+ )
182
+
183
+ def chunked_paras(self, fileids=None, tagset=None):
184
+ """
185
+ :return: the given file(s) as a list of
186
+ paragraphs, each encoded as a list of sentences, which are
187
+ in turn encoded as a shallow Tree. The leaves of these
188
+ trees are encoded as ``(word, tag)`` tuples (if the corpus
189
+ has tags) or word strings (if the corpus has no tags).
190
+ :rtype: list(list(Tree))
191
+ """
192
+ return concat(
193
+ [
194
+ ChunkedCorpusView(
195
+ f, enc, 1, 1, 1, 1, *self._cv_args, target_tagset=tagset
196
+ )
197
+ for (f, enc) in self.abspaths(fileids, True)
198
+ ]
199
+ )
200
+
201
+ def _read_block(self, stream):
202
+ return [tagstr2tree(t) for t in read_blankline_block(stream)]
203
+
204
+
205
+ class ChunkedCorpusView(StreamBackedCorpusView):
206
+ def __init__(
207
+ self,
208
+ fileid,
209
+ encoding,
210
+ tagged,
211
+ group_by_sent,
212
+ group_by_para,
213
+ chunked,
214
+ str2chunktree,
215
+ sent_tokenizer,
216
+ para_block_reader,
217
+ source_tagset=None,
218
+ target_tagset=None,
219
+ ):
220
+ StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
221
+ self._tagged = tagged
222
+ self._group_by_sent = group_by_sent
223
+ self._group_by_para = group_by_para
224
+ self._chunked = chunked
225
+ self._str2chunktree = str2chunktree
226
+ self._sent_tokenizer = sent_tokenizer
227
+ self._para_block_reader = para_block_reader
228
+ self._source_tagset = source_tagset
229
+ self._target_tagset = target_tagset
230
+
231
+ def read_block(self, stream):
232
+ block = []
233
+ for para_str in self._para_block_reader(stream):
234
+ para = []
235
+ for sent_str in self._sent_tokenizer.tokenize(para_str):
236
+ sent = self._str2chunktree(
237
+ sent_str,
238
+ source_tagset=self._source_tagset,
239
+ target_tagset=self._target_tagset,
240
+ )
241
+
242
+ # If requested, throw away the tags.
243
+ if not self._tagged:
244
+ sent = self._untag(sent)
245
+
246
+ # If requested, throw away the chunks.
247
+ if not self._chunked:
248
+ sent = sent.leaves()
249
+
250
+ # Add the sentence to `para`.
251
+ if self._group_by_sent:
252
+ para.append(sent)
253
+ else:
254
+ para.extend(sent)
255
+
256
+ # Add the paragraph to `block`.
257
+ if self._group_by_para:
258
+ block.append(para)
259
+ else:
260
+ block.extend(para)
261
+
262
+ # Return the block
263
+ return block
264
+
265
+ def _untag(self, tree):
266
+ for i, child in enumerate(tree):
267
+ if isinstance(child, Tree):
268
+ self._untag(child)
269
+ elif isinstance(child, tuple):
270
+ tree[i] = child[0]
271
+ else:
272
+ raise ValueError("expected child to be Tree or tuple")
273
+ return tree
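
A minimal usage sketch for the reader above; the temporary directory, file name, and sample sentence are invented, and the default ``tagstr2tree`` conversion (square-bracketed chunks over word/tag tokens) is assumed.

    import os
    import tempfile
    from nltk.corpus.reader import ChunkedCorpusReader

    # Invented sample data: one chunked, tagged sentence per line.
    root = tempfile.mkdtemp()
    with open(os.path.join(root, "sample.chunk"), "w", encoding="utf8") as f:
        f.write("[ the/DT little/JJ cat/NN ] sat/VBD on/IN [ the/DT mat/NN ]\n")

    reader = ChunkedCorpusReader(root, r".*\.chunk")
    print(reader.words()[:4])         # ['the', 'little', 'cat', 'sat']
    print(reader.tagged_words()[:2])  # [('the', 'DT'), ('little', 'JJ')]
    print(reader.chunked_sents()[0])  # (S (NP the/DT little/JJ cat/NN) sat/VBD ...)
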
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/comparative_sents.py ADDED
@@ -0,0 +1,309 @@
1
+ # Natural Language Toolkit: Comparative Sentence Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Pierpaolo Pantone <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for the Comparative Sentence Dataset.
10
+
11
+ - Comparative Sentence Dataset information -
12
+
13
+ Annotated by: Nitin Jindal and Bing Liu, 2006.
14
+ Department of Computer Science
15
+ University of Illinois at Chicago
16
+
17
+ Contact: Nitin Jindal, [email protected]
18
+ Bing Liu, [email protected] (https://www.cs.uic.edu/~liub)
19
+
20
+ Distributed with permission.
21
+
22
+ Related papers:
23
+
24
+ - Nitin Jindal and Bing Liu. "Identifying Comparative Sentences in Text Documents".
25
+ Proceedings of the ACM SIGIR International Conference on Information Retrieval
26
+ (SIGIR-06), 2006.
27
+
28
+ - Nitin Jindal and Bing Liu. "Mining Comparative Sentences and Relations".
29
+ Proceedings of Twenty First National Conference on Artificial Intelligence
30
+ (AAAI-2006), 2006.
31
+
32
+ - Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences".
33
+ Proceedings of the 22nd International Conference on Computational Linguistics
34
+ (Coling-2008), Manchester, 18-22 August, 2008.
35
+ """
36
+ import re
37
+
38
+ from nltk.corpus.reader.api import *
39
+ from nltk.tokenize import *
40
+
41
+ # Regular expressions for dataset components
42
+ STARS = re.compile(r"^\*+$")
43
+ COMPARISON = re.compile(r"<cs-[1234]>")
44
+ CLOSE_COMPARISON = re.compile(r"</cs-[1234]>")
45
+ GRAD_COMPARISON = re.compile(r"<cs-[123]>")
46
+ NON_GRAD_COMPARISON = re.compile(r"<cs-4>")
47
+ ENTITIES_FEATS = re.compile(r"(\d)_((?:[\.\w\s/-](?!\d_))+)")
48
+ KEYWORD = re.compile(r"\(([^\(]*)\)$")
49
+
50
+
51
+ class Comparison:
52
+ """
53
+ A Comparison represents a comparative sentence and its constituents.
54
+ """
55
+
56
+ def __init__(
57
+ self,
58
+ text=None,
59
+ comp_type=None,
60
+ entity_1=None,
61
+ entity_2=None,
62
+ feature=None,
63
+ keyword=None,
64
+ ):
65
+ """
66
+ :param text: a string (optionally tokenized) containing a comparison.
67
+ :param comp_type: an integer defining the type of comparison expressed.
68
+ Values can be: 1 (Non-equal gradable), 2 (Equative), 3 (Superlative),
69
+ 4 (Non-gradable).
70
+ :param entity_1: the first entity considered in the comparison relation.
71
+ :param entity_2: the second entity considered in the comparison relation.
72
+ :param feature: the feature considered in the comparison relation.
73
+ :param keyword: the word or phrase which is used for that comparative relation.
74
+ """
75
+ self.text = text
76
+ self.comp_type = comp_type
77
+ self.entity_1 = entity_1
78
+ self.entity_2 = entity_2
79
+ self.feature = feature
80
+ self.keyword = keyword
81
+
82
+ def __repr__(self):
83
+ return (
84
+ 'Comparison(text="{}", comp_type={}, entity_1="{}", entity_2="{}", '
85
+ 'feature="{}", keyword="{}")'
86
+ ).format(
87
+ self.text,
88
+ self.comp_type,
89
+ self.entity_1,
90
+ self.entity_2,
91
+ self.feature,
92
+ self.keyword,
93
+ )
94
+
95
+
96
+ class ComparativeSentencesCorpusReader(CorpusReader):
97
+ """
98
+ Reader for the Comparative Sentence Dataset by Jindal and Liu (2006).
99
+
100
+ >>> from nltk.corpus import comparative_sentences
101
+ >>> comparison = comparative_sentences.comparisons()[0]
102
+ >>> comparison.text # doctest: +NORMALIZE_WHITESPACE
103
+ ['its', 'fast-forward', 'and', 'rewind', 'work', 'much', 'more', 'smoothly',
104
+ 'and', 'consistently', 'than', 'those', 'of', 'other', 'models', 'i', "'ve",
105
+ 'had', '.']
106
+ >>> comparison.entity_2
107
+ 'models'
108
+ >>> (comparison.feature, comparison.keyword)
109
+ ('rewind', 'more')
110
+ >>> len(comparative_sentences.comparisons())
111
+ 853
112
+ """
113
+
114
+ CorpusView = StreamBackedCorpusView
115
+
116
+ def __init__(
117
+ self,
118
+ root,
119
+ fileids,
120
+ word_tokenizer=WhitespaceTokenizer(),
121
+ sent_tokenizer=None,
122
+ encoding="utf8",
123
+ ):
124
+ """
125
+ :param root: The root directory for this corpus.
126
+ :param fileids: a list or regexp specifying the fileids in this corpus.
127
+ :param word_tokenizer: tokenizer for breaking sentences or paragraphs
128
+ into words. Default: `WhitespaceTokenizer`
129
+ :param sent_tokenizer: tokenizer for breaking paragraphs into sentences.
130
+ :param encoding: the encoding that should be used to read the corpus.
131
+ """
132
+
133
+ CorpusReader.__init__(self, root, fileids, encoding)
134
+ self._word_tokenizer = word_tokenizer
135
+ self._sent_tokenizer = sent_tokenizer
136
+ self._readme = "README.txt"
137
+
138
+ def comparisons(self, fileids=None):
139
+ """
140
+ Return all comparisons in the corpus.
141
+
142
+ :param fileids: a list or regexp specifying the ids of the files whose
143
+ comparisons have to be returned.
144
+ :return: the given file(s) as a list of Comparison objects.
145
+ :rtype: list(Comparison)
146
+ """
147
+ if fileids is None:
148
+ fileids = self._fileids
149
+ elif isinstance(fileids, str):
150
+ fileids = [fileids]
151
+ return concat(
152
+ [
153
+ self.CorpusView(path, self._read_comparison_block, encoding=enc)
154
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
155
+ ]
156
+ )
157
+
158
+ def keywords(self, fileids=None):
159
+ """
160
+ Return a set of all keywords used in the corpus.
161
+
162
+ :param fileids: a list or regexp specifying the ids of the files whose
163
+ keywords have to be returned.
164
+ :return: the set of keywords and comparative phrases used in the corpus.
165
+ :rtype: set(str)
166
+ """
167
+ all_keywords = concat(
168
+ [
169
+ self.CorpusView(path, self._read_keyword_block, encoding=enc)
170
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
171
+ ]
172
+ )
173
+
174
+ keywords_set = {keyword.lower() for keyword in all_keywords if keyword}
175
+ return keywords_set
176
+
177
+ def keywords_readme(self):
178
+ """
179
+ Return the list of words and constituents considered as clues of a
180
+ comparison (from listOfkeywords.txt).
181
+ """
182
+ keywords = []
183
+ with self.open("listOfkeywords.txt") as fp:
184
+ raw_text = fp.read()
185
+ for line in raw_text.split("\n"):
186
+ if not line or line.startswith("//"):
187
+ continue
188
+ keywords.append(line.strip())
189
+ return keywords
190
+
191
+ def sents(self, fileids=None):
192
+ """
193
+ Return all sentences in the corpus.
194
+
195
+ :param fileids: a list or regexp specifying the ids of the files whose
196
+ sentences have to be returned.
197
+ :return: all sentences of the corpus as lists of tokens (or as plain
198
+ strings, if no word tokenizer is specified).
199
+ :rtype: list(list(str)) or list(str)
200
+ """
201
+ return concat(
202
+ [
203
+ self.CorpusView(path, self._read_sent_block, encoding=enc)
204
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
205
+ ]
206
+ )
207
+
208
+ def words(self, fileids=None):
209
+ """
210
+ Return all words and punctuation symbols in the corpus.
211
+
212
+ :param fileids: a list or regexp specifying the ids of the files whose
213
+ words have to be returned.
214
+ :return: the given file(s) as a list of words and punctuation symbols.
215
+ :rtype: list(str)
216
+ """
217
+ return concat(
218
+ [
219
+ self.CorpusView(path, self._read_word_block, encoding=enc)
220
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
221
+ ]
222
+ )
223
+
224
+ def _read_comparison_block(self, stream):
225
+ while True:
226
+ line = stream.readline()
227
+ if not line:
228
+ return [] # end of file.
229
+ comparison_tags = re.findall(COMPARISON, line)
230
+ if comparison_tags:
231
+ grad_comparisons = re.findall(GRAD_COMPARISON, line)
232
+ non_grad_comparisons = re.findall(NON_GRAD_COMPARISON, line)
233
+ # Advance to the next line (it contains the comparative sentence)
234
+ comparison_text = stream.readline().strip()
235
+ if self._word_tokenizer:
236
+ comparison_text = self._word_tokenizer.tokenize(comparison_text)
237
+ # Skip the next line (it contains closing comparison tags)
238
+ stream.readline()
239
+ # If gradable comparisons are found, create Comparison instances
240
+ # and populate their fields
241
+ comparison_bundle = []
242
+ if grad_comparisons:
243
+ # Each comparison tag has its own relations on a separate line
244
+ for comp in grad_comparisons:
245
+ comp_type = int(re.match(r"<cs-(\d)>", comp).group(1))
246
+ comparison = Comparison(
247
+ text=comparison_text, comp_type=comp_type
248
+ )
249
+ line = stream.readline()
250
+ entities_feats = ENTITIES_FEATS.findall(line)
251
+ if entities_feats:
252
+ for (code, entity_feat) in entities_feats:
253
+ if code == "1":
254
+ comparison.entity_1 = entity_feat.strip()
255
+ elif code == "2":
256
+ comparison.entity_2 = entity_feat.strip()
257
+ elif code == "3":
258
+ comparison.feature = entity_feat.strip()
259
+ keyword = KEYWORD.findall(line)
260
+ if keyword:
261
+ comparison.keyword = keyword[0]
262
+ comparison_bundle.append(comparison)
263
+ # If non-gradable comparisons are found, create a simple Comparison
264
+ # instance for each one
265
+ if non_grad_comparisons:
266
+ for comp in non_grad_comparisons:
267
+ # comp_type in this case should always be 4.
268
+ comp_type = int(re.match(r"<cs-(\d)>", comp).group(1))
269
+ comparison = Comparison(
270
+ text=comparison_text, comp_type=comp_type
271
+ )
272
+ comparison_bundle.append(comparison)
273
+ # Flatten the list of comparisons before returning them
274
+ # return concat([comparison_bundle])
275
+ return comparison_bundle
276
+
277
+ def _read_keyword_block(self, stream):
278
+ keywords = []
279
+ for comparison in self._read_comparison_block(stream):
280
+ keywords.append(comparison.keyword)
281
+ return keywords
282
+
283
+ def _read_sent_block(self, stream):
284
+ while True:
285
+ line = stream.readline()
286
+ if re.match(STARS, line):
287
+ while True:
288
+ line = stream.readline()
289
+ if re.match(STARS, line):
290
+ break
291
+ continue
292
+ if (
293
+ not re.findall(COMPARISON, line)
294
+ and not ENTITIES_FEATS.findall(line)
295
+ and not re.findall(CLOSE_COMPARISON, line)
296
+ ):
297
+ if self._sent_tokenizer:
298
+ return [
299
+ self._word_tokenizer.tokenize(sent)
300
+ for sent in self._sent_tokenizer.tokenize(line)
301
+ ]
302
+ else:
303
+ return [self._word_tokenizer.tokenize(line)]
304
+
305
+ def _read_word_block(self, stream):
306
+ words = []
307
+ for sent in self._read_sent_block(stream):
308
+ words.extend(sent)
309
+ return words
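
A short usage sketch built only on the public methods defined above; it assumes the dataset has already been fetched (e.g. via ``nltk.download('comparative_sentences')``).

    from nltk.corpus import comparative_sentences

    # comp_type 3 marks superlative comparisons (see the Comparison docstring).
    superlatives = [
        c for c in comparative_sentences.comparisons() if c.comp_type == 3
    ]
    print(len(superlatives))
    print(" ".join(superlatives[0].text))

    # The comparative keywords seen in the annotated data.
    print(sorted(comparative_sentences.keywords())[:10])
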
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/conll.py ADDED
@@ -0,0 +1,579 @@
1
+ # Natural Language Toolkit: CONLL Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Read CoNLL-style chunk fileids.
11
+ """
12
+
13
+ import textwrap
14
+
15
+ from nltk.corpus.reader.api import *
16
+ from nltk.corpus.reader.util import *
17
+ from nltk.tag import map_tag
18
+ from nltk.tree import Tree
19
+ from nltk.util import LazyConcatenation, LazyMap
20
+
21
+
22
+ class ConllCorpusReader(CorpusReader):
23
+ """
24
+ A corpus reader for CoNLL-style files. These files consist of a
25
+ series of sentences, separated by blank lines. Each sentence is
26
+ encoded using a table (or "grid") of values, where each line
27
+ corresponds to a single word, and each column corresponds to an
28
+ annotation type. The set of columns used by CoNLL-style files can
29
+ vary from corpus to corpus; the ``ConllCorpusReader`` constructor
30
+ therefore takes an argument, ``columntypes``, which is used to
31
+ specify the columns that are used by a given corpus. By default
32
+ columns are split by consecutive whitespaces, with the
33
+ ``separator`` argument you can set a string to split by (e.g.
34
+ ``\'\t\'``).
35
+
36
+
37
+ @todo: Add support for reading from corpora where different
38
+ parallel files contain different columns.
39
+ @todo: Possibly add caching of the grid corpus view? This would
40
+ allow the same grid view to be used by different data access
41
+ methods (eg words() and parsed_sents() could both share the
42
+ same grid corpus view object).
43
+ @todo: Better support for -DOCSTART-. Currently, we just ignore
44
+ it, but it could be used to define methods that retrieve a
45
+ document at a time (eg parsed_documents()).
46
+ """
47
+
48
+ # /////////////////////////////////////////////////////////////////
49
+ # Column Types
50
+ # /////////////////////////////////////////////////////////////////
51
+
52
+ WORDS = "words" #: column type for words
53
+ POS = "pos" #: column type for part-of-speech tags
54
+ TREE = "tree" #: column type for parse trees
55
+ CHUNK = "chunk" #: column type for chunk structures
56
+ NE = "ne" #: column type for named entities
57
+ SRL = "srl" #: column type for semantic role labels
58
+ IGNORE = "ignore" #: column type for column that should be ignored
59
+
60
+ #: A list of all column types supported by the conll corpus reader.
61
+ COLUMN_TYPES = (WORDS, POS, TREE, CHUNK, NE, SRL, IGNORE)
62
+
63
+ # /////////////////////////////////////////////////////////////////
64
+ # Constructor
65
+ # /////////////////////////////////////////////////////////////////
66
+
67
+ def __init__(
68
+ self,
69
+ root,
70
+ fileids,
71
+ columntypes,
72
+ chunk_types=None,
73
+ root_label="S",
74
+ pos_in_tree=False,
75
+ srl_includes_roleset=True,
76
+ encoding="utf8",
77
+ tree_class=Tree,
78
+ tagset=None,
79
+ separator=None,
80
+ ):
81
+ for columntype in columntypes:
82
+ if columntype not in self.COLUMN_TYPES:
83
+ raise ValueError("Bad column type %r" % columntype)
84
+ if isinstance(chunk_types, str):
85
+ chunk_types = [chunk_types]
86
+ self._chunk_types = chunk_types
87
+ self._colmap = {c: i for (i, c) in enumerate(columntypes)}
88
+ self._pos_in_tree = pos_in_tree
89
+ self._root_label = root_label # for chunks
90
+ self._srl_includes_roleset = srl_includes_roleset
91
+ self._tree_class = tree_class
92
+ CorpusReader.__init__(self, root, fileids, encoding)
93
+ self._tagset = tagset
94
+ self.sep = separator
95
+
96
+ # /////////////////////////////////////////////////////////////////
97
+ # Data Access Methods
98
+ # /////////////////////////////////////////////////////////////////
99
+
100
+ def words(self, fileids=None):
101
+ self._require(self.WORDS)
102
+ return LazyConcatenation(LazyMap(self._get_words, self._grids(fileids)))
103
+
104
+ def sents(self, fileids=None):
105
+ self._require(self.WORDS)
106
+ return LazyMap(self._get_words, self._grids(fileids))
107
+
108
+ def tagged_words(self, fileids=None, tagset=None):
109
+ self._require(self.WORDS, self.POS)
110
+
111
+ def get_tagged_words(grid):
112
+ return self._get_tagged_words(grid, tagset)
113
+
114
+ return LazyConcatenation(LazyMap(get_tagged_words, self._grids(fileids)))
115
+
116
+ def tagged_sents(self, fileids=None, tagset=None):
117
+ self._require(self.WORDS, self.POS)
118
+
119
+ def get_tagged_words(grid):
120
+ return self._get_tagged_words(grid, tagset)
121
+
122
+ return LazyMap(get_tagged_words, self._grids(fileids))
123
+
124
+ def chunked_words(self, fileids=None, chunk_types=None, tagset=None):
125
+ self._require(self.WORDS, self.POS, self.CHUNK)
126
+ if chunk_types is None:
127
+ chunk_types = self._chunk_types
128
+
129
+ def get_chunked_words(grid): # capture chunk_types as local var
130
+ return self._get_chunked_words(grid, chunk_types, tagset)
131
+
132
+ return LazyConcatenation(LazyMap(get_chunked_words, self._grids(fileids)))
133
+
134
+ def chunked_sents(self, fileids=None, chunk_types=None, tagset=None):
135
+ self._require(self.WORDS, self.POS, self.CHUNK)
136
+ if chunk_types is None:
137
+ chunk_types = self._chunk_types
138
+
139
+ def get_chunked_words(grid): # capture chunk_types as local var
140
+ return self._get_chunked_words(grid, chunk_types, tagset)
141
+
142
+ return LazyMap(get_chunked_words, self._grids(fileids))
143
+
144
+ def parsed_sents(self, fileids=None, pos_in_tree=None, tagset=None):
145
+ self._require(self.WORDS, self.POS, self.TREE)
146
+ if pos_in_tree is None:
147
+ pos_in_tree = self._pos_in_tree
148
+
149
+ def get_parsed_sent(grid): # capture pos_in_tree as local var
150
+ return self._get_parsed_sent(grid, pos_in_tree, tagset)
151
+
152
+ return LazyMap(get_parsed_sent, self._grids(fileids))
153
+
154
+ def srl_spans(self, fileids=None):
155
+ self._require(self.SRL)
156
+ return LazyMap(self._get_srl_spans, self._grids(fileids))
157
+
158
+ def srl_instances(self, fileids=None, pos_in_tree=None, flatten=True):
159
+ self._require(self.WORDS, self.POS, self.TREE, self.SRL)
160
+ if pos_in_tree is None:
161
+ pos_in_tree = self._pos_in_tree
162
+
163
+ def get_srl_instances(grid): # capture pos_in_tree as local var
164
+ return self._get_srl_instances(grid, pos_in_tree)
165
+
166
+ result = LazyMap(get_srl_instances, self._grids(fileids))
167
+ if flatten:
168
+ result = LazyConcatenation(result)
169
+ return result
170
+
171
+ def iob_words(self, fileids=None, tagset=None):
172
+ """
173
+ :return: a list of word/tag/IOB tuples
174
+ :rtype: list(tuple)
175
+ :param fileids: the list of fileids that make up this corpus
176
+ :type fileids: None or str or list
177
+ """
178
+ self._require(self.WORDS, self.POS, self.CHUNK)
179
+
180
+ def get_iob_words(grid):
181
+ return self._get_iob_words(grid, tagset)
182
+
183
+ return LazyConcatenation(LazyMap(get_iob_words, self._grids(fileids)))
184
+
185
+ def iob_sents(self, fileids=None, tagset=None):
186
+ """
187
+ :return: a list of lists of word/tag/IOB tuples
188
+ :rtype: list(list)
189
+ :param fileids: the list of fileids that make up this corpus
190
+ :type fileids: None or str or list
191
+ """
192
+ self._require(self.WORDS, self.POS, self.CHUNK)
193
+
194
+ def get_iob_words(grid):
195
+ return self._get_iob_words(grid, tagset)
196
+
197
+ return LazyMap(get_iob_words, self._grids(fileids))
198
+
199
+ # /////////////////////////////////////////////////////////////////
200
+ # Grid Reading
201
+ # /////////////////////////////////////////////////////////////////
202
+
203
+ def _grids(self, fileids=None):
204
+ # n.b.: we could cache the object returned here (keyed on
205
+ # fileids), which would let us reuse the same corpus view for
206
+ # different things (eg srl and parse trees).
207
+ return concat(
208
+ [
209
+ StreamBackedCorpusView(fileid, self._read_grid_block, encoding=enc)
210
+ for (fileid, enc) in self.abspaths(fileids, True)
211
+ ]
212
+ )
213
+
214
+ def _read_grid_block(self, stream):
215
+ grids = []
216
+ for block in read_blankline_block(stream):
217
+ block = block.strip()
218
+ if not block:
219
+ continue
220
+
221
+ grid = [line.split(self.sep) for line in block.split("\n")]
222
+
223
+ # If there's a docstart row, then discard. ([xx] eventually it
224
+ # would be good to actually use it)
225
+ if grid[0][self._colmap.get("words", 0)] == "-DOCSTART-":
226
+ del grid[0]
227
+
228
+ # Check that the grid is consistent.
229
+ for row in grid:
230
+ if len(row) != len(grid[0]):
231
+ raise ValueError("Inconsistent number of columns:\n%s" % block)
232
+ grids.append(grid)
233
+ return grids
234
+
235
+ # /////////////////////////////////////////////////////////////////
236
+ # Transforms
237
+ # /////////////////////////////////////////////////////////////////
238
+ # given a grid, transform it into some representation (e.g.,
239
+ # a list of words or a parse tree).
240
+
241
+ def _get_words(self, grid):
242
+ return self._get_column(grid, self._colmap["words"])
243
+
244
+ def _get_tagged_words(self, grid, tagset=None):
245
+ pos_tags = self._get_column(grid, self._colmap["pos"])
246
+ if tagset and tagset != self._tagset:
247
+ pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags]
248
+ return list(zip(self._get_column(grid, self._colmap["words"]), pos_tags))
249
+
250
+ def _get_iob_words(self, grid, tagset=None):
251
+ pos_tags = self._get_column(grid, self._colmap["pos"])
252
+ if tagset and tagset != self._tagset:
253
+ pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags]
254
+ return list(
255
+ zip(
256
+ self._get_column(grid, self._colmap["words"]),
257
+ pos_tags,
258
+ self._get_column(grid, self._colmap["chunk"]),
259
+ )
260
+ )
261
+
262
+ def _get_chunked_words(self, grid, chunk_types, tagset=None):
263
+ # n.b.: this method is very similar to conllstr2tree.
264
+ words = self._get_column(grid, self._colmap["words"])
265
+ pos_tags = self._get_column(grid, self._colmap["pos"])
266
+ if tagset and tagset != self._tagset:
267
+ pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags]
268
+ chunk_tags = self._get_column(grid, self._colmap["chunk"])
269
+
270
+ stack = [Tree(self._root_label, [])]
271
+
272
+ for (word, pos_tag, chunk_tag) in zip(words, pos_tags, chunk_tags):
273
+ if chunk_tag == "O":
274
+ state, chunk_type = "O", ""
275
+ else:
276
+ (state, chunk_type) = chunk_tag.split("-")
277
+ # If it's a chunk we don't care about, treat it as O.
278
+ if chunk_types is not None and chunk_type not in chunk_types:
279
+ state = "O"
280
+ # Treat a mismatching I like a B.
281
+ if state == "I" and chunk_type != stack[-1].label():
282
+ state = "B"
283
+ # For B or I: close any open chunks
284
+ if state in "BO" and len(stack) == 2:
285
+ stack.pop()
286
+ # For B: start a new chunk.
287
+ if state == "B":
288
+ new_chunk = Tree(chunk_type, [])
289
+ stack[-1].append(new_chunk)
290
+ stack.append(new_chunk)
291
+ # Add the word token.
292
+ stack[-1].append((word, pos_tag))
293
+
294
+ return stack[0]
295
+
296
+ def _get_parsed_sent(self, grid, pos_in_tree, tagset=None):
297
+ words = self._get_column(grid, self._colmap["words"])
298
+ pos_tags = self._get_column(grid, self._colmap["pos"])
299
+ if tagset and tagset != self._tagset:
300
+ pos_tags = [map_tag(self._tagset, tagset, t) for t in pos_tags]
301
+ parse_tags = self._get_column(grid, self._colmap["tree"])
302
+
303
+ treestr = ""
304
+ for (word, pos_tag, parse_tag) in zip(words, pos_tags, parse_tags):
305
+ if word == "(":
306
+ word = "-LRB-"
307
+ if word == ")":
308
+ word = "-RRB-"
309
+ if pos_tag == "(":
310
+ pos_tag = "-LRB-"
311
+ if pos_tag == ")":
312
+ pos_tag = "-RRB-"
313
+ (left, right) = parse_tag.split("*")
314
+ right = right.count(")") * ")" # only keep ')'.
315
+ treestr += f"{left} ({pos_tag} {word}) {right}"
316
+ try:
317
+ tree = self._tree_class.fromstring(treestr)
318
+ except (ValueError, IndexError):
319
+ tree = self._tree_class.fromstring(f"({self._root_label} {treestr})")
320
+
321
+ if not pos_in_tree:
322
+ for subtree in tree.subtrees():
323
+ for i, child in enumerate(subtree):
324
+ if (
325
+ isinstance(child, Tree)
326
+ and len(child) == 1
327
+ and isinstance(child[0], str)
328
+ ):
329
+ subtree[i] = (child[0], child.label())
330
+
331
+ return tree
332
+
333
+ def _get_srl_spans(self, grid):
334
+ """
335
+ a list of lists of ((start, end), tag) tuples
336
+ """
337
+ if self._srl_includes_roleset:
338
+ predicates = self._get_column(grid, self._colmap["srl"] + 1)
339
+ start_col = self._colmap["srl"] + 2
340
+ else:
341
+ predicates = self._get_column(grid, self._colmap["srl"])
342
+ start_col = self._colmap["srl"] + 1
343
+
344
+ # Count how many predicates there are. This tells us how many
345
+ # columns to expect for SRL data.
346
+ num_preds = len([p for p in predicates if p != "-"])
347
+
348
+ spanlists = []
349
+ for i in range(num_preds):
350
+ col = self._get_column(grid, start_col + i)
351
+ spanlist = []
352
+ stack = []
353
+ for wordnum, srl_tag in enumerate(col):
354
+ (left, right) = srl_tag.split("*")
355
+ for tag in left.split("("):
356
+ if tag:
357
+ stack.append((tag, wordnum))
358
+ for i in range(right.count(")")):
359
+ (tag, start) = stack.pop()
360
+ spanlist.append(((start, wordnum + 1), tag))
361
+ spanlists.append(spanlist)
362
+
363
+ return spanlists
364
+
365
+ def _get_srl_instances(self, grid, pos_in_tree):
366
+ tree = self._get_parsed_sent(grid, pos_in_tree)
367
+ spanlists = self._get_srl_spans(grid)
368
+ if self._srl_includes_roleset:
369
+ predicates = self._get_column(grid, self._colmap["srl"] + 1)
370
+ rolesets = self._get_column(grid, self._colmap["srl"])
371
+ else:
372
+ predicates = self._get_column(grid, self._colmap["srl"])
373
+ rolesets = [None] * len(predicates)
374
+
375
+ instances = ConllSRLInstanceList(tree)
376
+ for wordnum, predicate in enumerate(predicates):
377
+ if predicate == "-":
378
+ continue
379
+ # Decide which spanlist to use. Don't assume that they're
380
+ # sorted in the same order as the predicates (even though
381
+ # they usually are).
382
+ for spanlist in spanlists:
383
+ for (start, end), tag in spanlist:
384
+ if wordnum in range(start, end) and tag in ("V", "C-V"):
385
+ break
386
+ else:
387
+ continue
388
+ break
389
+ else:
390
+ raise ValueError("No srl column found for %r" % predicate)
391
+ instances.append(
392
+ ConllSRLInstance(tree, wordnum, predicate, rolesets[wordnum], spanlist)
393
+ )
394
+
395
+ return instances
396
+
397
+ # /////////////////////////////////////////////////////////////////
398
+ # Helper Methods
399
+ # /////////////////////////////////////////////////////////////////
400
+
401
+ def _require(self, *columntypes):
402
+ for columntype in columntypes:
403
+ if columntype not in self._colmap:
404
+ raise ValueError(
405
+ "This corpus does not contain a %s " "column." % columntype
406
+ )
407
+
408
+ @staticmethod
409
+ def _get_column(grid, column_index):
410
+ return [grid[i][column_index] for i in range(len(grid))]
411
+
412
+
413
+ class ConllSRLInstance:
414
+ """
415
+ An SRL instance from a CoNLL corpus, which identifies and
416
+ provides labels for the arguments of a single verb.
417
+ """
418
+
419
+ # [xx] add inst.core_arguments, inst.argm_arguments?
420
+
421
+ def __init__(self, tree, verb_head, verb_stem, roleset, tagged_spans):
422
+ self.verb = []
423
+ """A list of the word indices of the words that compose the
424
+ verb whose arguments are identified by this instance.
425
+ This will contain multiple word indices when multi-word
426
+ verbs are used (e.g. 'turn on')."""
427
+
428
+ self.verb_head = verb_head
429
+ """The word index of the head word of the verb whose arguments
430
+ are identified by this instance. E.g., for a sentence that
431
+ uses the verb 'turn on,' ``verb_head`` will be the word index
432
+ of the word 'turn'."""
433
+
434
+ self.verb_stem = verb_stem
435
+
436
+ self.roleset = roleset
437
+
438
+ self.arguments = []
439
+ """A list of ``(argspan, argid)`` tuples, specifying the location
440
+ and type for each of the arguments identified by this
441
+ instance. ``argspan`` is a tuple ``start, end``, indicating
442
+ that the argument consists of the ``words[start:end]``."""
443
+
444
+ self.tagged_spans = tagged_spans
445
+ """A list of ``(span, id)`` tuples, specifying the location and
446
+ type for each of the arguments, as well as the verb pieces,
447
+ that make up this instance."""
448
+
449
+ self.tree = tree
450
+ """The parse tree for the sentence containing this instance."""
451
+
452
+ self.words = tree.leaves()
453
+ """A list of the words in the sentence containing this
454
+ instance."""
455
+
456
+ # Fill in the self.verb and self.arguments values.
457
+ for (start, end), tag in tagged_spans:
458
+ if tag in ("V", "C-V"):
459
+ self.verb += list(range(start, end))
460
+ else:
461
+ self.arguments.append(((start, end), tag))
462
+
463
+ def __repr__(self):
464
+ # Originally, its:
465
+ ##plural = 's' if len(self.arguments) != 1 else ''
466
+ plural = "s" if len(self.arguments) != 1 else ""
467
+ return "<ConllSRLInstance for %r with %d argument%s>" % (
468
+ (self.verb_stem, len(self.arguments), plural)
469
+ )
470
+
471
+ def pprint(self):
472
+ verbstr = " ".join(self.words[i][0] for i in self.verb)
473
+ hdr = f"SRL for {verbstr!r} (stem={self.verb_stem!r}):\n"
474
+ s = ""
475
+ for i, word in enumerate(self.words):
476
+ if isinstance(word, tuple):
477
+ word = word[0]
478
+ for (start, end), argid in self.arguments:
479
+ if i == start:
480
+ s += "[%s " % argid
481
+ if i == end:
482
+ s += "] "
483
+ if i in self.verb:
484
+ word = "<<%s>>" % word
485
+ s += word + " "
486
+ return hdr + textwrap.fill(
487
+ s.replace(" ]", "]"), initial_indent=" ", subsequent_indent=" "
488
+ )
489
+
490
+
491
+ class ConllSRLInstanceList(list):
492
+ """
493
+ Set of instances for a single sentence
494
+ """
495
+
496
+ def __init__(self, tree, instances=()):
497
+ self.tree = tree
498
+ list.__init__(self, instances)
499
+
500
+ def __str__(self):
501
+ return self.pprint()
502
+
503
+ def pprint(self, include_tree=False):
504
+ # Sanity check: trees should be the same
505
+ for inst in self:
506
+ if inst.tree != self.tree:
507
+ raise ValueError("Tree mismatch!")
508
+
509
+ # If desired, add trees:
510
+ if include_tree:
511
+ words = self.tree.leaves()
512
+ pos = [None] * len(words)
513
+ synt = ["*"] * len(words)
514
+ self._tree2conll(self.tree, 0, words, pos, synt)
515
+
516
+ s = ""
517
+ for i in range(len(words)):
518
+ # optional tree columns
519
+ if include_tree:
520
+ s += "%-20s " % words[i]
521
+ s += "%-8s " % pos[i]
522
+ s += "%15s*%-8s " % tuple(synt[i].split("*"))
523
+
524
+ # verb head column
525
+ for inst in self:
526
+ if i == inst.verb_head:
527
+ s += "%-20s " % inst.verb_stem
528
+ break
529
+ else:
530
+ s += "%-20s " % "-"
531
+ # Remaining columns: self
532
+ for inst in self:
533
+ argstr = "*"
534
+ for (start, end), argid in inst.tagged_spans:
535
+ if i == start:
536
+ argstr = f"({argid}{argstr}"
537
+ if i == (end - 1):
538
+ argstr += ")"
539
+ s += "%-12s " % argstr
540
+ s += "\n"
541
+ return s
542
+
543
+ def _tree2conll(self, tree, wordnum, words, pos, synt):
544
+ assert isinstance(tree, Tree)
545
+ if len(tree) == 1 and isinstance(tree[0], str):
546
+ pos[wordnum] = tree.label()
547
+ assert words[wordnum] == tree[0]
548
+ return wordnum + 1
549
+ elif len(tree) == 1 and isinstance(tree[0], tuple):
550
+ assert len(tree[0]) == 2
551
+ words[wordnum], pos[wordnum] = tree[0]
552
+ return wordnum + 1
553
+ else:
554
+ synt[wordnum] = f"({tree.label()}{synt[wordnum]}"
555
+ for child in tree:
556
+ wordnum = self._tree2conll(child, wordnum, words, pos, synt)
557
+ synt[wordnum - 1] += ")"
558
+ return wordnum
559
+
560
+
561
+ class ConllChunkCorpusReader(ConllCorpusReader):
562
+ """
563
+ A ConllCorpusReader whose data file contains three columns: words,
564
+ pos, and chunk.
565
+ """
566
+
567
+ def __init__(
568
+ self, root, fileids, chunk_types, encoding="utf8", tagset=None, separator=None
569
+ ):
570
+ ConllCorpusReader.__init__(
571
+ self,
572
+ root,
573
+ fileids,
574
+ ("words", "pos", "chunk"),
575
+ chunk_types=chunk_types,
576
+ encoding=encoding,
577
+ tagset=tagset,
578
+ separator=separator,
579
+ )
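
A self-contained sketch of the grid format the reader expects: a tiny three-column (words/pos/chunk) file is written to a temporary directory and read back. The sample sentence, file name, and directory are invented for illustration.

    import os
    import tempfile
    from nltk.corpus.reader import ConllCorpusReader

    # Invented sample: one sentence, one token per line, blank line terminates it.
    sample = (
        "He PRP B-NP\n"
        "reckons VBZ B-VP\n"
        "the DT B-NP\n"
        "deficit NN I-NP\n"
        ". . O\n"
        "\n"
    )
    root = tempfile.mkdtemp()
    with open(os.path.join(root, "sample.conll"), "w", encoding="utf8") as f:
        f.write(sample)

    reader = ConllCorpusReader(root, r".*\.conll", ("words", "pos", "chunk"))
    print(reader.words()[:3])         # ['He', 'reckons', 'the']
    print(reader.iob_sents()[0][:2])  # [('He', 'PRP', 'B-NP'), ('reckons', 'VBZ', 'B-VP')]
    print(reader.chunked_sents()[0])  # (S (NP He/PRP) (VP reckons/VBZ) (NP the/DT deficit/NN) ./.)
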
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/dependency.py ADDED
@@ -0,0 +1,115 @@
1
+ # Natural Language Toolkit: Dependency Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Kepa Sarasola <[email protected]>
5
+ # Iker Manterola <[email protected]>
6
+ #
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ from nltk.corpus.reader.api import *
11
+ from nltk.corpus.reader.util import *
12
+ from nltk.parse import DependencyGraph
13
+ from nltk.tokenize import *
14
+
15
+
16
+ class DependencyCorpusReader(SyntaxCorpusReader):
17
+ def __init__(
18
+ self,
19
+ root,
20
+ fileids,
21
+ encoding="utf8",
22
+ word_tokenizer=TabTokenizer(),
23
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
24
+ para_block_reader=read_blankline_block,
25
+ ):
26
+ SyntaxCorpusReader.__init__(self, root, fileids, encoding)
27
+
28
+ #########################################################
29
+
30
+ def words(self, fileids=None):
31
+ return concat(
32
+ [
33
+ DependencyCorpusView(fileid, False, False, False, encoding=enc)
34
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
35
+ ]
36
+ )
37
+
38
+ def tagged_words(self, fileids=None):
39
+ return concat(
40
+ [
41
+ DependencyCorpusView(fileid, True, False, False, encoding=enc)
42
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
43
+ ]
44
+ )
45
+
46
+ def sents(self, fileids=None):
47
+ return concat(
48
+ [
49
+ DependencyCorpusView(fileid, False, True, False, encoding=enc)
50
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
51
+ ]
52
+ )
53
+
54
+ def tagged_sents(self, fileids=None):
55
+ return concat(
56
+ [
57
+ DependencyCorpusView(fileid, True, True, False, encoding=enc)
58
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
59
+ ]
60
+ )
61
+
62
+ def parsed_sents(self, fileids=None):
63
+ sents = concat(
64
+ [
65
+ DependencyCorpusView(fileid, False, True, True, encoding=enc)
66
+ for fileid, enc in self.abspaths(fileids, include_encoding=True)
67
+ ]
68
+ )
69
+ return [DependencyGraph(sent) for sent in sents]
70
+
71
+
72
+ class DependencyCorpusView(StreamBackedCorpusView):
73
+ _DOCSTART = "-DOCSTART- -DOCSTART- O\n" # marks the start of a document
74
+
75
+ def __init__(
76
+ self,
77
+ corpus_file,
78
+ tagged,
79
+ group_by_sent,
80
+ dependencies,
81
+ chunk_types=None,
82
+ encoding="utf8",
83
+ ):
84
+ self._tagged = tagged
85
+ self._dependencies = dependencies
86
+ self._group_by_sent = group_by_sent
87
+ self._chunk_types = chunk_types
88
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
89
+
90
+ def read_block(self, stream):
91
+ # Read the next sentence.
92
+ sent = read_blankline_block(stream)[0].strip()
93
+ # Strip off the docstart marker, if present.
94
+ if sent.startswith(self._DOCSTART):
95
+ sent = sent[len(self._DOCSTART) :].lstrip()
96
+
97
+ # extract word and tag from any of the formats
98
+ if not self._dependencies:
99
+ lines = [line.split("\t") for line in sent.split("\n")]
100
+ if len(lines[0]) == 3 or len(lines[0]) == 4:
101
+ sent = [(line[0], line[1]) for line in lines]
102
+ elif len(lines[0]) == 10:
103
+ sent = [(line[1], line[4]) for line in lines]
104
+ else:
105
+ raise ValueError("Unexpected number of fields in dependency tree file")
106
+
107
+ # discard tags if they weren't requested
108
+ if not self._tagged:
109
+ sent = [word for (word, tag) in sent]
110
+
111
+ # Return the result.
112
+ if self._group_by_sent:
113
+ return [sent]
114
+ else:
115
+ return list(sent)
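
A brief usage sketch against the ``dependency_treebank`` corpus that NLTK distributes (fetch it first with ``nltk.download('dependency_treebank')``); it exercises the reader methods defined above.

    from nltk.corpus import dependency_treebank

    print(dependency_treebank.sents()[0][:6])        # first words of the first sentence
    print(dependency_treebank.tagged_sents()[0][:3])

    graph = dependency_treebank.parsed_sents()[0]    # a DependencyGraph
    print(graph.tree())                              # its tree rendering
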
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ieer.py ADDED
@@ -0,0 +1,116 @@
1
+ # Natural Language Toolkit: IEER Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Corpus reader for the Information Extraction and Entity Recognition Corpus.
11
+
12
+ NIST 1999 Information Extraction: Entity Recognition Evaluation
13
+ https://www.itl.nist.gov/iad/894.01/tests/ie-er/er_99/er_99.htm
14
+
15
+ This corpus contains the NEWSWIRE development test data for the
16
+ NIST 1999 IE-ER Evaluation. The files were taken from the
17
+ subdirectory: ``/ie_er_99/english/devtest/newswire/*.ref.nwt``
18
+ and filenames were shortened.
19
+
20
+ The corpus contains the following files: APW_19980314, APW_19980424,
21
+ APW_19980429, NYT_19980315, NYT_19980403, and NYT_19980407.
22
+ """
23
+
24
+ import nltk
25
+ from nltk.corpus.reader.api import *
26
+
27
+ #: A dictionary whose keys are the names of documents in this corpus;
28
+ #: and whose values are descriptions of those documents' contents.
29
+ titles = {
30
+ "APW_19980314": "Associated Press Weekly, 14 March 1998",
31
+ "APW_19980424": "Associated Press Weekly, 24 April 1998",
32
+ "APW_19980429": "Associated Press Weekly, 29 April 1998",
33
+ "NYT_19980315": "New York Times, 15 March 1998",
34
+ "NYT_19980403": "New York Times, 3 April 1998",
35
+ "NYT_19980407": "New York Times, 7 April 1998",
36
+ }
37
+
38
+ #: A list of all documents in this corpus.
39
+ documents = sorted(titles)
40
+
41
+
42
+ class IEERDocument:
43
+ def __init__(self, text, docno=None, doctype=None, date_time=None, headline=""):
44
+ self.text = text
45
+ self.docno = docno
46
+ self.doctype = doctype
47
+ self.date_time = date_time
48
+ self.headline = headline
49
+
50
+ def __repr__(self):
51
+ if self.headline:
52
+ headline = " ".join(self.headline.leaves())
53
+ else:
54
+ headline = (
55
+ " ".join([w for w in self.text.leaves() if w[:1] != "<"][:12]) + "..."
56
+ )
57
+ if self.docno is not None:
58
+ return f"<IEERDocument {self.docno}: {headline!r}>"
59
+ else:
60
+ return "<IEERDocument: %r>" % headline
61
+
62
+
63
+ class IEERCorpusReader(CorpusReader):
64
+ """Corpus reader for the NIST 1999 IE-ER newswire development test data."""
65
+
66
+ def docs(self, fileids=None):
67
+ return concat(
68
+ [
69
+ StreamBackedCorpusView(fileid, self._read_block, encoding=enc)
70
+ for (fileid, enc) in self.abspaths(fileids, True)
71
+ ]
72
+ )
73
+
74
+ def parsed_docs(self, fileids=None):
75
+ return concat(
76
+ [
77
+ StreamBackedCorpusView(fileid, self._read_parsed_block, encoding=enc)
78
+ for (fileid, enc) in self.abspaths(fileids, True)
79
+ ]
80
+ )
81
+
82
+ def _read_parsed_block(self, stream):
83
+ # TODO: figure out why empty documents are being returned
84
+ return [
85
+ self._parse(doc)
86
+ for doc in self._read_block(stream)
87
+ if self._parse(doc).docno is not None
88
+ ]
89
+
90
+ def _parse(self, doc):
91
+ val = nltk.chunk.ieerstr2tree(doc, root_label="DOCUMENT")
92
+ if isinstance(val, dict):
93
+ return IEERDocument(**val)
94
+ else:
95
+ return IEERDocument(val)
96
+
97
+ def _read_block(self, stream):
98
+ out = []
99
+ # Skip any preamble.
100
+ while True:
101
+ line = stream.readline()
102
+ if not line:
103
+ break
104
+ if line.strip() == "<DOC>":
105
+ break
106
+ out.append(line)
107
+ # Read the document
108
+ while True:
109
+ line = stream.readline()
110
+ if not line:
111
+ break
112
+ out.append(line)
113
+ if line.strip() == "</DOC>":
114
+ break
115
+ # Return the document
116
+ return ["\n".join(out)]
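A minimal usage sketch for the reader above (assuming `nltk.download("ieer")` has been run; the fileid is one of the six documents listed in the module docstring):

    from nltk.corpus import ieer

    doc = ieer.parsed_docs("NYT_19980315")[0]   # an IEERDocument built by _parse()
    print(doc.docno, doc.doctype)
    print(doc.text.label())                     # "DOCUMENT", the root_label used above
    if doc.headline:
        print(" ".join(doc.headline.leaves()))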
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/indian.py ADDED
@@ -0,0 +1,93 @@
1
+ # Natural Language Toolkit: Indian Language POS-Tagged Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Indian Language POS-Tagged Corpus
11
+ Collected by A Kumaran, Microsoft Research, India
12
+ Distributed with permission
13
+
14
+ Contents:
15
+ - Bangla: IIT Kharagpur
16
+ - Hindi: Microsoft Research India
17
+ - Marathi: IIT Bombay
18
+ - Telugu: IIIT Hyderabad
19
+ """
20
+
21
+ from nltk.corpus.reader.api import *
22
+ from nltk.corpus.reader.util import *
23
+ from nltk.tag import map_tag, str2tuple
24
+
25
+
26
+ class IndianCorpusReader(CorpusReader):
27
+ """
28
+ List of words, one per line. Blank lines are ignored.
29
+ """
30
+
31
+ def words(self, fileids=None):
32
+ return concat(
33
+ [
34
+ IndianCorpusView(fileid, enc, False, False)
35
+ for (fileid, enc) in self.abspaths(fileids, True)
36
+ ]
37
+ )
38
+
39
+ def tagged_words(self, fileids=None, tagset=None):
40
+ if tagset and tagset != self._tagset:
41
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
42
+ else:
43
+ tag_mapping_function = None
44
+ return concat(
45
+ [
46
+ IndianCorpusView(fileid, enc, True, False, tag_mapping_function)
47
+ for (fileid, enc) in self.abspaths(fileids, True)
48
+ ]
49
+ )
50
+
51
+ def sents(self, fileids=None):
52
+ return concat(
53
+ [
54
+ IndianCorpusView(fileid, enc, False, True)
55
+ for (fileid, enc) in self.abspaths(fileids, True)
56
+ ]
57
+ )
58
+
59
+ def tagged_sents(self, fileids=None, tagset=None):
60
+ if tagset and tagset != self._tagset:
61
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
62
+ else:
63
+ tag_mapping_function = None
64
+ return concat(
65
+ [
66
+ IndianCorpusView(fileid, enc, True, True, tag_mapping_function)
67
+ for (fileid, enc) in self.abspaths(fileids, True)
68
+ ]
69
+ )
70
+
71
+
72
+ class IndianCorpusView(StreamBackedCorpusView):
73
+ def __init__(
74
+ self, corpus_file, encoding, tagged, group_by_sent, tag_mapping_function=None
75
+ ):
76
+ self._tagged = tagged
77
+ self._group_by_sent = group_by_sent
78
+ self._tag_mapping_function = tag_mapping_function
79
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
80
+
81
+ def read_block(self, stream):
82
+ line = stream.readline()
83
+ if line.startswith("<"):
84
+ return []
85
+ sent = [str2tuple(word, sep="_") for word in line.split()]
86
+ if self._tag_mapping_function:
87
+ sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent]
88
+ if not self._tagged:
89
+ sent = [w for (w, t) in sent]
90
+ if self._group_by_sent:
91
+ return [sent]
92
+ else:
93
+ return sent
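A minimal usage sketch (assuming `nltk.download("indian")`; the fileid `hindi.pos` follows the corpus's language.pos naming):

    from nltk.corpus import indian

    print(indian.fileids())                      # bangla.pos, hindi.pos, marathi.pos, telugu.pos
    print(indian.tagged_words("hindi.pos")[:5])  # word_TAG tokens split by str2tuple in read_block()
    print(indian.sents("hindi.pos")[0])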
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/ipipan.py ADDED
@@ -0,0 +1,356 @@
1
+ # Natural Language Toolkit: IPI PAN Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Konrad Goluchowski <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import functools
9
+
10
+ from nltk.corpus.reader.api import CorpusReader
11
+ from nltk.corpus.reader.util import StreamBackedCorpusView, concat
12
+
13
+
14
+ def _parse_args(fun):
15
+ @functools.wraps(fun)
16
+ def decorator(self, fileids=None, **kwargs):
17
+ kwargs.pop("tags", None)
18
+ if not fileids:
19
+ fileids = self.fileids()
20
+ return fun(self, fileids, **kwargs)
21
+
22
+ return decorator
23
+
24
+
25
+ class IPIPANCorpusReader(CorpusReader):
26
+ """
27
+ Corpus reader designed to work with corpus created by IPI PAN.
28
+ See http://korpus.pl/en/ for more details about IPI PAN corpus.
29
+
30
+ The corpus includes information about text domain, channel and categories.
31
+ You can access possible values using ``domains()``, ``channels()`` and
32
+ ``categories()``. You can use also this metadata to filter files, e.g.:
33
+ ``categories()``. You can also use this metadata to filter files, e.g.:
34
+ ``fileids(channels='prasa')``, ``fileids(categories='publicystyczny')``.
35
+ The reader supports methods: words, sents, paras and their tagged versions.
36
+ You can get part of speech instead of full tag by giving "simplify_tags=True"
37
+ parameter, e.g.: ``tagged_sents(simplify_tags=True)``.
38
+
39
+ You can also get all disambiguated tags by specifying the parameter
40
+ "one_tag=False", e.g.: ``tagged_paras(one_tag=False)``.
41
+
42
+ You can get all tags that were assigned by a morphological analyzer by specifying the
43
+ parameter "disamb_only=False", e.g. ``tagged_words(disamb_only=False)``.
44
+
45
+ The IPIPAN Corpus contains tags indicating if there is a space between two
46
+ tokens. To add special "no space" markers, you should specify parameter
47
+ "append_no_space=True", e.g. ``tagged_words(append_no_space=True)``.
48
+ As a result, wherever there should be no space between two tokens, the
49
+ pair ('', 'no-space') will be inserted (for tagged data), or just '' for
50
+ methods without tags.
51
+
52
+ The corpus reader can also try to append spaces between words. To enable this
53
+ option, specify parameter "append_space=True", e.g. ``words(append_space=True)``.
54
+ As a result either ' ' or (' ', 'space') will be inserted between tokens.
55
+
56
+ By default, xml entities like &quot; and &amp; are replaced by corresponding
57
+ characters. You can turn off this feature, specifying parameter
58
+ "replace_xmlentities=False", e.g. ``words(replace_xmlentities=False)``.
59
+ """
60
+
61
+ def __init__(self, root, fileids):
62
+ CorpusReader.__init__(self, root, fileids, None, None)
63
+
64
+ def channels(self, fileids=None):
65
+ if not fileids:
66
+ fileids = self.fileids()
67
+ return self._parse_header(fileids, "channel")
68
+
69
+ def domains(self, fileids=None):
70
+ if not fileids:
71
+ fileids = self.fileids()
72
+ return self._parse_header(fileids, "domain")
73
+
74
+ def categories(self, fileids=None):
75
+ if not fileids:
76
+ fileids = self.fileids()
77
+ return [
78
+ self._map_category(cat) for cat in self._parse_header(fileids, "keyTerm")
79
+ ]
80
+
81
+ def fileids(self, channels=None, domains=None, categories=None):
82
+ if channels is not None and domains is not None and categories is not None:
83
+ raise ValueError(
84
+ "You can specify only one of channels, domains "
85
+ "and categories parameter at once"
86
+ )
87
+ if channels is None and domains is None and categories is None:
88
+ return CorpusReader.fileids(self)
89
+ if isinstance(channels, str):
90
+ channels = [channels]
91
+ if isinstance(domains, str):
92
+ domains = [domains]
93
+ if isinstance(categories, str):
94
+ categories = [categories]
95
+ if channels:
96
+ return self._list_morph_files_by("channel", channels)
97
+ elif domains:
98
+ return self._list_morph_files_by("domain", domains)
99
+ else:
100
+ return self._list_morph_files_by(
101
+ "keyTerm", categories, map=self._map_category
102
+ )
103
+
104
+ @_parse_args
105
+ def sents(self, fileids=None, **kwargs):
106
+ return concat(
107
+ [
108
+ self._view(
109
+ fileid, mode=IPIPANCorpusView.SENTS_MODE, tags=False, **kwargs
110
+ )
111
+ for fileid in self._list_morph_files(fileids)
112
+ ]
113
+ )
114
+
115
+ @_parse_args
116
+ def paras(self, fileids=None, **kwargs):
117
+ return concat(
118
+ [
119
+ self._view(
120
+ fileid, mode=IPIPANCorpusView.PARAS_MODE, tags=False, **kwargs
121
+ )
122
+ for fileid in self._list_morph_files(fileids)
123
+ ]
124
+ )
125
+
126
+ @_parse_args
127
+ def words(self, fileids=None, **kwargs):
128
+ return concat(
129
+ [
130
+ self._view(fileid, tags=False, **kwargs)
131
+ for fileid in self._list_morph_files(fileids)
132
+ ]
133
+ )
134
+
135
+ @_parse_args
136
+ def tagged_sents(self, fileids=None, **kwargs):
137
+ return concat(
138
+ [
139
+ self._view(fileid, mode=IPIPANCorpusView.SENTS_MODE, **kwargs)
140
+ for fileid in self._list_morph_files(fileids)
141
+ ]
142
+ )
143
+
144
+ @_parse_args
145
+ def tagged_paras(self, fileids=None, **kwargs):
146
+ return concat(
147
+ [
148
+ self._view(fileid, mode=IPIPANCorpusView.PARAS_MODE, **kwargs)
149
+ for fileid in self._list_morph_files(fileids)
150
+ ]
151
+ )
152
+
153
+ @_parse_args
154
+ def tagged_words(self, fileids=None, **kwargs):
155
+ return concat(
156
+ [self._view(fileid, **kwargs) for fileid in self._list_morph_files(fileids)]
157
+ )
158
+
159
+ def _list_morph_files(self, fileids):
160
+ return [f for f in self.abspaths(fileids)]
161
+
162
+ def _list_header_files(self, fileids):
163
+ return [
164
+ f.replace("morph.xml", "header.xml")
165
+ for f in self._list_morph_files(fileids)
166
+ ]
167
+
168
+ def _parse_header(self, fileids, tag):
169
+ values = set()
170
+ for f in self._list_header_files(fileids):
171
+ values_list = self._get_tag(f, tag)
172
+ for v in values_list:
173
+ values.add(v)
174
+ return list(values)
175
+
176
+ def _list_morph_files_by(self, tag, values, map=None):
177
+ fileids = self.fileids()
178
+ ret_fileids = set()
179
+ for f in fileids:
180
+ fp = self.abspath(f).replace("morph.xml", "header.xml")
181
+ values_list = self._get_tag(fp, tag)
182
+ for value in values_list:
183
+ if map is not None:
184
+ value = map(value)
185
+ if value in values:
186
+ ret_fileids.add(f)
187
+ return list(ret_fileids)
188
+
189
+ def _get_tag(self, f, tag):
190
+ tags = []
191
+ with open(f) as infile:
192
+ header = infile.read()
193
+ tag_end = 0
194
+ while True:
195
+ tag_pos = header.find("<" + tag, tag_end)
196
+ if tag_pos < 0:
197
+ return tags
198
+ tag_end = header.find("</" + tag + ">", tag_pos)
199
+ tags.append(header[tag_pos + len(tag) + 2 : tag_end])
200
+
201
+ def _map_category(self, cat):
202
+ pos = cat.find(">")
203
+ if pos == -1:
204
+ return cat
205
+ else:
206
+ return cat[pos + 1 :]
207
+
208
+ def _view(self, filename, **kwargs):
209
+ tags = kwargs.pop("tags", True)
210
+ mode = kwargs.pop("mode", 0)
211
+ simplify_tags = kwargs.pop("simplify_tags", False)
212
+ one_tag = kwargs.pop("one_tag", True)
213
+ disamb_only = kwargs.pop("disamb_only", True)
214
+ append_no_space = kwargs.pop("append_no_space", False)
215
+ append_space = kwargs.pop("append_space", False)
216
+ replace_xmlentities = kwargs.pop("replace_xmlentities", True)
217
+
218
+ if len(kwargs) > 0:
219
+ raise ValueError("Unexpected arguments: %s" % kwargs.keys())
220
+ if not one_tag and not disamb_only:
221
+ raise ValueError(
222
+ "You cannot specify both one_tag=False and " "disamb_only=False"
223
+ )
224
+ if not tags and (simplify_tags or not one_tag or not disamb_only):
225
+ raise ValueError(
226
+ "You cannot specify simplify_tags, one_tag or "
227
+ "disamb_only with functions other than tagged_*"
228
+ )
229
+
230
+ return IPIPANCorpusView(
231
+ filename,
232
+ tags=tags,
233
+ mode=mode,
234
+ simplify_tags=simplify_tags,
235
+ one_tag=one_tag,
236
+ disamb_only=disamb_only,
237
+ append_no_space=append_no_space,
238
+ append_space=append_space,
239
+ replace_xmlentities=replace_xmlentities,
240
+ )
241
+
242
+
243
+ class IPIPANCorpusView(StreamBackedCorpusView):
244
+
245
+ WORDS_MODE = 0
246
+ SENTS_MODE = 1
247
+ PARAS_MODE = 2
248
+
249
+ def __init__(self, filename, startpos=0, **kwargs):
250
+ StreamBackedCorpusView.__init__(self, filename, None, startpos, None)
251
+ self.in_sentence = False
252
+ self.position = 0
253
+
254
+ self.show_tags = kwargs.pop("tags", True)
255
+ self.disamb_only = kwargs.pop("disamb_only", True)
256
+ self.mode = kwargs.pop("mode", IPIPANCorpusView.WORDS_MODE)
257
+ self.simplify_tags = kwargs.pop("simplify_tags", False)
258
+ self.one_tag = kwargs.pop("one_tag", True)
259
+ self.append_no_space = kwargs.pop("append_no_space", False)
260
+ self.append_space = kwargs.pop("append_space", False)
261
+ self.replace_xmlentities = kwargs.pop("replace_xmlentities", True)
262
+
263
+ def read_block(self, stream):
264
+ sentence = []
265
+ sentences = []
266
+ space = False
267
+ no_space = False
268
+
269
+ tags = set()
270
+
271
+ lines = self._read_data(stream)
272
+
273
+ while True:
274
+
275
+ # we may have only part of last line
276
+ if len(lines) <= 1:
277
+ self._seek(stream)
278
+ lines = self._read_data(stream)
279
+
280
+ if lines == [""]:
281
+ assert not sentences
282
+ return []
283
+
284
+ line = lines.pop()
285
+ self.position += len(line) + 1
286
+
287
+ if line.startswith('<chunk type="s"'):
288
+ self.in_sentence = True
289
+ elif line.startswith('<chunk type="p"'):
290
+ pass
291
+ elif line.startswith("<tok"):
292
+ if self.append_space and space and not no_space:
293
+ self._append_space(sentence)
294
+ space = True
295
+ no_space = False
296
+ orth = ""
297
+ tags = set()
298
+ elif line.startswith("</chunk"):
299
+ if self.in_sentence:
300
+ self.in_sentence = False
301
+ self._seek(stream)
302
+ if self.mode == self.SENTS_MODE:
303
+ return [sentence]
304
+ elif self.mode == self.WORDS_MODE:
305
+ if self.append_space:
306
+ self._append_space(sentence)
307
+ return sentence
308
+ else:
309
+ sentences.append(sentence)
310
+ elif self.mode == self.PARAS_MODE:
311
+ self._seek(stream)
312
+ return [sentences]
313
+ elif line.startswith("<orth"):
314
+ orth = line[6:-7]
315
+ if self.replace_xmlentities:
316
+ orth = orth.replace("&quot;", '"').replace("&amp;", "&")
317
+ elif line.startswith("<lex"):
318
+ if not self.disamb_only or line.find("disamb=") != -1:
319
+ tag = line[line.index("<ctag") + 6 : line.index("</ctag")]
320
+ tags.add(tag)
321
+ elif line.startswith("</tok"):
322
+ if self.show_tags:
323
+ if self.simplify_tags:
324
+ tags = [t.split(":")[0] for t in tags]
325
+ if not self.one_tag or not self.disamb_only:
326
+ sentence.append((orth, tuple(tags)))
327
+ else:
328
+ sentence.append((orth, tags.pop()))
329
+ else:
330
+ sentence.append(orth)
331
+ elif line.startswith("<ns/>"):
332
+ if self.append_space:
333
+ no_space = True
334
+ if self.append_no_space:
335
+ if self.show_tags:
336
+ sentence.append(("", "no-space"))
337
+ else:
338
+ sentence.append("")
339
+ elif line.startswith("</cesAna"):
340
+ pass
341
+
342
+ def _read_data(self, stream):
343
+ self.position = stream.tell()
344
+ buff = stream.read(4096)
345
+ lines = buff.split("\n")
346
+ lines.reverse()
347
+ return lines
348
+
349
+ def _seek(self, stream):
350
+ stream.seek(self.position)
351
+
352
+ def _append_space(self, sentence):
353
+ if self.show_tags:
354
+ sentence.append((" ", "space"))
355
+ else:
356
+ sentence.append(" ")
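A minimal usage sketch for the reader above. The IPI PAN corpus is distributed separately, so the root path below is only a placeholder, and `prasa` is the example channel from the class docstring:

    from nltk.corpus.reader.ipipan import IPIPANCorpusReader

    reader = IPIPANCorpusReader("/path/to/ipipan", r".*morph\.xml")   # placeholder root and fileid pattern
    print(reader.channels())                                          # parsed from the header.xml files
    prasa = reader.fileids(channels="prasa")                          # note the plural keyword
    for sent in reader.tagged_sents(fileids=prasa[:1], simplify_tags=True)[:2]:
        print(sent[:8])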
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/lin.py ADDED
@@ -0,0 +1,183 @@
1
+ # Natural Language Toolkit: Lin's Thesaurus
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Dan Blanchard <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.txt
7
+
8
+ import re
9
+ from collections import defaultdict
10
+ from functools import reduce
11
+
12
+ from nltk.corpus.reader import CorpusReader
13
+
14
+
15
+ class LinThesaurusCorpusReader(CorpusReader):
16
+ """Wrapper for the LISP-formatted thesauruses distributed by Dekang Lin."""
17
+
18
+ # Compiled regular expression for extracting the key from the first line of each
19
+ # thesaurus entry
20
+ _key_re = re.compile(r'\("?([^"]+)"? \(desc [0-9.]+\).+')
21
+
22
+ @staticmethod
23
+ def __defaultdict_factory():
24
+ """Factory for creating defaultdict of defaultdict(dict)s"""
25
+ return defaultdict(dict)
26
+
27
+ def __init__(self, root, badscore=0.0):
28
+ """
29
+ Initialize the thesaurus.
30
+
31
+ :param root: root directory containing thesaurus LISP files
32
+ :type root: C{string}
33
+ :param badscore: the score to give to words which do not appear in each other's sets of synonyms
34
+ :type badscore: C{float}
35
+ """
36
+
37
+ super().__init__(root, r"sim[A-Z]\.lsp")
38
+ self._thesaurus = defaultdict(LinThesaurusCorpusReader.__defaultdict_factory)
39
+ self._badscore = badscore
40
+ for path, encoding, fileid in self.abspaths(
41
+ include_encoding=True, include_fileid=True
42
+ ):
43
+ with open(path) as lin_file:
44
+ first = True
45
+ for line in lin_file:
46
+ line = line.strip()
47
+ # Start of entry
48
+ if first:
49
+ key = LinThesaurusCorpusReader._key_re.sub(r"\1", line)
50
+ first = False
51
+ # End of entry
52
+ elif line == "))":
53
+ first = True
54
+ # Lines with pairs of ngrams and scores
55
+ else:
56
+ split_line = line.split("\t")
57
+ if len(split_line) == 2:
58
+ ngram, score = split_line
59
+ self._thesaurus[fileid][key][ngram.strip('"')] = float(
60
+ score
61
+ )
62
+
63
+ def similarity(self, ngram1, ngram2, fileid=None):
64
+ """
65
+ Returns the similarity score for two ngrams.
66
+
67
+ :param ngram1: first ngram to compare
68
+ :type ngram1: C{string}
69
+ :param ngram2: second ngram to compare
70
+ :type ngram2: C{string}
71
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
72
+ :type fileid: C{string}
73
+ :return: If fileid is specified, just the score for the two ngrams; otherwise,
74
+ list of tuples of fileids and scores.
75
+ """
76
+ # Entries don't contain themselves, so make sure similarity between item and itself is 1.0
77
+ if ngram1 == ngram2:
78
+ if fileid:
79
+ return 1.0
80
+ else:
81
+ return [(fid, 1.0) for fid in self._fileids]
82
+ else:
83
+ if fileid:
84
+ return (
85
+ self._thesaurus[fileid][ngram1][ngram2]
86
+ if ngram2 in self._thesaurus[fileid][ngram1]
87
+ else self._badscore
88
+ )
89
+ else:
90
+ return [
91
+ (
92
+ fid,
93
+ (
94
+ self._thesaurus[fid][ngram1][ngram2]
95
+ if ngram2 in self._thesaurus[fid][ngram1]
96
+ else self._badscore
97
+ ),
98
+ )
99
+ for fid in self._fileids
100
+ ]
101
+
102
+ def scored_synonyms(self, ngram, fileid=None):
103
+ """
104
+ Returns a list of scored synonyms (tuples of synonyms and scores) for the current ngram
105
+
106
+ :param ngram: ngram to lookup
107
+ :type ngram: C{string}
108
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
109
+ :type fileid: C{string}
110
+ :return: If fileid is specified, list of tuples of scores and synonyms; otherwise,
111
+ list of tuples of fileids and lists, where inner lists consist of tuples of
112
+ scores and synonyms.
113
+ """
114
+ if fileid:
115
+ return self._thesaurus[fileid][ngram].items()
116
+ else:
117
+ return [
118
+ (fileid, self._thesaurus[fileid][ngram].items())
119
+ for fileid in self._fileids
120
+ ]
121
+
122
+ def synonyms(self, ngram, fileid=None):
123
+ """
124
+ Returns a list of synonyms for the current ngram.
125
+
126
+ :param ngram: ngram to lookup
127
+ :type ngram: C{string}
128
+ :param fileid: thesaurus fileid to search in. If None, search all fileids.
129
+ :type fileid: C{string}
130
+ :return: If fileid is specified, list of synonyms; otherwise, list of tuples of fileids and
131
+ lists, where inner lists contain synonyms.
132
+ """
133
+ if fileid:
134
+ return self._thesaurus[fileid][ngram].keys()
135
+ else:
136
+ return [
137
+ (fileid, self._thesaurus[fileid][ngram].keys())
138
+ for fileid in self._fileids
139
+ ]
140
+
141
+ def __contains__(self, ngram):
142
+ """
143
+ Determines whether or not the given ngram is in the thesaurus.
144
+
145
+ :param ngram: ngram to lookup
146
+ :type ngram: C{string}
147
+ :return: whether the given ngram is in the thesaurus.
148
+ """
149
+ return reduce(
150
+ lambda accum, fileid: accum or (ngram in self._thesaurus[fileid]),
151
+ self._fileids,
152
+ False,
153
+ )
154
+
155
+
156
+ ######################################################################
157
+ # Demo
158
+ ######################################################################
159
+
160
+
161
+ def demo():
162
+ from nltk.corpus import lin_thesaurus as thes
163
+
164
+ word1 = "business"
165
+ word2 = "enterprise"
166
+ print("Getting synonyms for " + word1)
167
+ print(thes.synonyms(word1))
168
+
169
+ print("Getting scored synonyms for " + word1)
170
+ print(thes.scored_synonyms(word1))
171
+
172
+ print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
173
+ print(thes.synonyms(word1, fileid="simN.lsp"))
177
+
178
+ print(f"Similarity score for {word1} and {word2}:")
179
+ print(thes.similarity(word1, word2))
180
+
181
+
182
+ if __name__ == "__main__":
183
+ demo()
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/markdown.py ADDED
@@ -0,0 +1,342 @@
1
+ from collections import namedtuple
2
+ from functools import partial, wraps
3
+
4
+ from nltk.corpus.reader.api import CategorizedCorpusReader
5
+ from nltk.corpus.reader.plaintext import PlaintextCorpusReader
6
+ from nltk.corpus.reader.util import concat, read_blankline_block
7
+ from nltk.tokenize import blankline_tokenize, sent_tokenize, word_tokenize
8
+
9
+
10
+ def comma_separated_string_args(func):
11
+ """
12
+ A decorator that allows a function to be called with
13
+ a single string of comma-separated values which become
14
+ individual function arguments.
15
+ """
16
+
17
+ @wraps(func)
18
+ def wrapper(*args, **kwargs):
19
+ _args = list()
20
+ for arg in args:
21
+ if isinstance(arg, str):
22
+ _args.append({part.strip() for part in arg.split(",")})
23
+ elif isinstance(arg, list):
24
+ _args.append(set(arg))
25
+ else:
26
+ _args.append(arg)
27
+ for name, value in kwargs.items():
28
+ if isinstance(value, str):
29
+ kwargs[name] = {part.strip() for part in value.split(",")}
30
+ return func(*_args, **kwargs)
31
+
32
+ return wrapper
33
+
34
+
35
+ def read_parse_blankline_block(stream, parser):
36
+ block = read_blankline_block(stream)
37
+ if block:
38
+ return [parser.render(block[0])]
39
+ return block
40
+
41
+
42
+ class MarkdownBlock:
43
+ def __init__(self, content):
44
+ self.content = content
45
+ self.truncate_at = 16
46
+
47
+ def __repr__(self):
48
+ return f"{self.__class__.__name__}(content={repr(str(self))})"
49
+
50
+ def __str__(self):
51
+ return (
52
+ f"{self.content[:self.truncate_at]}"
53
+ f"{'...' if len(self.content) > self.truncate_at else ''}"
54
+ )
55
+
56
+ @property
57
+ def raw(self):
58
+ return self.content
59
+
60
+ @property
61
+ def words(self):
62
+ return word_tokenize(self.content)
63
+
64
+ @property
65
+ def sents(self):
66
+ return [word_tokenize(sent) for sent in sent_tokenize(self.content)]
67
+
68
+ @property
69
+ def paras(self):
70
+ return [
71
+ [word_tokenize(sent) for sent in sent_tokenize(para)]
72
+ for para in blankline_tokenize(self.content)
73
+ ]
74
+
75
+
76
+ class CodeBlock(MarkdownBlock):
77
+ def __init__(self, language, *args):
78
+ self.language = language
79
+ super().__init__(*args)
80
+
81
+ @property
82
+ def sents(self):
83
+ return [word_tokenize(line) for line in self.content.splitlines()]
84
+
85
+ @property
86
+ def lines(self):
87
+ return self.content.splitlines()
88
+
89
+ @property
90
+ def paras(self):
91
+ return [
92
+ [word_tokenize(line) for line in para.splitlines()]
93
+ for para in blankline_tokenize(self.content)
94
+ ]
95
+
96
+
97
+ class MarkdownSection(MarkdownBlock):
98
+ def __init__(self, heading, level, *args):
99
+ self.heading = heading
100
+ self.level = level
101
+ super().__init__(*args)
102
+
103
+
104
+ Image = namedtuple("Image", "label, src, title")
105
+ Link = namedtuple("Link", "label, href, title")
106
+ List = namedtuple("List", "is_ordered, items")
107
+
108
+
109
+ class MarkdownCorpusReader(PlaintextCorpusReader):
110
+ def __init__(self, *args, parser=None, **kwargs):
111
+ from markdown_it import MarkdownIt
112
+ from mdit_plain.renderer import RendererPlain
113
+ from mdit_py_plugins.front_matter import front_matter_plugin
114
+
115
+ self.parser = parser
116
+ if self.parser is None:
117
+ self.parser = MarkdownIt("commonmark", renderer_cls=RendererPlain)
118
+ self.parser.use(front_matter_plugin)
119
+
120
+ kwargs.setdefault(
121
+ "para_block_reader", partial(read_parse_blankline_block, parser=self.parser)
122
+ )
123
+ super().__init__(*args, **kwargs)
124
+
125
+ # This override takes care of removing markup.
126
+ def _read_word_block(self, stream):
127
+ words = list()
128
+ for para in self._para_block_reader(stream):
129
+ words.extend(self._word_tokenizer.tokenize(para))
130
+ return words
131
+
132
+
133
+ class CategorizedMarkdownCorpusReader(CategorizedCorpusReader, MarkdownCorpusReader):
134
+ """
135
+ A reader for markdown corpora whose documents are divided into
136
+ categories based on their file identifiers.
137
+
138
+ Based on nltk.corpus.reader.plaintext.CategorizedPlaintextCorpusReader:
139
+ https://www.nltk.org/_modules/nltk/corpus/reader/api.html#CategorizedCorpusReader
140
+ """
141
+
142
+ def __init__(self, *args, cat_field="tags", **kwargs):
143
+ """
144
+ Initialize the corpus reader. Categorization arguments
145
+ (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
146
+ the ``CategorizedCorpusReader`` constructor. The remaining arguments
147
+ are passed to the ``MarkdownCorpusReader`` constructor.
148
+ """
149
+ cat_args = ["cat_pattern", "cat_map", "cat_file"]
150
+ if not any(arg in kwargs for arg in cat_args):
151
+ # Initialize with a blank map now,
152
+ # and try to build categories from document metadata later.
153
+ kwargs["cat_map"] = dict()
154
+ CategorizedCorpusReader.__init__(self, kwargs)
155
+ MarkdownCorpusReader.__init__(self, *args, **kwargs)
156
+
157
+ # Map file IDs to categories if self._map exists but is still empty:
158
+ if self._map is not None and not self._map:
159
+ for file_id in self._fileids:
160
+ metadata = self.metadata(file_id)
161
+ if metadata:
162
+ self._map[file_id] = metadata[0].get(cat_field, [])
163
+
164
+ ### Begin CategorizedCorpusReader Overrides
165
+ @comma_separated_string_args
166
+ def categories(self, fileids=None):
167
+ return super().categories(fileids)
168
+
169
+ @comma_separated_string_args
170
+ def fileids(self, categories=None):
171
+ if categories is None:
172
+ return self._fileids
173
+ return super().fileids(categories)
174
+
175
+ ### End CategorizedCorpusReader Overrides
176
+
177
+ ### Begin MarkdownCorpusReader Overrides
178
+ @comma_separated_string_args
179
+ def raw(self, fileids=None, categories=None):
180
+ return super().raw(self._resolve(fileids, categories))
181
+
182
+ @comma_separated_string_args
183
+ def words(self, fileids=None, categories=None):
184
+ return super().words(self._resolve(fileids, categories))
185
+
186
+ @comma_separated_string_args
187
+ def sents(self, fileids=None, categories=None):
188
+ return super().sents(self._resolve(fileids, categories))
189
+
190
+ @comma_separated_string_args
191
+ def paras(self, fileids=None, categories=None):
192
+ return super().paras(self._resolve(fileids, categories))
193
+
194
+ ### End MarkdownCorpusReader Overrides
195
+
196
+ def concatenated_view(self, reader, fileids, categories):
197
+ return concat(
198
+ [
199
+ self.CorpusView(path, reader, encoding=enc)
200
+ for (path, enc) in self.abspaths(
201
+ self._resolve(fileids, categories), include_encoding=True
202
+ )
203
+ ]
204
+ )
205
+
206
+ def metadata_reader(self, stream):
207
+ from yaml import safe_load
208
+
209
+ return [
210
+ safe_load(t.content)
211
+ for t in self.parser.parse(stream.read())
212
+ if t.type == "front_matter"
213
+ ]
214
+
215
+ @comma_separated_string_args
216
+ def metadata(self, fileids=None, categories=None):
217
+ return self.concatenated_view(self.metadata_reader, fileids, categories)
218
+
219
+ def blockquote_reader(self, stream):
220
+ tokens = self.parser.parse(stream.read())
221
+ opening_tokens = filter(
222
+ lambda t: t.level == 0 and t.type == "blockquote_open", tokens
223
+ )
224
+ closing_tokens = filter(
225
+ lambda t: t.level == 0 and t.type == "blockquote_close", tokens
226
+ )
227
+ blockquotes = list()
228
+ for o, c in zip(opening_tokens, closing_tokens):
229
+ opening_index = tokens.index(o)
230
+ closing_index = tokens.index(c, opening_index)
231
+ blockquotes.append(tokens[opening_index : closing_index + 1])
232
+ return [
233
+ MarkdownBlock(
234
+ self.parser.renderer.render(block, self.parser.options, env=None)
235
+ )
236
+ for block in blockquotes
237
+ ]
238
+
239
+ @comma_separated_string_args
240
+ def blockquotes(self, fileids=None, categories=None):
241
+ return self.concatenated_view(self.blockquote_reader, fileids, categories)
242
+
243
+ def code_block_reader(self, stream):
244
+ return [
245
+ CodeBlock(
246
+ t.info,
247
+ t.content,
248
+ )
249
+ for t in self.parser.parse(stream.read())
250
+ if t.level == 0 and t.type in ("fence", "code_block")
251
+ ]
252
+
253
+ @comma_separated_string_args
254
+ def code_blocks(self, fileids=None, categories=None):
255
+ return self.concatenated_view(self.code_block_reader, fileids, categories)
256
+
257
+ def image_reader(self, stream):
258
+ return [
259
+ Image(
260
+ child_token.content,
261
+ child_token.attrGet("src"),
262
+ child_token.attrGet("title"),
263
+ )
264
+ for inline_token in filter(
265
+ lambda t: t.type == "inline", self.parser.parse(stream.read())
266
+ )
267
+ for child_token in inline_token.children
268
+ if child_token.type == "image"
269
+ ]
270
+
271
+ @comma_separated_string_args
272
+ def images(self, fileids=None, categories=None):
273
+ return self.concatenated_view(self.image_reader, fileids, categories)
274
+
275
+ def link_reader(self, stream):
276
+ return [
277
+ Link(
278
+ inline_token.children[i + 1].content,
279
+ child_token.attrGet("href"),
280
+ child_token.attrGet("title"),
281
+ )
282
+ for inline_token in filter(
283
+ lambda t: t.type == "inline", self.parser.parse(stream.read())
284
+ )
285
+ for i, child_token in enumerate(inline_token.children)
286
+ if child_token.type == "link_open"
287
+ ]
288
+
289
+ @comma_separated_string_args
290
+ def links(self, fileids=None, categories=None):
291
+ return self.concatenated_view(self.link_reader, fileids, categories)
292
+
293
+ def list_reader(self, stream):
294
+ tokens = self.parser.parse(stream.read())
295
+ opening_types = ("bullet_list_open", "ordered_list_open")
296
+ opening_tokens = filter(
297
+ lambda t: t.level == 0 and t.type in opening_types, tokens
298
+ )
299
+ closing_types = ("bullet_list_close", "ordered_list_close")
300
+ closing_tokens = filter(
301
+ lambda t: t.level == 0 and t.type in closing_types, tokens
302
+ )
303
+ list_blocks = list()
304
+ for o, c in zip(opening_tokens, closing_tokens):
305
+ opening_index = tokens.index(o)
306
+ closing_index = tokens.index(c, opening_index)
307
+ list_blocks.append(tokens[opening_index : closing_index + 1])
308
+ return [
309
+ List(
310
+ tokens[0].type == "ordered_list_open",
311
+ [t.content for t in tokens if t.content],
312
+ )
313
+ for tokens in list_blocks
314
+ ]
315
+
316
+ @comma_separated_string_args
317
+ def lists(self, fileids=None, categories=None):
318
+ return self.concatenated_view(self.list_reader, fileids, categories)
319
+
320
+ def section_reader(self, stream):
321
+ section_blocks, block = list(), list()
322
+ in_heading = False
323
+ for t in self.parser.parse(stream.read()):
324
+ if t.level == 0 and t.type == "heading_open":
325
+ if block:
326
+ section_blocks.append(block)
327
+ block = list()
328
+ in_heading = True
329
+ if in_heading:
330
+ block.append(t)
331
+ return [
332
+ MarkdownSection(
333
+ block[1].content,
334
+ block[0].markup.count("#"),
335
+ self.parser.renderer.render(block, self.parser.options, env=None),
336
+ )
337
+ for block in section_blocks
338
+ ]
339
+
340
+ @comma_separated_string_args
341
+ def sections(self, fileids=None, categories=None):
342
+ return self.concatenated_view(self.section_reader, fileids, categories)
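A minimal usage sketch for the categorized reader above, assuming a local folder of Markdown files with YAML front matter (the path and the `notes` tag are placeholders) and the optional markdown-it-py, mdit_plain, mdit_py_plugins and PyYAML dependencies:

    from nltk.corpus.reader.markdown import CategorizedMarkdownCorpusReader

    reader = CategorizedMarkdownCorpusReader("/path/to/md_corpus", r".*\.md", cat_field="tags")
    print(reader.categories())                     # collected from each file's front matter
    print(reader.words(categories="notes")[:10])   # comma-separated strings are split by the decorator
    for block in reader.code_blocks():
        print(block.language, block.lines[:2])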
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/nps_chat.py ADDED
@@ -0,0 +1,90 @@
1
+ # Natural Language Toolkit: NPS Chat Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import re
9
+ import textwrap
10
+
11
+ from nltk.corpus.reader.api import *
12
+ from nltk.corpus.reader.util import *
13
+ from nltk.corpus.reader.xmldocs import *
14
+ from nltk.internals import ElementWrapper
15
+ from nltk.tag import map_tag
16
+ from nltk.util import LazyConcatenation
17
+
18
+
19
+ class NPSChatCorpusReader(XMLCorpusReader):
20
+ def __init__(self, root, fileids, wrap_etree=False, tagset=None):
21
+ XMLCorpusReader.__init__(self, root, fileids, wrap_etree)
22
+ self._tagset = tagset
23
+
24
+ def xml_posts(self, fileids=None):
25
+ if self._wrap_etree:
26
+ return concat(
27
+ [
28
+ XMLCorpusView(fileid, "Session/Posts/Post", self._wrap_elt)
29
+ for fileid in self.abspaths(fileids)
30
+ ]
31
+ )
32
+ else:
33
+ return concat(
34
+ [
35
+ XMLCorpusView(fileid, "Session/Posts/Post")
36
+ for fileid in self.abspaths(fileids)
37
+ ]
38
+ )
39
+
40
+ def posts(self, fileids=None):
41
+ return concat(
42
+ [
43
+ XMLCorpusView(
44
+ fileid, "Session/Posts/Post/terminals", self._elt_to_words
45
+ )
46
+ for fileid in self.abspaths(fileids)
47
+ ]
48
+ )
49
+
50
+ def tagged_posts(self, fileids=None, tagset=None):
51
+ def reader(elt, handler):
52
+ return self._elt_to_tagged_words(elt, handler, tagset)
53
+
54
+ return concat(
55
+ [
56
+ XMLCorpusView(fileid, "Session/Posts/Post/terminals", reader)
57
+ for fileid in self.abspaths(fileids)
58
+ ]
59
+ )
60
+
61
+ def words(self, fileids=None):
62
+ return LazyConcatenation(self.posts(fileids))
63
+
64
+ def tagged_words(self, fileids=None, tagset=None):
65
+ return LazyConcatenation(self.tagged_posts(fileids, tagset))
66
+
67
+ def _wrap_elt(self, elt, handler):
68
+ return ElementWrapper(elt)
69
+
70
+ def _elt_to_words(self, elt, handler):
71
+ return [self._simplify_username(t.attrib["word"]) for t in elt.findall("t")]
72
+
73
+ def _elt_to_tagged_words(self, elt, handler, tagset=None):
74
+ tagged_post = [
75
+ (self._simplify_username(t.attrib["word"]), t.attrib["pos"])
76
+ for t in elt.findall("t")
77
+ ]
78
+ if tagset and tagset != self._tagset:
79
+ tagged_post = [
80
+ (w, map_tag(self._tagset, tagset, t)) for (w, t) in tagged_post
81
+ ]
82
+ return tagged_post
83
+
84
+ @staticmethod
85
+ def _simplify_username(word):
86
+ if "User" in word:
87
+ word = "U" + word.split("User", 1)[1]
88
+ elif isinstance(word, bytes):
89
+ word = word.decode("ascii")
90
+ return word
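A minimal usage sketch (assuming `nltk.download("nps_chat")` has been run):

    from nltk.corpus import nps_chat

    posts = nps_chat.xml_posts()                 # the XML <Post> elements
    print(posts[0].get("class"), posts[0].text)  # dialogue-act class and raw text
    print(nps_chat.tagged_posts()[0][:5])        # [(word, tag), ...] with usernames simplified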
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/opinion_lexicon.py ADDED
@@ -0,0 +1,125 @@
1
+ # Natural Language Toolkit: Opinion Lexicon Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Pierpaolo Pantone <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for the Opinion Lexicon.
10
+
11
+ Opinion Lexicon information
12
+ ===========================
13
+
14
+ Authors: Minqing Hu and Bing Liu, 2004.
15
+ Department of Computer Science
16
+ University of Illinois at Chicago
17
+
18
+ Contact: Bing Liu, [email protected]
19
+ https://www.cs.uic.edu/~liub
20
+
21
+ Distributed with permission.
22
+
23
+ Related papers:
24
+
25
+ - Minqing Hu and Bing Liu. "Mining and summarizing customer reviews".
26
+ Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery
27
+ & Data Mining (KDD-04), Aug 22-25, 2004, Seattle, Washington, USA.
28
+
29
+ - Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and
30
+ Comparing Opinions on the Web". Proceedings of the 14th International World
31
+ Wide Web conference (WWW-2005), May 10-14, 2005, Chiba, Japan.
32
+ """
33
+
34
+ from nltk.corpus.reader import WordListCorpusReader
35
+ from nltk.corpus.reader.api import *
36
+
37
+
38
+ class IgnoreReadmeCorpusView(StreamBackedCorpusView):
39
+ """
40
+ This CorpusView is used to skip the initial readme block of the corpus.
41
+ """
42
+
43
+ def __init__(self, *args, **kwargs):
44
+ StreamBackedCorpusView.__init__(self, *args, **kwargs)
45
+ # open self._stream
46
+ self._open()
47
+ # skip the readme block
48
+ read_blankline_block(self._stream)
49
+ # Set the initial position to the current stream position
50
+ self._filepos = [self._stream.tell()]
51
+
52
+
53
+ class OpinionLexiconCorpusReader(WordListCorpusReader):
54
+ """
55
+ Reader for Liu and Hu opinion lexicon. Blank lines and readme are ignored.
56
+
57
+ >>> from nltk.corpus import opinion_lexicon
58
+ >>> opinion_lexicon.words()
59
+ ['2-faced', '2-faces', 'abnormal', 'abolish', ...]
60
+
61
+ The OpinionLexiconCorpusReader provides shortcuts to retrieve positive/negative
62
+ words:
63
+
64
+ >>> opinion_lexicon.negative()
65
+ ['2-faced', '2-faces', 'abnormal', 'abolish', ...]
66
+
67
+ Note that words from `words()` method are sorted by file id, not alphabetically:
68
+
69
+ >>> opinion_lexicon.words()[0:10] # doctest: +NORMALIZE_WHITESPACE
70
+ ['2-faced', '2-faces', 'abnormal', 'abolish', 'abominable', 'abominably',
71
+ 'abominate', 'abomination', 'abort', 'aborted']
72
+ >>> sorted(opinion_lexicon.words())[0:10] # doctest: +NORMALIZE_WHITESPACE
73
+ ['2-faced', '2-faces', 'a+', 'abnormal', 'abolish', 'abominable', 'abominably',
74
+ 'abominate', 'abomination', 'abort']
75
+ """
76
+
77
+ CorpusView = IgnoreReadmeCorpusView
78
+
79
+ def words(self, fileids=None):
80
+ """
81
+ Return all words in the opinion lexicon. Note that these words are not
82
+ sorted in alphabetical order.
83
+
84
+ :param fileids: a list or regexp specifying the ids of the files whose
85
+ words have to be returned.
86
+ :return: the given file(s) as a list of words and punctuation symbols.
87
+ :rtype: list(str)
88
+ """
89
+ if fileids is None:
90
+ fileids = self._fileids
91
+ elif isinstance(fileids, str):
92
+ fileids = [fileids]
93
+ return concat(
94
+ [
95
+ self.CorpusView(path, self._read_word_block, encoding=enc)
96
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
97
+ ]
98
+ )
99
+
100
+ def positive(self):
101
+ """
102
+ Return all positive words in alphabetical order.
103
+
104
+ :return: a list of positive words.
105
+ :rtype: list(str)
106
+ """
107
+ return self.words("positive-words.txt")
108
+
109
+ def negative(self):
110
+ """
111
+ Return all negative words in alphabetical order.
112
+
113
+ :return: a list of negative words.
114
+ :rtype: list(str)
115
+ """
116
+ return self.words("negative-words.txt")
117
+
118
+ def _read_word_block(self, stream):
119
+ words = []
120
+ for i in range(20): # Read 20 lines at a time.
121
+ line = stream.readline()
122
+ if not line:
123
+ continue
124
+ words.append(line.strip())
125
+ return words
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/panlex_lite.py ADDED
@@ -0,0 +1,174 @@
1
+ # Natural Language Toolkit: PanLex Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: David Kamholz <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for PanLex Lite, a stripped down version of PanLex distributed
10
+ as an SQLite database. See the README.txt in the panlex_lite corpus directory
11
+ for more information on PanLex Lite.
12
+ """
13
+
14
+ import os
15
+ import sqlite3
16
+
17
+ from nltk.corpus.reader.api import CorpusReader
18
+
19
+
20
+ class PanLexLiteCorpusReader(CorpusReader):
21
+ MEANING_Q = """
22
+ SELECT dnx2.mn, dnx2.uq, dnx2.ap, dnx2.ui, ex2.tt, ex2.lv
23
+ FROM dnx
24
+ JOIN ex ON (ex.ex = dnx.ex)
25
+ JOIN dnx dnx2 ON (dnx2.mn = dnx.mn)
26
+ JOIN ex ex2 ON (ex2.ex = dnx2.ex)
27
+ WHERE dnx.ex != dnx2.ex AND ex.tt = ? AND ex.lv = ?
28
+ ORDER BY dnx2.uq DESC
29
+ """
30
+
31
+ TRANSLATION_Q = """
32
+ SELECT s.tt, sum(s.uq) AS trq FROM (
33
+ SELECT ex2.tt, max(dnx.uq) AS uq
34
+ FROM dnx
35
+ JOIN ex ON (ex.ex = dnx.ex)
36
+ JOIN dnx dnx2 ON (dnx2.mn = dnx.mn)
37
+ JOIN ex ex2 ON (ex2.ex = dnx2.ex)
38
+ WHERE dnx.ex != dnx2.ex AND ex.lv = ? AND ex.tt = ? AND ex2.lv = ?
39
+ GROUP BY ex2.tt, dnx.ui
40
+ ) s
41
+ GROUP BY s.tt
42
+ ORDER BY trq DESC, s.tt
43
+ """
44
+
45
+ def __init__(self, root):
46
+ self._c = sqlite3.connect(os.path.join(root, "db.sqlite")).cursor()
47
+
48
+ self._uid_lv = {}
49
+ self._lv_uid = {}
50
+
51
+ for row in self._c.execute("SELECT uid, lv FROM lv"):
52
+ self._uid_lv[row[0]] = row[1]
53
+ self._lv_uid[row[1]] = row[0]
54
+
55
+ def language_varieties(self, lc=None):
56
+ """
57
+ Return a list of PanLex language varieties.
58
+
59
+ :param lc: ISO 639 alpha-3 code. If specified, filters returned varieties
60
+ by this code. If unspecified, all varieties are returned.
61
+ :return: the specified language varieties as a list of tuples. The first
62
+ element is the language variety's seven-character uniform identifier,
63
+ and the second element is its default name.
64
+ :rtype: list(tuple)
65
+ """
66
+
67
+ if lc is None:
68
+ return self._c.execute("SELECT uid, tt FROM lv ORDER BY uid").fetchall()
69
+ else:
70
+ return self._c.execute(
71
+ "SELECT uid, tt FROM lv WHERE lc = ? ORDER BY uid", (lc,)
72
+ ).fetchall()
73
+
74
+ def meanings(self, expr_uid, expr_tt):
75
+ """
76
+ Return a list of meanings for an expression.
77
+
78
+ :param expr_uid: the expression's language variety, as a seven-character
79
+ uniform identifier.
80
+ :param expr_tt: the expression's text.
81
+ :return: a list of Meaning objects.
82
+ :rtype: list(Meaning)
83
+ """
84
+
85
+ expr_lv = self._uid_lv[expr_uid]
86
+
87
+ mn_info = {}
88
+
89
+ for i in self._c.execute(self.MEANING_Q, (expr_tt, expr_lv)):
90
+ mn = i[0]
91
+ uid = self._lv_uid[i[5]]
92
+
93
+ if not mn in mn_info:
94
+ mn_info[mn] = {
95
+ "uq": i[1],
96
+ "ap": i[2],
97
+ "ui": i[3],
98
+ "ex": {expr_uid: [expr_tt]},
99
+ }
100
+
101
+ if not uid in mn_info[mn]["ex"]:
102
+ mn_info[mn]["ex"][uid] = []
103
+
104
+ mn_info[mn]["ex"][uid].append(i[4])
105
+
106
+ return [Meaning(mn, mn_info[mn]) for mn in mn_info]
107
+
108
+ def translations(self, from_uid, from_tt, to_uid):
109
+ """
110
+ Return a list of translations for an expression into a single language
111
+ variety.
112
+
113
+ :param from_uid: the source expression's language variety, as a
114
+ seven-character uniform identifier.
115
+ :param from_tt: the source expression's text.
116
+ :param to_uid: the target language variety, as a seven-character
117
+ uniform identifier.
118
+ :return: a list of translation tuples. The first element is the expression
119
+ text and the second element is the translation quality.
120
+ :rtype: list(tuple)
121
+ """
122
+
123
+ from_lv = self._uid_lv[from_uid]
124
+ to_lv = self._uid_lv[to_uid]
125
+
126
+ return self._c.execute(self.TRANSLATION_Q, (from_lv, from_tt, to_lv)).fetchall()
127
+
128
+
129
+ class Meaning(dict):
130
+ """
131
+ Represents a single PanLex meaning. A meaning is a translation set derived
132
+ from a single source.
133
+ """
134
+
135
+ def __init__(self, mn, attr):
136
+ super().__init__(**attr)
137
+ self["mn"] = mn
138
+
139
+ def id(self):
140
+ """
141
+ :return: the meaning's id.
142
+ :rtype: int
143
+ """
144
+ return self["mn"]
145
+
146
+ def quality(self):
147
+ """
148
+ :return: the meaning's source's quality (0=worst, 9=best).
149
+ :rtype: int
150
+ """
151
+ return self["uq"]
152
+
153
+ def source(self):
154
+ """
155
+ :return: the meaning's source id.
156
+ :rtype: int
157
+ """
158
+ return self["ap"]
159
+
160
+ def source_group(self):
161
+ """
162
+ :return: the meaning's source group id.
163
+ :rtype: int
164
+ """
165
+ return self["ui"]
166
+
167
+ def expressions(self):
168
+ """
169
+ :return: the meaning's expressions as a dictionary whose keys are language
170
+ variety uniform identifiers and whose values are lists of expression
171
+ texts.
172
+ :rtype: dict
173
+ """
174
+ return self["ex"]
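A minimal usage sketch (the `panlex_lite` package is a large SQLite database fetched with `nltk.download("panlex_lite")`; "eng-000" and "fra-000" are PanLex seven-character uniform identifiers):

    from nltk.corpus import panlex_lite

    print(panlex_lite.language_varieties("fra")[:3])                  # (uid, default name) tuples
    print(panlex_lite.translations("eng-000", "dog", "fra-000")[:5])  # (text, quality) tuples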
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/panlex_swadesh.py ADDED
@@ -0,0 +1,95 @@
1
+ # Natural Language Toolkit: Word List Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+
10
+ import re
11
+ from collections import defaultdict, namedtuple
12
+
13
+ from nltk.corpus.reader.api import *
14
+ from nltk.corpus.reader.util import *
15
+ from nltk.corpus.reader.wordlist import WordListCorpusReader
16
+ from nltk.tokenize import line_tokenize
17
+
18
+ PanlexLanguage = namedtuple(
19
+ "PanlexLanguage",
20
+ [
21
+ "panlex_uid", # (1) PanLex UID
22
+ "iso639", # (2) ISO 639 language code
23
+ "iso639_type", # (3) ISO 639 language type, see README
24
+ "script", # (4) normal scripts of expressions
25
+ "name", # (5) PanLex default name
26
+ "langvar_uid", # (6) UID of the language variety in which the default name is an expression
27
+ ],
28
+ )
29
+
30
+
31
+ class PanlexSwadeshCorpusReader(WordListCorpusReader):
32
+ """
33
+ This is a class to read the PanLex Swadesh list from
34
+
35
+ David Kamholz, Jonathan Pool, and Susan M. Colowick (2014).
36
+ PanLex: Building a Resource for Panlingual Lexical Translation.
37
+ In LREC. http://www.lrec-conf.org/proceedings/lrec2014/pdf/1029_Paper.pdf
38
+
39
+ License: CC0 1.0 Universal
40
+ https://creativecommons.org/publicdomain/zero/1.0/legalcode
41
+ """
42
+
43
+ def __init__(self, *args, **kwargs):
44
+ super().__init__(*args, **kwargs)
45
+ # Find the swadesh size using the fileids' path.
46
+ self.swadesh_size = re.match(r"swadesh([0-9].*)\/", self.fileids()[0]).group(1)
47
+ self._languages = {lang.panlex_uid: lang for lang in self.get_languages()}
48
+ self._macro_langauges = self.get_macrolanguages()
49
+
50
+ def license(self):
51
+ return "CC0 1.0 Universal"
52
+
53
+ def language_codes(self):
54
+ return self._languages.keys()
55
+
56
+ def get_languages(self):
57
+ for line in self.raw(f"langs{self.swadesh_size}.txt").split("\n"):
58
+ if not line.strip(): # Skip empty lines.
59
+ continue
60
+ yield PanlexLanguage(*line.strip().split("\t"))
61
+
62
+ def get_macrolanguages(self):
63
+ macro_langauges = defaultdict(list)
64
+ for lang in self._languages.values():
65
+ macro_langauges[lang.iso639].append(lang.panlex_uid)
66
+ return macro_langauges
67
+
68
+ def words_by_lang(self, lang_code):
69
+ """
70
+ :return: a list of list(str)
71
+ """
72
+ fileid = f"swadesh{self.swadesh_size}/{lang_code}.txt"
73
+ return [concept.split("\t") for concept in self.words(fileid)]
74
+
75
+ def words_by_iso639(self, iso63_code):
76
+ """
77
+ :return: a list of list(str)
78
+ """
79
+ fileids = [
80
+ f"swadesh{self.swadesh_size}/{lang_code}.txt"
81
+ for lang_code in self._macro_langauges[iso63_code]
82
+ ]
83
+ return [
84
+ concept.split("\t") for fileid in fileids for concept in self.words(fileid)
85
+ ]
86
+
87
+ def entries(self, fileids=None):
88
+ """
89
+ :return: a tuple of words for the specified fileids.
90
+ """
91
+ if not fileids:
92
+ fileids = self.fileids()
93
+
94
+ wordlists = [self.words(f) for f in fileids]
95
+ return list(zip(*wordlists))
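A minimal usage sketch (assuming the `panlex_swadesh` corpus has been downloaded; "eng-000" is a PanLex UID and "eng" the corresponding ISO 639 code):

    from nltk.corpus import panlex_swadesh

    print(panlex_swadesh.license())                      # CC0 1.0 Universal
    print(panlex_swadesh.words_by_lang("eng-000")[:5])   # each concept is a list of forms
    print(panlex_swadesh.words_by_iso639("eng")[:5])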
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/pl196x.py ADDED
@@ -0,0 +1,375 @@
1
+ # Natural Language Toolkit:
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Piotr Kasprzyk <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from nltk.corpus.reader.api import *
9
+ from nltk.corpus.reader.xmldocs import XMLCorpusReader
10
+
11
+ PARA = re.compile(r"<p(?: [^>]*){0,1}>(.*?)</p>")
12
+ SENT = re.compile(r"<s(?: [^>]*){0,1}>(.*?)</s>")
13
+
14
+ TAGGEDWORD = re.compile(r"<([wc](?: [^>]*){0,1}>)(.*?)</[wc]>")
15
+ WORD = re.compile(r"<[wc](?: [^>]*){0,1}>(.*?)</[wc]>")
16
+
17
+ TYPE = re.compile(r'type="(.*?)"')
18
+ ANA = re.compile(r'ana="(.*?)"')
19
+
20
+ TEXTID = re.compile(r'text id="(.*?)"')
21
+
22
+
23
+ class TEICorpusView(StreamBackedCorpusView):
24
+ def __init__(
25
+ self,
26
+ corpus_file,
27
+ tagged,
28
+ group_by_sent,
29
+ group_by_para,
30
+ tagset=None,
31
+ head_len=0,
32
+ textids=None,
33
+ ):
34
+
35
+ self._tagged = tagged
36
+ self._textids = textids
37
+
38
+ self._group_by_sent = group_by_sent
39
+ self._group_by_para = group_by_para
40
+ # WARNING -- skip header
41
+ StreamBackedCorpusView.__init__(self, corpus_file, startpos=head_len)
42
+
43
+ _pagesize = 4096
44
+
45
+ def read_block(self, stream):
46
+ block = stream.readlines(self._pagesize)
47
+ block = concat(block)
48
+ while (block.count("<text id") > block.count("</text>")) or block.count(
49
+ "<text id"
50
+ ) == 0:
51
+ tmp = stream.readline()
52
+ if len(tmp) <= 0:
53
+ break
54
+ block += tmp
55
+
56
+ block = block.replace("\n", "")
57
+
58
+ textids = TEXTID.findall(block)
59
+ if self._textids:
60
+ for tid in textids:
61
+ if tid not in self._textids:
62
+ beg = block.find(tid) - 1
63
+ end = block[beg:].find("</text>") + len("</text>")
64
+ block = block[:beg] + block[beg + end :]
65
+
66
+ output = []
67
+ for para_str in PARA.findall(block):
68
+ para = []
69
+ for sent_str in SENT.findall(para_str):
70
+ if not self._tagged:
71
+ sent = WORD.findall(sent_str)
72
+ else:
73
+ sent = list(map(self._parse_tag, TAGGEDWORD.findall(sent_str)))
74
+ if self._group_by_sent:
75
+ para.append(sent)
76
+ else:
77
+ para.extend(sent)
78
+ if self._group_by_para:
79
+ output.append(para)
80
+ else:
81
+ output.extend(para)
82
+ return output
83
+
84
+ def _parse_tag(self, tag_word_tuple):
85
+ (tag, word) = tag_word_tuple
86
+ if tag.startswith("w"):
87
+ tag = ANA.search(tag).group(1)
88
+ else: # tag.startswith('c')
89
+ tag = TYPE.search(tag).group(1)
90
+ return word, tag
91
+
92
+
93
+ class Pl196xCorpusReader(CategorizedCorpusReader, XMLCorpusReader):
94
+ head_len = 2770
95
+
96
+ def __init__(self, *args, **kwargs):
97
+ if "textid_file" in kwargs:
98
+ self._textids = kwargs["textid_file"]
99
+ else:
100
+ self._textids = None
101
+
102
+ XMLCorpusReader.__init__(self, *args)
103
+ CategorizedCorpusReader.__init__(self, kwargs)
104
+
105
+ self._init_textids()
106
+
107
+ def _init_textids(self):
108
+ self._f2t = defaultdict(list)
109
+ self._t2f = defaultdict(list)
110
+ if self._textids is not None:
111
+ with open(self._textids) as fp:
112
+ for line in fp:
113
+ line = line.strip()
114
+ file_id, text_ids = line.split(" ", 1)
115
+ if file_id not in self.fileids():
116
+ raise ValueError(
117
+ "In text_id mapping file %s: %s not found"
118
+ % (self._textids, file_id)
119
+ )
120
+ for text_id in text_ids.split(self._delimiter):
121
+ self._add_textids(file_id, text_id)
122
+
123
+ def _add_textids(self, file_id, text_id):
124
+ self._f2t[file_id].append(text_id)
125
+ self._t2f[text_id].append(file_id)
126
+
127
+ def _resolve(self, fileids, categories, textids=None):
128
+ tmp = None
129
+ if (
130
+ len(
131
+ list(
132
+ filter(
133
+ lambda accessor: accessor is None,
134
+ (fileids, categories, textids),
135
+ )
136
+ )
137
+ )
138
+ != 1
139
+ ):
140
+
141
+ raise ValueError(
142
+ "Specify exactly one of: fileids, " "categories or textids"
143
+ )
144
+
145
+ if fileids is not None:
146
+ return fileids, None
147
+
148
+ if categories is not None:
149
+ return self.fileids(categories), None
150
+
151
+ if textids is not None:
152
+ if isinstance(textids, str):
153
+ textids = [textids]
154
+ files = sum((self._t2f[t] for t in textids), [])
155
+ tdict = dict()
156
+ for f in files:
157
+ tdict[f] = set(self._f2t[f]) & set(textids)
158
+ return files, tdict
159
+
160
+ def decode_tag(self, tag):
161
+ # to be implemented
162
+ return tag
163
+
164
+ def textids(self, fileids=None, categories=None):
165
+ """
166
+ In the pl196x corpus each category is stored in a single
167
+ file and thus both methods provide identical functionality. In order
168
+ to accommodate finer granularity, a non-standard textids() method was
169
+ implemented. All the main functions can be supplied with a list
170
+ of required chunks---giving much more control to the user.
171
+ """
172
+ fileids, _ = self._resolve(fileids, categories)
173
+ if fileids is None:
174
+ return sorted(self._t2f)
175
+
176
+ if isinstance(fileids, str):
177
+ fileids = [fileids]
178
+ return sorted(sum((self._f2t[d] for d in fileids), []))
179
+
180
+ def words(self, fileids=None, categories=None, textids=None):
181
+ fileids, textids = self._resolve(fileids, categories, textids)
182
+ if fileids is None:
183
+ fileids = self._fileids
184
+ elif isinstance(fileids, str):
185
+ fileids = [fileids]
186
+
187
+ if textids:
188
+ return concat(
189
+ [
190
+ TEICorpusView(
191
+ self.abspath(fileid),
192
+ False,
193
+ False,
194
+ False,
195
+ head_len=self.head_len,
196
+ textids=textids[fileid],
197
+ )
198
+ for fileid in fileids
199
+ ]
200
+ )
201
+ else:
202
+ return concat(
203
+ [
204
+ TEICorpusView(
205
+ self.abspath(fileid),
206
+ False,
207
+ False,
208
+ False,
209
+ head_len=self.head_len,
210
+ )
211
+ for fileid in fileids
212
+ ]
213
+ )
214
+
215
+ def sents(self, fileids=None, categories=None, textids=None):
216
+ fileids, textids = self._resolve(fileids, categories, textids)
217
+ if fileids is None:
218
+ fileids = self._fileids
219
+ elif isinstance(fileids, str):
220
+ fileids = [fileids]
221
+
222
+ if textids:
223
+ return concat(
224
+ [
225
+ TEICorpusView(
226
+ self.abspath(fileid),
227
+ False,
228
+ True,
229
+ False,
230
+ head_len=self.head_len,
231
+ textids=textids[fileid],
232
+ )
233
+ for fileid in fileids
234
+ ]
235
+ )
236
+ else:
237
+ return concat(
238
+ [
239
+ TEICorpusView(
240
+ self.abspath(fileid), False, True, False, head_len=self.head_len
241
+ )
242
+ for fileid in fileids
243
+ ]
244
+ )
245
+
246
+ def paras(self, fileids=None, categories=None, textids=None):
247
+ fileids, textids = self._resolve(fileids, categories, textids)
248
+ if fileids is None:
249
+ fileids = self._fileids
250
+ elif isinstance(fileids, str):
251
+ fileids = [fileids]
252
+
253
+ if textids:
254
+ return concat(
255
+ [
256
+ TEICorpusView(
257
+ self.abspath(fileid),
258
+ False,
259
+ True,
260
+ True,
261
+ head_len=self.head_len,
262
+ textids=textids[fileid],
263
+ )
264
+ for fileid in fileids
265
+ ]
266
+ )
267
+ else:
268
+ return concat(
269
+ [
270
+ TEICorpusView(
271
+ self.abspath(fileid), False, True, True, head_len=self.head_len
272
+ )
273
+ for fileid in fileids
274
+ ]
275
+ )
276
+
277
+ def tagged_words(self, fileids=None, categories=None, textids=None):
278
+ fileids, textids = self._resolve(fileids, categories, textids)
279
+ if fileids is None:
280
+ fileids = self._fileids
281
+ elif isinstance(fileids, str):
282
+ fileids = [fileids]
283
+
284
+ if textids:
285
+ return concat(
286
+ [
287
+ TEICorpusView(
288
+ self.abspath(fileid),
289
+ True,
290
+ False,
291
+ False,
292
+ head_len=self.head_len,
293
+ textids=textids[fileid],
294
+ )
295
+ for fileid in fileids
296
+ ]
297
+ )
298
+ else:
299
+ return concat(
300
+ [
301
+ TEICorpusView(
302
+ self.abspath(fileid), True, False, False, head_len=self.head_len
303
+ )
304
+ for fileid in fileids
305
+ ]
306
+ )
307
+
308
+ def tagged_sents(self, fileids=None, categories=None, textids=None):
309
+ fileids, textids = self._resolve(fileids, categories, textids)
310
+ if fileids is None:
311
+ fileids = self._fileids
312
+ elif isinstance(fileids, str):
313
+ fileids = [fileids]
314
+
315
+ if textids:
316
+ return concat(
317
+ [
318
+ TEICorpusView(
319
+ self.abspath(fileid),
320
+ True,
321
+ True,
322
+ False,
323
+ head_len=self.head_len,
324
+ textids=textids[fileid],
325
+ )
326
+ for fileid in fileids
327
+ ]
328
+ )
329
+ else:
330
+ return concat(
331
+ [
332
+ TEICorpusView(
333
+ self.abspath(fileid), True, True, False, head_len=self.head_len
334
+ )
335
+ for fileid in fileids
336
+ ]
337
+ )
338
+
339
+ def tagged_paras(self, fileids=None, categories=None, textids=None):
340
+ fileids, textids = self._resolve(fileids, categories, textids)
341
+ if fileids is None:
342
+ fileids = self._fileids
343
+ elif isinstance(fileids, str):
344
+ fileids = [fileids]
345
+
346
+ if textids:
347
+ return concat(
348
+ [
349
+ TEICorpusView(
350
+ self.abspath(fileid),
351
+ True,
352
+ True,
353
+ True,
354
+ head_len=self.head_len,
355
+ textids=textids[fileid],
356
+ )
357
+ for fileid in fileids
358
+ ]
359
+ )
360
+ else:
361
+ return concat(
362
+ [
363
+ TEICorpusView(
364
+ self.abspath(fileid), True, True, True, head_len=self.head_len
365
+ )
366
+ for fileid in fileids
367
+ ]
368
+ )
369
+
370
+ def xml(self, fileids=None, categories=None):
371
+ fileids, _ = self._resolve(fileids, categories)
372
+ if len(fileids) == 1:
373
+ return XMLCorpusReader.xml(self, fileids[0])
374
+ else:
375
+ raise TypeError("Expected a single file")
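The block-reading loop in TEICorpusView.read_block above keeps extending its buffer until every opened <text id="..."> element has a matching </text>. Below is a minimal, self-contained sketch of that balancing idea; the function name and demo string are illustrative only and not part of the reader's API.

import io
import re

TEXTID = re.compile(r'text id="(.*?)"')

def read_balanced_block(stream, pagesize=4096):
    # Keep reading until the block contains at least one <text id="..."> and
    # every opened <text> element is closed.
    block = "".join(stream.readlines(pagesize))
    while (block.count("<text id") > block.count("</text>")) or block.count("<text id") == 0:
        line = stream.readline()
        if not line:
            break
        block += line
    return block.replace("\n", "")

demo = io.StringIO('<text id="a">\n<p><s>Ala ma kota</s></p>\n</text>\n')
print(TEXTID.findall(read_balanced_block(demo)))  # ['a']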
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/propbank.py ADDED
@@ -0,0 +1,520 @@
1
+ # Natural Language Toolkit: PropBank Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import re
9
+ from functools import total_ordering
10
+ from xml.etree import ElementTree
11
+
12
+ from nltk.corpus.reader.api import *
13
+ from nltk.corpus.reader.util import *
14
+ from nltk.internals import raise_unorderable_types
15
+ from nltk.tree import Tree
16
+
17
+
18
+ class PropbankCorpusReader(CorpusReader):
19
+ """
20
+ Corpus reader for the propbank corpus, which augments the Penn
21
+ Treebank with information about the predicate argument structure
22
+ of every verb instance. The corpus consists of two parts: the
23
+ predicate-argument annotations themselves, and a set of "frameset
24
+ files" which define the argument labels used by the annotations,
25
+ on a per-verb basis. Each "frameset file" contains one or more
26
+ predicates, such as ``'turn'`` or ``'turn_on'``, each of which is
27
+ divided into coarse-grained word senses called "rolesets". For
28
+ each "roleset", the frameset file provides descriptions of the
29
+ argument roles, along with examples.
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ root,
35
+ propfile,
36
+ framefiles="",
37
+ verbsfile=None,
38
+ parse_fileid_xform=None,
39
+ parse_corpus=None,
40
+ encoding="utf8",
41
+ ):
42
+ """
43
+ :param root: The root directory for this corpus.
44
+ :param propfile: The name of the file containing the predicate-
45
+ argument annotations (relative to ``root``).
46
+ :param framefiles: A list or regexp specifying the frameset
47
+ fileids for this corpus.
48
+ :param parse_fileid_xform: A transform that should be applied
49
+ to the fileids in this corpus. This should be a function
50
+ of one argument (a fileid) that returns a string (the new
51
+ fileid).
52
+ :param parse_corpus: The corpus containing the parse trees
53
+ corresponding to this corpus. These parse trees are
54
+ necessary to resolve the tree pointers used by propbank.
55
+ """
56
+ # If framefiles is specified as a regexp, expand it.
57
+ if isinstance(framefiles, str):
58
+ framefiles = find_corpus_fileids(root, framefiles)
59
+ framefiles = list(framefiles)
60
+ # Initialize the corpus reader.
61
+ CorpusReader.__init__(self, root, [propfile, verbsfile] + framefiles, encoding)
62
+
63
+ # Record our frame fileids & prop file.
64
+ self._propfile = propfile
65
+ self._framefiles = framefiles
66
+ self._verbsfile = verbsfile
67
+ self._parse_fileid_xform = parse_fileid_xform
68
+ self._parse_corpus = parse_corpus
69
+
70
+ def instances(self, baseform=None):
71
+ """
72
+ :return: a corpus view that acts as a list of
73
+ ``PropBankInstance`` objects, one for each verb instance in the corpus.
74
+ """
75
+ kwargs = {}
76
+ if baseform is not None:
77
+ kwargs["instance_filter"] = lambda inst: inst.baseform == baseform
78
+ return StreamBackedCorpusView(
79
+ self.abspath(self._propfile),
80
+ lambda stream: self._read_instance_block(stream, **kwargs),
81
+ encoding=self.encoding(self._propfile),
82
+ )
83
+
84
+ def lines(self):
85
+ """
86
+ :return: a corpus view that acts as a list of strings, one for
87
+ each line in the predicate-argument annotation file.
88
+ """
89
+ return StreamBackedCorpusView(
90
+ self.abspath(self._propfile),
91
+ read_line_block,
92
+ encoding=self.encoding(self._propfile),
93
+ )
94
+
95
+ def roleset(self, roleset_id):
96
+ """
97
+ :return: the xml description for the given roleset.
98
+ """
99
+ baseform = roleset_id.split(".")[0]
100
+ framefile = "frames/%s.xml" % baseform
101
+ if framefile not in self._framefiles:
102
+ raise ValueError("Frameset file for %s not found" % roleset_id)
103
+
104
+ # n.b.: The encoding for XML fileids is specified by the file
105
+ # itself; so we ignore self._encoding here.
106
+ with self.abspath(framefile).open() as fp:
107
+ etree = ElementTree.parse(fp).getroot()
108
+ for roleset in etree.findall("predicate/roleset"):
109
+ if roleset.attrib["id"] == roleset_id:
110
+ return roleset
111
+ raise ValueError(f"Roleset {roleset_id} not found in {framefile}")
112
+
113
+ def rolesets(self, baseform=None):
114
+ """
115
+ :return: list of xml descriptions for rolesets.
116
+ """
117
+ if baseform is not None:
118
+ framefile = "frames/%s.xml" % baseform
119
+ if framefile not in self._framefiles:
120
+ raise ValueError("Frameset file for %s not found" % baseform)
121
+ framefiles = [framefile]
122
+ else:
123
+ framefiles = self._framefiles
124
+
125
+ rsets = []
126
+ for framefile in framefiles:
127
+ # n.b.: The encoding for XML fileids is specified by the file
128
+ # itself; so we ignore self._encoding here.
129
+ with self.abspath(framefile).open() as fp:
130
+ etree = ElementTree.parse(fp).getroot()
131
+ rsets.append(etree.findall("predicate/roleset"))
132
+ return LazyConcatenation(rsets)
133
+
134
+ def verbs(self):
135
+ """
136
+ :return: a corpus view that acts as a list of all verb lemmas
137
+ in this corpus (from the verbs.txt file).
138
+ """
139
+ return StreamBackedCorpusView(
140
+ self.abspath(self._verbsfile),
141
+ read_line_block,
142
+ encoding=self.encoding(self._verbsfile),
143
+ )
144
+
145
+ def _read_instance_block(self, stream, instance_filter=lambda inst: True):
146
+ block = []
147
+
148
+ # Read 100 at a time.
149
+ for i in range(100):
150
+ line = stream.readline().strip()
151
+ if line:
152
+ inst = PropbankInstance.parse(
153
+ line, self._parse_fileid_xform, self._parse_corpus
154
+ )
155
+ if instance_filter(inst):
156
+ block.append(inst)
157
+
158
+ return block
159
+
160
+
161
+ ######################################################################
162
+ # { Propbank Instance & related datatypes
163
+ ######################################################################
164
+
165
+
166
+ class PropbankInstance:
167
+ def __init__(
168
+ self,
169
+ fileid,
170
+ sentnum,
171
+ wordnum,
172
+ tagger,
173
+ roleset,
174
+ inflection,
175
+ predicate,
176
+ arguments,
177
+ parse_corpus=None,
178
+ ):
179
+
180
+ self.fileid = fileid
181
+ """The name of the file containing the parse tree for this
182
+ instance's sentence."""
183
+
184
+ self.sentnum = sentnum
185
+ """The sentence number of this sentence within ``fileid``.
186
+ Indexing starts from zero."""
187
+
188
+ self.wordnum = wordnum
189
+ """The word number of this instance's predicate within its
190
+ containing sentence. Word numbers are indexed starting from
191
+ zero, and include traces and other empty parse elements."""
192
+
193
+ self.tagger = tagger
194
+ """An identifier for the tagger who tagged this instance; or
195
+ ``'gold'`` if this is an adjudicated instance."""
196
+
197
+ self.roleset = roleset
198
+ """The name of the roleset used by this instance's predicate.
199
+ Use ``propbank.roleset() <PropbankCorpusReader.roleset>`` to
200
+ look up information about the roleset."""
201
+
202
+ self.inflection = inflection
203
+ """A ``PropbankInflection`` object describing the inflection of
204
+ this instance's predicate."""
205
+
206
+ self.predicate = predicate
207
+ """A ``PropbankTreePointer`` indicating the position of this
208
+ instance's predicate within its containing sentence."""
209
+
210
+ self.arguments = tuple(arguments)
211
+ """A list of tuples (argloc, argid), specifying the location
212
+ and identifier for each of the predicate's argument in the
213
+ containing sentence. Argument identifiers are strings such as
214
+ ``'ARG0'`` or ``'ARGM-TMP'``. This list does *not* contain
215
+ the predicate."""
216
+
217
+ self.parse_corpus = parse_corpus
218
+ """A corpus reader for the parse trees corresponding to the
219
+ instances in this propbank corpus."""
220
+
221
+ @property
222
+ def baseform(self):
223
+ """The baseform of the predicate."""
224
+ return self.roleset.split(".")[0]
225
+
226
+ @property
227
+ def sensenumber(self):
228
+ """The sense number of the predicate."""
229
+ return self.roleset.split(".")[1]
230
+
231
+ @property
232
+ def predid(self):
233
+ """Identifier of the predicate."""
234
+ return "rel"
235
+
236
+ def __repr__(self):
237
+ return "<PropbankInstance: {}, sent {}, word {}>".format(
238
+ self.fileid,
239
+ self.sentnum,
240
+ self.wordnum,
241
+ )
242
+
243
+ def __str__(self):
244
+ s = "{} {} {} {} {} {}".format(
245
+ self.fileid,
246
+ self.sentnum,
247
+ self.wordnum,
248
+ self.tagger,
249
+ self.roleset,
250
+ self.inflection,
251
+ )
252
+ items = self.arguments + ((self.predicate, "rel"),)
253
+ for (argloc, argid) in sorted(items):
254
+ s += f" {argloc}-{argid}"
255
+ return s
256
+
257
+ def _get_tree(self):
258
+ if self.parse_corpus is None:
259
+ return None
260
+ if self.fileid not in self.parse_corpus.fileids():
261
+ return None
262
+ return self.parse_corpus.parsed_sents(self.fileid)[self.sentnum]
263
+
264
+ tree = property(
265
+ _get_tree,
266
+ doc="""
267
+ The parse tree corresponding to this instance, or None if
268
+ the corresponding tree is not available.""",
269
+ )
270
+
271
+ @staticmethod
272
+ def parse(s, parse_fileid_xform=None, parse_corpus=None):
273
+ pieces = s.split()
274
+ if len(pieces) < 7:
275
+ raise ValueError("Badly formatted propbank line: %r" % s)
276
+
277
+ # Divide the line into its basic pieces.
278
+ (fileid, sentnum, wordnum, tagger, roleset, inflection) = pieces[:6]
279
+ rel = [p for p in pieces[6:] if p.endswith("-rel")]
280
+ args = [p for p in pieces[6:] if not p.endswith("-rel")]
281
+ if len(rel) != 1:
282
+ raise ValueError("Badly formatted propbank line: %r" % s)
283
+
284
+ # Apply the fileid selector, if any.
285
+ if parse_fileid_xform is not None:
286
+ fileid = parse_fileid_xform(fileid)
287
+
288
+ # Convert sentence & word numbers to ints.
289
+ sentnum = int(sentnum)
290
+ wordnum = int(wordnum)
291
+
292
+ # Parse the inflection
293
+ inflection = PropbankInflection.parse(inflection)
294
+
295
+ # Parse the predicate location.
296
+ predicate = PropbankTreePointer.parse(rel[0][:-4])
297
+
298
+ # Parse the arguments.
299
+ arguments = []
300
+ for arg in args:
301
+ argloc, argid = arg.split("-", 1)
302
+ arguments.append((PropbankTreePointer.parse(argloc), argid))
303
+
304
+ # Put it all together.
305
+ return PropbankInstance(
306
+ fileid,
307
+ sentnum,
308
+ wordnum,
309
+ tagger,
310
+ roleset,
311
+ inflection,
312
+ predicate,
313
+ arguments,
314
+ parse_corpus,
315
+ )
316
+
317
+
318
+ class PropbankPointer:
319
+ """
320
+ A pointer used by propbank to identify one or more constituents in
321
+ a parse tree. ``PropbankPointer`` is an abstract base class with
322
+ three concrete subclasses:
323
+
324
+ - ``PropbankTreePointer`` is used to point to single constituents.
325
+ - ``PropbankSplitTreePointer`` is used to point to 'split'
326
+ constituents, which consist of a sequence of two or more
327
+ ``PropbankTreePointer`` pointers.
328
+ - ``PropbankChainTreePointer`` is used to point to entire trace
329
+ chains in a tree. It consists of a sequence of pieces, which
330
+ can be ``PropbankTreePointer`` or ``PropbankSplitTreePointer`` pointers.
331
+ """
332
+
333
+ def __init__(self):
334
+ if self.__class__ == PropbankPointer:
335
+ raise NotImplementedError()
336
+
337
+
338
+ class PropbankChainTreePointer(PropbankPointer):
339
+ def __init__(self, pieces):
340
+ self.pieces = pieces
341
+ """A list of the pieces that make up this chain. Elements may
342
+ be either ``PropbankSplitTreePointer`` or
343
+ ``PropbankTreePointer`` pointers."""
344
+
345
+ def __str__(self):
346
+ return "*".join("%s" % p for p in self.pieces)
347
+
348
+ def __repr__(self):
349
+ return "<PropbankChainTreePointer: %s>" % self
350
+
351
+ def select(self, tree):
352
+ if tree is None:
353
+ raise ValueError("Parse tree not available")
354
+ return Tree("*CHAIN*", [p.select(tree) for p in self.pieces])
355
+
356
+
357
+ class PropbankSplitTreePointer(PropbankPointer):
358
+ def __init__(self, pieces):
359
+ self.pieces = pieces
360
+ """A list of the pieces that make up this chain. Elements are
361
+ all ``PropbankTreePointer`` pointers."""
362
+
363
+ def __str__(self):
364
+ return ",".join("%s" % p for p in self.pieces)
365
+
366
+ def __repr__(self):
367
+ return "<PropbankSplitTreePointer: %s>" % self
368
+
369
+ def select(self, tree):
370
+ if tree is None:
371
+ raise ValueError("Parse tree not available")
372
+ return Tree("*SPLIT*", [p.select(tree) for p in self.pieces])
373
+
374
+
375
+ @total_ordering
376
+ class PropbankTreePointer(PropbankPointer):
377
+ """
378
+ A pointer of the form ``wordnum:height``. Chains of pointers are joined
379
+ with ``*`` and split constituents are joined with ``,`` (see ``parse()``).
380
+
381
+ """
382
+
383
+ def __init__(self, wordnum, height):
384
+ self.wordnum = wordnum
385
+ self.height = height
386
+
387
+ @staticmethod
388
+ def parse(s):
389
+ # Deal with chains (xx*yy*zz)
390
+ pieces = s.split("*")
391
+ if len(pieces) > 1:
392
+ return PropbankChainTreePointer(
393
+ [PropbankTreePointer.parse(elt) for elt in pieces]
394
+ )
395
+
396
+ # Deal with split args (xx,yy,zz)
397
+ pieces = s.split(",")
398
+ if len(pieces) > 1:
399
+ return PropbankSplitTreePointer(
400
+ [PropbankTreePointer.parse(elt) for elt in pieces]
401
+ )
402
+
403
+ # Deal with normal pointers.
404
+ pieces = s.split(":")
405
+ if len(pieces) != 2:
406
+ raise ValueError("bad propbank pointer %r" % s)
407
+ return PropbankTreePointer(int(pieces[0]), int(pieces[1]))
408
+
409
+ def __str__(self):
410
+ return f"{self.wordnum}:{self.height}"
411
+
412
+ def __repr__(self):
413
+ return "PropbankTreePointer(%d, %d)" % (self.wordnum, self.height)
414
+
415
+ def __eq__(self, other):
416
+ while isinstance(other, (PropbankChainTreePointer, PropbankSplitTreePointer)):
417
+ other = other.pieces[0]
418
+
419
+ if not isinstance(other, PropbankTreePointer):
420
+ return self is other
421
+
422
+ return self.wordnum == other.wordnum and self.height == other.height
423
+
424
+ def __ne__(self, other):
425
+ return not self == other
426
+
427
+ def __lt__(self, other):
428
+ while isinstance(other, (PropbankChainTreePointer, PropbankSplitTreePointer)):
429
+ other = other.pieces[0]
430
+
431
+ if not isinstance(other, PropbankTreePointer):
432
+ return id(self) < id(other)
433
+
434
+ return (self.wordnum, -self.height) < (other.wordnum, -other.height)
435
+
436
+ def select(self, tree):
437
+ if tree is None:
438
+ raise ValueError("Parse tree not available")
439
+ return tree[self.treepos(tree)]
440
+
441
+ def treepos(self, tree):
442
+ """
443
+ Convert this pointer to a standard 'tree position' pointer,
444
+ given that it points to the given tree.
445
+ """
446
+ if tree is None:
447
+ raise ValueError("Parse tree not available")
448
+ stack = [tree]
449
+ treepos = []
450
+
451
+ wordnum = 0
452
+ while True:
453
+ # tree node:
454
+ if isinstance(stack[-1], Tree):
455
+ # Select the next child.
456
+ if len(treepos) < len(stack):
457
+ treepos.append(0)
458
+ else:
459
+ treepos[-1] += 1
460
+ # Update the stack.
461
+ if treepos[-1] < len(stack[-1]):
462
+ stack.append(stack[-1][treepos[-1]])
463
+ else:
464
+ # End of node's child list: pop up a level.
465
+ stack.pop()
466
+ treepos.pop()
467
+ # word node:
468
+ else:
469
+ if wordnum == self.wordnum:
470
+ return tuple(treepos[: len(treepos) - self.height - 1])
471
+ else:
472
+ wordnum += 1
473
+ stack.pop()
474
+
475
+
476
+ class PropbankInflection:
477
+ # { Inflection Form
478
+ INFINITIVE = "i"
479
+ GERUND = "g"
480
+ PARTICIPLE = "p"
481
+ FINITE = "v"
482
+ # { Inflection Tense
483
+ FUTURE = "f"
484
+ PAST = "p"
485
+ PRESENT = "n"
486
+ # { Inflection Aspect
487
+ PERFECT = "p"
488
+ PROGRESSIVE = "o"
489
+ PERFECT_AND_PROGRESSIVE = "b"
490
+ # { Inflection Person
491
+ THIRD_PERSON = "3"
492
+ # { Inflection Voice
493
+ ACTIVE = "a"
494
+ PASSIVE = "p"
495
+ # { Inflection
496
+ NONE = "-"
497
+ # }
498
+
499
+ def __init__(self, form="-", tense="-", aspect="-", person="-", voice="-"):
500
+ self.form = form
501
+ self.tense = tense
502
+ self.aspect = aspect
503
+ self.person = person
504
+ self.voice = voice
505
+
506
+ def __str__(self):
507
+ return self.form + self.tense + self.aspect + self.person + self.voice
508
+
509
+ def __repr__(self):
510
+ return "<PropbankInflection: %s>" % self
511
+
512
+ _VALIDATE = re.compile(r"[igpv\-][fpn\-][pob\-][3\-][ap\-]$")
513
+
514
+ @staticmethod
515
+ def parse(s):
516
+ if not isinstance(s, str):
517
+ raise TypeError("expected a string")
518
+ if len(s) != 5 or not PropbankInflection._VALIDATE.match(s):
519
+ raise ValueError("Bad propbank inflection string %r" % s)
520
+ return PropbankInflection(*s)
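To make the instance format handled above concrete, here is a small sketch of PropbankInstance.parse applied to a single made-up annotation line that follows the documented field layout (fileid, sentence number, word number, tagger, roleset, inflection, then pointer-argument pairs); the line itself is invented, not taken from the corpus.

from nltk.corpus.reader.propbank import PropbankInstance

# Hypothetical annotation line, for illustration only.
line = "wsj_0001.mrg 0 8 gold say.01 vp--a 8:0-rel 0:2-ARG0 9:1-ARG1"
inst = PropbankInstance.parse(line)

print(inst.baseform, inst.sensenumber)  # say 01
print(inst.predicate)                   # 8:0  (wordnum:height tree pointer)
print(inst.arguments)                   # ((PropbankTreePointer(0, 2), 'ARG0'), (PropbankTreePointer(9, 1), 'ARG1'))
print(str(inst.inflection))             # vp--a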
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/pros_cons.py ADDED
@@ -0,0 +1,133 @@
1
+ # Natural Language Toolkit: Pros and Cons Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Pierpaolo Pantone <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for the Pros and Cons dataset.
10
+
11
+ - Pros and Cons dataset information -
12
+
13
+ Contact: Bing Liu, [email protected]
14
+ https://www.cs.uic.edu/~liub
15
+
16
+ Distributed with permission.
17
+
18
+ Related papers:
19
+
20
+ - Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences".
21
+ Proceedings of the 22nd International Conference on Computational Linguistics
22
+ (Coling-2008), Manchester, 18-22 August, 2008.
23
+
24
+ - Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and Comparing
25
+ Opinions on the Web". Proceedings of the 14th international World Wide Web
26
+ conference (WWW-2005), May 10-14, 2005, in Chiba, Japan.
27
+ """
28
+ import re
29
+
30
+ from nltk.corpus.reader.api import *
31
+ from nltk.tokenize import *
32
+
33
+
34
+ class ProsConsCorpusReader(CategorizedCorpusReader, CorpusReader):
35
+ """
36
+ Reader for the Pros and Cons sentence dataset.
37
+
38
+ >>> from nltk.corpus import pros_cons
39
+ >>> pros_cons.sents(categories='Cons') # doctest: +NORMALIZE_WHITESPACE
40
+ [['East', 'batteries', '!', 'On', '-', 'off', 'switch', 'too', 'easy',
41
+ 'to', 'maneuver', '.'], ['Eats', '...', 'no', ',', 'GULPS', 'batteries'],
42
+ ...]
43
+ >>> pros_cons.words('IntegratedPros.txt')
44
+ ['Easy', 'to', 'use', ',', 'economical', '!', ...]
45
+ """
46
+
47
+ CorpusView = StreamBackedCorpusView
48
+
49
+ def __init__(
50
+ self,
51
+ root,
52
+ fileids,
53
+ word_tokenizer=WordPunctTokenizer(),
54
+ encoding="utf8",
55
+ **kwargs
56
+ ):
57
+ """
58
+ :param root: The root directory for the corpus.
59
+ :param fileids: a list or regexp specifying the fileids in the corpus.
60
+ :param word_tokenizer: a tokenizer for breaking sentences or paragraphs
61
+ into words. Default: `WordPunctTokenizer`
62
+ :param encoding: the encoding that should be used to read the corpus.
63
+ :param kwargs: additional parameters passed to CategorizedCorpusReader.
64
+ """
65
+
66
+ CorpusReader.__init__(self, root, fileids, encoding)
67
+ CategorizedCorpusReader.__init__(self, kwargs)
68
+ self._word_tokenizer = word_tokenizer
69
+
70
+ def sents(self, fileids=None, categories=None):
71
+ """
72
+ Return all sentences in the corpus or in the specified files/categories.
73
+
74
+ :param fileids: a list or regexp specifying the ids of the files whose
75
+ sentences have to be returned.
76
+ :param categories: a list specifying the categories whose sentences
77
+ have to be returned.
78
+ :return: the given file(s) as a list of sentences. Each sentence is
79
+ tokenized using the specified word_tokenizer.
80
+ :rtype: list(list(str))
81
+ """
82
+ fileids = self._resolve(fileids, categories)
83
+ if fileids is None:
84
+ fileids = self._fileids
85
+ elif isinstance(fileids, str):
86
+ fileids = [fileids]
87
+ return concat(
88
+ [
89
+ self.CorpusView(path, self._read_sent_block, encoding=enc)
90
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
91
+ ]
92
+ )
93
+
94
+ def words(self, fileids=None, categories=None):
95
+ """
96
+ Return all words and punctuation symbols in the corpus or in the specified
97
+ files/categories.
98
+
99
+ :param fileids: a list or regexp specifying the ids of the files whose
100
+ words have to be returned.
101
+ :param categories: a list specifying the categories whose words have
102
+ to be returned.
103
+ :return: the given file(s) as a list of words and punctuation symbols.
104
+ :rtype: list(str)
105
+ """
106
+ fileids = self._resolve(fileids, categories)
107
+ if fileids is None:
108
+ fileids = self._fileids
109
+ elif isinstance(fileids, str):
110
+ fileids = [fileids]
111
+ return concat(
112
+ [
113
+ self.CorpusView(path, self._read_word_block, encoding=enc)
114
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
115
+ ]
116
+ )
117
+
118
+ def _read_sent_block(self, stream):
119
+ sents = []
120
+ for i in range(20): # Read 20 lines at a time.
121
+ line = stream.readline()
122
+ if not line:
123
+ continue
124
+ sent = re.match(r"^(?!\n)\s*<(Pros|Cons)>(.*)</(?:Pros|Cons)>", line)
125
+ if sent:
126
+ sents.append(self._word_tokenizer.tokenize(sent.group(2).strip()))
127
+ return sents
128
+
129
+ def _read_word_block(self, stream):
130
+ words = []
131
+ for sent in self._read_sent_block(stream):
132
+ words.extend(sent)
133
+ return words
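A quick sketch of the per-line parse performed by _read_sent_block above; the sample line imitates the corpus markup and is invented for illustration.

import re
from nltk.tokenize import WordPunctTokenizer

line = "<Pros>great battery life, very light</Pros>\n"
match = re.match(r"^(?!\n)\s*<(Pros|Cons)>(.*)</(?:Pros|Cons)>", line)
if match:
    print(match.group(1))  # Pros
    print(WordPunctTokenizer().tokenize(match.group(2).strip()))
    # ['great', 'battery', 'life', ',', 'very', 'light']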
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/reviews.py ADDED
@@ -0,0 +1,331 @@
1
+ # Natural Language Toolkit: Product Reviews Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Pierpaolo Pantone <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ CorpusReader for reviews corpora (syntax based on Customer Review Corpus).
10
+
11
+ Customer Review Corpus information
12
+ ==================================
13
+
14
+ Annotated by: Minqing Hu and Bing Liu, 2004.
15
+ Department of Computer Science
16
+ University of Illinois at Chicago
17
+
18
+ Contact: Bing Liu, [email protected]
19
+ https://www.cs.uic.edu/~liub
20
+
21
+ Distributed with permission.
22
+
23
+ The "product_reviews_1" and "product_reviews_2" datasets respectively contain
24
+ annotated customer reviews of 5 and 9 products from amazon.com.
25
+
26
+ Related papers:
27
+
28
+ - Minqing Hu and Bing Liu. "Mining and summarizing customer reviews".
29
+ Proceedings of the ACM SIGKDD International Conference on Knowledge
30
+ Discovery & Data Mining (KDD-04), 2004.
31
+
32
+ - Minqing Hu and Bing Liu. "Mining Opinion Features in Customer Reviews".
33
+ Proceedings of the Nineteenth National Conference on Artificial Intelligence
34
+ (AAAI-2004), 2004.
35
+
36
+ - Xiaowen Ding, Bing Liu and Philip S. Yu. "A Holistic Lexicon-Based Approach to
37
+ Opinion Mining." Proceedings of First ACM International Conference on Web
38
+ Search and Data Mining (WSDM-2008), Feb 11-12, 2008, Stanford University,
39
+ Stanford, California, USA.
40
+
41
+ Symbols used in the annotated reviews:
42
+
43
+ :[t]: the title of the review: Each [t] tag starts a review.
44
+ :xxxx[+|-n]: xxxx is a product feature.
45
+ :[+n]: Positive opinion, n is the opinion strength: 3 strongest, and 1 weakest.
46
+ Note that the strength is quite subjective.
47
+ You may want to ignore it and only consider + and -
48
+ :[-n]: Negative opinion
49
+ :##: start of each sentence. Each line is a sentence.
50
+ :[u]: feature that does not appear in the sentence.
51
+ :[p]: feature that does not appear in the sentence; pronoun resolution is needed.
52
+ :[s]: suggestion or recommendation.
53
+ :[cc]: comparison with a competing product from a different brand.
54
+ :[cs]: comparison with a competing product from the same brand.
55
+
56
+ Note: Some of the files (e.g. "ipod.txt", "Canon PowerShot SD500.txt") do not
57
+ provide separation between different reviews. This is due to the fact that
58
+ the dataset was specifically designed for aspect/feature-based sentiment
59
+ analysis, for which sentence-level annotation is sufficient. For document-
60
+ level classification and analysis, this peculiarity should be taken into
61
+ consideration.
62
+ """
63
+
64
+ import re
65
+
66
+ from nltk.corpus.reader.api import *
67
+ from nltk.tokenize import *
68
+
69
+ TITLE = re.compile(r"^\[t\](.*)$") # [t] Title
70
+ FEATURES = re.compile(
71
+ r"((?:(?:\w+\s)+)?\w+)\[((?:\+|\-)\d)\]"
72
+ ) # find 'feature' in feature[+3]
73
+ NOTES = re.compile(r"\[(?!t)(p|u|s|cc|cs)\]") # find 'p' in camera[+2][p]
74
+ SENT = re.compile(r"##(.*)$") # find tokenized sentence
75
+
76
+
77
+ class Review:
78
+ """
79
+ A Review is the main block of a ReviewsCorpusReader.
80
+ """
81
+
82
+ def __init__(self, title=None, review_lines=None):
83
+ """
84
+ :param title: the title of the review.
85
+ :param review_lines: the list of the ReviewLines that belong to the Review.
86
+ """
87
+ self.title = title
88
+ if review_lines is None:
89
+ self.review_lines = []
90
+ else:
91
+ self.review_lines = review_lines
92
+
93
+ def add_line(self, review_line):
94
+ """
95
+ Add a line (ReviewLine) to the review.
96
+
97
+ :param review_line: a ReviewLine instance that belongs to the Review.
98
+ """
99
+ assert isinstance(review_line, ReviewLine)
100
+ self.review_lines.append(review_line)
101
+
102
+ def features(self):
103
+ """
104
+ Return a list of features in the review. Each feature is a tuple made of
105
+ the specific item feature and the opinion strength about that feature.
106
+
107
+ :return: all features of the review as a list of tuples (feat, score).
108
+ :rtype: list(tuple)
109
+ """
110
+ features = []
111
+ for review_line in self.review_lines:
112
+ features.extend(review_line.features)
113
+ return features
114
+
115
+ def sents(self):
116
+ """
117
+ Return all tokenized sentences in the review.
118
+
119
+ :return: all sentences of the review as lists of tokens.
120
+ :rtype: list(list(str))
121
+ """
122
+ return [review_line.sent for review_line in self.review_lines]
123
+
124
+ def __repr__(self):
125
+ return 'Review(title="{}", review_lines={})'.format(
126
+ self.title, self.review_lines
127
+ )
128
+
129
+
130
+ class ReviewLine:
131
+ """
132
+ A ReviewLine represents a sentence of the review, together with (optional)
133
+ annotations of its features and notes about the reviewed item.
134
+ """
135
+
136
+ def __init__(self, sent, features=None, notes=None):
137
+ self.sent = sent
138
+ if features is None:
139
+ self.features = []
140
+ else:
141
+ self.features = features
142
+
143
+ if notes is None:
144
+ self.notes = []
145
+ else:
146
+ self.notes = notes
147
+
148
+ def __repr__(self):
149
+ return "ReviewLine(features={}, notes={}, sent={})".format(
150
+ self.features, self.notes, self.sent
151
+ )
152
+
153
+
154
+ class ReviewsCorpusReader(CorpusReader):
155
+ """
156
+ Reader for the Customer Review Data dataset by Hu, Liu (2004).
157
+ Note: we are not applying any sentence tokenization at the moment, just word
158
+ tokenization.
159
+
160
+ >>> from nltk.corpus import product_reviews_1
161
+ >>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt')
162
+ >>> review = camera_reviews[0]
163
+ >>> review.sents()[0] # doctest: +NORMALIZE_WHITESPACE
164
+ ['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am',
165
+ 'extremely', 'satisfied', 'with', 'the', 'purchase', '.']
166
+ >>> review.features() # doctest: +NORMALIZE_WHITESPACE
167
+ [('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'),
168
+ ('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'),
169
+ ('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'),
170
+ ('option', '+1')]
171
+
172
+ We can also reach the same information directly from the stream:
173
+
174
+ >>> product_reviews_1.features('Canon_G3.txt')
175
+ [('canon powershot g3', '+3'), ('use', '+2'), ...]
176
+
177
+ We can compute stats for specific product features:
178
+
179
+ >>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
180
+ >>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
181
+ >>> mean = tot / n_reviews
182
+ >>> print(n_reviews, tot, mean)
183
+ 15 24 1.6
184
+ """
185
+
186
+ CorpusView = StreamBackedCorpusView
187
+
188
+ def __init__(
189
+ self, root, fileids, word_tokenizer=WordPunctTokenizer(), encoding="utf8"
190
+ ):
191
+ """
192
+ :param root: The root directory for the corpus.
193
+ :param fileids: a list or regexp specifying the fileids in the corpus.
194
+ :param word_tokenizer: a tokenizer for breaking sentences or paragraphs
195
+ into words. Default: `WordPunctTokenizer`
196
+ :param encoding: the encoding that should be used to read the corpus.
197
+ """
198
+
199
+ CorpusReader.__init__(self, root, fileids, encoding)
200
+ self._word_tokenizer = word_tokenizer
201
+ self._readme = "README.txt"
202
+
203
+ def features(self, fileids=None):
204
+ """
205
+ Return a list of features. Each feature is a tuple made of the specific
206
+ item feature and the opinion strength about that feature.
207
+
208
+ :param fileids: a list or regexp specifying the ids of the files whose
209
+ features have to be returned.
210
+ :return: all features for the item(s) in the given file(s).
211
+ :rtype: list(tuple)
212
+ """
213
+ if fileids is None:
214
+ fileids = self._fileids
215
+ elif isinstance(fileids, str):
216
+ fileids = [fileids]
217
+ return concat(
218
+ [
219
+ self.CorpusView(fileid, self._read_features, encoding=enc)
220
+ for (fileid, enc) in self.abspaths(fileids, True)
221
+ ]
222
+ )
223
+
224
+ def reviews(self, fileids=None):
225
+ """
226
+ Return all the reviews as a list of Review objects. If `fileids` is
227
+ specified, return all the reviews from each of the specified files.
228
+
229
+ :param fileids: a list or regexp specifying the ids of the files whose
230
+ reviews have to be returned.
231
+ :return: the given file(s) as a list of reviews.
232
+ """
233
+ if fileids is None:
234
+ fileids = self._fileids
235
+ return concat(
236
+ [
237
+ self.CorpusView(fileid, self._read_review_block, encoding=enc)
238
+ for (fileid, enc) in self.abspaths(fileids, True)
239
+ ]
240
+ )
241
+
242
+ def sents(self, fileids=None):
243
+ """
244
+ Return all sentences in the corpus or in the specified files.
245
+
246
+ :param fileids: a list or regexp specifying the ids of the files whose
247
+ sentences have to be returned.
248
+ :return: the given file(s) as a list of sentences, each encoded as a
249
+ list of word strings.
250
+ :rtype: list(list(str))
251
+ """
252
+ return concat(
253
+ [
254
+ self.CorpusView(path, self._read_sent_block, encoding=enc)
255
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
256
+ ]
257
+ )
258
+
259
+ def words(self, fileids=None):
260
+ """
261
+ Return all words and punctuation symbols in the corpus or in the specified
262
+ files.
263
+
264
+ :param fileids: a list or regexp specifying the ids of the files whose
265
+ words have to be returned.
266
+ :return: the given file(s) as a list of words and punctuation symbols.
267
+ :rtype: list(str)
268
+ """
269
+ return concat(
270
+ [
271
+ self.CorpusView(path, self._read_word_block, encoding=enc)
272
+ for (path, enc, fileid) in self.abspaths(fileids, True, True)
273
+ ]
274
+ )
275
+
276
+ def _read_features(self, stream):
277
+ features = []
278
+ for i in range(20):
279
+ line = stream.readline()
280
+ if not line:
281
+ return features
282
+ features.extend(re.findall(FEATURES, line))
283
+ return features
284
+
285
+ def _read_review_block(self, stream):
286
+ while True:
287
+ line = stream.readline()
288
+ if not line:
289
+ return [] # end of file.
290
+ title_match = re.match(TITLE, line)
291
+ if title_match:
292
+ review = Review(
293
+ title=title_match.group(1).strip()
294
+ ) # We create a new review
295
+ break
296
+
297
+ # Scan until we find another line matching the regexp, or EOF.
298
+ while True:
299
+ oldpos = stream.tell()
300
+ line = stream.readline()
301
+ # End of file:
302
+ if not line:
303
+ return [review]
304
+ # Start of a new review: backup to just before it starts, and
305
+ # return the review we've already collected.
306
+ if re.match(TITLE, line):
307
+ stream.seek(oldpos)
308
+ return [review]
309
+ # Anything else is part of the review line.
310
+ feats = re.findall(FEATURES, line)
311
+ notes = re.findall(NOTES, line)
312
+ sent = re.findall(SENT, line)
313
+ if sent:
314
+ sent = self._word_tokenizer.tokenize(sent[0])
315
+ review_line = ReviewLine(sent=sent, features=feats, notes=notes)
316
+ review.add_line(review_line)
317
+
318
+ def _read_sent_block(self, stream):
319
+ sents = []
320
+ for review in self._read_review_block(stream):
321
+ sents.extend([sent for sent in review.sents()])
322
+ return sents
323
+
324
+ def _read_word_block(self, stream):
325
+ words = []
326
+ for i in range(20): # Read 20 lines at a time.
327
+ line = stream.readline()
328
+ sent = re.findall(SENT, line)
329
+ if sent:
330
+ words.extend(self._word_tokenizer.tokenize(sent[0]))
331
+ return words
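A short sketch of how the module-level regexes above decompose a single annotated review line; the sample text is invented but follows the annotation symbols documented in the module docstring.

from nltk.corpus.reader.reviews import FEATURES, NOTES, SENT, TITLE

line = "battery life[+2][u]##the battery life is excellent .\n"
print(TITLE.findall(line))     # []  (not a [t] title line)
print(FEATURES.findall(line))  # [('battery life', '+2')]
print(NOTES.findall(line))     # ['u']
print(SENT.findall(line))      # ['the battery life is excellent .']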
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/rte.py ADDED
@@ -0,0 +1,146 @@
1
+ # Natural Language Toolkit: RTE Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora.
10
+
11
+ The files were taken from the RTE1, RTE2 and RTE3 datasets and
12
+ were regularized.
13
+
14
+ Filenames are of the form rte*_dev.xml and rte*_test.xml. The latter are the
15
+ gold standard annotated files.
16
+
17
+ Each entailment corpus is a list of 'text'/'hypothesis' pairs. The following
18
+ example is taken from RTE3::
19
+
20
+ <pair id="1" entailment="YES" task="IE" length="short" >
21
+
22
+ <t>The sale was made to pay Yukos' US$ 27.5 billion tax bill,
23
+ Yuganskneftegaz was originally sold for US$ 9.4 billion to a little known
24
+ company Baikalfinansgroup which was later bought by the Russian
25
+ state-owned oil company Rosneft .</t>
26
+
27
+ <h>Baikalfinansgroup was sold to Rosneft.</h>
28
+ </pair>
29
+
30
+ In order to provide globally unique IDs for each pair, a new attribute
31
+ ``challenge`` has been added to the root element ``entailment-corpus`` of each
32
+ file, taking values 1, 2 or 3. The GID is formatted 'm-n', where 'm' is the
33
+ challenge number and 'n' is the pair ID.
34
+ """
35
+ from nltk.corpus.reader.api import *
36
+ from nltk.corpus.reader.util import *
37
+ from nltk.corpus.reader.xmldocs import *
38
+
39
+
40
+ def norm(value_string):
41
+ """
42
+ Normalize the string value in an RTE pair's ``value`` or ``entailment``
43
+ attribute as an integer (1, 0).
44
+
45
+ :param value_string: the label used to classify a text/hypothesis pair
46
+ :type value_string: str
47
+ :rtype: int
48
+ """
49
+
50
+ valdict = {"TRUE": 1, "FALSE": 0, "YES": 1, "NO": 0}
51
+ return valdict[value_string.upper()]
52
+
53
+
54
+ class RTEPair:
55
+ """
56
+ Container for RTE text-hypothesis pairs.
57
+
58
+ The entailment relation is signalled by the ``value`` attribute in RTE1, and by
59
+ ``entailment`` in RTE2 and RTE3. These both get mapped on to the ``entailment``
60
+ attribute of this class.
61
+ """
62
+
63
+ def __init__(
64
+ self,
65
+ pair,
66
+ challenge=None,
67
+ id=None,
68
+ text=None,
69
+ hyp=None,
70
+ value=None,
71
+ task=None,
72
+ length=None,
73
+ ):
74
+ """
75
+ :param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3)
76
+ :param id: identifier for the pair
77
+ :param text: the text component of the pair
78
+ :param hyp: the hypothesis component of the pair
79
+ :param value: classification label for the pair
80
+ :param task: attribute for the particular NLP task that the data was drawn from
81
+ :param length: attribute for the length of the text of the pair
82
+ """
83
+ self.challenge = challenge
84
+ self.id = pair.attrib["id"]
85
+ self.gid = f"{self.challenge}-{self.id}"
86
+ self.text = pair[0].text
87
+ self.hyp = pair[1].text
88
+
89
+ if "value" in pair.attrib:
90
+ self.value = norm(pair.attrib["value"])
91
+ elif "entailment" in pair.attrib:
92
+ self.value = norm(pair.attrib["entailment"])
93
+ else:
94
+ self.value = value
95
+ if "task" in pair.attrib:
96
+ self.task = pair.attrib["task"]
97
+ else:
98
+ self.task = task
99
+ if "length" in pair.attrib:
100
+ self.length = pair.attrib["length"]
101
+ else:
102
+ self.length = length
103
+
104
+ def __repr__(self):
105
+ if self.challenge:
106
+ return f"<RTEPair: gid={self.challenge}-{self.id}>"
107
+ else:
108
+ return "<RTEPair: id=%s>" % self.id
109
+
110
+
111
+ class RTECorpusReader(XMLCorpusReader):
112
+ """
113
+ Corpus reader for corpora in RTE challenges.
114
+
115
+ This is just a wrapper around the XMLCorpusReader. See module docstring above for the expected
116
+ structure of input documents.
117
+ """
118
+
119
+ def _read_etree(self, doc):
120
+ """
121
+ Map the XML input into an RTEPair.
122
+
123
+ This uses the ``iter()`` method from the ElementTree package to
124
+ find all the ``<pair>`` elements.
125
+
126
+ :param doc: a parsed XML document
127
+ :rtype: list(RTEPair)
128
+ """
129
+ try:
130
+ challenge = doc.attrib["challenge"]
131
+ except KeyError:
132
+ challenge = None
133
+ pairiter = doc.iter("pair")
134
+ return [RTEPair(pair, challenge=challenge) for pair in pairiter]
135
+
136
+ def pairs(self, fileids):
137
+ """
138
+ Build a list of RTEPairs from a RTE corpus.
139
+
140
+ :param fileids: a list of RTE corpus fileids
141
+ :type: list
142
+ :rtype: list(RTEPair)
143
+ """
144
+ if isinstance(fileids, str):
145
+ fileids = [fileids]
146
+ return concat([self._read_etree(self.xml(fileid)) for fileid in fileids])
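A compact sketch of how one <pair> element becomes an RTEPair; the XML snippet is adapted from the module docstring rather than read from a corpus file.

from xml.etree import ElementTree
from nltk.corpus.reader.rte import RTEPair

xml = (
    '<pair id="1" entailment="YES" task="IE" length="short">'
    "<t>Baikalfinansgroup was later bought by Rosneft.</t>"
    "<h>Baikalfinansgroup was sold to Rosneft.</h>"
    "</pair>"
)
pair = RTEPair(ElementTree.fromstring(xml), challenge="3")
print(pair.gid, pair.value, pair.task, pair.length)  # 3-1 1 IE short
print(pair.hyp)                                      # Baikalfinansgroup was sold to Rosneft.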
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/string_category.py ADDED
@@ -0,0 +1,56 @@
1
+ # Natural Language Toolkit: String Category Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Read tuples from a corpus consisting of categorized strings.
11
+ For example, from the question classification corpus:
12
+
13
+ NUM:dist How far is it from Denver to Aspen ?
14
+ LOC:city What county is Modesto , California in ?
15
+ HUM:desc Who was Galileo ?
16
+ DESC:def What is an atom ?
17
+ NUM:date When did Hawaii become a state ?
18
+ """
19
+
20
+ from nltk.corpus.reader.api import *
21
+
22
+ # based on PPAttachmentCorpusReader
23
+ from nltk.corpus.reader.util import *
24
+
25
+
26
+ # [xx] Should the order of the tuple be reversed -- in most other places
27
+ # in nltk, we use the form (data, tag) -- e.g., tagged words and
28
+ # labeled texts for classifiers.
29
+ class StringCategoryCorpusReader(CorpusReader):
30
+ def __init__(self, root, fileids, delimiter=" ", encoding="utf8"):
31
+ """
32
+ :param root: The root directory for this corpus.
33
+ :param fileids: A list or regexp specifying the fileids in this corpus.
34
+ :param delimiter: Field delimiter
35
+ """
36
+ CorpusReader.__init__(self, root, fileids, encoding)
37
+ self._delimiter = delimiter
38
+
39
+ def tuples(self, fileids=None):
40
+ if fileids is None:
41
+ fileids = self._fileids
42
+ elif isinstance(fileids, str):
43
+ fileids = [fileids]
44
+ return concat(
45
+ [
46
+ StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc)
47
+ for (fileid, enc) in self.abspaths(fileids, True)
48
+ ]
49
+ )
50
+
51
+ def _read_tuple_block(self, stream):
52
+ line = stream.readline().strip()
53
+ if line:
54
+ return [tuple(line.split(self._delimiter, 1))]
55
+ else:
56
+ return []
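The block reader above yields one (category, text) tuple per line; this standalone sketch mirrors that split on a line in the question-classification format shown in the module docstring.

line = "NUM:dist How far is it from Denver to Aspen ?"
category, text = line.split(" ", 1)  # same split as _read_tuple_block
print(category)  # NUM:dist
print(text)      # How far is it from Denver to Aspen ?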
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/tagged.py ADDED
@@ -0,0 +1,354 @@
1
+ # Natural Language Toolkit: Tagged Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Jacob Perkins <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ A reader for corpora whose documents contain part-of-speech-tagged words.
12
+ """
13
+
14
+ import os
15
+
16
+ from nltk.corpus.reader.api import *
17
+ from nltk.corpus.reader.timit import read_timit_block
18
+ from nltk.corpus.reader.util import *
19
+ from nltk.tag import map_tag, str2tuple
20
+ from nltk.tokenize import *
21
+
22
+
23
+ class TaggedCorpusReader(CorpusReader):
24
+ """
25
+ Reader for simple part-of-speech tagged corpora. Paragraphs are
26
+ assumed to be split using blank lines. Sentences and words can be
27
+ tokenized using the default tokenizers, or by custom tokenizers
28
+ specified as parameters to the constructor. Words are parsed
29
+ using ``nltk.tag.str2tuple``. By default, ``'/'`` is used as the
30
+ separator. I.e., words should have the form::
31
+
32
+ word1/tag1 word2/tag2 word3/tag3 ...
33
+
34
+ But custom separators may be specified as parameters to the
35
+ constructor. Part of speech tags are case-normalized to upper
36
+ case.
37
+ """
38
+
39
+ def __init__(
40
+ self,
41
+ root,
42
+ fileids,
43
+ sep="/",
44
+ word_tokenizer=WhitespaceTokenizer(),
45
+ sent_tokenizer=RegexpTokenizer("\n", gaps=True),
46
+ para_block_reader=read_blankline_block,
47
+ encoding="utf8",
48
+ tagset=None,
49
+ ):
50
+ """
51
+ Construct a new Tagged Corpus reader for a set of documents
52
+ located at the given root directory. Example usage:
53
+
54
+ >>> root = '/...path to corpus.../'
55
+ >>> reader = TaggedCorpusReader(root, '.*', '.txt') # doctest: +SKIP
56
+
57
+ :param root: The root directory for this corpus.
58
+ :param fileids: A list or regexp specifying the fileids in this corpus.
59
+ """
60
+ CorpusReader.__init__(self, root, fileids, encoding)
61
+ self._sep = sep
62
+ self._word_tokenizer = word_tokenizer
63
+ self._sent_tokenizer = sent_tokenizer
64
+ self._para_block_reader = para_block_reader
65
+ self._tagset = tagset
66
+
67
+ def words(self, fileids=None):
68
+ """
69
+ :return: the given file(s) as a list of words
70
+ and punctuation symbols.
71
+ :rtype: list(str)
72
+ """
73
+ return concat(
74
+ [
75
+ TaggedCorpusView(
76
+ fileid,
77
+ enc,
78
+ False,
79
+ False,
80
+ False,
81
+ self._sep,
82
+ self._word_tokenizer,
83
+ self._sent_tokenizer,
84
+ self._para_block_reader,
85
+ None,
86
+ )
87
+ for (fileid, enc) in self.abspaths(fileids, True)
88
+ ]
89
+ )
90
+
91
+ def sents(self, fileids=None):
92
+ """
93
+ :return: the given file(s) as a list of
94
+ sentences or utterances, each encoded as a list of word
95
+ strings.
96
+ :rtype: list(list(str))
97
+ """
98
+ return concat(
99
+ [
100
+ TaggedCorpusView(
101
+ fileid,
102
+ enc,
103
+ False,
104
+ True,
105
+ False,
106
+ self._sep,
107
+ self._word_tokenizer,
108
+ self._sent_tokenizer,
109
+ self._para_block_reader,
110
+ None,
111
+ )
112
+ for (fileid, enc) in self.abspaths(fileids, True)
113
+ ]
114
+ )
115
+
116
+ def paras(self, fileids=None):
117
+ """
118
+ :return: the given file(s) as a list of
119
+ paragraphs, each encoded as a list of sentences, which are
120
+ in turn encoded as lists of word strings.
121
+ :rtype: list(list(list(str)))
122
+ """
123
+ return concat(
124
+ [
125
+ TaggedCorpusView(
126
+ fileid,
127
+ enc,
128
+ False,
129
+ True,
130
+ True,
131
+ self._sep,
132
+ self._word_tokenizer,
133
+ self._sent_tokenizer,
134
+ self._para_block_reader,
135
+ None,
136
+ )
137
+ for (fileid, enc) in self.abspaths(fileids, True)
138
+ ]
139
+ )
140
+
141
+ def tagged_words(self, fileids=None, tagset=None):
142
+ """
143
+ :return: the given file(s) as a list of tagged
144
+ words and punctuation symbols, encoded as tuples
145
+ ``(word,tag)``.
146
+ :rtype: list(tuple(str,str))
147
+ """
148
+ if tagset and tagset != self._tagset:
149
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
150
+ else:
151
+ tag_mapping_function = None
152
+ return concat(
153
+ [
154
+ TaggedCorpusView(
155
+ fileid,
156
+ enc,
157
+ True,
158
+ False,
159
+ False,
160
+ self._sep,
161
+ self._word_tokenizer,
162
+ self._sent_tokenizer,
163
+ self._para_block_reader,
164
+ tag_mapping_function,
165
+ )
166
+ for (fileid, enc) in self.abspaths(fileids, True)
167
+ ]
168
+ )
169
+
170
+ def tagged_sents(self, fileids=None, tagset=None):
171
+ """
172
+ :return: the given file(s) as a list of
173
+ sentences, each encoded as a list of ``(word,tag)`` tuples.
174
+
175
+ :rtype: list(list(tuple(str,str)))
176
+ """
177
+ if tagset and tagset != self._tagset:
178
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
179
+ else:
180
+ tag_mapping_function = None
181
+ return concat(
182
+ [
183
+ TaggedCorpusView(
184
+ fileid,
185
+ enc,
186
+ True,
187
+ True,
188
+ False,
189
+ self._sep,
190
+ self._word_tokenizer,
191
+ self._sent_tokenizer,
192
+ self._para_block_reader,
193
+ tag_mapping_function,
194
+ )
195
+ for (fileid, enc) in self.abspaths(fileids, True)
196
+ ]
197
+ )
198
+
199
+ def tagged_paras(self, fileids=None, tagset=None):
200
+ """
201
+ :return: the given file(s) as a list of
202
+ paragraphs, each encoded as a list of sentences, which are
203
+ in turn encoded as lists of ``(word,tag)`` tuples.
204
+ :rtype: list(list(list(tuple(str,str))))
205
+ """
206
+ if tagset and tagset != self._tagset:
207
+ tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t)
208
+ else:
209
+ tag_mapping_function = None
210
+ return concat(
211
+ [
212
+ TaggedCorpusView(
213
+ fileid,
214
+ enc,
215
+ True,
216
+ True,
217
+ True,
218
+ self._sep,
219
+ self._word_tokenizer,
220
+ self._sent_tokenizer,
221
+ self._para_block_reader,
222
+ tag_mapping_function,
223
+ )
224
+ for (fileid, enc) in self.abspaths(fileids, True)
225
+ ]
226
+ )
227
+
228
+
229
+ class CategorizedTaggedCorpusReader(CategorizedCorpusReader, TaggedCorpusReader):
230
+ """
231
+ A reader for part-of-speech tagged corpora whose documents are
232
+ divided into categories based on their file identifiers.
233
+ """
234
+
235
+ def __init__(self, *args, **kwargs):
236
+ """
237
+ Initialize the corpus reader. Categorization arguments
238
+ (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
239
+ the ``CategorizedCorpusReader`` constructor. The remaining arguments
240
+ are passed to the ``TaggedCorpusReader``.
241
+ """
242
+ CategorizedCorpusReader.__init__(self, kwargs)
243
+ TaggedCorpusReader.__init__(self, *args, **kwargs)
244
+
245
+ def tagged_words(self, fileids=None, categories=None, tagset=None):
246
+ return super().tagged_words(self._resolve(fileids, categories), tagset)
247
+
248
+ def tagged_sents(self, fileids=None, categories=None, tagset=None):
249
+ return super().tagged_sents(self._resolve(fileids, categories), tagset)
250
+
251
+ def tagged_paras(self, fileids=None, categories=None, tagset=None):
252
+ return super().tagged_paras(self._resolve(fileids, categories), tagset)
253
+
254
+
255
+ class TaggedCorpusView(StreamBackedCorpusView):
256
+ """
257
+ A specialized corpus view for tagged documents. It can be
258
+ customized via flags to divide the tagged corpus documents up by
259
+ sentence or paragraph, and to include or omit part of speech tags.
260
+ ``TaggedCorpusView`` objects are typically created by
261
+ ``TaggedCorpusReader`` (not directly by nltk users).
262
+ """
263
+
264
+ def __init__(
265
+ self,
266
+ corpus_file,
267
+ encoding,
268
+ tagged,
269
+ group_by_sent,
270
+ group_by_para,
271
+ sep,
272
+ word_tokenizer,
273
+ sent_tokenizer,
274
+ para_block_reader,
275
+ tag_mapping_function=None,
276
+ ):
277
+ self._tagged = tagged
278
+ self._group_by_sent = group_by_sent
279
+ self._group_by_para = group_by_para
280
+ self._sep = sep
281
+ self._word_tokenizer = word_tokenizer
282
+ self._sent_tokenizer = sent_tokenizer
283
+ self._para_block_reader = para_block_reader
284
+ self._tag_mapping_function = tag_mapping_function
285
+ StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding)
286
+
287
+ def read_block(self, stream):
288
+ """Reads one paragraph at a time."""
289
+ block = []
290
+ for para_str in self._para_block_reader(stream):
291
+ para = []
292
+ for sent_str in self._sent_tokenizer.tokenize(para_str):
293
+ sent = [
294
+ str2tuple(s, self._sep)
295
+ for s in self._word_tokenizer.tokenize(sent_str)
296
+ ]
297
+ if self._tag_mapping_function:
298
+ sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent]
299
+ if not self._tagged:
300
+ sent = [w for (w, t) in sent]
301
+ if self._group_by_sent:
302
+ para.append(sent)
303
+ else:
304
+ para.extend(sent)
305
+ if self._group_by_para:
306
+ block.append(para)
307
+ else:
308
+ block.extend(para)
309
+ return block
310
+
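To illustrate how the flags map onto the reader methods above, here is a hedged sketch that builds a view directly over a hypothetical file, using the same defaults the reader normally passes in (separator "/", whitespace word tokenizer, newline sentence tokenizer, blank-line paragraph blocks).

    from nltk.corpus.reader.tagged import TaggedCorpusView
    from nltk.corpus.reader.util import read_blankline_block
    from nltk.tokenize import RegexpTokenizer, WhitespaceTokenizer

    # Hypothetical file of "word/TAG" tokens; views are normally built by the reader.
    view = TaggedCorpusView(
        "my_tagged_corpus/a01.txt",
        "utf8",
        True,                             # tagged: keep (word, tag) tuples
        True,                             # group_by_sent: one list per sentence
        False,                            # group_by_para: don't wrap paragraphs
        "/",                              # word/tag separator
        WhitespaceTokenizer(),
        RegexpTokenizer("\n", gaps=True),
        read_blankline_block,
    )
    print(view[0])                        # first tagged sentence of the file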
311
+
312
+ # needs to implement simplified tags
313
+ class MacMorphoCorpusReader(TaggedCorpusReader):
314
+ """
315
+ A corpus reader for the MAC_MORPHO corpus. Each line contains a
316
+ single tagged word, using '_' as a separator. Sentence boundaries
317
+ are based on the end-sentence tag ('_.'). Paragraph information
318
+ is not included in the corpus, so each paragraph returned by
319
+ ``self.paras()`` and ``self.tagged_paras()`` contains a single
320
+ sentence.
321
+ """
322
+
323
+ def __init__(self, root, fileids, encoding="utf8", tagset=None):
324
+ TaggedCorpusReader.__init__(
325
+ self,
326
+ root,
327
+ fileids,
328
+ sep="_",
329
+ word_tokenizer=LineTokenizer(),
330
+ sent_tokenizer=RegexpTokenizer(".*\n"),
331
+ para_block_reader=self._read_block,
332
+ encoding=encoding,
333
+ tagset=tagset,
334
+ )
335
+
336
+ def _read_block(self, stream):
337
+ return read_regexp_block(stream, r".*", r".*_\.")
338
+
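A short usage sketch, assuming the MAC-MORPHO data is already installed (for example via nltk.download("mac_morpho")):

    from nltk.corpus import mac_morpho

    # Assumes the corpus data is present, e.g. after nltk.download("mac_morpho").
    print(mac_morpho.words()[:10])
    print(mac_morpho.tagged_sents()[0][:5])   # (word, tag) pairs; one sentence per "paragraph"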
339
+
340
+ class TimitTaggedCorpusReader(TaggedCorpusReader):
341
+ """
342
+ A corpus reader for tagged sentences that are included in the TIMIT corpus.
343
+ """
344
+
345
+ def __init__(self, *args, **kwargs):
346
+ TaggedCorpusReader.__init__(
347
+ self, para_block_reader=read_timit_block, *args, **kwargs
348
+ )
349
+
350
+ def paras(self):
351
+ raise NotImplementedError("use sents() instead")
352
+
353
+ def tagged_paras(self):
354
+ raise NotImplementedError("use tagged_sents() instead")
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/timit.py ADDED
@@ -0,0 +1,510 @@
1
+ # Natural Language Toolkit: TIMIT Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2007 NLTK Project
4
+ # Author: Haejoong Lee <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Jacob Perkins <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ # [xx] this docstring is out-of-date:
11
+ """
12
+ Read tokens, phonemes and audio data from the NLTK TIMIT Corpus.
13
+
14
+ This corpus contains a selected portion of the TIMIT corpus.
15
+
16
+ - 16 speakers from 8 dialect regions
17
+ - 1 male and 1 female from each dialect region
18
+ - a total of 130 sentences (10 sentences per speaker; note that some
19
+ sentences are shared among speakers, and in particular sa1 and sa2
20
+ are spoken by all speakers)
21
+ - a total of 160 recordings of sentences (10 recordings per speaker)
22
+ - audio format: NIST Sphere, single channel, 16kHz sampling,
23
+ 16 bit sample, PCM encoding
24
+
25
+
26
+ Module contents
27
+ ===============
28
+
29
+ The timit corpus reader provides 4 functions and 4 data items.
30
+
31
+ - utterances
32
+
33
+ List of utterances in the corpus. There are a total of 160 utterances,
34
+ each of which corresponds to a unique utterance of a speaker.
35
+ Here's an example of an utterance identifier in the list::
36
+
37
+ dr1-fvmh0/sx206
38
+ - _---- _---
39
+ | | | | |
40
+ | | | | |
41
+ | | | | `--- sentence number
42
+ | | | `----- sentence type (a:all, i:shared, x:exclusive)
43
+ | | `--------- speaker ID
44
+ | `------------ sex (m:male, f:female)
45
+ `-------------- dialect region (1..8)
46
+
47
+ - speakers
48
+
49
+ List of speaker IDs. An example of speaker ID::
50
+
51
+ dr1-fvmh0
52
+
53
+ Note that if you split an item ID on '/' and take the first element of
54
+ the result, you will get a speaker ID.
55
+
56
+ >>> itemid = 'dr1-fvmh0/sx206'
57
+ >>> spkrid , sentid = itemid.split('/')
58
+ >>> spkrid
59
+ 'dr1-fvmh0'
60
+
61
+ The second element of the result is a sentence ID.
62
+
63
+ - dictionary()
64
+
65
+ Phonetic dictionary of words contained in this corpus. This is a Python
66
+ dictionary from words to phoneme lists.
67
+
68
+ - spkrinfo()
69
+
70
+ Speaker information table. It's a Python dictionary from speaker IDs to
71
+ records of 10 fields. Speaker IDs are the same as the ones in timit.speakers.
72
+ Each record is a dictionary from field names to values, and the fields are
73
+ as follows::
74
+
75
+ id speaker ID as defined in the original TIMIT speaker info table
76
+ sex speaker gender (M:male, F:female)
77
+ dr speaker dialect region (1:new england, 2:northern,
78
+ 3:north midland, 4:south midland, 5:southern, 6:new york city,
79
+ 7:western, 8:army brat (moved around))
80
+ use corpus type (TRN:training, TST:test)
81
+ in this sample corpus only TRN is available
82
+ recdate recording date
83
+ birthdate speaker birth date
84
+ ht speaker height
85
+ race speaker race (WHT:white, BLK:black, AMR:american indian,
86
+ SPN:spanish-american, ORN:oriental, ???:unknown)
87
+ edu speaker education level (HS:high school, AS:associate degree,
88
+ BS:bachelor's degree (BS or BA), MS:master's degree (MS or MA),
89
+ PHD:doctorate degree (PhD,JD,MD), ??:unknown)
90
+ comments comments by the recorder
91
+
92
+ The 4 functions are as follows.
93
+
94
+ - tokenized(sentences=items, offset=False)
95
+
96
+ Given a list of items, returns an iterator of a list of word lists,
97
+ each of which corresponds to an item (sentence). If offset is set to True,
98
+ each element of the word list is a tuple of word(string), start offset and
99
+ end offset, where offset is represented as a number of 16kHz samples.
100
+
101
+ - phonetic(sentences=items, offset=False)
102
+
103
+ Given a list of items, returns an iterator of a list of phoneme lists,
104
+ each of which corresponds to an item (sentence). If offset is set to True,
105
+ each element of the phoneme list is a tuple of word(string), start offset
106
+ and end offset, where offset is represented as a number of 16kHz samples.
107
+
108
+ - audiodata(item, start=0, end=None)
109
+
110
+ Given an item, returns a chunk of audio samples formatted into a string.
111
+ When the function is called, if start and end are omitted, the entire
112
+ samples of the recording will be returned. If only end is omitted,
113
+ samples from the start offset to the end of the recording will be returned.
114
+
115
+ - play(data)
116
+
117
+ Play the given audio samples. The audio samples can be obtained from the
118
+ timit.audiodata function.
119
+
120
+ """
121
+ import sys
122
+ import time
123
+
124
+ from nltk.corpus.reader.api import *
125
+ from nltk.internals import import_from_stdlib
126
+ from nltk.tree import Tree
127
+
128
+
129
+ class TimitCorpusReader(CorpusReader):
130
+ """
131
+ Reader for the TIMIT corpus (or any other corpus with the same
132
+ file layout and use of file formats). The corpus root directory
133
+ should contain the following files:
134
+
135
+ - timitdic.txt: dictionary of standard transcriptions
136
+ - spkrinfo.txt: table of speaker information
137
+
138
+ In addition, the root directory should contain one subdirectory
139
+ for each speaker, containing three files for each utterance:
140
+
141
+ - <utterance-id>.txt: text content of utterances
142
+ - <utterance-id>.wrd: tokenized text content of utterances
143
+ - <utterance-id>.phn: phonetic transcription of utterances
144
+ - <utterance-id>.wav: utterance sound file
145
+ """
146
+
147
+ _FILE_RE = r"(\w+-\w+/\w+\.(phn|txt|wav|wrd))|" + r"timitdic\.txt|spkrinfo\.txt"
148
+ """A regexp matching fileids that are used by this corpus reader."""
149
+ _UTTERANCE_RE = r"\w+-\w+/\w+\.txt"
150
+
151
+ def __init__(self, root, encoding="utf8"):
152
+ """
153
+ Construct a new TIMIT corpus reader in the given directory.
154
+ :param root: The root directory for this corpus.
155
+ """
156
+ # Ensure that wave files don't get treated as unicode data:
157
+ if isinstance(encoding, str):
158
+ encoding = [(r".*\.wav", None), (".*", encoding)]
159
+
160
+ CorpusReader.__init__(
161
+ self, root, find_corpus_fileids(root, self._FILE_RE), encoding=encoding
162
+ )
163
+
164
+ self._utterances = [
165
+ name[:-4] for name in find_corpus_fileids(root, self._UTTERANCE_RE)
166
+ ]
167
+ """A list of the utterance identifiers for all utterances in
168
+ this corpus."""
169
+
170
+ self._speakerinfo = None
171
+ self._root = root
172
+ self.speakers = sorted({u.split("/")[0] for u in self._utterances})
173
+
174
+ def fileids(self, filetype=None):
175
+ """
176
+ Return a list of file identifiers for the files that make up
177
+ this corpus.
178
+
179
+ :param filetype: If specified, then ``filetype`` indicates that
180
+ only the files that have the given type should be
181
+ returned. Accepted values are: ``txt``, ``wrd``, ``phn``,
182
+ ``wav``, or ``metadata``.
183
+ """
184
+ if filetype is None:
185
+ return CorpusReader.fileids(self)
186
+ elif filetype in ("txt", "wrd", "phn", "wav"):
187
+ return [f"{u}.{filetype}" for u in self._utterances]
188
+ elif filetype == "metadata":
189
+ return ["timitdic.txt", "spkrinfo.txt"]
190
+ else:
191
+ raise ValueError("Bad value for filetype: %r" % filetype)
192
+
193
+ def utteranceids(
194
+ self, dialect=None, sex=None, spkrid=None, sent_type=None, sentid=None
195
+ ):
196
+ """
197
+ :return: A list of the utterance identifiers for all
198
+ utterances in this corpus, or for the given speaker, dialect
199
+ region, gender, sentence type, or sentence number, if
200
+ specified.
201
+ """
202
+ if isinstance(dialect, str):
203
+ dialect = [dialect]
204
+ if isinstance(sex, str):
205
+ sex = [sex]
206
+ if isinstance(spkrid, str):
207
+ spkrid = [spkrid]
208
+ if isinstance(sent_type, str):
209
+ sent_type = [sent_type]
210
+ if isinstance(sentid, str):
211
+ sentid = [sentid]
212
+
213
+ utterances = self._utterances[:]
214
+ if dialect is not None:
215
+ utterances = [u for u in utterances if u[2] in dialect]
216
+ if sex is not None:
217
+ utterances = [u for u in utterances if u[4] in sex]
218
+ if spkrid is not None:
219
+ utterances = [u for u in utterances if u[:9] in spkrid]
220
+ if sent_type is not None:
221
+ utterances = [u for u in utterances if u[11] in sent_type]
222
+ if sentid is not None:
223
+ utterances = [u for u in utterances if u[10:] in sentid]
224
+ return utterances
225
+
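A brief sketch of these filters, which rely on the fixed character positions of an utterance id described in the module docstring (assumes the sample corpus is installed):

    from nltk.corpus import timit

    # Female speakers from dialect region 1, 'exclusive' (sx) sentences only.
    print(timit.utteranceids(dialect="1", sex="f", sent_type="x"))

    # Recordings of one specific sentence by one specific speaker.
    print(timit.utteranceids(spkrid="dr1-fvmh0", sentid="sx206"))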
226
+ def transcription_dict(self):
227
+ """
228
+ :return: A dictionary giving the 'standard' transcription for
229
+ each word.
230
+ """
231
+ _transcriptions = {}
232
+ with self.open("timitdic.txt") as fp:
233
+ for line in fp:
234
+ if not line.strip() or line[0] == ";":
235
+ continue
236
+ m = re.match(r"\s*(\S+)\s+/(.*)/\s*$", line)
237
+ if not m:
238
+ raise ValueError("Bad line: %r" % line)
239
+ _transcriptions[m.group(1)] = m.group(2).split()
240
+ return _transcriptions
241
+
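A hedged example of looking up the standard transcription of a word that actually occurs in an utterance (assumes the sample corpus is installed):

    from nltk.corpus import timit

    transcriptions = timit.transcription_dict()
    word = timit.words("dr1-fvmh0/sx206")[0]      # first word of the utterance
    print(word, transcriptions.get(word))         # its phoneme list, if listed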
242
+ def spkrid(self, utterance):
243
+ return utterance.split("/")[0]
244
+
245
+ def sentid(self, utterance):
246
+ return utterance.split("/")[1]
247
+
248
+ def utterance(self, spkrid, sentid):
249
+ return f"{spkrid}/{sentid}"
250
+
251
+ def spkrutteranceids(self, speaker):
252
+ """
253
+ :return: A list of all utterances associated with a given
254
+ speaker.
255
+ """
256
+ return [
257
+ utterance
258
+ for utterance in self._utterances
259
+ if utterance.startswith(speaker + "/")
260
+ ]
261
+
262
+ def spkrinfo(self, speaker):
263
+ """
264
+ :return: The ``SpeakerInfo`` record for the given speaker; a full utterance id may also be given, in which case the record for its speaker is returned.
265
+ """
266
+ if speaker in self._utterances:
267
+ speaker = self.spkrid(speaker)
268
+
269
+ if self._speakerinfo is None:
270
+ self._speakerinfo = {}
271
+ with self.open("spkrinfo.txt") as fp:
272
+ for line in fp:
273
+ if not line.strip() or line[0] == ";":
274
+ continue
275
+ rec = line.strip().split(None, 9)
276
+ key = f"dr{rec[2]}-{rec[1].lower()}{rec[0].lower()}"
277
+ self._speakerinfo[key] = SpeakerInfo(*rec)
278
+
279
+ return self._speakerinfo[speaker]
280
+
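A small sketch of retrieving a speaker record (assumes the sample corpus is installed); the fields correspond to the SpeakerInfo attributes defined at the bottom of this module.

    from nltk.corpus import timit

    info = timit.spkrinfo("dr1-fvmh0")    # a full utterance id is also accepted
    print(info.sex, info.dr, info.ht)     # gender, dialect region, height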
281
+ def phones(self, utterances=None):
282
+ results = []
283
+ for fileid in self._utterance_fileids(utterances, ".phn"):
284
+ with self.open(fileid) as fp:
285
+ for line in fp:
286
+ if line.strip():
287
+ results.append(line.split()[-1])
288
+ return results
289
+
290
+ def phone_times(self, utterances=None):
291
+ """
292
+ offset is represented as a number of 16kHz samples!
293
+ """
294
+ results = []
295
+ for fileid in self._utterance_fileids(utterances, ".phn"):
296
+ with self.open(fileid) as fp:
297
+ for line in fp:
298
+ if line.strip():
299
+ results.append(
300
+ (
301
+ line.split()[2],
302
+ int(line.split()[0]),
303
+ int(line.split()[1]),
304
+ )
305
+ )
306
+ return results
307
+
308
+ def words(self, utterances=None):
309
+ results = []
310
+ for fileid in self._utterance_fileids(utterances, ".wrd"):
311
+ with self.open(fileid) as fp:
312
+ for line in fp:
313
+ if line.strip():
314
+ results.append(line.split()[-1])
315
+ return results
316
+
317
+ def word_times(self, utterances=None):
318
+ results = []
319
+ for fileid in self._utterance_fileids(utterances, ".wrd"):
320
+ with self.open(fileid) as fp:
321
+ for line in fp:
322
+ if line.strip():
323
+ results.append(
324
+ (
325
+ line.split()[2],
326
+ int(line.split()[0]),
327
+ int(line.split()[1]),
328
+ )
329
+ )
330
+ return results
331
+
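Since offsets are counts of 16 kHz samples, dividing by 16000 converts them to seconds; a hedged sketch (assumes the sample corpus is installed):

    from nltk.corpus import timit

    # Offsets are counts of 16 kHz samples; divide by 16000 for seconds.
    for word, start, end in timit.word_times("dr1-fvmh0/sx206")[:5]:
        print(f"{word}: {start / 16000:.2f}s - {end / 16000:.2f}s")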
332
+ def sents(self, utterances=None):
333
+ results = []
334
+ for fileid in self._utterance_fileids(utterances, ".wrd"):
335
+ with self.open(fileid) as fp:
336
+ results.append([line.split()[-1] for line in fp if line.strip()])
337
+ return results
338
+
339
+ def sent_times(self, utterances=None):
340
+ # TODO: Check this
341
+ return [
342
+ (
343
+ line.split(None, 2)[-1].strip(),
344
+ int(line.split()[0]),
345
+ int(line.split()[1]),
346
+ )
347
+ for fileid in self._utterance_fileids(utterances, ".txt")
348
+ for line in self.open(fileid)
349
+ if line.strip()
350
+ ]
351
+
352
+ def phone_trees(self, utterances=None):
353
+ if utterances is None:
354
+ utterances = self._utterances
355
+ if isinstance(utterances, str):
356
+ utterances = [utterances]
357
+
358
+ trees = []
359
+ for utterance in utterances:
360
+ word_times = self.word_times(utterance)
361
+ phone_times = self.phone_times(utterance)
362
+ sent_times = self.sent_times(utterance)
363
+
364
+ while sent_times:
365
+ (sent, sent_start, sent_end) = sent_times.pop(0)
366
+ trees.append(Tree("S", []))
367
+ while (
368
+ word_times and phone_times and phone_times[0][2] <= word_times[0][1]
369
+ ):
370
+ trees[-1].append(phone_times.pop(0)[0])
371
+ while word_times and word_times[0][2] <= sent_end:
372
+ (word, word_start, word_end) = word_times.pop(0)
373
+ trees[-1].append(Tree(word, []))
374
+ while phone_times and phone_times[0][2] <= word_end:
375
+ trees[-1][-1].append(phone_times.pop(0)[0])
376
+ while phone_times and phone_times[0][2] <= sent_end:
377
+ trees[-1].append(phone_times.pop(0)[0])
378
+ return trees
379
+
380
+ # [xx] NOTE: This is currently broken -- we're assuming that the
381
+ # fileids are WAV fileids (aka RIFF), but they're actually NIST SPHERE
382
+ # fileids.
383
+ def wav(self, utterance, start=0, end=None):
384
+ # nltk.chunk conflicts with the stdlib module 'chunk'
385
+ wave = import_from_stdlib("wave")
386
+
387
+ w = wave.open(self.open(utterance + ".wav"), "rb")
388
+
389
+ if end is None:
390
+ end = w.getnframes()
391
+
392
+ # Skip past frames before start, then read the frames we want
393
+ w.readframes(start)
394
+ frames = w.readframes(end - start)
395
+
396
+ # Open a new temporary file -- the wave module requires
397
+ # an actual file, and won't work w/ stringio. :(
398
+ tf = tempfile.TemporaryFile()
399
+ out = wave.open(tf, "w")
400
+
401
+ # Write the parameters & data to the new file.
402
+ out.setparams(w.getparams())
403
+ out.writeframes(frames)
404
+ out.close()
405
+
406
+ # Read the data back from the file, and return it. The
407
+ # file will automatically be deleted when we return.
408
+ tf.seek(0)
409
+ return tf.read()
410
+
411
+ def audiodata(self, utterance, start=0, end=None):
412
+ assert end is None or end > start
413
+ headersize = 44
414
+ with self.open(utterance + ".wav") as fp:
415
+ if end is None:
416
+ data = fp.read()
417
+ else:
418
+ data = fp.read(headersize + end * 2)
419
+ return data[headersize + start * 2 :]
420
+
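A hedged sketch of pulling raw samples and playing an utterance back; playback needs ossaudiodev or pygame, as the play() method below notes (assumes the sample corpus is installed):

    from nltk.corpus import timit

    item = "dr1-fvmh0/sx206"
    samples = timit.audiodata(item)        # raw 16-bit samples as a byte string
    print(len(samples) // 2, "samples")
    timit.play(item)                       # requires ossaudiodev or pygame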
421
+ def _utterance_fileids(self, utterances, extension):
422
+ if utterances is None:
423
+ utterances = self._utterances
424
+ if isinstance(utterances, str):
425
+ utterances = [utterances]
426
+ return [f"{u}{extension}" for u in utterances]
427
+
428
+ def play(self, utterance, start=0, end=None):
429
+ """
430
+ Play the given audio sample.
431
+
432
+ :param utterance: The utterance id of the sample to play
433
+ """
434
+ # Method 1: os audio dev.
435
+ try:
436
+ import ossaudiodev
437
+
438
+ try:
439
+ dsp = ossaudiodev.open("w")
440
+ dsp.setfmt(ossaudiodev.AFMT_S16_LE)
441
+ dsp.channels(1)
442
+ dsp.speed(16000)
443
+ dsp.write(self.audiodata(utterance, start, end))
444
+ dsp.close()
445
+ except OSError as e:
446
+ print(
447
+ (
448
+ "can't acquire the audio device; please "
449
+ "activate your audio device."
450
+ ),
451
+ file=sys.stderr,
452
+ )
453
+ print("system error message:", str(e), file=sys.stderr)
454
+ return
455
+ except ImportError:
456
+ pass
457
+
458
+ # Method 2: pygame
459
+ try:
460
+ # FIXME: this won't work under python 3
461
+ import pygame.mixer
462
+ import StringIO
463
+
464
+ pygame.mixer.init(16000)
465
+ f = StringIO.StringIO(self.wav(utterance, start, end))
466
+ pygame.mixer.Sound(f).play()
467
+ while pygame.mixer.get_busy():
468
+ time.sleep(0.01)
469
+ return
470
+ except ImportError:
471
+ pass
472
+
473
+ # Method 3: complain. :)
474
+ print(
475
+ ("you must install pygame or ossaudiodev " "for audio playback."),
476
+ file=sys.stderr,
477
+ )
478
+
479
+
480
+ class SpeakerInfo:
481
+ def __init__(
482
+ self, id, sex, dr, use, recdate, birthdate, ht, race, edu, comments=None
483
+ ):
484
+ self.id = id
485
+ self.sex = sex
486
+ self.dr = dr
487
+ self.use = use
488
+ self.recdate = recdate
489
+ self.birthdate = birthdate
490
+ self.ht = ht
491
+ self.race = race
492
+ self.edu = edu
493
+ self.comments = comments
494
+
495
+ def __repr__(self):
496
+ attribs = "id sex dr use recdate birthdate ht race edu comments"
497
+ args = [f"{attr}={getattr(self, attr)!r}" for attr in attribs.split()]
498
+ return "SpeakerInfo(%s)" % (", ".join(args))
499
+
500
+
501
+ def read_timit_block(stream):
502
+ """
503
+ Block reader for timit tagged sentences, which are preceded by a sentence
504
+ number that will be ignored.
505
+ """
506
+ line = stream.readline()
507
+ if not line:
508
+ return []
509
+ n, sent = line.split(" ", 1)
510
+ return [sent]
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/toolbox.py ADDED
@@ -0,0 +1,76 @@
1
+ # Natural Language Toolkit: Toolbox Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Greg Aumann <[email protected]>
5
+ # Stuart Robinson <[email protected]>
6
+ # Steven Bird <[email protected]>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ Module for reading, writing and manipulating
12
+ Toolbox databases and settings fileids.
13
+ """
14
+
15
+ from nltk.corpus.reader.api import *
16
+ from nltk.corpus.reader.util import *
17
+ from nltk.toolbox import ToolboxData
18
+
19
+
20
+ class ToolboxCorpusReader(CorpusReader):
21
+ def xml(self, fileids, key=None):
22
+ return concat(
23
+ [
24
+ ToolboxData(path, enc).parse(key=key)
25
+ for (path, enc) in self.abspaths(fileids, True)
26
+ ]
27
+ )
28
+
29
+ def fields(
30
+ self,
31
+ fileids,
32
+ strip=True,
33
+ unwrap=True,
34
+ encoding="utf8",
35
+ errors="strict",
36
+ unicode_fields=None,
37
+ ):
38
+ return concat(
39
+ [
40
+ list(
41
+ ToolboxData(fileid, enc).fields(
42
+ strip, unwrap, encoding, errors, unicode_fields
43
+ )
44
+ )
45
+ for (fileid, enc) in self.abspaths(fileids, include_encoding=True)
46
+ ]
47
+ )
48
+
49
+ # should probably be done lazily:
50
+ def entries(self, fileids, **kwargs):
51
+ if "key" in kwargs:
52
+ key = kwargs["key"]
53
+ del kwargs["key"]
54
+ else:
55
+ key = "lx" # the default key in MDF
56
+ entries = []
57
+ for marker, contents in self.fields(fileids, **kwargs):
58
+ if marker == key:
59
+ entries.append((contents, []))
60
+ else:
61
+ try:
62
+ entries[-1][-1].append((marker, contents))
63
+ except IndexError:
64
+ pass
65
+ return entries
66
+
67
+ def words(self, fileids, key="lx"):
68
+ return [contents for marker, contents in self.fields(fileids) if marker == key]
69
+
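A short usage sketch, assuming the sample toolbox data (which includes the Rotokas dictionary) has been installed, e.g. via nltk.download("toolbox"):

    from nltk.corpus import toolbox

    # Assumes the sample data is present, e.g. after nltk.download("toolbox").
    lexeme, fields = toolbox.entries("rotokas.dic")[0]
    print(lexeme)        # the \lx value of the first entry
    print(fields[:3])    # the (marker, value) pairs that follow it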
70
+
71
+ def demo():
72
+ pass
73
+
74
+
75
+ if __name__ == "__main__":
76
+ demo()
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/wordlist.py ADDED
@@ -0,0 +1,166 @@
1
+ # Natural Language Toolkit: Word List Corpus Reader
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Edward Loper <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ from nltk.corpus.reader.api import *
9
+ from nltk.corpus.reader.util import *
10
+ from nltk.tokenize import line_tokenize
11
+
12
+
13
+ class WordListCorpusReader(CorpusReader):
14
+ """
15
+ List of words, one per line. Blank lines are ignored.
16
+ """
17
+
18
+ def words(self, fileids=None, ignore_lines_startswith="\n"):
19
+ return [
20
+ line
21
+ for line in line_tokenize(self.raw(fileids))
22
+ if not line.startswith(ignore_lines_startswith)
23
+ ]
24
+
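Two familiar word lists shipped with NLTK use this reader; a hedged sketch, assuming the "stopwords" and "words" packages have been downloaded:

    from nltk.corpus import stopwords, words

    # Assumes nltk.download("stopwords") and nltk.download("words") have been run.
    print(stopwords.words("english")[:5])
    print(len(words.words()))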
25
+
26
+ class SwadeshCorpusReader(WordListCorpusReader):
27
+ def entries(self, fileids=None):
28
+ """
29
+ :return: a list of tuples, each giving the corresponding word from each of the specified fileids.
30
+ """
31
+ if not fileids:
32
+ fileids = self.fileids()
33
+
34
+ wordlists = [self.words(f) for f in fileids]
35
+ return list(zip(*wordlists))
36
+
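A hedged sketch of lining up translations across languages, assuming the "swadesh" package has been downloaded:

    from nltk.corpus import swadesh

    # Assumes nltk.download("swadesh") has been run.
    print(swadesh.fileids()[:5])                  # language codes such as 'en'
    for en, fr in swadesh.entries(["en", "fr"])[:5]:
        print(en, "->", fr)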
37
+
38
+ class NonbreakingPrefixesCorpusReader(WordListCorpusReader):
39
+ """
40
+ This is a class to read the nonbreaking prefixes textfiles from the
41
+ Moses Machine Translation toolkit. These lists are used in the Python port
42
+ of the Moses' word tokenizer.
43
+ """
44
+
45
+ available_langs = {
46
+ "catalan": "ca",
47
+ "czech": "cs",
48
+ "german": "de",
49
+ "greek": "el",
50
+ "english": "en",
51
+ "spanish": "es",
52
+ "finnish": "fi",
53
+ "french": "fr",
54
+ "hungarian": "hu",
55
+ "icelandic": "is",
56
+ "italian": "it",
57
+ "latvian": "lv",
58
+ "dutch": "nl",
59
+ "polish": "pl",
60
+ "portuguese": "pt",
61
+ "romanian": "ro",
62
+ "russian": "ru",
63
+ "slovak": "sk",
64
+ "slovenian": "sl",
65
+ "swedish": "sv",
66
+ "tamil": "ta",
67
+ }
68
+ # Also, add the lang IDs as the keys.
69
+ available_langs.update({v: v for v in available_langs.values()})
70
+
71
+ def words(self, lang=None, fileids=None, ignore_lines_startswith="#"):
72
+ """
73
+ This module returns a list of nonbreaking prefixes for the specified
74
+ language(s).
75
+
76
+ >>> from nltk.corpus import nonbreaking_prefixes as nbp
77
+ >>> nbp.words('en')[:10] == [u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'J']
78
+ True
79
+ >>> nbp.words('ta')[:5] == [u'\u0b85', u'\u0b86', u'\u0b87', u'\u0b88', u'\u0b89']
80
+ True
81
+
82
+ :return: a list of words for the specified language(s).
83
+ """
84
+ # If *lang* in list of languages available, allocate apt fileid.
85
+ # Otherwise, the function returns non-breaking prefixes for
86
+ # all languages when fileids==None.
87
+ if lang in self.available_langs:
88
+ lang = self.available_langs[lang]
89
+ fileids = ["nonbreaking_prefix." + lang]
90
+ return [
91
+ line
92
+ for line in line_tokenize(self.raw(fileids))
93
+ if not line.startswith(ignore_lines_startswith)
94
+ ]
95
+
96
+
97
+ class UnicharsCorpusReader(WordListCorpusReader):
98
+ """
99
+ This class is used to read lists of characters from the Perl Unicode
100
+ Properties (see https://perldoc.perl.org/perluniprops.html).
101
+ The files in the perluniprop.zip are extracted using the Unicode::Tussle
102
+ module from https://search.cpan.org/~bdfoy/Unicode-Tussle-1.11/lib/Unicode/Tussle.pm
103
+ """
104
+
105
+ # These are categories similar to the Perl Unicode Properties
106
+ available_categories = [
107
+ "Close_Punctuation",
108
+ "Currency_Symbol",
109
+ "IsAlnum",
110
+ "IsAlpha",
111
+ "IsLower",
112
+ "IsN",
113
+ "IsSc",
114
+ "IsSo",
115
+ "IsUpper",
116
+ "Line_Separator",
117
+ "Number",
118
+ "Open_Punctuation",
119
+ "Punctuation",
120
+ "Separator",
121
+ "Symbol",
122
+ ]
123
+
124
+ def chars(self, category=None, fileids=None):
125
+ """
126
+ This module returns a list of characters from the Perl Unicode Properties.
127
+ They are very useful when porting Perl tokenizers to Python.
128
+
129
+ >>> from nltk.corpus import perluniprops as pup
130
+ >>> pup.chars('Open_Punctuation')[:5] == [u'(', u'[', u'{', u'\u0f3a', u'\u0f3c']
131
+ True
132
+ >>> pup.chars('Currency_Symbol')[:5] == [u'$', u'\xa2', u'\xa3', u'\xa4', u'\xa5']
133
+ True
134
+ >>> pup.available_categories
135
+ ['Close_Punctuation', 'Currency_Symbol', 'IsAlnum', 'IsAlpha', 'IsLower', 'IsN', 'IsSc', 'IsSo', 'IsUpper', 'Line_Separator', 'Number', 'Open_Punctuation', 'Punctuation', 'Separator', 'Symbol']
136
+
137
+ :return: a list of characters given the specific unicode character category
138
+ """
139
+ if category in self.available_categories:
140
+ fileids = [category + ".txt"]
141
+ return list(self.raw(fileids).strip())
142
+
143
+
144
+ class MWAPPDBCorpusReader(WordListCorpusReader):
145
+ """
146
+ This class is used to read the list of word pairs from the subset of lexical
147
+ pairs of The Paraphrase Database (PPDB) XXXL used in the Monolingual Word
148
+ Alignment (MWA) algorithm described in Sultan et al. (2014a, 2014b, 2015):
149
+
150
+ - http://acl2014.org/acl2014/Q14/pdf/Q14-1017
151
+ - https://www.aclweb.org/anthology/S14-2039
152
+ - https://www.aclweb.org/anthology/S15-2027
153
+
154
+ The original source of the full PPDB corpus can be found on
155
+ https://www.cis.upenn.edu/~ccb/ppdb/
156
+
157
+ :return: a list of tuples of similar lexical terms.
158
+ """
159
+
160
+ mwa_ppdb_xxxl_file = "ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs"
161
+
162
+ def entries(self, fileids=mwa_ppdb_xxxl_file):
163
+ """
164
+ :return: a tuple of synonym word pairs.
165
+ """
166
+ return [tuple(line.split("\t")) for line in line_tokenize(self.raw(fileids))]
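A hedged sketch of reading the pair list by constructing the reader directly; the root directory here is hypothetical and is assumed to contain the uniquepairs file named above.

    from nltk.corpus.reader.wordlist import MWAPPDBCorpusReader

    # Hypothetical root directory holding the uniquepairs file.
    reader = MWAPPDBCorpusReader(
        "ppdb_data", [MWAPPDBCorpusReader.mwa_ppdb_xxxl_file]
    )
    print(reader.entries()[:3])      # (term, paraphrase) tuples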