applied-ai-018 committed
Commit c6a13b8 (verified) · Parent: e97cf36

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/api.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/decisiontree.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/maxent.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/megam.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/naivebayes.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/rte_classify.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/scikitlearn.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/senna.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/svm.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/tadm.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/textcat.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/util.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/weka.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/nltk/classify/maxent.py +1569 -0
  17. llmeval-env/lib/python3.10/site-packages/nltk/classify/svm.py +17 -0
  18. llmeval-env/lib/python3.10/site-packages/nltk/classify/tadm.py +122 -0
  19. llmeval-env/lib/python3.10/site-packages/nltk/classify/weka.py +377 -0
  20. llmeval-env/lib/python3.10/site-packages/nltk/test/__init__.py +18 -0
  21. llmeval-env/lib/python3.10/site-packages/nltk/test/all.py +25 -0
  22. llmeval-env/lib/python3.10/site-packages/nltk/test/bleu.doctest +29 -0
  23. llmeval-env/lib/python3.10/site-packages/nltk/test/bnc.doctest +60 -0
  24. llmeval-env/lib/python3.10/site-packages/nltk/test/ccg.doctest +376 -0
  25. llmeval-env/lib/python3.10/site-packages/nltk/test/ccg_semantics.doctest +552 -0
  26. llmeval-env/lib/python3.10/site-packages/nltk/test/chat80.doctest +232 -0
  27. llmeval-env/lib/python3.10/site-packages/nltk/test/childes.doctest +190 -0
  28. llmeval-env/lib/python3.10/site-packages/nltk/test/childes_fixt.py +13 -0
  29. llmeval-env/lib/python3.10/site-packages/nltk/test/chunk.doctest +372 -0
  30. llmeval-env/lib/python3.10/site-packages/nltk/test/classify.doctest +202 -0
  31. llmeval-env/lib/python3.10/site-packages/nltk/test/classify_fixt.py +5 -0
  32. llmeval-env/lib/python3.10/site-packages/nltk/test/collections.doctest +31 -0
  33. llmeval-env/lib/python3.10/site-packages/nltk/test/concordance.doctest +75 -0
  34. llmeval-env/lib/python3.10/site-packages/nltk/test/corpus.doctest +0 -0
  35. llmeval-env/lib/python3.10/site-packages/nltk/test/crubadan.doctest +65 -0
  36. llmeval-env/lib/python3.10/site-packages/nltk/test/data.doctest +387 -0
  37. llmeval-env/lib/python3.10/site-packages/nltk/test/drt.doctest +515 -0
  38. llmeval-env/lib/python3.10/site-packages/nltk/test/featstruct.doctest +1229 -0
  39. llmeval-env/lib/python3.10/site-packages/nltk/test/framenet.doctest +288 -0
  40. llmeval-env/lib/python3.10/site-packages/nltk/test/generate.doctest +78 -0
  41. llmeval-env/lib/python3.10/site-packages/nltk/test/gensim.doctest +141 -0
  42. llmeval-env/lib/python3.10/site-packages/nltk/test/gluesemantics_malt.doctest +69 -0
  43. llmeval-env/lib/python3.10/site-packages/nltk/test/grammartestsuites.doctest +109 -0
  44. llmeval-env/lib/python3.10/site-packages/nltk/test/internals.doctest +161 -0
  45. llmeval-env/lib/python3.10/site-packages/nltk/test/lm.doctest +135 -0
  46. llmeval-env/lib/python3.10/site-packages/nltk/test/logic.doctest +1096 -0
  47. llmeval-env/lib/python3.10/site-packages/nltk/test/metrics.doctest +321 -0
  48. llmeval-env/lib/python3.10/site-packages/nltk/test/misc.doctest +118 -0
  49. llmeval-env/lib/python3.10/site-packages/nltk/test/portuguese_en_fixt.py +4 -0
  50. llmeval-env/lib/python3.10/site-packages/nltk/test/probability.doctest +306 -0
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.7 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/api.cpython-310.pyc ADDED
Binary file (4.94 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/decisiontree.cpython-310.pyc ADDED
Binary file (9.59 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/maxent.cpython-310.pyc ADDED
Binary file (46 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/megam.cpython-310.pyc ADDED
Binary file (5.22 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/naivebayes.cpython-310.pyc ADDED
Binary file (8 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/positivenaivebayes.cpython-310.pyc ADDED
Binary file (5.31 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/rte_classify.cpython-310.pyc ADDED
Binary file (5.56 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/scikitlearn.cpython-310.pyc ADDED
Binary file (5.95 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/senna.cpython-310.pyc ADDED
Binary file (5.64 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/svm.cpython-310.pyc ADDED
Binary file (699 Bytes).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/tadm.cpython-310.pyc ADDED
Binary file (3.34 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/textcat.cpython-310.pyc ADDED
Binary file (4.71 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/util.cpython-310.pyc ADDED
Binary file (10.5 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/__pycache__/weka.cpython-310.pyc ADDED
Binary file (10.2 kB).
 
llmeval-env/lib/python3.10/site-packages/nltk/classify/maxent.py ADDED
@@ -0,0 +1,1569 @@
1
+ # Natural Language Toolkit: Maximum Entropy Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Dmitry Chichkov <[email protected]> (TypedMaxentFeatureEncoding)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A classifier model based on the maximum entropy modeling framework. This
11
+ framework considers all of the probability distributions that are
12
+ empirically consistent with the training data; and chooses the
13
+ distribution with the highest entropy. A probability distribution is
14
+ "empirically consistent" with a set of training data if its estimated
15
+ frequency with which a class and a feature vector value co-occur is
16
+ equal to the actual frequency in the data.
17
+
18
+ Terminology: 'feature'
19
+ ======================
20
+ The term *feature* is usually used to refer to some property of an
21
+ unlabeled token. For example, when performing word sense
22
+ disambiguation, we might define a ``'prevword'`` feature whose value is
23
+ the word preceding the target word. However, in the context of
24
+ maxent modeling, the term *feature* is typically used to refer to a
25
+ property of a "labeled" token. In order to prevent confusion, we
26
+ will introduce two distinct terms to disambiguate these two different
27
+ concepts:
28
+
29
+ - An "input-feature" is a property of an unlabeled token.
30
+ - A "joint-feature" is a property of a labeled token.
31
+
32
+ In the rest of the ``nltk.classify`` module, the term "features" is
33
+ used to refer to what we will call "input-features" in this module.
34
+
35
+ In literature that describes and discusses maximum entropy models,
36
+ input-features are typically called "contexts", and joint-features
37
+ are simply referred to as "features".
38
+
39
+ Converting Input-Features to Joint-Features
40
+ -------------------------------------------
41
+ In maximum entropy models, joint-features are required to have numeric
42
+ values. Typically, each input-feature ``input_feat`` is mapped to a
43
+ set of joint-features of the form:
44
+
45
+ | joint_feat(token, label) = { 1 if input_feat(token) == feat_val
46
+ | { and label == some_label
47
+ | {
48
+ | { 0 otherwise
49
+
50
+ For all values of ``feat_val`` and ``some_label``. This mapping is
51
+ performed by classes that implement the ``MaxentFeatureEncodingI``
52
+ interface.
53
+ """
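A minimal sketch of the mapping described above, using the ``BinaryMaxentFeatureEncoding`` defined later in this file (toy featuresets; the printed ids and descriptions are illustrative):

from nltk.classify.maxent import BinaryMaxentFeatureEncoding

train_toks = [({"prevword": "the"}, "NOUN"), ({"prevword": "to"}, "VERB")]
encoding = BinaryMaxentFeatureEncoding.train(train_toks)
# encode() returns the sparse joint-feature vector as (feature_id, value) pairs.
print(encoding.encode({"prevword": "the"}, "NOUN"))  # e.g. [(0, 1)]
print(encoding.describe(0))  # e.g. "prevword=='the' and label is 'NOUN'"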
54
+ try:
55
+ import numpy
56
+ except ImportError:
57
+ pass
58
+
59
+ import os
60
+ import tempfile
61
+ from collections import defaultdict
62
+
63
+ from nltk.classify.api import ClassifierI
64
+ from nltk.classify.megam import call_megam, parse_megam_weights, write_megam_file
65
+ from nltk.classify.tadm import call_tadm, parse_tadm_weights, write_tadm_file
66
+ from nltk.classify.util import CutoffChecker, accuracy, log_likelihood
67
+ from nltk.data import gzip_open_unicode
68
+ from nltk.probability import DictionaryProbDist
69
+ from nltk.util import OrderedDict
70
+
71
+ __docformat__ = "epytext en"
72
+
73
+ ######################################################################
74
+ # { Classifier Model
75
+ ######################################################################
76
+
77
+
78
+ class MaxentClassifier(ClassifierI):
79
+ """
80
+ A maximum entropy classifier (also known as a "conditional
81
+ exponential classifier"). This classifier is parameterized by a
82
+ set of "weights", which are used to combine the joint-features
83
+ that are generated from a featureset by an "encoding". In
84
+ particular, the encoding maps each ``(featureset, label)`` pair to
85
+ a vector. The probability of each label is then computed using
86
+ the following equation::
87
+
88
+ dotprod(weights, encode(fs,label))
89
+ prob(fs|label) = ---------------------------------------------------
90
+ sum(dotprod(weights, encode(fs,l)) for l in labels)
91
+
92
+ Where ``dotprod`` is the dot product::
93
+
94
+ dotprod(a,b) = sum(x*y for (x,y) in zip(a,b))
95
+ """
96
+
97
+ def __init__(self, encoding, weights, logarithmic=True):
98
+ """
99
+ Construct a new maxent classifier model. Typically, new
100
+ classifier models are created using the ``train()`` method.
101
+
102
+ :type encoding: MaxentFeatureEncodingI
103
+ :param encoding: An encoding that is used to convert the
104
+ featuresets that are given to the ``classify`` method into
105
+ joint-feature vectors, which are used by the maxent
106
+ classifier model.
107
+
108
+ :type weights: list of float
109
+ :param weights: The feature weight vector for this classifier.
110
+
111
+ :type logarithmic: bool
112
+ :param logarithmic: If false, then use non-logarithmic weights.
113
+ """
114
+ self._encoding = encoding
115
+ self._weights = weights
116
+ self._logarithmic = logarithmic
117
+ # self._logarithmic = False
118
+ assert encoding.length() == len(weights)
119
+
120
+ def labels(self):
121
+ return self._encoding.labels()
122
+
123
+ def set_weights(self, new_weights):
124
+ """
125
+ Set the feature weight vector for this classifier.
126
+ :param new_weights: The new feature weight vector.
127
+ :type new_weights: list of float
128
+ """
129
+ self._weights = new_weights
130
+ assert self._encoding.length() == len(new_weights)
131
+
132
+ def weights(self):
133
+ """
134
+ :return: The feature weight vector for this classifier.
135
+ :rtype: list of float
136
+ """
137
+ return self._weights
138
+
139
+ def classify(self, featureset):
140
+ return self.prob_classify(featureset).max()
141
+
142
+ def prob_classify(self, featureset):
143
+ prob_dict = {}
144
+ for label in self._encoding.labels():
145
+ feature_vector = self._encoding.encode(featureset, label)
146
+
147
+ if self._logarithmic:
148
+ total = 0.0
149
+ for (f_id, f_val) in feature_vector:
150
+ total += self._weights[f_id] * f_val
151
+ prob_dict[label] = total
152
+
153
+ else:
154
+ prod = 1.0
155
+ for (f_id, f_val) in feature_vector:
156
+ prod *= self._weights[f_id] ** f_val
157
+ prob_dict[label] = prod
158
+
159
+ # Normalize the dictionary to give a probability distribution
160
+ return DictionaryProbDist(prob_dict, log=self._logarithmic, normalize=True)
161
+
162
+ def explain(self, featureset, columns=4):
163
+ """
164
+ Print a table showing the effect of each of the features in
165
+ the given feature set, and how they combine to determine the
166
+ probabilities of each label for that featureset.
167
+ """
168
+ descr_width = 50
169
+ TEMPLATE = " %-" + str(descr_width - 2) + "s%s%8.3f"
170
+
171
+ pdist = self.prob_classify(featureset)
172
+ labels = sorted(pdist.samples(), key=pdist.prob, reverse=True)
173
+ labels = labels[:columns]
174
+ print(
175
+ " Feature".ljust(descr_width)
176
+ + "".join("%8s" % (("%s" % l)[:7]) for l in labels)
177
+ )
178
+ print(" " + "-" * (descr_width - 2 + 8 * len(labels)))
179
+ sums = defaultdict(int)
180
+ for i, label in enumerate(labels):
181
+ feature_vector = self._encoding.encode(featureset, label)
182
+ feature_vector.sort(
183
+ key=lambda fid__: abs(self._weights[fid__[0]]), reverse=True
184
+ )
185
+ for (f_id, f_val) in feature_vector:
186
+ if self._logarithmic:
187
+ score = self._weights[f_id] * f_val
188
+ else:
189
+ score = self._weights[f_id] ** f_val
190
+ descr = self._encoding.describe(f_id)
191
+ descr = descr.split(" and label is ")[0] # hack
192
+ descr += " (%s)" % f_val # hack
193
+ if len(descr) > 47:
194
+ descr = descr[:44] + "..."
195
+ print(TEMPLATE % (descr, i * 8 * " ", score))
196
+ sums[label] += score
197
+ print(" " + "-" * (descr_width - 1 + 8 * len(labels)))
198
+ print(
199
+ " TOTAL:".ljust(descr_width) + "".join("%8.3f" % sums[l] for l in labels)
200
+ )
201
+ print(
202
+ " PROBS:".ljust(descr_width)
203
+ + "".join("%8.3f" % pdist.prob(l) for l in labels)
204
+ )
205
+
206
+ def most_informative_features(self, n=10):
207
+ """
208
+ Generates the ranked list of informative features from most to least.
209
+ """
210
+ if hasattr(self, "_most_informative_features"):
211
+ return self._most_informative_features[:n]
212
+ else:
213
+ self._most_informative_features = sorted(
214
+ list(range(len(self._weights))),
215
+ key=lambda fid: abs(self._weights[fid]),
216
+ reverse=True,
217
+ )
218
+ return self._most_informative_features[:n]
219
+
220
+ def show_most_informative_features(self, n=10, show="all"):
221
+ """
222
+ :param show: all, neg, or pos (for negative-only or positive-only)
223
+ :type show: str
224
+ :param n: The no. of top features
225
+ :type n: int
226
+ """
227
+ # Use None to get the full list of ranked features.
228
+ fids = self.most_informative_features(None)
229
+ if show == "pos":
230
+ fids = [fid for fid in fids if self._weights[fid] > 0]
231
+ elif show == "neg":
232
+ fids = [fid for fid in fids if self._weights[fid] < 0]
233
+ for fid in fids[:n]:
234
+ print(f"{self._weights[fid]:8.3f} {self._encoding.describe(fid)}")
235
+
236
+ def __repr__(self):
237
+ return "<ConditionalExponentialClassifier: %d labels, %d features>" % (
238
+ len(self._encoding.labels()),
239
+ self._encoding.length(),
240
+ )
241
+
242
+ #: A list of the algorithm names that are accepted for the
243
+ #: ``train()`` method's ``algorithm`` parameter.
244
+ ALGORITHMS = ["GIS", "IIS", "MEGAM", "TADM"]
245
+
246
+ @classmethod
247
+ def train(
248
+ cls,
249
+ train_toks,
250
+ algorithm=None,
251
+ trace=3,
252
+ encoding=None,
253
+ labels=None,
254
+ gaussian_prior_sigma=0,
255
+ **cutoffs,
256
+ ):
257
+ """
258
+ Train a new maxent classifier based on the given corpus of
259
+ training samples. This classifier will have its weights
260
+ chosen to maximize entropy while remaining empirically
261
+ consistent with the training corpus.
262
+
263
+ :rtype: MaxentClassifier
264
+ :return: The new maxent classifier
265
+
266
+ :type train_toks: list
267
+ :param train_toks: Training data, represented as a list of
268
+ pairs, the first member of which is a featureset,
269
+ and the second of which is a classification label.
270
+
271
+ :type algorithm: str
272
+ :param algorithm: A case-insensitive string, specifying which
273
+ algorithm should be used to train the classifier. The
274
+ following algorithms are currently available.
275
+
276
+ - Iterative Scaling Methods: Generalized Iterative Scaling (``'GIS'``),
277
+ Improved Iterative Scaling (``'IIS'``)
278
+ - External Libraries (requiring megam):
279
+ LM-BFGS algorithm, with training performed by Megam (``'megam'``)
280
+
281
+ The default algorithm is ``'IIS'``.
282
+
283
+ :type trace: int
284
+ :param trace: The level of diagnostic tracing output to produce.
285
+ Higher values produce more verbose output.
286
+ :type encoding: MaxentFeatureEncodingI
287
+ :param encoding: A feature encoding, used to convert featuresets
288
+ into feature vectors. If none is specified, then a
289
+ ``BinaryMaxentFeatureEncoding`` will be built based on the
290
+ features that are attested in the training corpus.
291
+ :type labels: list(str)
292
+ :param labels: The set of possible labels. If none is given, then
293
+ the set of all labels attested in the training data will be
294
+ used instead.
295
+ :param gaussian_prior_sigma: The sigma value for a gaussian
296
+ prior on model weights. Currently, this is supported by
297
+ ``megam``. For other algorithms, its value is ignored.
298
+ :param cutoffs: Arguments specifying various conditions under
299
+ which the training should be halted. (Some of the cutoff
300
+ conditions are not supported by some algorithms.)
301
+
302
+ - ``max_iter=v``: Terminate after ``v`` iterations.
303
+ - ``min_ll=v``: Terminate after the negative average
304
+ log-likelihood drops under ``v``.
305
+ - ``min_lldelta=v``: Terminate if a single iteration improves
306
+ log likelihood by less than ``v``.
307
+ """
308
+ if algorithm is None:
309
+ algorithm = "iis"
310
+ for key in cutoffs:
311
+ if key not in (
312
+ "max_iter",
313
+ "min_ll",
314
+ "min_lldelta",
315
+ "max_acc",
316
+ "min_accdelta",
317
+ "count_cutoff",
318
+ "norm",
319
+ "explicit",
320
+ "bernoulli",
321
+ ):
322
+ raise TypeError("Unexpected keyword arg %r" % key)
323
+ algorithm = algorithm.lower()
324
+ if algorithm == "iis":
325
+ return train_maxent_classifier_with_iis(
326
+ train_toks, trace, encoding, labels, **cutoffs
327
+ )
328
+ elif algorithm == "gis":
329
+ return train_maxent_classifier_with_gis(
330
+ train_toks, trace, encoding, labels, **cutoffs
331
+ )
332
+ elif algorithm == "megam":
333
+ return train_maxent_classifier_with_megam(
334
+ train_toks, trace, encoding, labels, gaussian_prior_sigma, **cutoffs
335
+ )
336
+ elif algorithm == "tadm":
337
+ kwargs = cutoffs
338
+ kwargs["trace"] = trace
339
+ kwargs["encoding"] = encoding
340
+ kwargs["labels"] = labels
341
+ kwargs["gaussian_prior_sigma"] = gaussian_prior_sigma
342
+ return TadmMaxentClassifier.train(train_toks, **kwargs)
343
+ else:
344
+ raise ValueError("Unknown algorithm %s" % algorithm)
345
+
346
+
347
+ #: Alias for MaxentClassifier.
348
+ ConditionalExponentialClassifier = MaxentClassifier
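A short usage sketch for the classifier defined above, assuming NLTK and numpy are installed (the tiny training set and expected outputs are purely illustrative):

from nltk.classify.maxent import MaxentClassifier

train_toks = [
    ({"outlook": "sunny"}, "no"),
    ({"outlook": "rainy"}, "yes"),
    ({"outlook": "sunny"}, "no"),
]
# "iis" and "gis" need only numpy; "megam" and "tadm" shell out to external binaries.
classifier = MaxentClassifier.train(train_toks, algorithm="iis", trace=0, max_iter=10)
print(classifier.classify({"outlook": "sunny"}))  # expected: "no"
print(classifier.prob_classify({"outlook": "rainy"}).prob("yes"))
classifier.show_most_informative_features(5)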
349
+
350
+
351
+ ######################################################################
352
+ # { Feature Encodings
353
+ ######################################################################
354
+
355
+
356
+ class MaxentFeatureEncodingI:
357
+ """
358
+ A mapping that converts a set of input-feature values to a vector
359
+ of joint-feature values, given a label. This conversion is
360
+ necessary to translate featuresets into a format that can be used
361
+ by maximum entropy models.
362
+
363
+ The set of joint-features used by a given encoding is fixed, and
364
+ each index in the generated joint-feature vectors corresponds to a
365
+ single joint-feature. The length of the generated joint-feature
366
+ vectors is therefore constant (for a given encoding).
367
+
368
+ Because the joint-feature vectors generated by
369
+ ``MaxentFeatureEncodingI`` are typically very sparse, they are
370
+ represented as a list of ``(index, value)`` tuples, specifying the
371
+ value of each non-zero joint-feature.
372
+
373
+ Feature encodings are generally created using the ``train()``
374
+ method, which generates an appropriate encoding based on the
375
+ input-feature values and labels that are present in a given
376
+ corpus.
377
+ """
378
+
379
+ def encode(self, featureset, label):
380
+ """
381
+ Given a (featureset, label) pair, return the corresponding
382
+ vector of joint-feature values. This vector is represented as
383
+ a list of ``(index, value)`` tuples, specifying the value of
384
+ each non-zero joint-feature.
385
+
386
+ :type featureset: dict
387
+ :rtype: list(tuple(int, int))
388
+ """
389
+ raise NotImplementedError()
390
+
391
+ def length(self):
392
+ """
393
+ :return: The size of the fixed-length joint-feature vectors
394
+ that are generated by this encoding.
395
+ :rtype: int
396
+ """
397
+ raise NotImplementedError()
398
+
399
+ def labels(self):
400
+ """
401
+ :return: A list of the \"known labels\" -- i.e., all labels
402
+ ``l`` such that ``self.encode(fs,l)`` can be a nonzero
403
+ joint-feature vector for some value of ``fs``.
404
+ :rtype: list
405
+ """
406
+ raise NotImplementedError()
407
+
408
+ def describe(self, fid):
409
+ """
410
+ :return: A string describing the value of the joint-feature
411
+ whose index in the generated feature vectors is ``fid``.
412
+ :rtype: str
413
+ """
414
+ raise NotImplementedError()
415
+
416
+ def train(cls, train_toks):
417
+ """
418
+ Construct and return new feature encoding, based on a given
419
+ training corpus ``train_toks``.
420
+
421
+ :type train_toks: list(tuple(dict, str))
422
+ :param train_toks: Training data, represented as a list of
423
+ pairs, the first member of which is a feature dictionary,
424
+ and the second of which is a classification label.
425
+ """
426
+ raise NotImplementedError()
427
+
428
+
429
+ class FunctionBackedMaxentFeatureEncoding(MaxentFeatureEncodingI):
430
+ """
431
+ A feature encoding that calls a user-supplied function to map a
432
+ given featureset/label pair to a sparse joint-feature vector.
433
+ """
434
+
435
+ def __init__(self, func, length, labels):
436
+ """
437
+ Construct a new feature encoding based on the given function.
438
+
439
+ :type func: (callable)
440
+ :param func: A function that takes two arguments, a featureset
441
+ and a label, and returns the sparse joint feature vector
442
+ that encodes them::
443
+
444
+ func(featureset, label) -> feature_vector
445
+
446
+ This sparse joint feature vector (``feature_vector``) is a
447
+ list of ``(index,value)`` tuples.
448
+
449
+ :type length: int
450
+ :param length: The size of the fixed-length joint-feature
451
+ vectors that are generated by this encoding.
452
+
453
+ :type labels: list
454
+ :param labels: A list of the \"known labels\" for this
455
+ encoding -- i.e., all labels ``l`` such that
456
+ ``self.encode(fs,l)`` can be a nonzero joint-feature vector
457
+ for some value of ``fs``.
458
+ """
459
+ self._length = length
460
+ self._func = func
461
+ self._labels = labels
462
+
463
+ def encode(self, featureset, label):
464
+ return self._func(featureset, label)
465
+
466
+ def length(self):
467
+ return self._length
468
+
469
+ def labels(self):
470
+ return self._labels
471
+
472
+ def describe(self, fid):
473
+ return "no description available"
474
+
475
+
476
+ class BinaryMaxentFeatureEncoding(MaxentFeatureEncodingI):
477
+ """
478
+ A feature encoding that generates vectors containing a binary
479
+ joint-features of the form:
480
+
481
+ | joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
482
+ | {
483
+ | { 0 otherwise
484
+
485
+ Where ``fname`` is the name of an input-feature, ``fval`` is a value
486
+ for that input-feature, and ``label`` is a label.
487
+
488
+ Typically, these features are constructed based on a training
489
+ corpus, using the ``train()`` method. This method will create one
490
+ feature for each combination of ``fname``, ``fval``, and ``label``
491
+ that occurs at least once in the training corpus.
492
+
493
+ The ``unseen_features`` parameter can be used to add "unseen-value
494
+ features", which are used whenever an input feature has a value
495
+ that was not encountered in the training corpus. These features
496
+ have the form:
497
+
498
+ | joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
499
+ | { and l == label
500
+ | {
501
+ | { 0 otherwise
502
+
503
+ Where ``is_unseen(fname, fval)`` is true if the encoding does not
504
+ contain any joint features that are true when ``fs[fname]==fval``.
505
+
506
+ The ``alwayson_features`` parameter can be used to add "always-on
507
+ features", which have the form::
508
+
509
+ | joint_feat(fs, l) = { 1 if (l == label)
510
+ | {
511
+ | { 0 otherwise
512
+
513
+ These always-on features allow the maxent model to directly model
514
+ the prior probabilities of each label.
515
+ """
516
+
517
+ def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
518
+ """
519
+ :param labels: A list of the \"known labels\" for this encoding.
520
+
521
+ :param mapping: A dictionary mapping from ``(fname,fval,label)``
522
+ tuples to corresponding joint-feature indexes. These
523
+ indexes must be the set of integers from 0...len(mapping).
524
+ If ``mapping[fname,fval,label]=id``, then
525
+ ``self.encode({..., fname:fval, ...}, label)[id]`` is 1;
526
+ otherwise, it is 0.
527
+
528
+ :param unseen_features: If true, then include unseen value
529
+ features in the generated joint-feature vectors.
530
+
531
+ :param alwayson_features: If true, then include always-on
532
+ features in the generated joint-feature vectors.
533
+ """
534
+ if set(mapping.values()) != set(range(len(mapping))):
535
+ raise ValueError(
536
+ "Mapping values must be exactly the "
537
+ "set of integers from 0...len(mapping)"
538
+ )
539
+
540
+ self._labels = list(labels)
541
+ """A list of attested labels."""
542
+
543
+ self._mapping = mapping
544
+ """dict mapping from (fname,fval,label) -> fid"""
545
+
546
+ self._length = len(mapping)
547
+ """The length of generated joint feature vectors."""
548
+
549
+ self._alwayson = None
550
+ """dict mapping from label -> fid"""
551
+
552
+ self._unseen = None
553
+ """dict mapping from fname -> fid"""
554
+
555
+ if alwayson_features:
556
+ self._alwayson = {
557
+ label: i + self._length for (i, label) in enumerate(labels)
558
+ }
559
+ self._length += len(self._alwayson)
560
+
561
+ if unseen_features:
562
+ fnames = {fname for (fname, fval, label) in mapping}
563
+ self._unseen = {fname: i + self._length for (i, fname) in enumerate(fnames)}
564
+ self._length += len(fnames)
565
+
566
+ def encode(self, featureset, label):
567
+ # Inherit docs.
568
+ encoding = []
569
+
570
+ # Convert input-features to joint-features:
571
+ for fname, fval in featureset.items():
572
+ # Known feature name & value:
573
+ if (fname, fval, label) in self._mapping:
574
+ encoding.append((self._mapping[fname, fval, label], 1))
575
+
576
+ # Otherwise, we might want to fire an "unseen-value feature".
577
+ elif self._unseen:
578
+ # Have we seen this fname/fval combination with any label?
579
+ for label2 in self._labels:
580
+ if (fname, fval, label2) in self._mapping:
581
+ break # we've seen this fname/fval combo
582
+ # We haven't -- fire the unseen-value feature
583
+ else:
584
+ if fname in self._unseen:
585
+ encoding.append((self._unseen[fname], 1))
586
+
587
+ # Add always-on features:
588
+ if self._alwayson and label in self._alwayson:
589
+ encoding.append((self._alwayson[label], 1))
590
+
591
+ return encoding
592
+
593
+ def describe(self, f_id):
594
+ # Inherit docs.
595
+ if not isinstance(f_id, int):
596
+ raise TypeError("describe() expected an int")
597
+ try:
598
+ self._inv_mapping
599
+ except AttributeError:
600
+ self._inv_mapping = [-1] * len(self._mapping)
601
+ for (info, i) in self._mapping.items():
602
+ self._inv_mapping[i] = info
603
+
604
+ if f_id < len(self._mapping):
605
+ (fname, fval, label) = self._inv_mapping[f_id]
606
+ return f"{fname}=={fval!r} and label is {label!r}"
607
+ elif self._alwayson and f_id in self._alwayson.values():
608
+ for (label, f_id2) in self._alwayson.items():
609
+ if f_id == f_id2:
610
+ return "label is %r" % label
611
+ elif self._unseen and f_id in self._unseen.values():
612
+ for (fname, f_id2) in self._unseen.items():
613
+ if f_id == f_id2:
614
+ return "%s is unseen" % fname
615
+ else:
616
+ raise ValueError("Bad feature id")
617
+
618
+ def labels(self):
619
+ # Inherit docs.
620
+ return self._labels
621
+
622
+ def length(self):
623
+ # Inherit docs.
624
+ return self._length
625
+
626
+ @classmethod
627
+ def train(cls, train_toks, count_cutoff=0, labels=None, **options):
628
+ """
629
+ Construct and return new feature encoding, based on a given
630
+ training corpus ``train_toks``. See the class description
631
+ ``BinaryMaxentFeatureEncoding`` for a description of the
632
+ joint-features that will be included in this encoding.
633
+
634
+ :type train_toks: list(tuple(dict, str))
635
+ :param train_toks: Training data, represented as a list of
636
+ pairs, the first member of which is a feature dictionary,
637
+ and the second of which is a classification label.
638
+
639
+ :type count_cutoff: int
640
+ :param count_cutoff: A cutoff value that is used to discard
641
+ rare joint-features. If a joint-feature's value is 1 in
642
+ fewer than ``count_cutoff`` training instances,
643
+ then that joint-feature is not included in the generated
644
+ encoding.
645
+
646
+ :type labels: list
647
+ :param labels: A list of labels that should be used by the
648
+ classifier. If not specified, then the set of labels
649
+ attested in ``train_toks`` will be used.
650
+
651
+ :param options: Extra parameters for the constructor, such as
652
+ ``unseen_features`` and ``alwayson_features``.
653
+ """
654
+ mapping = {} # maps (fname, fval, label) -> fid
655
+ seen_labels = set() # The set of labels we've encountered
656
+ count = defaultdict(int) # maps (fname, fval) -> count
657
+
658
+ for (tok, label) in train_toks:
659
+ if labels and label not in labels:
660
+ raise ValueError("Unexpected label %s" % label)
661
+ seen_labels.add(label)
662
+
663
+ # Record each of the features.
664
+ for (fname, fval) in tok.items():
665
+
666
+ # If a count cutoff is given, then only add a joint
667
+ # feature once the corresponding (fname, fval, label)
668
+ # tuple exceeds that cutoff.
669
+ count[fname, fval] += 1
670
+ if count[fname, fval] >= count_cutoff:
671
+ if (fname, fval, label) not in mapping:
672
+ mapping[fname, fval, label] = len(mapping)
673
+
674
+ if labels is None:
675
+ labels = seen_labels
676
+ return cls(labels, mapping, **options)
677
+
678
+
679
+ class GISEncoding(BinaryMaxentFeatureEncoding):
680
+ """
681
+ A binary feature encoding which adds one new joint-feature to the
682
+ joint-features defined by ``BinaryMaxentFeatureEncoding``: a
683
+ correction feature, whose value is chosen to ensure that the
684
+ sparse vector always sums to a constant non-negative number. This
685
+ new feature is used to ensure two preconditions for the GIS
686
+ training algorithm:
687
+
688
+ - At least one feature vector index must be nonzero for every
689
+ token.
690
+ - The feature vector must sum to a constant non-negative number
691
+ for every token.
692
+ """
693
+
694
+ def __init__(
695
+ self, labels, mapping, unseen_features=False, alwayson_features=False, C=None
696
+ ):
697
+ """
698
+ :param C: The correction constant. The value of the correction
699
+ feature is based on this value. In particular, its value is
700
+ ``C - sum([v for (f,v) in encoding])``.
701
+ :seealso: ``BinaryMaxentFeatureEncoding.__init__``
702
+ """
703
+ BinaryMaxentFeatureEncoding.__init__(
704
+ self, labels, mapping, unseen_features, alwayson_features
705
+ )
706
+ if C is None:
707
+ C = len({fname for (fname, fval, label) in mapping}) + 1
708
+ self._C = C
709
+
710
+ @property
711
+ def C(self):
712
+ """The non-negative constant that all encoded feature vectors
713
+ will sum to."""
714
+ return self._C
715
+
716
+ def encode(self, featureset, label):
717
+ # Get the basic encoding.
718
+ encoding = BinaryMaxentFeatureEncoding.encode(self, featureset, label)
719
+ base_length = BinaryMaxentFeatureEncoding.length(self)
720
+
721
+ # Add a correction feature.
722
+ total = sum(v for (f, v) in encoding)
723
+ if total >= self._C:
724
+ raise ValueError("Correction feature is not high enough!")
725
+ encoding.append((base_length, self._C - total))
726
+
727
+ # Return the result
728
+ return encoding
729
+
730
+ def length(self):
731
+ return BinaryMaxentFeatureEncoding.length(self) + 1
732
+
733
+ def describe(self, f_id):
734
+ if f_id == BinaryMaxentFeatureEncoding.length(self):
735
+ return "Correction feature (%s)" % self._C
736
+ else:
737
+ return BinaryMaxentFeatureEncoding.describe(self, f_id)
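A quick check of the invariant ``GISEncoding`` adds: every encoded vector sums to the constant ``C`` (toy data):

from nltk.classify.maxent import GISEncoding

train_toks = [({"a": 1, "b": 2}, "x"), ({"a": 1}, "y")]
enc = GISEncoding.train(train_toks)
vec = enc.encode({"a": 1, "b": 2}, "x")
# The appended correction feature tops the vector up to exactly C.
assert sum(val for (fid, val) in vec) == enc.C
print(enc.C, vec)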
738
+
739
+
740
+ class TadmEventMaxentFeatureEncoding(BinaryMaxentFeatureEncoding):
741
+ def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
742
+ self._mapping = OrderedDict(mapping)
743
+ self._label_mapping = OrderedDict()
744
+ BinaryMaxentFeatureEncoding.__init__(
745
+ self, labels, self._mapping, unseen_features, alwayson_features
746
+ )
747
+
748
+ def encode(self, featureset, label):
749
+ encoding = []
750
+ for feature, value in featureset.items():
751
+ if (feature, label) not in self._mapping:
752
+ self._mapping[(feature, label)] = len(self._mapping)
753
+ if value not in self._label_mapping:
754
+ if not isinstance(value, int):
755
+ self._label_mapping[value] = len(self._label_mapping)
756
+ else:
757
+ self._label_mapping[value] = value
758
+ encoding.append(
759
+ (self._mapping[(feature, label)], self._label_mapping[value])
760
+ )
761
+ return encoding
762
+
763
+ def labels(self):
764
+ return self._labels
765
+
766
+ def describe(self, fid):
767
+ for (feature, label) in self._mapping:
768
+ if self._mapping[(feature, label)] == fid:
769
+ return (feature, label)
770
+
771
+ def length(self):
772
+ return len(self._mapping)
773
+
774
+ @classmethod
775
+ def train(cls, train_toks, count_cutoff=0, labels=None, **options):
776
+ mapping = OrderedDict()
777
+ if not labels:
778
+ labels = []
779
+
780
+ # This gets read twice, so compute the values in case it's lazy.
781
+ train_toks = list(train_toks)
782
+
783
+ for (featureset, label) in train_toks:
784
+ if label not in labels:
785
+ labels.append(label)
786
+
787
+ for (featureset, label) in train_toks:
788
+ for label in labels:
789
+ for feature in featureset:
790
+ if (feature, label) not in mapping:
791
+ mapping[(feature, label)] = len(mapping)
792
+
793
+ return cls(labels, mapping, **options)
794
+
795
+
796
+ class TypedMaxentFeatureEncoding(MaxentFeatureEncodingI):
797
+ """
798
+ A feature encoding that generates vectors containing integer,
799
+ float and binary joint-features of the form:
800
+
801
+ Binary (for string and boolean features):
802
+
803
+ | joint_feat(fs, l) = { 1 if (fs[fname] == fval) and (l == label)
804
+ | {
805
+ | { 0 otherwise
806
+
807
+ Value (for integer and float features):
808
+
809
+ | joint_feat(fs, l) = { fval if (fs[fname] == type(fval))
810
+ | { and (l == label)
811
+ | {
812
+ | { not encoded otherwise
813
+
814
+ Where ``fname`` is the name of an input-feature, ``fval`` is a value
815
+ for that input-feature, and ``label`` is a label.
816
+
817
+ Typically, these features are constructed based on a training
818
+ corpus, using the ``train()`` method.
819
+
820
+ For string and boolean features [type(fval) not in (int, float)]
821
+ this method will create one feature for each combination of
822
+ ``fname``, ``fval``, and ``label`` that occurs at least once in the
823
+ training corpus.
824
+
825
+ For integer and float features [type(fval) in (int, float)] this
826
+ method will create one feature for each combination of ``fname``
827
+ and ``label`` that occurs at least once in the training corpus.
828
+
829
+ For binary features the ``unseen_features`` parameter can be used
830
+ to add "unseen-value features", which are used whenever an input
831
+ feature has a value that was not encountered in the training
832
+ corpus. These features have the form:
833
+
834
+ | joint_feat(fs, l) = { 1 if is_unseen(fname, fs[fname])
835
+ | { and l == label
836
+ | {
837
+ | { 0 otherwise
838
+
839
+ Where ``is_unseen(fname, fval)`` is true if the encoding does not
840
+ contain any joint features that are true when ``fs[fname]==fval``.
841
+
842
+ The ``alwayson_features`` parameter can be used to add "always-on
843
+ features", which have the form:
844
+
845
+ | joint_feat(fs, l) = { 1 if (l == label)
846
+ | {
847
+ | { 0 otherwise
848
+
849
+ These always-on features allow the maxent model to directly model
850
+ the prior probabilities of each label.
851
+ """
852
+
853
+ def __init__(self, labels, mapping, unseen_features=False, alwayson_features=False):
854
+ """
855
+ :param labels: A list of the \"known labels\" for this encoding.
856
+
857
+ :param mapping: A dictionary mapping from ``(fname,fval,label)``
858
+ tuples to corresponding joint-feature indexes. These
859
+ indexes must be the set of integers from 0...len(mapping).
860
+ If ``mapping[fname,fval,label]=id``, then
861
+ ``self.encode({..., fname:fval, ...}, label)[id]`` is 1;
862
+ otherwise, it is 0.
863
+
864
+ :param unseen_features: If true, then include unseen value
865
+ features in the generated joint-feature vectors.
866
+
867
+ :param alwayson_features: If true, then include always-on
868
+ features in the generated joint-feature vectors.
869
+ """
870
+ if set(mapping.values()) != set(range(len(mapping))):
871
+ raise ValueError(
872
+ "Mapping values must be exactly the "
873
+ "set of integers from 0...len(mapping)"
874
+ )
875
+
876
+ self._labels = list(labels)
877
+ """A list of attested labels."""
878
+
879
+ self._mapping = mapping
880
+ """dict mapping from (fname,fval,label) -> fid"""
881
+
882
+ self._length = len(mapping)
883
+ """The length of generated joint feature vectors."""
884
+
885
+ self._alwayson = None
886
+ """dict mapping from label -> fid"""
887
+
888
+ self._unseen = None
889
+ """dict mapping from fname -> fid"""
890
+
891
+ if alwayson_features:
892
+ self._alwayson = {
893
+ label: i + self._length for (i, label) in enumerate(labels)
894
+ }
895
+ self._length += len(self._alwayson)
896
+
897
+ if unseen_features:
898
+ fnames = {fname for (fname, fval, label) in mapping}
899
+ self._unseen = {fname: i + self._length for (i, fname) in enumerate(fnames)}
900
+ self._length += len(fnames)
901
+
902
+ def encode(self, featureset, label):
903
+ # Inherit docs.
904
+ encoding = []
905
+
906
+ # Convert input-features to joint-features:
907
+ for fname, fval in featureset.items():
908
+ if isinstance(fval, (int, float)):
909
+ # Known feature name & value:
910
+ if (fname, type(fval), label) in self._mapping:
911
+ encoding.append((self._mapping[fname, type(fval), label], fval))
912
+ else:
913
+ # Known feature name & value:
914
+ if (fname, fval, label) in self._mapping:
915
+ encoding.append((self._mapping[fname, fval, label], 1))
916
+
917
+ # Otherwise, we might want to fire an "unseen-value feature".
918
+ elif self._unseen:
919
+ # Have we seen this fname/fval combination with any label?
920
+ for label2 in self._labels:
921
+ if (fname, fval, label2) in self._mapping:
922
+ break # we've seen this fname/fval combo
923
+ # We haven't -- fire the unseen-value feature
924
+ else:
925
+ if fname in self._unseen:
926
+ encoding.append((self._unseen[fname], 1))
927
+
928
+ # Add always-on features:
929
+ if self._alwayson and label in self._alwayson:
930
+ encoding.append((self._alwayson[label], 1))
931
+
932
+ return encoding
933
+
934
+ def describe(self, f_id):
935
+ # Inherit docs.
936
+ if not isinstance(f_id, int):
937
+ raise TypeError("describe() expected an int")
938
+ try:
939
+ self._inv_mapping
940
+ except AttributeError:
941
+ self._inv_mapping = [-1] * len(self._mapping)
942
+ for (info, i) in self._mapping.items():
943
+ self._inv_mapping[i] = info
944
+
945
+ if f_id < len(self._mapping):
946
+ (fname, fval, label) = self._inv_mapping[f_id]
947
+ return f"{fname}=={fval!r} and label is {label!r}"
948
+ elif self._alwayson and f_id in self._alwayson.values():
949
+ for (label, f_id2) in self._alwayson.items():
950
+ if f_id == f_id2:
951
+ return "label is %r" % label
952
+ elif self._unseen and f_id in self._unseen.values():
953
+ for (fname, f_id2) in self._unseen.items():
954
+ if f_id == f_id2:
955
+ return "%s is unseen" % fname
956
+ else:
957
+ raise ValueError("Bad feature id")
958
+
959
+ def labels(self):
960
+ # Inherit docs.
961
+ return self._labels
962
+
963
+ def length(self):
964
+ # Inherit docs.
965
+ return self._length
966
+
967
+ @classmethod
968
+ def train(cls, train_toks, count_cutoff=0, labels=None, **options):
969
+ """
970
+ Construct and return new feature encoding, based on a given
971
+ training corpus ``train_toks``. See the class description
972
+ ``TypedMaxentFeatureEncoding`` for a description of the
973
+ joint-features that will be included in this encoding.
974
+
975
+ Note: recognized feature value types are (int, float); other
976
+ types are interpreted as regular binary features.
977
+
978
+ :type train_toks: list(tuple(dict, str))
979
+ :param train_toks: Training data, represented as a list of
980
+ pairs, the first member of which is a feature dictionary,
981
+ and the second of which is a classification label.
982
+
983
+ :type count_cutoff: int
984
+ :param count_cutoff: A cutoff value that is used to discard
985
+ rare joint-features. If a joint-feature's value is 1 in
986
+ fewer than ``count_cutoff`` training instances,
987
+ then that joint-feature is not included in the generated
988
+ encoding.
989
+
990
+ :type labels: list
991
+ :param labels: A list of labels that should be used by the
992
+ classifier. If not specified, then the set of labels
993
+ attested in ``train_toks`` will be used.
994
+
995
+ :param options: Extra parameters for the constructor, such as
996
+ ``unseen_features`` and ``alwayson_features``.
997
+ """
998
+ mapping = {} # maps (fname, fval, label) -> fid
999
+ seen_labels = set() # The set of labels we've encountered
1000
+ count = defaultdict(int) # maps (fname, fval) -> count
1001
+
1002
+ for (tok, label) in train_toks:
1003
+ if labels and label not in labels:
1004
+ raise ValueError("Unexpected label %s" % label)
1005
+ seen_labels.add(label)
1006
+
1007
+ # Record each of the features.
1008
+ for (fname, fval) in tok.items():
1009
+ if type(fval) in (int, float):
1010
+ fval = type(fval)
1011
+ # If a count cutoff is given, then only add a joint
1012
+ # feature once the corresponding (fname, fval, label)
1013
+ # tuple exceeds that cutoff.
1014
+ count[fname, fval] += 1
1015
+ if count[fname, fval] >= count_cutoff:
1016
+ if (fname, fval, label) not in mapping:
1017
+ mapping[fname, fval, label] = len(mapping)
1018
+
1019
+ if labels is None:
1020
+ labels = seen_labels
1021
+ return cls(labels, mapping, **options)
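A small sketch of the typed encoding trained above, assuming NLTK is importable (values and output are illustrative):

from nltk.classify.maxent import TypedMaxentFeatureEncoding

train_toks = [({"length": 3.0, "word": "the"}, "DET"),
              ({"length": 5.0, "word": "house"}, "NOUN")]
enc = TypedMaxentFeatureEncoding.train(train_toks)
# Numeric input-features are keyed by their type, so the raw value flows into the
# vector; string features stay binary, as the class docstring above describes.
print(enc.encode({"length": 4.0, "word": "the"}, "DET"))  # e.g. [(0, 4.0), (1, 1)]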
1022
+
1023
+
1024
+ ######################################################################
1025
+ # { Classifier Trainer: Generalized Iterative Scaling
1026
+ ######################################################################
1027
+
1028
+
1029
+ def train_maxent_classifier_with_gis(
1030
+ train_toks, trace=3, encoding=None, labels=None, **cutoffs
1031
+ ):
1032
+ """
1033
+ Train a new ``ConditionalExponentialClassifier``, using the given
1034
+ training samples, using the Generalized Iterative Scaling
1035
+ algorithm. This ``ConditionalExponentialClassifier`` will encode
1036
+ the model that maximizes entropy from all the models that are
1037
+ empirically consistent with ``train_toks``.
1038
+
1039
+ :see: ``train_maxent_classifier()`` for parameter descriptions.
1040
+ """
1041
+ cutoffs.setdefault("max_iter", 100)
1042
+ cutoffchecker = CutoffChecker(cutoffs)
1043
+
1044
+ # Construct an encoding from the training data.
1045
+ if encoding is None:
1046
+ encoding = GISEncoding.train(train_toks, labels=labels)
1047
+
1048
+ if not hasattr(encoding, "C"):
1049
+ raise TypeError(
1050
+ "The GIS algorithm requires an encoding that "
1051
+ "defines C (e.g., GISEncoding)."
1052
+ )
1053
+
1054
+ # Cinv is the inverse of the sum of each joint feature vector.
1055
+ # This controls the learning rate: higher Cinv (or lower C) gives
1056
+ # faster learning.
1057
+ Cinv = 1.0 / encoding.C
1058
+
1059
+ # Count how many times each feature occurs in the training data.
1060
+ empirical_fcount = calculate_empirical_fcount(train_toks, encoding)
1061
+
1062
+ # Check for any features that are not attested in train_toks.
1063
+ unattested = set(numpy.nonzero(empirical_fcount == 0)[0])
1064
+
1065
+ # Build the classifier. Start with weight=0 for each attested
1066
+ # feature, and weight=-infinity for each unattested feature.
1067
+ weights = numpy.zeros(len(empirical_fcount), "d")
1068
+ for fid in unattested:
1069
+ weights[fid] = numpy.NINF
1070
+ classifier = ConditionalExponentialClassifier(encoding, weights)
1071
+
1072
+ # Take the log of the empirical fcount.
1073
+ log_empirical_fcount = numpy.log2(empirical_fcount)
1074
+ del empirical_fcount
1075
+
1076
+ if trace > 0:
1077
+ print(" ==> Training (%d iterations)" % cutoffs["max_iter"])
1078
+ if trace > 2:
1079
+ print()
1080
+ print(" Iteration Log Likelihood Accuracy")
1081
+ print(" ---------------------------------------")
1082
+
1083
+ # Train the classifier.
1084
+ try:
1085
+ while True:
1086
+ if trace > 2:
1087
+ ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
1088
+ acc = cutoffchecker.acc or accuracy(classifier, train_toks)
1089
+ iternum = cutoffchecker.iter
1090
+ print(" %9d %14.5f %9.3f" % (iternum, ll, acc))
1091
+
1092
+ # Use the model to estimate the number of times each
1093
+ # feature should occur in the training data.
1094
+ estimated_fcount = calculate_estimated_fcount(
1095
+ classifier, train_toks, encoding
1096
+ )
1097
+
1098
+ # Take the log of estimated fcount (avoid taking log(0).)
1099
+ for fid in unattested:
1100
+ estimated_fcount[fid] += 1
1101
+ log_estimated_fcount = numpy.log2(estimated_fcount)
1102
+ del estimated_fcount
1103
+
1104
+ # Update the classifier weights
1105
+ weights = classifier.weights()
1106
+ weights += (log_empirical_fcount - log_estimated_fcount) * Cinv
1107
+ classifier.set_weights(weights)
1108
+
1109
+ # Check the log-likelihood & accuracy cutoffs.
1110
+ if cutoffchecker.check(classifier, train_toks):
1111
+ break
1112
+
1113
+ except KeyboardInterrupt:
1114
+ print(" Training stopped: keyboard interrupt")
1115
+ except:
1116
+ raise
1117
+
1118
+ if trace > 2:
1119
+ ll = log_likelihood(classifier, train_toks)
1120
+ acc = accuracy(classifier, train_toks)
1121
+ print(f" Final {ll:14.5f} {acc:9.3f}")
1122
+
1123
+ # Return the classifier.
1124
+ return classifier
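The core of each GIS iteration above is the log-space update ``weights += (log_empirical_fcount - log_estimated_fcount) * Cinv``. A self-contained numpy illustration with invented counts:

import numpy

# Invented counts for three joint-features, purely to show the arithmetic.
empirical_fcount = numpy.array([6.0, 2.0, 4.0])
estimated_fcount = numpy.array([5.0, 3.0, 4.0])
Cinv = 1.0 / 4.0  # inverse of the GISEncoding constant C

weights = numpy.zeros(3)
weights += (numpy.log2(empirical_fcount) - numpy.log2(estimated_fcount)) * Cinv
print(weights)  # under-predicted features get a positive nudge, over-predicted ones a negative one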
1125
+
1126
+
1127
+ def calculate_empirical_fcount(train_toks, encoding):
1128
+ fcount = numpy.zeros(encoding.length(), "d")
1129
+
1130
+ for tok, label in train_toks:
1131
+ for (index, val) in encoding.encode(tok, label):
1132
+ fcount[index] += val
1133
+
1134
+ return fcount
1135
+
1136
+
1137
+ def calculate_estimated_fcount(classifier, train_toks, encoding):
1138
+ fcount = numpy.zeros(encoding.length(), "d")
1139
+
1140
+ for tok, label in train_toks:
1141
+ pdist = classifier.prob_classify(tok)
1142
+ for label in pdist.samples():
1143
+ prob = pdist.prob(label)
1144
+ for (fid, fval) in encoding.encode(tok, label):
1145
+ fcount[fid] += prob * fval
1146
+
1147
+ return fcount
1148
+
1149
+
1150
+ ######################################################################
1151
+ # { Classifier Trainer: Improved Iterative Scaling
1152
+ ######################################################################
1153
+
1154
+
1155
+ def train_maxent_classifier_with_iis(
1156
+ train_toks, trace=3, encoding=None, labels=None, **cutoffs
1157
+ ):
1158
+ """
1159
+ Train a new ``ConditionalExponentialClassifier``, using the given
1160
+ training samples, using the Improved Iterative Scaling algorithm.
1161
+ This ``ConditionalExponentialClassifier`` will encode the model
1162
+ that maximizes entropy from all the models that are empirically
1163
+ consistent with ``train_toks``.
1164
+
1165
+ :see: ``train_maxent_classifier()`` for parameter descriptions.
1166
+ """
1167
+ cutoffs.setdefault("max_iter", 100)
1168
+ cutoffchecker = CutoffChecker(cutoffs)
1169
+
1170
+ # Construct an encoding from the training data.
1171
+ if encoding is None:
1172
+ encoding = BinaryMaxentFeatureEncoding.train(train_toks, labels=labels)
1173
+
1174
+ # Count how many times each feature occurs in the training data.
1175
+ empirical_ffreq = calculate_empirical_fcount(train_toks, encoding) / len(train_toks)
1176
+
1177
+ # Find the nf map, and related variables nfarray and nfident.
1178
+ # nf is the sum of the features for a given labeled text.
1179
+ # nfmap compresses this sparse set of values to a dense list.
1180
+ # nfarray performs the reverse operation. nfident is
1181
+ # nfarray multiplied by an identity matrix.
1182
+ nfmap = calculate_nfmap(train_toks, encoding)
1183
+ nfarray = numpy.array(sorted(nfmap, key=nfmap.__getitem__), "d")
1184
+ nftranspose = numpy.reshape(nfarray, (len(nfarray), 1))
1185
+
1186
+ # Check for any features that are not attested in train_toks.
1187
+ unattested = set(numpy.nonzero(empirical_ffreq == 0)[0])
1188
+
1189
+ # Build the classifier. Start with weight=0 for each attested
1190
+ # feature, and weight=-infinity for each unattested feature.
1191
+ weights = numpy.zeros(len(empirical_ffreq), "d")
1192
+ for fid in unattested:
1193
+ weights[fid] = numpy.NINF
1194
+ classifier = ConditionalExponentialClassifier(encoding, weights)
1195
+
1196
+ if trace > 0:
1197
+ print(" ==> Training (%d iterations)" % cutoffs["max_iter"])
1198
+ if trace > 2:
1199
+ print()
1200
+ print(" Iteration Log Likelihood Accuracy")
1201
+ print(" ---------------------------------------")
1202
+
1203
+ # Train the classifier.
1204
+ try:
1205
+ while True:
1206
+ if trace > 2:
1207
+ ll = cutoffchecker.ll or log_likelihood(classifier, train_toks)
1208
+ acc = cutoffchecker.acc or accuracy(classifier, train_toks)
1209
+ iternum = cutoffchecker.iter
1210
+ print(" %9d %14.5f %9.3f" % (iternum, ll, acc))
1211
+
1212
+ # Calculate the deltas for this iteration, using Newton's method.
1213
+ deltas = calculate_deltas(
1214
+ train_toks,
1215
+ classifier,
1216
+ unattested,
1217
+ empirical_ffreq,
1218
+ nfmap,
1219
+ nfarray,
1220
+ nftranspose,
1221
+ encoding,
1222
+ )
1223
+
1224
+ # Use the deltas to update our weights.
1225
+ weights = classifier.weights()
1226
+ weights += deltas
1227
+ classifier.set_weights(weights)
1228
+
1229
+ # Check the log-likelihood & accuracy cutoffs.
1230
+ if cutoffchecker.check(classifier, train_toks):
1231
+ break
1232
+
1233
+ except KeyboardInterrupt:
1234
+ print(" Training stopped: keyboard interrupt")
1235
+ except:
1236
+ raise
1237
+
1238
+ if trace > 2:
1239
+ ll = log_likelihood(classifier, train_toks)
1240
+ acc = accuracy(classifier, train_toks)
1241
+ print(f" Final {ll:14.5f} {acc:9.3f}")
1242
+
1243
+ # Return the classifier.
1244
+ return classifier
1245
+
1246
+
1247
+ def calculate_nfmap(train_toks, encoding):
1248
+ """
1249
+ Construct a map that can be used to compress ``nf`` (which is
1250
+ typically sparse).
1251
+
1252
+ *nf(feature_vector)* is the sum of the feature values for
1253
+ *feature_vector*.
1254
+
1255
+ This represents the number of features that are active for a
1256
+ given labeled text. This method finds all values of *nf(t)*
1257
+ that are attested for at least one token in the given list of
1258
+ training tokens; and constructs a dictionary mapping these
1259
+ attested values to a continuous range *0...N*. For example,
1260
+ if the only values of *nf()* that were attested were 3, 5, and
1261
+ 7, then ``_nfmap`` might return the dictionary ``{3:0, 5:1, 7:2}``.
1262
+
1263
+ :return: A map that can be used to compress ``nf`` to a dense
1264
+ vector.
1265
+ :rtype: dict(int -> int)
1266
+ """
1267
+ # Map from nf to indices. This allows us to use smaller arrays.
1268
+ nfset = set()
1269
+ for tok, _ in train_toks:
1270
+ for label in encoding.labels():
1271
+ nfset.add(sum(val for (id, val) in encoding.encode(tok, label)))
1272
+ return {nf: i for (i, nf) in enumerate(nfset)}
1273
+
1274
+
1275
+ def calculate_deltas(
1276
+ train_toks,
1277
+ classifier,
1278
+ unattested,
1279
+ ffreq_empirical,
1280
+ nfmap,
1281
+ nfarray,
1282
+ nftranspose,
1283
+ encoding,
1284
+ ):
1285
+ r"""
1286
+ Calculate the update values for the classifier weights for
1287
+ this iteration of IIS. These update weights are the value of
1288
+ ``delta`` that solves the equation::
1289
+
1290
+ ffreq_empirical[i]
1291
+ =
1292
+ SUM[fs,l] (classifier.prob_classify(fs).prob(l) *
1293
+ feature_vector(fs,l)[i] *
1294
+ exp(delta[i] * nf(feature_vector(fs,l))))
1295
+
1296
+ Where:
1297
+ - *(fs,l)* is a (featureset, label) tuple from ``train_toks``
1298
+ - *feature_vector(fs,l)* = ``encoding.encode(fs,l)``
1299
+ - *nf(vector)* = ``sum([val for (id,val) in vector])``
1300
+
1301
+ This method uses Newton's method to solve this equation for
1302
+ *delta[i]*. In particular, it starts with a guess of
1303
+ ``delta[i]`` = 1; and iteratively updates ``delta`` with:
1304
+
1305
+ | delta[i] -= (ffreq_empirical[i] - sum1[i])/(-sum2[i])
1306
+
1307
+ until convergence, where *sum1* and *sum2* are defined as:
1308
+
1309
+ | sum1[i](delta) = SUM[fs,l] f[i](fs,l,delta)
1310
+ | sum2[i](delta) = SUM[fs,l] (f[i](fs,l,delta).nf(feature_vector(fs,l)))
1311
+ | f[i](fs,l,delta) = (classifier.prob_classify(fs).prob(l) .
1312
+ | feature_vector(fs,l)[i] .
1313
+ | exp(delta[i] . nf(feature_vector(fs,l))))
1314
+
1315
+ Note that *sum1* and *sum2* depend on ``delta``; so they need
1316
+ to be re-computed each iteration.
1317
+
1318
+ The variables ``nfmap``, ``nfarray``, and ``nftranspose`` are
1319
+ used to generate a dense encoding for *nf(ltext)*. This
1320
+ allows ``_deltas`` to calculate *sum1* and *sum2* using
1321
+ matrices, which yields a significant performance improvement.
1322
+
1323
+ :param train_toks: The set of training tokens.
1324
+ :type train_toks: list(tuple(dict, str))
1325
+ :param classifier: The current classifier.
1326
+ :type classifier: ClassifierI
1327
+ :param ffreq_empirical: An array containing the empirical
1328
+ frequency for each feature. The *i*\ th element of this
1329
+ array is the empirical frequency for feature *i*.
1330
+ :type ffreq_empirical: sequence of float
1331
+ :param unattested: An array that is 1 for features that are
1332
+ not attested in the training data; and 0 for features that
1333
+ are attested. In other words, ``unattested[i]==1`` iff
1334
+ ``ffreq_empirical[i]==0``.
1335
+ :type unattested: sequence of int
1336
+ :param nfmap: A map that can be used to compress ``nf`` to a dense
1337
+ vector.
1338
+ :type nfmap: dict(int -> int)
1339
+ :param nfarray: An array that can be used to uncompress ``nf``
1340
+ from a dense vector.
1341
+ :type nfarray: array(float)
1342
+ :param nftranspose: The transpose of ``nfarray``
1343
+ :type nftranspose: array(float)
1344
+ """
1345
+ # These parameters control when we decide that we've
1346
+ # converged. It probably should be possible to set these
1347
+ # manually, via keyword arguments to train.
1348
+ NEWTON_CONVERGE = 1e-12
1349
+ MAX_NEWTON = 300
1350
+
1351
+ deltas = numpy.ones(encoding.length(), "d")
1352
+
1353
+ # Precompute the A matrix:
1354
+ # A[nf][id] = sum ( p(fs) * p(label|fs) * f(fs,label) )
1355
+ # over all label,fs s.t. num_features[label,fs]=nf
1356
+ A = numpy.zeros((len(nfmap), encoding.length()), "d")
1357
+
1358
+ for tok, _ in train_toks:
1359
+ dist = classifier.prob_classify(tok)
1360
+
1361
+ for label in encoding.labels():
1362
+ # Generate the feature vector
1363
+ feature_vector = encoding.encode(tok, label)
1364
+ # Find the number of active features
1365
+ nf = sum(val for (id, val) in feature_vector)
1366
+ # Update the A matrix
1367
+ for (id, val) in feature_vector:
1368
+ A[nfmap[nf], id] += dist.prob(label) * val
1369
+ A /= len(train_toks)
1370
+
1371
+ # Iteratively solve for delta. Use the following variables:
1372
+ # - nf_delta[x][y] = nfarray[x] * delta[y]
1373
+ # - exp_nf_delta[x][y] = exp(nf[x] * delta[y])
1374
+ # - nf_exp_nf_delta[x][y] = nf[x] * exp(nf[x] * delta[y])
1375
+ # - sum1[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
1376
+ # exp(delta[i]nf)
1377
+ # - sum2[i][nf] = sum p(fs)p(label|fs)f[i](label,fs)
1378
+ # nf exp(delta[i]nf)
1379
+ for rangenum in range(MAX_NEWTON):
1380
+ nf_delta = numpy.outer(nfarray, deltas)
1381
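+ # NLTK stores classifier weights in base 2 (cf. the explicit base-e -> base-2
+ # conversions in the megam and tadm trainers below), so the "exp" in the
+ # comments above is computed as 2**x here rather than e**x.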
+ exp_nf_delta = 2**nf_delta
1382
+ nf_exp_nf_delta = nftranspose * exp_nf_delta
1383
+ sum1 = numpy.sum(exp_nf_delta * A, axis=0)
1384
+ sum2 = numpy.sum(nf_exp_nf_delta * A, axis=0)
1385
+
1386
+ # Avoid division by zero.
1387
+ for fid in unattested:
1388
+ sum2[fid] += 1
1389
+
1390
+ # Update the deltas.
1391
+ deltas -= (ffreq_empirical - sum1) / -sum2
1392
+
1393
+ # We can stop once we converge.
1394
+ n_error = numpy.sum(abs(ffreq_empirical - sum1)) / numpy.sum(abs(deltas))
1395
+ if n_error < NEWTON_CONVERGE:
1396
+ return deltas
1397
+
1398
+ return deltas
1399
+
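As a sanity check on the update rule documented above, a minimal self-contained sketch (hypothetical numbers, a single feature and a single attested nf value) that mimics the loop in calculate_deltas and converges to the delta solving ffreq = A * 2**(nf * delta):

    import numpy

    ffreq_empirical = numpy.array([0.4])    # hypothetical empirical feature frequency
    A = numpy.array([[0.3]])                # A[nf_index][feature_id], as precomputed above
    nfarray = numpy.array([2.0])            # the single attested nf value
    nftranspose = nfarray.reshape(-1, 1)
    deltas = numpy.ones(1, "d")

    for _ in range(300):                    # MAX_NEWTON in the code above
        exp_nf_delta = 2 ** numpy.outer(nfarray, deltas)
        sum1 = numpy.sum(exp_nf_delta * A, axis=0)
        sum2 = numpy.sum(nftranspose * exp_nf_delta * A, axis=0)
        deltas -= (ffreq_empirical - sum1) / -sum2

    print(deltas)   # ~0.2075, i.e. 0.3 * 2**(2 * delta) == 0.4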
1400
+
1401
+ ######################################################################
1402
+ # { Classifier Trainer: megam
1403
+ ######################################################################
1404
+
1405
+ # [xx] possible extension: add support for using implicit file format;
1406
+ # this would need to put requirements on what encoding is used. But
1407
+ # we may need this for other maxent classifier trainers that require
1408
+ # implicit formats anyway.
1409
+ def train_maxent_classifier_with_megam(
1410
+ train_toks, trace=3, encoding=None, labels=None, gaussian_prior_sigma=0, **kwargs
1411
+ ):
1412
+ """
1413
+ Train a new ``ConditionalExponentialClassifier``, using the given
1414
+ training samples, using the external ``megam`` library. This
1415
+ ``ConditionalExponentialClassifier`` will encode the model that
1416
+ maximizes entropy from all the models that are empirically
1417
+ consistent with ``train_toks``.
1418
+
1419
+ :see: ``train_maxent_classifier()`` for parameter descriptions.
1420
+ :see: ``nltk.classify.megam``
1421
+ """
1422
+
1423
+ explicit = True
1424
+ bernoulli = True
1425
+ if "explicit" in kwargs:
1426
+ explicit = kwargs["explicit"]
1427
+ if "bernoulli" in kwargs:
1428
+ bernoulli = kwargs["bernoulli"]
1429
+
1430
+ # Construct an encoding from the training data.
1431
+ if encoding is None:
1432
+ # Count cutoff can also be controlled by megam with the -minfc
1433
+ # option. Not sure where the best place for it is.
1434
+ count_cutoff = kwargs.get("count_cutoff", 0)
1435
+ encoding = BinaryMaxentFeatureEncoding.train(
1436
+ train_toks, count_cutoff, labels=labels, alwayson_features=True
1437
+ )
1438
+ elif labels is not None:
1439
+ raise ValueError("Specify encoding or labels, not both")
1440
+
1441
+ # Write a training file for megam.
1442
+ try:
1443
+ fd, trainfile_name = tempfile.mkstemp(prefix="nltk-")
1444
+ with open(trainfile_name, "w") as trainfile:
1445
+ write_megam_file(
1446
+ train_toks, encoding, trainfile, explicit=explicit, bernoulli=bernoulli
1447
+ )
1448
+ os.close(fd)
1449
+ except (OSError, ValueError) as e:
1450
+ raise ValueError("Error while creating megam training file: %s" % e) from e
1451
+
1452
+ # Run megam on the training file.
1453
+ options = []
1454
+ options += ["-nobias", "-repeat", "10"]
1455
+ if explicit:
1456
+ options += ["-explicit"]
1457
+ if not bernoulli:
1458
+ options += ["-fvals"]
1459
+ if gaussian_prior_sigma:
1460
+ # Lambda is just the precision of the Gaussian prior, i.e. it's the
1461
+ # inverse variance, so the parameter conversion is 1.0/sigma**2.
1462
+ # See https://users.umiacs.umd.edu/~hal/docs/daume04cg-bfgs.pdf
1463
+ inv_variance = 1.0 / gaussian_prior_sigma**2
1464
+ else:
1465
+ inv_variance = 0
1466
+ options += ["-lambda", "%.2f" % inv_variance, "-tune"]
1467
+ if trace < 3:
1468
+ options += ["-quiet"]
1469
+ if "max_iter" in kwargs:
1470
+ options += ["-maxi", "%s" % kwargs["max_iter"]]
1471
+ if "ll_delta" in kwargs:
1472
+ # [xx] this is actually a perplexity delta, not a log
1473
+ # likelihood delta
1474
+ options += ["-dpp", "%s" % abs(kwargs["ll_delta"])]
1475
+ if hasattr(encoding, "cost"):
1476
+ options += ["-multilabel"] # each possible la
1477
+ options += ["multiclass", trainfile_name]
1478
+ stdout = call_megam(options)
1479
+ # print('./megam_i686.opt ', ' '.join(options))
1480
+ # Delete the training file
1481
+ try:
1482
+ os.remove(trainfile_name)
1483
+ except OSError as e:
1484
+ print(f"Warning: unable to delete {trainfile_name}: {e}")
1485
+
1486
+ # Parse the generated weight vector.
1487
+ weights = parse_megam_weights(stdout, encoding.length(), explicit)
1488
+
1489
+ # Convert from base-e to base-2 weights.
1490
+ weights *= numpy.log2(numpy.e)
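+ # (log2(x) = ln(x) * log2(e), so multiplying the natural-log weights by
+ # log2(e) yields the equivalent base-2 weights that MaxentClassifier uses.)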
1491
+
1492
+ # Build the classifier
1493
+ return MaxentClassifier(encoding, weights)
1494
+
1495
+
1496
+ ######################################################################
1497
+ # { Classifier Trainer: tadm
1498
+ ######################################################################
1499
+
1500
+
1501
+ class TadmMaxentClassifier(MaxentClassifier):
1502
+ @classmethod
1503
+ def train(cls, train_toks, **kwargs):
1504
+ algorithm = kwargs.get("algorithm", "tao_lmvm")
1505
+ trace = kwargs.get("trace", 3)
1506
+ encoding = kwargs.get("encoding", None)
1507
+ labels = kwargs.get("labels", None)
1508
+ sigma = kwargs.get("gaussian_prior_sigma", 0)
1509
+ count_cutoff = kwargs.get("count_cutoff", 0)
1510
+ max_iter = kwargs.get("max_iter")
1511
+ ll_delta = kwargs.get("min_lldelta")
1512
+
1513
+ # Construct an encoding from the training data.
1514
+ if not encoding:
1515
+ encoding = TadmEventMaxentFeatureEncoding.train(
1516
+ train_toks, count_cutoff, labels=labels
1517
+ )
1518
+
1519
+ trainfile_fd, trainfile_name = tempfile.mkstemp(
1520
+ prefix="nltk-tadm-events-", suffix=".gz"
1521
+ )
1522
+ weightfile_fd, weightfile_name = tempfile.mkstemp(prefix="nltk-tadm-weights-")
1523
+
1524
+ trainfile = gzip_open_unicode(trainfile_name, "w")
1525
+ write_tadm_file(train_toks, encoding, trainfile)
1526
+ trainfile.close()
1527
+
1528
+ options = []
1529
+ options.extend(["-monitor"])
1530
+ options.extend(["-method", algorithm])
1531
+ if sigma:
1532
+ options.extend(["-l2", "%.6f" % sigma**2])
1533
+ if max_iter:
1534
+ options.extend(["-max_it", "%d" % max_iter])
1535
+ if ll_delta:
1536
+ options.extend(["-fatol", "%.6f" % abs(ll_delta)])
1537
+ options.extend(["-events_in", trainfile_name])
1538
+ options.extend(["-params_out", weightfile_name])
1539
+ if trace < 3:
1540
+ options.extend(["2>&1"])
1541
+ else:
1542
+ options.extend(["-summary"])
1543
+
1544
+ call_tadm(options)
1545
+
1546
+ with open(weightfile_name) as weightfile:
1547
+ weights = parse_tadm_weights(weightfile)
1548
+
1549
+ os.remove(trainfile_name)
1550
+ os.remove(weightfile_name)
1551
+
1552
+ # Convert from base-e to base-2 weights.
1553
+ weights *= numpy.log2(numpy.e)
1554
+
1555
+ # Build the classifier
1556
+ return cls(encoding, weights)
1557
+
1558
+
1559
+ ######################################################################
1560
+ # { Demo
1561
+ ######################################################################
1562
+ def demo():
1563
+ from nltk.classify.util import names_demo
1564
+
1565
+ classifier = names_demo(MaxentClassifier.train)
1566
+
1567
+
1568
+ if __name__ == "__main__":
1569
+ demo()
llmeval-env/lib/python3.10/site-packages/nltk/classify/svm.py ADDED
@@ -0,0 +1,17 @@
1
+ # Natural Language Toolkit: SVM-based classifier
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Leon Derczynski <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ """
9
+ nltk.classify.svm was deprecated. For classification based
10
+ on support vector machines SVMs use nltk.classify.scikitlearn
11
+ (or `scikit-learn <https://scikit-learn.org>`_ directly).
12
+ """
13
+
14
+
15
+ class SvmClassifier:
16
+ def __init__(self, *args, **kwargs):
17
+ raise NotImplementedError(__doc__)
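For reference, a minimal sketch of the replacement that the deprecation notice above points to (toy featuresets for illustration; assumes scikit-learn is installed):

    from nltk.classify.scikitlearn import SklearnClassifier
    from sklearn.svm import SVC

    train = [({"last_letter": "a"}, "female"), ({"last_letter": "k"}, "male")]
    classifier = SklearnClassifier(SVC()).train(train)
    print(classifier.classify({"last_letter": "a"}))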
llmeval-env/lib/python3.10/site-packages/nltk/classify/tadm.py ADDED
@@ -0,0 +1,122 @@
1
+ # Natural Language Toolkit: Interface to TADM Classifier
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Joseph Frazee <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import subprocess
9
+ import sys
10
+
11
+ from nltk.internals import find_binary
12
+
13
+ try:
14
+ import numpy
15
+ except ImportError:
16
+ pass
17
+
18
+ _tadm_bin = None
19
+
20
+
21
+ def config_tadm(bin=None):
22
+ global _tadm_bin
23
+ _tadm_bin = find_binary(
24
+ "tadm", bin, env_vars=["TADM"], binary_names=["tadm"], url="http://tadm.sf.net"
25
+ )
26
+
27
+
28
+ def write_tadm_file(train_toks, encoding, stream):
29
+ """
30
+ Generate an input file for ``tadm`` based on the given corpus of
31
+ classified tokens.
32
+
33
+ :type train_toks: list(tuple(dict, str))
34
+ :param train_toks: Training data, represented as a list of
35
+ pairs, the first member of which is a feature dictionary,
36
+ and the second of which is a classification label.
37
+ :type encoding: TadmEventMaxentFeatureEncoding
38
+ :param encoding: A feature encoding, used to convert featuresets
39
+ into feature vectors.
40
+ :type stream: stream
41
+ :param stream: The stream to which the ``tadm`` input file should be
42
+ written.
43
+ """
44
+ # See the following for a file format description:
45
+ #
46
+ # https://sf.net/forum/forum.php?thread_id=1391502&forum_id=473054
47
+ # https://sf.net/forum/forum.php?thread_id=1675097&forum_id=473054
48
+ labels = encoding.labels()
49
+ for featureset, label in train_toks:
50
+ length_line = "%d\n" % len(labels)
51
+ stream.write(length_line)
52
+ for known_label in labels:
53
+ v = encoding.encode(featureset, known_label)
54
+ line = "%d %d %s\n" % (
55
+ int(label == known_label),
56
+ len(v),
57
+ " ".join("%d %d" % u for u in v),
58
+ )
59
+ stream.write(line)
60
+
61
+
62
+ def parse_tadm_weights(paramfile):
63
+ """
64
+ Given the stdout output generated by ``tadm`` when training a
65
+ model, return a ``numpy`` array containing the corresponding weight
66
+ vector.
67
+ """
68
+ weights = []
69
+ for line in paramfile:
70
+ weights.append(float(line.strip()))
71
+ return numpy.array(weights, "d")
72
+
73
+
74
+ def call_tadm(args):
75
+ """
76
+ Call the ``tadm`` binary with the given arguments.
77
+ """
78
+ if isinstance(args, str):
79
+ raise TypeError("args should be a list of strings")
80
+ if _tadm_bin is None:
81
+ config_tadm()
82
+
83
+ # Call tadm via a subprocess
84
+ cmd = [_tadm_bin] + args
85
+ p = subprocess.Popen(cmd, stdout=sys.stdout)
86
+ (stdout, stderr) = p.communicate()
87
+
88
+ # Check the return code.
89
+ if p.returncode != 0:
90
+ print()
91
+ print(stderr)
92
+ raise OSError("tadm command failed!")
93
+
94
+
95
+ def names_demo():
96
+ from nltk.classify.maxent import TadmMaxentClassifier
97
+ from nltk.classify.util import names_demo
98
+
99
+ classifier = names_demo(TadmMaxentClassifier.train)
100
+
101
+
102
+ def encoding_demo():
103
+ import sys
104
+
105
+ from nltk.classify.maxent import TadmEventMaxentFeatureEncoding
106
+
107
+ tokens = [
108
+ ({"f0": 1, "f1": 1, "f3": 1}, "A"),
109
+ ({"f0": 1, "f2": 1, "f4": 1}, "B"),
110
+ ({"f0": 2, "f2": 1, "f3": 1, "f4": 1}, "A"),
111
+ ]
112
+ encoding = TadmEventMaxentFeatureEncoding.train(tokens)
113
+ write_tadm_file(tokens, encoding, sys.stdout)
114
+ print()
115
+ for i in range(encoding.length()):
116
+ print("%s --> %d" % (encoding.describe(i), i))
117
+ print()
118
+
119
+
120
+ if __name__ == "__main__":
121
+ encoding_demo()
122
+ names_demo()
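A minimal usage sketch tying this module to the TadmMaxentClassifier defined in maxent.py above (toy featuresets; it requires the external tadm binary to be installed and discoverable via config_tadm or the TADM environment variable):

    from nltk.classify.maxent import TadmMaxentClassifier

    train = [({"f0": 1, "f1": 1}, "A"), ({"f0": 1, "f2": 1}, "B")]
    classifier = TadmMaxentClassifier.train(train, algorithm="tao_lmvm")
    print(classifier.classify({"f0": 1, "f1": 1}))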
llmeval-env/lib/python3.10/site-packages/nltk/classify/weka.py ADDED
@@ -0,0 +1,377 @@
1
+ # Natural Language Toolkit: Interface to Weka Classifiers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Classifiers that make use of the external 'Weka' package.
10
+ """
11
+
12
+ import os
13
+ import re
14
+ import subprocess
15
+ import tempfile
16
+ import time
17
+ import zipfile
18
+ from sys import stdin
19
+
20
+ from nltk.classify.api import ClassifierI
21
+ from nltk.internals import config_java, java
22
+ from nltk.probability import DictionaryProbDist
23
+
24
+ _weka_classpath = None
25
+ _weka_search = [
26
+ ".",
27
+ "/usr/share/weka",
28
+ "/usr/local/share/weka",
29
+ "/usr/lib/weka",
30
+ "/usr/local/lib/weka",
31
+ ]
32
+
33
+
34
+ def config_weka(classpath=None):
35
+ global _weka_classpath
36
+
37
+ # Make sure java's configured first.
38
+ config_java()
39
+
40
+ if classpath is not None:
41
+ _weka_classpath = classpath
42
+
43
+ if _weka_classpath is None:
44
+ searchpath = _weka_search
45
+ if "WEKAHOME" in os.environ:
46
+ searchpath.insert(0, os.environ["WEKAHOME"])
47
+
48
+ for path in searchpath:
49
+ if os.path.exists(os.path.join(path, "weka.jar")):
50
+ _weka_classpath = os.path.join(path, "weka.jar")
51
+ version = _check_weka_version(_weka_classpath)
52
+ if version:
53
+ print(f"[Found Weka: {_weka_classpath} (version {version})]")
54
+ else:
55
+ print("[Found Weka: %s]" % _weka_classpath)
56
+ _check_weka_version(_weka_classpath)
57
+
58
+ if _weka_classpath is None:
59
+ raise LookupError(
60
+ "Unable to find weka.jar! Use config_weka() "
61
+ "or set the WEKAHOME environment variable. "
62
+ "For more information about Weka, please see "
63
+ "https://www.cs.waikato.ac.nz/ml/weka/"
64
+ )
65
+
66
+
67
+ def _check_weka_version(jar):
68
+ try:
69
+ zf = zipfile.ZipFile(jar)
70
+ except (SystemExit, KeyboardInterrupt):
71
+ raise
72
+ except:
73
+ return None
74
+ try:
75
+ try:
76
+ return zf.read("weka/core/version.txt")
77
+ except KeyError:
78
+ return None
79
+ finally:
80
+ zf.close()
81
+
82
+
83
+ class WekaClassifier(ClassifierI):
84
+ def __init__(self, formatter, model_filename):
85
+ self._formatter = formatter
86
+ self._model = model_filename
87
+
88
+ def prob_classify_many(self, featuresets):
89
+ return self._classify_many(featuresets, ["-p", "0", "-distribution"])
90
+
91
+ def classify_many(self, featuresets):
92
+ return self._classify_many(featuresets, ["-p", "0"])
93
+
94
+ def _classify_many(self, featuresets, options):
95
+ # Make sure we can find java & weka.
96
+ config_weka()
97
+
98
+ temp_dir = tempfile.mkdtemp()
99
+ try:
100
+ # Write the test data file.
101
+ test_filename = os.path.join(temp_dir, "test.arff")
102
+ self._formatter.write(test_filename, featuresets)
103
+
104
+ # Call weka to classify the data.
105
+ cmd = [
106
+ "weka.classifiers.bayes.NaiveBayes",
107
+ "-l",
108
+ self._model,
109
+ "-T",
110
+ test_filename,
111
+ ] + options
112
+ (stdout, stderr) = java(
113
+ cmd,
114
+ classpath=_weka_classpath,
115
+ stdout=subprocess.PIPE,
116
+ stderr=subprocess.PIPE,
117
+ )
118
+
119
+ # Check if something went wrong:
120
+ if stderr and not stdout:
121
+ if "Illegal options: -distribution" in stderr:
122
+ raise ValueError(
123
+ "The installed version of weka does "
124
+ "not support probability distribution "
125
+ "output."
126
+ )
127
+ else:
128
+ raise ValueError("Weka failed to generate output:\n%s" % stderr)
129
+
130
+ # Parse weka's output.
131
+ return self.parse_weka_output(stdout.decode(stdin.encoding).split("\n"))
132
+
133
+ finally:
134
+ for f in os.listdir(temp_dir):
135
+ os.remove(os.path.join(temp_dir, f))
136
+ os.rmdir(temp_dir)
137
+
138
+ def parse_weka_distribution(self, s):
139
+ probs = [float(v) for v in re.split("[*,]+", s) if v.strip()]
140
+ probs = dict(zip(self._formatter.labels(), probs))
141
+ return DictionaryProbDist(probs)
142
+
143
+ def parse_weka_output(self, lines):
144
+ # Strip unwanted text from stdout
145
+ for i, line in enumerate(lines):
146
+ if line.strip().startswith("inst#"):
147
+ lines = lines[i:]
148
+ break
149
+
150
+ if lines[0].split() == ["inst#", "actual", "predicted", "error", "prediction"]:
151
+ return [line.split()[2].split(":")[1] for line in lines[1:] if line.strip()]
152
+ elif lines[0].split() == [
153
+ "inst#",
154
+ "actual",
155
+ "predicted",
156
+ "error",
157
+ "distribution",
158
+ ]:
159
+ return [
160
+ self.parse_weka_distribution(line.split()[-1])
161
+ for line in lines[1:]
162
+ if line.strip()
163
+ ]
164
+
165
+ # is this safe:?
166
+ elif re.match(r"^0 \w+ [01]\.[0-9]* \?\s*$", lines[0]):
167
+ return [line.split()[1] for line in lines if line.strip()]
168
+
169
+ else:
170
+ for line in lines[:10]:
171
+ print(line)
172
+ raise ValueError(
173
+ "Unhandled output format -- your version "
174
+ "of weka may not be supported.\n"
175
+ " Header: %s" % lines[0]
176
+ )
177
+
178
+ # [xx] full list of classifiers (some may be abstract?):
179
+ # ADTree, AODE, BayesNet, ComplementNaiveBayes, ConjunctiveRule,
180
+ # DecisionStump, DecisionTable, HyperPipes, IB1, IBk, Id3, J48,
181
+ # JRip, KStar, LBR, LeastMedSq, LinearRegression, LMT, Logistic,
182
+ # LogisticBase, M5Base, MultilayerPerceptron,
183
+ # MultipleClassifiersCombiner, NaiveBayes, NaiveBayesMultinomial,
184
+ # NaiveBayesSimple, NBTree, NNge, OneR, PaceRegression, PART,
185
+ # PreConstructedLinearModel, Prism, RandomForest,
186
+ # RandomizableClassifier, RandomTree, RBFNetwork, REPTree, Ridor,
187
+ # RuleNode, SimpleLinearRegression, SimpleLogistic,
188
+ # SingleClassifierEnhancer, SMO, SMOreg, UserClassifier, VFI,
189
+ # VotedPerceptron, Winnow, ZeroR
190
+
191
+ _CLASSIFIER_CLASS = {
192
+ "naivebayes": "weka.classifiers.bayes.NaiveBayes",
193
+ "C4.5": "weka.classifiers.trees.J48",
194
+ "log_regression": "weka.classifiers.functions.Logistic",
195
+ "svm": "weka.classifiers.functions.SMO",
196
+ "kstar": "weka.classifiers.lazy.KStar",
197
+ "ripper": "weka.classifiers.rules.JRip",
198
+ }
199
+
200
+ @classmethod
201
+ def train(
202
+ cls,
203
+ model_filename,
204
+ featuresets,
205
+ classifier="naivebayes",
206
+ options=[],
207
+ quiet=True,
208
+ ):
209
+ # Make sure we can find java & weka.
210
+ config_weka()
211
+
212
+ # Build an ARFF formatter.
213
+ formatter = ARFF_Formatter.from_train(featuresets)
214
+
215
+ temp_dir = tempfile.mkdtemp()
216
+ try:
217
+ # Write the training data file.
218
+ train_filename = os.path.join(temp_dir, "train.arff")
219
+ formatter.write(train_filename, featuresets)
220
+
221
+ if classifier in cls._CLASSIFIER_CLASS:
222
+ javaclass = cls._CLASSIFIER_CLASS[classifier]
223
+ elif classifier in cls._CLASSIFIER_CLASS.values():
224
+ javaclass = classifier
225
+ else:
226
+ raise ValueError("Unknown classifier %s" % classifier)
227
+
228
+ # Train the weka model.
229
+ cmd = [javaclass, "-d", model_filename, "-t", train_filename]
230
+ cmd += list(options)
231
+ if quiet:
232
+ stdout = subprocess.PIPE
233
+ else:
234
+ stdout = None
235
+ java(cmd, classpath=_weka_classpath, stdout=stdout)
236
+
237
+ # Return the new classifier.
238
+ return WekaClassifier(formatter, model_filename)
239
+
240
+ finally:
241
+ for f in os.listdir(temp_dir):
242
+ os.remove(os.path.join(temp_dir, f))
243
+ os.rmdir(temp_dir)
244
+
245
+
246
+ class ARFF_Formatter:
247
+ """
248
+ Converts featuresets and labeled featuresets to ARFF-formatted
249
+ strings, appropriate for input into Weka.
250
+
251
+ Features and classes can be specified manually in the constructor, or may
252
+ be determined from data using ``from_train``.
253
+ """
254
+
255
+ def __init__(self, labels, features):
256
+ """
257
+ :param labels: A list of all class labels that can be generated.
258
+ :param features: A list of feature specifications, where
259
+ each feature specification is a tuple (fname, ftype);
260
+ and ftype is an ARFF type string such as NUMERIC or
261
+ STRING.
262
+ """
263
+ self._labels = labels
264
+ self._features = features
265
+
266
+ def format(self, tokens):
267
+ """Returns a string representation of ARFF output for the given data."""
268
+ return self.header_section() + self.data_section(tokens)
269
+
270
+ def labels(self):
271
+ """Returns the list of classes."""
272
+ return list(self._labels)
273
+
274
+ def write(self, outfile, tokens):
275
+ """Writes ARFF data to a file for the given data."""
276
+ if not hasattr(outfile, "write"):
277
+ outfile = open(outfile, "w")
278
+ outfile.write(self.format(tokens))
279
+ outfile.close()
280
+
281
+ @staticmethod
282
+ def from_train(tokens):
283
+ """
284
+ Constructs an ARFF_Formatter instance with class labels and feature
285
+ types determined from the given data. Handles boolean, numeric and
286
+ string (note: not nominal) types.
287
+ """
288
+ # Find the set of all attested labels.
289
+ labels = {label for (tok, label) in tokens}
290
+
291
+ # Determine the types of all features.
292
+ features = {}
293
+ for tok, label in tokens:
294
+ for (fname, fval) in tok.items():
295
+ if issubclass(type(fval), bool):
296
+ ftype = "{True, False}"
297
+ elif issubclass(type(fval), (int, float, bool)):
298
+ ftype = "NUMERIC"
299
+ elif issubclass(type(fval), str):
300
+ ftype = "STRING"
301
+ elif fval is None:
302
+ continue # can't tell the type.
303
+ else:
304
+ raise ValueError("Unsupported value type %r" % ftype)
305
+
306
+ if features.get(fname, ftype) != ftype:
307
+ raise ValueError("Inconsistent type for %s" % fname)
308
+ features[fname] = ftype
309
+ features = sorted(features.items())
310
+
311
+ return ARFF_Formatter(labels, features)
312
+
313
+ def header_section(self):
314
+ """Returns an ARFF header as a string."""
315
+ # Header comment.
316
+ s = (
317
+ "% Weka ARFF file\n"
318
+ + "% Generated automatically by NLTK\n"
319
+ + "%% %s\n\n" % time.ctime()
320
+ )
321
+
322
+ # Relation name
323
+ s += "@RELATION rel\n\n"
324
+
325
+ # Input attribute specifications
326
+ for fname, ftype in self._features:
327
+ s += "@ATTRIBUTE %-30r %s\n" % (fname, ftype)
328
+
329
+ # Label attribute specification
330
+ s += "@ATTRIBUTE %-30r {%s}\n" % ("-label-", ",".join(self._labels))
331
+
332
+ return s
333
+
334
+ def data_section(self, tokens, labeled=None):
335
+ """
336
+ Returns the ARFF data section for the given data.
337
+
338
+ :param tokens: a list of featuresets (dicts) or labelled featuresets
339
+ which are tuples (featureset, label).
340
+ :param labeled: Indicates whether the given tokens are labeled
341
+ or not. If None, then the tokens will be assumed to be
342
+ labeled if the first token's value is a tuple or list.
343
+ """
344
+ # Check if the tokens are labeled or unlabeled. If unlabeled,
345
+ # then use 'None'
346
+ if labeled is None:
347
+ labeled = tokens and isinstance(tokens[0], (tuple, list))
348
+ if not labeled:
349
+ tokens = [(tok, None) for tok in tokens]
350
+
351
+ # Data section
352
+ s = "\n@DATA\n"
353
+ for (tok, label) in tokens:
354
+ for fname, ftype in self._features:
355
+ s += "%s," % self._fmt_arff_val(tok.get(fname))
356
+ s += "%s\n" % self._fmt_arff_val(label)
357
+
358
+ return s
359
+
360
+ def _fmt_arff_val(self, fval):
361
+ if fval is None:
362
+ return "?"
363
+ elif isinstance(fval, (bool, int)):
364
+ return "%s" % fval
365
+ elif isinstance(fval, float):
366
+ return "%r" % fval
367
+ else:
368
+ return "%r" % fval
369
+
370
+
371
+ if __name__ == "__main__":
372
+ from nltk.classify.util import binary_names_demo_features, names_demo
373
+
374
+ def make_classifier(featuresets):
375
+ return WekaClassifier.train("/tmp/name.model", featuresets, "C4.5")
376
+
377
+ classifier = names_demo(make_classifier, binary_names_demo_features)
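A small sketch of the ARFF formatting path in isolation (toy featuresets and labels are made up for illustration):

    from nltk.classify.weka import ARFF_Formatter

    toks = [({"length": 4, "vowel_final": True}, "short"),
            ({"length": 11, "vowel_final": False}, "long")]
    fmt = ARFF_Formatter.from_train(toks)
    print(fmt.header_section())    # @RELATION / @ATTRIBUTE declarations
    print(fmt.data_section(toks))  # @DATA rows, one per featureset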
llmeval-env/lib/python3.10/site-packages/nltk/test/__init__.py ADDED
@@ -0,0 +1,18 @@
1
+ # Natural Language Toolkit: Unit Tests
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Unit tests for the NLTK modules. These tests are intended to ensure
10
+ that source code changes don't accidentally introduce bugs.
11
+ For instructions, please see:
12
+
13
+ ../../web/dev/local_testing.rst
14
+
15
+ https://github.com/nltk/nltk/blob/develop/web/dev/local_testing.rst
16
+
17
+
18
+ """
llmeval-env/lib/python3.10/site-packages/nltk/test/all.py ADDED
@@ -0,0 +1,25 @@
1
+ """Test suite that runs all NLTK tests.
2
+
3
+ This module, `nltk.test.all`, is named as the NLTK ``test_suite`` in the
4
+ project's ``setup-eggs.py`` file. Here, we create a test suite that
5
+ runs all of our doctests, and return it for processing by the setuptools
6
+ test harness.
7
+
8
+ """
9
+ import doctest
10
+ import os.path
11
+ import unittest
12
+ from glob import glob
13
+
14
+
15
+ def additional_tests():
16
+ # print("here-000000000000000")
17
+ # print("-----", glob(os.path.join(os.path.dirname(__file__), '*.doctest')))
18
+ dir = os.path.dirname(__file__)
19
+ paths = glob(os.path.join(dir, "*.doctest"))
20
+ files = [os.path.basename(path) for path in paths]
21
+ return unittest.TestSuite([doctest.DocFileSuite(file) for file in files])
22
+
23
+
24
+ # if os.path.split(path)[-1] != 'index.rst'
25
+ # skips time-dependent doctest in index.rst
llmeval-env/lib/python3.10/site-packages/nltk/test/bleu.doctest ADDED
@@ -0,0 +1,29 @@
1
+ ==========
2
+ BLEU tests
3
+ ==========
4
+
5
+ >>> from nltk.translate import bleu
6
+
7
+ If the candidate has no alignment to any of the references, the BLEU score is 0.
8
+
9
+ >>> bleu(
10
+ ... ['The candidate has no alignment to any of the references'.split()],
11
+ ... 'John loves Mary'.split(),
12
+ ... (1,),
13
+ ... )
14
+ 0
15
+
16
+ This is an implementation of the smoothing techniques
17
+ for segment-level BLEU scores that were presented in
19
+ Boxing Chen and Colin Cherry (2014) A Systematic Comparison of
19
+ Smoothing Techniques for Sentence-Level BLEU. In WMT14.
20
+ http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
21
+ >>> from nltk.translate.bleu_score import sentence_bleu,SmoothingFunction
22
+
23
+
24
+ >>> sentence_bleu(
25
+ ... ['It is a place of quiet contemplation .'.split()],
26
+ ... 'It is .'.split(),
27
+ ... smoothing_function=SmoothingFunction().method4,
28
+ ... )*100
29
+ 4.4267...
llmeval-env/lib/python3.10/site-packages/nltk/test/bnc.doctest ADDED
@@ -0,0 +1,60 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ >>> import os.path
5
+
6
+ >>> from nltk.corpus.reader import BNCCorpusReader
7
+ >>> import nltk.test
8
+
9
+ >>> root = os.path.dirname(nltk.test.__file__)
10
+ >>> bnc = BNCCorpusReader(root=root, fileids='FX8.xml')
11
+
12
+ Checking the word access.
13
+ -------------------------
14
+
15
+ >>> len(bnc.words())
16
+ 151
17
+
18
+ >>> bnc.words()[:6]
19
+ ['Ah', 'there', 'we', 'are', ',', '.']
20
+ >>> bnc.words(stem=True)[:6]
21
+ ['ah', 'there', 'we', 'be', ',', '.']
22
+
23
+ >>> bnc.tagged_words()[:6]
24
+ [('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')]
25
+
26
+ >>> bnc.tagged_words(c5=True)[:6]
27
+ [('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')]
28
+
29
+ Testing access to the sentences.
30
+ --------------------------------
31
+
32
+ >>> len(bnc.sents())
33
+ 15
34
+
35
+ >>> bnc.sents()[0]
36
+ ['Ah', 'there', 'we', 'are', ',', '.']
37
+ >>> bnc.sents(stem=True)[0]
38
+ ['ah', 'there', 'we', 'be', ',', '.']
39
+
40
+ >>> bnc.tagged_sents()[0]
41
+ [('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')]
42
+ >>> bnc.tagged_sents(c5=True)[0]
43
+ [('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')]
44
+
45
+ A not lazy loader.
46
+ ------------------
47
+
48
+ >>> eager = BNCCorpusReader(root=root, fileids=r'FX8.xml', lazy=False)
49
+
50
+ >>> len(eager.words())
51
+ 151
52
+ >>> eager.words(stem=True)[6:17]
53
+ ['right', 'abdominal', 'wound', ',', 'she', 'be', 'a', 'wee', 'bit', 'confuse', '.']
54
+
55
+ >>> eager.tagged_words()[6:11]
56
+ [('Right', 'ADV'), ('abdominal', 'ADJ'), ('wound', 'SUBST'), (',', 'PUN'), ('she', 'PRON')]
57
+ >>> eager.tagged_words(c5=True)[6:17]
58
+ [('Right', 'AV0'), ('abdominal', 'AJ0'), ('wound', 'NN1'), (',', 'PUN'), ('she', 'PNP'), ("'s", 'VBZ'), ('a', 'AT0'), ('wee', 'AJ0-NN1'), ('bit', 'NN1'), ('confused', 'VVN-AJ0'), ('.', 'PUN')]
59
+ >>> len(eager.sents())
60
+ 15
llmeval-env/lib/python3.10/site-packages/nltk/test/ccg.doctest ADDED
@@ -0,0 +1,376 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==============================
5
+ Combinatory Categorial Grammar
6
+ ==============================
7
+
8
+ Relative Clauses
9
+ ----------------
10
+
11
+ >>> from nltk.ccg import chart, lexicon
12
+
13
+ Construct a lexicon:
14
+
15
+ >>> lex = lexicon.fromstring('''
16
+ ... :- S, NP, N, VP
17
+ ...
18
+ ... Det :: NP/N
19
+ ... Pro :: NP
20
+ ... Modal :: S\\NP/VP
21
+ ...
22
+ ... TV :: VP/NP
23
+ ... DTV :: TV/NP
24
+ ...
25
+ ... the => Det
26
+ ...
27
+ ... that => Det
28
+ ... that => NP
29
+ ...
30
+ ... I => Pro
31
+ ... you => Pro
32
+ ... we => Pro
33
+ ...
34
+ ... chef => N
35
+ ... cake => N
36
+ ... children => N
37
+ ... dough => N
38
+ ...
39
+ ... will => Modal
40
+ ... should => Modal
41
+ ... might => Modal
42
+ ... must => Modal
43
+ ...
44
+ ... and => var\\.,var/.,var
45
+ ...
46
+ ... to => VP[to]/VP
47
+ ...
48
+ ... without => (VP\\VP)/VP[ing]
49
+ ...
50
+ ... be => TV
51
+ ... cook => TV
52
+ ... eat => TV
53
+ ...
54
+ ... cooking => VP[ing]/NP
55
+ ...
56
+ ... give => DTV
57
+ ...
58
+ ... is => (S\\NP)/NP
59
+ ... prefer => (S\\NP)/NP
60
+ ...
61
+ ... which => (N\\N)/(S/NP)
62
+ ...
63
+ ... persuade => (VP/VP[to])/NP
64
+ ... ''')
65
+
66
+ >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
67
+ >>> for parse in parser.parse("you prefer that cake".split()):
68
+ ... chart.printCCGDerivation(parse)
69
+ ... break
70
+ ...
71
+ you prefer that cake
72
+ NP ((S\NP)/NP) (NP/N) N
73
+ -------------->
74
+ NP
75
+ --------------------------->
76
+ (S\NP)
77
+ --------------------------------<
78
+ S
79
+
80
+ >>> for parse in parser.parse("that is the cake which you prefer".split()):
81
+ ... chart.printCCGDerivation(parse)
82
+ ... break
83
+ ...
84
+ that is the cake which you prefer
85
+ NP ((S\NP)/NP) (NP/N) N ((N\N)/(S/NP)) NP ((S\NP)/NP)
86
+ ----->T
87
+ (S/(S\NP))
88
+ ------------------>B
89
+ (S/NP)
90
+ ---------------------------------->
91
+ (N\N)
92
+ ----------------------------------------<
93
+ N
94
+ ------------------------------------------------>
95
+ NP
96
+ ------------------------------------------------------------->
97
+ (S\NP)
98
+ -------------------------------------------------------------------<
99
+ S
100
+
101
+
102
+ Some other sentences to try:
103
+ "that is the cake which we will persuade the chef to cook"
104
+ "that is the cake which we will persuade the chef to give the children"
105
+
106
+ >>> sent = "that is the dough which you will eat without cooking".split()
107
+ >>> nosub_parser = chart.CCGChartParser(lex, chart.ApplicationRuleSet +
108
+ ... chart.CompositionRuleSet + chart.TypeRaiseRuleSet)
109
+
110
+ Without Substitution (no output)
111
+
112
+ >>> for parse in nosub_parser.parse(sent):
113
+ ... chart.printCCGDerivation(parse)
114
+
115
+ With Substitution:
116
+
117
+ >>> for parse in parser.parse(sent):
118
+ ... chart.printCCGDerivation(parse)
119
+ ... break
120
+ ...
121
+ that is the dough which you will eat without cooking
122
+ NP ((S\NP)/NP) (NP/N) N ((N\N)/(S/NP)) NP ((S\NP)/VP) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP)
123
+ ----->T
124
+ (S/(S\NP))
125
+ ------------------------------------->B
126
+ ((VP\VP)/NP)
127
+ ----------------------------------------------<Sx
128
+ (VP/NP)
129
+ ----------------------------------------------------------->B
130
+ ((S\NP)/NP)
131
+ ---------------------------------------------------------------->B
132
+ (S/NP)
133
+ -------------------------------------------------------------------------------->
134
+ (N\N)
135
+ ---------------------------------------------------------------------------------------<
136
+ N
137
+ ----------------------------------------------------------------------------------------------->
138
+ NP
139
+ ------------------------------------------------------------------------------------------------------------>
140
+ (S\NP)
141
+ ------------------------------------------------------------------------------------------------------------------<
142
+ S
143
+
144
+
145
+ Conjunction
146
+ -----------
147
+
148
+ >>> from nltk.ccg.chart import CCGChartParser, ApplicationRuleSet, CompositionRuleSet
149
+ >>> from nltk.ccg.chart import SubstitutionRuleSet, TypeRaiseRuleSet, printCCGDerivation
150
+ >>> from nltk.ccg import lexicon
151
+
152
+ Lexicons for the tests:
153
+
154
+ >>> test1_lex = '''
155
+ ... :- S,N,NP,VP
156
+ ... I => NP
157
+ ... you => NP
158
+ ... will => S\\NP/VP
159
+ ... cook => VP/NP
160
+ ... which => (N\\N)/(S/NP)
161
+ ... and => var\\.,var/.,var
162
+ ... might => S\\NP/VP
163
+ ... eat => VP/NP
164
+ ... the => NP/N
165
+ ... mushrooms => N
166
+ ... parsnips => N'''
167
+ >>> test2_lex = '''
168
+ ... :- N, S, NP, VP
169
+ ... articles => N
170
+ ... the => NP/N
171
+ ... and => var\\.,var/.,var
172
+ ... which => (N\\N)/(S/NP)
173
+ ... I => NP
174
+ ... anyone => NP
175
+ ... will => (S/VP)\\NP
176
+ ... file => VP/NP
177
+ ... without => (VP\\VP)/VP[ing]
178
+ ... forget => VP/NP
179
+ ... reading => VP[ing]/NP
180
+ ... '''
181
+
182
+ Tests handling of conjunctions.
183
+ Note that while the two derivations are different, they are semantically equivalent.
184
+
185
+ >>> lex = lexicon.fromstring(test1_lex)
186
+ >>> parser = CCGChartParser(lex, ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet)
187
+ >>> for parse in parser.parse("I will cook and might eat the mushrooms and parsnips".split()):
188
+ ... printCCGDerivation(parse)
189
+ I will cook and might eat the mushrooms and parsnips
190
+ NP ((S\NP)/VP) (VP/NP) ((_var0\.,_var0)/.,_var0) ((S\NP)/VP) (VP/NP) (NP/N) N ((_var0\.,_var0)/.,_var0) N
191
+ ---------------------->B
192
+ ((S\NP)/NP)
193
+ ---------------------->B
194
+ ((S\NP)/NP)
195
+ ------------------------------------------------->
196
+ (((S\NP)/NP)\.,((S\NP)/NP))
197
+ -----------------------------------------------------------------------<
198
+ ((S\NP)/NP)
199
+ ------------------------------------->
200
+ (N\.,N)
201
+ ------------------------------------------------<
202
+ N
203
+ -------------------------------------------------------->
204
+ NP
205
+ ------------------------------------------------------------------------------------------------------------------------------->
206
+ (S\NP)
207
+ -----------------------------------------------------------------------------------------------------------------------------------<
208
+ S
209
+ I will cook and might eat the mushrooms and parsnips
210
+ NP ((S\NP)/VP) (VP/NP) ((_var0\.,_var0)/.,_var0) ((S\NP)/VP) (VP/NP) (NP/N) N ((_var0\.,_var0)/.,_var0) N
211
+ ---------------------->B
212
+ ((S\NP)/NP)
213
+ ---------------------->B
214
+ ((S\NP)/NP)
215
+ ------------------------------------------------->
216
+ (((S\NP)/NP)\.,((S\NP)/NP))
217
+ -----------------------------------------------------------------------<
218
+ ((S\NP)/NP)
219
+ ------------------------------------------------------------------------------->B
220
+ ((S\NP)/N)
221
+ ------------------------------------->
222
+ (N\.,N)
223
+ ------------------------------------------------<
224
+ N
225
+ ------------------------------------------------------------------------------------------------------------------------------->
226
+ (S\NP)
227
+ -----------------------------------------------------------------------------------------------------------------------------------<
228
+ S
229
+
230
+
231
+ Tests handling subject extraction.
232
+ It is interesting to note that the two parses are clearly semantically different.
233
+
234
+ >>> lex = lexicon.fromstring(test2_lex)
235
+ >>> parser = CCGChartParser(lex, ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet)
236
+ >>> for parse in parser.parse("articles which I will file and forget without reading".split()):
237
+ ... printCCGDerivation(parse)
238
+ articles which I will file and forget without reading
239
+ N ((N\N)/(S/NP)) NP ((S/VP)\NP) (VP/NP) ((_var0\.,_var0)/.,_var0) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP)
240
+ -----------------<
241
+ (S/VP)
242
+ ------------------------------------->B
243
+ ((VP\VP)/NP)
244
+ ----------------------------------------------<Sx
245
+ (VP/NP)
246
+ ------------------------------------------------------------------------->
247
+ ((VP/NP)\.,(VP/NP))
248
+ ----------------------------------------------------------------------------------<
249
+ (VP/NP)
250
+ --------------------------------------------------------------------------------------------------->B
251
+ (S/NP)
252
+ ------------------------------------------------------------------------------------------------------------------->
253
+ (N\N)
254
+ -----------------------------------------------------------------------------------------------------------------------------<
255
+ N
256
+ articles which I will file and forget without reading
257
+ N ((N\N)/(S/NP)) NP ((S/VP)\NP) (VP/NP) ((_var0\.,_var0)/.,_var0) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP)
258
+ -----------------<
259
+ (S/VP)
260
+ ------------------------------------>
261
+ ((VP/NP)\.,(VP/NP))
262
+ ---------------------------------------------<
263
+ (VP/NP)
264
+ ------------------------------------->B
265
+ ((VP\VP)/NP)
266
+ ----------------------------------------------------------------------------------<Sx
267
+ (VP/NP)
268
+ --------------------------------------------------------------------------------------------------->B
269
+ (S/NP)
270
+ ------------------------------------------------------------------------------------------------------------------->
271
+ (N\N)
272
+ -----------------------------------------------------------------------------------------------------------------------------<
273
+ N
274
+
275
+
276
+ Unicode support
277
+ ---------------
278
+
279
+ Unicode words are supported.
280
+
281
+ >>> from nltk.ccg import chart, lexicon
282
+
283
+ Lexicons for the tests:
284
+
285
+ >>> lex = lexicon.fromstring('''
286
+ ... :- S, N, NP, PP
287
+ ...
288
+ ... AdjI :: N\\N
289
+ ... AdjD :: N/N
290
+ ... AdvD :: S/S
291
+ ... AdvI :: S\\S
292
+ ... Det :: NP/N
293
+ ... PrepNPCompl :: PP/NP
294
+ ... PrepNAdjN :: S\\S/N
295
+ ... PrepNAdjNP :: S\\S/NP
296
+ ... VPNP :: S\\NP/NP
297
+ ... VPPP :: S\\NP/PP
298
+ ... VPser :: S\\NP/AdjI
299
+ ...
300
+ ... auto => N
301
+ ... bebidas => N
302
+ ... cine => N
303
+ ... ley => N
304
+ ... libro => N
305
+ ... ministro => N
306
+ ... panadería => N
307
+ ... presidente => N
308
+ ... super => N
309
+ ...
310
+ ... el => Det
311
+ ... la => Det
312
+ ... las => Det
313
+ ... un => Det
314
+ ...
315
+ ... Ana => NP
316
+ ... Pablo => NP
317
+ ...
318
+ ... y => var\\.,var/.,var
319
+ ...
320
+ ... pero => (S/NP)\\(S/NP)/(S/NP)
321
+ ...
322
+ ... anunció => VPNP
323
+ ... compró => VPNP
324
+ ... cree => S\\NP/S[dep]
325
+ ... desmintió => VPNP
326
+ ... lee => VPNP
327
+ ... fueron => VPPP
328
+ ...
329
+ ... es => VPser
330
+ ...
331
+ ... interesante => AdjD
332
+ ... interesante => AdjI
333
+ ... nueva => AdjD
334
+ ... nueva => AdjI
335
+ ...
336
+ ... a => PrepNPCompl
337
+ ... en => PrepNAdjN
338
+ ... en => PrepNAdjNP
339
+ ...
340
+ ... ayer => AdvI
341
+ ...
342
+ ... que => (NP\\NP)/(S/NP)
343
+ ... que => S[dep]/S
344
+ ... ''')
345
+
346
+ >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
347
+ >>> for parse in parser.parse(u"el ministro anunció pero el presidente desmintió la nueva ley".split()):
348
+ ... printCCGDerivation(parse) # doctest: +SKIP
349
+ ... # it fails on python2.7 because of the unicode problem explained in https://github.com/nltk/nltk/pull/1354
350
+ ... break
351
+ el ministro anunció pero el presidente desmintió la nueva ley
352
+ (NP/N) N ((S\NP)/NP) (((S/NP)\(S/NP))/(S/NP)) (NP/N) N ((S\NP)/NP) (NP/N) (N/N) N
353
+ ------------------>
354
+ NP
355
+ ------------------>T
356
+ (S/(S\NP))
357
+ -------------------->
358
+ NP
359
+ -------------------->T
360
+ (S/(S\NP))
361
+ --------------------------------->B
362
+ (S/NP)
363
+ ----------------------------------------------------------->
364
+ ((S/NP)\(S/NP))
365
+ ------------>
366
+ N
367
+ -------------------->
368
+ NP
369
+ --------------------<T
370
+ (S\(S/NP))
371
+ -------------------------------------------------------------------------------<B
372
+ (S\(S/NP))
373
+ --------------------------------------------------------------------------------------------<B
374
+ (S/NP)
375
+ -------------------------------------------------------------------------------------------------------------->
376
+ S
llmeval-env/lib/python3.10/site-packages/nltk/test/ccg_semantics.doctest ADDED
@@ -0,0 +1,552 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==============================================
5
+ Combinatory Categorial Grammar with semantics
6
+ ==============================================
7
+
8
+ -----
9
+ Chart
10
+ -----
11
+
12
+
13
+ >>> from nltk.ccg import chart, lexicon
14
+ >>> from nltk.ccg.chart import printCCGDerivation
15
+
16
+ No semantics
17
+ -------------------
18
+
19
+ >>> lex = lexicon.fromstring('''
20
+ ... :- S, NP, N
21
+ ... She => NP
22
+ ... has => (S\\NP)/NP
23
+ ... books => NP
24
+ ... ''',
25
+ ... False)
26
+
27
+ >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
28
+ >>> parses = list(parser.parse("She has books".split()))
29
+ >>> print(str(len(parses)) + " parses")
30
+ 3 parses
31
+
32
+ >>> printCCGDerivation(parses[0])
33
+ She has books
34
+ NP ((S\NP)/NP) NP
35
+ -------------------->
36
+ (S\NP)
37
+ -------------------------<
38
+ S
39
+
40
+ >>> printCCGDerivation(parses[1])
41
+ She has books
42
+ NP ((S\NP)/NP) NP
43
+ ----->T
44
+ (S/(S\NP))
45
+ -------------------->
46
+ (S\NP)
47
+ ------------------------->
48
+ S
49
+
50
+
51
+ >>> printCCGDerivation(parses[2])
52
+ She has books
53
+ NP ((S\NP)/NP) NP
54
+ ----->T
55
+ (S/(S\NP))
56
+ ------------------>B
57
+ (S/NP)
58
+ ------------------------->
59
+ S
60
+
61
+ Simple semantics
62
+ -------------------
63
+
64
+ >>> lex = lexicon.fromstring('''
65
+ ... :- S, NP, N
66
+ ... She => NP {she}
67
+ ... has => (S\\NP)/NP {\\x y.have(y, x)}
68
+ ... a => NP/N {\\P.exists z.P(z)}
69
+ ... book => N {book}
70
+ ... ''',
71
+ ... True)
72
+
73
+ >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
74
+ >>> parses = list(parser.parse("She has a book".split()))
75
+ >>> print(str(len(parses)) + " parses")
76
+ 7 parses
77
+
78
+ >>> printCCGDerivation(parses[0])
79
+ She has a book
80
+ NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
81
+ ------------------------------------->
82
+ NP {exists z.book(z)}
83
+ ------------------------------------------------------------------->
84
+ (S\NP) {\y.have(y,exists z.book(z))}
85
+ -----------------------------------------------------------------------------<
86
+ S {have(she,exists z.book(z))}
87
+
88
+ >>> printCCGDerivation(parses[1])
89
+ She has a book
90
+ NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
91
+ --------------------------------------------------------->B
92
+ ((S\NP)/N) {\P y.have(y,exists z.P(z))}
93
+ ------------------------------------------------------------------->
94
+ (S\NP) {\y.have(y,exists z.book(z))}
95
+ -----------------------------------------------------------------------------<
96
+ S {have(she,exists z.book(z))}
97
+
98
+ >>> printCCGDerivation(parses[2])
99
+ She has a book
100
+ NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
101
+ ---------->T
102
+ (S/(S\NP)) {\F.F(she)}
103
+ ------------------------------------->
104
+ NP {exists z.book(z)}
105
+ ------------------------------------------------------------------->
106
+ (S\NP) {\y.have(y,exists z.book(z))}
107
+ ----------------------------------------------------------------------------->
108
+ S {have(she,exists z.book(z))}
109
+
110
+ >>> printCCGDerivation(parses[3])
111
+ She has a book
112
+ NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
113
+ ---------->T
114
+ (S/(S\NP)) {\F.F(she)}
115
+ --------------------------------------------------------->B
116
+ ((S\NP)/N) {\P y.have(y,exists z.P(z))}
117
+ ------------------------------------------------------------------->
118
+ (S\NP) {\y.have(y,exists z.book(z))}
119
+ ----------------------------------------------------------------------------->
120
+ S {have(she,exists z.book(z))}
121
+
122
+ >>> printCCGDerivation(parses[4])
123
+ She has a book
124
+ NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
125
+ ---------->T
126
+ (S/(S\NP)) {\F.F(she)}
127
+ ---------------------------------------->B
128
+ (S/NP) {\x.have(she,x)}
129
+ ------------------------------------->
130
+ NP {exists z.book(z)}
131
+ ----------------------------------------------------------------------------->
132
+ S {have(she,exists z.book(z))}
133
+
134
+ >>> printCCGDerivation(parses[5])
135
+ She has a book
136
+ NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
137
+ ---------->T
138
+ (S/(S\NP)) {\F.F(she)}
139
+ --------------------------------------------------------->B
140
+ ((S\NP)/N) {\P y.have(y,exists z.P(z))}
141
+ ------------------------------------------------------------------->B
142
+ (S/N) {\P.have(she,exists z.P(z))}
143
+ ----------------------------------------------------------------------------->
144
+ S {have(she,exists z.book(z))}
145
+
146
+ >>> printCCGDerivation(parses[6])
147
+ She has a book
148
+ NP {she} ((S\NP)/NP) {\x y.have(y,x)} (NP/N) {\P.exists z.P(z)} N {book}
149
+ ---------->T
150
+ (S/(S\NP)) {\F.F(she)}
151
+ ---------------------------------------->B
152
+ (S/NP) {\x.have(she,x)}
153
+ ------------------------------------------------------------------->B
154
+ (S/N) {\P.have(she,exists z.P(z))}
155
+ ----------------------------------------------------------------------------->
156
+ S {have(she,exists z.book(z))}
157
+
158
+ Complex semantics
159
+ -------------------
160
+
161
+ >>> lex = lexicon.fromstring('''
162
+ ... :- S, NP, N
163
+ ... She => NP {she}
164
+ ... has => (S\\NP)/NP {\\x y.have(y, x)}
165
+ ... a => ((S\\NP)\\((S\\NP)/NP))/N {\\P R x.(exists z.P(z) & R(z,x))}
166
+ ... book => N {book}
167
+ ... ''',
168
+ ... True)
169
+
170
+ >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
171
+ >>> parses = list(parser.parse("She has a book".split()))
172
+ >>> print(str(len(parses)) + " parses")
173
+ 2 parses
174
+
175
+ >>> printCCGDerivation(parses[0])
176
+ She has a book
177
+ NP {she} ((S\NP)/NP) {\x y.have(y,x)} (((S\NP)\((S\NP)/NP))/N) {\P R x.(exists z.P(z) & R(z,x))} N {book}
178
+ ---------------------------------------------------------------------->
179
+ ((S\NP)\((S\NP)/NP)) {\R x.(exists z.book(z) & R(z,x))}
180
+ ----------------------------------------------------------------------------------------------------<
181
+ (S\NP) {\x.(exists z.book(z) & have(x,z))}
182
+ --------------------------------------------------------------------------------------------------------------<
183
+ S {(exists z.book(z) & have(she,z))}
184
+
185
+ >>> printCCGDerivation(parses[1])
186
+ She has a book
187
+ NP {she} ((S\NP)/NP) {\x y.have(y,x)} (((S\NP)\((S\NP)/NP))/N) {\P R x.(exists z.P(z) & R(z,x))} N {book}
188
+ ---------->T
189
+ (S/(S\NP)) {\F.F(she)}
190
+ ---------------------------------------------------------------------->
191
+ ((S\NP)\((S\NP)/NP)) {\R x.(exists z.book(z) & R(z,x))}
192
+ ----------------------------------------------------------------------------------------------------<
193
+ (S\NP) {\x.(exists z.book(z) & have(x,z))}
194
+ -------------------------------------------------------------------------------------------------------------->
195
+ S {(exists z.book(z) & have(she,z))}
196
+
197
+ Using conjunctions
198
+ ---------------------
199
+
200
+ # TODO: The semantics of "and" should have been more flexible
201
+ >>> lex = lexicon.fromstring('''
202
+ ... :- S, NP, N
203
+ ... I => NP {I}
204
+ ... cook => (S\\NP)/NP {\\x y.cook(x,y)}
205
+ ... and => var\\.,var/.,var {\\P Q x y.(P(x,y) & Q(x,y))}
206
+ ... eat => (S\\NP)/NP {\\x y.eat(x,y)}
207
+ ... the => NP/N {\\x.the(x)}
208
+ ... bacon => N {bacon}
209
+ ... ''',
210
+ ... True)
211
+
212
+ >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
213
+ >>> parses = list(parser.parse("I cook and eat the bacon".split()))
214
+ >>> print(str(len(parses)) + " parses")
215
+ 7 parses
216
+
217
+ >>> printCCGDerivation(parses[0])
218
+ I cook and eat the bacon
219
+ NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
220
+ ------------------------------------------------------------------------------------->
221
+ (((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
222
+ -------------------------------------------------------------------------------------------------------------------<
223
+ ((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
224
+ ------------------------------->
225
+ NP {the(bacon)}
226
+ -------------------------------------------------------------------------------------------------------------------------------------------------->
227
+ (S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
228
+ ----------------------------------------------------------------------------------------------------------------------------------------------------------<
229
+ S {(eat(the(bacon),I) & cook(the(bacon),I))}
230
+
231
+ >>> printCCGDerivation(parses[1])
232
+ I cook and eat the bacon
233
+ NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
234
+ ------------------------------------------------------------------------------------->
235
+ (((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
236
+ -------------------------------------------------------------------------------------------------------------------<
237
+ ((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
238
+ --------------------------------------------------------------------------------------------------------------------------------------->B
239
+ ((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
240
+ -------------------------------------------------------------------------------------------------------------------------------------------------->
241
+ (S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
242
+ ----------------------------------------------------------------------------------------------------------------------------------------------------------<
243
+ S {(eat(the(bacon),I) & cook(the(bacon),I))}
244
+
245
+ >>> printCCGDerivation(parses[2])
246
+ I cook and eat the bacon
247
+ NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
248
+ -------->T
249
+ (S/(S\NP)) {\F.F(I)}
250
+ ------------------------------------------------------------------------------------->
251
+ (((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
252
+ -------------------------------------------------------------------------------------------------------------------<
253
+ ((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
254
+ ------------------------------->
255
+ NP {the(bacon)}
256
+ -------------------------------------------------------------------------------------------------------------------------------------------------->
257
+ (S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
258
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------->
259
+ S {(eat(the(bacon),I) & cook(the(bacon),I))}
260
+
261
+ >>> printCCGDerivation(parses[3])
262
+ I cook and eat the bacon
263
+ NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
264
+ -------->T
265
+ (S/(S\NP)) {\F.F(I)}
266
+ ------------------------------------------------------------------------------------->
267
+ (((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
268
+ -------------------------------------------------------------------------------------------------------------------<
269
+ ((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
270
+ --------------------------------------------------------------------------------------------------------------------------------------->B
271
+ ((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
272
+ -------------------------------------------------------------------------------------------------------------------------------------------------->
273
+ (S\NP) {\y.(eat(the(bacon),y) & cook(the(bacon),y))}
274
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------->
275
+ S {(eat(the(bacon),I) & cook(the(bacon),I))}
276
+
277
+ >>> printCCGDerivation(parses[4])
278
+ I cook and eat the bacon
279
+ NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
280
+ -------->T
281
+ (S/(S\NP)) {\F.F(I)}
282
+ ------------------------------------------------------------------------------------->
283
+ (((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
284
+ -------------------------------------------------------------------------------------------------------------------<
285
+ ((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
286
+ --------------------------------------------------------------------------------------------------------------------------->B
287
+ (S/NP) {\x.(eat(x,I) & cook(x,I))}
288
+ ------------------------------->
289
+ NP {the(bacon)}
290
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------->
291
+ S {(eat(the(bacon),I) & cook(the(bacon),I))}
292
+
293
+ >>> printCCGDerivation(parses[5])
294
+ I cook and eat the bacon
295
+ NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
296
+ -------->T
297
+ (S/(S\NP)) {\F.F(I)}
298
+ ------------------------------------------------------------------------------------->
299
+ (((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
300
+ -------------------------------------------------------------------------------------------------------------------<
301
+ ((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
302
+ --------------------------------------------------------------------------------------------------------------------------------------->B
303
+ ((S\NP)/N) {\x y.(eat(the(x),y) & cook(the(x),y))}
304
+ ----------------------------------------------------------------------------------------------------------------------------------------------->B
305
+ (S/N) {\x.(eat(the(x),I) & cook(the(x),I))}
306
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------->
307
+ S {(eat(the(bacon),I) & cook(the(bacon),I))}
308
+
309
+ >>> printCCGDerivation(parses[6])
310
+ I cook and eat the bacon
311
+ NP {I} ((S\NP)/NP) {\x y.cook(x,y)} ((_var0\.,_var0)/.,_var0) {\P Q x y.(P(x,y) & Q(x,y))} ((S\NP)/NP) {\x y.eat(x,y)} (NP/N) {\x.the(x)} N {bacon}
312
+ -------->T
313
+ (S/(S\NP)) {\F.F(I)}
314
+ ------------------------------------------------------------------------------------->
315
+ (((S\NP)/NP)\.,((S\NP)/NP)) {\Q x y.(eat(x,y) & Q(x,y))}
316
+ -------------------------------------------------------------------------------------------------------------------<
317
+ ((S\NP)/NP) {\x y.(eat(x,y) & cook(x,y))}
318
+ --------------------------------------------------------------------------------------------------------------------------->B
319
+ (S/NP) {\x.(eat(x,I) & cook(x,I))}
320
+ ----------------------------------------------------------------------------------------------------------------------------------------------->B
321
+ (S/N) {\x.(eat(the(x),I) & cook(the(x),I))}
322
+ ---------------------------------------------------------------------------------------------------------------------------------------------------------->
323
+ S {(eat(the(bacon),I) & cook(the(bacon),I))}
324
+
325
+ Tests from published papers
326
+ ------------------------------
327
+
328
+ An example from "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", Hockenmaier and Steedman, 2007, Page 359, https://www.aclweb.org/anthology/J/J07/J07-3004.pdf
329
+
330
+ >>> lex = lexicon.fromstring('''
331
+ ... :- S, NP
332
+ ... I => NP {I}
333
+ ... give => ((S\\NP)/NP)/NP {\\x y z.give(y,x,z)}
334
+ ... them => NP {them}
335
+ ... money => NP {money}
336
+ ... ''',
337
+ ... True)
338
+
339
+ >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
340
+ >>> parses = list(parser.parse("I give them money".split()))
341
+ >>> print(str(len(parses)) + " parses")
342
+ 3 parses
343
+
344
+ >>> printCCGDerivation(parses[0])
345
+ I give them money
346
+ NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
347
+ -------------------------------------------------->
348
+ ((S\NP)/NP) {\y z.give(y,them,z)}
349
+ -------------------------------------------------------------->
350
+ (S\NP) {\z.give(money,them,z)}
351
+ ----------------------------------------------------------------------<
352
+ S {give(money,them,I)}
353
+
354
+ >>> printCCGDerivation(parses[1])
355
+ I give them money
356
+ NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
357
+ -------->T
358
+ (S/(S\NP)) {\F.F(I)}
359
+ -------------------------------------------------->
360
+ ((S\NP)/NP) {\y z.give(y,them,z)}
361
+ -------------------------------------------------------------->
362
+ (S\NP) {\z.give(money,them,z)}
363
+ ---------------------------------------------------------------------->
364
+ S {give(money,them,I)}
365
+
366
+
367
+ >>> printCCGDerivation(parses[2])
368
+ I give them money
369
+ NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them} NP {money}
370
+ -------->T
371
+ (S/(S\NP)) {\F.F(I)}
372
+ -------------------------------------------------->
373
+ ((S\NP)/NP) {\y z.give(y,them,z)}
374
+ ---------------------------------------------------------->B
375
+ (S/NP) {\y.give(y,them,I)}
376
+ ---------------------------------------------------------------------->
377
+ S {give(money,them,I)}
378
+
379
+
380
+ An example from "CCGbank: A Corpus of CCG Derivations and Dependency Structures Extracted from the Penn Treebank", Hockenmaier and Steedman, 2007, Page 359, https://www.aclweb.org/anthology/J/J07/J07-3004.pdf
381
+
382
+ >>> lex = lexicon.fromstring('''
383
+ ... :- N, NP, S
384
+ ... money => N {money}
385
+ ... that => (N\\N)/(S/NP) {\\P Q x.(P(x) & Q(x))}
386
+ ... I => NP {I}
387
+ ... give => ((S\\NP)/NP)/NP {\\x y z.give(y,x,z)}
388
+ ... them => NP {them}
389
+ ... ''',
390
+ ... True)
391
+
392
+ >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
393
+ >>> parses = list(parser.parse("money that I give them".split()))
394
+ >>> print(str(len(parses)) + " parses")
395
+ 3 parses
396
+
397
+ >>> printCCGDerivation(parses[0])
398
+ money that I give them
399
+ N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
400
+ -------->T
401
+ (S/(S\NP)) {\F.F(I)}
402
+ -------------------------------------------------->
403
+ ((S\NP)/NP) {\y z.give(y,them,z)}
404
+ ---------------------------------------------------------->B
405
+ (S/NP) {\y.give(y,them,I)}
406
+ ------------------------------------------------------------------------------------------------->
407
+ (N\N) {\Q x.(give(x,them,I) & Q(x))}
408
+ ------------------------------------------------------------------------------------------------------------<
409
+ N {\x.(give(x,them,I) & money(x))}
410
+
411
+ >>> printCCGDerivation(parses[1])
412
+ money that I give them
413
+ N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
414
+ ----------->T
415
+ (N/(N\N)) {\F.F(money)}
416
+ -------->T
417
+ (S/(S\NP)) {\F.F(I)}
418
+ -------------------------------------------------->
419
+ ((S\NP)/NP) {\y z.give(y,them,z)}
420
+ ---------------------------------------------------------->B
421
+ (S/NP) {\y.give(y,them,I)}
422
+ ------------------------------------------------------------------------------------------------->
423
+ (N\N) {\Q x.(give(x,them,I) & Q(x))}
424
+ ------------------------------------------------------------------------------------------------------------>
425
+ N {\x.(give(x,them,I) & money(x))}
426
+
427
+ >>> printCCGDerivation(parses[2])
428
+ money that I give them
429
+ N {money} ((N\N)/(S/NP)) {\P Q x.(P(x) & Q(x))} NP {I} (((S\NP)/NP)/NP) {\x y z.give(y,x,z)} NP {them}
430
+ ----------->T
431
+ (N/(N\N)) {\F.F(money)}
432
+ -------------------------------------------------->B
433
+ (N/(S/NP)) {\P x.(P(x) & money(x))}
434
+ -------->T
435
+ (S/(S\NP)) {\F.F(I)}
436
+ -------------------------------------------------->
437
+ ((S\NP)/NP) {\y z.give(y,them,z)}
438
+ ---------------------------------------------------------->B
439
+ (S/NP) {\y.give(y,them,I)}
440
+ ------------------------------------------------------------------------------------------------------------>
441
+ N {\x.(give(x,them,I) & money(x))}
442
+
443
+
444
+ -------
445
+ Lexicon
446
+ -------
447
+
448
+ >>> from nltk.ccg import lexicon
449
+
450
+ Parse lexicon with semantics
451
+
452
+ >>> print(str(lexicon.fromstring(
453
+ ... '''
454
+ ... :- S,NP
455
+ ...
456
+ ... IntransVsg :: S\\NP[sg]
457
+ ...
458
+ ... sleeps => IntransVsg {\\x.sleep(x)}
459
+ ... eats => S\\NP[sg]/NP {\\x y.eat(x,y)}
460
+ ...
461
+ ... and => var\\var/var {\\x y.x & y}
462
+ ... ''',
463
+ ... True
464
+ ... )))
465
+ and => ((_var0\_var0)/_var0) {(\x y.x & y)}
466
+ eats => ((S\NP['sg'])/NP) {\x y.eat(x,y)}
467
+ sleeps => (S\NP['sg']) {\x.sleep(x)}
468
+
469
+ Parse lexicon without semantics
470
+
471
+ >>> print(str(lexicon.fromstring(
472
+ ... '''
473
+ ... :- S,NP
474
+ ...
475
+ ... IntransVsg :: S\\NP[sg]
476
+ ...
477
+ ... sleeps => IntransVsg
478
+ ... eats => S\\NP[sg]/NP {sem=\\x y.eat(x,y)}
479
+ ...
480
+ ... and => var\\var/var
481
+ ... ''',
482
+ ... False
483
+ ... )))
484
+ and => ((_var0\_var0)/_var0)
485
+ eats => ((S\NP['sg'])/NP)
486
+ sleeps => (S\NP['sg'])
487
+
488
+ Semantics are missing
489
+
490
+ >>> print(str(lexicon.fromstring(
491
+ ... '''
492
+ ... :- S,NP
493
+ ...
494
+ ... eats => S\\NP[sg]/NP
495
+ ... ''',
496
+ ... True
497
+ ... )))
498
+ Traceback (most recent call last):
499
+ ...
500
+ AssertionError: eats => S\NP[sg]/NP must contain semantics because include_semantics is set to True
501
+
502
+
503
+ ------------------------------------
504
+ CCG combinator semantics computation
505
+ ------------------------------------
506
+
507
+ >>> from nltk.sem.logic import *
508
+ >>> from nltk.ccg.logic import *
509
+
510
+ >>> read_expr = Expression.fromstring
511
+
512
+ Compute semantics from function application
513
+
514
+ >>> print(str(compute_function_semantics(read_expr(r'\x.P(x)'), read_expr(r'book'))))
515
+ P(book)
516
+
517
+ >>> print(str(compute_function_semantics(read_expr(r'\P.P(book)'), read_expr(r'read'))))
518
+ read(book)
519
+
520
+ >>> print(str(compute_function_semantics(read_expr(r'\P.P(book)'), read_expr(r'\x.read(x)'))))
521
+ read(book)
522
+
523
+ Compute semantics from composition
524
+
525
+ >>> print(str(compute_composition_semantics(read_expr(r'\x.P(x)'), read_expr(r'\x.Q(x)'))))
526
+ \x.P(Q(x))
527
+
528
+ >>> print(str(compute_composition_semantics(read_expr(r'\x.P(x)'), read_expr(r'read'))))
529
+ Traceback (most recent call last):
530
+ ...
531
+ AssertionError: `read` must be a lambda expression
532
+
533
+ Compute semantics from substitution
534
+
535
+ >>> print(str(compute_substitution_semantics(read_expr(r'\x y.P(x,y)'), read_expr(r'\x.Q(x)'))))
536
+ \x.P(x,Q(x))
537
+
538
+ >>> print(str(compute_substitution_semantics(read_expr(r'\x.P(x)'), read_expr(r'read'))))
539
+ Traceback (most recent call last):
540
+ ...
541
+ AssertionError: `\x.P(x)` must be a lambda expression with 2 arguments
542
+
543
+ Compute type-raise semantics
544
+
545
+ >>> print(str(compute_type_raised_semantics(read_expr(r'\x.P(x)'))))
546
+ \F x.F(P(x))
547
+
548
+ >>> print(str(compute_type_raised_semantics(read_expr(r'\x.F(x)'))))
549
+ \F1 x.F1(F(x))
550
+
551
+ >>> print(str(compute_type_raised_semantics(read_expr(r'\x y z.P(x,y,z)'))))
552
+ \F x y z.F(P(x,y,z))
llmeval-env/lib/python3.10/site-packages/nltk/test/chat80.doctest ADDED
@@ -0,0 +1,232 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =======
5
+ Chat-80
6
+ =======
7
+
8
+ Chat-80 was a natural language system which allowed the user to
9
+ interrogate a Prolog knowledge base in the domain of world
10
+ geography. It was developed in the early '80s by Warren and Pereira; see
11
+ `<https://aclanthology.org/J82-3002.pdf>`_ for a description and
12
+ `<http://www.cis.upenn.edu/~pereira/oldies.html>`_ for the source
13
+ files.
14
+
15
+ The ``chat80`` module contains functions to extract data from the Chat-80
16
+ relation files ('the world database'), and convert them into a format
17
+ that can be incorporated in the FOL models of
18
+ ``nltk.sem.evaluate``. The code assumes that the Prolog
19
+ input files are available in the NLTK corpora directory.
20
+
21
+ The Chat-80 World Database consists of the following files::
22
+
23
+ world0.pl
24
+ rivers.pl
25
+ cities.pl
26
+ countries.pl
27
+ contain.pl
28
+ borders.pl
29
+
30
+ This module uses a slightly modified version of ``world0.pl``, in which
31
+ a set of Prolog rules has been omitted. The modified file is named
32
+ ``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since
33
+ it uses a list rather than a string in the second field.
34
+
35
+ Reading Chat-80 Files
36
+ =====================
37
+
38
+ Chat-80 relations are like tables in a relational database. The
39
+ relation acts as the name of the table; the first argument acts as the
40
+ 'primary key'; and subsequent arguments are further fields in the
41
+ table. In general, the name of the table provides a label for a unary
42
+ predicate whose extension is all the primary keys. For example,
43
+ relations in ``cities.pl`` are of the following form::
44
+
45
+ 'city(athens,greece,1368).'
46
+
47
+ Here, ``'athens'`` is the key, and will be mapped to a member of the
48
+ unary predicate *city*.
49
+
50
+ By analogy with NLTK corpora, ``chat80`` defines a number of 'items'
51
+ which correspond to the relations.
52
+
53
+ >>> from nltk.sem import chat80
54
+ >>> print(chat80.items)
55
+ ('borders', 'circle_of_lat', 'circle_of_long', 'city', ...)
56
+
57
+ The fields in the table are mapped to binary predicates. The first
58
+ argument of the predicate is the primary key, while the second
59
+ argument is the data in the relevant field. Thus, in the above
60
+ example, the third field is mapped to the binary predicate
61
+ *population_of*, whose extension is a set of pairs such as
62
+ ``'(athens, 1368)'``.
63
+
64
+ An exception to this general framework is required by the relations in
65
+ the files ``borders.pl`` and ``contains.pl``. These contain facts of the
66
+ following form::
67
+
68
+ 'borders(albania,greece).'
69
+
70
+ 'contains0(africa,central_africa).'
71
+
72
+ We do not want to form a unary concept out of the element in
73
+ the first field of these records, and we want the label of the binary
74
+ relation just to be ``'border'``/``'contain'`` respectively.
75
+
76
+ In order to drive the extraction process, we use 'relation metadata bundles'
77
+ which are Python dictionaries such as the following::
78
+
79
+ city = {'label': 'city',
80
+ 'closures': [],
81
+ 'schema': ['city', 'country', 'population'],
82
+ 'filename': 'cities.pl'}
83
+
84
+ According to this, the file ``city['filename']`` contains a list of
85
+ relational tuples (or more accurately, the corresponding strings in
86
+ Prolog form) whose predicate symbol is ``city['label']`` and whose
87
+ relational schema is ``city['schema']``. The notion of a ``closure`` is
88
+ discussed in the next section.
89
+
90
+ Concepts
91
+ ========
92
+ In order to encapsulate the results of the extraction, a class of
93
+ ``Concept``\ s is introduced. A ``Concept`` object has a number of
94
+ attributes, in particular a ``prefLabel``, an arity, and an ``extension``.
95
+
96
+ >>> c1 = chat80.Concept('dog', arity=1, extension=set(['d1', 'd2']))
97
+ >>> print(c1)
98
+ Label = 'dog'
99
+ Arity = 1
100
+ Extension = ['d1', 'd2']
101
+
102
+
103
+
104
+ The ``extension`` attribute makes it easier to inspect the output of
105
+ the extraction.
106
+
107
+ >>> schema = ['city', 'country', 'population']
108
+ >>> concepts = chat80.clause2concepts('cities.pl', 'city', schema)
109
+ >>> concepts
110
+ [Concept('city'), Concept('country_of'), Concept('population_of')]
111
+ >>> for c in concepts:
112
+ ... print("%s:\n\t%s" % (c.prefLabel, c.extension[:4]))
113
+ city:
114
+ ['athens', 'bangkok', 'barcelona', 'berlin']
115
+ country_of:
116
+ [('athens', 'greece'), ('bangkok', 'thailand'), ('barcelona', 'spain'), ('berlin', 'east_germany')]
117
+ population_of:
118
+ [('athens', '1368'), ('bangkok', '1178'), ('barcelona', '1280'), ('berlin', '3481')]
119
+
120
+ In addition, the ``extension`` can be further
121
+ processed: in the case of the ``'border'`` relation, we check that the
122
+ relation is **symmetric**, and in the case of the ``'contain'``
123
+ relation, we carry out the **transitive closure**. The closure
124
+ properties associated with a concept are indicated in the relation
125
+ metadata, as described earlier.
126
+
127
+ >>> borders = set([('a1', 'a2'), ('a2', 'a3')])
128
+ >>> c2 = chat80.Concept('borders', arity=2, extension=borders)
129
+ >>> print(c2)
130
+ Label = 'borders'
131
+ Arity = 2
132
+ Extension = [('a1', 'a2'), ('a2', 'a3')]
133
+ >>> c3 = chat80.Concept('borders', arity=2, closures=['symmetric'], extension=borders)
134
+ >>> c3.close()
135
+ >>> print(c3)
136
+ Label = 'borders'
137
+ Arity = 2
138
+ Extension = [('a1', 'a2'), ('a2', 'a1'), ('a2', 'a3'), ('a3', 'a2')]
139
+
140
+ The ``extension`` of a ``Concept`` object is then incorporated into a
141
+ ``Valuation`` object.
142
+
143
+ Persistence
144
+ ===========
145
+ The functions ``val_dump`` and ``val_load`` are provided to allow a
146
+ valuation to be stored in a persistent database and re-loaded, rather
147
+ than having to be re-computed each time.
148
+
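+ As a minimal sketch (this assumes that ``chat80.rels`` is the module-level list
+ of relation metadata bundles, and that ``val_dump`` appends the ``.db`` suffix
+ to the file name itself)::
+
+     from nltk.sem import chat80
+     # build a valuation from the metadata bundles and store it on disk
+     chat80.val_dump(chat80.rels, 'chat80_valuation')
+     # later: re-load the stored valuation instead of re-computing it
+     val = chat80.val_load('chat80_valuation')
+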
149
+ Individuals and Lexical Items
150
+ =============================
151
+ As well as deriving relations from the Chat-80 data, we also create a
152
+ set of individual constants, one for each entity in the domain. The
153
+ individual constants are string-identical to the entities. For
154
+ example, given a data item such as ``'zloty'``, we add to the valuation
155
+ a pair ``('zloty', 'zloty')``. In order to parse English sentences that
156
+ refer to these entities, we also create a lexical item such as the
157
+ following for each individual constant::
158
+
159
+ PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty'
160
+
161
+ The set of rules is written to the file ``chat_pnames.fcfg`` in the
162
+ current directory.
163
+
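+ As a rough illustration (a hypothetical snippet, not part of the ``chat80``
+ API), a rule of this shape can be generated from an individual constant::
+
+     const = 'zloty'
+     # produces: PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty'
+     rule = "PropN[num=sg, sem=<\\P.(P %s)>] -> '%s'" % (const, const.capitalize())
+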
164
+ SQL Query
165
+ =========
166
+
167
+ The ``city`` relation is also available in RDB form and can be queried
168
+ using SQL statements.
169
+
170
+ >>> import nltk
171
+ >>> q = "SELECT City, Population FROM city_table WHERE Country = 'china' and Population > 1000"
172
+ >>> for answer in chat80.sql_query('corpora/city_database/city.db', q):
173
+ ... print("%-10s %4s" % answer)
174
+ canton 1496
175
+ chungking 1100
176
+ mukden 1551
177
+ peking 2031
178
+ shanghai 5407
179
+ tientsin 1795
180
+
181
+ The (deliberately naive) grammar ``sql0.fcfg`` translates from English
182
+ to SQL:
183
+
184
+ >>> nltk.data.show_cfg('grammars/book_grammars/sql0.fcfg')
185
+ % start S
186
+ S[SEM=(?np + WHERE + ?vp)] -> NP[SEM=?np] VP[SEM=?vp]
187
+ VP[SEM=(?v + ?pp)] -> IV[SEM=?v] PP[SEM=?pp]
188
+ VP[SEM=(?v + ?ap)] -> IV[SEM=?v] AP[SEM=?ap]
189
+ NP[SEM=(?det + ?n)] -> Det[SEM=?det] N[SEM=?n]
190
+ PP[SEM=(?p + ?np)] -> P[SEM=?p] NP[SEM=?np]
191
+ AP[SEM=?pp] -> A[SEM=?a] PP[SEM=?pp]
192
+ NP[SEM='Country="greece"'] -> 'Greece'
193
+ NP[SEM='Country="china"'] -> 'China'
194
+ Det[SEM='SELECT'] -> 'Which' | 'What'
195
+ N[SEM='City FROM city_table'] -> 'cities'
196
+ IV[SEM=''] -> 'are'
197
+ A[SEM=''] -> 'located'
198
+ P[SEM=''] -> 'in'
199
+
200
+ Given this grammar, we can express, and then execute, queries in English.
201
+
202
+ >>> cp = nltk.parse.load_parser('grammars/book_grammars/sql0.fcfg')
203
+ >>> query = 'What cities are in China'
204
+ >>> for tree in cp.parse(query.split()):
205
+ ... answer = tree.label()['SEM']
206
+ ... q = " ".join(answer)
207
+ ... print(q)
208
+ ...
209
+ SELECT City FROM city_table WHERE Country="china"
210
+
211
+ >>> rows = chat80.sql_query('corpora/city_database/city.db', q)
212
+ >>> for r in rows: print("%s" % r, end=' ')
213
+ canton chungking dairen harbin kowloon mukden peking shanghai sian tientsin
214
+
215
+
216
+ Using Valuations
217
+ -----------------
218
+
219
+ In order to convert a list of ``Concept`` objects into a valuation, we use the
220
+ ``make_valuation()`` function; setting ``read=True`` creates and returns
221
+ a new ``Valuation`` object which contains the results.
222
+
223
+ >>> val = chat80.make_valuation(concepts, read=True)
224
+ >>> 'calcutta' in val['city']
225
+ True
226
+ >>> [town for (town, country) in val['country_of'] if country == 'india']
227
+ ['bombay', 'calcutta', 'delhi', 'hyderabad', 'madras']
228
+ >>> dom = val.domain
229
+ >>> g = nltk.sem.Assignment(dom)
230
+ >>> m = nltk.sem.Model(dom, val)
231
+ >>> m.evaluate(r'population_of(jakarta, 533)', g)
232
+ True
llmeval-env/lib/python3.10/site-packages/nltk/test/childes.doctest ADDED
@@ -0,0 +1,190 @@
1
+ =======================
2
+ CHILDES Corpus Readers
3
+ =======================
4
+
5
+ Read the XML version of the CHILDES corpus.
6
+
7
+ Setup
8
+ =====
9
+
10
+ >>> from nltk.test.childes_fixt import setup_module
11
+ >>> setup_module()
12
+
13
+ How to use CHILDESCorpusReader
14
+ ==============================
15
+
16
+ Read the CHILDESCorpusReader class and read the CHILDES corpus saved in
17
+ the nltk_data directory.
18
+
19
+ >>> import nltk
20
+ >>> from nltk.corpus.reader import CHILDESCorpusReader
21
+ >>> corpus_root = nltk.data.find('corpora/childes/data-xml/Eng-USA-MOR/')
22
+
23
+ Reading files in the Valian corpus (Valian, 1991).
24
+
25
+ >>> valian = CHILDESCorpusReader(corpus_root, 'Valian/.*.xml')
26
+ >>> valian.fileids()
27
+ ['Valian/01a.xml', 'Valian/01b.xml', 'Valian/02a.xml', 'Valian/02b.xml',...
28
+
29
+ Count the number of files
30
+
31
+ >>> len(valian.fileids())
32
+ 43
33
+
34
+ Printing properties of the corpus files.
35
+
36
+ >>> corpus_data = valian.corpus(valian.fileids())
37
+ >>> print(corpus_data[0]['Lang'])
38
+ eng
39
+ >>> for key in sorted(corpus_data[0].keys()):
40
+ ... print(key, ": ", corpus_data[0][key])
41
+ Corpus : valian
42
+ Date : 1986-03-04
43
+ Id : 01a
44
+ Lang : eng
45
+ Version : 2.0.1
46
+ {http://www.w3.org/2001/XMLSchema-instance}schemaLocation : http://www.talkbank.org/ns/talkbank http://talkbank.org/software/talkbank.xsd
47
+
48
+ Printing information of participants of the corpus. The most common codes for
49
+ the participants are 'CHI' (target child), 'MOT' (mother), and 'INV' (investigator).
50
+
51
+ >>> corpus_participants = valian.participants(valian.fileids())
52
+ >>> for this_corpus_participants in corpus_participants[:2]:
53
+ ... for key in sorted(this_corpus_participants.keys()):
54
+ ... dct = this_corpus_participants[key]
55
+ ... print(key, ": ", [(k, dct[k]) for k in sorted(dct.keys())])
56
+ CHI : [('age', 'P2Y1M3D'), ('group', 'normal'), ('id', 'CHI'), ('language', 'eng'), ('role', 'Target_Child'), ('sex', 'female')]
57
+ INV : [('id', 'INV'), ('language', 'eng'), ('role', 'Investigator')]
58
+ MOT : [('id', 'MOT'), ('language', 'eng'), ('role', 'Mother')]
59
+ CHI : [('age', 'P2Y1M12D'), ('group', 'normal'), ('id', 'CHI'), ('language', 'eng'), ('role', 'Target_Child'), ('sex', 'female')]
60
+ INV : [('id', 'INV'), ('language', 'eng'), ('role', 'Investigator')]
61
+ MOT : [('id', 'MOT'), ('language', 'eng'), ('role', 'Mother')]
62
+
63
+ Printing words.
64
+
65
+ >>> valian.words('Valian/01a.xml')
66
+ ['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', ...
67
+
68
+ Printing sentences.
69
+
70
+ >>> valian.sents('Valian/01a.xml')
71
+ [['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname',
72
+ 'and', 'it', 'is', 'March', 'fourth', 'I', 'believe', 'and', 'when',
73
+ 'was', "Parent's", 'birthday'], ["Child's"], ['oh', "I'm", 'sorry'],
74
+ ["that's", 'okay'], ...
75
+
76
+ You can specify the participants with the argument *speaker*.
77
+
78
+ >>> valian.words('Valian/01a.xml',speaker=['INV'])
79
+ ['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', ...
80
+ >>> valian.words('Valian/01a.xml',speaker=['MOT'])
81
+ ["Child's", "that's", 'okay', 'February', 'first', 'nineteen', ...
82
+ >>> valian.words('Valian/01a.xml',speaker=['CHI'])
83
+ ['tape', 'it', 'up', 'and', 'two', 'tape', 'players', 'have',...
84
+
85
+
86
+ tagged_words() and tagged_sents() return the usual (word,pos) tuple lists.
87
+ POS tags in the CHILDES are automatically assigned by MOR and POST programs
88
+ (MacWhinney, 2000).
89
+
90
+ >>> valian.tagged_words('Valian/01a.xml')[:30]
91
+ [('at', 'prep'), ('Parent', 'n:prop'), ("Lastname's", 'n:prop'), ('house', 'n'),
92
+ ('with', 'prep'), ('Child', 'n:prop'), ('Lastname', 'n:prop'), ('and', 'coord'),
93
+ ('it', 'pro'), ('is', 'v:cop'), ('March', 'n:prop'), ('fourth', 'adj'),
94
+ ('I', 'pro:sub'), ('believe', 'v'), ('and', 'coord'), ('when', 'adv:wh'),
95
+ ('was', 'v:cop'), ("Parent's", 'n:prop'), ('birthday', 'n'), ("Child's", 'n:prop'),
96
+ ('oh', 'co'), ("I'm", 'pro:sub'), ('sorry', 'adj'), ("that's", 'pro:dem'),
97
+ ('okay', 'adj'), ('February', 'n:prop'), ('first', 'adj'),
98
+ ('nineteen', 'det:num'), ('eighty', 'det:num'), ('four', 'det:num')]
99
+
100
+ >>> valian.tagged_sents('Valian/01a.xml')[:10]
101
+ [[('at', 'prep'), ('Parent', 'n:prop'), ("Lastname's", 'n:prop'), ('house', 'n'),
102
+ ('with', 'prep'), ('Child', 'n:prop'), ('Lastname', 'n:prop'), ('and', 'coord'),
103
+ ('it', 'pro'), ('is', 'v:cop'), ('March', 'n:prop'), ('fourth', 'adj'),
104
+ ('I', 'pro:sub'), ('believe', 'v'), ('and', 'coord'), ('when', 'adv:wh'),
105
+ ('was', 'v:cop'), ("Parent's", 'n:prop'), ('birthday', 'n')],
106
+ [("Child's", 'n:prop')], [('oh', 'co'), ("I'm", 'pro:sub'), ('sorry', 'adj')],
107
+ [("that's", 'pro:dem'), ('okay', 'adj')],
108
+ [('February', 'n:prop'), ('first', 'adj'), ('nineteen', 'det:num'),
109
+ ('eighty', 'det:num'), ('four', 'det:num')],
110
+ [('great', 'adj')],
111
+ [('and', 'coord'), ("she's", 'pro:sub'), ('two', 'det:num'), ('years', 'n'), ('old', 'adj')],
112
+ [('correct', 'adj')],
113
+ [('okay', 'co')], [('she', 'pro:sub'), ('just', 'adv:int'), ('turned', 'part'), ('two', 'det:num'),
114
+ ('a', 'det'), ('month', 'n'), ('ago', 'adv')]]
115
+
116
+ When the argument *stem* is true, the word stems (e.g., 'is' -> 'be-3PS') are
117
+ used instead of the original words.
118
+
119
+ >>> valian.words('Valian/01a.xml')[:30]
120
+ ['at', 'Parent', "Lastname's", 'house', 'with', 'Child', 'Lastname', 'and', 'it', 'is', ...
121
+ >>> valian.words('Valian/01a.xml',stem=True)[:30]
122
+ ['at', 'Parent', 'Lastname', 's', 'house', 'with', 'Child', 'Lastname', 'and', 'it', 'be-3S', ...
123
+
124
+ When the argument *replace* is true, the replaced words are used instead of
125
+ the original words.
126
+
127
+ >>> valian.words('Valian/01a.xml',speaker='CHI')[247]
128
+ 'tikteat'
129
+ >>> valian.words('Valian/01a.xml',speaker='CHI',replace=True)[247]
130
+ 'trick'
131
+
132
+ When the argument *relation* is true, the relational relationships in the
133
+ sentence are returned. See Sagae et al. (2010) for details of the relational
134
+ structure adopted in the CHILDES.
135
+
136
+ >>> valian.words('Valian/01a.xml',relation=True)[:10]
137
+ [[('at', 'prep', '1|0|ROOT'), ('Parent', 'n', '2|5|VOC'), ('Lastname', 'n', '3|5|MOD'), ('s', 'poss', '4|5|MOD'), ('house', 'n', '5|1|POBJ'), ('with', 'prep', '6|1|JCT'), ('Child', 'n', '7|8|NAME'), ('Lastname', 'n', '8|6|POBJ'), ('and', 'coord', '9|8|COORD'), ('it', 'pro', '10|11|SUBJ'), ('be-3S', 'v', '11|9|COMP'), ('March', 'n', '12|11|PRED'), ('fourth', 'adj', '13|12|MOD'), ('I', 'pro', '15|16|SUBJ'), ('believe', 'v', '16|14|ROOT'), ('and', 'coord', '18|17|ROOT'), ('when', 'adv', '19|20|PRED'), ('be-PAST', 'v', '20|18|COMP'), ('Parent', 'n', '21|23|MOD'), ('s', 'poss', '22|23|MOD'), ('birth', 'n', '23|20|SUBJ')], [('Child', 'n', '1|2|MOD'), ('s', 'poss', '2|0|ROOT')], [('oh', 'co', '1|4|COM'), ('I', 'pro', '3|4|SUBJ'), ('be', 'v', '4|0|ROOT'), ('sorry', 'adj', '5|4|PRED')], [('that', 'pro', '1|2|SUBJ'), ('be', 'v', '2|0|ROOT'), ('okay', 'adj', '3|2|PRED')], [('February', 'n', '1|6|VOC'), ('first', 'adj', '2|6|ENUM'), ('nineteen', 'det', '4|6|ENUM'), ('eighty', 'det', '5|6|ENUM'), ('four', 'det', '6|0|ROOT')], [('great', 'adj', '1|0|ROOT')], [('and', 'coord', '1|0|ROOT'), ('she', 'pro', '2|1|ROOT'), ('be', 'aux', '3|5|AUX'), ('two', 'det', '4|5|QUANT'), ('year-PL', 'n', '5|2|ROOT'), ('old', 'adj', '6|5|MOD')], [('correct', 'adj', '1|0|ROOT')], [('okay', 'co', '1|0|ROOT')], [('she', 'pro', '1|0|ROOT'), ('just', 'adv', '2|3|JCT'), ('turn-PERF', 'part', '3|1|XCOMP'), ('two', 'det', '4|6|QUANT'), ('a', 'det', '5|6|DET'), ('month', 'n', '6|3|OBJ'), ('ago', 'adv', '7|3|JCT')]]
138
+
139
+ Printing age. When the argument *month* is true, the age information in
140
+ the CHILDES format is converted into the number of months.
141
+
142
+ >>> valian.age()
143
+ ['P2Y1M3D', 'P2Y1M12D', 'P1Y9M21D', 'P1Y9M28D', 'P2Y1M23D', ...
144
+ >>> valian.age('Valian/01a.xml')
145
+ ['P2Y1M3D']
146
+ >>> valian.age('Valian/01a.xml',month=True)
147
+ [25]
148
+
149
+ Printing MLU. The criteria for the MLU computation is broadly based on
150
+ Brown (1973).
151
+
152
+ >>> valian.MLU()
153
+ [2.3574660633484..., 2.292682926829..., 3.492857142857..., 2.961783439490...,
154
+ 2.0842696629213..., 3.169811320754..., 3.137404580152..., 3.0578034682080...,
155
+ 4.090163934426..., 3.488372093023..., 2.8773584905660..., 3.4792899408284...,
156
+ 4.0111940298507..., 3.456790123456..., 4.487603305785..., 4.007936507936...,
157
+ 5.25, 5.154696132596..., ...]
158
+
159
+ >>> valian.MLU('Valian/01a.xml')
160
+ [2.35746606334...]
161
+
162
+
163
+ Basic stuff
164
+ ==============================
165
+
166
+ Count the number of words and sentences of each file.
167
+
168
+ >>> valian = CHILDESCorpusReader(corpus_root, 'Valian/.*.xml')
169
+ >>> for this_file in valian.fileids()[:6]:
170
+ ... print(valian.corpus(this_file)[0]['Corpus'], valian.corpus(this_file)[0]['Id'])
171
+ ... print("num of words: %i" % len(valian.words(this_file)))
172
+ ... print("num of sents: %i" % len(valian.sents(this_file)))
173
+ valian 01a
174
+ num of words: 3606
175
+ num of sents: 1027
176
+ valian 01b
177
+ num of words: 4376
178
+ num of sents: 1274
179
+ valian 02a
180
+ num of words: 2673
181
+ num of sents: 801
182
+ valian 02b
183
+ num of words: 5020
184
+ num of sents: 1583
185
+ valian 03a
186
+ num of words: 2743
187
+ num of sents: 988
188
+ valian 03b
189
+ num of words: 4409
190
+ num of sents: 1397
llmeval-env/lib/python3.10/site-packages/nltk/test/childes_fixt.py ADDED
@@ -0,0 +1,13 @@
1
+ def setup_module():
2
+ import pytest
3
+
4
+ import nltk.data
5
+
6
+ try:
7
+ nltk.data.find("corpora/childes/data-xml/Eng-USA-MOR/")
8
+ except LookupError as e:
9
+ pytest.skip(
10
+ "The CHILDES corpus is not found. "
11
+ "It should be manually downloaded and saved/unpacked "
12
+ "to [NLTK_Data_Dir]/corpora/childes/"
13
+ )
llmeval-env/lib/python3.10/site-packages/nltk/test/chunk.doctest ADDED
@@ -0,0 +1,372 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==========
5
+ Chunking
6
+ ==========
7
+
8
+ >>> from nltk.chunk import *
9
+ >>> from nltk.chunk.util import *
10
+ >>> from nltk.chunk.regexp import *
11
+ >>> from nltk import Tree
12
+
13
+ >>> tagged_text = "[ The/DT cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] [ the/DT dog/NN ] chewed/VBD ./."
14
+ >>> gold_chunked_text = tagstr2tree(tagged_text)
15
+ >>> unchunked_text = gold_chunked_text.flatten()
16
+
17
+ Chunking uses a special regexp syntax for rules that delimit the chunks. These
18
+ rules must be converted to 'regular' regular expressions before a sentence can
19
+ be chunked.
20
+
21
+ >>> tag_pattern = "<DT>?<JJ>*<NN.*>"
22
+ >>> regexp_pattern = tag_pattern2re_pattern(tag_pattern)
23
+ >>> regexp_pattern
24
+ '(<(DT)>)?(<(JJ)>)*(<(NN[^\\{\\}<>]*)>)'
25
+
26
+ Construct some new chunking rules.
27
+
28
+ >>> chunk_rule = ChunkRule(r"<.*>+", "Chunk everything")
29
+ >>> strip_rule = StripRule(r"<VBD|IN|\.>", "Strip on verbs/prepositions")
30
+ >>> split_rule = SplitRule("<DT><NN>", "<DT><NN>",
31
+ ... "Split successive determiner/noun pairs")
32
+
33
+
34
+ Create and score a series of chunk parsers, successively more complex.
35
+
36
+ >>> chunk_parser = RegexpChunkParser([chunk_rule], chunk_label='NP')
37
+ >>> chunked_text = chunk_parser.parse(unchunked_text)
38
+ >>> print(chunked_text)
39
+ (S
40
+ (NP
41
+ The/DT
42
+ cat/NN
43
+ sat/VBD
44
+ on/IN
45
+ the/DT
46
+ mat/NN
47
+ the/DT
48
+ dog/NN
49
+ chewed/VBD
50
+ ./.))
51
+
52
+ >>> chunkscore = ChunkScore()
53
+ >>> chunkscore.score(gold_chunked_text, chunked_text)
54
+ >>> print(chunkscore.precision())
55
+ 0.0
56
+
57
+ >>> print(chunkscore.recall())
58
+ 0.0
59
+
60
+ >>> print(chunkscore.f_measure())
61
+ 0
62
+
63
+ >>> for chunk in sorted(chunkscore.missed()): print(chunk)
64
+ (NP The/DT cat/NN)
65
+ (NP the/DT dog/NN)
66
+ (NP the/DT mat/NN)
67
+
68
+ >>> for chunk in chunkscore.incorrect(): print(chunk)
69
+ (NP
70
+ The/DT
71
+ cat/NN
72
+ sat/VBD
73
+ on/IN
74
+ the/DT
75
+ mat/NN
76
+ the/DT
77
+ dog/NN
78
+ chewed/VBD
79
+ ./.)
80
+
81
+ >>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule],
82
+ ... chunk_label='NP')
83
+ >>> chunked_text = chunk_parser.parse(unchunked_text)
84
+ >>> print(chunked_text)
85
+ (S
86
+ (NP The/DT cat/NN)
87
+ sat/VBD
88
+ on/IN
89
+ (NP the/DT mat/NN the/DT dog/NN)
90
+ chewed/VBD
91
+ ./.)
92
+ >>> assert chunked_text == chunk_parser.parse(list(unchunked_text))
93
+
94
+ >>> chunkscore = ChunkScore()
95
+ >>> chunkscore.score(gold_chunked_text, chunked_text)
96
+ >>> chunkscore.precision()
97
+ 0.5
98
+
99
+ >>> print(chunkscore.recall())
100
+ 0.33333333...
101
+
102
+ >>> print(chunkscore.f_measure())
103
+ 0.4
104
+
105
+ >>> for chunk in sorted(chunkscore.missed()): print(chunk)
106
+ (NP the/DT dog/NN)
107
+ (NP the/DT mat/NN)
108
+
109
+ >>> for chunk in chunkscore.incorrect(): print(chunk)
110
+ (NP the/DT mat/NN the/DT dog/NN)
111
+
112
+ >>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule, split_rule],
113
+ ... chunk_label='NP')
114
+ >>> chunked_text = chunk_parser.parse(unchunked_text, trace=True)
115
+ # Input:
116
+ <DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>
117
+ # Chunk everything:
118
+ {<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>}
119
+ # Strip on verbs/prepositions:
120
+ {<DT> <NN>} <VBD> <IN> {<DT> <NN> <DT> <NN>} <VBD> <.>
121
+ # Split successive determiner/noun pairs:
122
+ {<DT> <NN>} <VBD> <IN> {<DT> <NN>}{<DT> <NN>} <VBD> <.>
123
+ >>> print(chunked_text)
124
+ (S
125
+ (NP The/DT cat/NN)
126
+ sat/VBD
127
+ on/IN
128
+ (NP the/DT mat/NN)
129
+ (NP the/DT dog/NN)
130
+ chewed/VBD
131
+ ./.)
132
+
133
+ >>> chunkscore = ChunkScore()
134
+ >>> chunkscore.score(gold_chunked_text, chunked_text)
135
+ >>> chunkscore.precision()
136
+ 1.0
137
+
138
+ >>> chunkscore.recall()
139
+ 1.0
140
+
141
+ >>> chunkscore.f_measure()
142
+ 1.0
143
+
144
+ >>> chunkscore.missed()
145
+ []
146
+
147
+ >>> chunkscore.incorrect()
148
+ []
149
+
150
+ >>> chunk_parser.rules()
151
+ [<ChunkRule: '<.*>+'>, <StripRule: '<VBD|IN|\\.>'>,
152
+ <SplitRule: '<DT><NN>', '<DT><NN>'>]
153
+
154
+ Printing parsers:
155
+
156
+ >>> print(repr(chunk_parser))
157
+ <RegexpChunkParser with 3 rules>
158
+ >>> print(chunk_parser)
159
+ RegexpChunkParser with 3 rules:
160
+ Chunk everything
161
+ <ChunkRule: '<.*>+'>
162
+ Strip on verbs/prepositions
163
+ <StripRule: '<VBD|IN|\\.>'>
164
+ Split successive determiner/noun pairs
165
+ <SplitRule: '<DT><NN>', '<DT><NN>'>
166
+
167
+ Regression Tests
168
+ ~~~~~~~~~~~~~~~~
169
+ ChunkParserI
170
+ ------------
171
+ `ChunkParserI` is an abstract interface -- it is not meant to be
172
+ instantiated directly.
173
+
174
+ >>> ChunkParserI().parse([])
175
+ Traceback (most recent call last):
176
+ . . .
177
+ NotImplementedError
178
+
179
+
180
+ ChunkString
181
+ -----------
182
+ ChunkString can be built from a tree of tagged tuples, a tree of
183
+ trees, or a mixed list of both:
184
+
185
+ >>> t1 = Tree('S', [('w%d' % i, 't%d' % i) for i in range(10)])
186
+ >>> t2 = Tree('S', [Tree('t0', []), Tree('t1', ['c1'])])
187
+ >>> t3 = Tree('S', [('w0', 't0'), Tree('t1', ['c1'])])
188
+ >>> ChunkString(t1)
189
+ <ChunkString: '<t0><t1><t2><t3><t4><t5><t6><t7><t8><t9>'>
190
+ >>> ChunkString(t2)
191
+ <ChunkString: '<t0><t1>'>
192
+ >>> ChunkString(t3)
193
+ <ChunkString: '<t0><t1>'>
194
+
195
+ Other values generate an error:
196
+
197
+ >>> ChunkString(Tree('S', ['x']))
198
+ Traceback (most recent call last):
199
+ . . .
200
+ ValueError: chunk structures must contain tagged tokens or trees
201
+
202
+ The `str()` for a chunk string adds spaces to it, which makes it line
203
+ up with `str()` output for other chunk strings over the same
204
+ underlying input.
205
+
206
+ >>> cs = ChunkString(t1)
207
+ >>> print(cs)
208
+ <t0> <t1> <t2> <t3> <t4> <t5> <t6> <t7> <t8> <t9>
209
+ >>> cs.xform('<t3>', '{<t3>}')
210
+ >>> print(cs)
211
+ <t0> <t1> <t2> {<t3>} <t4> <t5> <t6> <t7> <t8> <t9>
212
+
213
+ The `_verify()` method makes sure that our transforms don't corrupt
214
+ the chunk string. By setting debug_level=2, `_verify()` will be
215
+ called at the end of every call to `xform`.
216
+
217
+ >>> cs = ChunkString(t1, debug_level=3)
218
+
219
+ >>> # tag not marked with <...>:
220
+ >>> cs.xform('<t3>', 't3')
221
+ Traceback (most recent call last):
222
+ . . .
223
+ ValueError: Transformation generated invalid chunkstring:
224
+ <t0><t1><t2>t3<t4><t5><t6><t7><t8><t9>
225
+
226
+ >>> # brackets not balanced:
227
+ >>> cs.xform('<t3>', '{<t3>')
228
+ Traceback (most recent call last):
229
+ . . .
230
+ ValueError: Transformation generated invalid chunkstring:
231
+ <t0><t1><t2>{<t3><t4><t5><t6><t7><t8><t9>
232
+
233
+ >>> # nested brackets:
234
+ >>> cs.xform('<t3><t4><t5>', '{<t3>{<t4>}<t5>}')
235
+ Traceback (most recent call last):
236
+ . . .
237
+ ValueError: Transformation generated invalid chunkstring:
238
+ <t0><t1><t2>{<t3>{<t4>}<t5>}<t6><t7><t8><t9>
239
+
240
+ >>> # modified tags:
241
+ >>> cs.xform('<t3>', '<t9>')
242
+ Traceback (most recent call last):
243
+ . . .
244
+ ValueError: Transformation generated invalid chunkstring: tag changed
245
+
246
+ >>> # added tags:
247
+ >>> cs.xform('<t9>', '<t9><t10>')
248
+ Traceback (most recent call last):
249
+ . . .
250
+ ValueError: Transformation generated invalid chunkstring: tag changed
251
+
252
+ Chunking Rules
253
+ --------------
254
+
255
+ Test the different rule constructors & __repr__ methods:
256
+
257
+ >>> r1 = RegexpChunkRule('<a|b>'+ChunkString.IN_STRIP_PATTERN,
258
+ ... '{<a|b>}', 'chunk <a> and <b>')
259
+ >>> r2 = RegexpChunkRule(re.compile('<a|b>'+ChunkString.IN_STRIP_PATTERN),
260
+ ... '{<a|b>}', 'chunk <a> and <b>')
261
+ >>> r3 = ChunkRule('<a|b>', 'chunk <a> and <b>')
262
+ >>> r4 = StripRule('<a|b>', 'strip <a> and <b>')
263
+ >>> r5 = UnChunkRule('<a|b>', 'unchunk <a> and <b>')
264
+ >>> r6 = MergeRule('<a>', '<b>', 'merge <a> w/ <b>')
265
+ >>> r7 = SplitRule('<a>', '<b>', 'split <a> from <b>')
266
+ >>> r8 = ExpandLeftRule('<a>', '<b>', 'expand left <a> <b>')
267
+ >>> r9 = ExpandRightRule('<a>', '<b>', 'expand right <a> <b>')
268
+ >>> for rule in r1, r2, r3, r4, r5, r6, r7, r8, r9:
269
+ ... print(rule)
270
+ <RegexpChunkRule: '<a|b>(?=[^\\}]*(\\{|$))'->'{<a|b>}'>
271
+ <RegexpChunkRule: '<a|b>(?=[^\\}]*(\\{|$))'->'{<a|b>}'>
272
+ <ChunkRule: '<a|b>'>
273
+ <StripRule: '<a|b>'>
274
+ <UnChunkRule: '<a|b>'>
275
+ <MergeRule: '<a>', '<b>'>
276
+ <SplitRule: '<a>', '<b>'>
277
+ <ExpandLeftRule: '<a>', '<b>'>
278
+ <ExpandRightRule: '<a>', '<b>'>
279
+
280
+ `tag_pattern2re_pattern()` complains if the tag pattern looks problematic:
281
+
282
+ >>> tag_pattern2re_pattern('{}')
283
+ Traceback (most recent call last):
284
+ . . .
285
+ ValueError: Bad tag pattern: '{}'
286
+
287
+ RegexpChunkParser
288
+ -----------------
289
+
290
+ A warning is printed when parsing an empty sentence:
291
+
292
+ >>> parser = RegexpChunkParser([ChunkRule('<a>', '')])
293
+ >>> parser.parse(Tree('S', []))
294
+ Warning: parsing empty text
295
+ Tree('S', [])
296
+
297
+ RegexpParser
298
+ ------------
299
+
300
+ >>> parser = RegexpParser('''
301
+ ... NP: {<DT>? <JJ>* <NN>*} # NP
302
+ ... P: {<IN>} # Preposition
303
+ ... V: {<V.*>} # Verb
304
+ ... PP: {<P> <NP>} # PP -> P NP
305
+ ... VP: {<V> <NP|PP>*} # VP -> V (NP|PP)*
306
+ ... ''')
307
+ >>> print(repr(parser))
308
+ <chunk.RegexpParser with 5 stages>
309
+ >>> print(parser)
310
+ chunk.RegexpParser with 5 stages:
311
+ RegexpChunkParser with 1 rules:
312
+ NP <ChunkRule: '<DT>? <JJ>* <NN>*'>
313
+ RegexpChunkParser with 1 rules:
314
+ Preposition <ChunkRule: '<IN>'>
315
+ RegexpChunkParser with 1 rules:
316
+ Verb <ChunkRule: '<V.*>'>
317
+ RegexpChunkParser with 1 rules:
318
+ PP -> P NP <ChunkRule: '<P> <NP>'>
319
+ RegexpChunkParser with 1 rules:
320
+ VP -> V (NP|PP)* <ChunkRule: '<V> <NP|PP>*'>
321
+ >>> print(parser.parse(unchunked_text, trace=True))
322
+ # Input:
323
+ <DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>
324
+ # NP:
325
+ {<DT> <NN>} <VBD> <IN> {<DT> <NN>}{<DT> <NN>} <VBD> <.>
326
+ # Input:
327
+ <NP> <VBD> <IN> <NP> <NP> <VBD> <.>
328
+ # Preposition:
329
+ <NP> <VBD> {<IN>} <NP> <NP> <VBD> <.>
330
+ # Input:
331
+ <NP> <VBD> <P> <NP> <NP> <VBD> <.>
332
+ # Verb:
333
+ <NP> {<VBD>} <P> <NP> <NP> {<VBD>} <.>
334
+ # Input:
335
+ <NP> <V> <P> <NP> <NP> <V> <.>
336
+ # PP -> P NP:
337
+ <NP> <V> {<P> <NP>} <NP> <V> <.>
338
+ # Input:
339
+ <NP> <V> <PP> <NP> <V> <.>
340
+ # VP -> V (NP|PP)*:
341
+ <NP> {<V> <PP> <NP>}{<V>} <.>
342
+ (S
343
+ (NP The/DT cat/NN)
344
+ (VP
345
+ (V sat/VBD)
346
+ (PP (P on/IN) (NP the/DT mat/NN))
347
+ (NP the/DT dog/NN))
348
+ (VP (V chewed/VBD))
349
+ ./.)
350
+
351
+ Test parsing of other rule types:
352
+
353
+ >>> print(RegexpParser('''
354
+ ... X:
355
+ ... }<a><b>{ # strip rule
356
+ ... <a>}{<b> # split rule
357
+ ... <a>{}<b> # merge rule
358
+ ... <a>{<b>}<c> # chunk rule w/ context
359
+ ... '''))
360
+ chunk.RegexpParser with 1 stages:
361
+ RegexpChunkParser with 4 rules:
362
+ strip rule <StripRule: '<a><b>'>
363
+ split rule <SplitRule: '<a>', '<b>'>
364
+ merge rule <MergeRule: '<a>', '<b>'>
365
+ chunk rule w/ context <ChunkRuleWithContext: '<a>', '<b>', '<c>'>
366
+
367
+ Illegal patterns give an error message:
368
+
369
+ >>> print(RegexpParser('X: {<foo>} {<bar>}'))
370
+ Traceback (most recent call last):
371
+ . . .
372
+ ValueError: Illegal chunk pattern: {<foo>} {<bar>}
llmeval-env/lib/python3.10/site-packages/nltk/test/classify.doctest ADDED
@@ -0,0 +1,202 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =============
5
+ Classifiers
6
+ =============
7
+
8
+ >>> from nltk.test.classify_fixt import setup_module
9
+ >>> setup_module()
10
+
11
+ Classifiers label tokens with category labels (or *class labels*).
12
+ Typically, labels are represented with strings (such as ``"health"``
13
+ or ``"sports"``. In NLTK, classifiers are defined using classes that
14
+ implement the `ClassifierI` interface, which supports the following operations:
15
+
16
+ - self.classify(featureset)
17
+ - self.classify_many(featuresets)
18
+ - self.labels()
19
+ - self.prob_classify(featureset)
20
+ - self.prob_classify_many(featuresets)
21
+
22
+ NLTK defines several classifier classes:
23
+
24
+ - `ConditionalExponentialClassifier`
25
+ - `DecisionTreeClassifier`
26
+ - `MaxentClassifier`
27
+ - `NaiveBayesClassifier`
28
+ - `WekaClassifier`
29
+
30
+ Classifiers are typically created by training them on a training
31
+ corpus.
32
+
33
+
34
+ Regression Tests
35
+ ~~~~~~~~~~~~~~~~
36
+
37
+ We define a very simple training corpus with 3 binary features: ['a',
38
+ 'b', 'c'], and are two labels: ['x', 'y']. We use a simple feature set so
39
+ that the correct answers can be calculated analytically (although we
40
+ haven't done this yet for all tests).
41
+
42
+ >>> import nltk
43
+ >>> train = [
44
+ ... (dict(a=1,b=1,c=1), 'y'),
45
+ ... (dict(a=1,b=1,c=1), 'x'),
46
+ ... (dict(a=1,b=1,c=0), 'y'),
47
+ ... (dict(a=0,b=1,c=1), 'x'),
48
+ ... (dict(a=0,b=1,c=1), 'y'),
49
+ ... (dict(a=0,b=0,c=1), 'y'),
50
+ ... (dict(a=0,b=1,c=0), 'x'),
51
+ ... (dict(a=0,b=0,c=0), 'x'),
52
+ ... (dict(a=0,b=1,c=1), 'y'),
53
+ ... (dict(a=None,b=1,c=0), 'x'),
54
+ ... ]
55
+ >>> test = [
56
+ ... (dict(a=1,b=0,c=1)), # unseen
57
+ ... (dict(a=1,b=0,c=0)), # unseen
58
+ ... (dict(a=0,b=1,c=1)), # seen 3 times, labels=y,y,x
59
+ ... (dict(a=0,b=1,c=0)), # seen 1 time, label=x
60
+ ... ]
61
+
62
+ Test the Naive Bayes classifier:
63
+
64
+ >>> classifier = nltk.classify.NaiveBayesClassifier.train(train)
65
+ >>> sorted(classifier.labels())
66
+ ['x', 'y']
67
+ >>> classifier.classify_many(test)
68
+ ['y', 'x', 'y', 'x']
69
+ >>> for pdist in classifier.prob_classify_many(test):
70
+ ... print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y')))
71
+ 0.2500 0.7500
72
+ 0.5833 0.4167
73
+ 0.3571 0.6429
74
+ 0.7000 0.3000
75
+ >>> classifier.show_most_informative_features()
76
+ Most Informative Features
77
+ c = 0 x : y = 2.3 : 1.0
78
+ c = 1 y : x = 1.8 : 1.0
79
+ a = 1 y : x = 1.7 : 1.0
80
+ a = 0 x : y = 1.0 : 1.0
81
+ b = 0 x : y = 1.0 : 1.0
82
+ b = 1 x : y = 1.0 : 1.0
83
+
84
+ Test the Decision Tree classifier (without None):
85
+
86
+ >>> classifier = nltk.classify.DecisionTreeClassifier.train(
87
+ ... train[:-1], entropy_cutoff=0,
88
+ ... support_cutoff=0)
89
+ >>> sorted(classifier.labels())
90
+ ['x', 'y']
91
+ >>> print(classifier)
92
+ c=0? .................................................. x
93
+ a=0? ................................................ x
94
+ a=1? ................................................ y
95
+ c=1? .................................................. y
96
+ <BLANKLINE>
97
+ >>> classifier.classify_many(test)
98
+ ['y', 'y', 'y', 'x']
99
+ >>> for pdist in classifier.prob_classify_many(test):
100
+ ... print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y')))
101
+ Traceback (most recent call last):
102
+ . . .
103
+ NotImplementedError
104
+
105
+
106
+ Test the Decision Tree classifier (with None):
107
+
108
+ >>> classifier = nltk.classify.DecisionTreeClassifier.train(
109
+ ... train, entropy_cutoff=0,
110
+ ... support_cutoff=0)
111
+ >>> sorted(classifier.labels())
112
+ ['x', 'y']
113
+ >>> print(classifier)
114
+ c=0? .................................................. x
115
+ a=0? ................................................ x
116
+ a=1? ................................................ y
117
+ a=None? ............................................. x
118
+ c=1? .................................................. y
119
+ <BLANKLINE>
120
+
121
+
122
+ Test SklearnClassifier, which requires the scikit-learn package.
123
+
124
+ >>> from nltk.classify import SklearnClassifier
125
+ >>> from sklearn.naive_bayes import BernoulliNB
126
+ >>> from sklearn.svm import SVC
127
+ >>> train_data = [({"a": 4, "b": 1, "c": 0}, "ham"),
128
+ ... ({"a": 5, "b": 2, "c": 1}, "ham"),
129
+ ... ({"a": 0, "b": 3, "c": 4}, "spam"),
130
+ ... ({"a": 5, "b": 1, "c": 1}, "ham"),
131
+ ... ({"a": 1, "b": 4, "c": 3}, "spam")]
132
+ >>> classif = SklearnClassifier(BernoulliNB()).train(train_data)
133
+ >>> test_data = [{"a": 3, "b": 2, "c": 1},
134
+ ... {"a": 0, "b": 3, "c": 7}]
135
+ >>> classif.classify_many(test_data)
136
+ ['ham', 'spam']
137
+ >>> classif = SklearnClassifier(SVC(), sparse=False).train(train_data)
138
+ >>> classif.classify_many(test_data)
139
+ ['ham', 'spam']
140
+
141
+ Test the Maximum Entropy classifier training algorithms; they should all
142
+ generate the same results.
143
+
144
+ >>> def print_maxent_test_header():
145
+ ... print(' '*11+''.join([' test[%s] ' % i
146
+ ... for i in range(len(test))]))
147
+ ... print(' '*11+' p(x) p(y)'*len(test))
148
+ ... print('-'*(11+15*len(test)))
149
+
150
+ >>> def test_maxent(algorithm):
151
+ ... print('%11s' % algorithm, end=' ')
152
+ ... try:
153
+ ... classifier = nltk.classify.MaxentClassifier.train(
154
+ ... train, algorithm, trace=0, max_iter=1000)
155
+ ... except Exception as e:
156
+ ... print('Error: %r' % e)
157
+ ... return
158
+ ...
159
+ ... for featureset in test:
160
+ ... pdist = classifier.prob_classify(featureset)
161
+ ... print('%8.2f%6.2f' % (pdist.prob('x'), pdist.prob('y')), end=' ')
162
+ ... print()
163
+
164
+ >>> print_maxent_test_header(); test_maxent('GIS'); test_maxent('IIS')
165
+ test[0] test[1] test[2] test[3]
166
+ p(x) p(y) p(x) p(y) p(x) p(y) p(x) p(y)
167
+ -----------------------------------------------------------------------
168
+ GIS 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24
169
+ IIS 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24
170
+
171
+ >>> test_maxent('MEGAM'); test_maxent('TADM') # doctest: +SKIP
172
+ MEGAM 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24
173
+ TADM 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24
174
+
175
+
176
+
177
+ Regression tests for TypedMaxentFeatureEncoding
178
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
179
+
180
+ >>> from nltk.classify import maxent
181
+ >>> train = [
182
+ ... ({'a': 1, 'b': 1, 'c': 1}, 'y'),
183
+ ... ({'a': 5, 'b': 5, 'c': 5}, 'x'),
184
+ ... ({'a': 0.9, 'b': 0.9, 'c': 0.9}, 'y'),
185
+ ... ({'a': 5.5, 'b': 5.4, 'c': 5.3}, 'x'),
186
+ ... ({'a': 0.8, 'b': 1.2, 'c': 1}, 'y'),
187
+ ... ({'a': 5.1, 'b': 4.9, 'c': 5.2}, 'x')
188
+ ... ]
189
+
190
+ >>> test = [
191
+ ... {'a': 1, 'b': 0.8, 'c': 1.2},
192
+ ... {'a': 5.2, 'b': 5.1, 'c': 5}
193
+ ... ]
194
+
195
+ >>> encoding = maxent.TypedMaxentFeatureEncoding.train(
196
+ ... train, count_cutoff=3, alwayson_features=True)
197
+
198
+ >>> classifier = maxent.MaxentClassifier.train(
199
+ ... train, bernoulli=False, encoding=encoding, trace=0)
200
+
201
+ >>> classifier.classify_many(test)
202
+ ['y', 'x']
llmeval-env/lib/python3.10/site-packages/nltk/test/classify_fixt.py ADDED
@@ -0,0 +1,5 @@
1
+ # most of classify.doctest requires numpy
2
+ def setup_module():
3
+ import pytest
4
+
5
+ pytest.importorskip("numpy")
llmeval-env/lib/python3.10/site-packages/nltk/test/collections.doctest ADDED
@@ -0,0 +1,31 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ===========
5
+ Collections
6
+ ===========
7
+
8
+ >>> import nltk
9
+ >>> from nltk.collections import *
10
+
11
+ Trie
12
+ ----
13
+
14
+ Trie can be pickled:
15
+
16
+ >>> import pickle
17
+ >>> trie = nltk.collections.Trie(['a'])
18
+ >>> s = pickle.dumps(trie)
19
+ >>> pickle.loads(s)
20
+ {'a': {True: None}}
21
+
22
+ LazyIteratorList
23
+ ----------------
24
+
25
+ Fetching the length of a LazyIteratorList object does not throw a StopIteration exception:
26
+
27
+ >>> lil = LazyIteratorList(i for i in range(1, 11))
28
+ >>> lil[-1]
29
+ 10
30
+ >>> len(lil)
31
+ 10
llmeval-env/lib/python3.10/site-packages/nltk/test/concordance.doctest ADDED
@@ -0,0 +1,75 @@
1
+ .. Copyright (C) 2001-2016 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==================================
5
+ Concordance Example
6
+ ==================================
7
+
8
+ A concordance view shows us every occurrence of a given
9
+ word, together with some context. Here we look up the word monstrous
10
+ in Moby Dick by building a ``Text`` object from the corpus and calling
11
+ its ``concordance()`` method with "monstrous" as the argument:
12
+
13
+ >>> from nltk.corpus import gutenberg
14
+ >>> from nltk.text import Text
15
+ >>> corpus = gutenberg.words('melville-moby_dick.txt')
16
+ >>> text = Text(corpus)
17
+
18
+ >>> text.concordance("monstrous")
19
+ Displaying 11 of 11 matches:
20
+ ong the former , one was of a most monstrous size . ... This came towards us ,
21
+ ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
22
+ ll over with a heathenish array of monstrous clubs and spears . Some were thick
23
+ d as you gazed , and wondered what monstrous cannibal and savage could ever hav
24
+ that has survived the flood ; most monstrous and most mountainous ! That Himmal
25
+ they might scout at Moby Dick as a monstrous fable , or still worse and more de
26
+ th of Radney .'" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l
27
+ ing Scenes . In connexion with the monstrous pictures of whales , I am strongly
28
+ ere to enter upon those still more monstrous stories of them which are to be fo
29
+ ght have been rummaged out of this monstrous cabinet there is no telling . But
30
+ of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u
31
+
32
+ >>> text.concordance("monstrous")
33
+ Displaying 11 of 11 matches:
34
+ ong the former , one was of a most monstrous size . ... This came towards us ,
35
+ ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r
36
+ ll over with a heathenish array of monstrous clubs and spears . Some were thick
37
+ ...
38
+
39
+ We can also search for a multi-word phrase by passing a list of strings:
40
+
41
+ >>> text.concordance(["monstrous", "size"])
42
+ Displaying 2 of 2 matches:
43
+ the former , one was of a most monstrous size . ... This came towards us , op
44
+ Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead upo
45
+
46
+ =================================
47
+ Concordance List
48
+ =================================
49
+
50
+ Often we need to store the results of a concordance for further use.
51
+ To do so, call ``concordance_list()``, which returns the matching
52
+ lines instead of printing them:
53
+
54
+ >>> from nltk.corpus import gutenberg
55
+ >>> from nltk.text import Text
56
+ >>> corpus = gutenberg.words('melville-moby_dick.txt')
57
+ >>> text = Text(corpus)
58
+ >>> con_list = text.concordance_list("monstrous")
59
+ >>> con_list[2].line
60
+ 'll over with a heathenish array of monstrous clubs and spears . Some were thick'
61
+ >>> len(con_list)
62
+ 11
63
+
64
+ =================================
65
+ Patching Issue #2088
66
+ =================================
67
+
68
+ Patching https://github.com/nltk/nltk/issues/2088
69
+ The left slice of the left context should be clipped to 0 when `i - context` < 0.
70
+
71
+ >>> from nltk import Text, word_tokenize
72
+ >>> jane_eyre = 'Chapter 1\nTHERE was no possibility of taking a walk that day. We had been wandering, indeed, in the leafless shrubbery an hour in the morning; but since dinner (Mrs. Reed, when there was no company, dined early) the cold winter wind had brought with it clouds so sombre, and a rain so penetrating, that further outdoor exercise was now out of the question.'
73
+ >>> text = Text(word_tokenize(jane_eyre))
74
+ >>> text.concordance_list('taking')[0].left
75
+ ['Chapter', '1', 'THERE', 'was', 'no', 'possibility', 'of']
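(Editorial sketch.) Because ``concordance_list()`` returns plain objects with ``left`` and ``line`` attributes, the results can be post-processed like any list; the names ``moby`` and ``whale_hits`` below are just illustrative locals::

    from nltk.corpus import gutenberg
    from nltk.text import Text

    # Sketch: keep only concordance lines whose left context mentions "whale".
    moby = Text(gutenberg.words('melville-moby_dick.txt'))
    hits = moby.concordance_list("monstrous")
    whale_hits = [c.line for c in hits if "whale" in " ".join(c.left).lower()]
    print(len(whale_hits), "of", len(hits), "matches mention 'whale' on the left")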
llmeval-env/lib/python3.10/site-packages/nltk/test/corpus.doctest ADDED
The diff for this file is too large to render.
 
llmeval-env/lib/python3.10/site-packages/nltk/test/crubadan.doctest ADDED
@@ -0,0 +1,65 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ Crubadan Corpus Reader
5
+ ======================
6
+
7
+ Crubadan is an NLTK corpus reader for ngram files provided
8
+ by the Crubadan project. It supports several languages.
9
+
10
+ >>> from nltk.corpus import crubadan
11
+ >>> crubadan.langs()
12
+ ['abk', 'abn',..., 'zpa', 'zul']
13
+
14
+ ----------------------------------------
15
+ Language code mapping and helper methods
16
+ ----------------------------------------
17
+
18
+ The web crawler that generates the 3-gram frequencies works at the
19
+ level of "writing systems" rather than languages. Writing systems
20
+ are assigned internal 2-3 letter codes that require mapping to the
21
+ standard ISO 639-3 codes. For more information, please refer to
22
+ the README in the nltk_data/crubadan folder after installing it.
23
+
24
+ To translate ISO 639-3 codes to "Crubadan Code":
25
+
26
+ >>> crubadan.iso_to_crubadan('eng')
27
+ 'en'
28
+ >>> crubadan.iso_to_crubadan('fra')
29
+ 'fr'
30
+ >>> crubadan.iso_to_crubadan('aaa')
31
+
32
+ In reverse, we can obtain the ISO 639-3 code from the Crubadan code:
33
+
34
+ >>> crubadan.crubadan_to_iso('en')
35
+ 'eng'
36
+ >>> crubadan.crubadan_to_iso('fr')
37
+ 'fra'
38
+ >>> crubadan.crubadan_to_iso('aa')
39
+
40
+ ---------------------------
41
+ Accessing ngram frequencies
42
+ ---------------------------
43
+
44
+ On initialization the reader will create a dictionary of every
45
+ language supported by the Crubadan project, mapping the ISO 639-3
46
+ language code to its corresponding ngram frequency.
47
+
48
+ You can access an individual language's FreqDist and the ngrams within it as follows:
49
+
50
+ >>> english_fd = crubadan.lang_freq('eng')
51
+ >>> english_fd['the']
52
+ 728135
53
+
54
+ The above accesses the FreqDist for English and returns the frequency of the ngram 'the'.
55
+ An ngram that isn't found within the language will return 0:
56
+
57
+ >>> english_fd['sometest']
58
+ 0
59
+
60
+ A language that isn't supported will raise an exception:
61
+
62
+ >>> crubadan.lang_freq('elvish')
63
+ Traceback (most recent call last):
64
+ ...
65
+ RuntimeError: Unsupported language.
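(Editorial sketch.) Since ``lang_freq()`` returns an ordinary ``FreqDist``, the usual frequency-distribution methods apply; for example, the most frequent 3-grams of two languages can be compared directly (assuming the crubadan data package is installed, as above)::

    from nltk.corpus import crubadan

    # Sketch: show the five most frequent character 3-grams per language.
    for iso in ('eng', 'fra'):
        fd = crubadan.lang_freq(iso)
        top = [ngram for ngram, count in fd.most_common(5)]
        print(iso, top)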
llmeval-env/lib/python3.10/site-packages/nltk/test/data.doctest ADDED
@@ -0,0 +1,387 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =========================================
5
+ Loading Resources From the Data Package
6
+ =========================================
7
+
8
+ >>> import nltk.data
9
+
10
+ Overview
11
+ ~~~~~~~~
12
+ The `nltk.data` module contains functions that can be used to load
13
+ NLTK resource files, such as corpora, grammars, and saved processing
14
+ objects.
15
+
16
+ Loading Data Files
17
+ ~~~~~~~~~~~~~~~~~~
18
+ Resources are loaded using the function `nltk.data.load()`, which
19
+ takes as its first argument a URL specifying what file should be
20
+ loaded. The ``nltk:`` protocol loads files from the NLTK data
21
+ distribution:
22
+
23
+ >>> tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle')
24
+ >>> tokenizer.tokenize('Hello. This is a test. It works!')
25
+ ['Hello.', 'This is a test.', 'It works!']
26
+
27
+ It is important to note that there should be no space following the
28
+ colon (':') in the URL; 'nltk: tokenizers/punkt/english.pickle' will
29
+ not work!
30
+
31
+ The ``nltk:`` protocol is used by default if no protocol is specified:
32
+
33
+ >>> nltk.data.load('tokenizers/punkt/english.pickle')
34
+ <nltk.tokenize.punkt.PunktSentenceTokenizer object at ...>
35
+
36
+ But it is also possible to load resources from ``http:``, ``ftp:``,
37
+ and ``file:`` URLs:
38
+
39
+ >>> # Load a grammar from the NLTK webpage.
40
+ >>> cfg = nltk.data.load('https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg')
41
+ >>> print(cfg) # doctest: +ELLIPSIS
42
+ Grammar with 14 productions (start state = S)
43
+ S -> NP VP
44
+ PP -> P NP
45
+ ...
46
+ P -> 'on'
47
+ P -> 'in'
48
+
49
+ >>> # Load a grammar using an absolute path.
50
+ >>> url = 'file:%s' % nltk.data.find('grammars/sample_grammars/toy.cfg')
51
+ >>> url.replace('\\', '/')
52
+ 'file:...toy.cfg'
53
+ >>> print(nltk.data.load(url))
54
+ Grammar with 14 productions (start state = S)
55
+ S -> NP VP
56
+ PP -> P NP
57
+ ...
58
+ P -> 'on'
59
+ P -> 'in'
60
+
61
+ The second argument to the `nltk.data.load()` function specifies the
62
+ file format, which determines how the file's contents are processed
63
+ before they are returned by ``load()``. The formats that are
64
+ currently supported by the data module are described by the dictionary
65
+ `nltk.data.FORMATS`:
66
+
67
+ >>> for format, descr in sorted(nltk.data.FORMATS.items()):
68
+ ... print('{0:<7} {1:}'.format(format, descr))
69
+ cfg A context free grammar.
70
+ fcfg A feature CFG.
71
+ fol A list of first order logic expressions, parsed with
72
+ nltk.sem.logic.Expression.fromstring.
73
+ json A serialized python object, stored using the json module.
74
+ logic A list of first order logic expressions, parsed with
75
+ nltk.sem.logic.LogicParser. Requires an additional logic_parser
76
+ parameter
77
+ pcfg A probabilistic CFG.
78
+ pickle A serialized python object, stored using the pickle
79
+ module.
80
+ raw The raw (byte string) contents of a file.
81
+ text The raw (unicode string) contents of a file.
82
+ val A semantic valuation, parsed by
83
+ nltk.sem.Valuation.fromstring.
84
+ yaml A serialized python object, stored using the yaml module.
85
+
86
+ `nltk.data.load()` will raise a ValueError if a bad format name is
87
+ specified:
88
+
89
+ >>> nltk.data.load('grammars/sample_grammars/toy.cfg', 'bar')
90
+ Traceback (most recent call last):
91
+ . . .
92
+ ValueError: Unknown format type!
93
+
94
+ By default, the ``"auto"`` format is used, which chooses a format
95
+ based on the filename's extension. The mapping from file extensions
96
+ to format names is specified by `nltk.data.AUTO_FORMATS`:
97
+
98
+ >>> for ext, format in sorted(nltk.data.AUTO_FORMATS.items()):
99
+ ... print('.%-7s -> %s' % (ext, format))
100
+ .cfg -> cfg
101
+ .fcfg -> fcfg
102
+ .fol -> fol
103
+ .json -> json
104
+ .logic -> logic
105
+ .pcfg -> pcfg
106
+ .pickle -> pickle
107
+ .text -> text
108
+ .txt -> text
109
+ .val -> val
110
+ .yaml -> yaml
111
+
112
+ If `nltk.data.load()` is unable to determine the format based on the
113
+ filename's extension, it will raise a ValueError:
114
+
115
+ >>> nltk.data.load('foo.bar')
116
+ Traceback (most recent call last):
117
+ . . .
118
+ ValueError: Could not determine format for foo.bar based on its file
119
+ extension; use the "format" argument to specify the format explicitly.
120
+
121
+ Note that by explicitly specifying the ``format`` argument, you can
122
+ override the load method's default processing behavior. For example,
123
+ to get the plain string contents of any file, simply use ``format="text"``:
124
+
125
+ >>> s = nltk.data.load('grammars/sample_grammars/toy.cfg', 'text')
126
+ >>> print(s)
127
+ S -> NP VP
128
+ PP -> P NP
129
+ NP -> Det N | NP PP
130
+ VP -> V NP | VP PP
131
+ ...
132
+
133
+ Making Local Copies
134
+ ~~~~~~~~~~~~~~~~~~~
135
+ .. This will not be visible in the html output: create a tempdir to
136
+ play in.
137
+ >>> import tempfile, os
138
+ >>> tempdir = tempfile.mkdtemp()
139
+ >>> old_dir = os.path.abspath('.')
140
+ >>> os.chdir(tempdir)
141
+
142
+ The function `nltk.data.retrieve()` copies a given resource to a local
143
+ file. This can be useful, for example, if you want to edit one of the
144
+ sample grammars.
145
+
146
+ >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg')
147
+ Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy.cfg'
148
+
149
+ >>> # Simulate editing the grammar.
150
+ >>> with open('toy.cfg') as inp:
151
+ ... s = inp.read().replace('NP', 'DP')
152
+ >>> with open('toy.cfg', 'w') as out:
153
+ ... _bytes_written = out.write(s)
154
+
155
+ >>> # Load the edited grammar, & display it.
156
+ >>> cfg = nltk.data.load('file:///' + os.path.abspath('toy.cfg'))
157
+ >>> print(cfg)
158
+ Grammar with 14 productions (start state = S)
159
+ S -> DP VP
160
+ PP -> P DP
161
+ ...
162
+ P -> 'on'
163
+ P -> 'in'
164
+
165
+ The second argument to `nltk.data.retrieve()` specifies the filename
166
+ for the new copy of the file. By default, the source file's filename
167
+ is used.
168
+
169
+ >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg', 'mytoy.cfg')
170
+ Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'mytoy.cfg'
171
+ >>> os.path.isfile('./mytoy.cfg')
172
+ True
173
+ >>> nltk.data.retrieve('grammars/sample_grammars/np.fcfg')
174
+ Retrieving 'nltk:grammars/sample_grammars/np.fcfg', saving to 'np.fcfg'
175
+ >>> os.path.isfile('./np.fcfg')
176
+ True
177
+
178
+ If a file with the specified (or default) filename already exists in
179
+ the current directory, then `nltk.data.retrieve()` will raise a
180
+ ValueError exception. It will *not* overwrite the file:
181
+
182
+ >>> os.path.isfile('./toy.cfg')
183
+ True
184
+ >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg')
185
+ Traceback (most recent call last):
186
+ . . .
187
+ ValueError: File '...toy.cfg' already exists!
188
+
189
+ .. This will not be visible in the html output: clean up the tempdir.
190
+ >>> os.chdir(old_dir)
191
+ >>> for f in os.listdir(tempdir):
192
+ ... os.remove(os.path.join(tempdir, f))
193
+ >>> os.rmdir(tempdir)
194
+
195
+ Finding Files in the NLTK Data Package
196
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
197
+ The `nltk.data.find()` function searches the NLTK data package for a
198
+ given file, and returns a pointer to that file. This pointer can
199
+ either be a `FileSystemPathPointer` (whose `path` attribute gives the
200
+ absolute path of the file); or a `ZipFilePathPointer`, specifying a
201
+ zipfile and the name of an entry within that zipfile. Both pointer
202
+ types define the `open()` method, which can be used to read the string
203
+ contents of the file.
204
+
205
+ >>> path = nltk.data.find('corpora/abc/rural.txt')
206
+ >>> str(path)
207
+ '...rural.txt'
208
+ >>> print(path.open().read(60).decode())
209
+ PM denies knowledge of AWB kickbacks
210
+ The Prime Minister has
211
+
212
+ Alternatively, the `nltk.data.load()` function can be used with the
213
+ keyword argument ``format="raw"``:
214
+
215
+ >>> s = nltk.data.load('corpora/abc/rural.txt', format='raw')[:60]
216
+ >>> print(s.decode())
217
+ PM denies knowledge of AWB kickbacks
218
+ The Prime Minister has
219
+
220
+ Alternatively, you can use the keyword argument ``format="text"``:
221
+
222
+ >>> s = nltk.data.load('corpora/abc/rural.txt', format='text')[:60]
223
+ >>> print(s)
224
+ PM denies knowledge of AWB kickbacks
225
+ The Prime Minister has
226
+
227
+ Resource Caching
228
+ ~~~~~~~~~~~~~~~~
229
+
230
+ NLTK maintains a cache of resources that
231
+ have been loaded. If you load a resource that is already stored in
232
+ the cache, then the cached copy will be returned. This behavior can
233
+ be seen by the trace output generated when verbose=True:
234
+
235
+ >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True)
236
+ <<Loading nltk:grammars/book_grammars/feat0.fcfg>>
237
+ >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True)
238
+ <<Using cached copy of nltk:grammars/book_grammars/feat0.fcfg>>
239
+
240
+ If you wish to load a resource from its source, bypassing the cache,
241
+ use the ``cache=False`` argument to `nltk.data.load()`. This can be
242
+ useful, for example, if the resource is loaded from a local file, and
243
+ you are actively editing that file:
244
+
245
+ >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg',cache=False,verbose=True)
246
+ <<Loading nltk:grammars/book_grammars/feat0.fcfg>>
247
+
248
+ The cache *no longer* uses weak references. A resource will not be
249
+ automatically expunged from the cache when no more objects are using
250
+ it. In the following example, when we clear the variable ``feat0``,
251
+ the reference count for the feature grammar object drops to zero.
252
+ However, the object remains cached:
253
+
254
+ >>> del feat0
255
+ >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg',
256
+ ... verbose=True)
257
+ <<Using cached copy of nltk:grammars/book_grammars/feat0.fcfg>>
258
+
259
+ You can clear the entire contents of the cache, using
260
+ `nltk.data.clear_cache()`:
261
+
262
+ >>> nltk.data.clear_cache()
263
+
264
+ Retrieving other Data Sources
265
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
266
+ >>> formulas = nltk.data.load('grammars/book_grammars/background.fol')
267
+ >>> for f in formulas: print(str(f))
268
+ all x.(boxerdog(x) -> dog(x))
269
+ all x.(boxer(x) -> person(x))
270
+ all x.-(dog(x) & person(x))
271
+ all x.(married(x) <-> exists y.marry(x,y))
272
+ all x.(bark(x) -> dog(x))
273
+ all x y.(marry(x,y) -> (person(x) & person(y)))
274
+ -(Vincent = Mia)
275
+ -(Vincent = Fido)
276
+ -(Mia = Fido)
277
+
278
+ Regression Tests
279
+ ~~~~~~~~~~~~~~~~
280
+ Create a temp dir for tests that write files:
281
+
282
+ >>> import tempfile, os
283
+ >>> tempdir = tempfile.mkdtemp()
284
+ >>> old_dir = os.path.abspath('.')
285
+ >>> os.chdir(tempdir)
286
+
287
+ The `retrieve()` function accepts all url types:
288
+
289
+ >>> urls = ['https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg',
290
+ ... 'file:%s' % nltk.data.find('grammars/sample_grammars/toy.cfg'),
291
+ ... 'nltk:grammars/sample_grammars/toy.cfg',
292
+ ... 'grammars/sample_grammars/toy.cfg']
293
+ >>> for i, url in enumerate(urls):
294
+ ... nltk.data.retrieve(url, 'toy-%d.cfg' % i)
295
+ Retrieving 'https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg', saving to 'toy-0.cfg'
296
+ Retrieving 'file:...toy.cfg', saving to 'toy-1.cfg'
297
+ Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy-2.cfg'
298
+ Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy-3.cfg'
299
+
300
+ Clean up the temp dir:
301
+
302
+ >>> os.chdir(old_dir)
303
+ >>> for f in os.listdir(tempdir):
304
+ ... os.remove(os.path.join(tempdir, f))
305
+ >>> os.rmdir(tempdir)
306
+
307
+ Lazy Loader
308
+ -----------
309
+ A lazy loader is a wrapper object that defers loading a resource until
310
+ it is accessed or used in any way. This is mainly intended for
311
+ internal use by NLTK's corpus readers.
312
+
313
+ >>> # Create a lazy loader for toy.cfg.
314
+ >>> ll = nltk.data.LazyLoader('grammars/sample_grammars/toy.cfg')
315
+
316
+ >>> # Show that it's not loaded yet:
317
+ >>> object.__repr__(ll)
318
+ '<nltk.data.LazyLoader object at ...>'
319
+
320
+ >>> # printing it is enough to cause it to be loaded:
321
+ >>> print(ll)
322
+ <Grammar with 14 productions>
323
+
324
+ >>> # Show that it's now been loaded:
325
+ >>> object.__repr__(ll)
326
+ '<nltk.grammar.CFG object at ...>'
327
+
328
+
329
+ >>> # Test that accessing an attribute also loads it:
330
+ >>> ll = nltk.data.LazyLoader('grammars/sample_grammars/toy.cfg')
331
+ >>> ll.start()
332
+ S
333
+ >>> object.__repr__(ll)
334
+ '<nltk.grammar.CFG object at ...>'
335
+
336
+ Buffered Gzip Reading and Writing
337
+ ---------------------------------
338
+ Write performance to gzip-compressed files is extremely poor when the files become large.
339
+ File creation can become a bottleneck in those cases.
340
+
341
+ Read performance from large gzipped pickle files was improved in data.py by
342
+ buffering the reads. A similar fix can be applied to writes by buffering
343
+ the writes to a StringIO object first.
344
+
345
+ This is mainly intended for internal use. The test simply tests that reading
346
+ and writing work as intended and does not test how much improvement buffering
347
+ provides.
348
+
349
+ >>> from io import StringIO
350
+ >>> test = nltk.data.BufferedGzipFile('testbuf.gz', 'wb', size=2**10)
351
+ >>> ans = []
352
+ >>> for i in range(10000):
353
+ ... ans.append(str(i).encode('ascii'))
354
+ ... test.write(str(i).encode('ascii'))
355
+ >>> test.close()
356
+ >>> test = nltk.data.BufferedGzipFile('testbuf.gz', 'rb')
357
+ >>> test.read() == b''.join(ans)
358
+ True
359
+ >>> test.close()
360
+ >>> import os
361
+ >>> os.unlink('testbuf.gz')
362
+
363
+ JSON Encoding and Decoding
364
+ --------------------------
365
+ JSON serialization is used instead of pickle for some classes.
366
+
367
+ >>> from nltk import jsontags
368
+ >>> from nltk.jsontags import JSONTaggedEncoder, JSONTaggedDecoder, register_tag
369
+ >>> @jsontags.register_tag
370
+ ... class JSONSerializable:
371
+ ... json_tag = 'JSONSerializable'
372
+ ...
373
+ ... def __init__(self, n):
374
+ ... self.n = n
375
+ ...
376
+ ... def encode_json_obj(self):
377
+ ... return self.n
378
+ ...
379
+ ... @classmethod
380
+ ... def decode_json_obj(cls, obj):
381
+ ... n = obj
382
+ ... return cls(n)
383
+ ...
384
+ >>> JSONTaggedEncoder().encode(JSONSerializable(1))
385
+ '{"!JSONSerializable": 1}'
386
+ >>> JSONTaggedDecoder().decode('{"!JSONSerializable": 1}').n
387
+ 1
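(Editorial note on the ``"auto"`` format selection described earlier in this file.) The extension-to-format mapping is just the ``nltk.data.AUTO_FORMATS`` dictionary, so the choice ``load()`` will make can be previewed without loading anything; ``guess_format`` below is a hypothetical helper, not part of ``nltk.data``::

    import os
    import nltk.data

    # Sketch: mimic the "auto" format choice by looking up the file extension.
    def guess_format(resource_name):
        ext = os.path.splitext(resource_name)[1].lstrip('.')
        return nltk.data.AUTO_FORMATS.get(ext, 'unknown')

    print(guess_format('grammars/sample_grammars/toy.cfg'))  # 'cfg'
    print(guess_format('corpora/abc/rural.txt'))             # 'text'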
llmeval-env/lib/python3.10/site-packages/nltk/test/drt.doctest ADDED
@@ -0,0 +1,515 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ================================
5
+ Discourse Representation Theory
6
+ ================================
7
+
8
+ >>> from nltk.sem import logic
9
+ >>> from nltk.inference import TableauProver
10
+
11
+ Overview
12
+ ========
13
+
14
+ A DRS can be created with the ``DRS()`` constructor. This takes two arguments: a list of
15
+ discourse referents and a list of conditions.
16
+
17
+ >>> from nltk.sem.drt import *
18
+ >>> dexpr = DrtExpression.fromstring
19
+ >>> man_x = dexpr('man(x)')
20
+ >>> walk_x = dexpr('walk(x)')
21
+ >>> x = dexpr('x')
22
+ >>> print(DRS([x], [man_x, walk_x]))
23
+ ([x],[man(x), walk(x)])
24
+
25
+ DRS expressions can also be parsed directly from strings with
26
+ ``DrtExpression.fromstring()`` (``dexpr`` above), which allows them to
27
+ be specified more easily.
28
+
29
+ >>> drs1 = dexpr('([x],[man(x),walk(x)])')
30
+ >>> print(drs1)
31
+ ([x],[man(x), walk(x)])
32
+
33
+ DRSs can be *merged* using the ``+`` operator.
34
+
35
+ >>> drs2 = dexpr('([y],[woman(y),stop(y)])')
36
+ >>> drs3 = drs1 + drs2
37
+ >>> print(drs3)
38
+ (([x],[man(x), walk(x)]) + ([y],[woman(y), stop(y)]))
39
+ >>> print(drs3.simplify())
40
+ ([x,y],[man(x), walk(x), woman(y), stop(y)])
41
+
42
+ We can embed DRSs as components of an ``implies`` condition.
43
+
44
+ >>> s = '([], [(%s -> %s)])' % (drs1, drs2)
45
+ >>> print(dexpr(s))
46
+ ([],[(([x],[man(x), walk(x)]) -> ([y],[woman(y), stop(y)]))])
47
+
48
+ The ``fol()`` method converts DRSs into FOL formulae.
49
+
50
+ >>> print(dexpr(r'([x],[man(x), walks(x)])').fol())
51
+ exists x.(man(x) & walks(x))
52
+ >>> print(dexpr(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])').fol())
53
+ all x.(man(x) -> walks(x))
54
+
55
+ In order to visualize a DRS, the ``pretty_format()`` method can be used.
56
+
57
+ >>> print(drs3.pretty_format())
58
+ _________ __________
59
+ | x | | y |
60
+ (|---------| + |----------|)
61
+ | man(x) | | woman(y) |
62
+ | walk(x) | | stop(y) |
63
+ |_________| |__________|
64
+
65
+
66
+ Parse to semantics
67
+ ------------------
68
+
69
+ ..
70
+ >>> logic._counter._value = 0
71
+
72
+ DRSs can be used for building compositional semantics in a feature
73
+ based grammar. To specify that we want to use DRSs, the appropriate
74
+ logic parser needs to be passed as a parameter to ``load_parser()``
75
+
76
+ >>> from nltk.parse import load_parser
77
+ >>> from nltk.sem.drt import DrtParser
78
+ >>> parser = load_parser('grammars/book_grammars/drt.fcfg', trace=0, logic_parser=DrtParser())
79
+ >>> for tree in parser.parse('a dog barks'.split()):
80
+ ... print(tree.label()['SEM'].simplify())
81
+ ...
82
+ ([x],[dog(x), bark(x)])
83
+
84
+ Alternatively, a ``FeatStructReader`` can be passed with the ``logic_parser`` set on it
85
+
86
+ >>> from nltk.featstruct import FeatStructReader
87
+ >>> from nltk.grammar import FeatStructNonterminal
88
+ >>> parser = load_parser('grammars/book_grammars/drt.fcfg', trace=0, fstruct_reader=FeatStructReader(fdict_class=FeatStructNonterminal, logic_parser=DrtParser()))
89
+ >>> for tree in parser.parse('every girl chases a dog'.split()):
90
+ ... print(tree.label()['SEM'].simplify().normalize())
91
+ ...
92
+ ([],[(([z1],[girl(z1)]) -> ([z2],[dog(z2), chase(z1,z2)]))])
93
+
94
+
95
+
96
+ Unit Tests
97
+ ==========
98
+
99
+ Parser
100
+ ------
101
+
102
+ >>> print(dexpr(r'([x,y],[sees(x,y)])'))
103
+ ([x,y],[sees(x,y)])
104
+ >>> print(dexpr(r'([x],[man(x), walks(x)])'))
105
+ ([x],[man(x), walks(x)])
106
+ >>> print(dexpr(r'\x.([],[man(x), walks(x)])'))
107
+ \x.([],[man(x), walks(x)])
108
+ >>> print(dexpr(r'\x.\y.([],[sees(x,y)])'))
109
+ \x y.([],[sees(x,y)])
110
+
111
+ >>> print(dexpr(r'([x,y],[(x = y)])'))
112
+ ([x,y],[(x = y)])
113
+ >>> print(dexpr(r'([x,y],[(x != y)])'))
114
+ ([x,y],[-(x = y)])
115
+
116
+ >>> print(dexpr(r'\x.([],[walks(x)])(john)'))
117
+ (\x.([],[walks(x)]))(john)
118
+ >>> print(dexpr(r'\R.\x.([],[big(x,R)])(\y.([],[mouse(y)]))'))
119
+ (\R x.([],[big(x,R)]))(\y.([],[mouse(y)]))
120
+
121
+ >>> print(dexpr(r'(([x],[walks(x)]) + ([y],[runs(y)]))'))
122
+ (([x],[walks(x)]) + ([y],[runs(y)]))
123
+ >>> print(dexpr(r'(([x,y],[walks(x), jumps(y)]) + (([z],[twos(z)]) + ([w],[runs(w)])))'))
124
+ (([x,y],[walks(x), jumps(y)]) + ([z],[twos(z)]) + ([w],[runs(w)]))
125
+ >>> print(dexpr(r'((([],[walks(x)]) + ([],[twos(x)])) + ([],[runs(x)]))'))
126
+ (([],[walks(x)]) + ([],[twos(x)]) + ([],[runs(x)]))
127
+ >>> print(dexpr(r'((([],[walks(x)]) + ([],[runs(x)])) + (([],[threes(x)]) + ([],[fours(x)])))'))
128
+ (([],[walks(x)]) + ([],[runs(x)]) + ([],[threes(x)]) + ([],[fours(x)]))
129
+
130
+ >>> print(dexpr(r'(([],[walks(x)]) -> ([],[runs(x)]))'))
131
+ (([],[walks(x)]) -> ([],[runs(x)]))
132
+
133
+ >>> print(dexpr(r'([x],[PRO(x), sees(John,x)])'))
134
+ ([x],[PRO(x), sees(John,x)])
135
+ >>> print(dexpr(r'([x],[man(x), -([],[walks(x)])])'))
136
+ ([x],[man(x), -([],[walks(x)])])
137
+ >>> print(dexpr(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])'))
138
+ ([],[(([x],[man(x)]) -> ([],[walks(x)]))])
139
+
140
+ >>> print(dexpr(r'DRS([x],[walk(x)])'))
141
+ ([x],[walk(x)])
142
+ >>> print(dexpr(r'DRS([x][walk(x)])'))
143
+ ([x],[walk(x)])
144
+ >>> print(dexpr(r'([x][walk(x)])'))
145
+ ([x],[walk(x)])
146
+
147
+ ``simplify()``
148
+ --------------
149
+
150
+ >>> print(dexpr(r'\x.([],[man(x), walks(x)])(john)').simplify())
151
+ ([],[man(john), walks(john)])
152
+ >>> print(dexpr(r'\x.\y.([z],[dog(z),sees(x,y)])(john)(mary)').simplify())
153
+ ([z],[dog(z), sees(john,mary)])
154
+ >>> print(dexpr(r'\R x.([],[big(x,R)])(\y.([],[mouse(y)]))').simplify())
155
+ \x.([],[big(x,\y.([],[mouse(y)]))])
156
+
157
+ >>> print(dexpr(r'(([x],[walks(x)]) + ([y],[runs(y)]))').simplify())
158
+ ([x,y],[walks(x), runs(y)])
159
+ >>> print(dexpr(r'(([x,y],[walks(x), jumps(y)]) + (([z],[twos(z)]) + ([w],[runs(w)])))').simplify())
160
+ ([w,x,y,z],[walks(x), jumps(y), twos(z), runs(w)])
161
+ >>> print(dexpr(r'((([],[walks(x)]) + ([],[runs(x)]) + ([],[threes(x)]) + ([],[fours(x)])))').simplify())
162
+ ([],[walks(x), runs(x), threes(x), fours(x)])
163
+ >>> dexpr(r'([x],[man(x)])+([x],[walks(x)])').simplify() == \
164
+ ... dexpr(r'([x,z1],[man(x), walks(z1)])')
165
+ True
166
+ >>> dexpr(r'([y],[boy(y), (([x],[dog(x)]) -> ([],[chase(x,y)]))])+([x],[run(x)])').simplify() == \
167
+ ... dexpr(r'([y,z1],[boy(y), (([x],[dog(x)]) -> ([],[chase(x,y)])), run(z1)])')
168
+ True
169
+
170
+ >>> dexpr(r'\Q.(([x],[john(x),walks(x)]) + Q)(([x],[PRO(x),leaves(x)]))').simplify() == \
171
+ ... dexpr(r'([x,z1],[john(x), walks(x), PRO(z1), leaves(z1)])')
172
+ True
173
+
174
+ >>> logic._counter._value = 0
175
+ >>> print(dexpr('([],[(([x],[dog(x)]) -> ([e,y],[boy(y), chase(e), subj(e,x), obj(e,y)]))])+([e,x],[PRO(x), run(e), subj(e,x)])').simplify().normalize().normalize())
176
+ ([e02,z5],[(([z3],[dog(z3)]) -> ([e01,z4],[boy(z4), chase(e01), subj(e01,z3), obj(e01,z4)])), PRO(z5), run(e02), subj(e02,z5)])
177
+
178
+ ``fol()``
179
+ -----------
180
+
181
+ >>> print(dexpr(r'([x,y],[sees(x,y)])').fol())
182
+ exists x y.sees(x,y)
183
+ >>> print(dexpr(r'([x],[man(x), walks(x)])').fol())
184
+ exists x.(man(x) & walks(x))
185
+ >>> print(dexpr(r'\x.([],[man(x), walks(x)])').fol())
186
+ \x.(man(x) & walks(x))
187
+ >>> print(dexpr(r'\x y.([],[sees(x,y)])').fol())
188
+ \x y.sees(x,y)
189
+
190
+ >>> print(dexpr(r'\x.([],[walks(x)])(john)').fol())
191
+ \x.walks(x)(john)
192
+ >>> print(dexpr(r'\R x.([],[big(x,R)])(\y.([],[mouse(y)]))').fol())
193
+ (\R x.big(x,R))(\y.mouse(y))
194
+
195
+ >>> print(dexpr(r'(([x],[walks(x)]) + ([y],[runs(y)]))').fol())
196
+ (exists x.walks(x) & exists y.runs(y))
197
+
198
+ >>> print(dexpr(r'(([],[walks(x)]) -> ([],[runs(x)]))').fol())
199
+ (walks(x) -> runs(x))
200
+
201
+ >>> print(dexpr(r'([x],[PRO(x), sees(John,x)])').fol())
202
+ exists x.(PRO(x) & sees(John,x))
203
+ >>> print(dexpr(r'([x],[man(x), -([],[walks(x)])])').fol())
204
+ exists x.(man(x) & -walks(x))
205
+ >>> print(dexpr(r'([],[(([x],[man(x)]) -> ([],[walks(x)]))])').fol())
206
+ all x.(man(x) -> walks(x))
207
+
208
+ >>> print(dexpr(r'([x],[man(x) | walks(x)])').fol())
209
+ exists x.(man(x) | walks(x))
210
+ >>> print(dexpr(r'P(x) + ([x],[walks(x)])').fol())
211
+ (P(x) & exists x.walks(x))
212
+
213
+ ``resolve_anaphora()``
214
+ ----------------------
215
+
216
+ >>> from nltk.sem.drt import AnaphoraResolutionException
217
+
218
+ >>> print(resolve_anaphora(dexpr(r'([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])')))
219
+ ([x,y,z],[dog(x), cat(y), walks(z), (z = [x,y])])
220
+ >>> print(resolve_anaphora(dexpr(r'([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])')))
221
+ ([],[(([x],[dog(x)]) -> ([y],[walks(y), (y = x)]))])
222
+ >>> print(resolve_anaphora(dexpr(r'(([x,y],[]) + ([],[PRO(x)]))')).simplify())
223
+ ([x,y],[(x = y)])
224
+ >>> try: print(resolve_anaphora(dexpr(r'([x],[walks(x), PRO(x)])')))
225
+ ... except AnaphoraResolutionException as e: print(e)
226
+ Variable 'x' does not resolve to anything.
227
+ >>> print(resolve_anaphora(dexpr('([e01,z6,z7],[boy(z6), PRO(z7), run(e01), subj(e01,z7)])')))
228
+ ([e01,z6,z7],[boy(z6), (z7 = z6), run(e01), subj(e01,z7)])
229
+
230
+ ``equiv()``:
231
+ ----------------
232
+
233
+ >>> a = dexpr(r'([x],[man(x), walks(x)])')
234
+ >>> b = dexpr(r'([x],[walks(x), man(x)])')
235
+ >>> print(a.equiv(b, TableauProver()))
236
+ True
237
+
238
+
239
+ ``replace()``:
240
+ --------------
241
+
242
+ >>> a = dexpr(r'a')
243
+ >>> w = dexpr(r'w')
244
+ >>> x = dexpr(r'x')
245
+ >>> y = dexpr(r'y')
246
+ >>> z = dexpr(r'z')
247
+
248
+
249
+ replace bound
250
+ -------------
251
+
252
+ >>> print(dexpr(r'([x],[give(x,y,z)])').replace(x.variable, a, False))
253
+ ([x],[give(x,y,z)])
254
+ >>> print(dexpr(r'([x],[give(x,y,z)])').replace(x.variable, a, True))
255
+ ([a],[give(a,y,z)])
256
+
257
+ replace unbound
258
+ ---------------
259
+
260
+ >>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, a, False))
261
+ ([x],[give(x,a,z)])
262
+ >>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, a, True))
263
+ ([x],[give(x,a,z)])
264
+
265
+ replace unbound with bound
266
+ --------------------------
267
+
268
+ >>> dexpr(r'([x],[give(x,y,z)])').replace(y.variable, x, False) == \
269
+ ... dexpr('([z1],[give(z1,x,z)])')
270
+ True
271
+ >>> dexpr(r'([x],[give(x,y,z)])').replace(y.variable, x, True) == \
272
+ ... dexpr('([z1],[give(z1,x,z)])')
273
+ True
274
+
275
+ replace unbound with unbound
276
+ ----------------------------
277
+
278
+ >>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, z, False))
279
+ ([x],[give(x,z,z)])
280
+ >>> print(dexpr(r'([x],[give(x,y,z)])').replace(y.variable, z, True))
281
+ ([x],[give(x,z,z)])
282
+
283
+
284
+ replace unbound
285
+ ---------------
286
+
287
+ >>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, False))
288
+ (([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))
289
+ >>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, True))
290
+ (([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))
291
+
292
+ replace bound
293
+ -------------
294
+
295
+ >>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(x.variable, a, False))
296
+ (([x],[P(x,y,z)]) + ([y],[Q(x,y,z)]))
297
+ >>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(x.variable, a, True))
298
+ (([a],[P(a,y,z)]) + ([y],[Q(a,y,z)]))
299
+
300
+ replace unbound with unbound
301
+ ----------------------------
302
+
303
+ >>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, False))
304
+ (([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))
305
+ >>> print(dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,z)])').replace(z.variable, a, True))
306
+ (([x],[P(x,y,a)]) + ([y],[Q(x,y,a)]))
307
+
308
+ replace unbound with bound on same side
309
+ ---------------------------------------
310
+
311
+ >>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(z.variable, x, False) == \
312
+ ... dexpr(r'(([z1],[P(z1,y,x)]) + ([y],[Q(z1,y,w)]))')
313
+ True
314
+ >>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(z.variable, x, True) == \
315
+ ... dexpr(r'(([z1],[P(z1,y,x)]) + ([y],[Q(z1,y,w)]))')
316
+ True
317
+
318
+ replace unbound with bound on other side
319
+ ----------------------------------------
320
+
321
+ >>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(w.variable, x, False) == \
322
+ ... dexpr(r'(([z1],[P(z1,y,z)]) + ([y],[Q(z1,y,x)]))')
323
+ True
324
+ >>> dexpr(r'([x],[P(x,y,z)])+([y],[Q(x,y,w)])').replace(w.variable, x, True) == \
325
+ ... dexpr(r'(([z1],[P(z1,y,z)]) + ([y],[Q(z1,y,x)]))')
326
+ True
327
+
328
+ replace unbound with double bound
329
+ ---------------------------------
330
+
331
+ >>> dexpr(r'([x],[P(x,y,z)])+([x],[Q(x,y,w)])').replace(z.variable, x, False) == \
332
+ ... dexpr(r'(([z1],[P(z1,y,x)]) + ([z1],[Q(z1,y,w)]))')
333
+ True
334
+ >>> dexpr(r'([x],[P(x,y,z)])+([x],[Q(x,y,w)])').replace(z.variable, x, True) == \
335
+ ... dexpr(r'(([z1],[P(z1,y,x)]) + ([z1],[Q(z1,y,w)]))')
336
+ True
337
+
338
+
339
+ regression tests
340
+ ----------------
341
+
342
+ >>> d = dexpr('([x],[A(c), ([y],[B(x,y,z,a)])->([z],[C(x,y,z,a)])])')
343
+ >>> print(d)
344
+ ([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
345
+ >>> print(d.pretty_format())
346
+ ____________________________________
347
+ | x |
348
+ |------------------------------------|
349
+ | A(c) |
350
+ | ____________ ____________ |
351
+ | | y | | z | |
352
+ | (|------------| -> |------------|) |
353
+ | | B(x,y,z,a) | | C(x,y,z,a) | |
354
+ | |____________| |____________| |
355
+ |____________________________________|
356
+ >>> print(str(d))
357
+ ([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
358
+ >>> print(d.fol())
359
+ exists x.(A(c) & all y.(B(x,y,z,a) -> exists z.C(x,y,z,a)))
360
+ >>> print(d.replace(Variable('a'), DrtVariableExpression(Variable('r'))))
361
+ ([x],[A(c), (([y],[B(x,y,z,r)]) -> ([z],[C(x,y,z,r)]))])
362
+ >>> print(d.replace(Variable('x'), DrtVariableExpression(Variable('r'))))
363
+ ([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
364
+ >>> print(d.replace(Variable('y'), DrtVariableExpression(Variable('r'))))
365
+ ([x],[A(c), (([y],[B(x,y,z,a)]) -> ([z],[C(x,y,z,a)]))])
366
+ >>> print(d.replace(Variable('z'), DrtVariableExpression(Variable('r'))))
367
+ ([x],[A(c), (([y],[B(x,y,r,a)]) -> ([z],[C(x,y,z,a)]))])
368
+ >>> print(d.replace(Variable('x'), DrtVariableExpression(Variable('r')), True))
369
+ ([r],[A(c), (([y],[B(r,y,z,a)]) -> ([z],[C(r,y,z,a)]))])
370
+ >>> print(d.replace(Variable('y'), DrtVariableExpression(Variable('r')), True))
371
+ ([x],[A(c), (([r],[B(x,r,z,a)]) -> ([z],[C(x,r,z,a)]))])
372
+ >>> print(d.replace(Variable('z'), DrtVariableExpression(Variable('r')), True))
373
+ ([x],[A(c), (([y],[B(x,y,r,a)]) -> ([r],[C(x,y,r,a)]))])
374
+ >>> print(d == dexpr('([l],[A(c), ([m],[B(l,m,z,a)])->([n],[C(l,m,n,a)])])'))
375
+ True
376
+ >>> d = dexpr('([],[([x,y],[B(x,y,h), ([a,b],[dee(x,a,g)])])->([z,w],[cee(x,y,f), ([c,d],[E(x,c,d,e)])])])')
377
+ >>> sorted(d.free())
378
+ [Variable('B'), Variable('E'), Variable('e'), Variable('f'), Variable('g'), Variable('h')]
379
+ >>> sorted(d.variables())
380
+ [Variable('B'), Variable('E'), Variable('e'), Variable('f'), Variable('g'), Variable('h')]
381
+ >>> sorted(d.get_refs(True))
382
+ [Variable('a'), Variable('b'), Variable('c'), Variable('d'), Variable('w'), Variable('x'), Variable('y'), Variable('z')]
383
+ >>> sorted(d.conds[0].get_refs(False))
384
+ [Variable('x'), Variable('y')]
385
+ >>> print(dexpr('([x,y],[A(x,y), (x=y), ([],[B(x,y)])->([],[C(x,y)]), ([x,y],[D(x,y)])->([],[E(x,y)]), ([],[F(x,y)])->([x,y],[G(x,y)])])').eliminate_equality())
386
+ ([x],[A(x,x), (([],[B(x,x)]) -> ([],[C(x,x)])), (([x,y],[D(x,y)]) -> ([],[E(x,y)])), (([],[F(x,x)]) -> ([x,y],[G(x,y)]))])
387
+ >>> print(dexpr('([x,y],[A(x,y), (x=y)]) -> ([],[B(x,y)])').eliminate_equality())
388
+ (([x],[A(x,x)]) -> ([],[B(x,x)]))
389
+ >>> print(dexpr('([x,y],[A(x,y)]) -> ([],[B(x,y), (x=y)])').eliminate_equality())
390
+ (([x,y],[A(x,y)]) -> ([],[B(x,x)]))
391
+ >>> print(dexpr('([x,y],[A(x,y), (x=y), ([],[B(x,y)])])').eliminate_equality())
392
+ ([x],[A(x,x), ([],[B(x,x)])])
393
+ >>> print(dexpr('([x,y],[A(x,y), ([],[B(x,y), (x=y)])])').eliminate_equality())
394
+ ([x,y],[A(x,y), ([],[B(x,x)])])
395
+ >>> print(dexpr('([z8 z9 z10],[A(z8), z8=z10, z9=z10, B(z9), C(z10), D(z10)])').eliminate_equality())
396
+ ([z9],[A(z9), B(z9), C(z9), D(z9)])
397
+
398
+ >>> print(dexpr('([x,y],[A(x,y), (x=y), ([],[B(x,y)]), ([x,y],[C(x,y)])])').eliminate_equality())
399
+ ([x],[A(x,x), ([],[B(x,x)]), ([x,y],[C(x,y)])])
400
+ >>> print(dexpr('([x,y],[A(x,y)]) + ([],[B(x,y), (x=y)]) + ([],[C(x,y)])').eliminate_equality())
401
+ ([x],[A(x,x), B(x,x), C(x,x)])
402
+ >>> print(dexpr('([x,y],[B(x,y)])+([x,y],[C(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))))
403
+ (([x,y],[B(x,y)]) + ([x,y],[C(x,y)]))
404
+ >>> print(dexpr('(([x,y],[B(x,y)])+([],[C(x,y)]))+([],[D(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))))
405
+ (([x,y],[B(x,y)]) + ([],[C(x,y)]) + ([],[D(x,y)]))
406
+ >>> print(dexpr('(([],[B(x,y)])+([],[C(x,y)]))+([],[D(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))))
407
+ (([],[B(x,x)]) + ([],[C(x,x)]) + ([],[D(x,x)]))
408
+ >>> print(dexpr('(([],[B(x,y), ([x,y],[A(x,y)])])+([],[C(x,y)]))+([],[D(x,y)])').replace(Variable('y'), DrtVariableExpression(Variable('x'))).normalize())
409
+ (([],[B(z3,z1), ([z2,z3],[A(z3,z2)])]) + ([],[C(z3,z1)]) + ([],[D(z3,z1)]))
410
+
411
+
412
+ Parse errors
413
+ ============
414
+
415
+ >>> def parse_error(drtstring):
416
+ ... try: dexpr(drtstring)
417
+ ... except logic.LogicalExpressionException as e: print(e)
418
+
419
+ >>> parse_error(r'')
420
+ End of input found. Expression expected.
421
+ <BLANKLINE>
422
+ ^
423
+ >>> parse_error(r'(')
424
+ End of input found. Expression expected.
425
+ (
426
+ ^
427
+ >>> parse_error(r'()')
428
+ Unexpected token: ')'. Expression expected.
429
+ ()
430
+ ^
431
+ >>> parse_error(r'([')
432
+ End of input found. Expected token ']'.
433
+ ([
434
+ ^
435
+ >>> parse_error(r'([,')
436
+ ',' is an illegal variable name. Constants may not be quantified.
437
+ ([,
438
+ ^
439
+ >>> parse_error(r'([x,')
440
+ End of input found. Variable expected.
441
+ ([x,
442
+ ^
443
+ >>> parse_error(r'([]')
444
+ End of input found. Expected token '['.
445
+ ([]
446
+ ^
447
+ >>> parse_error(r'([][')
448
+ End of input found. Expected token ']'.
449
+ ([][
450
+ ^
451
+ >>> parse_error(r'([][,')
452
+ Unexpected token: ','. Expression expected.
453
+ ([][,
454
+ ^
455
+ >>> parse_error(r'([][]')
456
+ End of input found. Expected token ')'.
457
+ ([][]
458
+ ^
459
+ >>> parse_error(r'([x][man(x)]) |')
460
+ End of input found. Expression expected.
461
+ ([x][man(x)]) |
462
+ ^
463
+
464
+ Pretty Printing
465
+ ===============
466
+
467
+ >>> dexpr(r"([],[])").pretty_print()
468
+ __
469
+ | |
470
+ |--|
471
+ |__|
472
+
473
+ >>> dexpr(r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])").pretty_print()
474
+ _____________________________
475
+ | |
476
+ |-----------------------------|
477
+ | ________ _________ |
478
+ | | x | | | |
479
+ | (|--------| -> |---------|) |
480
+ | | big(x) | | bark(x) | |
481
+ | | dog(x) | |_________| |
482
+ | |________| |
483
+ | _________ |
484
+ | | x | |
485
+ | __ |---------| |
486
+ | | | walk(x) | |
487
+ | |_________| |
488
+ |_____________________________|
489
+
490
+ >>> dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pretty_print()
491
+ _________ _________
492
+ | x y | | z |
493
+ (|---------| + |---------|)
494
+ | (x = y) | | dog(z) |
495
+ |_________| | walk(z) |
496
+ |_________|
497
+
498
+ >>> dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pretty_print()
499
+ _______________________________
500
+ | |
501
+ |-------------------------------|
502
+ | ___ ___ _________ |
503
+ | | x | | y | | z | |
504
+ | (|---| | |---| | |---------|) |
505
+ | |___| |___| | dog(z) | |
506
+ | | walk(z) | |
507
+ | |_________| |
508
+ |_______________________________|
509
+
510
+ >>> dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pretty_print()
511
+ ___ ________
512
+ \ | x | \ | |
513
+ /\ P Q.(|---| + P(x) + Q(x))( /\ x.|--------|)
514
+ |___| | dog(x) |
515
+ |________|
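(Editorial sketch tying the pieces above together: merging, ``resolve_anaphora()`` and ``fol()``.) The exact referent chosen for the pronoun depends on accessibility, so only the shape of the output is indicated in the comments::

    from nltk.sem.drt import DrtExpression, resolve_anaphora

    dexpr = DrtExpression.fromstring

    # Sketch: merge a sentence DRS with a pronoun DRS, resolve the pronoun,
    # then translate the result to first-order logic.
    drs = (dexpr('([x],[dog(x), bark(x)])') + dexpr('([y],[PRO(y), run(y)])')).simplify()
    resolved = resolve_anaphora(drs)
    print(resolved)        # PRO(y) becomes an equation with an accessible referent
    print(resolved.fol())  # existentially quantified first-order translation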
llmeval-env/lib/python3.10/site-packages/nltk/test/featstruct.doctest ADDED
@@ -0,0 +1,1229 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==================================
5
+ Feature Structures & Unification
6
+ ==================================
7
+ >>> from nltk.featstruct import FeatStruct
8
+ >>> from nltk.sem.logic import Variable, VariableExpression, Expression
9
+
10
+ .. note:: For now, featstruct uses the older lambdalogic semantics
11
+ module. Eventually, it should be updated to use the new first
12
+ order predicate logic module.
13
+
14
+ Overview
15
+ ~~~~~~~~
16
+ A feature structure is a mapping from feature identifiers to feature
17
+ values, where feature values can be simple values (like strings or
18
+ ints), nested feature structures, or variables:
19
+
20
+ >>> fs1 = FeatStruct(number='singular', person=3)
21
+ >>> print(fs1)
22
+ [ number = 'singular' ]
23
+ [ person = 3 ]
24
+
25
+ Feature structures may be nested:
26
+
27
+ >>> fs2 = FeatStruct(type='NP', agr=fs1)
28
+ >>> print(fs2)
29
+ [ agr = [ number = 'singular' ] ]
30
+ [ [ person = 3 ] ]
31
+ [ ]
32
+ [ type = 'NP' ]
33
+
34
+ Variables are used to indicate that two features should be assigned
35
+ the same value. For example, the following feature structure requires
36
+ that the feature fs3['agr']['number'] be bound to the same value as the
37
+ feature fs3['subj']['number'].
38
+
39
+ >>> fs3 = FeatStruct(agr=FeatStruct(number=Variable('?n')),
40
+ ... subj=FeatStruct(number=Variable('?n')))
41
+ >>> print(fs3)
42
+ [ agr = [ number = ?n ] ]
43
+ [ ]
44
+ [ subj = [ number = ?n ] ]
45
+
46
+ Feature structures are typically used to represent partial information
47
+ about objects. A feature name that is not mapped to a value stands
48
+ for a feature whose value is unknown (*not* a feature without a
49
+ value). Two feature structures that represent (potentially
50
+ overlapping) information about the same object can be combined by
51
+ *unification*.
52
+
53
+ >>> print(fs2.unify(fs3))
54
+ [ agr = [ number = 'singular' ] ]
55
+ [ [ person = 3 ] ]
56
+ [ ]
57
+ [ subj = [ number = 'singular' ] ]
58
+ [ ]
59
+ [ type = 'NP' ]
60
+
61
+ When two inconsistent feature structures are unified, the unification
62
+ fails and returns ``None``.
63
+
64
+ >>> fs4 = FeatStruct(agr=FeatStruct(person=1))
65
+ >>> print(fs4.unify(fs2))
66
+ None
67
+ >>> print(fs2.unify(fs4))
68
+ None
69
+
70
+ ..
71
+ >>> del fs1, fs2, fs3, fs4 # clean-up
72
+
73
+ Feature Structure Types
74
+ -----------------------
75
+ There are actually two types of feature structure:
76
+
77
+ - *feature dictionaries*, implemented by `FeatDict`, act like
78
+ Python dictionaries. Feature identifiers may be strings or
79
+ instances of the `Feature` class.
80
+ - *feature lists*, implemented by `FeatList`, act like Python
81
+ lists. Feature identifiers are integers.
82
+
83
+ When you construct a feature structure using the `FeatStruct`
84
+ constructor, it will automatically decide which type is appropriate:
85
+
86
+ >>> type(FeatStruct(number='singular'))
87
+ <class 'nltk.featstruct.FeatDict'>
88
+ >>> type(FeatStruct([1,2,3]))
89
+ <class 'nltk.featstruct.FeatList'>
90
+
91
+ Usually, we will just use feature dictionaries; but sometimes feature
92
+ lists can be useful too. Two feature lists will unify with each other
93
+ only if they have equal lengths, and all of their feature values
94
+ match. If you wish to write a feature list that contains 'unknown'
95
+ values, you must use variables:
96
+
97
+ >>> fs1 = FeatStruct([1,2,Variable('?y')])
98
+ >>> fs2 = FeatStruct([1,Variable('?x'),3])
99
+ >>> fs1.unify(fs2)
100
+ [1, 2, 3]
101
+
102
+ ..
103
+ >>> del fs1, fs2 # clean-up
104
+
105
+ Parsing Feature Structure Strings
106
+ ---------------------------------
107
+ Feature structures can be constructed directly from strings. Often,
108
+ this is more convenient than constructing them directly. NLTK can
109
+ parse most feature strings to produce the corresponding feature
110
+ structures. (But you must restrict your base feature values to
111
+ strings, ints, logic expressions (`nltk.sem.logic.Expression`), and a
112
+ few other types discussed below).
113
+
114
+ Feature dictionaries are written like Python dictionaries, except that
115
+ keys are not put in quotes; and square brackets (``[]``) are used
116
+ instead of braces (``{}``):
117
+
118
+ >>> FeatStruct('[tense="past", agr=[number="sing", person=3]]')
119
+ [agr=[number='sing', person=3], tense='past']
120
+
121
+ If a feature value is a single alphanumeric word, then it does not
122
+ need to be quoted -- it will be automatically treated as a string:
123
+
124
+ >>> FeatStruct('[tense=past, agr=[number=sing, person=3]]')
125
+ [agr=[number='sing', person=3], tense='past']
126
+
127
+ Feature lists are written like python lists:
128
+
129
+ >>> FeatStruct('[1, 2, 3]')
130
+ [1, 2, 3]
131
+
132
+ The expression ``[]`` is treated as an empty feature dictionary, not
133
+ an empty feature list:
134
+
135
+ >>> type(FeatStruct('[]'))
136
+ <class 'nltk.featstruct.FeatDict'>
137
+
138
+ Feature Paths
139
+ -------------
140
+ Features can be specified using *feature paths*, or tuples of feature
141
+ identifiers that specify a path through the nested feature structures to
142
+ a value.
143
+
144
+ >>> fs1 = FeatStruct('[x=1, y=[1,2,[z=3]]]')
145
+ >>> fs1['y']
146
+ [1, 2, [z=3]]
147
+ >>> fs1['y', 2]
148
+ [z=3]
149
+ >>> fs1['y', 2, 'z']
150
+ 3
151
+
152
+ ..
153
+ >>> del fs1 # clean-up
154
+
155
+ Reentrance
156
+ ----------
157
+ Feature structures may contain reentrant feature values. A *reentrant
158
+ feature value* is a single feature structure that can be accessed via
159
+ multiple feature paths.
160
+
161
+ >>> fs1 = FeatStruct(x='val')
162
+ >>> fs2 = FeatStruct(a=fs1, b=fs1)
163
+ >>> print(fs2)
164
+ [ a = (1) [ x = 'val' ] ]
165
+ [ ]
166
+ [ b -> (1) ]
167
+ >>> fs2
168
+ [a=(1)[x='val'], b->(1)]
169
+
170
+ As you can see, reentrance is displayed by marking a feature structure
171
+ with a unique identifier, in this case ``(1)``, the first time it is
172
+ encountered; and then using the special form ``var -> id`` whenever it
173
+ is encountered again. You can use the same notation to directly
174
+ create reentrant feature structures from strings.
175
+
176
+ >>> FeatStruct('[a=(1)[], b->(1), c=[d->(1)]]')
177
+ [a=(1)[], b->(1), c=[d->(1)]]
178
+
179
+ Reentrant feature structures may contain cycles:
180
+
181
+ >>> fs3 = FeatStruct('(1)[a->(1)]')
182
+ >>> fs3['a', 'a', 'a', 'a']
183
+ (1)[a->(1)]
184
+ >>> fs3['a', 'a', 'a', 'a'] is fs3
185
+ True
186
+
187
+ Unification preserves the reentrance relations imposed by both of the
188
+ unified feature structures. In the feature structure resulting from
189
+ unification, any modifications to a reentrant feature value will be
190
+ visible using any of its feature paths.
191
+
192
+ >>> fs3.unify(FeatStruct('[a=[b=12], c=33]'))
193
+ (1)[a->(1), b=12, c=33]
194
+
195
+ ..
196
+ >>> del fs1, fs2, fs3 # clean-up
197
+
198
+ Feature Structure Equality
199
+ --------------------------
200
+ Two feature structures are considered equal if they assign the same
201
+ values to all features, *and* they contain the same reentrances.
202
+
203
+ >>> fs1 = FeatStruct('[a=(1)[x=1], b->(1)]')
204
+ >>> fs2 = FeatStruct('[a=(1)[x=1], b->(1)]')
205
+ >>> fs3 = FeatStruct('[a=[x=1], b=[x=1]]')
206
+ >>> fs1 == fs1, fs1 is fs1
207
+ (True, True)
208
+ >>> fs1 == fs2, fs1 is fs2
209
+ (True, False)
210
+ >>> fs1 == fs3, fs1 is fs3
211
+ (False, False)
212
+
213
+ Note that this differs from how Python dictionaries and lists define
214
+ equality -- in particular, Python dictionaries and lists ignore
215
+ reentrance relations. To test two feature structures for equality
216
+ while ignoring reentrance relations, use the `equal_values()` method:
217
+
218
+ >>> fs1.equal_values(fs1)
219
+ True
220
+ >>> fs1.equal_values(fs2)
221
+ True
222
+ >>> fs1.equal_values(fs3)
223
+ True
224
+
225
+ ..
226
+ >>> del fs1, fs2, fs3 # clean-up
227
+
228
+ Feature Value Sets & Feature Value Tuples
229
+ -----------------------------------------
230
+ `nltk.featstruct` defines two new data types that are intended to be
231
+ used as feature values: `FeatureValueTuple` and `FeatureValueSet`.
232
+ Both of these types are considered base values -- i.e., unification
233
+ does *not* apply to them. However, variable binding *does* apply to
234
+ any values that they contain.
235
+
236
+ Feature value tuples are written with parentheses:
237
+
238
+ >>> fs1 = FeatStruct('[x=(?x, ?y)]')
239
+ >>> fs1
240
+ [x=(?x, ?y)]
241
+ >>> fs1.substitute_bindings({Variable('?x'): 1, Variable('?y'): 2})
242
+ [x=(1, 2)]
243
+
244
+ Feature sets are written with braces:
245
+
246
+ >>> fs1 = FeatStruct('[x={?x, ?y}]')
247
+ >>> fs1
248
+ [x={?x, ?y}]
249
+ >>> fs1.substitute_bindings({Variable('?x'): 1, Variable('?y'): 2})
250
+ [x={1, 2}]
251
+
252
+ In addition to the basic feature value tuple & set classes, nltk
253
+ defines feature value unions (for sets) and feature value
254
+ concatenations (for tuples). These are written using '+', and can be
255
+ used to combine sets & tuples:
256
+
257
+ >>> fs1 = FeatStruct('[x=((1, 2)+?z), z=?z]')
258
+ >>> fs1
259
+ [x=((1, 2)+?z), z=?z]
260
+ >>> fs1.unify(FeatStruct('[z=(3, 4, 5)]'))
261
+ [x=(1, 2, 3, 4, 5), z=(3, 4, 5)]
262
+
263
+ Thus, feature value tuples and sets can be used to build up tuples
264
+ and sets of values over the course of unification. For example, when
265
+ parsing sentences using a semantic feature grammar, feature sets or
266
+ feature tuples can be used to build a list of semantic predicates as
267
+ the sentence is parsed.
268
+
269
+ As was mentioned above, unification does not apply to feature value
270
+ tuples and sets. One reason for this is that it's impossible to define a
271
+ single correct answer for unification when concatenation is used.
272
+ Consider the following example:
273
+
274
+ >>> fs1 = FeatStruct('[x=(1, 2, 3, 4)]')
275
+ >>> fs2 = FeatStruct('[x=(?a+?b), a=?a, b=?b]')
276
+
277
+ If unification applied to feature tuples, then the unification
278
+ algorithm would have to arbitrarily choose how to divide the tuple
279
+ (1,2,3,4) into two parts. Instead, the unification algorithm refuses
280
+ to make this decision, and simply unifies based on value. Because
281
+ (1,2,3,4) is not equal to (?a+?b), fs1 and fs2 will not unify:
282
+
283
+ >>> print(fs1.unify(fs2))
284
+ None
285
+
286
+ If you need a list-like structure that unification does apply to, use
287
+ `FeatList`.
288
+
289
+ ..
290
+ >>> del fs1, fs2 # clean-up
291
+
292
+ Light-weight Feature Structures
293
+ -------------------------------
294
+ Many of the functions defined by `nltk.featstruct` can be applied
295
+ directly to simple Python dictionaries and lists, rather than to
296
+ full-fledged `FeatDict` and `FeatList` objects. In other words,
297
+ Python ``dicts`` and ``lists`` can be used as "light-weight" feature
298
+ structures.
299
+
300
+ >>> # Note: pprint prints dicts sorted
301
+ >>> from pprint import pprint
302
+ >>> from nltk.featstruct import unify
303
+ >>> pprint(unify(dict(x=1, y=dict()), dict(a='a', y=dict(b='b'))))
304
+ {'a': 'a', 'x': 1, 'y': {'b': 'b'}}
305
+
306
+ However, you should keep in mind the following caveats:
307
+
308
+ - Python dictionaries & lists ignore reentrance when checking for
309
+ equality between values. But two FeatStructs with different
310
+ reentrances are considered nonequal, even if all their base
311
+ values are equal.
312
+
313
+ - FeatStructs can be easily frozen, allowing them to be used as
314
+ keys in hash tables. Python dictionaries and lists can not.
315
+
316
+ - FeatStructs display reentrance in their string representations;
317
+ Python dictionaries and lists do not.
318
+
319
+ - FeatStructs may *not* be mixed with Python dictionaries and lists
320
+ (e.g., when performing unification).
321
+
322
+ - FeatStructs provide a number of useful methods, such as `walk()`
323
+ and `cyclic()`, which are not available for Python dicts & lists.
324
+
325
+ In general, if your feature structures will contain any reentrances,
326
+ or if you plan to use them as dictionary keys, it is strongly
327
+ recommended that you use full-fledged `FeatStruct` objects.
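+
+ For instance, a frozen `FeatStruct` can serve as a dictionary key (a
+ minimal sketch with hypothetical names; freezing and hashing are
+ exercised in the regression tests below):
+
+ | agr = FeatStruct('[number="sg", person=3]')
+ | agr.freeze()
+ | paradigm = {agr: 'third person singular'}   # hashable only because it is frozen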
328
+
329
+ Custom Feature Values
330
+ ---------------------
331
+ The abstract base class `CustomFeatureValue` can be used to define new
332
+ base value types that have custom unification methods. For example,
333
+ the following feature value type encodes a range, and defines
334
+ unification as taking the intersection on the ranges:
335
+
336
+ >>> from functools import total_ordering
337
+ >>> from nltk.featstruct import CustomFeatureValue, UnificationFailure
338
+ >>> @total_ordering
339
+ ... class Range(CustomFeatureValue):
340
+ ... def __init__(self, low, high):
341
+ ... assert low <= high
342
+ ... self.low = low
343
+ ... self.high = high
344
+ ... def unify(self, other):
345
+ ... if not isinstance(other, Range):
346
+ ... return UnificationFailure
347
+ ... low = max(self.low, other.low)
348
+ ... high = min(self.high, other.high)
349
+ ... if low <= high: return Range(low, high)
350
+ ... else: return UnificationFailure
351
+ ... def __repr__(self):
352
+ ... return '(%s<x<%s)' % (self.low, self.high)
353
+ ... def __eq__(self, other):
354
+ ... if not isinstance(other, Range):
355
+ ... return False
356
+ ... return (self.low == other.low) and (self.high == other.high)
357
+ ... def __lt__(self, other):
358
+ ... if not isinstance(other, Range):
359
+ ... return True
360
+ ... return (self.low, self.high) < (other.low, other.high)
361
+
362
+ >>> fs1 = FeatStruct(x=Range(5,8), y=FeatStruct(z=Range(7,22)))
363
+ >>> print(fs1.unify(FeatStruct(x=Range(6, 22))))
364
+ [ x = (6<x<8) ]
365
+ [ ]
366
+ [ y = [ z = (7<x<22) ] ]
367
+ >>> print(fs1.unify(FeatStruct(x=Range(9, 12))))
368
+ None
369
+ >>> print(fs1.unify(FeatStruct(x=12)))
370
+ None
371
+ >>> print(fs1.unify(FeatStruct('[x=?x, y=[z=?x]]')))
372
+ [ x = (7<x<8) ]
373
+ [ ]
374
+ [ y = [ z = (7<x<8) ] ]
375
+
376
+ Regression Tests
377
+ ~~~~~~~~~~~~~~~~
378
+
379
+ Dictionary access methods (non-mutating)
380
+ ----------------------------------------
381
+
382
+ >>> fs1 = FeatStruct(a=1, b=2, c=3)
383
+ >>> fs2 = FeatStruct(x=fs1, y='x')
384
+
385
+ Feature structures support all dictionary methods (excluding the class
386
+ method `dict.fromkeys()`). Non-mutating methods:
387
+
388
+ >>> sorted(fs2.keys()) # keys()
389
+ ['x', 'y']
390
+ >>> sorted(fs2.values()) # values()
391
+ [[a=1, b=2, c=3], 'x']
392
+ >>> sorted(fs2.items()) # items()
393
+ [('x', [a=1, b=2, c=3]), ('y', 'x')]
394
+ >>> sorted(fs2) # __iter__()
395
+ ['x', 'y']
396
+ >>> 'a' in fs2, 'x' in fs2 # __contains__()
397
+ (False, True)
398
+ >>> fs2.has_key('a'), fs2.has_key('x') # has_key()
399
+ (False, True)
400
+ >>> fs2['x'], fs2['y'] # __getitem__()
401
+ ([a=1, b=2, c=3], 'x')
402
+ >>> fs2['a'] # __getitem__()
403
+ Traceback (most recent call last):
404
+ . . .
405
+ KeyError: 'a'
406
+ >>> fs2.get('x'), fs2.get('y'), fs2.get('a') # get()
407
+ ([a=1, b=2, c=3], 'x', None)
408
+ >>> fs2.get('x', 'hello'), fs2.get('a', 'hello') # get()
409
+ ([a=1, b=2, c=3], 'hello')
410
+ >>> len(fs1), len(fs2) # __len__
411
+ (3, 2)
412
+ >>> fs2.copy() # copy()
413
+ [x=[a=1, b=2, c=3], y='x']
414
+ >>> fs2.copy() is fs2 # copy()
415
+ False
416
+
417
+ Note: by default, `FeatStruct.copy()` does a deep copy. Use
418
+ `FeatStruct.copy(deep=False)` for a shallow copy.
419
+
420
+ ..
421
+ >>> del fs1, fs2 # clean-up.
422
+
423
+ Dictionary access methods (mutating)
424
+ ------------------------------------
425
+ >>> fs1 = FeatStruct(a=1, b=2, c=3)
426
+ >>> fs2 = FeatStruct(x=fs1, y='x')
427
+
428
+ Setting features (`__setitem__()`)
429
+
430
+ >>> fs1['c'] = 5
431
+ >>> fs1
432
+ [a=1, b=2, c=5]
433
+ >>> fs1['x'] = 12
434
+ >>> fs1
435
+ [a=1, b=2, c=5, x=12]
436
+ >>> fs2['x', 'a'] = 2
437
+ >>> fs2
438
+ [x=[a=2, b=2, c=5, x=12], y='x']
439
+ >>> fs1
440
+ [a=2, b=2, c=5, x=12]
441
+
442
+ Deleting features (`__delitem__()`)
443
+
444
+ >>> del fs1['x']
445
+ >>> fs1
446
+ [a=2, b=2, c=5]
447
+ >>> del fs2['x', 'a']
448
+ >>> fs1
449
+ [b=2, c=5]
450
+
451
+ `setdefault()`:
452
+
453
+ >>> fs1.setdefault('b', 99)
454
+ 2
455
+ >>> fs1
456
+ [b=2, c=5]
457
+ >>> fs1.setdefault('x', 99)
458
+ 99
459
+ >>> fs1
460
+ [b=2, c=5, x=99]
461
+
462
+ `update()`:
463
+
464
+ >>> fs2.update({'a':'A', 'b':'B'}, c='C')
465
+ >>> fs2
466
+ [a='A', b='B', c='C', x=[b=2, c=5, x=99], y='x']
467
+
468
+ `pop()`:
469
+
470
+ >>> fs2.pop('a')
471
+ 'A'
472
+ >>> fs2
473
+ [b='B', c='C', x=[b=2, c=5, x=99], y='x']
474
+ >>> fs2.pop('a')
475
+ Traceback (most recent call last):
476
+ . . .
477
+ KeyError: 'a'
478
+ >>> fs2.pop('a', 'foo')
479
+ 'foo'
480
+ >>> fs2
481
+ [b='B', c='C', x=[b=2, c=5, x=99], y='x']
482
+
483
+ `clear()`:
484
+
485
+ >>> fs1.clear()
486
+ >>> fs1
487
+ []
488
+ >>> fs2
489
+ [b='B', c='C', x=[], y='x']
490
+
491
+ `popitem()`:
492
+
493
+ >>> sorted([fs2.popitem() for i in range(len(fs2))])
494
+ [('b', 'B'), ('c', 'C'), ('x', []), ('y', 'x')]
495
+ >>> fs2
496
+ []
497
+
498
+ Once a feature structure has been frozen, it may not be mutated.
499
+
500
+ >>> fs1 = FeatStruct('[x=1, y=2, z=[a=3]]')
501
+ >>> fs1.freeze()
502
+ >>> fs1.frozen()
503
+ True
504
+ >>> fs1['z'].frozen()
505
+ True
506
+
507
+ >>> fs1['x'] = 5
508
+ Traceback (most recent call last):
509
+ . . .
510
+ ValueError: Frozen FeatStructs may not be modified.
511
+ >>> del fs1['x']
512
+ Traceback (most recent call last):
513
+ . . .
514
+ ValueError: Frozen FeatStructs may not be modified.
515
+ >>> fs1.clear()
516
+ Traceback (most recent call last):
517
+ . . .
518
+ ValueError: Frozen FeatStructs may not be modified.
519
+ >>> fs1.pop('x')
520
+ Traceback (most recent call last):
521
+ . . .
522
+ ValueError: Frozen FeatStructs may not be modified.
523
+ >>> fs1.popitem()
524
+ Traceback (most recent call last):
525
+ . . .
526
+ ValueError: Frozen FeatStructs may not be modified.
527
+ >>> fs1.setdefault('x')
528
+ Traceback (most recent call last):
529
+ . . .
530
+ ValueError: Frozen FeatStructs may not be modified.
531
+ >>> fs1.update(z=22)
532
+ Traceback (most recent call last):
533
+ . . .
534
+ ValueError: Frozen FeatStructs may not be modified.
535
+
536
+ ..
537
+ >>> del fs1, fs2 # clean-up.
538
+
539
+ Feature Paths
540
+ -------------
541
+ Make sure that __getitem__ with feature paths works as intended:
542
+
543
+ >>> fs1 = FeatStruct(a=1, b=2,
544
+ ... c=FeatStruct(
545
+ ... d=FeatStruct(e=12),
546
+ ... f=FeatStruct(g=55, h='hello')))
547
+ >>> fs1[()]
548
+ [a=1, b=2, c=[d=[e=12], f=[g=55, h='hello']]]
549
+ >>> fs1['a'], fs1[('a',)]
550
+ (1, 1)
551
+ >>> fs1['c','d','e']
552
+ 12
553
+ >>> fs1['c','f','g']
554
+ 55
555
+
556
+ Feature paths that select unknown features raise KeyError:
557
+
558
+ >>> fs1['c', 'f', 'e']
559
+ Traceback (most recent call last):
560
+ . . .
561
+ KeyError: ('c', 'f', 'e')
562
+ >>> fs1['q', 'p']
563
+ Traceback (most recent call last):
564
+ . . .
565
+ KeyError: ('q', 'p')
566
+
567
+ Feature paths that try to go 'through' a feature that's not a feature
568
+ structure raise KeyError:
569
+
570
+ >>> fs1['a', 'b']
571
+ Traceback (most recent call last):
572
+ . . .
573
+ KeyError: ('a', 'b')
574
+
575
+ Feature paths can go through reentrant structures:
576
+
577
+ >>> fs2 = FeatStruct('(1)[a=[b=[c->(1), d=5], e=11]]')
578
+ >>> fs2['a', 'b', 'c', 'a', 'e']
579
+ 11
580
+ >>> fs2['a', 'b', 'c', 'a', 'b', 'd']
581
+ 5
582
+ >>> fs2[tuple('abcabcabcabcabcabcabcabcabcabca')]
583
+ (1)[b=[c=[a->(1)], d=5], e=11]
584
+
585
+ Indexing requires strings, `Feature`\s, or tuples; other types raise a
586
+ TypeError:
587
+
588
+ >>> fs2[12]
589
+ Traceback (most recent call last):
590
+ . . .
591
+ TypeError: Expected feature name or path. Got 12.
592
+ >>> fs2[list('abc')]
593
+ Traceback (most recent call last):
594
+ . . .
595
+ TypeError: Expected feature name or path. Got ['a', 'b', 'c'].
596
+
597
+ Feature paths can also be used with `get()`, `has_key()`, and
598
+ `__contains__()`.
599
+
600
+ >>> fpath1 = tuple('abcabc')
601
+ >>> fpath2 = tuple('abcabz')
602
+ >>> fs2.get(fpath1), fs2.get(fpath2)
603
+ ((1)[a=[b=[c->(1), d=5], e=11]], None)
604
+ >>> fpath1 in fs2, fpath2 in fs2
605
+ (True, False)
606
+ >>> fs2.has_key(fpath1), fs2.has_key(fpath2)
607
+ (True, False)
608
+
609
+ ..
610
+ >>> del fs1, fs2 # clean-up
611
+
612
+ Reading Feature Structures
613
+ --------------------------
614
+
615
+ Empty feature struct:
616
+
617
+ >>> FeatStruct('[]')
618
+ []
619
+
620
+ Test features with integer values:
621
+
622
+ >>> FeatStruct('[a=12, b=-33, c=0]')
623
+ [a=12, b=-33, c=0]
624
+
625
+ Test features with string values. Either single or double quotes may
626
+ be used. Strings are evaluated just like python strings -- in
627
+ particular, you can use escape sequences and 'u' and 'r' prefixes, and
628
+ triple-quoted strings.
629
+
630
+ >>> FeatStruct('[a="", b="hello", c="\'", d=\'\', e=\'"\']')
631
+ [a='', b='hello', c="'", d='', e='"']
632
+ >>> FeatStruct(r'[a="\\", b="\"", c="\x6f\\y", d="12"]')
633
+ [a='\\', b='"', c='o\\y', d='12']
634
+ >>> FeatStruct(r'[b=r"a\b\c"]')
635
+ [b='a\\b\\c']
636
+ >>> FeatStruct('[x="""a"""]')
637
+ [x='a']
638
+
639
+ Test parsing of reentrant feature structures.
640
+
641
+ >>> FeatStruct('[a=(1)[], b->(1)]')
642
+ [a=(1)[], b->(1)]
643
+ >>> FeatStruct('[a=(1)[x=1, y=2], b->(1)]')
644
+ [a=(1)[x=1, y=2], b->(1)]
645
+
646
+ Test parsing of cyclic feature structures.
647
+
648
+ >>> FeatStruct('[a=(1)[b->(1)]]')
649
+ [a=(1)[b->(1)]]
650
+ >>> FeatStruct('(1)[a=[b=[c->(1)]]]')
651
+ (1)[a=[b=[c->(1)]]]
652
+
653
+ Strings of the form "+name" and "-name" may be used to specify boolean
654
+ values.
655
+
656
+ >>> FeatStruct('[-bar, +baz, +foo]')
657
+ [-bar, +baz, +foo]
658
+
659
+ None, True, and False are recognized as values:
660
+
661
+ >>> FeatStruct('[bar=True, baz=False, foo=None]')
662
+ [+bar, -baz, foo=None]
663
+
664
+ Special features:
665
+
666
+ >>> FeatStruct('NP/VP')
667
+ NP[]/VP[]
668
+ >>> FeatStruct('?x/?x')
669
+ ?x[]/?x[]
670
+ >>> print(FeatStruct('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'))
671
+ [ *type* = 'VP' ]
672
+ [ ]
673
+ [ [ *type* = 'NP' ] ]
674
+ [ *slash* = [ agr = ?x ] ]
675
+ [ [ pl = True ] ]
676
+ [ ]
677
+ [ agr = ?x ]
678
+ [ fin = True ]
679
+ [ tense = 'past' ]
680
+
681
+ Here the slash feature gets coerced:
682
+
683
+ >>> FeatStruct('[*slash*=a, x=b, *type*="NP"]')
684
+ NP[x='b']/a[]
685
+
686
+ >>> FeatStruct('NP[sem=<bob>]/NP')
687
+ NP[sem=<bob>]/NP[]
688
+ >>> FeatStruct('S[sem=<walk(bob)>]')
689
+ S[sem=<walk(bob)>]
690
+ >>> print(FeatStruct('NP[sem=<bob>]/NP'))
691
+ [ *type* = 'NP' ]
692
+ [ ]
693
+ [ *slash* = [ *type* = 'NP' ] ]
694
+ [ ]
695
+ [ sem = <bob> ]
696
+
697
+ Playing with ranges:
698
+
699
+ >>> from nltk.featstruct import RangeFeature, FeatStructReader
700
+ >>> width = RangeFeature('width')
701
+ >>> reader = FeatStructReader([width])
702
+ >>> fs1 = reader.fromstring('[*width*=-5:12]')
703
+ >>> fs2 = reader.fromstring('[*width*=2:123]')
704
+ >>> fs3 = reader.fromstring('[*width*=-7:-2]')
705
+ >>> fs1.unify(fs2)
706
+ [*width*=(2, 12)]
707
+ >>> fs1.unify(fs3)
708
+ [*width*=(-5, -2)]
709
+ >>> print(fs2.unify(fs3)) # no overlap in width.
710
+ None
711
+
712
+ The slash feature has a default value of 'False':
713
+
714
+ >>> print(FeatStruct('NP[]/VP').unify(FeatStruct('NP[]'), trace=1))
715
+ <BLANKLINE>
716
+ Unification trace:
717
+ / NP[]/VP[]
718
+ |\ NP[]
719
+ |
720
+ | Unify feature: *type*
721
+ | / 'NP'
722
+ | |\ 'NP'
723
+ | |
724
+ | +-->'NP'
725
+ |
726
+ | Unify feature: *slash*
727
+ | / VP[]
728
+ | |\ False
729
+ | |
730
+ X X <-- FAIL
731
+ None
732
+
733
+ The demo structures from category.py. They all parse, but they don't
+ do quite the right thing -- note ?x vs. x.
735
+
736
+ >>> FeatStruct(pos='n', agr=FeatStruct(number='pl', gender='f'))
737
+ [agr=[gender='f', number='pl'], pos='n']
738
+ >>> FeatStruct(r'NP[sem=<bob>]/NP')
739
+ NP[sem=<bob>]/NP[]
740
+ >>> FeatStruct(r'S[sem=<app(?x, ?y)>]')
741
+ S[sem=<?x(?y)>]
742
+ >>> FeatStruct('?x/?x')
743
+ ?x[]/?x[]
744
+ >>> FeatStruct('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]')
745
+ VP[agr=?x, +fin, tense='past']/NP[agr=?x, +pl]
746
+ >>> FeatStruct('S[sem = <app(?subj, ?vp)>]')
747
+ S[sem=<?subj(?vp)>]
748
+
749
+ >>> FeatStruct('S')
750
+ S[]
751
+
752
+ The parser also includes support for reading sets and tuples.
753
+
754
+ >>> FeatStruct('[x={1,2,2,2}, y={/}]')
755
+ [x={1, 2}, y={/}]
756
+ >>> FeatStruct('[x=(1,2,2,2), y=()]')
757
+ [x=(1, 2, 2, 2), y=()]
758
+ >>> print(FeatStruct('[x=(1,[z=(1,2,?x)],?z,{/})]'))
759
+ [ x = (1, [ z = (1, 2, ?x) ], ?z, {/}) ]
760
+
761
+ Note that we can't put a featstruct inside a set, because doing so
+ would require hashing it, and it's not frozen yet:
763
+
764
+ >>> print(FeatStruct('[x={[]}]'))
765
+ Traceback (most recent call last):
766
+ . . .
767
+ TypeError: FeatStructs must be frozen before they can be hashed.
768
+
769
+ There's a special syntax for taking the union of sets: "{...+...}".
770
+ The elements should only be variables or sets.
771
+
772
+ >>> FeatStruct('[x={?a+?b+{1,2,3}}]')
773
+ [x={?a+?b+{1, 2, 3}}]
774
+
775
+ There's a special syntax for taking the concatenation of tuples:
776
+ "(...+...)". The elements should only be variables or tuples.
777
+
778
+ >>> FeatStruct('[x=(?a+?b+(1,2,3))]')
779
+ [x=(?a+?b+(1, 2, 3))]
780
+
781
+ Parsing gives helpful messages if your string contains an error.
782
+
783
+ >>> FeatStruct('[a=, b=5]]')
784
+ Traceback (most recent call last):
785
+ . . .
786
+ ValueError: Error parsing feature structure
787
+ [a=, b=5]]
788
+ ^ Expected value
789
+ >>> FeatStruct('[a=12 22, b=33]')
790
+ Traceback (most recent call last):
791
+ . . .
792
+ ValueError: Error parsing feature structure
793
+ [a=12 22, b=33]
794
+ ^ Expected comma
795
+ >>> FeatStruct('[a=5] [b=6]')
796
+ Traceback (most recent call last):
797
+ . . .
798
+ ValueError: Error parsing feature structure
799
+ [a=5] [b=6]
800
+ ^ Expected end of string
801
+ >>> FeatStruct(' *++*')
802
+ Traceback (most recent call last):
803
+ . . .
804
+ ValueError: Error parsing feature structure
805
+ *++*
806
+ ^ Expected open bracket or identifier
807
+ >>> FeatStruct('[x->(1)]')
808
+ Traceback (most recent call last):
809
+ . . .
810
+ ValueError: Error parsing feature structure
811
+ [x->(1)]
812
+ ^ Expected bound identifier
813
+ >>> FeatStruct('[x->y]')
814
+ Traceback (most recent call last):
815
+ . . .
816
+ ValueError: Error parsing feature structure
817
+ [x->y]
818
+ ^ Expected identifier
819
+ >>> FeatStruct('')
820
+ Traceback (most recent call last):
821
+ . . .
822
+ ValueError: Error parsing feature structure
823
+ <BLANKLINE>
824
+ ^ Expected open bracket or identifier
825
+
826
+
827
+ Unification
828
+ -----------
829
+ Very simple unifications give the expected results:
830
+
831
+ >>> FeatStruct().unify(FeatStruct())
832
+ []
833
+ >>> FeatStruct(number='singular').unify(FeatStruct())
834
+ [number='singular']
835
+ >>> FeatStruct().unify(FeatStruct(number='singular'))
836
+ [number='singular']
837
+ >>> FeatStruct(number='singular').unify(FeatStruct(person=3))
838
+ [number='singular', person=3]
839
+
840
+ Merging nested structures:
841
+
842
+ >>> fs1 = FeatStruct('[A=[B=b]]')
843
+ >>> fs2 = FeatStruct('[A=[C=c]]')
844
+ >>> fs1.unify(fs2)
845
+ [A=[B='b', C='c']]
846
+ >>> fs2.unify(fs1)
847
+ [A=[B='b', C='c']]
848
+
849
+ A basic case of reentrant unification
850
+
851
+ >>> fs4 = FeatStruct('[A=(1)[B=b], E=[F->(1)]]')
852
+ >>> fs5 = FeatStruct("[A=[C='c'], E=[F=[D='d']]]")
853
+ >>> fs4.unify(fs5)
854
+ [A=(1)[B='b', C='c', D='d'], E=[F->(1)]]
855
+ >>> fs5.unify(fs4)
856
+ [A=(1)[B='b', C='c', D='d'], E=[F->(1)]]
857
+
858
+ More than 2 paths to a value
859
+
860
+ >>> fs1 = FeatStruct("[a=[],b=[],c=[],d=[]]")
861
+ >>> fs2 = FeatStruct('[a=(1)[], b->(1), c->(1), d->(1)]')
862
+ >>> fs1.unify(fs2)
863
+ [a=(1)[], b->(1), c->(1), d->(1)]
864
+
865
+ fs1[a] gets unified with itself
866
+
867
+ >>> fs1 = FeatStruct('[x=(1)[], y->(1)]')
868
+ >>> fs2 = FeatStruct('[x=(1)[], y->(1)]')
869
+ >>> fs1.unify(fs2)
870
+ [x=(1)[], y->(1)]
871
+
872
+ Bound variables should get forwarded appropriately
873
+
874
+ >>> fs1 = FeatStruct('[A=(1)[X=x], B->(1), C=?cvar, D=?dvar]')
875
+ >>> fs2 = FeatStruct('[A=(1)[Y=y], B=(2)[Z=z], C->(1), D->(2)]')
876
+ >>> fs1.unify(fs2)
877
+ [A=(1)[X='x', Y='y', Z='z'], B->(1), C->(1), D->(1)]
878
+ >>> fs2.unify(fs1)
879
+ [A=(1)[X='x', Y='y', Z='z'], B->(1), C->(1), D->(1)]
880
+
881
+ Cyclic structure created by unification.
882
+
883
+ >>> fs1 = FeatStruct('[F=(1)[], G->(1)]')
884
+ >>> fs2 = FeatStruct('[F=[H=(2)[]], G->(2)]')
885
+ >>> fs3 = fs1.unify(fs2)
886
+ >>> fs3
887
+ [F=(1)[H->(1)], G->(1)]
888
+ >>> fs3['F'] is fs3['G']
889
+ True
890
+ >>> fs3['F'] is fs3['G']['H']
891
+ True
892
+ >>> fs3['F'] is fs3['G']['H']['H']
893
+ True
894
+ >>> fs3['F'] is fs3['F']['H']['H']['H']['H']['H']['H']['H']['H']
895
+ True
896
+
897
+ Cyclic structure created w/ variables.
898
+
899
+ >>> fs1 = FeatStruct('[F=[H=?x]]')
900
+ >>> fs2 = FeatStruct('[F=?x]')
901
+ >>> fs3 = fs1.unify(fs2, rename_vars=False)
902
+ >>> fs3
903
+ [F=(1)[H->(1)]]
904
+ >>> fs3['F'] is fs3['F']['H']
905
+ True
906
+ >>> fs3['F'] is fs3['F']['H']['H']
907
+ True
908
+ >>> fs3['F'] is fs3['F']['H']['H']['H']['H']['H']['H']['H']['H']
909
+ True
910
+
911
+ Unifying w/ a cyclic feature structure.
912
+
913
+ >>> fs4 = FeatStruct('[F=[H=[H=[H=(1)[]]]], K->(1)]')
914
+ >>> fs3.unify(fs4)
915
+ [F=(1)[H->(1)], K->(1)]
916
+ >>> fs4.unify(fs3)
917
+ [F=(1)[H->(1)], K->(1)]
918
+
919
+ Variable bindings should preserve reentrance.
920
+
921
+ >>> bindings = {}
922
+ >>> fs1 = FeatStruct("[a=?x]")
923
+ >>> fs2 = fs1.unify(FeatStruct("[a=[]]"), bindings)
924
+ >>> fs2['a'] is bindings[Variable('?x')]
925
+ True
926
+ >>> fs2.unify(FeatStruct("[b=?x]"), bindings)
927
+ [a=(1)[], b->(1)]
928
+
929
+ Aliased variable tests
930
+
931
+ >>> fs1 = FeatStruct("[a=?x, b=?x]")
932
+ >>> fs2 = FeatStruct("[b=?y, c=?y]")
933
+ >>> bindings = {}
934
+ >>> fs3 = fs1.unify(fs2, bindings)
935
+ >>> fs3
936
+ [a=?x, b=?x, c=?x]
937
+ >>> bindings
938
+ {Variable('?y'): Variable('?x')}
939
+ >>> fs3.unify(FeatStruct("[a=1]"))
940
+ [a=1, b=1, c=1]
941
+
942
+ If we keep track of the bindings, then we can use the same variable
943
+ over multiple calls to unify.
944
+
945
+ >>> bindings = {}
946
+ >>> fs1 = FeatStruct('[a=?x]')
947
+ >>> fs2 = fs1.unify(FeatStruct('[a=[]]'), bindings)
948
+ >>> fs2.unify(FeatStruct('[b=?x]'), bindings)
949
+ [a=(1)[], b->(1)]
950
+ >>> bindings
951
+ {Variable('?x'): []}
952
+
953
+ ..
954
+ >>> del fs1, fs2, fs3, fs4, fs5 # clean-up
955
+
956
+ Unification Bindings
957
+ --------------------
958
+
959
+ >>> bindings = {}
960
+ >>> fs1 = FeatStruct('[a=?x]')
961
+ >>> fs2 = FeatStruct('[a=12]')
962
+ >>> fs3 = FeatStruct('[b=?x]')
963
+ >>> fs1.unify(fs2, bindings)
964
+ [a=12]
965
+ >>> bindings
966
+ {Variable('?x'): 12}
967
+ >>> fs3.substitute_bindings(bindings)
968
+ [b=12]
969
+ >>> fs3 # substitute_bindings didn't mutate fs3.
970
+ [b=?x]
971
+ >>> fs2.unify(fs3, bindings)
972
+ [a=12, b=12]
973
+
974
+ >>> bindings = {}
975
+ >>> fs1 = FeatStruct('[a=?x, b=1]')
976
+ >>> fs2 = FeatStruct('[a=5, b=?x]')
977
+ >>> fs1.unify(fs2, bindings)
978
+ [a=5, b=1]
979
+ >>> sorted(bindings.items())
980
+ [(Variable('?x'), 5), (Variable('?x2'), 1)]
981
+
982
+ ..
983
+ >>> del fs1, fs2, fs3 # clean-up
984
+
985
+ Expressions
986
+ -----------
987
+
988
+ >>> e = Expression.fromstring('\\P y.P(z,y)')
989
+ >>> fs1 = FeatStruct(x=e, y=Variable('z'))
990
+ >>> fs2 = FeatStruct(y=VariableExpression(Variable('John')))
991
+ >>> fs1.unify(fs2)
992
+ [x=<\P y.P(John,y)>, y=<John>]
993
+
994
+ Remove Variables
995
+ ----------------
996
+
997
+ >>> FeatStruct('[a=?x, b=12, c=[d=?y]]').remove_variables()
998
+ [b=12, c=[]]
999
+ >>> FeatStruct('(1)[a=[b=?x,c->(1)]]').remove_variables()
1000
+ (1)[a=[c->(1)]]
1001
+
1002
+ Equality & Hashing
1003
+ ------------------
1004
+ The `equal_values` method checks whether two feature structures assign
1005
+ the same value to every feature. If the optional argument
1006
+ ``check_reentrances`` is supplied, then it also returns false if there
1007
+ is any difference in the reentrances.
1008
+
1009
+ >>> a = FeatStruct('(1)[x->(1)]')
1010
+ >>> b = FeatStruct('(1)[x->(1)]')
1011
+ >>> c = FeatStruct('(1)[x=[x->(1)]]')
1012
+ >>> d = FeatStruct('[x=(1)[x->(1)]]')
1013
+ >>> e = FeatStruct('(1)[x=[x->(1), y=1], y=1]')
1014
+ >>> def compare(x,y):
1015
+ ... assert x.equal_values(y, True) == y.equal_values(x, True)
1016
+ ... assert x.equal_values(y, False) == y.equal_values(x, False)
1017
+ ... if x.equal_values(y, True):
1018
+ ... assert x.equal_values(y, False)
1019
+ ... print('equal values, same reentrance')
1020
+ ... elif x.equal_values(y, False):
1021
+ ... print('equal values, different reentrance')
1022
+ ... else:
1023
+ ... print('different values')
1024
+
1025
+ >>> compare(a, a)
1026
+ equal values, same reentrance
1027
+ >>> compare(a, b)
1028
+ equal values, same reentrance
1029
+ >>> compare(a, c)
1030
+ equal values, different reentrance
1031
+ >>> compare(a, d)
1032
+ equal values, different reentrance
1033
+ >>> compare(c, d)
1034
+ equal values, different reentrance
1035
+ >>> compare(a, e)
1036
+ different values
1037
+ >>> compare(c, e)
1038
+ different values
1039
+ >>> compare(d, e)
1040
+ different values
1041
+ >>> compare(e, e)
1042
+ equal values, same reentrance
1043
+
1044
+ Feature structures may not be hashed until they are frozen:
1045
+
1046
+ >>> hash(a)
1047
+ Traceback (most recent call last):
1048
+ . . .
1049
+ TypeError: FeatStructs must be frozen before they can be hashed.
1050
+ >>> a.freeze()
1051
+ >>> v = hash(a)
1052
+
1053
+ Feature structures define hash consistently. The following example
1054
+ looks at the hash value for each (fs1,fs2) pair; if their hash values
1055
+ are not equal, then they must not be equal. If their hash values are
1056
+ equal, then display a message, and indicate whether their values are
1057
+ indeed equal. Note that c and d currently have the same hash value,
1058
+ even though they are not equal. That is not a bug, strictly speaking,
1059
+ but it wouldn't be a bad thing if it changed.
1060
+
1061
+ >>> for fstruct in (a, b, c, d, e):
1062
+ ... fstruct.freeze()
1063
+ >>> for fs1_name in 'abcde':
1064
+ ... for fs2_name in 'abcde':
1065
+ ... fs1 = locals()[fs1_name]
1066
+ ... fs2 = locals()[fs2_name]
1067
+ ... if hash(fs1) != hash(fs2):
1068
+ ... assert fs1 != fs2
1069
+ ... else:
1070
+ ... print('%s and %s have the same hash value,' %
1071
+ ... (fs1_name, fs2_name))
1072
+ ... if fs1 == fs2: print('and are equal')
1073
+ ... else: print('and are not equal')
1074
+ a and a have the same hash value, and are equal
1075
+ a and b have the same hash value, and are equal
1076
+ b and a have the same hash value, and are equal
1077
+ b and b have the same hash value, and are equal
1078
+ c and c have the same hash value, and are equal
1079
+ c and d have the same hash value, and are not equal
1080
+ d and c have the same hash value, and are not equal
1081
+ d and d have the same hash value, and are equal
1082
+ e and e have the same hash value, and are equal
1083
+
1084
+ ..
1085
+ >>> del a, b, c, d, e, v # clean-up
1086
+
1087
+ Tracing
1088
+ -------
1089
+
1090
+ >>> fs1 = FeatStruct('[a=[b=(1)[], c=?x], d->(1), e=[f=?x]]')
1091
+ >>> fs2 = FeatStruct('[a=(1)[c="C"], e=[g->(1)]]')
1092
+ >>> fs1.unify(fs2, trace=True)
1093
+ <BLANKLINE>
1094
+ Unification trace:
1095
+ / [a=[b=(1)[], c=?x], d->(1), e=[f=?x]]
1096
+ |\ [a=(1)[c='C'], e=[g->(1)]]
1097
+ |
1098
+ | Unify feature: a
1099
+ | / [b=[], c=?x]
1100
+ | |\ [c='C']
1101
+ | |
1102
+ | | Unify feature: a.c
1103
+ | | / ?x
1104
+ | | |\ 'C'
1105
+ | | |
1106
+ | | +-->Variable('?x')
1107
+ | |
1108
+ | +-->[b=[], c=?x]
1109
+ | Bindings: {?x: 'C'}
1110
+ |
1111
+ | Unify feature: e
1112
+ | / [f=?x]
1113
+ | |\ [g=[c='C']]
1114
+ | |
1115
+ | +-->[f=?x, g=[b=[], c=?x]]
1116
+ | Bindings: {?x: 'C'}
1117
+ |
1118
+ +-->[a=(1)[b=(2)[], c='C'], d->(2), e=[f='C', g->(1)]]
1119
+ Bindings: {?x: 'C'}
1120
+ [a=(1)[b=(2)[], c='C'], d->(2), e=[f='C', g->(1)]]
1121
+ >>>
1122
+ >>> fs1 = FeatStruct('[a=?x, b=?z, c=?z]')
1123
+ >>> fs2 = FeatStruct('[a=?y, b=?y, c=?q]')
1124
+ >>> #fs1.unify(fs2, trace=True)
1125
+ >>>
1126
+
1127
+ ..
1128
+ >>> del fs1, fs2 # clean-up
1129
+
1130
+ Unification on Dicts & Lists
1131
+ ----------------------------
1132
+ It's possible to do unification on dictionaries:
1133
+
1134
+ >>> from nltk.featstruct import unify
1135
+ >>> pprint(unify(dict(x=1, y=dict(z=2)), dict(x=1, q=5)), width=1)
1136
+ {'q': 5, 'x': 1, 'y': {'z': 2}}
1137
+
1138
+ It's possible to do unification on lists as well:
1139
+
1140
+ >>> unify([1, 2, 3], [1, Variable('x'), 3])
1141
+ [1, 2, 3]
1142
+
1143
+ Mixing dicts and lists is fine:
1144
+
1145
+ >>> pprint(unify([dict(x=1, y=dict(z=2)),3], [dict(x=1, q=5),3]),
1146
+ ... width=1)
1147
+ [{'q': 5, 'x': 1, 'y': {'z': 2}}, 3]
1148
+
1149
+ Mixing dicts and FeatStructs is discouraged:
1150
+
1151
+ >>> unify(dict(x=1), FeatStruct(x=1))
1152
+ Traceback (most recent call last):
1153
+ . . .
1154
+ ValueError: Mixing FeatStruct objects with Python dicts and lists is not supported.
1155
+
1156
+ But you can do it if you really want, by explicitly stating that both
1157
+ dictionaries and FeatStructs should be treated as feature structures:
1158
+
1159
+ >>> unify(dict(x=1), FeatStruct(x=1), fs_class=(dict, FeatStruct))
1160
+ {'x': 1}
1161
+
1162
+ Finding Conflicts
1163
+ -----------------
1164
+
1165
+ >>> from nltk.featstruct import conflicts
1166
+ >>> fs1 = FeatStruct('[a=[b=(1)[c=2], d->(1), e=[f->(1)]]]')
1167
+ >>> fs2 = FeatStruct('[a=[b=[c=[x=5]], d=[c=2], e=[f=[c=3]]]]')
1168
+ >>> for path in conflicts(fs1, fs2):
1169
+ ... print('%-8s: %r vs %r' % ('.'.join(path), fs1[path], fs2[path]))
1170
+ a.b.c : 2 vs [x=5]
1171
+ a.e.f.c : 2 vs 3
1172
+
1173
+ ..
1174
+ >>> del fs1, fs2 # clean-up
1175
+
1176
+ Retracting Bindings
1177
+ -------------------
1178
+
1179
+ >>> from nltk.featstruct import retract_bindings
1180
+ >>> bindings = {}
1181
+ >>> fs1 = FeatStruct('[a=?x, b=[c=?y]]')
1182
+ >>> fs2 = FeatStruct('[a=(1)[c=[d=1]], b->(1)]')
1183
+ >>> fs3 = fs1.unify(fs2, bindings)
1184
+ >>> print(fs3)
1185
+ [ a = (1) [ c = [ d = 1 ] ] ]
1186
+ [ ]
1187
+ [ b -> (1) ]
1188
+ >>> pprint(bindings)
1189
+ {Variable('?x'): [c=[d=1]], Variable('?y'): [d=1]}
1190
+ >>> retract_bindings(fs3, bindings)
1191
+ [a=?x, b=?x]
1192
+ >>> pprint(bindings)
1193
+ {Variable('?x'): [c=?y], Variable('?y'): [d=1]}
1194
+
1195
+ Squashed Bugs
1196
+ ~~~~~~~~~~~~~
1197
+ In svn rev 5167, unifying two feature structures that used the same
1198
+ variable would cause those variables to become aliased in the output.
1199
+
1200
+ >>> fs1 = FeatStruct('[a=?x]')
1201
+ >>> fs2 = FeatStruct('[b=?x]')
1202
+ >>> fs1.unify(fs2)
1203
+ [a=?x, b=?x2]
1204
+
1205
+ There was a bug in svn revision 5172 that caused `rename_variables` to
1206
+ rename variables to names that are already used.
1207
+
1208
+ >>> FeatStruct('[a=?x, b=?x2]').rename_variables(
1209
+ ... vars=[Variable('?x')])
1210
+ [a=?x3, b=?x2]
1211
+ >>> fs1 = FeatStruct('[a=?x]')
1212
+ >>> fs2 = FeatStruct('[a=?x, b=?x2]')
1213
+ >>> fs1.unify(fs2)
1214
+ [a=?x, b=?x2]
1215
+
1216
+ There was a bug in svn rev 5167 that caused us to get the following
1217
+ example wrong. Basically the problem was that we only followed
1218
+ 'forward' pointers for other, not self, when unifying two feature
1219
+ structures. (nb: this test assumes that features are unified in
1220
+ alphabetical order -- if they are not, it might pass even if the bug
1221
+ is present.)
1222
+
1223
+ >>> fs1 = FeatStruct('[a=[x=1], b=?x, c=?x]')
1224
+ >>> fs2 = FeatStruct('[a=(1)[], b->(1), c=[x=2]]')
1225
+ >>> print(fs1.unify(fs2))
1226
+ None
1227
+
1228
+ ..
1229
+ >>> del fs1, fs2 # clean-up
llmeval-env/lib/python3.10/site-packages/nltk/test/framenet.doctest ADDED
@@ -0,0 +1,288 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ========
5
+ FrameNet
6
+ ========
7
+
8
+ The FrameNet corpus is a lexical database of English that is both human-
9
+ and machine-readable, based on annotating examples of how words are used
10
+ in actual texts. FrameNet is based on a theory of meaning called Frame
11
+ Semantics, deriving from the work of Charles J. Fillmore and colleagues.
12
+ The basic idea is straightforward: that the meanings of most words can
13
+ best be understood on the basis of a semantic frame: a description of a
14
+ type of event, relation, or entity and the participants in it. For
15
+ example, the concept of cooking typically involves a person doing the
16
+ cooking (Cook), the food that is to be cooked (Food), something to hold
17
+ the food while cooking (Container) and a source of heat
18
+ (Heating_instrument). In the FrameNet project, this is represented as a
19
+ frame called Apply_heat, and the Cook, Food, Heating_instrument and
20
+ Container are called frame elements (FEs). Words that evoke this frame,
21
+ such as fry, bake, boil, and broil, are called lexical units (LUs) of
22
+ the Apply_heat frame. The job of FrameNet is to define the frames
23
+ and to annotate sentences to show how the FEs fit syntactically around
24
+ the word that evokes the frame.
25
+
26
+ ------
27
+ Frames
28
+ ------
29
+
30
+ A Frame is a script-like conceptual structure that describes a
31
+ particular type of situation, object, or event along with the
32
+ participants and props that are needed for that Frame. For
33
+ example, the "Apply_heat" frame describes a common situation
34
+ involving a Cook, some Food, and a Heating_Instrument, and is
35
+ evoked by words such as bake, blanch, boil, broil, brown,
36
+ simmer, steam, etc.
37
+
38
+ We call the roles of a Frame "frame elements" (FEs) and the
39
+ frame-evoking words are called "lexical units" (LUs).
40
+
41
+ FrameNet includes relations between Frames. Several types of
42
+ relations are defined, of which the most important are:
43
+
44
+ - Inheritance: An IS-A relation. The child frame is a subtype
45
+ of the parent frame, and each FE in the parent is bound to
46
+ a corresponding FE in the child. An example is the
47
+ "Revenge" frame which inherits from the
48
+ "Rewards_and_punishments" frame.
49
+
50
+ - Using: The child frame presupposes the parent frame as
51
+ background, e.g the "Speed" frame "uses" (or presupposes)
52
+ the "Motion" frame; however, not all parent FEs need to be
53
+ bound to child FEs.
54
+
55
+ - Subframe: The child frame is a subevent of a complex event
56
+ represented by the parent, e.g. the "Criminal_process" frame
57
+ has subframes of "Arrest", "Arraignment", "Trial", and
58
+ "Sentencing".
59
+
60
+ - Perspective_on: The child frame provides a particular
61
+ perspective on an un-perspectivized parent frame. A pair of
62
+ examples consists of the "Hiring" and "Get_a_job" frames,
63
+ which perspectivize the "Employment_start" frame from the
64
+ Employer's and the Employee's point of view, respectively.
65
+
66
+ To get a list of all of the Frames in FrameNet, you can use the
67
+ `frames()` function. If you supply a regular expression pattern to the
68
+ `frames()` function, you will get a list of all Frames whose names match
69
+ that pattern:
70
+
71
+ >>> from pprint import pprint
72
+ >>> from operator import itemgetter
73
+ >>> from nltk.corpus import framenet as fn
74
+ >>> from nltk.corpus.reader.framenet import PrettyList
75
+ >>> x = fn.frames(r'(?i)crim')
76
+ >>> x.sort(key=itemgetter('ID'))
77
+ >>> x
78
+ [<frame ID=200 name=Criminal_process>, <frame ID=500 name=Criminal_investigation>, ...]
79
+ >>> PrettyList(sorted(x, key=itemgetter('ID')))
80
+ [<frame ID=200 name=Criminal_process>, <frame ID=500 name=Criminal_investigation>, ...]
81
+
82
+ To get the details of a particular Frame, you can use the `frame()`
83
+ function passing in the frame number:
84
+
85
+ >>> from pprint import pprint
86
+ >>> from nltk.corpus import framenet as fn
87
+ >>> f = fn.frame(202)
88
+ >>> f.ID
89
+ 202
90
+ >>> f.name
91
+ 'Arrest'
92
+ >>> f.definition
93
+ "Authorities charge a Suspect, who is under suspicion of having committed a crime..."
94
+ >>> len(f.lexUnit)
95
+ 11
96
+ >>> pprint(sorted([x for x in f.FE]))
97
+ ['Authorities',
98
+ 'Charges',
99
+ 'Co-participant',
100
+ 'Manner',
101
+ 'Means',
102
+ 'Offense',
103
+ 'Place',
104
+ 'Purpose',
105
+ 'Source_of_legal_authority',
106
+ 'Suspect',
107
+ 'Time',
108
+ 'Type']
109
+ >>> pprint(f.frameRelations)
110
+ [<Parent=Intentionally_affect -- Inheritance -> Child=Arrest>, <Complex=Criminal_process -- Subframe -> Component=Arrest>, ...]
111
+
112
+ The `frame()` function shown above returns a dict object containing
113
+ detailed information about the Frame. See the documentation on the
114
+ `frame()` function for the specifics.
115
+
116
+ You can also search for Frames by their Lexical Units (LUs). The
117
+ `frames_by_lemma()` function returns a list of all frames that contain
118
+ LUs in which the 'name' attribute of the LU matches the given regular
119
+ expression. Note that LU names are composed of "lemma.POS", where the
120
+ "lemma" part can be made up of either a single lexeme (e.g. 'run') or
121
+ multiple lexemes (e.g. 'a little') (see below).
122
+
123
+ >>> PrettyList(sorted(fn.frames_by_lemma(r'(?i)a little'), key=itemgetter('ID')))
124
+ [<frame ID=189 name=Quanti...>, <frame ID=2001 name=Degree>]
125
+
126
+ -------------
127
+ Lexical Units
128
+ -------------
129
+
130
+ A lexical unit (LU) is a pairing of a word with a meaning. For
131
+ example, the "Apply_heat" Frame describes a common situation
132
+ involving a Cook, some Food, and a Heating Instrument, and is
133
+ _evoked_ by words such as bake, blanch, boil, broil, brown,
134
+ simmer, steam, etc. These frame-evoking words are the LUs in the
135
+ Apply_heat frame. Each sense of a polysemous word is a different
136
+ LU.
137
+
138
+ We have used the word "word" in talking about LUs. The reality
139
+ is actually rather complex. When we say that the word "bake" is
140
+ polysemous, we mean that the lemma "bake.v" (which has the
141
+ word-forms "bake", "bakes", "baked", and "baking") is linked to
142
+ three different frames:
143
+
144
+ - Apply_heat: "Michelle baked the potatoes for 45 minutes."
145
+
146
+ - Cooking_creation: "Michelle baked her mother a cake for her birthday."
147
+
148
+ - Absorb_heat: "The potatoes have to bake for more than 30 minutes."
149
+
150
+ These constitute three different LUs, with different
151
+ definitions.
152
+
153
+ Multiword expressions such as "given name" and hyphenated words
154
+ like "shut-eye" can also be LUs. Idiomatic phrases such as
155
+ "middle of nowhere" and "give the slip (to)" are also defined as
156
+ LUs in the appropriate frames ("Isolated_places" and "Evading",
157
+ respectively), and their internal structure is not analyzed.
158
+
159
+ Framenet provides multiple annotated examples of each sense of a
160
+ word (i.e. each LU). Moreover, the set of examples
161
+ (approximately 20 per LU) illustrates all of the combinatorial
162
+ possibilities of the lexical unit.
163
+
164
+ Each LU is linked to a Frame, and hence to the other words which
165
+ evoke that Frame. This makes the FrameNet database similar to a
166
+ thesaurus, grouping together semantically similar words.
167
+
168
+ In the simplest case, frame-evoking words are verbs such as
169
+ "fried" in:
170
+
171
+ "Matilde fried the catfish in a heavy iron skillet."
172
+
173
+ Sometimes event nouns may evoke a Frame. For example,
174
+ "reduction" evokes "Cause_change_of_scalar_position" in:
175
+
176
+ "...the reduction of debt levels to $665 million from $2.6 billion."
177
+
178
+ Adjectives may also evoke a Frame. For example, "asleep" may
179
+ evoke the "Sleep" frame as in:
180
+
181
+ "They were asleep for hours."
182
+
183
+ Many common nouns, such as artifacts like "hat" or "tower",
184
+ typically serve as dependents rather than clearly evoking their
185
+ own frames.
186
+
187
+ Details for a specific lexical unit can be obtained using this class's
188
+ `lus()` function, which takes an optional regular expression
189
+ pattern that will be matched against the name of the lexical unit:
190
+
191
+ >>> from pprint import pprint
192
+ >>> PrettyList(sorted(fn.lus(r'(?i)a little'), key=itemgetter('ID')))
193
+ [<lu ID=14733 name=a little.n>, <lu ID=14743 name=a little.adv>, ...]
194
+
195
+ You can obtain detailed information on a particular LU by calling the
196
+ `lu()` function and passing in an LU's 'ID' number:
197
+
198
+ >>> from pprint import pprint
199
+ >>> from nltk.corpus import framenet as fn
200
+ >>> fn.lu(256).name
201
+ 'foresee.v'
202
+ >>> fn.lu(256).definition
203
+ 'COD: be aware of beforehand; predict.'
204
+ >>> fn.lu(256).frame.name
205
+ 'Expectation'
206
+ >>> fn.lu(256).lexemes[0].name
207
+ 'foresee'
208
+
209
+ Note that LU names take the form of a dotted string (e.g. "run.v" or "a
210
+ little.adv") in which a lemma precedes the "." and a part of speech
211
+ (POS) follows the dot. The lemma may be composed of a single lexeme
212
+ (e.g. "run") or of multiple lexemes (e.g. "a little"). The list of
213
+ POSs used in the LUs is:
214
+
215
+ v - verb
216
+ n - noun
217
+ a - adjective
218
+ adv - adverb
219
+ prep - preposition
220
+ num - numbers
221
+ intj - interjection
222
+ art - article
223
+ c - conjunction
224
+ scon - subordinating conjunction
225
+
226
+ For more detailed information about the info that is contained in the
227
+ dict that is returned by the `lu()` function, see the documentation on
228
+ the `lu()` function.
229
+
230
+ -------------------
231
+ Annotated Documents
232
+ -------------------
233
+
234
+ The FrameNet corpus contains a small set of annotated documents. A list
235
+ of these documents can be obtained by calling the `docs()` function:
236
+
237
+ >>> from pprint import pprint
238
+ >>> from nltk.corpus import framenet as fn
239
+ >>> d = fn.docs('BellRinging')[0]
240
+ >>> d.corpname
241
+ 'PropBank'
242
+ >>> d.sentence[49]
243
+ full-text sentence (...) in BellRinging:
244
+ <BLANKLINE>
245
+ <BLANKLINE>
246
+ [POS] 17 tags
247
+ <BLANKLINE>
248
+ [POS_tagset] PENN
249
+ <BLANKLINE>
250
+ [text] + [annotationSet]
251
+ <BLANKLINE>
252
+ `` I live in hopes that the ringers themselves will be drawn into
253
+ ***** ******* *****
254
+ Desir Cause_t Cause
255
+ [1] [3] [2]
256
+ <BLANKLINE>
257
+ that fuller life .
258
+ ******
259
+ Comple
260
+ [4]
261
+ (Desir=Desiring, Cause_t=Cause_to_make_noise, Cause=Cause_motion, Comple=Completeness)
262
+ <BLANKLINE>
263
+
264
+ >>> d.sentence[49].annotationSet[1]
265
+ annotation set (...):
266
+ <BLANKLINE>
267
+ [status] MANUAL
268
+ <BLANKLINE>
269
+ [LU] (6605) hope.n in Desiring
270
+ <BLANKLINE>
271
+ [frame] (366) Desiring
272
+ <BLANKLINE>
273
+ [GF] 2 relations
274
+ <BLANKLINE>
275
+ [PT] 2 phrases
276
+ <BLANKLINE>
277
+ [text] + [Target] + [FE] + [Noun]
278
+ <BLANKLINE>
279
+ `` I live in hopes that the ringers themselves will be drawn into
280
+ - ^^^^ ^^ ***** ----------------------------------------------
281
+ E supp su Event
282
+ <BLANKLINE>
283
+ that fuller life .
284
+ -----------------
285
+ <BLANKLINE>
286
+ (E=Experiencer, su=supp)
287
+ <BLANKLINE>
288
+ <BLANKLINE>
llmeval-env/lib/python3.10/site-packages/nltk/test/generate.doctest ADDED
@@ -0,0 +1,78 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ===============================================
5
+ Generating sentences from context-free grammars
6
+ ===============================================
7
+
8
+ An example grammar:
9
+
10
+ >>> from nltk.parse.generate import generate, demo_grammar
11
+ >>> from nltk import CFG
12
+ >>> grammar = CFG.fromstring(demo_grammar)
13
+ >>> print(grammar)
14
+ Grammar with 13 productions (start state = S)
15
+ S -> NP VP
16
+ NP -> Det N
17
+ PP -> P NP
18
+ VP -> 'slept'
19
+ VP -> 'saw' NP
20
+ VP -> 'walked' PP
21
+ Det -> 'the'
22
+ Det -> 'a'
23
+ N -> 'man'
24
+ N -> 'park'
25
+ N -> 'dog'
26
+ P -> 'in'
27
+ P -> 'with'
28
+
29
+ The first 10 generated sentences:
30
+
31
+ >>> for sentence in generate(grammar, n=10):
32
+ ... print(' '.join(sentence))
33
+ the man slept
34
+ the man saw the man
35
+ the man saw the park
36
+ the man saw the dog
37
+ the man saw a man
38
+ the man saw a park
39
+ the man saw a dog
40
+ the man walked in the man
41
+ the man walked in the park
42
+ the man walked in the dog
43
+
44
+ All sentences of max depth 4:
45
+
46
+ >>> for sentence in generate(grammar, depth=4):
47
+ ... print(' '.join(sentence))
48
+ the man slept
49
+ the park slept
50
+ the dog slept
51
+ a man slept
52
+ a park slept
53
+ a dog slept
54
+
55
+ The number of sentences of different max depths:
56
+
57
+ >>> len(list(generate(grammar, depth=3)))
58
+ 0
59
+ >>> len(list(generate(grammar, depth=4)))
60
+ 6
61
+ >>> len(list(generate(grammar, depth=5)))
62
+ 42
63
+ >>> len(list(generate(grammar, depth=6)))
64
+ 114
65
+ >>> len(list(generate(grammar)))
66
+ 114
67
+
68
+ Infinite grammars will raise a ``RuntimeError`` when generation is not bounded by some ``depth``:
69
+
70
+ >>> grammar = CFG.fromstring("""
71
+ ... S -> A B
72
+ ... A -> B
73
+ ... B -> "b" | A
74
+ ... """)
75
+ >>> list(generate(grammar))
76
+ Traceback (most recent call last):
77
+ ...
78
+ RuntimeError: The grammar has rule(s) that yield infinite recursion!
llmeval-env/lib/python3.10/site-packages/nltk/test/gensim.doctest ADDED
@@ -0,0 +1,141 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =======================================
5
+ Demonstrate word embedding using Gensim
6
+ =======================================
7
+
8
+ >>> from nltk.test.gensim_fixt import setup_module
9
+ >>> setup_module()
10
+
11
+ We demonstrate three tasks:
+ - training word embeddings on the Brown corpus;
+ - loading a pre-trained model and performing simple tasks; and
+ - pruning the pre-trained binary model.
15
+
16
+ >>> import gensim
17
+
18
+ ---------------
19
+ Train the model
20
+ ---------------
21
+
22
+ Here we train a word embedding using the Brown Corpus:
23
+
24
+ >>> from nltk.corpus import brown
25
+ >>> train_set = brown.sents()[:10000]
26
+ >>> model = gensim.models.Word2Vec(train_set)
27
+
28
+ Training the model may take some time, so once it is trained, it can be saved as follows:
29
+
30
+ >>> model.save('brown.embedding')
31
+ >>> new_model = gensim.models.Word2Vec.load('brown.embedding')
32
+
33
+ The trained model maps each word in its vocabulary to an embedding, so we can easily get the vector representation of a word.
34
+
35
+ >>> len(new_model.wv['university'])
36
+ 100
37
+
38
+ Gensim already implements several supporting functions for working with word embeddings.
+ For example, to compute the cosine similarity between two words:
40
+
41
+ >>> new_model.wv.similarity('university','school') > 0.3
42
+ True
43
+
44
+ ---------------------------
45
+ Using the pre-trained model
46
+ ---------------------------
47
+
48
+ NLTK includes a sample of a pre-trained model that was trained on 100 billion words from the Google News dataset.
49
+ The full model is from https://code.google.com/p/word2vec/ (about 3 GB).
50
+
51
+ >>> from nltk.data import find
52
+ >>> word2vec_sample = str(find('models/word2vec_sample/pruned.word2vec.txt'))
53
+ >>> model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_sample, binary=False)
54
+
55
+ We pruned the model to only include the most common words (~44k words).
56
+
57
+ >>> len(model)
58
+ 43981
59
+
60
+ Each word is represented in the space of 300 dimensions:
61
+
62
+ >>> len(model['university'])
63
+ 300
64
+
65
+ Finding the top n words that are most similar to a target word is simple. The result is a list of the n words together with their similarity scores.
66
+
67
+ >>> model.most_similar(positive=['university'], topn = 3)
68
+ [('universities', 0.70039...), ('faculty', 0.67809...), ('undergraduate', 0.65870...)]
69
+
70
+ Finding the word that does not belong in a list is also supported, although implementing this yourself would be simple.
71
+
72
+ >>> model.doesnt_match('breakfast cereal dinner lunch'.split())
73
+ 'cereal'
74
+
75
+ Mikolov et al. (2013) showed that word embeddings capture many syntactic and semantic regularities. For example,
+ the vector 'King - Man + Woman' is close to 'Queen', and 'Germany - Berlin + Paris' is close to 'France'.
77
+
78
+ >>> model.most_similar(positive=['woman','king'], negative=['man'], topn = 1)
79
+ [('queen', 0.71181...)]
80
+
81
+ >>> model.most_similar(positive=['Paris','Germany'], negative=['Berlin'], topn = 1)
82
+ [('France', 0.78840...)]
83
+
84
+ We can visualize the word embeddings using t-SNE (https://lvdmaaten.github.io/tsne/). For this demonstration, we visualize the first 1000 words.
85
+
86
+ | import numpy as np
87
+ | labels = []
88
+ | count = 0
89
+ | max_count = 1000
90
+ | X = np.zeros(shape=(max_count,len(model['university'])))
91
+ |
92
+ | for term in model.index_to_key:
93
+ | X[count] = model[term]
94
+ | labels.append(term)
95
+ | count+= 1
96
+ | if count >= max_count: break
97
+ |
98
+ | # It is recommended to use PCA first to reduce to ~50 dimensions
99
+ | from sklearn.decomposition import PCA
100
+ | pca = PCA(n_components=50)
101
+ | X_50 = pca.fit_transform(X)
102
+ |
103
+ | # Using TSNE to further reduce to 2 dimensions
104
+ | from sklearn.manifold import TSNE
105
+ | model_tsne = TSNE(n_components=2, random_state=0)
106
+ | Y = model_tsne.fit_transform(X_50)
107
+ |
108
+ | # Show the scatter plot
109
+ | import matplotlib.pyplot as plt
110
+ | plt.scatter(Y[:,0], Y[:,1], 20)
111
+ |
112
+ | # Add labels
113
+ | for label, x, y in zip(labels, Y[:, 0], Y[:, 1]):
114
+ | plt.annotate(label, xy = (x,y), xytext = (0, 0), textcoords = 'offset points', size = 10)
115
+ |
116
+ | plt.show()
117
+
118
+ ------------------------------
119
+ Prune the trained binary model
120
+ ------------------------------
121
+
122
+ Here is the supporting code to extract part of the binary model (GoogleNews-vectors-negative300.bin.gz) from https://code.google.com/p/word2vec/
123
+ We use this code to get the `word2vec_sample` model.
124
+
125
+ | import gensim
126
+ | # Load the binary model
127
+ | model = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary = True)
128
+ |
129
+ | # Only output word that appear in the Brown corpus
130
+ | from nltk.corpus import brown
131
+ | words = set(brown.words())
132
+ | print(len(words))
133
+ |
134
+ | # Output presented word to a temporary file
135
+ | out_file = 'pruned.word2vec.txt'
136
+ | with open(out_file,'w') as f:
137
+ | word_presented = words.intersection(model.index_to_key)
138
+ | f.write('{} {}\n'.format(len(word_presented),len(model['word'])))
139
+ |
140
+ | for word in word_presented:
141
+ | f.write('{} {}\n'.format(word, ' '.join(str(value) for value in model[word])))
llmeval-env/lib/python3.10/site-packages/nltk/test/gluesemantics_malt.doctest ADDED
@@ -0,0 +1,69 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ .. see also: gluesemantics.doctest
5
+
6
+ ==============================================================================
7
+ Glue Semantics
8
+ ==============================================================================
9
+
10
+ >>> from nltk.test.gluesemantics_malt_fixt import setup_module
11
+ >>> setup_module()
12
+
13
+ >>> from nltk.sem.glue import *
14
+ >>> nltk.sem.logic._counter._value = 0
15
+
16
+ --------------------------------
17
+ Initialize the Dependency Parser
18
+ --------------------------------
19
+ >>> from nltk.parse.malt import MaltParser
20
+
21
+ >>> tagger = RegexpTagger(
22
+ ... [('^(John|Mary)$', 'NNP'),
23
+ ... ('^(sees|chases)$', 'VB'),
24
+ ... ('^(a)$', 'ex_quant'),
25
+ ... ('^(every)$', 'univ_quant'),
26
+ ... ('^(girl|dog)$', 'NN')
27
+ ... ]).tag
28
+ >>> depparser = MaltParser(tagger=tagger)
29
+
30
+ --------------------
31
+ Automated Derivation
32
+ --------------------
33
+ >>> glue = Glue(depparser=depparser)
34
+ >>> readings = glue.parse_to_meaning('every girl chases a dog'.split())
35
+ >>> for reading in sorted([r.simplify().normalize() for r in readings], key=str):
36
+ ... print(reading.normalize())
37
+ all z1.(girl(z1) -> exists z2.(dog(z2) & chases(z1,z2)))
38
+ exists z1.(dog(z1) & all z2.(girl(z2) -> chases(z2,z1)))
39
+
40
+ >>> drtglue = DrtGlue(depparser=depparser)
41
+ >>> readings = drtglue.parse_to_meaning('every girl chases a dog'.split())
42
+ >>> for reading in sorted([r.simplify().normalize() for r in readings], key=str):
43
+ ... print(reading)
44
+ ([],[(([z1],[girl(z1)]) -> ([z2],[dog(z2), chases(z1,z2)]))])
45
+ ([z1],[dog(z1), (([z2],[girl(z2)]) -> ([],[chases(z2,z1)]))])
46
+
47
+ --------------
48
+ With inference
49
+ --------------
50
+
51
+ Checking for equality of two DRSs is very useful when generating readings of a sentence.
52
+ For example, the ``glue`` module generates two readings for the sentence
53
+ *John sees Mary*:
54
+
55
+ >>> from nltk.sem.glue import DrtGlue
56
+ >>> readings = drtglue.parse_to_meaning('John sees Mary'.split())
57
+ >>> for drs in sorted([r.simplify().normalize() for r in readings], key=str):
58
+ ... print(drs)
59
+ ([z1,z2],[John(z1), Mary(z2), sees(z1,z2)])
60
+ ([z1,z2],[Mary(z1), John(z2), sees(z2,z1)])
61
+
62
+ However, it is easy to tell that these two readings are logically the
63
+ same, and therefore one of them is superfluous. We can use the theorem prover
64
+ to determine this equivalence, and then delete one of them. A particular
65
+ theorem prover may be specified, or the argument may be left off to use the
66
+ default.
67
+
68
+ >>> readings[0].equiv(readings[1])
69
+ True
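+
+ A particular prover can also be passed in explicitly; for example (a
+ sketch, assuming the pure-Python tableau prover shipped with NLTK):
+
+ | from nltk.inference import TableauProver
+ | readings[0].equiv(readings[1], TableauProver())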
llmeval-env/lib/python3.10/site-packages/nltk/test/grammartestsuites.doctest ADDED
@@ -0,0 +1,109 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==========================
5
+ Test Suites for Grammars
6
+ ==========================
7
+
8
+ Sentences in the test suite are divided into two classes:
9
+
10
+ - grammatical (*accept*) and
11
+ - ungrammatical (*reject*).
12
+
13
+ If a sentence should parse according to the grammar, the value of
14
+ ``trees`` will be a non-empty list. If a sentence should be rejected
15
+ according to the grammar, then the value of ``trees`` will be ``None``.
16
+
17
+ >>> from nltk.parse import TestGrammar
18
+ >>> germantest1 = {}
19
+ >>> germantest1['doc'] = "Tests for person agreement"
20
+ >>> germantest1['accept'] = [
21
+ ... 'ich komme',
22
+ ... 'ich sehe mich',
23
+ ... 'du kommst',
24
+ ... 'du siehst mich',
25
+ ... 'sie kommt',
26
+ ... 'sie sieht mich',
27
+ ... 'ihr kommt',
28
+ ... 'wir kommen',
29
+ ... 'sie kommen',
30
+ ... 'du magst mich',
31
+ ... 'er mag mich',
32
+ ... 'du folgst mir',
33
+ ... 'sie hilft mir',
34
+ ... ]
35
+ >>> germantest1['reject'] = [
36
+ ... 'ich kommt',
37
+ ... 'ich kommst',
38
+ ... 'ich siehst mich',
39
+ ... 'du komme',
40
+ ... 'du sehe mich',
41
+ ... 'du kommt',
42
+ ... 'er komme',
43
+ ... 'er siehst mich',
44
+ ... 'wir komme',
45
+ ... 'wir kommst',
46
+ ... 'die Katzen kommst',
47
+ ... 'sie komme',
48
+ ... 'sie kommst',
49
+ ... 'du mag mich',
50
+ ... 'er magst mich',
51
+ ... 'du folgt mir',
52
+ ... 'sie hilfst mir',
53
+ ... ]
54
+ >>> germantest2 = {}
55
+ >>> germantest2['doc'] = "Tests for number agreement"
56
+ >>> germantest2['accept'] = [
57
+ ... 'der Hund kommt',
58
+ ... 'die Hunde kommen',
59
+ ... 'ich komme',
60
+ ... 'wir kommen',
61
+ ... 'ich sehe die Katzen',
62
+ ... 'ich folge den Katzen',
63
+ ... 'ich sehe die Katzen',
64
+ ... 'ich folge den Katzen',
65
+ ... 'wir sehen die Katzen',
66
+ ... 'wir folgen den Katzen'
67
+ ... ]
68
+ >>> germantest2['reject'] = [
69
+ ... 'ich kommen',
70
+ ... 'wir komme',
71
+ ... 'der Hunde kommt',
72
+ ... 'der Hunde kommen',
73
+ ... 'die Katzen kommt',
74
+ ... 'ich sehe der Hunde',
75
+ ... 'ich folge den Hund',
76
+ ... 'ich sehen der Hunde',
77
+ ... 'ich folgen den Hund',
78
+ ... 'wir sehe die Katzen',
79
+ ... 'wir folge den Katzen'
80
+ ... ]
81
+ >>> germantest3 = {}
82
+ >>> germantest3['doc'] = "Tests for case government and subcategorization"
83
+ >>> germantest3['accept'] = [
84
+ ... 'der Hund sieht mich',
85
+ ... 'der Hund kommt',
86
+ ... 'ich sehe den Hund',
87
+ ... 'ich helfe dem Hund',
88
+ ... ]
89
+ >>> germantest3['reject'] = [
90
+ ... 'ich sehe',
91
+ ... 'ich helfe',
92
+ ... 'ich komme den Hund',
93
+ ... 'ich sehe den Hund die Katzen',
94
+ ... 'du hilfst mich',
95
+ ... 'du siehst mir',
96
+ ... 'du siehst ich',
97
+ ... 'der Hunde kommt mich',
98
+ ... 'die Hunde sehe die Hunde',
99
+ ... 'der Hund sehe die Hunde',
100
+ ... 'ich hilft den Hund',
101
+ ... 'ich hilft der Hund',
102
+ ... 'ich sehe dem Hund',
103
+ ... ]
104
+ >>> germantestsuites = [germantest1, germantest2, germantest3]
105
+ >>> tester = TestGrammar('grammars/book_grammars/german.fcfg', germantestsuites)
106
+ >>> tester.run()
107
+ Tests for person agreement: All tests passed!
108
+ Tests for number agreement: All tests passed!
109
+ Tests for case government and subcategorization: All tests passed!
llmeval-env/lib/python3.10/site-packages/nltk/test/internals.doctest ADDED
@@ -0,0 +1,161 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ==========================================
5
+ Unit tests for the nltk.internals module
6
+ ==========================================
7
+
8
+ overridden()
9
+ ~~~~~~~~~~~~
10
+ >>> from nltk.internals import overridden
11
+
12
+ The typical use case is in defining methods for an interface or
13
+ abstract base class, in such a way that subclasses don't have to
14
+ implement all of the methods:
15
+
16
+ >>> class EaterI(object):
17
+ ... '''Subclass must define eat() or batch_eat().'''
18
+ ... def eat(self, food):
19
+ ... if overridden(self.batch_eat):
20
+ ... return self.batch_eat([food])[0]
21
+ ... else:
22
+ ... raise NotImplementedError()
23
+ ... def batch_eat(self, foods):
24
+ ... return [self.eat(food) for food in foods]
25
+
26
+ As long as a subclass implements one method, it will be used to
27
+ perform the other method:
28
+
29
+ >>> class GoodEater1(EaterI):
30
+ ... def eat(self, food):
31
+ ... return 'yum'
32
+ >>> GoodEater1().eat('steak')
33
+ 'yum'
34
+ >>> GoodEater1().batch_eat(['steak', 'peas'])
35
+ ['yum', 'yum']
36
+
37
+ >>> class GoodEater2(EaterI):
38
+ ... def batch_eat(self, foods):
39
+ ... return ['yum' for food in foods]
40
+ >>> GoodEater2().eat('steak')
41
+ 'yum'
42
+ >>> GoodEater2().batch_eat(['steak', 'peas'])
43
+ ['yum', 'yum']
44
+
45
+ But if a subclass doesn't implement either one, then they'll get an
46
+ error when they try to call them (n.b. this is better than infinite
47
+ recursion):
48
+
49
+ >>> class BadEater1(EaterI):
50
+ ... pass
51
+ >>> BadEater1().eat('steak')
52
+ Traceback (most recent call last):
53
+ . . .
54
+ NotImplementedError
55
+ >>> BadEater1().batch_eat(['steak', 'peas'])
56
+ Traceback (most recent call last):
57
+ . . .
58
+ NotImplementedError
59
+
60
+ Trying to use the abstract base class itself will also result in an
61
+ error:
62
+
63
+ >>> class EaterI(EaterI):
64
+ ... pass
65
+ >>> EaterI().eat('steak')
66
+ Traceback (most recent call last):
67
+ . . .
68
+ NotImplementedError
69
+ >>> EaterI().batch_eat(['steak', 'peas'])
70
+ Traceback (most recent call last):
71
+ . . .
72
+ NotImplementedError
73
+
74
+ It's ok to use intermediate abstract classes:
75
+
76
+ >>> class AbstractEater(EaterI):
77
+ ... pass
78
+
79
+ >>> class GoodEater3(AbstractEater):
80
+ ... def eat(self, food):
81
+ ... return 'yum'
82
+ ...
83
+ >>> GoodEater3().eat('steak')
84
+ 'yum'
85
+ >>> GoodEater3().batch_eat(['steak', 'peas'])
86
+ ['yum', 'yum']
87
+
88
+ >>> class GoodEater4(AbstractEater):
89
+ ... def batch_eat(self, foods):
90
+ ... return ['yum' for food in foods]
91
+ >>> GoodEater4().eat('steak')
92
+ 'yum'
93
+ >>> GoodEater4().batch_eat(['steak', 'peas'])
94
+ ['yum', 'yum']
95
+
96
+ >>> class BadEater2(AbstractEater):
97
+ ... pass
98
+ >>> BadEater2().eat('steak')
99
+ Traceback (most recent call last):
100
+ . . .
101
+ NotImplementedError
102
+ >>> BadEater2().batch_eat(['steak', 'peas'])
103
+ Traceback (most recent call last):
104
+ . . .
105
+ NotImplementedError
106
+
107
+ Here are some extra tests:
108
+
109
+ >>> class A(object):
110
+ ... def f(x): pass
111
+ >>> class B(A):
112
+ ... def f(x): pass
113
+ >>> class C(A): pass
114
+ >>> class D(B): pass
115
+
116
+ >>> overridden(A().f)
117
+ False
118
+ >>> overridden(B().f)
119
+ True
120
+ >>> overridden(C().f)
121
+ False
122
+ >>> overridden(D().f)
123
+ True
124
+
125
+ It works for classic classes, too:
126
+
127
+ >>> class A:
128
+ ... def f(x): pass
129
+ >>> class B(A):
130
+ ... def f(x): pass
131
+ >>> class C(A): pass
132
+ >>> class D(B): pass
133
+ >>> overridden(A().f)
134
+ False
135
+ >>> overridden(B().f)
136
+ True
137
+ >>> overridden(C().f)
138
+ False
139
+ >>> overridden(D().f)
140
+ True
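+
+ Under the hood, a check like this boils down to asking whether the function
+ bound to the instance is the one defined on the interface class or a
+ replacement supplied by a subclass. A rough sketch of that idea (a
+ simplification, not NLTK's actual ``overridden()``, which discovers the
+ defining class by itself)::
+
+     def is_overridden(bound_method, base):
+         # overridden iff the function object bound to the instance differs
+         # from the one defined on `base`
+         return bound_method.__func__ is not getattr(base, bound_method.__name__)
+
+     # e.g. is_overridden(B().f, A) is True, is_overridden(C().f, A) is False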
141
+
142
+
143
+ read_str()
144
+ ~~~~~~~~~~~~
145
+ >>> from nltk.internals import read_str
146
+
147
+ Test valid scenarios
148
+
149
+ >>> read_str("'valid string'", 0)
150
+ ('valid string', 14)
151
+
152
+ Now test invalid scenarios
153
+
154
+ >>> read_str("should error", 0)
155
+ Traceback (most recent call last):
156
+ ...
157
+ nltk.internals.ReadError: Expected open quote at 0
158
+ >>> read_str("'should error", 0)
159
+ Traceback (most recent call last):
160
+ ...
161
+ nltk.internals.ReadError: Expected close quote at 1
llmeval-env/lib/python3.10/site-packages/nltk/test/lm.doctest ADDED
@@ -0,0 +1,135 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ .. -*- coding: utf-8 -*-
5
+
6
+
7
+ Regression Tests
8
+ ================
9
+
10
+
11
+ Issue 167
12
+ ---------
13
+ https://github.com/nltk/nltk/issues/167
14
+
15
+ >>> from nltk.corpus import brown
16
+ >>> from nltk.lm.preprocessing import padded_everygram_pipeline
17
+ >>> ngram_order = 3
18
+ >>> train_data, vocab_data = padded_everygram_pipeline(
19
+ ... ngram_order,
20
+ ... brown.sents(categories="news")
21
+ ... )
22
+
23
+ >>> from nltk.lm import WittenBellInterpolated
24
+ >>> lm = WittenBellInterpolated(ngram_order)
25
+ >>> lm.fit(train_data, vocab_data)
26
+
27
+
28
+
29
+
30
+ A sentence containing an unseen word should result in infinite entropy because
31
+ Witten-Bell is based ultimately on MLE, which cannot handle unseen ngrams.
32
+ Crucially, it shouldn't raise any exceptions for unseen words.
33
+
34
+ >>> from nltk.util import ngrams
35
+ >>> sent = ngrams("This is a sentence with the word aaddvark".split(), 3)
36
+ >>> lm.entropy(sent)
37
+ inf
38
+
39
+ If we remove all unseen ngrams from the sentence, we'll get a non-infinite value
40
+ for the entropy.
41
+
42
+ >>> sent = ngrams("This is a sentence".split(), 3)
43
+ >>> round(lm.entropy(sent), 14)
44
+ 10.23701322869105
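+
+ (The entropy reported here is the average negative log2 score per ngram, so a
+ single unseen ngram with probability 0 is enough to make the whole average
+ infinite. A sketch of that average, which should agree with ``lm.entropy`` on
+ the same ngrams if that is indeed how the entropy is defined)::
+
+     from statistics import mean
+     sent = list(ngrams("This is a sentence".split(), 3))
+     avg_neg_logscore = -mean(lm.logscore(w[-1], w[:-1]) for w in sent)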
45
+
46
+
47
+ Issue 367
48
+ ---------
49
+ https://github.com/nltk/nltk/issues/367
50
+
51
+ Reproducing Dan Blanchard's example:
52
+ https://github.com/nltk/nltk/issues/367#issuecomment-14646110
53
+
54
+ >>> from nltk.lm import Lidstone, Vocabulary
55
+ >>> word_seq = list('aaaababaaccbacb')
56
+ >>> ngram_order = 2
57
+ >>> from nltk.util import everygrams
58
+ >>> train_data = [everygrams(word_seq, max_len=ngram_order)]
59
+ >>> V = Vocabulary(['a', 'b', 'c', ''])
60
+ >>> lm = Lidstone(0.2, ngram_order, vocabulary=V)
61
+ >>> lm.fit(train_data)
62
+
63
+ For doctest to work we have to sort the vocabulary keys.
64
+
65
+ >>> V_keys = sorted(V)
66
+ >>> round(sum(lm.score(w, ("b",)) for w in V_keys), 6)
67
+ 1.0
68
+ >>> round(sum(lm.score(w, ("a",)) for w in V_keys), 6)
69
+ 1.0
70
+
71
+ >>> [lm.score(w, ("b",)) for w in V_keys]
72
+ [0.05, 0.05, 0.8, 0.05, 0.05]
73
+ >>> [round(lm.score(w, ("a",)), 4) for w in V_keys]
74
+ [0.0222, 0.0222, 0.4667, 0.2444, 0.2444]
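+
+ The 0.4667 for P('a' | 'a') can also be checked by hand: in ``word_seq`` the
+ context 'a' starts a bigram 8 times, 4 of which continue with 'a', and the
+ smoothing adds gamma = 0.2 for each of the 5 vocabulary items listed above.
+ A sketch of that Lidstone ratio::
+
+     gamma, count, total, vocab_size = 0.2, 4, 8, 5
+     assert round((count + gamma) / (total + gamma * vocab_size), 4) == 0.4667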
75
+
76
+
77
+ Here we reproduce @afourney's comment:
78
+ https://github.com/nltk/nltk/issues/367#issuecomment-15686289
79
+
80
+ >>> sent = ['foo', 'foo', 'foo', 'foo', 'bar', 'baz']
81
+ >>> ngram_order = 3
82
+ >>> from nltk.lm.preprocessing import padded_everygram_pipeline
83
+ >>> train_data, vocab_data = padded_everygram_pipeline(ngram_order, [sent])
84
+ >>> from nltk.lm import Lidstone
85
+ >>> lm = Lidstone(0.2, ngram_order)
86
+ >>> lm.fit(train_data, vocab_data)
87
+
88
+ The vocabulary includes the "UNK" symbol as well as two padding symbols.
89
+
90
+ >>> len(lm.vocab)
91
+ 6
92
+ >>> word = "foo"
93
+ >>> context = ("bar", "baz")
94
+
95
+ The raw counts.
96
+
97
+ >>> lm.context_counts(context)[word]
98
+ 0
99
+ >>> lm.context_counts(context).N()
100
+ 1
101
+
102
+ Counts with Lidstone smoothing.
103
+
104
+ >>> lm.context_counts(context)[word] + lm.gamma
105
+ 0.2
106
+ >>> lm.context_counts(context).N() + len(lm.vocab) * lm.gamma
107
+ 2.2
108
+
109
+ Without any backoff, just using Lidstone smoothing, P("foo" | "bar", "baz") should be:
110
+ 0.2 / 2.2 ~= 0.090909
111
+
112
+ >>> round(lm.score(word, context), 6)
113
+ 0.090909
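+
+ That is exactly the ratio of the two smoothed quantities shown above,
+ i.e. (count + gamma) / (N + |V| * gamma). A quick cross-check using the
+ model's own accessors::
+
+     numerator = lm.context_counts(context)[word] + lm.gamma
+     denominator = lm.context_counts(context).N() + len(lm.vocab) * lm.gamma
+     assert round(numerator / denominator, 6) == round(lm.score(word, context), 6)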
114
+
115
+
116
+ Issue 380
117
+ ---------
118
+ https://github.com/nltk/nltk/issues/380
119
+
120
+ Reproducing setup akin to this comment:
121
+ https://github.com/nltk/nltk/issues/380#issue-12879030
122
+
123
+ For speed, take only the first 100 sentences of the ``reuters`` corpus; this shouldn't affect the test.
124
+
125
+ >>> from nltk.corpus import reuters
126
+ >>> sents = reuters.sents()[:100]
127
+ >>> ngram_order = 3
128
+ >>> from nltk.lm.preprocessing import padded_everygram_pipeline
129
+ >>> train_data, vocab_data = padded_everygram_pipeline(ngram_order, sents)
130
+
131
+ >>> from nltk.lm import Lidstone
132
+ >>> lm = Lidstone(0.2, ngram_order)
133
+ >>> lm.fit(train_data, vocab_data)
134
+ >>> lm.score("said", ("",)) < 1
135
+ True
llmeval-env/lib/python3.10/site-packages/nltk/test/logic.doctest ADDED
@@ -0,0 +1,1096 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =======================
5
+ Logic & Lambda Calculus
6
+ =======================
7
+
8
+ The `nltk.sem.logic` module allows expressions of First-Order Logic (FOL) to be
9
+ parsed into ``Expression`` objects. In addition to FOL, the parser
10
+ handles lambda-abstraction with variables of higher order.
11
+
12
+ --------
13
+ Overview
14
+ --------
15
+
16
+ >>> from nltk.sem.logic import *
17
+
18
+ The default inventory of logical constants is the following:
19
+
20
+ >>> boolean_ops()
21
+ negation -
22
+ conjunction &
23
+ disjunction |
24
+ implication ->
25
+ equivalence <->
26
+ >>> equality_preds()
27
+ equality =
28
+ inequality !=
29
+ >>> binding_ops()
30
+ existential exists
31
+ universal all
32
+ lambda \
33
+
34
+ ----------------
35
+ Regression Tests
36
+ ----------------
37
+
38
+
39
+ Untyped Logic
40
+ +++++++++++++
41
+
42
+ Process logical expressions conveniently:
43
+
44
+ >>> read_expr = Expression.fromstring
45
+
46
+ Test for equality under alpha-conversion
47
+ ========================================
48
+
49
+ >>> e1 = read_expr('exists x.P(x)')
50
+ >>> print(e1)
51
+ exists x.P(x)
52
+ >>> e2 = e1.alpha_convert(Variable('z'))
53
+ >>> print(e2)
54
+ exists z.P(z)
55
+ >>> e1 == e2
56
+ True
57
+
58
+
59
+ >>> l = read_expr(r'\X.\X.X(X)(1)').simplify()
60
+ >>> id = read_expr(r'\X.X(X)')
61
+ >>> l == id
62
+ True
63
+
64
+ Test numerals
65
+ =============
66
+
67
+ >>> zero = read_expr(r'\F x.x')
68
+ >>> one = read_expr(r'\F x.F(x)')
69
+ >>> two = read_expr(r'\F x.F(F(x))')
70
+ >>> three = read_expr(r'\F x.F(F(F(x)))')
71
+ >>> four = read_expr(r'\F x.F(F(F(F(x))))')
72
+ >>> succ = read_expr(r'\N F x.F(N(F,x))')
73
+ >>> plus = read_expr(r'\M N F x.M(F,N(F,x))')
74
+ >>> mult = read_expr(r'\M N F.M(N(F))')
75
+ >>> pred = read_expr(r'\N F x.(N(\G H.H(G(F)))(\u.x)(\u.u))')
76
+ >>> v1 = ApplicationExpression(succ, zero).simplify()
77
+ >>> v1 == one
78
+ True
79
+ >>> v2 = ApplicationExpression(succ, v1).simplify()
80
+ >>> v2 == two
81
+ True
82
+ >>> v3 = ApplicationExpression(ApplicationExpression(plus, v1), v2).simplify()
83
+ >>> v3 == three
84
+ True
85
+ >>> v4 = ApplicationExpression(ApplicationExpression(mult, v2), v2).simplify()
86
+ >>> v4 == four
87
+ True
88
+ >>> v5 = ApplicationExpression(pred, ApplicationExpression(pred, v4)).simplify()
89
+ >>> v5 == two
90
+ True
91
+
92
+ Overloaded operators also exist, for convenience.
93
+
94
+ >>> print(succ(zero).simplify() == one)
95
+ True
96
+ >>> print(plus(one,two).simplify() == three)
97
+ True
98
+ >>> print(mult(two,two).simplify() == four)
99
+ True
100
+ >>> print(pred(pred(four)).simplify() == two)
101
+ True
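+
+ These are the usual Church numerals: the numeral *n* applies ``F`` to ``x``
+ *n* times, and ``succ`` wraps one more application around its argument. For
+ instance, ``succ(zero)`` reduces by hand as follows (a worked reduction of
+ the definitions above, in the same notation)::
+
+     (\N F x.F(N(F,x)))(\F x.x)      # succ applied to zero
+         -> \F x.F((\F x.x)(F,x))    # substitute N := zero
+         -> \F x.F(x)                # zero discards F, leaving `one`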
102
+
103
+ >>> john = read_expr(r'john')
104
+ >>> man = read_expr(r'\x.man(x)')
105
+ >>> walk = read_expr(r'\x.walk(x)')
106
+ >>> man(john).simplify()
107
+ <ApplicationExpression man(john)>
108
+ >>> print(-walk(john).simplify())
109
+ -walk(john)
110
+ >>> print((man(john) & walk(john)).simplify())
111
+ (man(john) & walk(john))
112
+ >>> print((man(john) | walk(john)).simplify())
113
+ (man(john) | walk(john))
114
+ >>> print((man(john) > walk(john)).simplify())
115
+ (man(john) -> walk(john))
116
+ >>> print((man(john) < walk(john)).simplify())
117
+ (man(john) <-> walk(john))
118
+
119
+ Python's built-in lambda operator can also be used with Expressions.
120
+
121
+ >>> john = VariableExpression(Variable('john'))
122
+ >>> run_var = VariableExpression(Variable('run'))
123
+ >>> run = lambda x: run_var(x)
124
+ >>> run(john)
125
+ <ApplicationExpression run(john)>
126
+
127
+
128
+ ``betaConversionTestSuite.pl``
129
+ ------------------------------
130
+
131
+ Tests based on Blackburn & Bos' book, *Representation and Inference
132
+ for Natural Language*.
133
+
134
+ >>> x1 = read_expr(r'\P.P(mia)(\x.walk(x))').simplify()
135
+ >>> x2 = read_expr(r'walk(mia)').simplify()
136
+ >>> x1 == x2
137
+ True
138
+
139
+ >>> x1 = read_expr(r'exists x.(man(x) & ((\P.exists x.(woman(x) & P(x)))(\y.love(x,y))))').simplify()
140
+ >>> x2 = read_expr(r'exists x.(man(x) & exists y.(woman(y) & love(x,y)))').simplify()
141
+ >>> x1 == x2
142
+ True
143
+ >>> x1 = read_expr(r'\a.sleep(a)(mia)').simplify()
144
+ >>> x2 = read_expr(r'sleep(mia)').simplify()
145
+ >>> x1 == x2
146
+ True
147
+ >>> x1 = read_expr(r'\a.\b.like(b,a)(mia)').simplify()
148
+ >>> x2 = read_expr(r'\b.like(b,mia)').simplify()
149
+ >>> x1 == x2
150
+ True
151
+ >>> x1 = read_expr(r'\a.(\b.like(b,a)(vincent))').simplify()
152
+ >>> x2 = read_expr(r'\a.like(vincent,a)').simplify()
153
+ >>> x1 == x2
154
+ True
155
+ >>> x1 = read_expr(r'\a.((\b.like(b,a)(vincent)) & sleep(a))').simplify()
156
+ >>> x2 = read_expr(r'\a.(like(vincent,a) & sleep(a))').simplify()
157
+ >>> x1 == x2
158
+ True
159
+
160
+ >>> x1 = read_expr(r'(\a.\b.like(b,a)(mia)(vincent))').simplify()
161
+ >>> x2 = read_expr(r'like(vincent,mia)').simplify()
162
+ >>> x1 == x2
163
+ True
164
+
165
+ >>> x1 = read_expr(r'P((\a.sleep(a)(vincent)))').simplify()
166
+ >>> x2 = read_expr(r'P(sleep(vincent))').simplify()
167
+ >>> x1 == x2
168
+ True
169
+
170
+ >>> x1 = read_expr(r'\A.A((\b.sleep(b)(vincent)))').simplify()
171
+ >>> x2 = read_expr(r'\A.A(sleep(vincent))').simplify()
172
+ >>> x1 == x2
173
+ True
174
+
175
+ >>> x1 = read_expr(r'\A.A(sleep(vincent))').simplify()
176
+ >>> x2 = read_expr(r'\A.A(sleep(vincent))').simplify()
177
+ >>> x1 == x2
178
+ True
179
+
180
+ >>> x1 = read_expr(r'(\A.A(vincent)(\b.sleep(b)))').simplify()
181
+ >>> x2 = read_expr(r'sleep(vincent)').simplify()
182
+ >>> x1 == x2
183
+ True
184
+
185
+ >>> x1 = read_expr(r'\A.believe(mia,A(vincent))(\b.sleep(b))').simplify()
186
+ >>> x2 = read_expr(r'believe(mia,sleep(vincent))').simplify()
187
+ >>> x1 == x2
188
+ True
189
+
190
+ >>> x1 = read_expr(r'(\A.(A(vincent) & A(mia)))(\b.sleep(b))').simplify()
191
+ >>> x2 = read_expr(r'(sleep(vincent) & sleep(mia))').simplify()
192
+ >>> x1 == x2
193
+ True
194
+
195
+ >>> x1 = read_expr(r'\A.\B.(\C.C(A(vincent))(\d.probably(d)) & (\C.C(B(mia))(\d.improbably(d))))(\f.walk(f))(\f.talk(f))').simplify()
196
+ >>> x2 = read_expr(r'(probably(walk(vincent)) & improbably(talk(mia)))').simplify()
197
+ >>> x1 == x2
198
+ True
199
+
200
+ >>> x1 = read_expr(r'(\a.\b.(\C.C(a,b)(\d.\f.love(d,f))))(jules)(mia)').simplify()
201
+ >>> x2 = read_expr(r'love(jules,mia)').simplify()
202
+ >>> x1 == x2
203
+ True
204
+
205
+ >>> x1 = read_expr(r'(\A.\B.exists c.(A(c) & B(c)))(\d.boxer(d),\d.sleep(d))').simplify()
206
+ >>> x2 = read_expr(r'exists c.(boxer(c) & sleep(c))').simplify()
207
+ >>> x1 == x2
208
+ True
209
+
210
+ >>> x1 = read_expr(r'\A.Z(A)(\c.\a.like(a,c))').simplify()
211
+ >>> x2 = read_expr(r'Z(\c.\a.like(a,c))').simplify()
212
+ >>> x1 == x2
213
+ True
214
+
215
+ >>> x1 = read_expr(r'\A.\b.A(b)(\c.\b.like(b,c))').simplify()
216
+ >>> x2 = read_expr(r'\b.(\c.\b.like(b,c)(b))').simplify()
217
+ >>> x1 == x2
218
+ True
219
+
220
+ >>> x1 = read_expr(r'(\a.\b.(\C.C(a,b)(\b.\a.loves(b,a))))(jules)(mia)').simplify()
221
+ >>> x2 = read_expr(r'loves(jules,mia)').simplify()
222
+ >>> x1 == x2
223
+ True
224
+
225
+ >>> x1 = read_expr(r'(\A.\b.(exists b.A(b) & A(b)))(\c.boxer(c))(vincent)').simplify()
226
+ >>> x2 = read_expr(r'((exists b.boxer(b)) & boxer(vincent))').simplify()
227
+ >>> x1 == x2
228
+ True
229
+
230
+ Test Parser
231
+ ===========
232
+
233
+ >>> print(read_expr(r'john'))
234
+ john
235
+ >>> print(read_expr(r'x'))
236
+ x
237
+ >>> print(read_expr(r'-man(x)'))
238
+ -man(x)
239
+ >>> print(read_expr(r'--man(x)'))
240
+ --man(x)
241
+ >>> print(read_expr(r'(man(x))'))
242
+ man(x)
243
+ >>> print(read_expr(r'((man(x)))'))
244
+ man(x)
245
+ >>> print(read_expr(r'man(x) <-> tall(x)'))
246
+ (man(x) <-> tall(x))
247
+ >>> print(read_expr(r'(man(x) <-> tall(x))'))
248
+ (man(x) <-> tall(x))
249
+ >>> print(read_expr(r'(man(x) & tall(x) & walks(x))'))
250
+ (man(x) & tall(x) & walks(x))
251
+ >>> print(read_expr(r'(man(x) & tall(x) & walks(x))').first)
252
+ (man(x) & tall(x))
253
+ >>> print(read_expr(r'man(x) | tall(x) & walks(x)'))
254
+ (man(x) | (tall(x) & walks(x)))
255
+ >>> print(read_expr(r'((man(x) & tall(x)) | walks(x))'))
256
+ ((man(x) & tall(x)) | walks(x))
257
+ >>> print(read_expr(r'man(x) & (tall(x) | walks(x))'))
258
+ (man(x) & (tall(x) | walks(x)))
259
+ >>> print(read_expr(r'(man(x) & (tall(x) | walks(x)))'))
260
+ (man(x) & (tall(x) | walks(x)))
261
+ >>> print(read_expr(r'P(x) -> Q(x) <-> R(x) | S(x) & T(x)'))
262
+ ((P(x) -> Q(x)) <-> (R(x) | (S(x) & T(x))))
263
+ >>> print(read_expr(r'exists x.man(x)'))
264
+ exists x.man(x)
265
+ >>> print(read_expr(r'exists x.(man(x) & tall(x))'))
266
+ exists x.(man(x) & tall(x))
267
+ >>> print(read_expr(r'exists x.(man(x) & tall(x) & walks(x))'))
268
+ exists x.(man(x) & tall(x) & walks(x))
269
+ >>> print(read_expr(r'-P(x) & Q(x)'))
270
+ (-P(x) & Q(x))
271
+ >>> read_expr(r'-P(x) & Q(x)') == read_expr(r'(-P(x)) & Q(x)')
272
+ True
273
+ >>> print(read_expr(r'\x.man(x)'))
274
+ \x.man(x)
275
+ >>> print(read_expr(r'\x.man(x)(john)'))
276
+ \x.man(x)(john)
277
+ >>> print(read_expr(r'\x.man(x)(john) & tall(x)'))
278
+ (\x.man(x)(john) & tall(x))
279
+ >>> print(read_expr(r'\x.\y.sees(x,y)'))
280
+ \x y.sees(x,y)
281
+ >>> print(read_expr(r'\x y.sees(x,y)'))
282
+ \x y.sees(x,y)
283
+ >>> print(read_expr(r'\x.\y.sees(x,y)(a)'))
284
+ (\x y.sees(x,y))(a)
285
+ >>> print(read_expr(r'\x y.sees(x,y)(a)'))
286
+ (\x y.sees(x,y))(a)
287
+ >>> print(read_expr(r'\x.\y.sees(x,y)(a)(b)'))
288
+ ((\x y.sees(x,y))(a))(b)
289
+ >>> print(read_expr(r'\x y.sees(x,y)(a)(b)'))
290
+ ((\x y.sees(x,y))(a))(b)
291
+ >>> print(read_expr(r'\x.\y.sees(x,y)(a,b)'))
292
+ ((\x y.sees(x,y))(a))(b)
293
+ >>> print(read_expr(r'\x y.sees(x,y)(a,b)'))
294
+ ((\x y.sees(x,y))(a))(b)
295
+ >>> print(read_expr(r'((\x.\y.sees(x,y))(a))(b)'))
296
+ ((\x y.sees(x,y))(a))(b)
297
+ >>> print(read_expr(r'P(x)(y)(z)'))
298
+ P(x,y,z)
299
+ >>> print(read_expr(r'P(Q)'))
300
+ P(Q)
301
+ >>> print(read_expr(r'P(Q(x))'))
302
+ P(Q(x))
303
+ >>> print(read_expr(r'(\x.exists y.walks(x,y))(x)'))
304
+ (\x.exists y.walks(x,y))(x)
305
+ >>> print(read_expr(r'exists x.(x = john)'))
306
+ exists x.(x = john)
307
+ >>> print(read_expr(r'((\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))'))
308
+ ((\P Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))
309
+ >>> a = read_expr(r'exists c.exists b.A(b,c) & A(b,c)')
310
+ >>> b = read_expr(r'(exists c.(exists b.A(b,c))) & A(b,c)')
311
+ >>> print(a == b)
312
+ True
313
+ >>> a = read_expr(r'exists c.(exists b.A(b,c) & A(b,c))')
314
+ >>> b = read_expr(r'exists c.((exists b.A(b,c)) & A(b,c))')
315
+ >>> print(a == b)
316
+ True
317
+ >>> print(read_expr(r'exists x.x = y'))
318
+ exists x.(x = y)
319
+ >>> print(read_expr('A(B)(C)'))
320
+ A(B,C)
321
+ >>> print(read_expr('(A(B))(C)'))
322
+ A(B,C)
323
+ >>> print(read_expr('A((B)(C))'))
324
+ A(B(C))
325
+ >>> print(read_expr('A(B(C))'))
326
+ A(B(C))
327
+ >>> print(read_expr('(A)(B(C))'))
328
+ A(B(C))
329
+ >>> print(read_expr('(((A)))(((B))(((C))))'))
330
+ A(B(C))
331
+ >>> print(read_expr(r'A != B'))
332
+ -(A = B)
333
+ >>> print(read_expr('P(x) & x=y & P(y)'))
334
+ (P(x) & (x = y) & P(y))
335
+ >>> try: print(read_expr(r'\walk.walk(x)'))
336
+ ... except LogicalExpressionException as e: print(e)
337
+ 'walk' is an illegal variable name. Constants may not be abstracted.
338
+ \walk.walk(x)
339
+ ^
340
+ >>> try: print(read_expr(r'all walk.walk(john)'))
341
+ ... except LogicalExpressionException as e: print(e)
342
+ 'walk' is an illegal variable name. Constants may not be quantified.
343
+ all walk.walk(john)
344
+ ^
345
+ >>> try: print(read_expr(r'x(john)'))
346
+ ... except LogicalExpressionException as e: print(e)
347
+ 'x' is an illegal predicate name. Individual variables may not be used as predicates.
348
+ x(john)
349
+ ^
350
+
351
+ >>> from nltk.sem.logic import LogicParser # hack to give access to custom quote chars
352
+ >>> lpq = LogicParser()
353
+ >>> lpq.quote_chars = [("'", "'", "\\", False)]
354
+ >>> print(lpq.parse(r"(man(x) & 'tall\'s,' (x) & walks (x) )"))
355
+ (man(x) & tall's,(x) & walks(x))
356
+ >>> lpq.quote_chars = [("'", "'", "\\", True)]
357
+ >>> print(lpq.parse(r"'tall\'s,'"))
358
+ 'tall\'s,'
359
+ >>> print(lpq.parse(r"'spaced name(x)'"))
360
+ 'spaced name(x)'
361
+ >>> print(lpq.parse(r"-'tall\'s,'(x)"))
362
+ -'tall\'s,'(x)
363
+ >>> print(lpq.parse(r"(man(x) & 'tall\'s,' (x) & walks (x) )"))
364
+ (man(x) & 'tall\'s,'(x) & walks(x))
365
+
366
+
367
+ Simplify
368
+ ========
369
+
370
+ >>> print(read_expr(r'\x.man(x)(john)').simplify())
371
+ man(john)
372
+ >>> print(read_expr(r'\x.((man(x)))(john)').simplify())
373
+ man(john)
374
+ >>> print(read_expr(r'\x.\y.sees(x,y)(john, mary)').simplify())
375
+ sees(john,mary)
376
+ >>> print(read_expr(r'\x y.sees(x,y)(john, mary)').simplify())
377
+ sees(john,mary)
378
+ >>> print(read_expr(r'\x.\y.sees(x,y)(john)(mary)').simplify())
379
+ sees(john,mary)
380
+ >>> print(read_expr(r'\x y.sees(x,y)(john)(mary)').simplify())
381
+ sees(john,mary)
382
+ >>> print(read_expr(r'\x.\y.sees(x,y)(john)').simplify())
383
+ \y.sees(john,y)
384
+ >>> print(read_expr(r'\x y.sees(x,y)(john)').simplify())
385
+ \y.sees(john,y)
386
+ >>> print(read_expr(r'(\x.\y.sees(x,y)(john))(mary)').simplify())
387
+ sees(john,mary)
388
+ >>> print(read_expr(r'(\x y.sees(x,y)(john))(mary)').simplify())
389
+ sees(john,mary)
390
+ >>> print(read_expr(r'exists x.(man(x) & (\x.exists y.walks(x,y))(x))').simplify())
391
+ exists x.(man(x) & exists y.walks(x,y))
392
+ >>> e1 = read_expr(r'exists x.(man(x) & (\x.exists y.walks(x,y))(y))').simplify()
393
+ >>> e2 = read_expr(r'exists x.(man(x) & exists z1.walks(y,z1))')
394
+ >>> e1 == e2
395
+ True
396
+ >>> print(read_expr(r'(\P Q.exists x.(P(x) & Q(x)))(\x.dog(x))').simplify())
397
+ \Q.exists x.(dog(x) & Q(x))
398
+ >>> print(read_expr(r'((\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))').simplify())
399
+ exists x.(dog(x) & bark(x))
400
+ >>> print(read_expr(r'\P.(P(x)(y))(\a b.Q(a,b))').simplify())
401
+ Q(x,y)
402
+
403
+ Replace
404
+ =======
405
+
406
+ >>> a = read_expr(r'a')
407
+ >>> x = read_expr(r'x')
408
+ >>> y = read_expr(r'y')
409
+ >>> z = read_expr(r'z')
410
+
411
+ >>> print(read_expr(r'man(x)').replace(x.variable, a, False))
412
+ man(a)
413
+ >>> print(read_expr(r'(man(x) & tall(x))').replace(x.variable, a, False))
414
+ (man(a) & tall(a))
415
+ >>> print(read_expr(r'exists x.man(x)').replace(x.variable, a, False))
416
+ exists x.man(x)
417
+ >>> print(read_expr(r'exists x.man(x)').replace(x.variable, a, True))
418
+ exists a.man(a)
419
+ >>> print(read_expr(r'exists x.give(x,y,z)').replace(y.variable, a, False))
420
+ exists x.give(x,a,z)
421
+ >>> print(read_expr(r'exists x.give(x,y,z)').replace(y.variable, a, True))
422
+ exists x.give(x,a,z)
423
+ >>> e1 = read_expr(r'exists x.give(x,y,z)').replace(y.variable, x, False)
424
+ >>> e2 = read_expr(r'exists z1.give(z1,x,z)')
425
+ >>> e1 == e2
426
+ True
427
+ >>> e1 = read_expr(r'exists x.give(x,y,z)').replace(y.variable, x, True)
428
+ >>> e2 = read_expr(r'exists z1.give(z1,x,z)')
429
+ >>> e1 == e2
430
+ True
431
+ >>> print(read_expr(r'\x y z.give(x,y,z)').replace(y.variable, a, False))
432
+ \x y z.give(x,y,z)
433
+ >>> print(read_expr(r'\x y z.give(x,y,z)').replace(y.variable, a, True))
434
+ \x a z.give(x,a,z)
435
+ >>> print(read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, a, False))
436
+ \x y.give(x,y,a)
437
+ >>> print(read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, a, True))
438
+ \x y.give(x,y,a)
439
+ >>> e1 = read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, x, False)
440
+ >>> e2 = read_expr(r'\z1.\y.give(z1,y,x)')
441
+ >>> e1 == e2
442
+ True
443
+ >>> e1 = read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, x, True)
444
+ >>> e2 = read_expr(r'\z1.\y.give(z1,y,x)')
445
+ >>> e1 == e2
446
+ True
447
+ >>> print(read_expr(r'\x.give(x,y,z)').replace(z.variable, y, False))
448
+ \x.give(x,y,y)
449
+ >>> print(read_expr(r'\x.give(x,y,z)').replace(z.variable, y, True))
450
+ \x.give(x,y,y)
451
+
452
+ >>> from nltk.sem import logic
453
+ >>> logic._counter._value = 0
454
+ >>> e1 = read_expr('e1')
455
+ >>> e2 = read_expr('e2')
456
+ >>> print(read_expr('exists e1 e2.(walk(e1) & talk(e2))').replace(e1.variable, e2, True))
457
+ exists e2 e01.(walk(e2) & talk(e01))
458
+
459
+
460
+ Variables / Free
461
+ ================
462
+
463
+ >>> examples = [r'walk(john)',
464
+ ... r'walk(x)',
465
+ ... r'?vp(?np)',
466
+ ... r'see(john,mary)',
467
+ ... r'exists x.walk(x)',
468
+ ... r'\x.see(john,x)',
469
+ ... r'\x.see(john,x)(mary)',
470
+ ... r'P(x)',
471
+ ... r'\P.P(x)',
472
+ ... r'aa(x,bb(y),cc(z),P(w),u)',
473
+ ... r'bo(?det(?n),@x)']
474
+ >>> examples = [read_expr(e) for e in examples]
475
+
476
+ >>> for e in examples:
477
+ ... print('%-25s' % e, sorted(e.free()))
478
+ walk(john) []
479
+ walk(x) [Variable('x')]
480
+ ?vp(?np) []
481
+ see(john,mary) []
482
+ exists x.walk(x) []
483
+ \x.see(john,x) []
484
+ (\x.see(john,x))(mary) []
485
+ P(x) [Variable('P'), Variable('x')]
486
+ \P.P(x) [Variable('x')]
487
+ aa(x,bb(y),cc(z),P(w),u) [Variable('P'), Variable('u'), Variable('w'), Variable('x'), Variable('y'), Variable('z')]
488
+ bo(?det(?n),@x) []
489
+
490
+ >>> for e in examples:
491
+ ... print('%-25s' % e, sorted(e.constants()))
492
+ walk(john) [Variable('john')]
493
+ walk(x) []
494
+ ?vp(?np) [Variable('?np')]
495
+ see(john,mary) [Variable('john'), Variable('mary')]
496
+ exists x.walk(x) []
497
+ \x.see(john,x) [Variable('john')]
498
+ (\x.see(john,x))(mary) [Variable('john'), Variable('mary')]
499
+ P(x) []
500
+ \P.P(x) []
501
+ aa(x,bb(y),cc(z),P(w),u) []
502
+ bo(?det(?n),@x) [Variable('?n'), Variable('@x')]
503
+
504
+ >>> for e in examples:
505
+ ... print('%-25s' % e, sorted(e.predicates()))
506
+ walk(john) [Variable('walk')]
507
+ walk(x) [Variable('walk')]
508
+ ?vp(?np) [Variable('?vp')]
509
+ see(john,mary) [Variable('see')]
510
+ exists x.walk(x) [Variable('walk')]
511
+ \x.see(john,x) [Variable('see')]
512
+ (\x.see(john,x))(mary) [Variable('see')]
513
+ P(x) []
514
+ \P.P(x) []
515
+ aa(x,bb(y),cc(z),P(w),u) [Variable('aa'), Variable('bb'), Variable('cc')]
516
+ bo(?det(?n),@x) [Variable('?det'), Variable('bo')]
517
+
518
+ >>> for e in examples:
519
+ ... print('%-25s' % e, sorted(e.variables()))
520
+ walk(john) []
521
+ walk(x) [Variable('x')]
522
+ ?vp(?np) [Variable('?np'), Variable('?vp')]
523
+ see(john,mary) []
524
+ exists x.walk(x) []
525
+ \x.see(john,x) []
526
+ (\x.see(john,x))(mary) []
527
+ P(x) [Variable('P'), Variable('x')]
528
+ \P.P(x) [Variable('x')]
529
+ aa(x,bb(y),cc(z),P(w),u) [Variable('P'), Variable('u'), Variable('w'), Variable('x'), Variable('y'), Variable('z')]
530
+ bo(?det(?n),@x) [Variable('?det'), Variable('?n'), Variable('@x')]
531
+
532
+
533
+
534
+ `normalize`
535
+ >>> print(read_expr(r'\e083.(walk(e083, z472) & talk(e092, z938))').normalize())
536
+ \e01.(walk(e01,z3) & talk(e02,z4))
537
+
538
+ Typed Logic
539
+ +++++++++++
540
+
541
+ >>> from nltk.sem.logic import LogicParser
542
+ >>> tlp = LogicParser(True)
543
+ >>> print(tlp.parse(r'man(x)').type)
544
+ ?
545
+ >>> print(tlp.parse(r'walk(angus)').type)
546
+ ?
547
+ >>> print(tlp.parse(r'-man(x)').type)
548
+ t
549
+ >>> print(tlp.parse(r'(man(x) <-> tall(x))').type)
550
+ t
551
+ >>> print(tlp.parse(r'exists x.(man(x) & tall(x))').type)
552
+ t
553
+ >>> print(tlp.parse(r'\x.man(x)').type)
554
+ <e,?>
555
+ >>> print(tlp.parse(r'john').type)
556
+ e
557
+ >>> print(tlp.parse(r'\x y.sees(x,y)').type)
558
+ <e,<e,?>>
559
+ >>> print(tlp.parse(r'\x.man(x)(john)').type)
560
+ ?
561
+ >>> print(tlp.parse(r'\x.\y.sees(x,y)(john)').type)
562
+ <e,?>
563
+ >>> print(tlp.parse(r'\x.\y.sees(x,y)(john)(mary)').type)
564
+ ?
565
+ >>> print(tlp.parse(r'\P.\Q.exists x.(P(x) & Q(x))').type)
566
+ <<e,t>,<<e,t>,t>>
567
+ >>> print(tlp.parse(r'\x.y').type)
568
+ <?,e>
569
+ >>> print(tlp.parse(r'\P.P(x)').type)
570
+ <<e,?>,?>
571
+
572
+ >>> parsed = tlp.parse('see(john,mary)')
573
+ >>> print(parsed.type)
574
+ ?
575
+ >>> print(parsed.function)
576
+ see(john)
577
+ >>> print(parsed.function.type)
578
+ <e,?>
579
+ >>> print(parsed.function.function)
580
+ see
581
+ >>> print(parsed.function.function.type)
582
+ <e,<e,?>>
583
+
584
+ >>> parsed = tlp.parse('P(x,y)')
585
+ >>> print(parsed)
586
+ P(x,y)
587
+ >>> print(parsed.type)
588
+ ?
589
+ >>> print(parsed.function)
590
+ P(x)
591
+ >>> print(parsed.function.type)
592
+ <e,?>
593
+ >>> print(parsed.function.function)
594
+ P
595
+ >>> print(parsed.function.function.type)
596
+ <e,<e,?>>
597
+
598
+ >>> print(tlp.parse(r'P').type)
599
+ ?
600
+
601
+ >>> print(tlp.parse(r'P', {'P': 't'}).type)
602
+ t
603
+
604
+ >>> a = tlp.parse(r'P(x)')
605
+ >>> print(a.type)
606
+ ?
607
+ >>> print(a.function.type)
608
+ <e,?>
609
+ >>> print(a.argument.type)
610
+ e
611
+
612
+ >>> a = tlp.parse(r'-P(x)')
613
+ >>> print(a.type)
614
+ t
615
+ >>> print(a.term.type)
616
+ t
617
+ >>> print(a.term.function.type)
618
+ <e,t>
619
+ >>> print(a.term.argument.type)
620
+ e
621
+
622
+ >>> a = tlp.parse(r'P & Q')
623
+ >>> print(a.type)
624
+ t
625
+ >>> print(a.first.type)
626
+ t
627
+ >>> print(a.second.type)
628
+ t
629
+
630
+ >>> a = tlp.parse(r'(P(x) & Q(x))')
631
+ >>> print(a.type)
632
+ t
633
+ >>> print(a.first.type)
634
+ t
635
+ >>> print(a.first.function.type)
636
+ <e,t>
637
+ >>> print(a.first.argument.type)
638
+ e
639
+ >>> print(a.second.type)
640
+ t
641
+ >>> print(a.second.function.type)
642
+ <e,t>
643
+ >>> print(a.second.argument.type)
644
+ e
645
+
646
+ >>> a = tlp.parse(r'\x.P(x)')
647
+ >>> print(a.type)
648
+ <e,?>
649
+ >>> print(a.term.function.type)
650
+ <e,?>
651
+ >>> print(a.term.argument.type)
652
+ e
653
+
654
+ >>> a = tlp.parse(r'\P.P(x)')
655
+ >>> print(a.type)
656
+ <<e,?>,?>
657
+ >>> print(a.term.function.type)
658
+ <e,?>
659
+ >>> print(a.term.argument.type)
660
+ e
661
+
662
+ >>> a = tlp.parse(r'(\x.P(x)(john)) & Q(x)')
663
+ >>> print(a.type)
664
+ t
665
+ >>> print(a.first.type)
666
+ t
667
+ >>> print(a.first.function.type)
668
+ <e,t>
669
+ >>> print(a.first.function.term.function.type)
670
+ <e,t>
671
+ >>> print(a.first.function.term.argument.type)
672
+ e
673
+ >>> print(a.first.argument.type)
674
+ e
675
+
676
+ >>> a = tlp.parse(r'\x y.P(x,y)(john)(mary) & Q(x)')
677
+ >>> print(a.type)
678
+ t
679
+ >>> print(a.first.type)
680
+ t
681
+ >>> print(a.first.function.type)
682
+ <e,t>
683
+ >>> print(a.first.function.function.type)
684
+ <e,<e,t>>
685
+
686
+ >>> a = tlp.parse(r'--P')
687
+ >>> print(a.type)
688
+ t
689
+ >>> print(a.term.type)
690
+ t
691
+ >>> print(a.term.term.type)
692
+ t
693
+
694
+ >>> tlp.parse(r'\x y.P(x,y)').type
695
+ <e,<e,?>>
696
+ >>> tlp.parse(r'\x y.P(x,y)', {'P': '<e,<e,t>>'}).type
697
+ <e,<e,t>>
698
+
699
+ >>> a = tlp.parse(r'\P y.P(john,y)(\x y.see(x,y))')
700
+ >>> a.type
701
+ <e,?>
702
+ >>> a.function.type
703
+ <<e,<e,?>>,<e,?>>
704
+ >>> a.function.term.term.function.function.type
705
+ <e,<e,?>>
706
+ >>> a.argument.type
707
+ <e,<e,?>>
708
+
709
+ >>> a = tlp.parse(r'exists c f.(father(c) = f)')
710
+ >>> a.type
711
+ t
712
+ >>> a.term.term.type
713
+ t
714
+ >>> a.term.term.first.type
715
+ e
716
+ >>> a.term.term.first.function.type
717
+ <e,e>
718
+ >>> a.term.term.second.type
719
+ e
720
+
721
+ typecheck()
722
+
723
+ >>> a = tlp.parse('P(x)')
724
+ >>> b = tlp.parse('Q(x)')
725
+ >>> a.type
726
+ ?
727
+ >>> c = a & b
728
+ >>> c.first.type
729
+ ?
730
+ >>> c.typecheck()
731
+ {...}
732
+ >>> c.first.type
733
+ t
734
+
735
+ >>> a = tlp.parse('P(x)')
736
+ >>> b = tlp.parse('P(x) & Q(x)')
737
+ >>> a.type
738
+ ?
739
+ >>> typecheck([a,b])
740
+ {...}
741
+ >>> a.type
742
+ t
743
+
744
+ >>> e = tlp.parse(r'man(x)')
745
+ >>> print(dict((k,str(v)) for k,v in e.typecheck().items()) == {'x': 'e', 'man': '<e,?>'})
746
+ True
747
+ >>> sig = {'man': '<e, t>'}
748
+ >>> e = tlp.parse(r'man(x)', sig)
749
+ >>> print(e.function.type)
750
+ <e,t>
751
+ >>> print(dict((k,str(v)) for k,v in e.typecheck().items()) == {'x': 'e', 'man': '<e,t>'})
752
+ True
753
+ >>> print(e.function.type)
754
+ <e,t>
755
+ >>> print(dict((k,str(v)) for k,v in e.typecheck(sig).items()) == {'x': 'e', 'man': '<e,t>'})
756
+ True
757
+
758
+ findtype()
759
+
760
+ >>> print(tlp.parse(r'man(x)').findtype(Variable('man')))
761
+ <e,?>
762
+ >>> print(tlp.parse(r'see(x,y)').findtype(Variable('see')))
763
+ <e,<e,?>>
764
+ >>> print(tlp.parse(r'P(Q(R(x)))').findtype(Variable('Q')))
765
+ ?
766
+
767
+ reading types from strings
768
+
769
+ >>> Type.fromstring('e')
770
+ e
771
+ >>> Type.fromstring('<e,t>')
772
+ <e,t>
773
+ >>> Type.fromstring('<<e,t>,<e,t>>')
774
+ <<e,t>,<e,t>>
775
+ >>> Type.fromstring('<<e,?>,?>')
776
+ <<e,?>,?>
777
+
778
+ alternative type format
779
+
780
+ >>> Type.fromstring('e').str()
781
+ 'IND'
782
+ >>> Type.fromstring('<e,?>').str()
783
+ '(IND -> ANY)'
784
+ >>> Type.fromstring('<<e,t>,t>').str()
785
+ '((IND -> BOOL) -> BOOL)'
786
+
787
+ Type.__eq__()
788
+
789
+ >>> from nltk.sem.logic import *
790
+
791
+ >>> e = ENTITY_TYPE
792
+ >>> t = TRUTH_TYPE
793
+ >>> a = ANY_TYPE
794
+ >>> et = ComplexType(e,t)
795
+ >>> eet = ComplexType(e,ComplexType(e,t))
796
+ >>> at = ComplexType(a,t)
797
+ >>> ea = ComplexType(e,a)
798
+ >>> aa = ComplexType(a,a)
799
+
800
+ >>> e == e
801
+ True
802
+ >>> t == t
803
+ True
804
+ >>> e == t
805
+ False
806
+ >>> a == t
807
+ False
808
+ >>> t == a
809
+ False
810
+ >>> a == a
811
+ True
812
+ >>> et == et
813
+ True
814
+ >>> a == et
815
+ False
816
+ >>> et == a
817
+ False
818
+ >>> a == ComplexType(a,aa)
819
+ True
820
+ >>> ComplexType(a,aa) == a
821
+ True
822
+
823
+ matches()
824
+
825
+ >>> e.matches(t)
826
+ False
827
+ >>> a.matches(t)
828
+ True
829
+ >>> t.matches(a)
830
+ True
831
+ >>> a.matches(et)
832
+ True
833
+ >>> et.matches(a)
834
+ True
835
+ >>> ea.matches(eet)
836
+ True
837
+ >>> eet.matches(ea)
838
+ True
839
+ >>> aa.matches(et)
840
+ True
841
+ >>> aa.matches(t)
842
+ True
843
+
844
+ Type error during parsing
845
+ =========================
846
+
847
+ >>> try: print(tlp.parse(r'exists x y.(P(x) & P(x,y))'))
848
+ ... except InconsistentTypeHierarchyException as e: print(e)
849
+ The variable 'P' was found in multiple places with different types.
850
+ >>> try: tlp.parse(r'\x y.see(x,y)(\x.man(x))')
851
+ ... except TypeException as e: print(e)
852
+ The function '\x y.see(x,y)' is of type '<e,<e,?>>' and cannot be applied to '\x.man(x)' of type '<e,?>'. Its argument must match type 'e'.
853
+ >>> try: tlp.parse(r'\P x y.-P(x,y)(\x.-man(x))')
854
+ ... except TypeException as e: print(e)
855
+ The function '\P x y.-P(x,y)' is of type '<<e,<e,t>>,<e,<e,t>>>' and cannot be applied to '\x.-man(x)' of type '<e,t>'. Its argument must match type '<e,<e,t>>'.
856
+
857
+ >>> a = tlp.parse(r'-talk(x)')
858
+ >>> signature = a.typecheck()
859
+ >>> try: print(tlp.parse(r'-talk(x,y)', signature))
860
+ ... except InconsistentTypeHierarchyException as e: print(e)
861
+ The variable 'talk' was found in multiple places with different types.
862
+
863
+ >>> a = tlp.parse(r'-P(x)')
864
+ >>> b = tlp.parse(r'-P(x,y)')
865
+ >>> a.typecheck()
866
+ {...}
867
+ >>> b.typecheck()
868
+ {...}
869
+ >>> try: typecheck([a,b])
870
+ ... except InconsistentTypeHierarchyException as e: print(e)
871
+ The variable 'P' was found in multiple places with different types.
872
+
873
+ >>> a = tlp.parse(r'P(x)')
874
+ >>> b = tlp.parse(r'P(x,y)')
875
+ >>> signature = {'P': '<e,t>'}
876
+ >>> a.typecheck(signature)
877
+ {...}
878
+ >>> try: typecheck([a,b], signature)
879
+ ... except InconsistentTypeHierarchyException as e: print(e)
880
+ The variable 'P' was found in multiple places with different types.
881
+
882
+ Parse errors
883
+ ============
884
+
885
+ >>> try: read_expr(r'')
886
+ ... except LogicalExpressionException as e: print(e)
887
+ End of input found. Expression expected.
888
+ <BLANKLINE>
889
+ ^
890
+ >>> try: read_expr(r'(')
891
+ ... except LogicalExpressionException as e: print(e)
892
+ End of input found. Expression expected.
893
+ (
894
+ ^
895
+ >>> try: read_expr(r')')
896
+ ... except LogicalExpressionException as e: print(e)
897
+ Unexpected token: ')'. Expression expected.
898
+ )
899
+ ^
900
+ >>> try: read_expr(r'()')
901
+ ... except LogicalExpressionException as e: print(e)
902
+ Unexpected token: ')'. Expression expected.
903
+ ()
904
+ ^
905
+ >>> try: read_expr(r'(P(x) & Q(x)')
906
+ ... except LogicalExpressionException as e: print(e)
907
+ End of input found. Expected token ')'.
908
+ (P(x) & Q(x)
909
+ ^
910
+ >>> try: read_expr(r'(P(x) &')
911
+ ... except LogicalExpressionException as e: print(e)
912
+ End of input found. Expression expected.
913
+ (P(x) &
914
+ ^
915
+ >>> try: read_expr(r'(P(x) | )')
916
+ ... except LogicalExpressionException as e: print(e)
917
+ Unexpected token: ')'. Expression expected.
918
+ (P(x) | )
919
+ ^
920
+ >>> try: read_expr(r'P(x) ->')
921
+ ... except LogicalExpressionException as e: print(e)
922
+ End of input found. Expression expected.
923
+ P(x) ->
924
+ ^
925
+ >>> try: read_expr(r'P(x')
926
+ ... except LogicalExpressionException as e: print(e)
927
+ End of input found. Expected token ')'.
928
+ P(x
929
+ ^
930
+ >>> try: read_expr(r'P(x,')
931
+ ... except LogicalExpressionException as e: print(e)
932
+ End of input found. Expression expected.
933
+ P(x,
934
+ ^
935
+ >>> try: read_expr(r'P(x,)')
936
+ ... except LogicalExpressionException as e: print(e)
937
+ Unexpected token: ')'. Expression expected.
938
+ P(x,)
939
+ ^
940
+ >>> try: read_expr(r'exists')
941
+ ... except LogicalExpressionException as e: print(e)
942
+ End of input found. Variable and Expression expected following quantifier 'exists'.
943
+ exists
944
+ ^
945
+ >>> try: read_expr(r'exists x')
946
+ ... except LogicalExpressionException as e: print(e)
947
+ End of input found. Expression expected.
948
+ exists x
949
+ ^
950
+ >>> try: read_expr(r'exists x.')
951
+ ... except LogicalExpressionException as e: print(e)
952
+ End of input found. Expression expected.
953
+ exists x.
954
+ ^
955
+ >>> try: read_expr(r'\ ')
956
+ ... except LogicalExpressionException as e: print(e)
957
+ End of input found. Variable and Expression expected following lambda operator.
958
+ \
959
+ ^
960
+ >>> try: read_expr(r'\ x')
961
+ ... except LogicalExpressionException as e: print(e)
962
+ End of input found. Expression expected.
963
+ \ x
964
+ ^
965
+ >>> try: read_expr(r'\ x y')
966
+ ... except LogicalExpressionException as e: print(e)
967
+ End of input found. Expression expected.
968
+ \ x y
969
+ ^
970
+ >>> try: read_expr(r'\ x.')
971
+ ... except LogicalExpressionException as e: print(e)
972
+ End of input found. Expression expected.
973
+ \ x.
974
+ ^
975
+ >>> try: read_expr(r'P(x)Q(x)')
976
+ ... except LogicalExpressionException as e: print(e)
977
+ Unexpected token: 'Q'.
978
+ P(x)Q(x)
979
+ ^
980
+ >>> try: read_expr(r'(P(x)Q(x)')
981
+ ... except LogicalExpressionException as e: print(e)
982
+ Unexpected token: 'Q'. Expected token ')'.
983
+ (P(x)Q(x)
984
+ ^
985
+ >>> try: read_expr(r'exists x y')
986
+ ... except LogicalExpressionException as e: print(e)
987
+ End of input found. Expression expected.
988
+ exists x y
989
+ ^
990
+ >>> try: read_expr(r'exists x y.')
991
+ ... except LogicalExpressionException as e: print(e)
992
+ End of input found. Expression expected.
993
+ exists x y.
994
+ ^
995
+ >>> try: read_expr(r'exists x -> y')
996
+ ... except LogicalExpressionException as e: print(e)
997
+ Unexpected token: '->'. Expression expected.
998
+ exists x -> y
999
+ ^
1000
+
1001
+
1002
+ >>> try: read_expr(r'A -> ((P(x) & Q(x)) -> Z')
1003
+ ... except LogicalExpressionException as e: print(e)
1004
+ End of input found. Expected token ')'.
1005
+ A -> ((P(x) & Q(x)) -> Z
1006
+ ^
1007
+ >>> try: read_expr(r'A -> ((P(x) &) -> Z')
1008
+ ... except LogicalExpressionException as e: print(e)
1009
+ Unexpected token: ')'. Expression expected.
1010
+ A -> ((P(x) &) -> Z
1011
+ ^
1012
+ >>> try: read_expr(r'A -> ((P(x) | )) -> Z')
1013
+ ... except LogicalExpressionException as e: print(e)
1014
+ Unexpected token: ')'. Expression expected.
1015
+ A -> ((P(x) | )) -> Z
1016
+ ^
1017
+ >>> try: read_expr(r'A -> (P(x) ->) -> Z')
1018
+ ... except LogicalExpressionException as e: print(e)
1019
+ Unexpected token: ')'. Expression expected.
1020
+ A -> (P(x) ->) -> Z
1021
+ ^
1022
+ >>> try: read_expr(r'A -> (P(x) -> Z')
1023
+ ... except LogicalExpressionException as e: print(e)
1024
+ End of input found. Expected token ')'.
1025
+ A -> (P(x) -> Z
1026
+ ^
1027
+ >>> try: read_expr(r'A -> (P(x,) -> Z')
1028
+ ... except LogicalExpressionException as e: print(e)
1029
+ Unexpected token: ')'. Expression expected.
1030
+ A -> (P(x,) -> Z
1031
+ ^
1032
+ >>> try: read_expr(r'A -> (P(x,)) -> Z')
1033
+ ... except LogicalExpressionException as e: print(e)
1034
+ Unexpected token: ')'. Expression expected.
1035
+ A -> (P(x,)) -> Z
1036
+ ^
1037
+ >>> try: read_expr(r'A -> (exists) -> Z')
1038
+ ... except LogicalExpressionException as e: print(e)
1039
+ ')' is an illegal variable name. Constants may not be quantified.
1040
+ A -> (exists) -> Z
1041
+ ^
1042
+ >>> try: read_expr(r'A -> (exists x) -> Z')
1043
+ ... except LogicalExpressionException as e: print(e)
1044
+ Unexpected token: ')'. Expression expected.
1045
+ A -> (exists x) -> Z
1046
+ ^
1047
+ >>> try: read_expr(r'A -> (exists x.) -> Z')
1048
+ ... except LogicalExpressionException as e: print(e)
1049
+ Unexpected token: ')'. Expression expected.
1050
+ A -> (exists x.) -> Z
1051
+ ^
1052
+ >>> try: read_expr(r'A -> (\ ) -> Z')
1053
+ ... except LogicalExpressionException as e: print(e)
1054
+ ')' is an illegal variable name. Constants may not be abstracted.
1055
+ A -> (\ ) -> Z
1056
+ ^
1057
+ >>> try: read_expr(r'A -> (\ x) -> Z')
1058
+ ... except LogicalExpressionException as e: print(e)
1059
+ Unexpected token: ')'. Expression expected.
1060
+ A -> (\ x) -> Z
1061
+ ^
1062
+ >>> try: read_expr(r'A -> (\ x y) -> Z')
1063
+ ... except LogicalExpressionException as e: print(e)
1064
+ Unexpected token: ')'. Expression expected.
1065
+ A -> (\ x y) -> Z
1066
+ ^
1067
+ >>> try: read_expr(r'A -> (\ x.) -> Z')
1068
+ ... except LogicalExpressionException as e: print(e)
1069
+ Unexpected token: ')'. Expression expected.
1070
+ A -> (\ x.) -> Z
1071
+ ^
1072
+ >>> try: read_expr(r'A -> (P(x)Q(x)) -> Z')
1073
+ ... except LogicalExpressionException as e: print(e)
1074
+ Unexpected token: 'Q'. Expected token ')'.
1075
+ A -> (P(x)Q(x)) -> Z
1076
+ ^
1077
+ >>> try: read_expr(r'A -> ((P(x)Q(x)) -> Z')
1078
+ ... except LogicalExpressionException as e: print(e)
1079
+ Unexpected token: 'Q'. Expected token ')'.
1080
+ A -> ((P(x)Q(x)) -> Z
1081
+ ^
1082
+ >>> try: read_expr(r'A -> (all x y) -> Z')
1083
+ ... except LogicalExpressionException as e: print(e)
1084
+ Unexpected token: ')'. Expression expected.
1085
+ A -> (all x y) -> Z
1086
+ ^
1087
+ >>> try: read_expr(r'A -> (exists x y.) -> Z')
1088
+ ... except LogicalExpressionException as e: print(e)
1089
+ Unexpected token: ')'. Expression expected.
1090
+ A -> (exists x y.) -> Z
1091
+ ^
1092
+ >>> try: read_expr(r'A -> (exists x -> y) -> Z')
1093
+ ... except LogicalExpressionException as e: print(e)
1094
+ Unexpected token: '->'. Expression expected.
1095
+ A -> (exists x -> y) -> Z
1096
+ ^
llmeval-env/lib/python3.10/site-packages/nltk/test/metrics.doctest ADDED
@@ -0,0 +1,321 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ =======
5
+ Metrics
6
+ =======
7
+
8
+ -----
9
+ Setup
10
+ -----
11
+
12
+ >>> import pytest
13
+ >>> _ = pytest.importorskip("numpy")
14
+
15
+
16
+ The `nltk.metrics` package provides a variety of *evaluation measures*
17
+ which can be used for a wide range of NLP tasks.
18
+
19
+ >>> from nltk.metrics import *
20
+
21
+ ------------------
22
+ Standard IR Scores
23
+ ------------------
24
+
25
+ We can use standard scores from information retrieval to test the
26
+ performance of taggers, chunkers, etc.
27
+
28
+ >>> reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
29
+ >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
30
+ >>> print(accuracy(reference, test))
31
+ 0.8
32
+
33
+
34
+ The following measures apply to sets:
35
+
36
+ >>> reference_set = set(reference)
37
+ >>> test_set = set(test)
38
+ >>> precision(reference_set, test_set)
39
+ 1.0
40
+ >>> print(recall(reference_set, test_set))
41
+ 0.8
42
+ >>> print(f_measure(reference_set, test_set))
43
+ 0.88888888888...
44
+
45
+ Measuring the likelihood of the data, given probability distributions:
46
+
47
+ >>> from nltk import FreqDist, MLEProbDist
48
+ >>> pdist1 = MLEProbDist(FreqDist("aldjfalskfjaldsf"))
49
+ >>> pdist2 = MLEProbDist(FreqDist("aldjfalssjjlldss"))
50
+ >>> print(log_likelihood(['a', 'd'], [pdist1, pdist2]))
51
+ -2.7075187496...
52
+
53
+
54
+ ----------------
55
+ Distance Metrics
56
+ ----------------
57
+
58
+ String edit distance (Levenshtein):
59
+
60
+ >>> edit_distance("rain", "shine")
61
+ 3
62
+ >>> edit_distance_align("shine", "shine")
63
+ [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]
64
+ >>> edit_distance_align("rain", "brainy")
65
+ [(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (4, 6)]
66
+ >>> edit_distance_align("", "brainy")
67
+ [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6)]
68
+ >>> edit_distance_align("", "")
69
+ [(0, 0)]
70
+
71
+ Other distance measures:
72
+
73
+ >>> s1 = set([1,2,3,4])
74
+ >>> s2 = set([3,4,5])
75
+ >>> binary_distance(s1, s2)
76
+ 1.0
77
+ >>> print(jaccard_distance(s1, s2))
78
+ 0.6
79
+ >>> print(masi_distance(s1, s2))
80
+ 0.868
81
+
82
+ ----------------------
83
+ Miscellaneous Measures
84
+ ----------------------
85
+
86
+ Rank Correlation works with two dictionaries mapping keys to ranks.
87
+ The dictionaries should have the same set of keys.
88
+
89
+ >>> spearman_correlation({'e':1, 't':2, 'a':3}, {'e':1, 'a':2, 't':3})
90
+ 0.5
91
+
92
+ Windowdiff uses a sliding window in comparing two segmentations of the same input (e.g. tokenizations, chunkings).
93
+ Segmentations are represented using strings of zeros and ones.
94
+
95
+ >>> s1 = "000100000010"
96
+ >>> s2 = "000010000100"
97
+ >>> s3 = "100000010000"
98
+ >>> s4 = "000000000000"
99
+ >>> s5 = "111111111111"
100
+ >>> windowdiff(s1, s1, 3)
101
+ 0.0
102
+ >>> abs(windowdiff(s1, s2, 3) - 0.3) < 1e-6 # windowdiff(s1, s2, 3) == 0.3
103
+ True
104
+ >>> abs(windowdiff(s2, s3, 3) - 0.8) < 1e-6 # windowdiff(s2, s3, 3) == 0.8
105
+ True
106
+ >>> windowdiff(s1, s4, 3)
107
+ 0.5
108
+ >>> windowdiff(s1, s5, 3)
109
+ 1.0
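+
+ Concretely, for window size ``k`` the score is the fraction of the
+ ``len(s1) - k + 1`` windows in which the two segmentations contain a
+ different number of boundaries. A sketch of that computation (not the
+ library source, but it reproduces the 0.3 above)::
+
+     k = 3
+     windows = range(len(s1) - k + 1)
+     mismatches = [s1[i:i+k].count("1") != s2[i:i+k].count("1") for i in windows]
+     assert abs(sum(mismatches) / len(mismatches) - windowdiff(s1, s2, k)) < 1e-6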
110
+
111
+ ----------------
112
+ Confusion Matrix
113
+ ----------------
114
+
115
+ >>> reference = 'This is the reference data. Testing 123. aoaeoeoe'
116
+ >>> test = 'Thos iz_the rifirenci data. Testeng 123. aoaeoeoe'
117
+ >>> print(ConfusionMatrix(reference, test))
118
+ | . 1 2 3 T _ a c d e f g h i n o r s t z |
119
+ --+-------------------------------------------+
120
+ |<8>. . . . . 1 . . . . . . . . . . . . . . |
121
+ . | .<2>. . . . . . . . . . . . . . . . . . . |
122
+ 1 | . .<1>. . . . . . . . . . . . . . . . . . |
123
+ 2 | . . .<1>. . . . . . . . . . . . . . . . . |
124
+ 3 | . . . .<1>. . . . . . . . . . . . . . . . |
125
+ T | . . . . .<2>. . . . . . . . . . . . . . . |
126
+ _ | . . . . . .<.>. . . . . . . . . . . . . . |
127
+ a | . . . . . . .<4>. . . . . . . . . . . . . |
128
+ c | . . . . . . . .<1>. . . . . . . . . . . . |
129
+ d | . . . . . . . . .<1>. . . . . . . . . . . |
130
+ e | . . . . . . . . . .<6>. . . 3 . . . . . . |
131
+ f | . . . . . . . . . . .<1>. . . . . . . . . |
132
+ g | . . . . . . . . . . . .<1>. . . . . . . . |
133
+ h | . . . . . . . . . . . . .<2>. . . . . . . |
134
+ i | . . . . . . . . . . 1 . . .<1>. 1 . . . . |
135
+ n | . . . . . . . . . . . . . . .<2>. . . . . |
136
+ o | . . . . . . . . . . . . . . . .<3>. . . . |
137
+ r | . . . . . . . . . . . . . . . . .<2>. . . |
138
+ s | . . . . . . . . . . . . . . . . . .<2>. 1 |
139
+ t | . . . . . . . . . . . . . . . . . . .<3>. |
140
+ z | . . . . . . . . . . . . . . . . . . . .<.>|
141
+ --+-------------------------------------------+
142
+ (row = reference; col = test)
143
+ <BLANKLINE>
144
+
145
+ >>> cm = ConfusionMatrix(reference, test)
146
+ >>> print(cm.pretty_format(sort_by_count=True))
147
+ | e a i o s t . T h n r 1 2 3 c d f g _ z |
148
+ --+-------------------------------------------+
149
+ |<8>. . . . . . . . . . . . . . . . . . 1 . |
150
+ e | .<6>. 3 . . . . . . . . . . . . . . . . . |
151
+ a | . .<4>. . . . . . . . . . . . . . . . . . |
152
+ i | . 1 .<1>1 . . . . . . . . . . . . . . . . |
153
+ o | . . . .<3>. . . . . . . . . . . . . . . . |
154
+ s | . . . . .<2>. . . . . . . . . . . . . . 1 |
155
+ t | . . . . . .<3>. . . . . . . . . . . . . . |
156
+ . | . . . . . . .<2>. . . . . . . . . . . . . |
157
+ T | . . . . . . . .<2>. . . . . . . . . . . . |
158
+ h | . . . . . . . . .<2>. . . . . . . . . . . |
159
+ n | . . . . . . . . . .<2>. . . . . . . . . . |
160
+ r | . . . . . . . . . . .<2>. . . . . . . . . |
161
+ 1 | . . . . . . . . . . . .<1>. . . . . . . . |
162
+ 2 | . . . . . . . . . . . . .<1>. . . . . . . |
163
+ 3 | . . . . . . . . . . . . . .<1>. . . . . . |
164
+ c | . . . . . . . . . . . . . . .<1>. . . . . |
165
+ d | . . . . . . . . . . . . . . . .<1>. . . . |
166
+ f | . . . . . . . . . . . . . . . . .<1>. . . |
167
+ g | . . . . . . . . . . . . . . . . . .<1>. . |
168
+ _ | . . . . . . . . . . . . . . . . . . .<.>. |
169
+ z | . . . . . . . . . . . . . . . . . . . .<.>|
170
+ --+-------------------------------------------+
171
+ (row = reference; col = test)
172
+ <BLANKLINE>
173
+
174
+ >>> print(cm.pretty_format(sort_by_count=True, truncate=10))
175
+ | e a i o s t . T h |
176
+ --+---------------------+
177
+ |<8>. . . . . . . . . |
178
+ e | .<6>. 3 . . . . . . |
179
+ a | . .<4>. . . . . . . |
180
+ i | . 1 .<1>1 . . . . . |
181
+ o | . . . .<3>. . . . . |
182
+ s | . . . . .<2>. . . . |
183
+ t | . . . . . .<3>. . . |
184
+ . | . . . . . . .<2>. . |
185
+ T | . . . . . . . .<2>. |
186
+ h | . . . . . . . . .<2>|
187
+ --+---------------------+
188
+ (row = reference; col = test)
189
+ <BLANKLINE>
190
+
191
+ >>> print(cm.pretty_format(sort_by_count=True, truncate=10, values_in_chart=False))
192
+ | 1 |
193
+ | 1 2 3 4 5 6 7 8 9 0 |
194
+ ---+---------------------+
195
+ 1 |<8>. . . . . . . . . |
196
+ 2 | .<6>. 3 . . . . . . |
197
+ 3 | . .<4>. . . . . . . |
198
+ 4 | . 1 .<1>1 . . . . . |
199
+ 5 | . . . .<3>. . . . . |
200
+ 6 | . . . . .<2>. . . . |
201
+ 7 | . . . . . .<3>. . . |
202
+ 8 | . . . . . . .<2>. . |
203
+ 9 | . . . . . . . .<2>. |
204
+ 10 | . . . . . . . . .<2>|
205
+ ---+---------------------+
206
+ (row = reference; col = test)
207
+ Value key:
208
+ 1:
209
+ 2: e
210
+ 3: a
211
+ 4: i
212
+ 5: o
213
+ 6: s
214
+ 7: t
215
+ 8: .
216
+ 9: T
217
+ 10: h
218
+ <BLANKLINE>
219
+
220
+ For "e", the number of true positives should be 6, while the number of false negatives is 3.
221
+ So, the recall ought to be 6 / (6 + 3):
222
+
223
+ >>> cm.recall("e") # doctest: +ELLIPSIS
224
+ 0.666666...
225
+
226
+ For "e", the number of false positives is just 1, so the precision should be 6 / (6 + 1):
227
+
228
+ >>> cm.precision("e") # doctest: +ELLIPSIS
229
+ 0.857142...
230
+
231
+ The f-measure with default value of ``alpha = 0.5`` should then be:
232
+
233
+ * *1/(alpha/p + (1-alpha)/r) =*
234
+ * *1/(0.5/p + 0.5/r) =*
235
+ * *2pr / (p + r) =*
236
+ * *2 * 0.857142... * 0.666666... / (0.857142... + 0.666666...) =*
237
+ * *0.749999...*
238
+
239
+ >>> cm.f_measure("e") # doctest: +ELLIPSIS
240
+ 0.749999...
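+
+ A quick cross-check of that value as the harmonic mean of the precision and
+ recall computed above::
+
+     p, r = cm.precision("e"), cm.recall("e")
+     assert abs(cm.f_measure("e") - 2 * p * r / (p + r)) < 1e-12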
241
+
242
+ --------------------
243
+ Association measures
244
+ --------------------
245
+
246
+ These measures are useful to determine whether the co-occurrence of two random
247
+ events is meaningful. They are used, for instance, to distinguish collocations
248
+ from other pairs of adjacent words.
249
+
250
+ We take some examples of bigram association calculations from Manning and
251
+ Schütze's SNLP, 2nd Ed., chapter 5.
252
+
253
+ >>> n_new_companies, n_new, n_companies, N = 8, 15828, 4675, 14307668
254
+ >>> bam = BigramAssocMeasures
255
+ >>> bam.raw_freq(20, (42, 20), N) == 20. / N
256
+ True
257
+ >>> bam.student_t(n_new_companies, (n_new, n_companies), N)
258
+ 0.999...
259
+ >>> bam.chi_sq(n_new_companies, (n_new, n_companies), N)
260
+ 1.54...
261
+ >>> bam.likelihood_ratio(150, (12593, 932), N)
262
+ 1291...
263
+
264
+ For other associations, we ensure the ordering of the measures:
265
+
266
+ >>> bam.mi_like(20, (42, 20), N) > bam.mi_like(20, (41, 27), N)
267
+ True
268
+ >>> bam.pmi(20, (42, 20), N) > bam.pmi(20, (41, 27), N)
269
+ True
270
+ >>> bam.phi_sq(20, (42, 20), N) > bam.phi_sq(20, (41, 27), N)
271
+ True
272
+ >>> bam.poisson_stirling(20, (42, 20), N) > bam.poisson_stirling(20, (41, 27), N)
273
+ True
274
+ >>> bam.jaccard(20, (42, 20), N) > bam.jaccard(20, (41, 27), N)
275
+ True
276
+ >>> bam.dice(20, (42, 20), N) > bam.dice(20, (41, 27), N)
277
+ True
278
+ >>> bam.fisher(20, (42, 20), N) > bam.fisher(20, (41, 27), N) # doctest: +SKIP
279
+ False
280
+
281
+ For trigrams, we have to provide more count information:
282
+
283
+ >>> n_w1_w2_w3 = 20
284
+ >>> n_w1_w2, n_w1_w3, n_w2_w3 = 35, 60, 40
285
+ >>> pair_counts = (n_w1_w2, n_w1_w3, n_w2_w3)
286
+ >>> n_w1, n_w2, n_w3 = 100, 200, 300
287
+ >>> uni_counts = (n_w1, n_w2, n_w3)
288
+ >>> N = 14307668
289
+ >>> tam = TrigramAssocMeasures
290
+ >>> tam.raw_freq(n_w1_w2_w3, pair_counts, uni_counts, N) == 1. * n_w1_w2_w3 / N
291
+ True
292
+ >>> uni_counts2 = (n_w1, n_w2, 100)
293
+ >>> tam.student_t(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.student_t(n_w1_w2_w3, pair_counts, uni_counts, N)
294
+ True
295
+ >>> tam.chi_sq(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.chi_sq(n_w1_w2_w3, pair_counts, uni_counts, N)
296
+ True
297
+ >>> tam.mi_like(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.mi_like(n_w1_w2_w3, pair_counts, uni_counts, N)
298
+ True
299
+ >>> tam.pmi(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.pmi(n_w1_w2_w3, pair_counts, uni_counts, N)
300
+ True
301
+ >>> tam.likelihood_ratio(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.likelihood_ratio(n_w1_w2_w3, pair_counts, uni_counts, N)
302
+ True
303
+ >>> tam.poisson_stirling(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.poisson_stirling(n_w1_w2_w3, pair_counts, uni_counts, N)
304
+ True
305
+ >>> tam.jaccard(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.jaccard(n_w1_w2_w3, pair_counts, uni_counts, N)
306
+ True
307
+
308
+
309
+ For fourgrams, we have to provide still more count information (both pair and triplet counts):
310
+
311
+ >>> n_w1_w2_w3_w4 = 5
312
+ >>> n_w1_w2, n_w1_w3, n_w2_w3 = 35, 60, 40
313
+ >>> n_w1_w2_w3, n_w2_w3_w4 = 20, 10
314
+ >>> pair_counts = (n_w1_w2, n_w1_w3, n_w2_w3)
315
+ >>> triplet_counts = (n_w1_w2_w3, n_w2_w3_w4)
316
+ >>> n_w1, n_w2, n_w3, n_w4 = 100, 200, 300, 400
317
+ >>> uni_counts = (n_w1, n_w2, n_w3, n_w4)
318
+ >>> N = 14307668
319
+ >>> qam = QuadgramAssocMeasures
320
+ >>> qam.raw_freq(n_w1_w2_w3_w4, pair_counts, triplet_counts, uni_counts, N) == 1. * n_w1_w2_w3_w4 / N
321
+ True
llmeval-env/lib/python3.10/site-packages/nltk/test/misc.doctest ADDED
@@ -0,0 +1,118 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ --------------------------------------------------------------------------------
5
+ Unit tests for the miscellaneous sort functions.
6
+ --------------------------------------------------------------------------------
7
+
8
+ >>> from copy import deepcopy
9
+ >>> from nltk.misc.sort import *
10
+
11
+ A (very) small list of unsorted integers.
12
+
13
+ >>> test_data = [12, 67, 7, 28, 92, 56, 53, 720, 91, 57, 20, 20]
14
+
15
+ Test each sorting method - each method returns the number of operations
16
+ required to sort the data, and sorts in-place (destructively - hence the need
17
+ for multiple copies).
18
+
19
+ >>> sorted_data = deepcopy(test_data)
20
+ >>> selection(sorted_data)
21
+ 66
22
+
23
+ >>> sorted_data
24
+ [7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]
25
+
26
+ >>> sorted_data = deepcopy(test_data)
27
+ >>> bubble(sorted_data)
28
+ 30
29
+
30
+ >>> sorted_data
31
+ [7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]
32
+
33
+ >>> sorted_data = deepcopy(test_data)
34
+ >>> merge(sorted_data)
35
+ 30
36
+
37
+ >>> sorted_data
38
+ [7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]
39
+
40
+ >>> sorted_data = deepcopy(test_data)
41
+ >>> quick(sorted_data)
42
+ 13
43
+
44
+ >>> sorted_data
45
+ [7, 12, 20, 20, 28, 53, 56, 57, 67, 91, 92, 720]
46
+
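+ As a further illustration (a sketch added here, not part of the original tests),
+ each function leaves its argument sorted in place and returns only the operation
+ count, so the result can be checked against the built-in ``sorted``:
+
+ >>> import random
+ >>> rng = random.Random(0)
+ >>> data = [rng.randrange(1000) for _ in range(50)]
+ >>> trial = deepcopy(data)
+ >>> ops = quick(trial) # the return value is the operation count, not the data
+ >>> trial == sorted(data)
+ True
+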
47
+ --------------------------------------------------------------------------------
48
+ Unit tests for Wordfinder class
49
+ --------------------------------------------------------------------------------
50
+
51
+ >>> import random
52
+
53
+ >>> # The following is not enough for reproducibility under Python 2/3
54
+ >>> # (see https://bugs.python.org/issue9025) so this test is skipped.
55
+ >>> random.seed(12345)
56
+
57
+ >>> from nltk.misc import wordfinder
58
+ >>> wordfinder.word_finder() # doctest: +SKIP
59
+ Word Finder
60
+ <BLANKLINE>
61
+ J V L A I R O T A T I S I V O D E R E T
62
+ H U U B E A R O E P O C S O R E T N E P
63
+ A D A U Z E E S R A P P A L L M E N T R
64
+ C X A D Q S Z T P E O R S N G P J A D E
65
+ I G Y K K T I A A R G F I D T E L C N S
66
+ R E C N B H T R L T N N B W N T A O A I
67
+ A Y I L O E I A M E I A A Y U R P L L D
68
+ G L T V S T S F E A D I P H D O O H N I
69
+ R L S E C I N I L R N N M E C G R U E A
70
+ A A Y G I C E N L L E O I G Q R T A E L
71
+ M R C E T I S T A E T L L E U A E N R L
72
+ O U O T A S E E C S O O N H Y P A T G Y
73
+ E M H O M M D R E S F P U L T H C F N V
74
+ L A C A I M A M A N L B R U T E D O M I
75
+ O R I L N E E E E E U A R S C R Y L I P
76
+ H T R K E S N N M S I L A S R E V I N U
77
+ T X T A A O U T K S E T A R R E S I B J
78
+ A E D L E L J I F O O R P E L K N I R W
79
+ K H A I D E Q O P R I C K T I M B E R P
80
+ Z K D O O H G N I H T U R V E Y D R O P
81
+ <BLANKLINE>
82
+ 1: INTERCHANGER
83
+ 2: TEARLESSNESS
84
+ 3: UNIVERSALISM
85
+ 4: DESENSITIZER
86
+ 5: INTERMENTION
87
+ 6: TRICHOCYSTIC
88
+ 7: EXTRAMURALLY
89
+ 8: VEGETOALKALI
90
+ 9: PALMELLACEAE
91
+ 10: AESTHETICISM
92
+ 11: PETROGRAPHER
93
+ 12: VISITATORIAL
94
+ 13: OLEOMARGARIC
95
+ 14: WRINKLEPROOF
96
+ 15: PRICKTIMBER
97
+ 16: PRESIDIALLY
98
+ 17: SCITAMINEAE
99
+ 18: ENTEROSCOPE
100
+ 19: APPALLMENT
101
+ 20: TURVEYDROP
102
+ 21: THINGHOOD
103
+ 22: BISERRATE
104
+ 23: GREENLAND
105
+ 24: BRUTEDOM
106
+ 25: POLONIAN
107
+ 26: ACOLHUAN
108
+ 27: LAPORTEA
109
+ 28: TENDING
110
+ 29: TEREDO
111
+ 30: MESOLE
112
+ 31: UNLIMP
113
+ 32: OSTARA
114
+ 33: PILY
115
+ 34: DUNT
116
+ 35: ONYX
117
+ 36: KATH
118
+ 37: JUNE
llmeval-env/lib/python3.10/site-packages/nltk/test/portuguese_en_fixt.py ADDED
@@ -0,0 +1,4 @@
1
+ def setup_module():
2
+ import pytest
3
+
4
+ pytest.skip("portuguese_en.doctest imports nltk.examples.pt which doesn't exist!")
llmeval-env/lib/python3.10/site-packages/nltk/test/probability.doctest ADDED
@@ -0,0 +1,306 @@
1
+ .. Copyright (C) 2001-2023 NLTK Project
2
+ .. For license information, see LICENSE.TXT
3
+
4
+ ===========
5
+ Probability
6
+ ===========
7
+
8
+ >>> from nltk.test.probability_fixt import setup_module
9
+ >>> setup_module()
10
+
11
+ >>> import nltk
12
+ >>> from nltk.probability import *
13
+
14
+ FreqDist
15
+ --------
16
+
17
+ >>> text1 = ['no', 'good', 'fish', 'goes', 'anywhere', 'without', 'a', 'porpoise', '!']
18
+ >>> text2 = ['no', 'good', 'porpoise', 'likes', 'to', 'fish', 'fish', 'anywhere', '.']
19
+
20
+ >>> fd1 = nltk.FreqDist(text1)
21
+ >>> fd1 == nltk.FreqDist(text1)
22
+ True
23
+
24
+ Note that items are sorted in order of decreasing frequency; two items of the same frequency appear in indeterminate order.
25
+
26
+ >>> import itertools
27
+ >>> both = nltk.FreqDist(text1 + text2)
28
+ >>> both_most_common = both.most_common()
29
+ >>> list(itertools.chain(*(sorted(ys) for k, ys in itertools.groupby(both_most_common, key=lambda t: t[1]))))
30
+ [('fish', 3), ('anywhere', 2), ('good', 2), ('no', 2), ('porpoise', 2), ('!', 1), ('.', 1), ('a', 1), ('goes', 1), ('likes', 1), ('to', 1), ('without', 1)]
31
+
32
+ >>> both == fd1 + nltk.FreqDist(text2)
33
+ True
34
+ >>> fd1 == nltk.FreqDist(text1) # But fd1 is unchanged
35
+ True
36
+
37
+ >>> fd2 = nltk.FreqDist(text2)
38
+ >>> fd1.update(fd2)
39
+ >>> fd1 == both
40
+ True
41
+
42
+ >>> fd1 = nltk.FreqDist(text1)
43
+ >>> fd1.update(text2)
44
+ >>> fd1 == both
45
+ True
46
+
47
+ >>> fd1 = nltk.FreqDist(text1)
48
+ >>> fd2 = nltk.FreqDist(fd1)
49
+ >>> fd2 == fd1
50
+ True
51
+
52
+ ``nltk.FreqDist`` can be pickled:
53
+
54
+ >>> import pickle
55
+ >>> fd1 = nltk.FreqDist(text1)
56
+ >>> pickled = pickle.dumps(fd1)
57
+ >>> fd1 == pickle.loads(pickled)
58
+ True
59
+
60
+ Mathematical operations:
61
+
62
+ >>> FreqDist('abbb') + FreqDist('bcc')
63
+ FreqDist({'b': 4, 'c': 2, 'a': 1})
64
+ >>> FreqDist('abbbc') - FreqDist('bccd')
65
+ FreqDist({'b': 2, 'a': 1})
66
+ >>> FreqDist('abbb') | FreqDist('bcc')
67
+ FreqDist({'b': 3, 'c': 2, 'a': 1})
68
+ >>> FreqDist('abbb') & FreqDist('bcc')
69
+ FreqDist({'b': 1})
70
+
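+ As the results above suggest, ``|`` and ``&`` behave like the corresponding
+ ``collections.Counter`` operations, taking element-wise maxima and minima of the
+ counts. A small sketch (not part of the original tests) makes this explicit:
+
+ >>> d1, d2 = FreqDist('abbb'), FreqDist('bcc')
+ >>> (d1 | d2)['b'] == max(d1['b'], d2['b'])
+ True
+ >>> (d1 & d2)['b'] == min(d1['b'], d2['b'])
+ True
+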
71
+ ConditionalFreqDist
72
+ -------------------
73
+
74
+ >>> cfd1 = ConditionalFreqDist()
75
+ >>> cfd1[1] = FreqDist('abbbb')
76
+ >>> cfd1[2] = FreqDist('xxxxyy')
77
+ >>> cfd1
78
+ <ConditionalFreqDist with 2 conditions>
79
+
80
+ >>> cfd2 = ConditionalFreqDist()
81
+ >>> cfd2[1] = FreqDist('bbccc')
82
+ >>> cfd2[2] = FreqDist('xxxyyyzz')
83
+ >>> cfd2[3] = FreqDist('m')
84
+ >>> cfd2
85
+ <ConditionalFreqDist with 3 conditions>
86
+
87
+ >>> r = cfd1 + cfd2
88
+ >>> [(i,r[i]) for i in r.conditions()]
89
+ [(1, FreqDist({'b': 6, 'c': 3, 'a': 1})), (2, FreqDist({'x': 7, 'y': 5, 'z': 2})), (3, FreqDist({'m': 1}))]
90
+
91
+ >>> r = cfd1 - cfd2
92
+ >>> [(i,r[i]) for i in r.conditions()]
93
+ [(1, FreqDist({'b': 2, 'a': 1})), (2, FreqDist({'x': 1}))]
94
+
95
+ >>> r = cfd1 | cfd2
96
+ >>> [(i,r[i]) for i in r.conditions()]
97
+ [(1, FreqDist({'b': 4, 'c': 3, 'a': 1})), (2, FreqDist({'x': 4, 'y': 3, 'z': 2})), (3, FreqDist({'m': 1}))]
98
+
99
+ >>> r = cfd1 & cfd2
100
+ >>> [(i,r[i]) for i in r.conditions()]
101
+ [(1, FreqDist({'b': 2})), (2, FreqDist({'x': 3, 'y': 2}))]
102
+
103
+ Testing some HMM estimators
104
+ ---------------------------
105
+
106
+ We extract a small part (500 sentences) of the Brown corpus
107
+
108
+ >>> corpus = nltk.corpus.brown.tagged_sents(categories='adventure')[:500]
109
+ >>> print(len(corpus))
110
+ 500
111
+
112
+ We create an HMM trainer - note that we need the tags and symbols
113
+ from the whole corpus, not just the training corpus
114
+
115
+ >>> from nltk.util import unique_list
116
+ >>> tag_set = unique_list(tag for sent in corpus for (word,tag) in sent)
117
+ >>> print(len(tag_set))
118
+ 92
119
+ >>> symbols = unique_list(word for sent in corpus for (word,tag) in sent)
120
+ >>> print(len(symbols))
121
+ 1464
122
+ >>> trainer = nltk.tag.HiddenMarkovModelTrainer(tag_set, symbols)
123
+
124
+ We divide the corpus into 90% training and 10% testing
125
+
126
+ >>> train_corpus = []
127
+ >>> test_corpus = []
128
+ >>> for i in range(len(corpus)):
129
+ ... if i % 10:
130
+ ... train_corpus += [corpus[i]]
131
+ ... else:
132
+ ... test_corpus += [corpus[i]]
133
+ >>> print(len(train_corpus))
134
+ 450
135
+ >>> print(len(test_corpus))
136
+ 50
137
+
138
+ And now we can test the estimators
139
+
140
+ >>> def train_and_test(est):
141
+ ... hmm = trainer.train_supervised(train_corpus, estimator=est)
142
+ ... print('%.2f%%' % (100 * hmm.accuracy(test_corpus)))
143
+
144
+ Maximum Likelihood Estimation
145
+ -----------------------------
146
+ - This resulted in an initialization error before r7209
147
+
148
+ >>> mle = lambda fd, bins: MLEProbDist(fd)
149
+ >>> train_and_test(mle)
150
+ 22.75%
151
+
152
+ Laplace (= Lidstone with gamma==1)
153
+
154
+ >>> train_and_test(LaplaceProbDist)
155
+ 66.04%
156
+
157
+ Expected Likelihood Estimation (= Lidstone with gamma==0.5)
158
+
159
+ >>> train_and_test(ELEProbDist)
160
+ 73.01%
161
+
162
+ Lidstone Estimation, for gamma==0.1, 0.5 and 1
163
+ (the latter two should be exactly equal to the ELE and Laplace results above)
164
+
165
+ >>> def lidstone(gamma):
166
+ ... return lambda fd, bins: LidstoneProbDist(fd, gamma, bins)
167
+ >>> train_and_test(lidstone(0.1))
168
+ 82.51%
169
+ >>> train_and_test(lidstone(0.5))
170
+ 73.01%
171
+ >>> train_and_test(lidstone(1.0))
172
+ 66.04%
173
+
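+ For reference (an illustrative sketch, not one of the original tests), the Lidstone
+ estimate for a single sample is ``(c + gamma) / (N + gamma * bins)``, which is easy
+ to check by hand on a tiny distribution:
+
+ >>> toy_fd = FreqDist('aab')
+ >>> toy_pd = LidstoneProbDist(toy_fd, 0.5, bins=3)
+ >>> toy_pd.prob('a') # (2 + 0.5) / (3 + 0.5 * 3)
+ 0.5555...
+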
174
+ Witten Bell Estimation
175
+ ----------------------
176
+ - This resulted in ZeroDivisionError before r7209
177
+
178
+ >>> train_and_test(WittenBellProbDist)
179
+ 88.12%
180
+
181
+ Good Turing Estimation
+ ----------------------
182
+
183
+ >>> gt = lambda fd, bins: SimpleGoodTuringProbDist(fd, bins=1e5)
184
+ >>> train_and_test(gt)
185
+ 86.93%
186
+
187
+ Kneser Ney Estimation
188
+ ---------------------
189
+ Since the Kneser-Ney distribution is best suited for trigrams, we must adjust
190
+ our testing accordingly.
191
+
192
+ >>> corpus = [[((x[0],y[0],z[0]),(x[1],y[1],z[1]))
193
+ ... for x, y, z in nltk.trigrams(sent)]
194
+ ... for sent in corpus[:100]]
195
+
196
+ We will then need to redefine the rest of the training/testing variables
197
+
198
+ >>> tag_set = unique_list(tag for sent in corpus for (word,tag) in sent)
199
+ >>> len(tag_set)
200
+ 906
201
+
202
+ >>> symbols = unique_list(word for sent in corpus for (word,tag) in sent)
203
+ >>> len(symbols)
204
+ 1341
205
+
206
+ >>> trainer = nltk.tag.HiddenMarkovModelTrainer(tag_set, symbols)
207
+ >>> train_corpus = []
208
+ >>> test_corpus = []
209
+
210
+ >>> for i in range(len(corpus)):
211
+ ... if i % 10:
212
+ ... train_corpus += [corpus[i]]
213
+ ... else:
214
+ ... test_corpus += [corpus[i]]
215
+
216
+ >>> len(train_corpus)
217
+ 90
218
+ >>> len(test_corpus)
219
+ 10
220
+
221
+ >>> kn = lambda fd, bins: KneserNeyProbDist(fd)
222
+ >>> train_and_test(kn)
223
+ 0.86%
224
+
225
+ Remains to be added:
226
+ - Tests for HeldoutProbDist, CrossValidationProbDist and MutableProbDist
227
+
228
+ Squashed bugs
229
+ -------------
230
+
231
+ Issue 511: override pop and popitem to invalidate the cache
232
+
233
+ >>> fd = nltk.FreqDist('a')
234
+ >>> list(fd.keys())
235
+ ['a']
236
+ >>> fd.pop('a')
237
+ 1
238
+ >>> list(fd.keys())
239
+ []
240
+
241
+ Issue 533: access cumulative frequencies with no arguments
242
+
243
+ >>> fd = nltk.FreqDist('aab')
244
+ >>> list(fd._cumulative_frequencies(['a']))
245
+ [2.0]
246
+ >>> list(fd._cumulative_frequencies(['a', 'b']))
247
+ [2.0, 3.0]
248
+
249
+ Issue 579: override clear to reset some variables
250
+
251
+ >>> fd = FreqDist('aab')
252
+ >>> fd.clear()
253
+ >>> fd.N()
254
+ 0
255
+
256
+ Issue 351: fix the fileids method of CategorizedCorpusReader so that it does not
257
+ inadvertently add errant categories
258
+
259
+ >>> from nltk.corpus import brown
260
+ >>> brown.fileids('blah')
261
+ Traceback (most recent call last):
262
+ ...
263
+ ValueError: Category blah not found
264
+ >>> brown.categories()
265
+ ['adventure', 'belles_lettres', 'editorial', 'fiction', 'government', 'hobbies', 'humor', 'learned', 'lore', 'mystery', 'news', 'religion', 'reviews', 'romance', 'science_fiction']
266
+
267
+ Issue 175: add the unseen bin to SimpleGoodTuringProbDist by default;
268
+ otherwise any unseen events get a probability of zero, i.e.,
269
+ they don't get smoothed
270
+
271
+ >>> from nltk import SimpleGoodTuringProbDist, FreqDist
272
+ >>> fd = FreqDist({'a':1, 'b':1, 'c': 2, 'd': 3, 'e': 4, 'f': 4, 'g': 4, 'h': 5, 'i': 5, 'j': 6, 'k': 6, 'l': 6, 'm': 7, 'n': 7, 'o': 8, 'p': 9, 'q': 10})
273
+ >>> p = SimpleGoodTuringProbDist(fd)
274
+ >>> p.prob('a')
275
+ 0.017649766667026317...
276
+ >>> p.prob('o')
277
+ 0.08433050215340411...
278
+ >>> p.prob('z')
279
+ 0.022727272727272728...
280
+ >>> p.prob('foobar')
281
+ 0.022727272727272728...
282
+
283
+ ``MLEProbDist``, ``ConditionalProbDist``, ``DictionaryConditionalProbDist`` and
284
+ ``ConditionalFreqDist`` can be pickled:
285
+
286
+ >>> import pickle
287
+ >>> pd = MLEProbDist(fd)
288
+ >>> sorted(pd.samples()) == sorted(pickle.loads(pickle.dumps(pd)).samples())
289
+ True
290
+ >>> dpd = DictionaryConditionalProbDist({'x': pd})
291
+ >>> unpickled = pickle.loads(pickle.dumps(dpd))
292
+ >>> dpd['x'].prob('a')
293
+ 0.011363636...
294
+ >>> dpd['x'].prob('a') == unpickled['x'].prob('a')
295
+ True
296
+ >>> cfd = nltk.probability.ConditionalFreqDist()
297
+ >>> cfd['foo']['hello'] += 1
298
+ >>> cfd['foo']['hello'] += 1
299
+ >>> cfd['bar']['hello'] += 1
300
+ >>> cfd2 = pickle.loads(pickle.dumps(cfd))
301
+ >>> cfd2 == cfd
302
+ True
303
+ >>> cpd = ConditionalProbDist(cfd, SimpleGoodTuringProbDist)
304
+ >>> cpd2 = pickle.loads(pickle.dumps(cpd))
305
+ >>> cpd['foo'].prob('hello') == cpd2['foo'].prob('hello')
306
+ True