applied-ai-018 committed
Commit 685344b · verified · 1 Parent(s): 9b2b726

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff.
Files changed (50)
  1. ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step40/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  4. venv/lib/python3.10/site-packages/nltk/cluster/__init__.py +92 -0
  5. venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/nltk/cluster/api.py +74 -0
  12. venv/lib/python3.10/site-packages/nltk/cluster/em.py +219 -0
  13. venv/lib/python3.10/site-packages/nltk/cluster/gaac.py +170 -0
  14. venv/lib/python3.10/site-packages/nltk/cluster/kmeans.py +231 -0
  15. venv/lib/python3.10/site-packages/nltk/parse/corenlp.py +800 -0
  16. venv/lib/python3.10/site-packages/nltk/parse/earleychart.py +552 -0
  17. venv/lib/python3.10/site-packages/nltk/parse/evaluate.py +129 -0
  18. venv/lib/python3.10/site-packages/nltk/parse/featurechart.py +674 -0
  19. venv/lib/python3.10/site-packages/nltk/parse/generate.py +85 -0
  20. venv/lib/python3.10/site-packages/nltk/parse/malt.py +393 -0
  21. venv/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py +772 -0
  22. venv/lib/python3.10/site-packages/nltk/parse/stanford.py +470 -0
  23. venv/lib/python3.10/site-packages/nltk/parse/transitionparser.py +794 -0
  24. venv/lib/python3.10/site-packages/nltk/parse/viterbi.py +453 -0
  25. venv/lib/python3.10/site-packages/nltk/sem/__init__.py +75 -0
  26. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/boxer.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/chat80.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/cooper_storage.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/drt.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/evaluate.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/glue.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/hole.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/lfg.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/linearlogic.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/logic.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/relextract.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/skolemize.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/nltk/sem/__pycache__/util.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/nltk/sem/boxer.py +1605 -0
  42. venv/lib/python3.10/site-packages/nltk/sem/chat80.py +857 -0
  43. venv/lib/python3.10/site-packages/nltk/sem/cooper_storage.py +124 -0
  44. venv/lib/python3.10/site-packages/nltk/sem/drt.py +1460 -0
  45. venv/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py +553 -0
  46. venv/lib/python3.10/site-packages/nltk/sem/evaluate.py +829 -0
  47. venv/lib/python3.10/site-packages/nltk/sem/glue.py +835 -0
  48. venv/lib/python3.10/site-packages/nltk/sem/hole.py +395 -0
  49. venv/lib/python3.10/site-packages/nltk/sem/lfg.py +261 -0
  50. venv/lib/python3.10/site-packages/nltk/sem/linearlogic.py +482 -0
ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f38b925a7af0322ad68abc7db2ac055147a75f029045fe6c2876445f2535178
+ size 33555627
ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91e9274825045ac22d62b393697bacccac6f6165f7c016168f9b976333c96ad0
+ size 33555533
ckpts/universal/global_step40/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4daa274fcea6f52ae6438d775e833cebb40dd2139690bb2745dd8e698c71dadb
+ size 33555627
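
The three checkpoint entries above are Git LFS pointer files (version, oid, size) rather than the tensors themselves. A minimal sketch, assuming the repository is checked out without LFS smudging, of reading one of the pointers listed above:

    # Parse a Git LFS pointer file into its key/value fields.
    def parse_lfs_pointer(path):
        fields = {}
        with open(path, encoding="utf-8") as fh:
            for line in fh:
                key, _, value = line.strip().partition(" ")
                if key:
                    fields[key] = value
        return fields

    ptr = parse_lfs_pointer(
        "ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg_sq.pt"
    )
    print(ptr["oid"], ptr["size"])  # sha256:3f38b925... 33555627
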
venv/lib/python3.10/site-packages/nltk/cluster/__init__.py ADDED
@@ -0,0 +1,92 @@
+ # Natural Language Toolkit: Clusterers
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Trevor Cohn <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ """
+ This module contains a number of basic clustering algorithms. Clustering
+ describes the task of discovering groups of similar items within a large
+ collection. It is also described as unsupervised machine learning, as the data
+ from which it learns is unannotated with class information, as is the case for
+ supervised learning. Annotated data is difficult and expensive to obtain in
+ the quantities required for the majority of supervised learning algorithms.
+ This problem, the knowledge acquisition bottleneck, is common to most natural
+ language processing tasks, thus fueling the need for quality unsupervised
+ approaches.
+
+ This module contains a k-means clusterer, E-M clusterer and a group average
+ agglomerative clusterer (GAAC). All these clusterers involve finding good
+ cluster groupings for a set of vectors in multi-dimensional space.
+
+ The K-means clusterer starts with k arbitrarily chosen means then allocates each
+ vector to the cluster with the closest mean. It then recalculates the means of
+ each cluster as the centroid of the vectors in the cluster. This process
+ repeats until the cluster memberships stabilise. This is a hill-climbing
+ algorithm which may converge to a local maximum. Hence the clustering is
+ often repeated with random initial means and the most commonly occurring
+ output means are chosen.
+
+ The GAAC clusterer starts with each of the *N* vectors as singleton clusters.
+ It then iteratively merges pairs of clusters which have the closest centroids.
+ This continues until there is only one cluster. The order of merges gives rise
+ to a dendrogram - a tree with the earlier merges lower than later merges. The
+ membership of a given number of clusters *c*, *1 <= c <= N*, can be found by
+ cutting the dendrogram at depth *c*.
+
+ The Gaussian EM clusterer models the vectors as being produced by a mixture
+ of k Gaussian sources. The parameters of these sources (prior probability,
+ mean and covariance matrix) are then found to maximise the likelihood of the
+ given data. This is done with the expectation maximisation algorithm. It
+ starts with k arbitrarily chosen means, priors and covariance matrices. It
+ then calculates the membership probabilities for each vector in each of the
+ clusters - this is the 'E' step. The cluster parameters are then updated in
+ the 'M' step using the maximum likelihood estimate from the cluster membership
+ probabilities. This process continues until the likelihood of the data does
+ not significantly increase.
+
+ They all extend the ClusterI interface which defines common operations
+ available with each clusterer. These operations include:
+
+ - cluster: clusters a sequence of vectors
+ - classify: assign a vector to a cluster
+ - classification_probdist: give the probability distribution over cluster memberships
+
+ The current existing classifiers also extend cluster.VectorSpace, an
+ abstract class which allows for singular value decomposition (SVD) and vector
+ normalisation. SVD is used to reduce the dimensionality of the vector space in
+ such a manner as to preserve as much of the variation as possible, by
+ reparameterising the axes in order of variability and discarding all bar the
+ first d dimensions. Normalisation ensures that vectors fall in the unit
+ hypersphere.
+
+ Usage example (see also demo())::
+
+     from nltk import cluster
+     from nltk.cluster import euclidean_distance
+     from numpy import array
+
+     vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]
+
+     # initialise the clusterer (will also assign the vectors to clusters)
+     clusterer = cluster.KMeansClusterer(2, euclidean_distance)
+     clusterer.cluster(vectors, True)
+
+     # classify a new vector
+     print(clusterer.classify(array([3, 3])))
+
+ Note that the vectors must use numpy array-like
+ objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for
+ efficiency when required.
+ """
+
+ from nltk.cluster.em import EMClusterer
+ from nltk.cluster.gaac import GAAClusterer
+ from nltk.cluster.kmeans import KMeansClusterer
+ from nltk.cluster.util import (
+     Dendrogram,
+     VectorSpaceClusterer,
+     cosine_distance,
+     euclidean_distance,
+ )
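
As a companion to the usage example in the docstring above, a minimal sketch (illustrative values; assumes numpy and nltk are installed) that exercises all three clusterers the module exports:

    import numpy
    from nltk.cluster import (
        EMClusterer,
        GAAClusterer,
        KMeansClusterer,
        euclidean_distance,
    )

    vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    # k-means: 2 means, 10 random restarts to reduce sensitivity to initialisation
    kmeans = KMeansClusterer(2, euclidean_distance, repeats=10)
    print("k-means:", kmeans.cluster(vectors, assign_clusters=True))

    # GAAC: build the dendrogram, then read off 3 clusters
    gaac = GAAClusterer(3)
    print("gaac:   ", gaac.cluster(vectors, assign_clusters=True))

    # EM: two deliberately close initial means, small variance bias
    em = EMClusterer([[4, 2], [4, 2.01]], bias=0.1)
    print("em:     ", em.cluster(vectors, assign_clusters=True))
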
venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.31 kB).
venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc ADDED
Binary file (2.43 kB).
venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc ADDED
Binary file (6.85 kB).
venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc ADDED
Binary file (4.99 kB).
venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc ADDED
Binary file (6.67 kB).
venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc ADDED
Binary file (9.85 kB).
venv/lib/python3.10/site-packages/nltk/cluster/api.py ADDED
@@ -0,0 +1,74 @@
+ # Natural Language Toolkit: Clusterer Interfaces
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Trevor Cohn <[email protected]>
+ # Porting: Steven Bird <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ from abc import ABCMeta, abstractmethod
+
+ from nltk.probability import DictionaryProbDist
+
+
+ class ClusterI(metaclass=ABCMeta):
+     """
+     Interface covering basic clustering functionality.
+     """
+
+     @abstractmethod
+     def cluster(self, vectors, assign_clusters=False):
+         """
+         Assigns the vectors to clusters, learning the clustering parameters
+         from the data. Returns a cluster identifier for each vector.
+         """
+
+     @abstractmethod
+     def classify(self, token):
+         """
+         Classifies the token into a cluster, setting the token's CLUSTER
+         parameter to that cluster identifier.
+         """
+
+     def likelihood(self, vector, label):
+         """
+         Returns the likelihood (a float) of the token having the
+         corresponding cluster.
+         """
+         if self.classify(vector) == label:
+             return 1.0
+         else:
+             return 0.0
+
+     def classification_probdist(self, vector):
+         """
+         Classifies the token into a cluster, returning
+         a probability distribution over the cluster identifiers.
+         """
+         likelihoods = {}
+         sum = 0.0
+         for cluster in self.cluster_names():
+             likelihoods[cluster] = self.likelihood(vector, cluster)
+             sum += likelihoods[cluster]
+         for cluster in self.cluster_names():
+             likelihoods[cluster] /= sum
+         return DictionaryProbDist(likelihoods)
+
+     @abstractmethod
+     def num_clusters(self):
+         """
+         Returns the number of clusters.
+         """
+
+     def cluster_names(self):
+         """
+         Returns the names of the clusters.
+         :rtype: list
+         """
+         return list(range(self.num_clusters()))
+
+     def cluster_name(self, index):
+         """
+         Returns the names of the cluster at index.
+         """
+         return index
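
A hypothetical (not part of NLTK) minimal implementation of the ClusterI interface above; only the three abstract methods need to be supplied, while classification_probdist and cluster_names are inherited from the base class:

    from nltk.cluster.api import ClusterI

    class ThresholdClusterer(ClusterI):
        """Toy clusterer: bucket vectors by whether their first coordinate
        is below or at/above a fixed threshold (illustration only)."""

        def __init__(self, threshold=0.0):
            self._threshold = threshold

        def cluster(self, vectors, assign_clusters=False):
            if assign_clusters:
                return [self.classify(v) for v in vectors]

        def classify(self, vector):
            return 0 if vector[0] < self._threshold else 1

        def num_clusters(self):
            return 2

    c = ThresholdClusterer(2.5)
    print(c.cluster([[1, 0], [3, 4], [2, 2]], assign_clusters=True))  # [0, 1, 0]
    print(c.classification_probdist([3, 4]).prob(1))                  # 1.0
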
venv/lib/python3.10/site-packages/nltk/cluster/em.py ADDED
@@ -0,0 +1,219 @@
1
+ # Natural Language Toolkit: Expectation Maximization Clusterer
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ try:
9
+ import numpy
10
+ except ImportError:
11
+ pass
12
+
13
+ from nltk.cluster.util import VectorSpaceClusterer
14
+
15
+
16
+ class EMClusterer(VectorSpaceClusterer):
17
+ """
18
+ The Gaussian EM clusterer models the vectors as being produced by
19
+ a mixture of k Gaussian sources. The parameters of these sources
20
+ (prior probability, mean and covariance matrix) are then found to
21
+ maximise the likelihood of the given data. This is done with the
22
+ expectation maximisation algorithm. It starts with k arbitrarily
23
+ chosen means, priors and covariance matrices. It then calculates
24
+ the membership probabilities for each vector in each of the
25
+ clusters; this is the 'E' step. The cluster parameters are then
26
+ updated in the 'M' step using the maximum likelihood estimate from
27
+ the cluster membership probabilities. This process continues until
28
+ the likelihood of the data does not significantly increase.
29
+ """
30
+
31
+ def __init__(
32
+ self,
33
+ initial_means,
34
+ priors=None,
35
+ covariance_matrices=None,
36
+ conv_threshold=1e-6,
37
+ bias=0.1,
38
+ normalise=False,
39
+ svd_dimensions=None,
40
+ ):
41
+ """
42
+ Creates an EM clusterer with the given starting parameters,
43
+ convergence threshold and vector mangling parameters.
44
+
45
+ :param initial_means: the means of the gaussian cluster centers
46
+ :type initial_means: [seq of] numpy array or seq of SparseArray
47
+ :param priors: the prior probability for each cluster
48
+ :type priors: numpy array or seq of float
49
+ :param covariance_matrices: the covariance matrix for each cluster
50
+ :type covariance_matrices: [seq of] numpy array
51
+ :param conv_threshold: maximum change in likelihood before deemed
52
+ convergent
53
+ :type conv_threshold: int or float
54
+ :param bias: variance bias used to ensure non-singular covariance
55
+ matrices
56
+ :type bias: float
57
+ :param normalise: should vectors be normalised to length 1
58
+ :type normalise: boolean
59
+ :param svd_dimensions: number of dimensions to use in reducing vector
60
+ dimensionality with SVD
61
+ :type svd_dimensions: int
62
+ """
63
+ VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
64
+ self._means = numpy.array(initial_means, numpy.float64)
65
+ self._num_clusters = len(initial_means)
66
+ self._conv_threshold = conv_threshold
67
+ self._covariance_matrices = covariance_matrices
68
+ self._priors = priors
69
+ self._bias = bias
70
+
71
+ def num_clusters(self):
72
+ return self._num_clusters
73
+
74
+ def cluster_vectorspace(self, vectors, trace=False):
75
+ assert len(vectors) > 0
76
+
77
+ # set the parameters to initial values
78
+ dimensions = len(vectors[0])
79
+ means = self._means
80
+ priors = self._priors
81
+ if not priors:
82
+ priors = self._priors = (
83
+ numpy.ones(self._num_clusters, numpy.float64) / self._num_clusters
84
+ )
85
+ covariances = self._covariance_matrices
86
+ if not covariances:
87
+ covariances = self._covariance_matrices = [
88
+ numpy.identity(dimensions, numpy.float64)
89
+ for i in range(self._num_clusters)
90
+ ]
91
+
92
+ # do the E and M steps until the likelihood plateaus
93
+ lastl = self._loglikelihood(vectors, priors, means, covariances)
94
+ converged = False
95
+
96
+ while not converged:
97
+ if trace:
98
+ print("iteration; loglikelihood", lastl)
99
+ # E-step, calculate hidden variables, h[i,j]
100
+ h = numpy.zeros((len(vectors), self._num_clusters), numpy.float64)
101
+ for i in range(len(vectors)):
102
+ for j in range(self._num_clusters):
103
+ h[i, j] = priors[j] * self._gaussian(
104
+ means[j], covariances[j], vectors[i]
105
+ )
106
+ h[i, :] /= sum(h[i, :])
107
+
108
+ # M-step, update parameters - cvm, p, mean
109
+ for j in range(self._num_clusters):
110
+ covariance_before = covariances[j]
111
+ new_covariance = numpy.zeros((dimensions, dimensions), numpy.float64)
112
+ new_mean = numpy.zeros(dimensions, numpy.float64)
113
+ sum_hj = 0.0
114
+ for i in range(len(vectors)):
115
+ delta = vectors[i] - means[j]
116
+ new_covariance += h[i, j] * numpy.multiply.outer(delta, delta)
117
+ sum_hj += h[i, j]
118
+ new_mean += h[i, j] * vectors[i]
119
+ covariances[j] = new_covariance / sum_hj
120
+ means[j] = new_mean / sum_hj
121
+ priors[j] = sum_hj / len(vectors)
122
+
123
+ # bias term to stop covariance matrix being singular
124
+ covariances[j] += self._bias * numpy.identity(dimensions, numpy.float64)
125
+
126
+ # calculate likelihood - FIXME: may be broken
127
+ l = self._loglikelihood(vectors, priors, means, covariances)
128
+
129
+ # check for convergence
130
+ if abs(lastl - l) < self._conv_threshold:
131
+ converged = True
132
+ lastl = l
133
+
134
+ def classify_vectorspace(self, vector):
135
+ best = None
136
+ for j in range(self._num_clusters):
137
+ p = self._priors[j] * self._gaussian(
138
+ self._means[j], self._covariance_matrices[j], vector
139
+ )
140
+ if not best or p > best[0]:
141
+ best = (p, j)
142
+ return best[1]
143
+
144
+ def likelihood_vectorspace(self, vector, cluster):
145
+ cid = self.cluster_names().index(cluster)
146
+ return self._priors[cluster] * self._gaussian(
147
+ self._means[cluster], self._covariance_matrices[cluster], vector
148
+ )
149
+
150
+ def _gaussian(self, mean, cvm, x):
151
+ m = len(mean)
152
+ assert cvm.shape == (m, m), "bad sized covariance matrix, %s" % str(cvm.shape)
153
+ try:
154
+ det = numpy.linalg.det(cvm)
155
+ inv = numpy.linalg.inv(cvm)
156
+ a = det**-0.5 * (2 * numpy.pi) ** (-m / 2.0)
157
+ dx = x - mean
158
+ print(dx, inv)
159
+ b = -0.5 * numpy.dot(numpy.dot(dx, inv), dx)
160
+ return a * numpy.exp(b)
161
+ except OverflowError:
162
+ # happens when the exponent is negative infinity - i.e. b = 0
163
+ # i.e. the inverse of cvm is huge (cvm is almost zero)
164
+ return 0
165
+
166
+ def _loglikelihood(self, vectors, priors, means, covariances):
167
+ llh = 0.0
168
+ for vector in vectors:
169
+ p = 0
170
+ for j in range(len(priors)):
171
+ p += priors[j] * self._gaussian(means[j], covariances[j], vector)
172
+ llh += numpy.log(p)
173
+ return llh
174
+
175
+ def __repr__(self):
176
+ return "<EMClusterer means=%s>" % list(self._means)
177
+
178
+
179
+ def demo():
180
+ """
181
+ Non-interactive demonstration of the clusterers with simple 2-D data.
182
+ """
183
+
184
+ from nltk import cluster
185
+
186
+ # example from figure 14.10, page 519, Manning and Schutze
187
+
188
+ vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]
189
+ means = [[4, 2], [4, 2.01]]
190
+
191
+ clusterer = cluster.EMClusterer(means, bias=0.1)
192
+ clusters = clusterer.cluster(vectors, True, trace=True)
193
+
194
+ print("Clustered:", vectors)
195
+ print("As: ", clusters)
196
+ print()
197
+
198
+ for c in range(2):
199
+ print("Cluster:", c)
200
+ print("Prior: ", clusterer._priors[c])
201
+ print("Mean: ", clusterer._means[c])
202
+ print("Covar: ", clusterer._covariance_matrices[c])
203
+ print()
204
+
205
+ # classify a new vector
206
+ vector = numpy.array([2, 2])
207
+ print("classify(%s):" % vector, end=" ")
208
+ print(clusterer.classify(vector))
209
+
210
+ # show the classification probabilities
211
+ vector = numpy.array([2, 2])
212
+ print("classification_probdist(%s):" % vector)
213
+ pdist = clusterer.classification_probdist(vector)
214
+ for sample in pdist.samples():
215
+ print(f"{sample} => {pdist.prob(sample) * 100:.0f}%")
216
+
217
+
218
+ if __name__ == "__main__":
219
+ demo()
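
The _gaussian helper above evaluates the multivariate normal density used in the E-step. A standalone sketch of the same quantity (illustrative values), useful when sanity-checking cluster responsibilities:

    import numpy

    def gaussian_pdf(mean, cov, x):
        # N(x; mean, cov) = (2*pi)^(-m/2) * det(cov)^(-1/2) * exp(-0.5 * dx' inv(cov) dx)
        m = len(mean)
        det = numpy.linalg.det(cov)
        inv = numpy.linalg.inv(cov)
        dx = x - mean
        return det ** -0.5 * (2 * numpy.pi) ** (-m / 2.0) * numpy.exp(-0.5 * dx @ inv @ dx)

    print(gaussian_pdf(numpy.zeros(2), numpy.identity(2), numpy.array([0.5, 0.5])))
    # ~0.124 (the peak density of a 2-D standard normal is 1/(2*pi) ~ 0.159)
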
venv/lib/python3.10/site-packages/nltk/cluster/gaac.py ADDED
@@ -0,0 +1,170 @@
1
+ # Natural Language Toolkit: Group Average Agglomerative Clusterer
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ try:
9
+ import numpy
10
+ except ImportError:
11
+ pass
12
+
13
+ from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance
14
+
15
+
16
+ class GAAClusterer(VectorSpaceClusterer):
17
+ """
18
+ The Group Average Agglomerative clusterer starts with each of the N vectors as singleton
19
+ clusters. It then iteratively merges pairs of clusters which have the
20
+ closest centroids. This continues until there is only one cluster. The
21
+ order of merges gives rise to a dendrogram: a tree with the earlier merges
22
+ lower than later merges. The membership of a given number of clusters c, 1
23
+ <= c <= N, can be found by cutting the dendrogram at depth c.
24
+
25
+ This clusterer uses the cosine similarity metric only, which allows for
26
+ efficient speed-up in the clustering process.
27
+ """
28
+
29
+ def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
30
+ VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
31
+ self._num_clusters = num_clusters
32
+ self._dendrogram = None
33
+ self._groups_values = None
34
+
35
+ def cluster(self, vectors, assign_clusters=False, trace=False):
36
+ # stores the merge order
37
+ self._dendrogram = Dendrogram(
38
+ [numpy.array(vector, numpy.float64) for vector in vectors]
39
+ )
40
+ return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)
41
+
42
+ def cluster_vectorspace(self, vectors, trace=False):
43
+ # variables describing the initial situation
44
+ N = len(vectors)
45
+ cluster_len = [1] * N
46
+ cluster_count = N
47
+ index_map = numpy.arange(N)
48
+
49
+ # construct the similarity matrix
50
+ dims = (N, N)
51
+ dist = numpy.ones(dims, dtype=float) * numpy.inf
52
+ for i in range(N):
53
+ for j in range(i + 1, N):
54
+ dist[i, j] = cosine_distance(vectors[i], vectors[j])
55
+
56
+ while cluster_count > max(self._num_clusters, 1):
57
+ i, j = numpy.unravel_index(dist.argmin(), dims)
58
+ if trace:
59
+ print("merging %d and %d" % (i, j))
60
+
61
+ # update similarities for merging i and j
62
+ self._merge_similarities(dist, cluster_len, i, j)
63
+
64
+ # remove j
65
+ dist[:, j] = numpy.inf
66
+ dist[j, :] = numpy.inf
67
+
68
+ # merge the clusters
69
+ cluster_len[i] = cluster_len[i] + cluster_len[j]
70
+ self._dendrogram.merge(index_map[i], index_map[j])
71
+ cluster_count -= 1
72
+
73
+ # update the index map to reflect the indexes if we
74
+ # had removed j
75
+ index_map[j + 1 :] -= 1
76
+ index_map[j] = N
77
+
78
+ self.update_clusters(self._num_clusters)
79
+
80
+ def _merge_similarities(self, dist, cluster_len, i, j):
81
+ # the new cluster i merged from i and j adopts the average of
82
+ # i and j's similarity to each other cluster, weighted by the
83
+ # number of points in the clusters i and j
84
+ i_weight = cluster_len[i]
85
+ j_weight = cluster_len[j]
86
+ weight_sum = i_weight + j_weight
87
+
88
+ # update for x<i
89
+ dist[:i, i] = dist[:i, i] * i_weight + dist[:i, j] * j_weight
90
+ dist[:i, i] /= weight_sum
91
+ # update for i<x<j
92
+ dist[i, i + 1 : j] = (
93
+ dist[i, i + 1 : j] * i_weight + dist[i + 1 : j, j] * j_weight
94
+ )
95
+ # update for i<j<x
96
+ dist[i, j + 1 :] = dist[i, j + 1 :] * i_weight + dist[j, j + 1 :] * j_weight
97
+ dist[i, i + 1 :] /= weight_sum
98
+
99
+ def update_clusters(self, num_clusters):
100
+ clusters = self._dendrogram.groups(num_clusters)
101
+ self._centroids = []
102
+ for cluster in clusters:
103
+ assert len(cluster) > 0
104
+ if self._should_normalise:
105
+ centroid = self._normalise(cluster[0])
106
+ else:
107
+ centroid = numpy.array(cluster[0])
108
+ for vector in cluster[1:]:
109
+ if self._should_normalise:
110
+ centroid += self._normalise(vector)
111
+ else:
112
+ centroid += vector
113
+ centroid /= len(cluster)
114
+ self._centroids.append(centroid)
115
+ self._num_clusters = len(self._centroids)
116
+
117
+ def classify_vectorspace(self, vector):
118
+ best = None
119
+ for i in range(self._num_clusters):
120
+ centroid = self._centroids[i]
121
+ dist = cosine_distance(vector, centroid)
122
+ if not best or dist < best[0]:
123
+ best = (dist, i)
124
+ return best[1]
125
+
126
+ def dendrogram(self):
127
+ """
128
+ :return: The dendrogram representing the current clustering
129
+ :rtype: Dendrogram
130
+ """
131
+ return self._dendrogram
132
+
133
+ def num_clusters(self):
134
+ return self._num_clusters
135
+
136
+ def __repr__(self):
137
+ return "<GroupAverageAgglomerative Clusterer n=%d>" % self._num_clusters
138
+
139
+
140
+ def demo():
141
+ """
142
+ Non-interactive demonstration of the clusterers with simple 2-D data.
143
+ """
144
+
145
+ from nltk.cluster import GAAClusterer
146
+
147
+ # use a set of tokens with 2D indices
148
+ vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
149
+
150
+ # test the GAAC clusterer with 4 clusters
151
+ clusterer = GAAClusterer(4)
152
+ clusters = clusterer.cluster(vectors, True)
153
+
154
+ print("Clusterer:", clusterer)
155
+ print("Clustered:", vectors)
156
+ print("As:", clusters)
157
+ print()
158
+
159
+ # show the dendrogram
160
+ clusterer.dendrogram().show()
161
+
162
+ # classify a new vector
163
+ vector = numpy.array([3, 3])
164
+ print("classify(%s):" % vector, end=" ")
165
+ print(clusterer.classify(vector))
166
+ print()
167
+
168
+
169
+ if __name__ == "__main__":
170
+ demo()
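
Because GAAClusterer keeps the full dendrogram, the number of clusters can be changed after a run without re-clustering; a small sketch with illustrative data:

    import numpy
    from nltk.cluster import GAAClusterer

    vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    clusterer = GAAClusterer(4)
    print(clusterer.cluster(vectors, assign_clusters=True))  # 4-way grouping

    clusterer.update_clusters(2)       # cut the existing dendrogram at 2 groups
    print(clusterer.num_clusters())    # 2
    print(clusterer.classify(numpy.array([3, 3])))
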
venv/lib/python3.10/site-packages/nltk/cluster/kmeans.py ADDED
@@ -0,0 +1,231 @@
1
+ # Natural Language Toolkit: K-Means Clusterer
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import copy
9
+ import random
10
+ import sys
11
+
12
+ try:
13
+ import numpy
14
+ except ImportError:
15
+ pass
16
+
17
+
18
+ from nltk.cluster.util import VectorSpaceClusterer
19
+
20
+
21
+ class KMeansClusterer(VectorSpaceClusterer):
22
+ """
23
+ The K-means clusterer starts with k arbitrarily chosen means then allocates
24
+ each vector to the cluster with the closest mean. It then recalculates the
25
+ means of each cluster as the centroid of the vectors in the cluster. This
26
+ process repeats until the cluster memberships stabilise. This is a
27
+ hill-climbing algorithm which may converge to a local maximum. Hence the
28
+ clustering is often repeated with random initial means and the most
29
+ commonly occurring output means are chosen.
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ num_means,
35
+ distance,
36
+ repeats=1,
37
+ conv_test=1e-6,
38
+ initial_means=None,
39
+ normalise=False,
40
+ svd_dimensions=None,
41
+ rng=None,
42
+ avoid_empty_clusters=False,
43
+ ):
44
+
45
+ """
46
+ :param num_means: the number of means to use (may use fewer)
47
+ :type num_means: int
48
+ :param distance: measure of distance between two vectors
49
+ :type distance: function taking two vectors and returning a float
50
+ :param repeats: number of randomised clustering trials to use
51
+ :type repeats: int
52
+ :param conv_test: maximum variation in mean differences before
53
+ deemed convergent
54
+ :type conv_test: number
55
+ :param initial_means: set of k initial means
56
+ :type initial_means: sequence of vectors
57
+ :param normalise: should vectors be normalised to length 1
58
+ :type normalise: boolean
59
+ :param svd_dimensions: number of dimensions to use in reducing vector
60
+ dimensionality with SVD
61
+ :type svd_dimensions: int
62
+ :param rng: random number generator (or None)
63
+ :type rng: Random
64
+ :param avoid_empty_clusters: include current centroid in computation
65
+ of next one; avoids undefined behavior
66
+ when clusters become empty
67
+ :type avoid_empty_clusters: boolean
68
+ """
69
+ VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
70
+ self._num_means = num_means
71
+ self._distance = distance
72
+ self._max_difference = conv_test
73
+ assert not initial_means or len(initial_means) == num_means
74
+ self._means = initial_means
75
+ assert repeats >= 1
76
+ assert not (initial_means and repeats > 1)
77
+ self._repeats = repeats
78
+ self._rng = rng if rng else random.Random()
79
+ self._avoid_empty_clusters = avoid_empty_clusters
80
+
81
+ def cluster_vectorspace(self, vectors, trace=False):
82
+ if self._means and self._repeats > 1:
83
+ print("Warning: means will be discarded for subsequent trials")
84
+
85
+ meanss = []
86
+ for trial in range(self._repeats):
87
+ if trace:
88
+ print("k-means trial", trial)
89
+ if not self._means or trial > 1:
90
+ self._means = self._rng.sample(list(vectors), self._num_means)
91
+ self._cluster_vectorspace(vectors, trace)
92
+ meanss.append(self._means)
93
+
94
+ if len(meanss) > 1:
95
+ # sort the means first (so that different cluster numbering won't
96
+ # effect the distance comparison)
97
+ for means in meanss:
98
+ means.sort(key=sum)
99
+
100
+ # find the set of means that's minimally different from the others
101
+ min_difference = min_means = None
102
+ for i in range(len(meanss)):
103
+ d = 0
104
+ for j in range(len(meanss)):
105
+ if i != j:
106
+ d += self._sum_distances(meanss[i], meanss[j])
107
+ if min_difference is None or d < min_difference:
108
+ min_difference, min_means = d, meanss[i]
109
+
110
+ # use the best means
111
+ self._means = min_means
112
+
113
+ def _cluster_vectorspace(self, vectors, trace=False):
114
+ if self._num_means < len(vectors):
115
+ # perform k-means clustering
116
+ converged = False
117
+ while not converged:
118
+ # assign the tokens to clusters based on minimum distance to
119
+ # the cluster means
120
+ clusters = [[] for m in range(self._num_means)]
121
+ for vector in vectors:
122
+ index = self.classify_vectorspace(vector)
123
+ clusters[index].append(vector)
124
+
125
+ if trace:
126
+ print("iteration")
127
+ # for i in range(self._num_means):
128
+ # print ' mean', i, 'allocated', len(clusters[i]), 'vectors'
129
+
130
+ # recalculate cluster means by computing the centroid of each cluster
131
+ new_means = list(map(self._centroid, clusters, self._means))
132
+
133
+ # measure the degree of change from the previous step for convergence
134
+ difference = self._sum_distances(self._means, new_means)
135
+ if difference < self._max_difference:
136
+ converged = True
137
+
138
+ # remember the new means
139
+ self._means = new_means
140
+
141
+ def classify_vectorspace(self, vector):
142
+ # finds the closest cluster centroid
143
+ # returns that cluster's index
144
+ best_distance = best_index = None
145
+ for index in range(len(self._means)):
146
+ mean = self._means[index]
147
+ dist = self._distance(vector, mean)
148
+ if best_distance is None or dist < best_distance:
149
+ best_index, best_distance = index, dist
150
+ return best_index
151
+
152
+ def num_clusters(self):
153
+ if self._means:
154
+ return len(self._means)
155
+ else:
156
+ return self._num_means
157
+
158
+ def means(self):
159
+ """
160
+ The means used for clustering.
161
+ """
162
+ return self._means
163
+
164
+ def _sum_distances(self, vectors1, vectors2):
165
+ difference = 0.0
166
+ for u, v in zip(vectors1, vectors2):
167
+ difference += self._distance(u, v)
168
+ return difference
169
+
170
+ def _centroid(self, cluster, mean):
171
+ if self._avoid_empty_clusters:
172
+ centroid = copy.copy(mean)
173
+ for vector in cluster:
174
+ centroid += vector
175
+ return centroid / (1 + len(cluster))
176
+ else:
177
+ if not len(cluster):
178
+ sys.stderr.write("Error: no centroid defined for empty cluster.\n")
179
+ sys.stderr.write(
180
+ "Try setting argument 'avoid_empty_clusters' to True\n"
181
+ )
182
+ assert False
183
+ centroid = copy.copy(cluster[0])
184
+ for vector in cluster[1:]:
185
+ centroid += vector
186
+ return centroid / len(cluster)
187
+
188
+ def __repr__(self):
189
+ return "<KMeansClusterer means=%s repeats=%d>" % (self._means, self._repeats)
190
+
191
+
192
+ #################################################################################
193
+
194
+
195
+ def demo():
196
+ # example from figure 14.9, page 517, Manning and Schutze
197
+
198
+ from nltk.cluster import KMeansClusterer, euclidean_distance
199
+
200
+ vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]]
201
+ means = [[4, 3], [5, 5]]
202
+
203
+ clusterer = KMeansClusterer(2, euclidean_distance, initial_means=means)
204
+ clusters = clusterer.cluster(vectors, True, trace=True)
205
+
206
+ print("Clustered:", vectors)
207
+ print("As:", clusters)
208
+ print("Means:", clusterer.means())
209
+ print()
210
+
211
+ vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
212
+
213
+ # test k-means using the euclidean distance metric, 2 means and repeat
214
+ # clustering 10 times with random seeds
215
+
216
+ clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
217
+ clusters = clusterer.cluster(vectors, True)
218
+ print("Clustered:", vectors)
219
+ print("As:", clusters)
220
+ print("Means:", clusterer.means())
221
+ print()
222
+
223
+ # classify a new vector
224
+ vector = numpy.array([3, 3])
225
+ print("classify(%s):" % vector, end=" ")
226
+ print(clusterer.classify(vector))
227
+ print()
228
+
229
+
230
+ if __name__ == "__main__":
231
+ demo()
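
A small sketch (illustrative values) of the rng and avoid_empty_clusters parameters documented above, which make repeated trials reproducible and guard the centroid update against empty clusters:

    import random
    import numpy
    from nltk.cluster import KMeansClusterer, euclidean_distance

    vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7], [3, 3], [4, 2]]]

    clusterer = KMeansClusterer(
        2,
        euclidean_distance,
        repeats=5,
        rng=random.Random(42),        # fixed seed -> the same trials every run
        avoid_empty_clusters=True,    # include the current centroid in each update
    )
    print(clusterer.cluster(vectors, assign_clusters=True))
    print(clusterer.means())
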
venv/lib/python3.10/site-packages/nltk/parse/corenlp.py ADDED
@@ -0,0 +1,800 @@
1
+ # Natural Language Toolkit: Interface to the CoreNLP REST API.
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Dmitrijs Milajevs <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import json
10
+ import os # required for doctests
11
+ import re
12
+ import socket
13
+ import time
14
+ from typing import List, Tuple
15
+
16
+ from nltk.internals import _java_options, config_java, find_jar_iter, java
17
+ from nltk.parse.api import ParserI
18
+ from nltk.parse.dependencygraph import DependencyGraph
19
+ from nltk.tag.api import TaggerI
20
+ from nltk.tokenize.api import TokenizerI
21
+ from nltk.tree import Tree
22
+
23
+ _stanford_url = "https://stanfordnlp.github.io/CoreNLP/"
24
+
25
+
26
+ class CoreNLPServerError(EnvironmentError):
27
+ """Exceptions associated with the Core NLP server."""
28
+
29
+
30
+ def try_port(port=0):
31
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
32
+ sock.bind(("", port))
33
+
34
+ p = sock.getsockname()[1]
35
+ sock.close()
36
+
37
+ return p
38
+
39
+
40
+ class CoreNLPServer:
41
+
42
+ _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)\.(\d+)\.(\d+)-models\.jar"
43
+ _JAR = r"stanford-corenlp-(\d+)\.(\d+)\.(\d+)\.jar"
44
+
45
+ def __init__(
46
+ self,
47
+ path_to_jar=None,
48
+ path_to_models_jar=None,
49
+ verbose=False,
50
+ java_options=None,
51
+ corenlp_options=None,
52
+ port=None,
53
+ ):
54
+
55
+ if corenlp_options is None:
56
+ corenlp_options = ["-preload", "tokenize,ssplit,pos,lemma,parse,depparse"]
57
+
58
+ jars = list(
59
+ find_jar_iter(
60
+ self._JAR,
61
+ path_to_jar,
62
+ env_vars=("CORENLP",),
63
+ searchpath=(),
64
+ url=_stanford_url,
65
+ verbose=verbose,
66
+ is_regex=True,
67
+ )
68
+ )
69
+
70
+ # find the most recent code and model jar
71
+ stanford_jar = max(jars, key=lambda model_name: re.match(self._JAR, model_name))
72
+
73
+ if port is None:
74
+ try:
75
+ port = try_port(9000)
76
+ except OSError:
77
+ port = try_port()
78
+ corenlp_options.extend(["-port", str(port)])
79
+ else:
80
+ try_port(port)
81
+ corenlp_options.extend(["-port", str(port)])
82
+
83
+ self.url = f"http://localhost:{port}"
84
+
85
+ model_jar = max(
86
+ find_jar_iter(
87
+ self._MODEL_JAR_PATTERN,
88
+ path_to_models_jar,
89
+ env_vars=("CORENLP_MODELS",),
90
+ searchpath=(),
91
+ url=_stanford_url,
92
+ verbose=verbose,
93
+ is_regex=True,
94
+ ),
95
+ key=lambda model_name: re.match(self._MODEL_JAR_PATTERN, model_name),
96
+ )
97
+
98
+ self.verbose = verbose
99
+
100
+ self._classpath = stanford_jar, model_jar
101
+
102
+ self.corenlp_options = corenlp_options
103
+ self.java_options = java_options or ["-mx2g"]
104
+
105
+ def start(self, stdout="devnull", stderr="devnull"):
106
+ """Starts the CoreNLP server
107
+
108
+ :param stdout, stderr: Specifies where CoreNLP output is redirected. Valid values are 'devnull', 'stdout', 'pipe'
109
+ """
110
+ import requests
111
+
112
+ cmd = ["edu.stanford.nlp.pipeline.StanfordCoreNLPServer"]
113
+
114
+ if self.corenlp_options:
115
+ cmd.extend(self.corenlp_options)
116
+
117
+ # Configure java.
118
+ default_options = " ".join(_java_options)
119
+ config_java(options=self.java_options, verbose=self.verbose)
120
+
121
+ try:
122
+ self.popen = java(
123
+ cmd,
124
+ classpath=self._classpath,
125
+ blocking=False,
126
+ stdout=stdout,
127
+ stderr=stderr,
128
+ )
129
+ finally:
130
+ # Return java configurations to their default values.
131
+ config_java(options=default_options, verbose=self.verbose)
132
+
133
+ # Check that the server is still running.
134
+ returncode = self.popen.poll()
135
+ if returncode is not None:
136
+ _, stderrdata = self.popen.communicate()
137
+ raise CoreNLPServerError(
138
+ returncode,
139
+ "Could not start the server. "
140
+ "The error was: {}".format(stderrdata.decode("ascii")),
141
+ )
142
+
143
+ for i in range(30):
144
+ try:
145
+ response = requests.get(requests.compat.urljoin(self.url, "live"))
146
+ except requests.exceptions.ConnectionError:
147
+ time.sleep(1)
148
+ else:
149
+ if response.ok:
150
+ break
151
+ else:
152
+ raise CoreNLPServerError("Could not connect to the server.")
153
+
154
+ for i in range(60):
155
+ try:
156
+ response = requests.get(requests.compat.urljoin(self.url, "ready"))
157
+ except requests.exceptions.ConnectionError:
158
+ time.sleep(1)
159
+ else:
160
+ if response.ok:
161
+ break
162
+ else:
163
+ raise CoreNLPServerError("The server is not ready.")
164
+
165
+ def stop(self):
166
+ self.popen.terminate()
167
+ self.popen.wait()
168
+
169
+ def __enter__(self):
170
+ self.start()
171
+
172
+ return self
173
+
174
+ def __exit__(self, exc_type, exc_val, exc_tb):
175
+ self.stop()
176
+ return False
177
+
178
+
179
+ class GenericCoreNLPParser(ParserI, TokenizerI, TaggerI):
180
+ """Interface to the CoreNLP Parser."""
181
+
182
+ def __init__(
183
+ self,
184
+ url="http://localhost:9000",
185
+ encoding="utf8",
186
+ tagtype=None,
187
+ strict_json=True,
188
+ ):
189
+ import requests
190
+
191
+ self.url = url
192
+ self.encoding = encoding
193
+
194
+ if tagtype not in ["pos", "ner", None]:
195
+ raise ValueError("tagtype must be either 'pos', 'ner' or None")
196
+
197
+ self.tagtype = tagtype
198
+ self.strict_json = strict_json
199
+
200
+ self.session = requests.Session()
201
+
202
+ def parse_sents(self, sentences, *args, **kwargs):
203
+ """Parse multiple sentences.
204
+
205
+ Takes multiple sentences as a list where each sentence is a list of
206
+ words. Each sentence will be automatically tagged with this
207
+ CoreNLPParser instance's tagger.
208
+
209
+ If a whitespace exists inside a token, then the token will be treated as
210
+ several tokens.
211
+
212
+ :param sentences: Input sentences to parse
213
+ :type sentences: list(list(str))
214
+ :rtype: iter(iter(Tree))
215
+ """
216
+ # Converting list(list(str)) -> list(str)
217
+ sentences = (" ".join(words) for words in sentences)
218
+ return self.raw_parse_sents(sentences, *args, **kwargs)
219
+
220
+ def raw_parse(self, sentence, properties=None, *args, **kwargs):
221
+ """Parse a sentence.
222
+
223
+ Takes a sentence as a string; before parsing, it will be automatically
224
+ tokenized and tagged by the CoreNLP Parser.
225
+
226
+ :param sentence: Input sentence to parse
227
+ :type sentence: str
228
+ :rtype: iter(Tree)
229
+ """
230
+ default_properties = {"tokenize.whitespace": "false"}
231
+ default_properties.update(properties or {})
232
+
233
+ return next(
234
+ self.raw_parse_sents(
235
+ [sentence], properties=default_properties, *args, **kwargs
236
+ )
237
+ )
238
+
239
+ def api_call(self, data, properties=None, timeout=60):
240
+ default_properties = {
241
+ "outputFormat": "json",
242
+ "annotators": "tokenize,pos,lemma,ssplit,{parser_annotator}".format(
243
+ parser_annotator=self.parser_annotator
244
+ ),
245
+ }
246
+
247
+ default_properties.update(properties or {})
248
+
249
+ response = self.session.post(
250
+ self.url,
251
+ params={"properties": json.dumps(default_properties)},
252
+ data=data.encode(self.encoding),
253
+ headers={"Content-Type": f"text/plain; charset={self.encoding}"},
254
+ timeout=timeout,
255
+ )
256
+
257
+ response.raise_for_status()
258
+
259
+ return response.json(strict=self.strict_json)
260
+
261
+ def raw_parse_sents(
262
+ self, sentences, verbose=False, properties=None, *args, **kwargs
263
+ ):
264
+ """Parse multiple sentences.
265
+
266
+ Takes multiple sentences as a list of strings. Each sentence will be
267
+ automatically tokenized and tagged.
268
+
269
+ :param sentences: Input sentences to parse.
270
+ :type sentences: list(str)
271
+ :rtype: iter(iter(Tree))
272
+
273
+ """
274
+ default_properties = {
275
+ # Only splits on '\n', never inside the sentence.
276
+ "ssplit.eolonly": "true"
277
+ }
278
+
279
+ default_properties.update(properties or {})
280
+
281
+ """
282
+ for sentence in sentences:
283
+ parsed_data = self.api_call(sentence, properties=default_properties)
284
+
285
+ assert len(parsed_data['sentences']) == 1
286
+
287
+ for parse in parsed_data['sentences']:
288
+ tree = self.make_tree(parse)
289
+ yield iter([tree])
290
+ """
291
+ parsed_data = self.api_call("\n".join(sentences), properties=default_properties)
292
+ for parsed_sent in parsed_data["sentences"]:
293
+ tree = self.make_tree(parsed_sent)
294
+ yield iter([tree])
295
+
296
+ def parse_text(self, text, *args, **kwargs):
297
+ """Parse a piece of text.
298
+
299
+ The text might contain several sentences which will be split by CoreNLP.
300
+
301
+ :param str text: text to be split.
302
+ :returns: an iterable of syntactic structures. # TODO: should it be an iterable of iterables?
303
+
304
+ """
305
+ parsed_data = self.api_call(text, *args, **kwargs)
306
+
307
+ for parse in parsed_data["sentences"]:
308
+ yield self.make_tree(parse)
309
+
310
+ def tokenize(self, text, properties=None):
311
+ """Tokenize a string of text.
312
+
313
+ Skip these tests if CoreNLP is likely not ready.
314
+ >>> from nltk.test.setup_fixt import check_jar
315
+ >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True)
316
+
317
+ The CoreNLP server can be started using the following notation, although
318
+ we recommend the `with CoreNLPServer() as server:` context manager notation
319
+ to ensure that the server is always stopped.
320
+ >>> server = CoreNLPServer()
321
+ >>> server.start()
322
+ >>> parser = CoreNLPParser(url=server.url)
323
+
324
+ >>> text = 'Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\nThanks.'
325
+ >>> list(parser.tokenize(text))
326
+ ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
327
+
328
+ >>> s = "The colour of the wall is blue."
329
+ >>> list(
330
+ ... parser.tokenize(
331
+ ... 'The colour of the wall is blue.',
332
+ ... properties={'tokenize.options': 'americanize=true'},
333
+ ... )
334
+ ... )
335
+ ['The', 'colour', 'of', 'the', 'wall', 'is', 'blue', '.']
336
+ >>> server.stop()
337
+
338
+ """
339
+ default_properties = {"annotators": "tokenize,ssplit"}
340
+
341
+ default_properties.update(properties or {})
342
+
343
+ result = self.api_call(text, properties=default_properties)
344
+
345
+ for sentence in result["sentences"]:
346
+ for token in sentence["tokens"]:
347
+ yield token["originalText"] or token["word"]
348
+
349
+ def tag_sents(self, sentences):
350
+ """
351
+ Tag multiple sentences.
352
+
353
+ Takes multiple sentences as a list where each sentence is a list of
354
+ tokens.
355
+
356
+ :param sentences: Input sentences to tag
357
+ :type sentences: list(list(str))
358
+ :rtype: list(list(tuple(str, str))
359
+ """
360
+ # Converting list(list(str)) -> list(str)
361
+ sentences = (" ".join(words) for words in sentences)
362
+ return [sentences[0] for sentences in self.raw_tag_sents(sentences)]
363
+
364
+ def tag(self, sentence: str) -> List[Tuple[str, str]]:
365
+ """
366
+ Tag a list of tokens.
367
+
368
+ :rtype: list(tuple(str, str))
369
+
370
+ Skip these tests if CoreNLP is likely not ready.
371
+ >>> from nltk.test.setup_fixt import check_jar
372
+ >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True)
373
+
374
+ The CoreNLP server can be started using the following notation, although
375
+ we recommend the `with CoreNLPServer() as server:` context manager notation
376
+ to ensure that the server is always stopped.
377
+ >>> server = CoreNLPServer()
378
+ >>> server.start()
379
+ >>> parser = CoreNLPParser(url=server.url, tagtype='ner')
380
+ >>> tokens = 'Rami Eid is studying at Stony Brook University in NY'.split()
381
+ >>> parser.tag(tokens) # doctest: +NORMALIZE_WHITESPACE
382
+ [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'), ('at', 'O'), ('Stony', 'ORGANIZATION'),
383
+ ('Brook', 'ORGANIZATION'), ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'STATE_OR_PROVINCE')]
384
+
385
+ >>> parser = CoreNLPParser(url=server.url, tagtype='pos')
386
+ >>> tokens = "What is the airspeed of an unladen swallow ?".split()
387
+ >>> parser.tag(tokens) # doctest: +NORMALIZE_WHITESPACE
388
+ [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'),
389
+ ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'),
390
+ ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')]
391
+ >>> server.stop()
392
+ """
393
+ return self.tag_sents([sentence])[0]
394
+
395
+ def raw_tag_sents(self, sentences):
396
+ """
397
+ Tag multiple sentences.
398
+
399
+ Takes multiple sentences as a list where each sentence is a string.
400
+
401
+ :param sentences: Input sentences to tag
402
+ :type sentences: list(str)
403
+ :rtype: list(list(list(tuple(str, str)))
404
+ """
405
+ default_properties = {
406
+ "ssplit.isOneSentence": "true",
407
+ "annotators": "tokenize,ssplit,",
408
+ }
409
+
410
+ # Supports only 'pos' or 'ner' tags.
411
+ assert self.tagtype in ["pos", "ner"]
412
+ default_properties["annotators"] += self.tagtype
413
+ for sentence in sentences:
414
+ tagged_data = self.api_call(sentence, properties=default_properties)
415
+ yield [
416
+ [
417
+ (token["word"], token[self.tagtype])
418
+ for token in tagged_sentence["tokens"]
419
+ ]
420
+ for tagged_sentence in tagged_data["sentences"]
421
+ ]
422
+
423
+
424
+ class CoreNLPParser(GenericCoreNLPParser):
425
+ """
426
+ Skip these tests if CoreNLP is likely not ready.
427
+ >>> from nltk.test.setup_fixt import check_jar
428
+ >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True)
429
+
430
+ The recommended usage of `CoreNLPParser` is using the context manager notation:
431
+ >>> with CoreNLPServer() as server:
432
+ ... parser = CoreNLPParser(url=server.url)
433
+ ... next(
434
+ ... parser.raw_parse('The quick brown fox jumps over the lazy dog.')
435
+ ... ).pretty_print() # doctest: +NORMALIZE_WHITESPACE
436
+ ROOT
437
+ |
438
+ S
439
+ _______________|__________________________
440
+ | VP |
441
+ | _________|___ |
442
+ | | PP |
443
+ | | ________|___ |
444
+ NP | | NP |
445
+ ____|__________ | | _______|____ |
446
+ DT JJ JJ NN VBZ IN DT JJ NN .
447
+ | | | | | | | | | |
448
+ The quick brown fox jumps over the lazy dog .
449
+
450
+ Alternatively, the server can be started using the following notation.
451
+ Note that `CoreNLPServer` does not need to be used if the CoreNLP server is started
452
+ outside of Python.
453
+ >>> server = CoreNLPServer()
454
+ >>> server.start()
455
+ >>> parser = CoreNLPParser(url=server.url)
456
+
457
+ >>> (parse_fox, ), (parse_wolf, ) = parser.raw_parse_sents(
458
+ ... [
459
+ ... 'The quick brown fox jumps over the lazy dog.',
460
+ ... 'The quick grey wolf jumps over the lazy fox.',
461
+ ... ]
462
+ ... )
463
+
464
+ >>> parse_fox.pretty_print() # doctest: +NORMALIZE_WHITESPACE
465
+ ROOT
466
+ |
467
+ S
468
+ _______________|__________________________
469
+ | VP |
470
+ | _________|___ |
471
+ | | PP |
472
+ | | ________|___ |
473
+ NP | | NP |
474
+ ____|__________ | | _______|____ |
475
+ DT JJ JJ NN VBZ IN DT JJ NN .
476
+ | | | | | | | | | |
477
+ The quick brown fox jumps over the lazy dog .
478
+
479
+ >>> parse_wolf.pretty_print() # doctest: +NORMALIZE_WHITESPACE
480
+ ROOT
481
+ |
482
+ S
483
+ _______________|__________________________
484
+ | VP |
485
+ | _________|___ |
486
+ | | PP |
487
+ | | ________|___ |
488
+ NP | | NP |
489
+ ____|_________ | | _______|____ |
490
+ DT JJ JJ NN VBZ IN DT JJ NN .
491
+ | | | | | | | | | |
492
+ The quick grey wolf jumps over the lazy fox .
493
+
494
+ >>> (parse_dog, ), (parse_friends, ) = parser.parse_sents(
495
+ ... [
496
+ ... "I 'm a dog".split(),
497
+ ... "This is my friends ' cat ( the tabby )".split(),
498
+ ... ]
499
+ ... )
500
+
501
+ >>> parse_dog.pretty_print() # doctest: +NORMALIZE_WHITESPACE
502
+ ROOT
503
+ |
504
+ S
505
+ _______|____
506
+ | VP
507
+ | ________|___
508
+ NP | NP
509
+ | | ___|___
510
+ PRP VBP DT NN
511
+ | | | |
512
+ I 'm a dog
513
+
514
+ >>> parse_friends.pretty_print() # doctest: +NORMALIZE_WHITESPACE
515
+ ROOT
516
+ |
517
+ S
518
+ ____|___________
519
+ | VP
520
+ | ___________|_____________
521
+ | | NP
522
+ | | _______|________________________
523
+ | | NP | | |
524
+ | | _____|_______ | | |
525
+ NP | NP | | NP |
526
+ | | ______|_________ | | ___|____ |
527
+ DT VBZ PRP$ NNS POS NN -LRB- DT NN -RRB-
528
+ | | | | | | | | | |
529
+ This is my friends ' cat -LRB- the tabby -RRB-
530
+
531
+ >>> parse_john, parse_mary, = parser.parse_text(
532
+ ... 'John loves Mary. Mary walks.'
533
+ ... )
534
+
535
+ >>> parse_john.pretty_print() # doctest: +NORMALIZE_WHITESPACE
536
+ ROOT
537
+ |
538
+ S
539
+ _____|_____________
540
+ | VP |
541
+ | ____|___ |
542
+ NP | NP |
543
+ | | | |
544
+ NNP VBZ NNP .
545
+ | | | |
546
+ John loves Mary .
547
+
548
+ >>> parse_mary.pretty_print() # doctest: +NORMALIZE_WHITESPACE
549
+ ROOT
550
+ |
551
+ S
552
+ _____|____
553
+ NP VP |
554
+ | | |
555
+ NNP VBZ .
556
+ | | |
557
+ Mary walks .
558
+
559
+ Special cases
560
+
561
+ >>> next(
562
+ ... parser.raw_parse(
563
+ ... 'NASIRIYA, Iraq—Iraqi doctors who treated former prisoner of war '
564
+ ... 'Jessica Lynch have angrily dismissed claims made in her biography '
565
+ ... 'that she was raped by her Iraqi captors.'
566
+ ... )
567
+ ... ).height()
568
+ 14
569
+
570
+ >>> next(
571
+ ... parser.raw_parse(
572
+ ... "The broader Standard & Poor's 500 Index <.SPX> was 0.46 points lower, or "
573
+ ... '0.05 percent, at 997.02.'
574
+ ... )
575
+ ... ).height()
576
+ 11
577
+
578
+ >>> server.stop()
579
+ """
580
+
581
+ _OUTPUT_FORMAT = "penn"
582
+ parser_annotator = "parse"
583
+
584
+ def make_tree(self, result):
585
+ return Tree.fromstring(result["parse"])
586
+
587
+
588
+ class CoreNLPDependencyParser(GenericCoreNLPParser):
589
+ """Dependency parser.
590
+
591
+ Skip these tests if CoreNLP is likely not ready.
592
+ >>> from nltk.test.setup_fixt import check_jar
593
+ >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True)
594
+
595
+ The recommended usage of `CoreNLPParser` is using the context manager notation:
596
+ >>> with CoreNLPServer() as server:
597
+ ... dep_parser = CoreNLPDependencyParser(url=server.url)
598
+ ... parse, = dep_parser.raw_parse(
599
+ ... 'The quick brown fox jumps over the lazy dog.'
600
+ ... )
601
+ ... print(parse.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
602
+ The DT 4 det
603
+ quick JJ 4 amod
604
+ brown JJ 4 amod
605
+ fox NN 5 nsubj
606
+ jumps VBZ 0 ROOT
607
+ over IN 9 case
608
+ the DT 9 det
609
+ lazy JJ 9 amod
610
+ dog NN 5 obl
611
+ . . 5 punct
612
+
613
+ Alternatively, the server can be started using the following notation.
614
+ Note that `CoreNLPServer` does not need to be used if the CoreNLP server is started
615
+ outside of Python.
616
+ >>> server = CoreNLPServer()
617
+ >>> server.start()
618
+ >>> dep_parser = CoreNLPDependencyParser(url=server.url)
619
+ >>> parse, = dep_parser.raw_parse('The quick brown fox jumps over the lazy dog.')
620
+ >>> print(parse.tree()) # doctest: +NORMALIZE_WHITESPACE
621
+ (jumps (fox The quick brown) (dog over the lazy) .)
622
+
623
+ >>> for governor, dep, dependent in parse.triples():
624
+ ... print(governor, dep, dependent) # doctest: +NORMALIZE_WHITESPACE
625
+ ('jumps', 'VBZ') nsubj ('fox', 'NN')
626
+ ('fox', 'NN') det ('The', 'DT')
627
+ ('fox', 'NN') amod ('quick', 'JJ')
628
+ ('fox', 'NN') amod ('brown', 'JJ')
629
+ ('jumps', 'VBZ') obl ('dog', 'NN')
630
+ ('dog', 'NN') case ('over', 'IN')
631
+ ('dog', 'NN') det ('the', 'DT')
632
+ ('dog', 'NN') amod ('lazy', 'JJ')
633
+ ('jumps', 'VBZ') punct ('.', '.')
634
+
635
+ >>> (parse_fox, ), (parse_dog, ) = dep_parser.raw_parse_sents(
636
+ ... [
637
+ ... 'The quick brown fox jumps over the lazy dog.',
638
+ ... 'The quick grey wolf jumps over the lazy fox.',
639
+ ... ]
640
+ ... )
641
+ >>> print(parse_fox.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
642
+ The DT 4 det
643
+ quick JJ 4 amod
644
+ brown JJ 4 amod
645
+ fox NN 5 nsubj
646
+ jumps VBZ 0 ROOT
647
+ over IN 9 case
648
+ the DT 9 det
649
+ lazy JJ 9 amod
650
+ dog NN 5 obl
651
+ . . 5 punct
652
+
653
+ >>> print(parse_dog.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
654
+ The DT 4 det
655
+ quick JJ 4 amod
656
+ grey JJ 4 amod
657
+ wolf NN 5 nsubj
658
+ jumps VBZ 0 ROOT
659
+ over IN 9 case
660
+ the DT 9 det
661
+ lazy JJ 9 amod
662
+ fox NN 5 obl
663
+ . . 5 punct
664
+
665
+ >>> (parse_dog, ), (parse_friends, ) = dep_parser.parse_sents(
666
+ ... [
667
+ ... "I 'm a dog".split(),
668
+ ... "This is my friends ' cat ( the tabby )".split(),
669
+ ... ]
670
+ ... )
671
+ >>> print(parse_dog.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
672
+ I PRP 4 nsubj
673
+ 'm VBP 4 cop
674
+ a DT 4 det
675
+ dog NN 0 ROOT
676
+
677
+ >>> print(parse_friends.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
678
+ This DT 6 nsubj
679
+ is VBZ 6 cop
680
+ my PRP$ 4 nmod:poss
681
+ friends NNS 6 nmod:poss
682
+ ' POS 4 case
683
+ cat NN 0 ROOT
684
+ ( -LRB- 9 punct
685
+ the DT 9 det
686
+ tabby NN 6 dep
687
+ ) -RRB- 9 punct
688
+
689
+ >>> parse_john, parse_mary, = dep_parser.parse_text(
690
+ ... 'John loves Mary. Mary walks.'
691
+ ... )
692
+
693
+ >>> print(parse_john.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
694
+ John NNP 2 nsubj
695
+ loves VBZ 0 ROOT
696
+ Mary NNP 2 obj
697
+ . . 2 punct
698
+
699
+ >>> print(parse_mary.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
700
+ Mary NNP 2 nsubj
701
+ walks VBZ 0 ROOT
702
+ . . 2 punct
703
+
704
+ Special cases
705
+
706
+ Non-breaking space inside of a token.
707
+
708
+ >>> len(
709
+ ... next(
710
+ ... dep_parser.raw_parse(
711
+ ... 'Anhalt said children typically treat a 20-ounce soda bottle as one '
712
+ ... 'serving, while it actually contains 2 1/2 servings.'
713
+ ... )
714
+ ... ).nodes
715
+ ... )
716
+ 23
717
+
718
+ Phone numbers.
719
+
720
+ >>> len(
721
+ ... next(
722
+ ... dep_parser.raw_parse('This is not going to crash: 01 111 555.')
723
+ ... ).nodes
724
+ ... )
725
+ 10
726
+
727
+ >>> print(
728
+ ... next(
729
+ ... dep_parser.raw_parse('The underscore _ should not simply disappear.')
730
+ ... ).to_conll(4)
731
+ ... ) # doctest: +NORMALIZE_WHITESPACE
732
+ The DT 2 det
733
+ underscore NN 7 nsubj
734
+ _ NFP 7 punct
735
+ should MD 7 aux
736
+ not RB 7 advmod
737
+ simply RB 7 advmod
738
+ disappear VB 0 ROOT
739
+ . . 7 punct
740
+
741
+ >>> print(
742
+ ... next(
743
+ ... dep_parser.raw_parse(
744
+ ... 'for all of its insights into the dream world of teen life , and its electronic expression through '
745
+ ... 'cyber culture , the film gives no quarter to anyone seeking to pull a cohesive story out of its 2 '
746
+ ... '1/2-hour running time .'
747
+ ... )
748
+ ... ).to_conll(4)
749
+ ... ) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
750
+ for IN 2 case
751
+ all DT 24 obl
752
+ of IN 5 case
753
+ its PRP$ 5 nmod:poss
754
+ insights NNS 2 nmod
755
+ into IN 9 case
756
+ the DT 9 det
757
+ dream NN 9 compound
758
+ world NN 5 nmod
759
+ of IN 12 case
760
+ teen NN 12 compound
761
+ ...
762
+
763
+ >>> server.stop()
764
+ """
765
+
766
+ _OUTPUT_FORMAT = "conll2007"
767
+ parser_annotator = "depparse"
768
+
769
+ def make_tree(self, result):
770
+
771
+ return DependencyGraph(
772
+ (
773
+ " ".join(n_items[1:]) # NLTK expects an iterable of strings...
774
+ for n_items in sorted(transform(result))
775
+ ),
776
+ cell_separator=" ", # To make sure that a non-breaking space is kept inside of a token.
777
+ )
778
+
779
+
780
+ def transform(sentence):
781
+ for dependency in sentence["basicDependencies"]:
782
+
783
+ dependent_index = dependency["dependent"]
784
+ token = sentence["tokens"][dependent_index - 1]
785
+
786
+ # Return values that we don't know as '_'. Also, consider tag and ctag
787
+ # to be equal.
788
+ yield (
789
+ dependent_index,
790
+ "_",
791
+ token["word"],
792
+ token["lemma"],
793
+ token["pos"],
794
+ token["pos"],
795
+ "_",
796
+ str(dependency["governor"]),
797
+ dependency["dep"],
798
+ "_",
799
+ "_",
800
+ )
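A hedged sketch (annotation, not part of corenlp.py): feeding a hand-built, minimal CoreNLP-style JSON sentence through the module-level `transform` above shows the CoNLL-like rows that `make_tree` hands to `DependencyGraph`. The sentence dict is an illustrative stand-in, not real server output; only the keys `transform` actually reads are filled in.

from nltk.parse import DependencyGraph
from nltk.parse.corenlp import transform

# Minimal stand-in for one sentence of CoreNLP JSON output (assumed shape).
sentence = {
    "basicDependencies": [
        {"dep": "ROOT", "governor": 0, "dependent": 2},
        {"dep": "nsubj", "governor": 2, "dependent": 1},
    ],
    "tokens": [
        {"word": "Mary", "lemma": "Mary", "pos": "NNP"},
        {"word": "walks", "lemma": "walk", "pos": "VBZ"},
    ],
}

# Same join/sort step as make_tree(): drop the index, keep ten CoNLL columns.
rows = (" ".join(items[1:]) for items in sorted(transform(sentence)))
graph = DependencyGraph(rows, cell_separator=" ")
print(graph.tree())  # expected: (walks Mary)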
venv/lib/python3.10/site-packages/nltk/parse/earleychart.py ADDED
@@ -0,0 +1,552 @@
1
+ # Natural Language Toolkit: An Incremental Earley Chart Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Peter Ljunglöf <[email protected]>
5
+ # Rob Speer <[email protected]>
6
+ # Edward Loper <[email protected]>
7
+ # Steven Bird <[email protected]>
8
+ # Jean Mark Gawron <[email protected]>
9
+ # URL: <https://www.nltk.org/>
10
+ # For license information, see LICENSE.TXT
11
+
12
+ """
13
+ Data classes and parser implementations for *incremental* chart
14
+ parsers, which use dynamic programming to efficiently parse a text.
15
+ A "chart parser" derives parse trees for a text by iteratively adding
16
+ "edges" to a "chart". Each "edge" represents a hypothesis about the tree
17
+ structure for a subsequence of the text. The "chart" is a
18
+ "blackboard" for composing and combining these hypotheses.
19
+
20
+ A parser is "incremental", if it guarantees that for all i, j where i < j,
21
+ all edges ending at i are built before any edges ending at j.
22
+ This is appealing for, say, speech recognizer hypothesis filtering.
23
+
24
+ The main parser class is ``EarleyChartParser``, which is a top-down
25
+ algorithm, originally formulated by Jay Earley (1970).
26
+ """
27
+
28
+ from time import perf_counter
29
+
30
+ from nltk.parse.chart import (
31
+ BottomUpPredictCombineRule,
32
+ BottomUpPredictRule,
33
+ CachedTopDownPredictRule,
34
+ Chart,
35
+ ChartParser,
36
+ EdgeI,
37
+ EmptyPredictRule,
38
+ FilteredBottomUpPredictCombineRule,
39
+ FilteredSingleEdgeFundamentalRule,
40
+ LeafEdge,
41
+ LeafInitRule,
42
+ SingleEdgeFundamentalRule,
43
+ TopDownInitRule,
44
+ )
45
+ from nltk.parse.featurechart import (
46
+ FeatureBottomUpPredictCombineRule,
47
+ FeatureBottomUpPredictRule,
48
+ FeatureChart,
49
+ FeatureChartParser,
50
+ FeatureEmptyPredictRule,
51
+ FeatureSingleEdgeFundamentalRule,
52
+ FeatureTopDownInitRule,
53
+ FeatureTopDownPredictRule,
54
+ )
55
+
56
+ # ////////////////////////////////////////////////////////////
57
+ # Incremental Chart
58
+ # ////////////////////////////////////////////////////////////
59
+
60
+
61
+ class IncrementalChart(Chart):
62
+ def initialize(self):
63
+ # A sequence of edge lists contained in this chart.
64
+ self._edgelists = tuple([] for x in self._positions())
65
+
66
+ # The set of child pointer lists associated with each edge.
67
+ self._edge_to_cpls = {}
68
+
69
+ # Indexes mapping attribute values to lists of edges
70
+ # (used by select()).
71
+ self._indexes = {}
72
+
73
+ def edges(self):
74
+ return list(self.iteredges())
75
+
76
+ def iteredges(self):
77
+ return (edge for edgelist in self._edgelists for edge in edgelist)
78
+
79
+ def select(self, end, **restrictions):
80
+ edgelist = self._edgelists[end]
81
+
82
+ # If there are no restrictions, then return all edges.
83
+ if restrictions == {}:
84
+ return iter(edgelist)
85
+
86
+ # Find the index corresponding to the given restrictions.
87
+ restr_keys = sorted(restrictions.keys())
88
+ restr_keys = tuple(restr_keys)
89
+
90
+ # If it doesn't exist, then create it.
91
+ if restr_keys not in self._indexes:
92
+ self._add_index(restr_keys)
93
+
94
+ vals = tuple(restrictions[key] for key in restr_keys)
95
+ return iter(self._indexes[restr_keys][end].get(vals, []))
96
+
97
+ def _add_index(self, restr_keys):
98
+ # Make sure it's a valid index.
99
+ for key in restr_keys:
100
+ if not hasattr(EdgeI, key):
101
+ raise ValueError("Bad restriction: %s" % key)
102
+
103
+ # Create the index.
104
+ index = self._indexes[restr_keys] = tuple({} for x in self._positions())
105
+
106
+ # Add all existing edges to the index.
107
+ for end, edgelist in enumerate(self._edgelists):
108
+ this_index = index[end]
109
+ for edge in edgelist:
110
+ vals = tuple(getattr(edge, key)() for key in restr_keys)
111
+ this_index.setdefault(vals, []).append(edge)
112
+
113
+ def _register_with_indexes(self, edge):
114
+ end = edge.end()
115
+ for (restr_keys, index) in self._indexes.items():
116
+ vals = tuple(getattr(edge, key)() for key in restr_keys)
117
+ index[end].setdefault(vals, []).append(edge)
118
+
119
+ def _append_edge(self, edge):
120
+ self._edgelists[edge.end()].append(edge)
121
+
122
+ def _positions(self):
123
+ return range(self.num_leaves() + 1)
124
+
125
+
126
+ class FeatureIncrementalChart(IncrementalChart, FeatureChart):
127
+ def select(self, end, **restrictions):
128
+ edgelist = self._edgelists[end]
129
+
130
+ # If there are no restrictions, then return all edges.
131
+ if restrictions == {}:
132
+ return iter(edgelist)
133
+
134
+ # Find the index corresponding to the given restrictions.
135
+ restr_keys = sorted(restrictions.keys())
136
+ restr_keys = tuple(restr_keys)
137
+
138
+ # If it doesn't exist, then create it.
139
+ if restr_keys not in self._indexes:
140
+ self._add_index(restr_keys)
141
+
142
+ vals = tuple(
143
+ self._get_type_if_possible(restrictions[key]) for key in restr_keys
144
+ )
145
+ return iter(self._indexes[restr_keys][end].get(vals, []))
146
+
147
+ def _add_index(self, restr_keys):
148
+ # Make sure it's a valid index.
149
+ for key in restr_keys:
150
+ if not hasattr(EdgeI, key):
151
+ raise ValueError("Bad restriction: %s" % key)
152
+
153
+ # Create the index.
154
+ index = self._indexes[restr_keys] = tuple({} for x in self._positions())
155
+
156
+ # Add all existing edges to the index.
157
+ for end, edgelist in enumerate(self._edgelists):
158
+ this_index = index[end]
159
+ for edge in edgelist:
160
+ vals = tuple(
161
+ self._get_type_if_possible(getattr(edge, key)())
162
+ for key in restr_keys
163
+ )
164
+ this_index.setdefault(vals, []).append(edge)
165
+
166
+ def _register_with_indexes(self, edge):
167
+ end = edge.end()
168
+ for (restr_keys, index) in self._indexes.items():
169
+ vals = tuple(
170
+ self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys
171
+ )
172
+ index[end].setdefault(vals, []).append(edge)
173
+
174
+
175
+ # ////////////////////////////////////////////////////////////
176
+ # Incremental CFG Rules
177
+ # ////////////////////////////////////////////////////////////
178
+
179
+
180
+ class CompleteFundamentalRule(SingleEdgeFundamentalRule):
181
+ def _apply_incomplete(self, chart, grammar, left_edge):
182
+ end = left_edge.end()
183
+ # When the chart is incremental, we only have to look for
184
+ # empty complete edges here.
185
+ for right_edge in chart.select(
186
+ start=end, end=end, is_complete=True, lhs=left_edge.nextsym()
187
+ ):
188
+ new_edge = left_edge.move_dot_forward(right_edge.end())
189
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
190
+ yield new_edge
191
+
192
+
193
+ class CompleterRule(CompleteFundamentalRule):
194
+ _fundamental_rule = CompleteFundamentalRule()
195
+
196
+ def apply(self, chart, grammar, edge):
197
+ if not isinstance(edge, LeafEdge):
198
+ yield from self._fundamental_rule.apply(chart, grammar, edge)
199
+
200
+
201
+ class ScannerRule(CompleteFundamentalRule):
202
+ _fundamental_rule = CompleteFundamentalRule()
203
+
204
+ def apply(self, chart, grammar, edge):
205
+ if isinstance(edge, LeafEdge):
206
+ yield from self._fundamental_rule.apply(chart, grammar, edge)
207
+
208
+
209
+ class PredictorRule(CachedTopDownPredictRule):
210
+ pass
211
+
212
+
213
+ class FilteredCompleteFundamentalRule(FilteredSingleEdgeFundamentalRule):
214
+ def apply(self, chart, grammar, edge):
215
+ # Since the Filtered rule only works for grammars without empty productions,
216
+ # we only have to bother with complete edges here.
217
+ if edge.is_complete():
218
+ yield from self._apply_complete(chart, grammar, edge)
219
+
220
+
221
+ # ////////////////////////////////////////////////////////////
222
+ # Incremental FCFG Rules
223
+ # ////////////////////////////////////////////////////////////
224
+
225
+
226
+ class FeatureCompleteFundamentalRule(FeatureSingleEdgeFundamentalRule):
227
+ def _apply_incomplete(self, chart, grammar, left_edge):
228
+ fr = self._fundamental_rule
229
+ end = left_edge.end()
230
+ # When the chart is incremental, we only have to look for
231
+ # empty complete edges here.
232
+ for right_edge in chart.select(
233
+ start=end, end=end, is_complete=True, lhs=left_edge.nextsym()
234
+ ):
235
+ yield from fr.apply(chart, grammar, left_edge, right_edge)
236
+
237
+
238
+ class FeatureCompleterRule(CompleterRule):
239
+ _fundamental_rule = FeatureCompleteFundamentalRule()
240
+
241
+
242
+ class FeatureScannerRule(ScannerRule):
243
+ _fundamental_rule = FeatureCompleteFundamentalRule()
244
+
245
+
246
+ class FeaturePredictorRule(FeatureTopDownPredictRule):
247
+ pass
248
+
249
+
250
+ # ////////////////////////////////////////////////////////////
251
+ # Incremental CFG Chart Parsers
252
+ # ////////////////////////////////////////////////////////////
253
+
254
+ EARLEY_STRATEGY = [
255
+ LeafInitRule(),
256
+ TopDownInitRule(),
257
+ CompleterRule(),
258
+ ScannerRule(),
259
+ PredictorRule(),
260
+ ]
261
+ TD_INCREMENTAL_STRATEGY = [
262
+ LeafInitRule(),
263
+ TopDownInitRule(),
264
+ CachedTopDownPredictRule(),
265
+ CompleteFundamentalRule(),
266
+ ]
267
+ BU_INCREMENTAL_STRATEGY = [
268
+ LeafInitRule(),
269
+ EmptyPredictRule(),
270
+ BottomUpPredictRule(),
271
+ CompleteFundamentalRule(),
272
+ ]
273
+ BU_LC_INCREMENTAL_STRATEGY = [
274
+ LeafInitRule(),
275
+ EmptyPredictRule(),
276
+ BottomUpPredictCombineRule(),
277
+ CompleteFundamentalRule(),
278
+ ]
279
+
280
+ LC_INCREMENTAL_STRATEGY = [
281
+ LeafInitRule(),
282
+ FilteredBottomUpPredictCombineRule(),
283
+ FilteredCompleteFundamentalRule(),
284
+ ]
285
+
286
+
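A hedged usage sketch (annotation, not part of earleychart.py): the strategy lists above plug straight into the `IncrementalChartParser` defined below via its `strategy` argument, for example to force purely top-down incremental parsing. The toy grammar and sentence are illustrative assumptions.

from nltk import CFG
from nltk.parse.earleychart import IncrementalChartParser, TD_INCREMENTAL_STRATEGY

grammar = CFG.fromstring("S -> 'a' S 'b' | 'a' 'b'")
parser = IncrementalChartParser(grammar, strategy=TD_INCREMENTAL_STRATEGY)
print(next(parser.parse("a a b b".split())))  # (S a (S a b) b)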
287
+ class IncrementalChartParser(ChartParser):
288
+ """
289
+ An *incremental* chart parser implementing Jay Earley's
290
+ parsing algorithm:
291
+
292
+ | For each index end in [0, 1, ..., N]:
293
+ | For each edge such that edge.end = end:
294
+ | If edge is incomplete and edge.next is not a part of speech:
295
+ | Apply PredictorRule to edge
296
+ | If edge is incomplete and edge.next is a part of speech:
297
+ | Apply ScannerRule to edge
298
+ | If edge is complete:
299
+ | Apply CompleterRule to edge
300
+ | Return any complete parses in the chart
301
+ """
302
+
303
+ def __init__(
304
+ self,
305
+ grammar,
306
+ strategy=BU_LC_INCREMENTAL_STRATEGY,
307
+ trace=0,
308
+ trace_chart_width=50,
309
+ chart_class=IncrementalChart,
310
+ ):
311
+ """
312
+ Create a new Earley chart parser, that uses ``grammar`` to
313
+ parse texts.
314
+
315
+ :type grammar: CFG
316
+ :param grammar: The grammar used to parse texts.
317
+ :type trace: int
318
+ :param trace: The level of tracing that should be used when
319
+ parsing a text. ``0`` will generate no tracing output;
320
+ and higher numbers will produce more verbose tracing
321
+ output.
322
+ :type trace_chart_width: int
323
+ :param trace_chart_width: The default total width reserved for
324
+ the chart in trace output. The remainder of each line will
325
+ be used to display edges.
326
+ :param chart_class: The class that should be used to create
327
+ the charts used by this parser.
328
+ """
329
+ self._grammar = grammar
330
+ self._trace = trace
331
+ self._trace_chart_width = trace_chart_width
332
+ self._chart_class = chart_class
333
+
334
+ self._axioms = []
335
+ self._inference_rules = []
336
+ for rule in strategy:
337
+ if rule.NUM_EDGES == 0:
338
+ self._axioms.append(rule)
339
+ elif rule.NUM_EDGES == 1:
340
+ self._inference_rules.append(rule)
341
+ else:
342
+ raise ValueError(
343
+ "Incremental inference rules must have " "NUM_EDGES == 0 or 1"
344
+ )
345
+
346
+ def chart_parse(self, tokens, trace=None):
347
+ if trace is None:
348
+ trace = self._trace
349
+ trace_new_edges = self._trace_new_edges
350
+
351
+ tokens = list(tokens)
352
+ self._grammar.check_coverage(tokens)
353
+ chart = self._chart_class(tokens)
354
+ grammar = self._grammar
355
+
356
+ # Width, for printing trace edges.
357
+ trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1)
358
+ if trace:
359
+ print(chart.pretty_format_leaves(trace_edge_width))
360
+
361
+ for axiom in self._axioms:
362
+ new_edges = list(axiom.apply(chart, grammar))
363
+ trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
364
+
365
+ inference_rules = self._inference_rules
366
+ for end in range(chart.num_leaves() + 1):
367
+ if trace > 1:
368
+ print("\n* Processing queue:", end, "\n")
369
+ agenda = list(chart.select(end=end))
370
+ while agenda:
371
+ edge = agenda.pop()
372
+ for rule in inference_rules:
373
+ new_edges = list(rule.apply(chart, grammar, edge))
374
+ trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
375
+ for new_edge in new_edges:
376
+ if new_edge.end() == end:
377
+ agenda.append(new_edge)
378
+
379
+ return chart
380
+
381
+
382
+ class EarleyChartParser(IncrementalChartParser):
383
+ def __init__(self, grammar, **parser_args):
384
+ IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args)
385
+
386
+
387
+ class IncrementalTopDownChartParser(IncrementalChartParser):
388
+ def __init__(self, grammar, **parser_args):
389
+ IncrementalChartParser.__init__(
390
+ self, grammar, TD_INCREMENTAL_STRATEGY, **parser_args
391
+ )
392
+
393
+
394
+ class IncrementalBottomUpChartParser(IncrementalChartParser):
395
+ def __init__(self, grammar, **parser_args):
396
+ IncrementalChartParser.__init__(
397
+ self, grammar, BU_INCREMENTAL_STRATEGY, **parser_args
398
+ )
399
+
400
+
401
+ class IncrementalBottomUpLeftCornerChartParser(IncrementalChartParser):
402
+ def __init__(self, grammar, **parser_args):
403
+ IncrementalChartParser.__init__(
404
+ self, grammar, BU_LC_INCREMENTAL_STRATEGY, **parser_args
405
+ )
406
+
407
+
408
+ class IncrementalLeftCornerChartParser(IncrementalChartParser):
409
+ def __init__(self, grammar, **parser_args):
410
+ if not grammar.is_nonempty():
411
+ raise ValueError(
412
+ "IncrementalLeftCornerParser only works for grammars "
413
+ "without empty productions."
414
+ )
415
+ IncrementalChartParser.__init__(
416
+ self, grammar, LC_INCREMENTAL_STRATEGY, **parser_args
417
+ )
418
+
419
+
420
+ # ////////////////////////////////////////////////////////////
421
+ # Incremental FCFG Chart Parsers
422
+ # ////////////////////////////////////////////////////////////
423
+
424
+ EARLEY_FEATURE_STRATEGY = [
425
+ LeafInitRule(),
426
+ FeatureTopDownInitRule(),
427
+ FeatureCompleterRule(),
428
+ FeatureScannerRule(),
429
+ FeaturePredictorRule(),
430
+ ]
431
+ TD_INCREMENTAL_FEATURE_STRATEGY = [
432
+ LeafInitRule(),
433
+ FeatureTopDownInitRule(),
434
+ FeatureTopDownPredictRule(),
435
+ FeatureCompleteFundamentalRule(),
436
+ ]
437
+ BU_INCREMENTAL_FEATURE_STRATEGY = [
438
+ LeafInitRule(),
439
+ FeatureEmptyPredictRule(),
440
+ FeatureBottomUpPredictRule(),
441
+ FeatureCompleteFundamentalRule(),
442
+ ]
443
+ BU_LC_INCREMENTAL_FEATURE_STRATEGY = [
444
+ LeafInitRule(),
445
+ FeatureEmptyPredictRule(),
446
+ FeatureBottomUpPredictCombineRule(),
447
+ FeatureCompleteFundamentalRule(),
448
+ ]
449
+
450
+
451
+ class FeatureIncrementalChartParser(IncrementalChartParser, FeatureChartParser):
452
+ def __init__(
453
+ self,
454
+ grammar,
455
+ strategy=BU_LC_INCREMENTAL_FEATURE_STRATEGY,
456
+ trace_chart_width=20,
457
+ chart_class=FeatureIncrementalChart,
458
+ **parser_args
459
+ ):
460
+ IncrementalChartParser.__init__(
461
+ self,
462
+ grammar,
463
+ strategy=strategy,
464
+ trace_chart_width=trace_chart_width,
465
+ chart_class=chart_class,
466
+ **parser_args
467
+ )
468
+
469
+
470
+ class FeatureEarleyChartParser(FeatureIncrementalChartParser):
471
+ def __init__(self, grammar, **parser_args):
472
+ FeatureIncrementalChartParser.__init__(
473
+ self, grammar, EARLEY_FEATURE_STRATEGY, **parser_args
474
+ )
475
+
476
+
477
+ class FeatureIncrementalTopDownChartParser(FeatureIncrementalChartParser):
478
+ def __init__(self, grammar, **parser_args):
479
+ FeatureIncrementalChartParser.__init__(
480
+ self, grammar, TD_INCREMENTAL_FEATURE_STRATEGY, **parser_args
481
+ )
482
+
483
+
484
+ class FeatureIncrementalBottomUpChartParser(FeatureIncrementalChartParser):
485
+ def __init__(self, grammar, **parser_args):
486
+ FeatureIncrementalChartParser.__init__(
487
+ self, grammar, BU_INCREMENTAL_FEATURE_STRATEGY, **parser_args
488
+ )
489
+
490
+
491
+ class FeatureIncrementalBottomUpLeftCornerChartParser(FeatureIncrementalChartParser):
492
+ def __init__(self, grammar, **parser_args):
493
+ FeatureIncrementalChartParser.__init__(
494
+ self, grammar, BU_LC_INCREMENTAL_FEATURE_STRATEGY, **parser_args
495
+ )
496
+
497
+
498
+ # ////////////////////////////////////////////////////////////
499
+ # Demonstration
500
+ # ////////////////////////////////////////////////////////////
501
+
502
+
503
+ def demo(
504
+ print_times=True,
505
+ print_grammar=False,
506
+ print_trees=True,
507
+ trace=2,
508
+ sent="I saw John with a dog with my cookie",
509
+ numparses=5,
510
+ ):
511
+ """
512
+ A demonstration of the Earley parsers.
513
+ """
514
+ import sys
515
+ import time
516
+
517
+ from nltk.parse.chart import demo_grammar
518
+
519
+ # The grammar for ChartParser and SteppingChartParser:
520
+ grammar = demo_grammar()
521
+ if print_grammar:
522
+ print("* Grammar")
523
+ print(grammar)
524
+
525
+ # Tokenize the sample sentence.
526
+ print("* Sentence:")
527
+ print(sent)
528
+ tokens = sent.split()
529
+ print(tokens)
530
+ print()
531
+
532
+ # Do the parsing.
533
+ earley = EarleyChartParser(grammar, trace=trace)
534
+ t = perf_counter()
535
+ chart = earley.chart_parse(tokens)
536
+ parses = list(chart.parses(grammar.start()))
537
+ t = perf_counter() - t
538
+
539
+ # Print results.
540
+ if numparses:
541
+ assert len(parses) == numparses, "Not all parses found"
542
+ if print_trees:
543
+ for tree in parses:
544
+ print(tree)
545
+ else:
546
+ print("Nr trees:", len(parses))
547
+ if print_times:
548
+ print("Time:", t)
549
+
550
+
551
+ if __name__ == "__main__":
552
+ demo()
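A hedged usage sketch (not part of earleychart.py): parsing a toy sentence with the `EarleyChartParser` defined above; the grammar is an illustrative assumption.

from nltk import CFG
from nltk.parse.earleychart import EarleyChartParser

grammar = CFG.fromstring("""
    S -> NP VP
    NP -> 'Mary' | 'John'
    VP -> V NP
    V -> 'loves'
""")
parser = EarleyChartParser(grammar)
for tree in parser.parse("Mary loves John".split()):
    print(tree)  # (S (NP Mary) (VP (V loves) (NP John)))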
venv/lib/python3.10/site-packages/nltk/parse/evaluate.py ADDED
@@ -0,0 +1,129 @@
1
+ # Natural Language Toolkit: evaluation of dependency parser
2
+ #
3
+ # Author: Long Duong <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import unicodedata
10
+
11
+
12
+ class DependencyEvaluator:
13
+ """
14
+ Class for measuring labelled and unlabelled attachment score for
15
+ dependency parsing. Note that the evaluation ignores punctuation.
16
+
17
+ >>> from nltk.parse import DependencyGraph, DependencyEvaluator
18
+
19
+ >>> gold_sent = DependencyGraph(\"""
20
+ ... Pierre NNP 2 NMOD
21
+ ... Vinken NNP 8 SUB
22
+ ... , , 2 P
23
+ ... 61 CD 5 NMOD
24
+ ... years NNS 6 AMOD
25
+ ... old JJ 2 NMOD
26
+ ... , , 2 P
27
+ ... will MD 0 ROOT
28
+ ... join VB 8 VC
29
+ ... the DT 11 NMOD
30
+ ... board NN 9 OBJ
31
+ ... as IN 9 VMOD
32
+ ... a DT 15 NMOD
33
+ ... nonexecutive JJ 15 NMOD
34
+ ... director NN 12 PMOD
35
+ ... Nov. NNP 9 VMOD
36
+ ... 29 CD 16 NMOD
37
+ ... . . 9 VMOD
38
+ ... \""")
39
+
40
+ >>> parsed_sent = DependencyGraph(\"""
41
+ ... Pierre NNP 8 NMOD
42
+ ... Vinken NNP 1 SUB
43
+ ... , , 3 P
44
+ ... 61 CD 6 NMOD
45
+ ... years NNS 6 AMOD
46
+ ... old JJ 2 NMOD
47
+ ... , , 3 AMOD
48
+ ... will MD 0 ROOT
49
+ ... join VB 8 VC
50
+ ... the DT 11 AMOD
51
+ ... board NN 9 OBJECT
52
+ ... as IN 9 NMOD
53
+ ... a DT 15 NMOD
54
+ ... nonexecutive JJ 15 NMOD
55
+ ... director NN 12 PMOD
56
+ ... Nov. NNP 9 VMOD
57
+ ... 29 CD 16 NMOD
58
+ ... . . 9 VMOD
59
+ ... \""")
60
+
61
+ >>> de = DependencyEvaluator([parsed_sent],[gold_sent])
62
+ >>> las, uas = de.eval()
63
+ >>> las
64
+ 0.6
65
+ >>> uas
66
+ 0.8
67
+ >>> abs(uas - 0.8) < 0.00001
68
+ True
69
+ """
70
+
71
+ def __init__(self, parsed_sents, gold_sents):
72
+ """
73
+ :param parsed_sents: the list of parsed sentences as output by the parser
74
+ :type parsed_sents: list(DependencyGraph)
75
+ """
76
+ self._parsed_sents = parsed_sents
77
+ self._gold_sents = gold_sents
78
+
79
+ def _remove_punct(self, inStr):
80
+ """
81
+ Remove punctuation from a Unicode string.
82
+ :param inStr: the input string
83
+ :return: the string with all punctuation removed
84
+ """
85
+ punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"}
86
+ return "".join(x for x in inStr if unicodedata.category(x) not in punc_cat)
87
+
88
+ def eval(self):
89
+ """
90
+ Return the Labeled Attachment Score (LAS) and Unlabeled Attachment Score (UAS)
91
+
92
+ :return : tuple(float,float)
93
+ """
94
+ if len(self._parsed_sents) != len(self._gold_sents):
95
+ raise ValueError(
96
+ "Number of parsed sentences differs from the number of gold sentences."
97
+ )
98
+
99
+ corr = 0
100
+ corrL = 0
101
+ total = 0
102
+
103
+ for i in range(len(self._parsed_sents)):
104
+ parsed_sent_nodes = self._parsed_sents[i].nodes
105
+ gold_sent_nodes = self._gold_sents[i].nodes
106
+
107
+ if len(parsed_sent_nodes) != len(gold_sent_nodes):
108
+ raise ValueError("Sentences must have equal length.")
109
+
110
+ for parsed_node_address, parsed_node in parsed_sent_nodes.items():
111
+ gold_node = gold_sent_nodes[parsed_node_address]
112
+
113
+ if parsed_node["word"] is None:
114
+ continue
115
+ if parsed_node["word"] != gold_node["word"]:
116
+ raise ValueError("Sentence sequence is not matched.")
117
+
118
+ # Ignore if word is punctuation by default
119
+ # if (parsed_sent[j]["word"] in string.punctuation):
120
+ if self._remove_punct(parsed_node["word"]) == "":
121
+ continue
122
+
123
+ total += 1
124
+ if parsed_node["head"] == gold_node["head"]:
125
+ corr += 1
126
+ if parsed_node["rel"] == gold_node["rel"]:
127
+ corrL += 1
128
+
129
+ return corrL / total, corr / total
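A worked sketch (not part of evaluate.py) of the attachment-score arithmetic the loop above implements: UAS counts tokens whose predicted head matches gold, while LAS additionally requires the relation label to match. The five (head, rel) pairs are made-up toy data.

gold      = [(2, "nsubj"), (0, "ROOT"), (4, "det"), (2, "obj"), (2, "advmod")]
predicted = [(2, "nsubj"), (0, "ROOT"), (4, "amod"), (2, "obj"), (4, "advmod")]

total = len(gold)
uas_hits = sum(p[0] == g[0] for p, g in zip(predicted, gold))                    # head correct
las_hits = sum(p[0] == g[0] and p[1] == g[1] for p, g in zip(predicted, gold))   # head and label correct
print(las_hits / total, uas_hits / total)  # 0.6 0.8, the (las, uas) order eval() returns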
venv/lib/python3.10/site-packages/nltk/parse/featurechart.py ADDED
@@ -0,0 +1,674 @@
1
+ # Natural Language Toolkit: Chart Parser for Feature-Based Grammars
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Rob Speer <[email protected]>
5
+ # Peter Ljunglöf <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Extension of chart parsing implementation to handle grammars with
11
+ feature structures as nodes.
12
+ """
13
+ from time import perf_counter
14
+
15
+ from nltk.featstruct import TYPE, FeatStruct, find_variables, unify
16
+ from nltk.grammar import (
17
+ CFG,
18
+ FeatStructNonterminal,
19
+ Nonterminal,
20
+ Production,
21
+ is_nonterminal,
22
+ is_terminal,
23
+ )
24
+ from nltk.parse.chart import (
25
+ BottomUpPredictCombineRule,
26
+ BottomUpPredictRule,
27
+ CachedTopDownPredictRule,
28
+ Chart,
29
+ ChartParser,
30
+ EdgeI,
31
+ EmptyPredictRule,
32
+ FundamentalRule,
33
+ LeafInitRule,
34
+ SingleEdgeFundamentalRule,
35
+ TopDownInitRule,
36
+ TreeEdge,
37
+ )
38
+ from nltk.sem import logic
39
+ from nltk.tree import Tree
40
+
41
+ # ////////////////////////////////////////////////////////////
42
+ # Tree Edge
43
+ # ////////////////////////////////////////////////////////////
44
+
45
+
46
+ class FeatureTreeEdge(TreeEdge):
47
+ """
48
+ A specialized tree edge that allows shared variable bindings
49
+ between nonterminals on the left-hand side and right-hand side.
50
+
51
+ Each ``FeatureTreeEdge`` contains a set of ``bindings``, i.e., a
52
+ dictionary mapping from variables to values. If the edge is not
53
+ complete, then these bindings are simply stored. However, if the
54
+ edge is complete, then the constructor applies these bindings to
55
+ every nonterminal in the edge whose symbol implements the
56
+ interface ``SubstituteBindingsI``.
57
+ """
58
+
59
+ def __init__(self, span, lhs, rhs, dot=0, bindings=None):
60
+ """
61
+ Construct a new edge. If the edge is incomplete (i.e., if
62
+ ``dot<len(rhs)``), then store the bindings as-is. If the edge
63
+ is complete (i.e., if ``dot==len(rhs)``), then apply the
64
+ bindings to all nonterminals in ``lhs`` and ``rhs``, and then
65
+ clear the bindings. See ``TreeEdge`` for a description of
66
+ the other arguments.
67
+ """
68
+ if bindings is None:
69
+ bindings = {}
70
+
71
+ # If the edge is complete, then substitute in the bindings,
72
+ # and then throw them away. (If we didn't throw them away, we
73
+ # might think that 2 complete edges are different just because
74
+ # they have different bindings, even though all bindings have
75
+ # already been applied.)
76
+ if dot == len(rhs) and bindings:
77
+ lhs = self._bind(lhs, bindings)
78
+ rhs = [self._bind(elt, bindings) for elt in rhs]
79
+ bindings = {}
80
+
81
+ # Initialize the edge.
82
+ TreeEdge.__init__(self, span, lhs, rhs, dot)
83
+ self._bindings = bindings
84
+ self._comparison_key = (self._comparison_key, tuple(sorted(bindings.items())))
85
+
86
+ @staticmethod
87
+ def from_production(production, index):
88
+ """
89
+ :return: A new ``TreeEdge`` formed from the given production.
90
+ The new edge's left-hand side and right-hand side will
91
+ be taken from ``production``; its span will be
92
+ ``(index,index)``; and its dot position will be ``0``.
93
+ :rtype: TreeEdge
94
+ """
95
+ return FeatureTreeEdge(
96
+ span=(index, index), lhs=production.lhs(), rhs=production.rhs(), dot=0
97
+ )
98
+
99
+ def move_dot_forward(self, new_end, bindings=None):
100
+ """
101
+ :return: A new ``FeatureTreeEdge`` formed from this edge.
102
+ The new edge's dot position is increased by ``1``,
103
+ and its end index will be replaced by ``new_end``.
104
+ :rtype: FeatureTreeEdge
105
+ :param new_end: The new end index.
106
+ :type new_end: int
107
+ :param bindings: Bindings for the new edge.
108
+ :type bindings: dict
109
+ """
110
+ return FeatureTreeEdge(
111
+ span=(self._span[0], new_end),
112
+ lhs=self._lhs,
113
+ rhs=self._rhs,
114
+ dot=self._dot + 1,
115
+ bindings=bindings,
116
+ )
117
+
118
+ def _bind(self, nt, bindings):
119
+ if not isinstance(nt, FeatStructNonterminal):
120
+ return nt
121
+ return nt.substitute_bindings(bindings)
122
+
123
+ def next_with_bindings(self):
124
+ return self._bind(self.nextsym(), self._bindings)
125
+
126
+ def bindings(self):
127
+ """
128
+ Return a copy of this edge's bindings dictionary.
129
+ """
130
+ return self._bindings.copy()
131
+
132
+ def variables(self):
133
+ """
134
+ :return: The set of variables used by this edge.
135
+ :rtype: set(Variable)
136
+ """
137
+ return find_variables(
138
+ [self._lhs]
139
+ + list(self._rhs)
140
+ + list(self._bindings.keys())
141
+ + list(self._bindings.values()),
142
+ fs_class=FeatStruct,
143
+ )
144
+
145
+ def __str__(self):
146
+ if self.is_complete():
147
+ return super().__str__()
148
+ else:
149
+ bindings = "{%s}" % ", ".join(
150
+ "%s: %r" % item for item in sorted(self._bindings.items())
151
+ )
152
+ return f"{super().__str__()} {bindings}"
153
+
154
+
155
+ # ////////////////////////////////////////////////////////////
156
+ # A specialized Chart for feature grammars
157
+ # ////////////////////////////////////////////////////////////
158
+
159
+ # TODO: subsumes check when adding new edges
160
+
161
+
162
+ class FeatureChart(Chart):
163
+ """
164
+ A Chart for feature grammars.
165
+ :see: ``Chart`` for more information.
166
+ """
167
+
168
+ def select(self, **restrictions):
169
+ """
170
+ Returns an iterator over the edges in this chart.
171
+ See ``Chart.select`` for more information about the
172
+ ``restrictions`` on the edges.
173
+ """
174
+ # If there are no restrictions, then return all edges.
175
+ if restrictions == {}:
176
+ return iter(self._edges)
177
+
178
+ # Find the index corresponding to the given restrictions.
179
+ restr_keys = sorted(restrictions.keys())
180
+ restr_keys = tuple(restr_keys)
181
+
182
+ # If it doesn't exist, then create it.
183
+ if restr_keys not in self._indexes:
184
+ self._add_index(restr_keys)
185
+
186
+ vals = tuple(
187
+ self._get_type_if_possible(restrictions[key]) for key in restr_keys
188
+ )
189
+ return iter(self._indexes[restr_keys].get(vals, []))
190
+
191
+ def _add_index(self, restr_keys):
192
+ """
193
+ A helper function for ``select``, which creates a new index for
194
+ a given set of attributes (aka restriction keys).
195
+ """
196
+ # Make sure it's a valid index.
197
+ for key in restr_keys:
198
+ if not hasattr(EdgeI, key):
199
+ raise ValueError("Bad restriction: %s" % key)
200
+
201
+ # Create the index.
202
+ index = self._indexes[restr_keys] = {}
203
+
204
+ # Add all existing edges to the index.
205
+ for edge in self._edges:
206
+ vals = tuple(
207
+ self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys
208
+ )
209
+ index.setdefault(vals, []).append(edge)
210
+
211
+ def _register_with_indexes(self, edge):
212
+ """
213
+ A helper function for ``insert``, which registers the new
214
+ edge with all existing indexes.
215
+ """
216
+ for (restr_keys, index) in self._indexes.items():
217
+ vals = tuple(
218
+ self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys
219
+ )
220
+ index.setdefault(vals, []).append(edge)
221
+
222
+ def _get_type_if_possible(self, item):
223
+ """
224
+ Helper function which returns the ``TYPE`` feature of the ``item``,
225
+ if it exists, otherwise it returns the ``item`` itself
226
+ """
227
+ if isinstance(item, dict) and TYPE in item:
228
+ return item[TYPE]
229
+ else:
230
+ return item
231
+
232
+ def parses(self, start, tree_class=Tree):
233
+ for edge in self.select(start=0, end=self._num_leaves):
234
+ if (
235
+ (isinstance(edge, FeatureTreeEdge))
236
+ and (edge.lhs()[TYPE] == start[TYPE])
237
+ and (unify(edge.lhs(), start, rename_vars=True))
238
+ ):
239
+ yield from self.trees(edge, complete=True, tree_class=tree_class)
240
+
241
+
242
+ # ////////////////////////////////////////////////////////////
243
+ # Fundamental Rule
244
+ # ////////////////////////////////////////////////////////////
245
+
246
+
247
+ class FeatureFundamentalRule(FundamentalRule):
248
+ r"""
249
+ A specialized version of the fundamental rule that operates on
250
+ nonterminals whose symbols are ``FeatStructNonterminal``s. Rather
251
+ than simply comparing the nonterminals for equality, they are
252
+ unified. Variable bindings from these unifications are collected
253
+ and stored in the chart using a ``FeatureTreeEdge``. When a
254
+ complete edge is generated, these bindings are applied to all
255
+ nonterminals in the edge.
256
+
257
+ The fundamental rule states that:
258
+
259
+ - ``[A -> alpha \* B1 beta][i:j]``
260
+ - ``[B2 -> gamma \*][j:k]``
261
+
262
+ licenses the edge:
263
+
264
+ - ``[A -> alpha B3 \* beta][i:k]``
265
+
266
+ assuming that B1 and B2 can be unified to generate B3.
267
+ """
268
+
269
+ def apply(self, chart, grammar, left_edge, right_edge):
270
+ # Make sure the rule is applicable.
271
+ if not (
272
+ left_edge.end() == right_edge.start()
273
+ and left_edge.is_incomplete()
274
+ and right_edge.is_complete()
275
+ and isinstance(left_edge, FeatureTreeEdge)
276
+ ):
277
+ return
278
+ found = right_edge.lhs()
279
+ nextsym = left_edge.nextsym()
280
+ if isinstance(right_edge, FeatureTreeEdge):
281
+ if not is_nonterminal(nextsym):
282
+ return
283
+ if left_edge.nextsym()[TYPE] != right_edge.lhs()[TYPE]:
284
+ return
285
+ # Create a copy of the bindings.
286
+ bindings = left_edge.bindings()
287
+ # We rename vars here, because we don't want variables
288
+ # from the two different productions to match.
289
+ found = found.rename_variables(used_vars=left_edge.variables())
290
+ # Unify B1 (left_edge.nextsym) with B2 (right_edge.lhs) to
291
+ # generate B3 (result).
292
+ result = unify(nextsym, found, bindings, rename_vars=False)
293
+ if result is None:
294
+ return
295
+ else:
296
+ if nextsym != found:
297
+ return
298
+ # Create a copy of the bindings.
299
+ bindings = left_edge.bindings()
300
+
301
+ # Construct the new edge.
302
+ new_edge = left_edge.move_dot_forward(right_edge.end(), bindings)
303
+
304
+ # Add it to the chart, with appropriate child pointers.
305
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
306
+ yield new_edge
307
+
308
+
309
+ class FeatureSingleEdgeFundamentalRule(SingleEdgeFundamentalRule):
310
+ """
311
+ A specialized version of the completer / single edge fundamental rule
312
+ that operates on nonterminals whose symbols are ``FeatStructNonterminal``.
313
+ Rather than simply comparing the nonterminals for equality, they are
314
+ unified.
315
+ """
316
+
317
+ _fundamental_rule = FeatureFundamentalRule()
318
+
319
+ def _apply_complete(self, chart, grammar, right_edge):
320
+ fr = self._fundamental_rule
321
+ for left_edge in chart.select(
322
+ end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()
323
+ ):
324
+ yield from fr.apply(chart, grammar, left_edge, right_edge)
325
+
326
+ def _apply_incomplete(self, chart, grammar, left_edge):
327
+ fr = self._fundamental_rule
328
+ for right_edge in chart.select(
329
+ start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()
330
+ ):
331
+ yield from fr.apply(chart, grammar, left_edge, right_edge)
332
+
333
+
334
+ # ////////////////////////////////////////////////////////////
335
+ # Top-Down Prediction
336
+ # ////////////////////////////////////////////////////////////
337
+
338
+
339
+ class FeatureTopDownInitRule(TopDownInitRule):
340
+ def apply(self, chart, grammar):
341
+ for prod in grammar.productions(lhs=grammar.start()):
342
+ new_edge = FeatureTreeEdge.from_production(prod, 0)
343
+ if chart.insert(new_edge, ()):
344
+ yield new_edge
345
+
346
+
347
+ class FeatureTopDownPredictRule(CachedTopDownPredictRule):
348
+ r"""
349
+ A specialized version of the (cached) top down predict rule that operates
350
+ on nonterminals whose symbols are ``FeatStructNonterminal``. Rather
351
+ than simply comparing the nonterminals for equality, they are
352
+ unified.
353
+
354
+ The top down expand rule states that:
355
+
356
+ - ``[A -> alpha \* B1 beta][i:j]``
357
+
358
+ licenses the edge:
359
+
360
+ - ``[B2 -> \* gamma][j:j]``
361
+
362
+ for each grammar production ``B2 -> gamma``, assuming that B1
363
+ and B2 can be unified.
364
+ """
365
+
366
+ def apply(self, chart, grammar, edge):
367
+ if edge.is_complete():
368
+ return
369
+ nextsym, index = edge.nextsym(), edge.end()
370
+ if not is_nonterminal(nextsym):
371
+ return
372
+
373
+ # If we've already applied this rule to an edge with the same
374
+ # next & end, and the chart & grammar have not changed, then
375
+ # just return (no new edges to add).
376
+ nextsym_with_bindings = edge.next_with_bindings()
377
+ done = self._done.get((nextsym_with_bindings, index), (None, None))
378
+ if done[0] is chart and done[1] is grammar:
379
+ return
380
+
381
+ for prod in grammar.productions(lhs=nextsym):
382
+ # If the left corner in the predicted production is
383
+ # leaf, it must match with the input.
384
+ if prod.rhs():
385
+ first = prod.rhs()[0]
386
+ if is_terminal(first):
387
+ if index >= chart.num_leaves():
388
+ continue
389
+ if first != chart.leaf(index):
390
+ continue
391
+
392
+ # We rename vars here, because we don't want variables
393
+ # from the two different productions to match.
394
+ if unify(prod.lhs(), nextsym_with_bindings, rename_vars=True):
395
+ new_edge = FeatureTreeEdge.from_production(prod, edge.end())
396
+ if chart.insert(new_edge, ()):
397
+ yield new_edge
398
+
399
+ # Record the fact that we've applied this rule.
400
+ self._done[nextsym_with_bindings, index] = (chart, grammar)
401
+
402
+
403
+ # ////////////////////////////////////////////////////////////
404
+ # Bottom-Up Prediction
405
+ # ////////////////////////////////////////////////////////////
406
+
407
+
408
+ class FeatureBottomUpPredictRule(BottomUpPredictRule):
409
+ def apply(self, chart, grammar, edge):
410
+ if edge.is_incomplete():
411
+ return
412
+ for prod in grammar.productions(rhs=edge.lhs()):
413
+ if isinstance(edge, FeatureTreeEdge):
414
+ _next = prod.rhs()[0]
415
+ if not is_nonterminal(_next):
416
+ continue
417
+
418
+ new_edge = FeatureTreeEdge.from_production(prod, edge.start())
419
+ if chart.insert(new_edge, ()):
420
+ yield new_edge
421
+
422
+
423
+ class FeatureBottomUpPredictCombineRule(BottomUpPredictCombineRule):
424
+ def apply(self, chart, grammar, edge):
425
+ if edge.is_incomplete():
426
+ return
427
+ found = edge.lhs()
428
+ for prod in grammar.productions(rhs=found):
429
+ bindings = {}
430
+ if isinstance(edge, FeatureTreeEdge):
431
+ _next = prod.rhs()[0]
432
+ if not is_nonterminal(_next):
433
+ continue
434
+
435
+ # We rename vars here, because we don't want variables
436
+ # from the two different productions to match.
437
+ used_vars = find_variables(
438
+ (prod.lhs(),) + prod.rhs(), fs_class=FeatStruct
439
+ )
440
+ found = found.rename_variables(used_vars=used_vars)
441
+
442
+ result = unify(_next, found, bindings, rename_vars=False)
443
+ if result is None:
444
+ continue
445
+
446
+ new_edge = FeatureTreeEdge.from_production(
447
+ prod, edge.start()
448
+ ).move_dot_forward(edge.end(), bindings)
449
+ if chart.insert(new_edge, (edge,)):
450
+ yield new_edge
451
+
452
+
453
+ class FeatureEmptyPredictRule(EmptyPredictRule):
454
+ def apply(self, chart, grammar):
455
+ for prod in grammar.productions(empty=True):
456
+ for index in range(chart.num_leaves() + 1):
457
+ new_edge = FeatureTreeEdge.from_production(prod, index)
458
+ if chart.insert(new_edge, ()):
459
+ yield new_edge
460
+
461
+
462
+ # ////////////////////////////////////////////////////////////
463
+ # Feature Chart Parser
464
+ # ////////////////////////////////////////////////////////////
465
+
466
+ TD_FEATURE_STRATEGY = [
467
+ LeafInitRule(),
468
+ FeatureTopDownInitRule(),
469
+ FeatureTopDownPredictRule(),
470
+ FeatureSingleEdgeFundamentalRule(),
471
+ ]
472
+ BU_FEATURE_STRATEGY = [
473
+ LeafInitRule(),
474
+ FeatureEmptyPredictRule(),
475
+ FeatureBottomUpPredictRule(),
476
+ FeatureSingleEdgeFundamentalRule(),
477
+ ]
478
+ BU_LC_FEATURE_STRATEGY = [
479
+ LeafInitRule(),
480
+ FeatureEmptyPredictRule(),
481
+ FeatureBottomUpPredictCombineRule(),
482
+ FeatureSingleEdgeFundamentalRule(),
483
+ ]
484
+
485
+
486
+ class FeatureChartParser(ChartParser):
487
+ def __init__(
488
+ self,
489
+ grammar,
490
+ strategy=BU_LC_FEATURE_STRATEGY,
491
+ trace_chart_width=20,
492
+ chart_class=FeatureChart,
493
+ **parser_args,
494
+ ):
495
+ ChartParser.__init__(
496
+ self,
497
+ grammar,
498
+ strategy=strategy,
499
+ trace_chart_width=trace_chart_width,
500
+ chart_class=chart_class,
501
+ **parser_args,
502
+ )
503
+
504
+
505
+ class FeatureTopDownChartParser(FeatureChartParser):
506
+ def __init__(self, grammar, **parser_args):
507
+ FeatureChartParser.__init__(self, grammar, TD_FEATURE_STRATEGY, **parser_args)
508
+
509
+
510
+ class FeatureBottomUpChartParser(FeatureChartParser):
511
+ def __init__(self, grammar, **parser_args):
512
+ FeatureChartParser.__init__(self, grammar, BU_FEATURE_STRATEGY, **parser_args)
513
+
514
+
515
+ class FeatureBottomUpLeftCornerChartParser(FeatureChartParser):
516
+ def __init__(self, grammar, **parser_args):
517
+ FeatureChartParser.__init__(
518
+ self, grammar, BU_LC_FEATURE_STRATEGY, **parser_args
519
+ )
520
+
521
+
522
+ # ////////////////////////////////////////////////////////////
523
+ # Instantiate Variable Chart
524
+ # ////////////////////////////////////////////////////////////
525
+
526
+
527
+ class InstantiateVarsChart(FeatureChart):
528
+ """
529
+ A specialized chart that 'instantiates' variables whose names
530
+ start with '@', by replacing them with unique new variables.
531
+ In particular, whenever a complete edge is added to the chart, any
532
+ variables in the edge's ``lhs`` whose names start with '@' will be
533
+ replaced by unique new ``Variable``.
534
+ """
535
+
536
+ def __init__(self, tokens):
537
+ FeatureChart.__init__(self, tokens)
538
+
539
+ def initialize(self):
540
+ self._instantiated = set()
541
+ FeatureChart.initialize(self)
542
+
543
+ def insert(self, edge, child_pointer_list):
544
+ if edge in self._instantiated:
545
+ return False
546
+ self.instantiate_edge(edge)
547
+ return FeatureChart.insert(self, edge, child_pointer_list)
548
+
549
+ def instantiate_edge(self, edge):
550
+ """
551
+ If the edge is a ``FeatureTreeEdge``, and it is complete,
552
+ then instantiate all variables whose names start with '@',
553
+ by replacing them with unique new variables.
554
+
555
+ Note that instantiation is done in-place, since the
556
+ parsing algorithms might already hold a reference to
557
+ the edge for future use.
558
+ """
559
+ # If the edge is a leaf, or is not complete, or is
560
+ # already in the chart, then just return it as-is.
561
+ if not isinstance(edge, FeatureTreeEdge):
562
+ return
563
+ if not edge.is_complete():
564
+ return
565
+ if edge in self._edge_to_cpls:
566
+ return
567
+
568
+ # Get a list of variables that need to be instantiated.
569
+ # If there are none, then return as-is.
570
+ inst_vars = self.inst_vars(edge)
571
+ if not inst_vars:
572
+ return
573
+
574
+ # Instantiate the edge!
575
+ self._instantiated.add(edge)
576
+ edge._lhs = edge.lhs().substitute_bindings(inst_vars)
577
+
578
+ def inst_vars(self, edge):
579
+ return {
580
+ var: logic.unique_variable()
581
+ for var in edge.lhs().variables()
582
+ if var.name.startswith("@")
583
+ }
584
+
585
+
586
+ # ////////////////////////////////////////////////////////////
587
+ # Demo
588
+ # ////////////////////////////////////////////////////////////
589
+
590
+
591
+ def demo_grammar():
592
+ from nltk.grammar import FeatureGrammar
593
+
594
+ return FeatureGrammar.fromstring(
595
+ """
596
+ S -> NP VP
597
+ PP -> Prep NP
598
+ NP -> NP PP
599
+ VP -> VP PP
600
+ VP -> Verb NP
601
+ VP -> Verb
602
+ NP -> Det[pl=?x] Noun[pl=?x]
603
+ NP -> "John"
604
+ NP -> "I"
605
+ Det -> "the"
606
+ Det -> "my"
607
+ Det[-pl] -> "a"
608
+ Noun[-pl] -> "dog"
609
+ Noun[-pl] -> "cookie"
610
+ Verb -> "ate"
611
+ Verb -> "saw"
612
+ Prep -> "with"
613
+ Prep -> "under"
614
+ """
615
+ )
616
+
617
+
618
+ def demo(
619
+ print_times=True,
620
+ print_grammar=True,
621
+ print_trees=True,
622
+ print_sentence=True,
623
+ trace=1,
624
+ parser=FeatureChartParser,
625
+ sent="I saw John with a dog with my cookie",
626
+ ):
627
+ import sys
628
+ import time
629
+
630
+ print()
631
+ grammar = demo_grammar()
632
+ if print_grammar:
633
+ print(grammar)
634
+ print()
635
+ print("*", parser.__name__)
636
+ if print_sentence:
637
+ print("Sentence:", sent)
638
+ tokens = sent.split()
639
+ t = perf_counter()
640
+ cp = parser(grammar, trace=trace)
641
+ chart = cp.chart_parse(tokens)
642
+ trees = list(chart.parses(grammar.start()))
643
+ if print_times:
644
+ print("Time: %s" % (perf_counter() - t))
645
+ if print_trees:
646
+ for tree in trees:
647
+ print(tree)
648
+ else:
649
+ print("Nr trees:", len(trees))
650
+
651
+
652
+ def run_profile():
653
+ import profile
654
+
655
+ profile.run("for i in range(1): demo()", "/tmp/profile.out")
656
+ import pstats
657
+
658
+ p = pstats.Stats("/tmp/profile.out")
659
+ p.strip_dirs().sort_stats("time", "cum").print_stats(60)
660
+ p.strip_dirs().sort_stats("cum", "time").print_stats(60)
661
+
662
+
663
+ if __name__ == "__main__":
664
+ from nltk.data import load
665
+
666
+ demo()
667
+ print()
668
+ grammar = load("grammars/book_grammars/feat0.fcfg")
669
+ cp = FeatureChartParser(grammar, trace=2)
670
+ sent = "Kim likes children"
671
+ tokens = sent.split()
672
+ trees = cp.parse(tokens)
673
+ for tree in trees:
674
+ print(tree)
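A hedged usage sketch (not part of featurechart.py): parsing with the demo feature grammar above, where the ``NP -> Det[pl=?x] Noun[pl=?x]`` production forces determiner/noun agreement on the ``pl`` feature; "a" and "dog" both carry ``[-pl]``, so they unify.

from nltk.parse.featurechart import FeatureChartParser, demo_grammar

parser = FeatureChartParser(demo_grammar())
for tree in parser.parse("I saw a dog".split()):
    print(tree)  # one S[] tree with the feature-annotated NP/VP structure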
venv/lib/python3.10/site-packages/nltk/parse/generate.py ADDED
@@ -0,0 +1,85 @@
1
+ # Natural Language Toolkit: Generating from a CFG
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Peter Ljunglöf <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ #
9
+
10
+ import itertools
11
+ import sys
12
+
13
+ from nltk.grammar import Nonterminal
14
+
15
+
16
+ def generate(grammar, start=None, depth=None, n=None):
17
+ """
18
+ Generates an iterator of all sentences from a CFG.
19
+
20
+ :param grammar: The Grammar used to generate sentences.
21
+ :param start: The Nonterminal from which to start generate sentences.
22
+ :param depth: The maximal depth of the generated tree.
23
+ :param n: The maximum number of sentences to return.
24
+ :return: An iterator of lists of terminal tokens.
25
+ """
26
+ if not start:
27
+ start = grammar.start()
28
+ if depth is None:
29
+ depth = sys.maxsize
30
+
31
+ iter = _generate_all(grammar, [start], depth)
32
+
33
+ if n:
34
+ iter = itertools.islice(iter, n)
35
+
36
+ return iter
37
+
38
+
39
+ def _generate_all(grammar, items, depth):
40
+ if items:
41
+ try:
42
+ for frag1 in _generate_one(grammar, items[0], depth):
43
+ for frag2 in _generate_all(grammar, items[1:], depth):
44
+ yield frag1 + frag2
45
+ except RecursionError as error:
46
+ # Helpful error message while still showing the recursion stack.
47
+ raise RuntimeError(
48
+ "The grammar has rule(s) that yield infinite recursion!"
49
+ ) from error
50
+ else:
51
+ yield []
52
+
53
+
54
+ def _generate_one(grammar, item, depth):
55
+ if depth > 0:
56
+ if isinstance(item, Nonterminal):
57
+ for prod in grammar.productions(lhs=item):
58
+ yield from _generate_all(grammar, prod.rhs(), depth - 1)
59
+ else:
60
+ yield [item]
61
+
62
+
63
+ demo_grammar = """
64
+ S -> NP VP
65
+ NP -> Det N
66
+ PP -> P NP
67
+ VP -> 'slept' | 'saw' NP | 'walked' PP
68
+ Det -> 'the' | 'a'
69
+ N -> 'man' | 'park' | 'dog'
70
+ P -> 'in' | 'with'
71
+ """
72
+
73
+
74
+ def demo(N=23):
75
+ from nltk.grammar import CFG
76
+
77
+ print("Generating the first %d sentences for demo grammar:" % (N,))
78
+ print(demo_grammar)
79
+ grammar = CFG.fromstring(demo_grammar)
80
+ for n, sent in enumerate(generate(grammar, n=N), 1):
81
+ print("%3d. %s" % (n, " ".join(sent)))
82
+
83
+
84
+ if __name__ == "__main__":
85
+ demo()
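A hedged usage sketch (not part of generate.py): bounding both the tree depth and the number of sentences keeps generation finite even for recursive grammars. With depth=4, only the 'slept' productions of the demo grammar fit within the depth limit.

from nltk import CFG
from nltk.parse.generate import demo_grammar, generate

grammar = CFG.fromstring(demo_grammar)
for sent in generate(grammar, depth=4, n=5):
    print(" ".join(sent))  # e.g. "the man slept", "the park slept", ...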
venv/lib/python3.10/site-packages/nltk/parse/malt.py ADDED
@@ -0,0 +1,393 @@
1
+ # Natural Language Toolkit: Interface to MaltParser
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ # Contributor: Liling Tan, Mustufain, osamamukhtar11
5
+ #
6
+ # Copyright (C) 2001-2023 NLTK Project
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ import inspect
11
+ import os
12
+ import subprocess
13
+ import sys
14
+ import tempfile
15
+
16
+ from nltk.data import ZipFilePathPointer
17
+ from nltk.internals import find_dir, find_file, find_jars_within_path
18
+ from nltk.parse.api import ParserI
19
+ from nltk.parse.dependencygraph import DependencyGraph
20
+ from nltk.parse.util import taggedsents_to_conll
21
+
22
+
23
+ def malt_regex_tagger():
24
+ from nltk.tag import RegexpTagger
25
+
26
+ _tagger = RegexpTagger(
27
+ [
28
+ (r"\.$", "."),
29
+ (r"\,$", ","),
30
+ (r"\?$", "?"), # fullstop, comma, Qmark
31
+ (r"\($", "("),
32
+ (r"\)$", ")"), # round brackets
33
+ (r"\[$", "["),
34
+ (r"\]$", "]"), # square brackets
35
+ (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers
36
+ (r"(The|the|A|a|An|an)$", "DT"), # articles
37
+ (r"(He|he|She|she|It|it|I|me|Me|You|you)$", "PRP"), # pronouns
38
+ (r"(His|his|Her|her|Its|its)$", "PRP$"), # possessive
39
+ (r"(my|Your|your|Yours|yours)$", "PRP$"), # possessive
40
+ (r"(on|On|in|In|at|At|since|Since)$", "IN"), # time prepopsitions
41
+ (r"(for|For|ago|Ago|before|Before)$", "IN"), # time prepopsitions
42
+ (r"(till|Till|until|Until)$", "IN"), # time prepopsitions
43
+ (r"(by|By|beside|Beside)$", "IN"), # space prepopsitions
44
+ (r"(under|Under|below|Below)$", "IN"), # space prepopsitions
45
+ (r"(over|Over|above|Above)$", "IN"), # space prepopsitions
46
+ (r"(across|Across|through|Through)$", "IN"), # space prepopsitions
47
+ (r"(into|Into|towards|Towards)$", "IN"), # space prepopsitions
48
+ (r"(onto|Onto|from|From)$", "IN"), # space prepopsitions
49
+ (r".*able$", "JJ"), # adjectives
50
+ (r".*ness$", "NN"), # nouns formed from adjectives
51
+ (r".*ly$", "RB"), # adverbs
52
+ (r".*s$", "NNS"), # plural nouns
53
+ (r".*ing$", "VBG"), # gerunds
54
+ (r".*ed$", "VBD"), # past tense verbs
55
+ (r".*", "NN"), # nouns (default)
56
+ ]
57
+ )
58
+ return _tagger.tag
59
+
60
+
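A hedged usage sketch (annotation, not part of malt.py): `malt_regex_tagger()` returns the bound `tag` method of the `RegexpTagger` above, so it can be called directly on a tokenized sentence when no better POS tagger is supplied; the tags shown follow from the regex order listed above.

from nltk.parse.malt import malt_regex_tagger

tag = malt_regex_tagger()
print(tag("the dog walked in the park .".split()))
# [('the', 'DT'), ('dog', 'NN'), ('walked', 'VBD'),
#  ('in', 'IN'), ('the', 'DT'), ('park', 'NN'), ('.', '.')]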
61
+ def find_maltparser(parser_dirname):
62
+ """
63
+ Find the MaltParser .jar file and its dependencies.
64
+ """
65
+ if os.path.exists(parser_dirname): # If a full path is given.
66
+ _malt_dir = parser_dirname
67
+ else: # Try to find path to maltparser directory in environment variables.
68
+ _malt_dir = find_dir(parser_dirname, env_vars=("MALT_PARSER",))
69
+ # Checks that that the found directory contains all the necessary .jar
70
+ malt_dependencies = ["", "", ""]
71
+ _malt_jars = set(find_jars_within_path(_malt_dir))
72
+ _jars = {os.path.split(jar)[1] for jar in _malt_jars}
73
+ malt_dependencies = {"log4j.jar", "libsvm.jar", "liblinear-1.8.jar"}
74
+
75
+ assert malt_dependencies.issubset(_jars)
76
+ assert any(
77
+ filter(lambda i: i.startswith("maltparser-") and i.endswith(".jar"), _jars)
78
+ )
79
+ return list(_malt_jars)
80
+
81
+
82
+ def find_malt_model(model_filename):
83
+ """
84
+ A function to find a pre-trained MaltParser model.
85
+ """
86
+ if model_filename is None:
87
+ return "malt_temp.mco"
88
+ elif os.path.exists(model_filename): # If a full path is given.
89
+ return model_filename
90
+ else: # Try to find path to malt model in environment variables.
91
+ return find_file(model_filename, env_vars=("MALT_MODEL",), verbose=False)
92
+
93
+
94
+ class MaltParser(ParserI):
95
+ """
96
+ A class for dependency parsing with MaltParser. The input is the paths to:
97
+ - (optionally) a maltparser directory
98
+ - (optionally) the path to a pre-trained MaltParser .mco model file
99
+ - (optionally) the tagger to use for POS tagging before parsing
100
+ - (optionally) additional Java arguments
101
+
102
+ Example:
103
+ >>> from nltk.parse import malt
104
+ >>> # With MALT_PARSER and MALT_MODEL environment set.
105
+ >>> mp = malt.MaltParser(model_filename='engmalt.linear-1.7.mco') # doctest: +SKIP
106
+ >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP
107
+ (shot I (elephant an) (in (pajamas my)) .)
108
+ >>> # Without MALT_PARSER and MALT_MODEL environment.
109
+ >>> mp = malt.MaltParser('/home/user/maltparser-1.9.2/', '/home/user/engmalt.linear-1.7.mco') # doctest: +SKIP
110
+ >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP
111
+ (shot I (elephant an) (in (pajamas my)) .)
112
+ """
113
+
114
+ def __init__(
115
+ self,
116
+ parser_dirname="",
117
+ model_filename=None,
118
+ tagger=None,
119
+ additional_java_args=None,
120
+ ):
121
+ """
122
+ An interface for parsing with the Malt Parser.
123
+
124
+ :param parser_dirname: The path to the maltparser directory that
125
+ contains the maltparser-1.x.jar
126
+ :type parser_dirname: str
127
+ :param model_filename: The name of the pre-trained model with .mco file
128
+ extension. If provided, training will not be required.
129
+ (see http://www.maltparser.org/mco/mco.html and
130
+ see http://www.patful.com/chalk/node/185)
131
+ :type model_filename: str
132
+ :param tagger: The tagger used to POS tag the raw string before
133
+ formatting to CONLL format. It should behave like `nltk.pos_tag`
134
+ :type tagger: function
135
+ :param additional_java_args: Additional Java arguments to pass when
136
+ calling MaltParser, usually heap-size limits,
137
+ e.g. `additional_java_args=['-Xmx1024m']`
138
+ (see https://goo.gl/mpDBvQ)
139
+ :type additional_java_args: list
140
+ """
141
+
142
+ # Find all the necessary jar files for MaltParser.
143
+ self.malt_jars = find_maltparser(parser_dirname)
144
+ # Initialize additional java arguments.
145
+ self.additional_java_args = (
146
+ additional_java_args if additional_java_args is not None else []
147
+ )
148
+ # Initialize model.
149
+ self.model = find_malt_model(model_filename)
150
+ self._trained = self.model != "malt_temp.mco"
151
+ # Set the working_dir parameters i.e. `-w` from MaltParser's option.
152
+ self.working_dir = tempfile.gettempdir()
153
+ # Initialize POS tagger.
154
+ self.tagger = tagger if tagger is not None else malt_regex_tagger()
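For orientation, an editorial sketch of construction with a larger Java heap and `nltk.pos_tag` as the tagger (the install path and model name below are hypothetical):

>>> from nltk.tag import pos_tag  # doctest: +SKIP
>>> mp = MaltParser(parser_dirname='/home/user/maltparser-1.9.2',   # hypothetical path
...                 model_filename='engmalt.linear-1.7.mco',        # hypothetical model
...                 tagger=pos_tag,
...                 additional_java_args=['-Xmx1024m'])  # doctest: +SKIP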
155
+
156
+ def parse_tagged_sents(self, sentences, verbose=False, top_relation_label="null"):
157
+ """
158
+ Use MaltParser to parse multiple POS tagged sentences. Takes multiple
159
+ sentences where each sentence is a list of (word, tag) tuples.
160
+ The sentences must have already been tokenized and tagged.
161
+
162
+ :param sentences: Input sentences to parse
163
+ :type sentences: list(list(tuple(str, str)))
164
+ :return: iter(iter(``DependencyGraph``)) the dependency graph
165
+ representation of each sentence
166
+ """
167
+ if not self._trained:
168
+ raise Exception("Parser has not been trained. Call train() first.")
169
+
170
+ with tempfile.NamedTemporaryFile(
171
+ prefix="malt_input.conll.", dir=self.working_dir, mode="w", delete=False
172
+ ) as input_file:
173
+ with tempfile.NamedTemporaryFile(
174
+ prefix="malt_output.conll.",
175
+ dir=self.working_dir,
176
+ mode="w",
177
+ delete=False,
178
+ ) as output_file:
179
+ # Convert list of sentences to CONLL format.
180
+ for line in taggedsents_to_conll(sentences):
181
+ input_file.write(str(line))
182
+ input_file.close()
183
+
184
+ # Generate command to run maltparser.
185
+ cmd = self.generate_malt_command(
186
+ input_file.name, output_file.name, mode="parse"
187
+ )
188
+
189
+ # This is a MaltParser quirk: it needs to be run from the directory
190
+ # where the model file is; otherwise it fails with an awkward
191
+ # missing-.jar or strange -w working_dir problem.
192
+ _current_path = os.getcwd() # Remembers the current path.
193
+ try: # Change to modelfile path
194
+ os.chdir(os.path.split(self.model)[0])
195
+ except:
196
+ pass
197
+ ret = self._execute(cmd, verbose) # Run command.
198
+ os.chdir(_current_path) # Change back to current path.
199
+
200
+ if ret != 0:
201
+ raise Exception(
202
+ "MaltParser parsing (%s) failed with exit "
203
+ "code %d" % (" ".join(cmd), ret)
204
+ )
205
+
206
+ # Must return iter(iter(Tree))
207
+ with open(output_file.name) as infile:
208
+ for tree_str in infile.read().split("\n\n"):
209
+ yield (
210
+ iter(
211
+ [
212
+ DependencyGraph(
213
+ tree_str, top_relation_label=top_relation_label
214
+ )
215
+ ]
216
+ )
217
+ )
218
+
219
+ os.remove(input_file.name)
220
+ os.remove(output_file.name)
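A hedged usage sketch (editorial; it assumes a trained parser `mp` as above): pre-tagged input bypasses the tagger, and the method yields an iterator of iterators of ``DependencyGraph``:

>>> tagged = [[('John', 'NNP'), ('sees', 'VBZ'), ('Mary', 'NNP'), ('.', '.')]]
>>> graphs = mp.parse_tagged_sents(tagged)    # doctest: +SKIP
>>> print(next(next(graphs)).tree())          # doctest: +SKIP
(sees John Mary .)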
221
+
222
+ def parse_sents(self, sentences, verbose=False, top_relation_label="null"):
223
+ """
224
+ Use MaltParser to parse multiple sentences.
225
+ Takes a list of sentences, where each sentence is a list of words.
226
+ Each sentence will be automatically tagged with this
227
+ MaltParser instance's tagger.
228
+
229
+ :param sentences: Input sentences to parse
230
+ :type sentences: list(list(str))
231
+ :return: iter(DependencyGraph)
232
+ """
233
+ tagged_sentences = (self.tagger(sentence) for sentence in sentences)
234
+ return self.parse_tagged_sents(
235
+ tagged_sentences, verbose, top_relation_label=top_relation_label
236
+ )
237
+
238
+ def generate_malt_command(self, inputfilename, outputfilename=None, mode=None):
239
+ """
240
+ This function generates the maltparser command used at the terminal.
241
+
242
+ :param inputfilename: path to the input file
243
+ :type inputfilename: str
244
+ :param outputfilename: path to the output file
245
+ :type outputfilename: str
246
+ """
247
+
248
+ cmd = ["java"]
249
+ cmd += self.additional_java_args # Adds additional java arguments
250
+ # Join classpaths with ";" on Windows and with ":" on Linux/Mac
251
+ classpaths_separator = ";" if sys.platform.startswith("win") else ":"
252
+ cmd += [
253
+ "-cp",
254
+ classpaths_separator.join(self.malt_jars),
255
+ ] # Adds classpaths for jars
256
+ cmd += ["org.maltparser.Malt"] # Adds the main function.
257
+
258
+ # Adds the model file.
259
+ if os.path.exists(self.model): # when parsing
260
+ cmd += ["-c", os.path.split(self.model)[-1]]
261
+ else: # when learning
262
+ cmd += ["-c", self.model]
263
+
264
+ cmd += ["-i", inputfilename]
265
+ if mode == "parse":
266
+ cmd += ["-o", outputfilename]
267
+ cmd += ["-m", mode] # mode use to generate parses.
268
+ return cmd
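For orientation (editorial note; the jar paths and temp-file names below are hypothetical), the list returned for ``mode='parse'`` with a trained model resembles:

['java', '-Xmx1024m', '-cp', '/path/maltparser-1.9.2.jar:/path/liblinear-1.8.jar:...',
 'org.maltparser.Malt', '-c', 'engmalt.linear-1.7.mco',
 '-i', '/tmp/malt_input.conll.abc123', '-o', '/tmp/malt_output.conll.abc123', '-m', 'parse']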
269
+
270
+ @staticmethod
271
+ def _execute(cmd, verbose=False):
272
+ output = None if verbose else subprocess.PIPE
273
+ p = subprocess.Popen(cmd, stdout=output, stderr=output)
274
+ return p.wait()
275
+
276
+ def train(self, depgraphs, verbose=False):
277
+ """
278
+ Train MaltParser from a list of ``DependencyGraph`` objects
279
+
280
+ :param depgraphs: list of ``DependencyGraph`` objects for training input data
281
+ :type depgraphs: DependencyGraph
282
+ """
283
+
284
+ # Write the conll_str to malt_train.conll file in /tmp/
285
+ with tempfile.NamedTemporaryFile(
286
+ prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False
287
+ ) as input_file:
288
+ input_str = "\n".join(dg.to_conll(10) for dg in depgraphs)
289
+ input_file.write(str(input_str))
290
+ # Trains the model with the malt_train.conll
291
+ self.train_from_file(input_file.name, verbose=verbose)
292
+ # Removes the malt_train.conll once training finishes.
293
+ os.remove(input_file.name)
294
+
295
+ def train_from_file(self, conll_file, verbose=False):
296
+ """
297
+ Train MaltParser from a file
298
+ :param conll_file: str for the filename of the training input data
299
+ :type conll_file: str
300
+ """
301
+
302
+ # If conll_file is a ZipFilePathPointer,
303
+ # then we need to do some extra massaging
304
+ if isinstance(conll_file, ZipFilePathPointer):
305
+ with tempfile.NamedTemporaryFile(
306
+ prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False
307
+ ) as input_file:
308
+ with conll_file.open() as conll_input_file:
309
+ conll_str = conll_input_file.read()
310
+ input_file.write(str(conll_str))
311
+ return self.train_from_file(input_file.name, verbose=verbose)
312
+
313
+ # Generate command to run maltparser.
314
+ cmd = self.generate_malt_command(conll_file, mode="learn")
315
+ ret = self._execute(cmd, verbose)
316
+ if ret != 0:
317
+ raise Exception(
318
+ "MaltParser training (%s) failed with exit "
319
+ "code %d" % (" ".join(cmd), ret)
320
+ )
321
+ self._trained = True
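A minimal training sketch (editorial; the paths are hypothetical and assume a MaltParser installation plus a CoNLL-formatted training file on disk):

>>> mp = MaltParser(parser_dirname='/home/user/maltparser-1.9.2')   # doctest: +SKIP
>>> mp.train_from_file('/home/user/train.conll')                    # doctest: +SKIP
>>> graph = mp.parse_one('John sees Mary .'.split())                # doctest: +SKIP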
322
+
323
+
324
+ if __name__ == "__main__":
325
+ """
326
+ A demonstration function to show how NLTK users can use the malt parser API.
327
+
328
+ >>> from nltk import pos_tag
329
+ >>> assert 'MALT_PARSER' in os.environ, str(
330
+ ... "Please set MALT_PARSER in your global environment, e.g.:\n"
331
+ ... "$ export MALT_PARSER='/home/user/maltparser-1.9.2/'")
332
+ >>>
333
+ >>> assert 'MALT_MODEL' in os.environ, str(
334
+ ... "Please set MALT_MODEL in your global environment, e.g.:\n"
335
+ ... "$ export MALT_MODEL='/home/user/engmalt.linear-1.7.mco'")
336
+ >>>
337
+ >>> _dg1_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n"
338
+ ... "2 sees _ VB _ _ 0 ROOT _ _\n"
339
+ ... "3 a _ DT _ _ 4 SPEC _ _\n"
340
+ ... "4 dog _ NN _ _ 2 OBJ _ _\n"
341
+ ... "5 . _ . _ _ 2 PUNCT _ _\n")
342
+ >>>
343
+ >>>
344
+ >>> _dg2_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n"
345
+ ... "2 walks _ VB _ _ 0 ROOT _ _\n"
346
+ ... "3 . _ . _ _ 2 PUNCT _ _\n")
347
+ >>> dg1 = DependencyGraph(_dg1_str)
348
+ >>> dg2 = DependencyGraph(_dg2_str)
349
+ >>> # Initialize a MaltParser object
350
+ >>> mp = MaltParser()
351
+ >>>
352
+ >>> # Trains a model.
353
+ >>> mp.train([dg1,dg2], verbose=False)
354
+ >>> sent1 = ['John','sees','Mary', '.']
355
+ >>> sent2 = ['John', 'walks', 'a', 'dog', '.']
356
+ >>>
357
+ >>> # Parse a single sentence.
358
+ >>> parsed_sent1 = mp.parse_one(sent1)
359
+ >>> parsed_sent2 = mp.parse_one(sent2)
360
+ >>> print(parsed_sent1.tree())
361
+ (sees John Mary .)
362
+ >>> print(parsed_sent2.tree())
363
+ (walks John (dog a) .)
364
+ >>>
365
+ >>> # Parsing multiple sentences.
366
+ >>> sentences = [sent1,sent2]
367
+ >>> parsed_sents = mp.parse_sents(sentences)
368
+ >>> print(next(next(parsed_sents)).tree())
369
+ (sees John Mary .)
370
+ >>> print(next(next(parsed_sents)).tree())
371
+ (walks John (dog a) .)
372
+ >>>
373
+ >>> # Initialize a MaltParser object with an English pre-trained model.
374
+ >>> parser_dirname = 'maltparser-1.9.2'
375
+ >>> model_name = 'engmalt.linear-1.7.mco'
376
+ >>> mp = MaltParser(parser_dirname=parser_dirname, model_filename=model_name, tagger=pos_tag)
377
+ >>> sent1 = 'I shot an elephant in my pajamas .'.split()
378
+ >>> sent2 = 'Time flies like banana .'.split()
379
+ >>> # Parse a single sentence.
380
+ >>> print(mp.parse_one(sent1).tree())
381
+ (shot I (elephant an) (in (pajamas my)) .)
382
+ # Parsing multiple sentences
383
+ >>> sentences = [sent1,sent2]
384
+ >>> parsed_sents = mp.parse_sents(sentences)
385
+ >>> print(next(next(parsed_sents)).tree())
386
+ (shot I (elephant an) (in (pajamas my)) .)
387
+ >>> print(next(next(parsed_sents)).tree())
388
+ (flies Time (like banana) .)
389
+ """
390
+
391
+ import doctest
392
+
393
+ doctest.testmod()
venv/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py ADDED
@@ -0,0 +1,772 @@
1
+ # Natural Language Toolkit: Dependency Grammars
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Jason Narad <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ #
9
+
10
+ import logging
11
+ import math
12
+
13
+ from nltk.parse.dependencygraph import DependencyGraph
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+ #################################################################
18
+ # DependencyScorerI - Interface for Graph-Edge Weight Calculation
19
+ #################################################################
20
+
21
+
22
+ class DependencyScorerI:
23
+ """
24
+ A scorer for calculating the weights on the edges of a weighted
25
+ dependency graph. This is used by a
26
+ ``ProbabilisticNonprojectiveParser`` to initialize the edge
27
+ weights of a ``DependencyGraph``. While typically this would be done
28
+ by training a binary classifier, any class that can return a
29
+ multidimensional list representation of the edge weights can
30
+ implement this interface. As such, it has no necessary
31
+ fields.
32
+ """
33
+
34
+ def __init__(self):
35
+ if self.__class__ == DependencyScorerI:
36
+ raise TypeError("DependencyScorerI is an abstract interface")
37
+
38
+ def train(self, graphs):
39
+ """
40
+ :type graphs: list(DependencyGraph)
41
+ :param graphs: A list of dependency graphs to train the scorer.
42
+ Typically the edges present in the graphs can be used as
43
+ positive training examples, and the edges not present as negative
44
+ examples.
45
+ """
46
+ raise NotImplementedError()
47
+
48
+ def score(self, graph):
49
+ """
50
+ :type graph: DependencyGraph
51
+ :param graph: A dependency graph whose set of edges need to be
52
+ scored.
53
+ :rtype: A three-dimensional list of numbers.
54
+ :return: The score is returned in a three-dimensional list, such
55
+ that the outer-dimension refers to the head, and the
56
+ inner-dimension refers to the dependencies. For instance,
57
+ scores[0][1] would reference the list of scores corresponding to
58
+ arcs from node 0 to node 1. The node's 'address' field can be used
59
+ to determine its number identification.
60
+
61
+ For further illustration, a score list corresponding to Fig.2 of
62
+ Keith Hall's 'K-best Spanning Tree Parsing' paper::
63
+
64
+ scores = [[[], [5], [1], [1]],
65
+ [[], [], [11], [4]],
66
+ [[], [10], [], [5]],
67
+ [[], [8], [8], []]]
68
+
69
+ When used in conjunction with a MaxEntClassifier, each score would
70
+ correspond to the confidence of a particular edge being classified
71
+ with the positive training examples.
72
+ """
73
+ raise NotImplementedError()
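To make the indexing concrete (an editorial illustration reusing the matrix from the docstring above): the outer index is the head node's address and the inner index is the dependent's, so ``scores[1][2]`` holds the score(s) for an arc from node 1 to node 2:

>>> scores = [[[], [5],  [1], [1]],
...           [[], [],  [11], [4]],
...           [[], [10], [],  [5]],
...           [[], [8],  [8], []]]
>>> scores[1][2]
[11]
>>> scores[2][1]
[10]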
74
+
75
+
76
+ #################################################################
77
+ # NaiveBayesDependencyScorer
78
+ #################################################################
79
+
80
+
81
+ class NaiveBayesDependencyScorer(DependencyScorerI):
82
+ """
83
+ A dependency scorer built around a MaxEnt classifier. In this
84
+ particular class that classifier is a ``NaiveBayesClassifier``.
85
+ It uses head-word, head-tag, child-word, and child-tag features
86
+ for classification.
87
+
88
+ >>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2
89
+
90
+ >>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry]
91
+ >>> npp = ProbabilisticNonprojectiveParser()
92
+ >>> npp.train(graphs, NaiveBayesDependencyScorer())
93
+ >>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc'])
94
+ >>> len(list(parses))
95
+ 1
96
+
97
+ """
98
+
99
+ def __init__(self):
100
+ pass # Do nothing without throwing error
101
+
102
+ def train(self, graphs):
103
+ """
104
+ Trains a ``NaiveBayesClassifier`` using the edges present in
105
+ graphs list as positive examples, the edges not present as
106
+ negative examples. Uses a feature vector of head-word,
107
+ head-tag, child-word, and child-tag.
108
+
109
+ :type graphs: list(DependencyGraph)
110
+ :param graphs: A list of dependency graphs to train the scorer.
111
+ """
112
+
113
+ from nltk.classify import NaiveBayesClassifier
114
+
115
+ # Create training labeled training examples
116
+ labeled_examples = []
117
+ for graph in graphs:
118
+ for head_node in graph.nodes.values():
119
+ for child_index, child_node in graph.nodes.items():
120
+ if child_index in head_node["deps"]:
121
+ label = "T"
122
+ else:
123
+ label = "F"
124
+ labeled_examples.append(
125
+ (
126
+ dict(
127
+ a=head_node["word"],
128
+ b=head_node["tag"],
129
+ c=child_node["word"],
130
+ d=child_node["tag"],
131
+ ),
132
+ label,
133
+ )
134
+ )
135
+
136
+ self.classifier = NaiveBayesClassifier.train(labeled_examples)
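For clarity (an editorial illustration; actual values depend on the training graphs), each labeled example pairs a dict of head-word (``a``), head-tag (``b``), child-word (``c``) and child-tag (``d``) features with a 'T'/'F' label:

>>> dict(a='zag', b='V', c='Cathy', d='N'), 'T'   # an attested arc zag -> Cathy
({'a': 'zag', 'b': 'V', 'c': 'Cathy', 'd': 'N'}, 'T')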
137
+
138
+ def score(self, graph):
139
+ """
140
+ Converts the graph into a feature-based representation of
141
+ each edge, and then assigns a score to each based on the
142
+ confidence of the classifier in assigning it to the
143
+ positive label. Scores are returned in a multidimensional list.
144
+
145
+ :type graph: DependencyGraph
146
+ :param graph: A dependency graph to score.
147
+ :rtype: 3 dimensional list
148
+ :return: Edge scores for the graph parameter.
149
+ """
150
+ # Convert graph to feature representation
151
+ edges = []
152
+ for head_node in graph.nodes.values():
153
+ for child_node in graph.nodes.values():
154
+ edges.append(
155
+ dict(
156
+ a=head_node["word"],
157
+ b=head_node["tag"],
158
+ c=child_node["word"],
159
+ d=child_node["tag"],
160
+ )
161
+ )
162
+
163
+ # Score edges
164
+ edge_scores = []
165
+ row = []
166
+ count = 0
167
+ for pdist in self.classifier.prob_classify_many(edges):
168
+ logger.debug("%.4f %.4f", pdist.prob("T"), pdist.prob("F"))
169
+ # smoothing in case the probability = 0
170
+ row.append([math.log(pdist.prob("T") + 0.00000000001)])
171
+ count += 1
172
+ if count == len(graph.nodes):
173
+ edge_scores.append(row)
174
+ row = []
175
+ count = 0
176
+ return edge_scores
177
+
178
+
179
+ #################################################################
180
+ # A Scorer for Demo Purposes
181
+ #################################################################
182
+ # A short class necessary to show parsing example from paper
183
+ class DemoScorer(DependencyScorerI):
184
+ def train(self, graphs):
185
+ print("Training...")
186
+
187
+ def score(self, graph):
188
+ # scores for Keith Hall 'K-best Spanning Tree Parsing' paper
189
+ return [
190
+ [[], [5], [1], [1]],
191
+ [[], [], [11], [4]],
192
+ [[], [10], [], [5]],
193
+ [[], [8], [8], []],
194
+ ]
195
+
196
+
197
+ #################################################################
198
+ # Non-Projective Probabilistic Parsing
199
+ #################################################################
200
+
201
+
202
+ class ProbabilisticNonprojectiveParser:
203
+ """A probabilistic non-projective dependency parser.
204
+
205
+ Nonprojective dependencies allow for "crossing branches" in the parse tree
206
+ which is necessary for representing particular linguistic phenomena, or even
207
+ typical parses in some languages. This parser follows the MST parsing
208
+ algorithm, outlined in McDonald(2005), which likens the search for the best
209
+ non-projective parse to finding the maximum spanning tree in a weighted
210
+ directed graph.
211
+
212
+ >>> class Scorer(DependencyScorerI):
213
+ ... def train(self, graphs):
214
+ ... pass
215
+ ...
216
+ ... def score(self, graph):
217
+ ... return [
218
+ ... [[], [5], [1], [1]],
219
+ ... [[], [], [11], [4]],
220
+ ... [[], [10], [], [5]],
221
+ ... [[], [8], [8], []],
222
+ ... ]
223
+
224
+
225
+ >>> npp = ProbabilisticNonprojectiveParser()
226
+ >>> npp.train([], Scorer())
227
+
228
+ >>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None])
229
+ >>> len(list(parses))
230
+ 1
231
+
232
+ Rule based example
233
+
234
+ >>> from nltk.grammar import DependencyGrammar
235
+
236
+ >>> grammar = DependencyGrammar.fromstring('''
237
+ ... 'taught' -> 'play' | 'man'
238
+ ... 'man' -> 'the' | 'in'
239
+ ... 'in' -> 'corner'
240
+ ... 'corner' -> 'the'
241
+ ... 'play' -> 'golf' | 'dachshund' | 'to'
242
+ ... 'dachshund' -> 'his'
243
+ ... ''')
244
+
245
+ >>> ndp = NonprojectiveDependencyParser(grammar)
246
+ >>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf'])
247
+ >>> len(list(parses))
248
+ 4
249
+
250
+ """
251
+
252
+ def __init__(self):
253
+ """
254
+ Creates a new non-projective parser.
255
+ """
256
+ logging.debug("initializing prob. nonprojective...")
257
+
258
+ def train(self, graphs, dependency_scorer):
259
+ """
260
+ Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects,
261
+ and establishes this as the parser's scorer. This is used to
262
+ initialize the scores on a ``DependencyGraph`` during the parsing
263
+ procedure.
264
+
265
+ :type graphs: list(DependencyGraph)
266
+ :param graphs: A list of dependency graphs to train the scorer.
267
+ :type dependency_scorer: DependencyScorerI
268
+ :param dependency_scorer: A scorer which implements the
269
+ ``DependencyScorerI`` interface.
270
+ """
271
+ self._scorer = dependency_scorer
272
+ self._scorer.train(graphs)
273
+
274
+ def initialize_edge_scores(self, graph):
275
+ """
276
+ Assigns a score to every edge in the ``DependencyGraph`` graph.
277
+ These scores are generated via the parser's scorer which
278
+ was assigned during the training process.
279
+
280
+ :type graph: DependencyGraph
281
+ :param graph: A dependency graph to assign scores to.
282
+ """
283
+ self.scores = self._scorer.score(graph)
284
+
285
+ def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph):
286
+ """
287
+ Takes a list of nodes that have been identified to belong to a cycle,
288
+ and collapses them into one larger node. The arcs of all nodes in
289
+ the graph must be updated to account for this.
290
+
291
+ :type new_node: Node.
292
+ :param new_node: A Node (Dictionary) to collapse the cycle nodes into.
293
+ :type cycle_path: A list of integers.
294
+ :param cycle_path: A list of node addresses, each of which is in the cycle.
295
+ :type g_graph, b_graph, c_graph: DependencyGraph
296
+ :param g_graph, b_graph, c_graph: Graphs which need to be updated.
297
+ """
298
+ logger.debug("Collapsing nodes...")
299
+ # Collapse all cycle nodes into v_n+1 in G_Graph
300
+ for cycle_node_index in cycle_path:
301
+ g_graph.remove_by_address(cycle_node_index)
302
+ g_graph.add_node(new_node)
303
+ g_graph.redirect_arcs(cycle_path, new_node["address"])
304
+
305
+ def update_edge_scores(self, new_node, cycle_path):
306
+ """
307
+ Updates the edge scores to reflect a collapse operation into
308
+ new_node.
309
+
310
+ :type new_node: A Node.
311
+ :param new_node: The node which cycle nodes are collapsed into.
312
+ :type cycle_path: A list of integers.
313
+ :param cycle_path: A list of node addresses that belong to the cycle.
314
+ """
315
+ logger.debug("cycle %s", cycle_path)
316
+
317
+ cycle_path = self.compute_original_indexes(cycle_path)
318
+
319
+ logger.debug("old cycle %s", cycle_path)
320
+ logger.debug("Prior to update: %s", self.scores)
321
+
322
+ for i, row in enumerate(self.scores):
323
+ for j, column in enumerate(self.scores[i]):
324
+ logger.debug(self.scores[i][j])
325
+ if j in cycle_path and i not in cycle_path and self.scores[i][j]:
326
+ subtract_val = self.compute_max_subtract_score(j, cycle_path)
327
+
328
+ logger.debug("%s - %s", self.scores[i][j], subtract_val)
329
+
330
+ new_vals = []
331
+ for cur_val in self.scores[i][j]:
332
+ new_vals.append(cur_val - subtract_val)
333
+
334
+ self.scores[i][j] = new_vals
335
+
336
+ for i, row in enumerate(self.scores):
337
+ for j, cell in enumerate(self.scores[i]):
338
+ if i in cycle_path and j in cycle_path:
339
+ self.scores[i][j] = []
340
+
341
+ logger.debug("After update: %s", self.scores)
342
+
343
+ def compute_original_indexes(self, new_indexes):
344
+ """
345
+ As nodes are collapsed into others, they are replaced
346
+ by the new node in the graph, but it's still necessary
347
+ to keep track of what these original nodes were. This
348
+ takes a list of node addresses and replaces any collapsed
349
+ node addresses with their original addresses.
350
+
351
+ :type new_indexes: A list of integers.
352
+ :param new_indexes: A list of node addresses to check for
353
+ subsumed nodes.
354
+ """
355
+ swapped = True
356
+ while swapped:
357
+ originals = []
358
+ swapped = False
359
+ for new_index in new_indexes:
360
+ if new_index in self.inner_nodes:
361
+ for old_val in self.inner_nodes[new_index]:
362
+ if old_val not in originals:
363
+ originals.append(old_val)
364
+ swapped = True
365
+ else:
366
+ originals.append(new_index)
367
+ new_indexes = originals
368
+ return new_indexes
369
+
370
+ def compute_max_subtract_score(self, column_index, cycle_indexes):
371
+ """
372
+ When updating scores the score of the highest-weighted incoming
373
+ arc is subtracted upon collapse. This returns the correct
374
+ amount to subtract from that edge.
375
+
376
+ :type column_index: integer.
377
+ :param column_index: An index representing the column of incoming arcs
378
+ to a particular node being updated
379
+ :type cycle_indexes: A list of integers.
380
+ :param cycle_indexes: Only arcs from cycle nodes are considered. This
381
+ is a list of such nodes addresses.
382
+ """
383
+ max_score = -100000
384
+ for row_index in cycle_indexes:
385
+ for subtract_val in self.scores[row_index][column_index]:
386
+ if subtract_val > max_score:
387
+ max_score = subtract_val
388
+ return max_score
389
+
390
+ def best_incoming_arc(self, node_index):
391
+ """
392
+ Returns the source of the best incoming arc to the
393
+ node with address: node_index
394
+
395
+ :type node_index: integer.
396
+ :param node_index: The address of the 'destination' node,
397
+ the node that is arced to.
398
+ """
399
+ originals = self.compute_original_indexes([node_index])
400
+ logger.debug("originals: %s", originals)
401
+
402
+ max_arc = None
403
+ max_score = None
404
+ for row_index in range(len(self.scores)):
405
+ for col_index in range(len(self.scores[row_index])):
406
+ if col_index in originals and (
407
+ max_score is None or self.scores[row_index][col_index] > max_score
408
+ ):
409
+ max_score = self.scores[row_index][col_index]
410
+ max_arc = row_index
411
+ logger.debug("%s, %s", row_index, col_index)
412
+
413
+ logger.debug(max_score)
414
+
415
+ for key in self.inner_nodes:
416
+ replaced_nodes = self.inner_nodes[key]
417
+ if max_arc in replaced_nodes:
418
+ return key
419
+
420
+ return max_arc
421
+
422
+ def original_best_arc(self, node_index):
423
+ originals = self.compute_original_indexes([node_index])
424
+ max_arc = None
425
+ max_score = None
426
+ max_orig = None
427
+ for row_index in range(len(self.scores)):
428
+ for col_index in range(len(self.scores[row_index])):
429
+ if col_index in originals and (
430
+ max_score is None or self.scores[row_index][col_index] > max_score
431
+ ):
432
+ max_score = self.scores[row_index][col_index]
433
+ max_arc = row_index
434
+ max_orig = col_index
435
+ return [max_arc, max_orig]
436
+
437
+ def parse(self, tokens, tags):
438
+ """
439
+ Parses a list of tokens in accordance to the MST parsing algorithm
440
+ for non-projective dependency parses. Assumes that the tokens to
441
+ be parsed have already been tagged and those tags are provided. Various
442
+ scoring methods can be used by implementing the ``DependencyScorerI``
443
+ interface and passing it to the training algorithm.
444
+
445
+ :type tokens: list(str)
446
+ :param tokens: A list of words or punctuation to be parsed.
447
+ :type tags: list(str)
448
+ :param tags: A list of tags corresponding by index to the words in the tokens list.
449
+ :return: An iterator of non-projective parses.
450
+ :rtype: iter(DependencyGraph)
451
+ """
452
+ self.inner_nodes = {}
453
+
454
+ # Initialize g_graph
455
+ g_graph = DependencyGraph()
456
+ for index, token in enumerate(tokens):
457
+ g_graph.nodes[index + 1].update(
458
+ {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1}
459
+ )
460
+
461
+ # Fully connect non-root nodes in g_graph
462
+ g_graph.connect_graph()
463
+ original_graph = DependencyGraph()
464
+ for index, token in enumerate(tokens):
465
+ original_graph.nodes[index + 1].update(
466
+ {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1}
467
+ )
468
+
469
+ b_graph = DependencyGraph()
470
+ c_graph = DependencyGraph()
471
+
472
+ for index, token in enumerate(tokens):
473
+ c_graph.nodes[index + 1].update(
474
+ {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1}
475
+ )
476
+
477
+ # Assign initial scores to g_graph edges
478
+ self.initialize_edge_scores(g_graph)
479
+ logger.debug(self.scores)
480
+ # Initialize a list of unvisited vertices (by node address)
481
+ unvisited_vertices = [vertex["address"] for vertex in c_graph.nodes.values()]
482
+ # Iterate over unvisited vertices
483
+ nr_vertices = len(tokens)
484
+ betas = {}
485
+ while unvisited_vertices:
486
+ # Mark current node as visited
487
+ current_vertex = unvisited_vertices.pop(0)
488
+ logger.debug("current_vertex: %s", current_vertex)
489
+ # Get corresponding node n_i to vertex v_i
490
+ current_node = g_graph.get_by_address(current_vertex)
491
+ logger.debug("current_node: %s", current_node)
492
+ # Get best in-edge node b for current node
493
+ best_in_edge = self.best_incoming_arc(current_vertex)
494
+ betas[current_vertex] = self.original_best_arc(current_vertex)
495
+ logger.debug("best in arc: %s --> %s", best_in_edge, current_vertex)
496
+ # b_graph = Union(b_graph, b)
497
+ for new_vertex in [current_vertex, best_in_edge]:
498
+ b_graph.nodes[new_vertex].update(
499
+ {"word": "TEMP", "rel": "NTOP", "address": new_vertex}
500
+ )
501
+ b_graph.add_arc(best_in_edge, current_vertex)
502
+ # Beta(current node) = b - stored for parse recovery
503
+ # If b_graph contains a cycle, collapse it
504
+ cycle_path = b_graph.contains_cycle()
505
+ if cycle_path:
506
+ # Create a new node v_n+1 with address = len(nodes) + 1
507
+ new_node = {"word": "NONE", "rel": "NTOP", "address": nr_vertices + 1}
508
+ # c_graph = Union(c_graph, v_n+1)
509
+ c_graph.add_node(new_node)
510
+ # Collapse all nodes in cycle C into v_n+1
511
+ self.update_edge_scores(new_node, cycle_path)
512
+ self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph)
513
+ for cycle_index in cycle_path:
514
+ c_graph.add_arc(new_node["address"], cycle_index)
515
+ # self.replaced_by[cycle_index] = new_node['address']
516
+
517
+ self.inner_nodes[new_node["address"]] = cycle_path
518
+
519
+ # Add v_n+1 to list of unvisited vertices
520
+ unvisited_vertices.insert(0, nr_vertices + 1)
521
+
522
+ # increment # of nodes counter
523
+ nr_vertices += 1
524
+
525
+ # Remove cycle nodes from b_graph; B = B - cycle c
526
+ for cycle_node_address in cycle_path:
527
+ b_graph.remove_by_address(cycle_node_address)
528
+
529
+ logger.debug("g_graph: %s", g_graph)
530
+ logger.debug("b_graph: %s", b_graph)
531
+ logger.debug("c_graph: %s", c_graph)
532
+ logger.debug("Betas: %s", betas)
533
+ logger.debug("replaced nodes %s", self.inner_nodes)
534
+
535
+ # Recover parse tree
536
+ logger.debug("Final scores: %s", self.scores)
537
+
538
+ logger.debug("Recovering parse...")
539
+ for i in range(len(tokens) + 1, nr_vertices + 1):
540
+ betas[betas[i][1]] = betas[i]
541
+
542
+ logger.debug("Betas: %s", betas)
543
+ for node in original_graph.nodes.values():
544
+ # TODO: It's dangerous to assume that deps is a dictionary
545
+ # because it's a default dictionary. Ideally, here we should not
546
+ # be concerned how dependencies are stored inside of a dependency
547
+ # graph.
548
+ node["deps"] = {}
549
+ for i in range(1, len(tokens) + 1):
550
+ original_graph.add_arc(betas[i][0], betas[i][1])
551
+
552
+ logger.debug("Done.")
553
+ yield original_graph
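A hedged end-to-end sketch (editorial, mirroring the doctest in the class docstring; the exact parse depends on the trained scorer): train on CoNLL graphs, parse a tagged sentence, and inspect the single resulting graph:

>>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2
>>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\n\n') if entry]
>>> npp = ProbabilisticNonprojectiveParser()
>>> npp.train(graphs, NaiveBayesDependencyScorer())
>>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'],
...                    ['N', 'V', 'Pron', 'Adj', 'N', 'Punc'])
>>> graph = next(parses)
>>> print(graph.to_conll(4))  # doctest: +SKIP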
554
+
555
+
556
+ #################################################################
557
+ # Rule-based Non-Projective Parser
558
+ #################################################################
559
+
560
+
561
+ class NonprojectiveDependencyParser:
562
+ """
563
+ A non-projective, rule-based, dependency parser. This parser
564
+ will return the set of all possible non-projective parses based on
565
+ the word-to-word relations defined in the parser's dependency
566
+ grammar, and will allow the branches of the parse tree to cross
567
+ in order to capture a variety of linguistic phenomena that a
568
+ projective parser will not.
569
+ """
570
+
571
+ def __init__(self, dependency_grammar):
572
+ """
573
+ Creates a new ``NonprojectiveDependencyParser``.
574
+
575
+ :param dependency_grammar: a grammar of word-to-word relations.
576
+ :type dependency_grammar: DependencyGrammar
577
+ """
578
+ self._grammar = dependency_grammar
579
+
580
+ def parse(self, tokens):
581
+ """
582
+ Parses the input tokens with respect to the parser's grammar. Parsing
583
+ is accomplished by representing the search-space of possible parses as
584
+ a fully-connected directed graph. Arcs that would lead to ungrammatical
585
+ parses are removed and a lattice is constructed of length n, where n is
586
+ the number of input tokens, to represent all possible grammatical
587
+ traversals. All possible paths through the lattice are then enumerated
588
+ to produce the set of non-projective parses.
589
+
590
+ param tokens: A list of tokens to parse.
591
+ type tokens: list(str)
592
+ return: An iterator of non-projective parses.
593
+ rtype: iter(DependencyGraph)
594
+ """
595
+ # Create graph representation of tokens
596
+ self._graph = DependencyGraph()
597
+
598
+ for index, token in enumerate(tokens):
599
+ self._graph.nodes[index] = {
600
+ "word": token,
601
+ "deps": [],
602
+ "rel": "NTOP",
603
+ "address": index,
604
+ }
605
+
606
+ for head_node in self._graph.nodes.values():
607
+ deps = []
608
+ for dep_node in self._graph.nodes.values():
609
+ if (
610
+ self._grammar.contains(head_node["word"], dep_node["word"])
611
+ and head_node["word"] != dep_node["word"]
612
+ ):
613
+ deps.append(dep_node["address"])
614
+ head_node["deps"] = deps
615
+
616
+ # Create lattice of possible heads
617
+ roots = []
618
+ possible_heads = []
619
+ for i, word in enumerate(tokens):
620
+ heads = []
621
+ for j, head in enumerate(tokens):
622
+ if (i != j) and self._grammar.contains(head, word):
623
+ heads.append(j)
624
+ if len(heads) == 0:
625
+ roots.append(i)
626
+ possible_heads.append(heads)
627
+
628
+ # Set roots to attempt
629
+ if len(roots) < 2:
630
+ if len(roots) == 0:
631
+ for i in range(len(tokens)):
632
+ roots.append(i)
633
+
634
+ # Traverse lattice
635
+ analyses = []
636
+ for _ in roots:
637
+ stack = []
638
+ analysis = [[] for i in range(len(possible_heads))]
639
+ i = 0
640
+ forward = True
641
+ while i >= 0:
642
+ if forward:
643
+ if len(possible_heads[i]) == 1:
644
+ analysis[i] = possible_heads[i][0]
645
+ elif len(possible_heads[i]) == 0:
646
+ analysis[i] = -1
647
+ else:
648
+ head = possible_heads[i].pop()
649
+ analysis[i] = head
650
+ stack.append([i, head])
651
+ if not forward:
652
+ index_on_stack = False
653
+ for stack_item in stack:
654
+ if stack_item[0] == i:
655
+ index_on_stack = True
656
+ orig_length = len(possible_heads[i])
657
+
658
+ if index_on_stack and orig_length == 0:
659
+ for j in range(len(stack) - 1, -1, -1):
660
+ stack_item = stack[j]
661
+ if stack_item[0] == i:
662
+ possible_heads[i].append(stack.pop(j)[1])
663
+
664
+ elif index_on_stack and orig_length > 0:
665
+ head = possible_heads[i].pop()
666
+ analysis[i] = head
667
+ stack.append([i, head])
668
+ forward = True
669
+
670
+ if i + 1 == len(possible_heads):
671
+ analyses.append(analysis[:])
672
+ forward = False
673
+ if forward:
674
+ i += 1
675
+ else:
676
+ i -= 1
677
+
678
+ # Filter parses
679
+ # ensure 1 root, every thing has 1 head
680
+ for analysis in analyses:
681
+ if analysis.count(-1) > 1:
682
+ # there are several root elements!
683
+ continue
684
+
685
+ graph = DependencyGraph()
686
+ graph.root = graph.nodes[analysis.index(-1) + 1]
687
+
688
+ for address, (token, head_index) in enumerate(
689
+ zip(tokens, analysis), start=1
690
+ ):
691
+ head_address = head_index + 1
692
+
693
+ node = graph.nodes[address]
694
+ node.update({"word": token, "address": address})
695
+
696
+ if head_address == 0:
697
+ rel = "ROOT"
698
+ else:
699
+ rel = ""
700
+ graph.nodes[head_index + 1]["deps"][rel].append(address)
701
+
702
+ # TODO: check for cycles
703
+ yield graph
704
+
705
+
706
+ #################################################################
707
+ # Demos
708
+ #################################################################
709
+
710
+
711
+ def demo():
712
+ # hall_demo()
713
+ nonprojective_conll_parse_demo()
714
+ rule_based_demo()
715
+
716
+
717
+ def hall_demo():
718
+ npp = ProbabilisticNonprojectiveParser()
719
+ npp.train([], DemoScorer())
720
+ for parse_graph in npp.parse(["v1", "v2", "v3"], [None, None, None]):
721
+ print(parse_graph)
722
+
723
+
724
+ def nonprojective_conll_parse_demo():
725
+ from nltk.parse.dependencygraph import conll_data2
726
+
727
+ graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
728
+ npp = ProbabilisticNonprojectiveParser()
729
+ npp.train(graphs, NaiveBayesDependencyScorer())
730
+ for parse_graph in npp.parse(
731
+ ["Cathy", "zag", "hen", "zwaaien", "."], ["N", "V", "Pron", "Adj", "N", "Punc"]
732
+ ):
733
+ print(parse_graph)
734
+
735
+
736
+ def rule_based_demo():
737
+ from nltk.grammar import DependencyGrammar
738
+
739
+ grammar = DependencyGrammar.fromstring(
740
+ """
741
+ 'taught' -> 'play' | 'man'
742
+ 'man' -> 'the' | 'in'
743
+ 'in' -> 'corner'
744
+ 'corner' -> 'the'
745
+ 'play' -> 'golf' | 'dachshund' | 'to'
746
+ 'dachshund' -> 'his'
747
+ """
748
+ )
749
+ print(grammar)
750
+ ndp = NonprojectiveDependencyParser(grammar)
751
+ graphs = ndp.parse(
752
+ [
753
+ "the",
754
+ "man",
755
+ "in",
756
+ "the",
757
+ "corner",
758
+ "taught",
759
+ "his",
760
+ "dachshund",
761
+ "to",
762
+ "play",
763
+ "golf",
764
+ ]
765
+ )
766
+ print("Graphs:")
767
+ for graph in graphs:
768
+ print(graph)
769
+
770
+
771
+ if __name__ == "__main__":
772
+ demo()
venv/lib/python3.10/site-packages/nltk/parse/stanford.py ADDED
@@ -0,0 +1,470 @@
1
+ # Natural Language Toolkit: Interface to the Stanford Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Xu <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import os
10
+ import tempfile
11
+ import warnings
12
+ from subprocess import PIPE
13
+
14
+ from nltk.internals import (
15
+ _java_options,
16
+ config_java,
17
+ find_jar_iter,
18
+ find_jars_within_path,
19
+ java,
20
+ )
21
+ from nltk.parse.api import ParserI
22
+ from nltk.parse.dependencygraph import DependencyGraph
23
+ from nltk.tree import Tree
24
+
25
+ _stanford_url = "https://nlp.stanford.edu/software/lex-parser.shtml"
26
+
27
+
28
+ class GenericStanfordParser(ParserI):
29
+ """Interface to the Stanford Parser"""
30
+
31
+ _MODEL_JAR_PATTERN = r"stanford-parser-(\d+)(\.(\d+))+-models\.jar"
32
+ _JAR = r"stanford-parser\.jar"
33
+ _MAIN_CLASS = "edu.stanford.nlp.parser.lexparser.LexicalizedParser"
34
+
35
+ _USE_STDIN = False
36
+ _DOUBLE_SPACED_OUTPUT = False
37
+
38
+ def __init__(
39
+ self,
40
+ path_to_jar=None,
41
+ path_to_models_jar=None,
42
+ model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz",
43
+ encoding="utf8",
44
+ verbose=False,
45
+ java_options="-mx4g",
46
+ corenlp_options="",
47
+ ):
48
+
49
+ # find the most recent code and model jar
50
+ stanford_jar = max(
51
+ find_jar_iter(
52
+ self._JAR,
53
+ path_to_jar,
54
+ env_vars=("STANFORD_PARSER", "STANFORD_CORENLP"),
55
+ searchpath=(),
56
+ url=_stanford_url,
57
+ verbose=verbose,
58
+ is_regex=True,
59
+ ),
60
+ key=lambda model_path: os.path.dirname(model_path),
61
+ )
62
+
63
+ model_jar = max(
64
+ find_jar_iter(
65
+ self._MODEL_JAR_PATTERN,
66
+ path_to_models_jar,
67
+ env_vars=("STANFORD_MODELS", "STANFORD_CORENLP"),
68
+ searchpath=(),
69
+ url=_stanford_url,
70
+ verbose=verbose,
71
+ is_regex=True,
72
+ ),
73
+ key=lambda model_path: os.path.dirname(model_path),
74
+ )
75
+
76
+ # self._classpath = (stanford_jar, model_jar)
77
+
78
+ # Adding logging jar files to classpath
79
+ stanford_dir = os.path.split(stanford_jar)[0]
80
+ self._classpath = tuple([model_jar] + find_jars_within_path(stanford_dir))
81
+
82
+ self.model_path = model_path
83
+ self._encoding = encoding
84
+ self.corenlp_options = corenlp_options
85
+ self.java_options = java_options
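As an editorial sketch (hypothetical paths): if the jars are not passed in explicitly, the environment variables consulted by ``find_jar_iter`` above can be set from Python before constructing a parser:

>>> import os
>>> os.environ['STANFORD_PARSER'] = '/home/user/stanford-parser-full-2018-10-17'  # hypothetical
>>> os.environ['STANFORD_MODELS'] = '/home/user/stanford-parser-full-2018-10-17'  # hypothetical
>>> parser = StanfordParser(model_path='edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz')  # doctest: +SKIP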
86
+
87
+ def _parse_trees_output(self, output_):
88
+ res = []
89
+ cur_lines = []
90
+ cur_trees = []
91
+ blank = False
92
+ for line in output_.splitlines(False):
93
+ if line == "":
94
+ if blank:
95
+ res.append(iter(cur_trees))
96
+ cur_trees = []
97
+ blank = False
98
+ elif self._DOUBLE_SPACED_OUTPUT:
99
+ cur_trees.append(self._make_tree("\n".join(cur_lines)))
100
+ cur_lines = []
101
+ blank = True
102
+ else:
103
+ res.append(iter([self._make_tree("\n".join(cur_lines))]))
104
+ cur_lines = []
105
+ else:
106
+ cur_lines.append(line)
107
+ blank = False
108
+ return iter(res)
109
+
110
+ def parse_sents(self, sentences, verbose=False):
111
+ """
112
+ Use StanfordParser to parse multiple sentences. Takes multiple sentences as a
113
+ list where each sentence is a list of words.
114
+ Each sentence is treated as already tokenized; the Stanford Parser assigns
115
+ POS tags itself before parsing.
116
+ If whitespace exists inside a token, the token will be treated as
118
+ separate tokens.
118
+
119
+ :param sentences: Input sentences to parse
120
+ :type sentences: list(list(str))
121
+ :rtype: iter(iter(Tree))
122
+ """
123
+ cmd = [
124
+ self._MAIN_CLASS,
125
+ "-model",
126
+ self.model_path,
127
+ "-sentences",
128
+ "newline",
129
+ "-outputFormat",
130
+ self._OUTPUT_FORMAT,
131
+ "-tokenized",
132
+ "-escaper",
133
+ "edu.stanford.nlp.process.PTBEscapingProcessor",
134
+ ]
135
+ return self._parse_trees_output(
136
+ self._execute(
137
+ cmd, "\n".join(" ".join(sentence) for sentence in sentences), verbose
138
+ )
139
+ )
140
+
141
+ def raw_parse(self, sentence, verbose=False):
142
+ """
143
+ Use StanfordParser to parse a sentence. Takes a sentence as a string;
144
+ before parsing, it will be automatically tokenized and tagged by
145
+ the Stanford Parser.
146
+
147
+ :param sentence: Input sentence to parse
148
+ :type sentence: str
149
+ :rtype: iter(Tree)
150
+ """
151
+ return next(self.raw_parse_sents([sentence], verbose))
152
+
153
+ def raw_parse_sents(self, sentences, verbose=False):
154
+ """
155
+ Use StanfordParser to parse multiple sentences. Takes multiple sentences as a
156
+ list of strings.
157
+ Each sentence will be automatically tokenized and tagged by the Stanford Parser.
158
+
159
+ :param sentences: Input sentences to parse
160
+ :type sentences: list(str)
161
+ :rtype: iter(iter(Tree))
162
+ """
163
+ cmd = [
164
+ self._MAIN_CLASS,
165
+ "-model",
166
+ self.model_path,
167
+ "-sentences",
168
+ "newline",
169
+ "-outputFormat",
170
+ self._OUTPUT_FORMAT,
171
+ ]
172
+ return self._parse_trees_output(
173
+ self._execute(cmd, "\n".join(sentences), verbose)
174
+ )
175
+
176
+ def tagged_parse(self, sentence, verbose=False):
177
+ """
178
+ Use StanfordParser to parse a sentence. Takes a sentence as a list of
179
+ (word, tag) tuples; the sentence must have already been tokenized and
180
+ tagged.
181
+
182
+ :param sentence: Input sentence to parse
183
+ :type sentence: list(tuple(str, str))
184
+ :rtype: iter(Tree)
185
+ """
186
+ return next(self.tagged_parse_sents([sentence], verbose))
187
+
188
+ def tagged_parse_sents(self, sentences, verbose=False):
189
+ """
190
+ Use StanfordParser to parse multiple sentences. Takes multiple sentences
191
+ where each sentence is a list of (word, tag) tuples.
192
+ The sentences must have already been tokenized and tagged.
193
+
194
+ :param sentences: Input sentences to parse
195
+ :type sentences: list(list(tuple(str, str)))
196
+ :rtype: iter(iter(Tree))
197
+ """
198
+ tag_separator = "/"
199
+ cmd = [
200
+ self._MAIN_CLASS,
201
+ "-model",
202
+ self.model_path,
203
+ "-sentences",
204
+ "newline",
205
+ "-outputFormat",
206
+ self._OUTPUT_FORMAT,
207
+ "-tokenized",
208
+ "-tagSeparator",
209
+ tag_separator,
210
+ "-tokenizerFactory",
211
+ "edu.stanford.nlp.process.WhitespaceTokenizer",
212
+ "-tokenizerMethod",
213
+ "newCoreLabelTokenizerFactory",
214
+ ]
215
+ # We don't need to escape slashes as "splitting is done on the last instance of the character in the token"
216
+ return self._parse_trees_output(
217
+ self._execute(
218
+ cmd,
219
+ "\n".join(
220
+ " ".join(tag_separator.join(tagged) for tagged in sentence)
221
+ for sentence in sentences
222
+ ),
223
+ verbose,
224
+ )
225
+ )
226
+
227
+ def _execute(self, cmd, input_, verbose=False):
228
+ encoding = self._encoding
229
+ cmd.extend(["-encoding", encoding])
230
+ if self.corenlp_options:
231
+ cmd.extend(self.corenlp_options.split())
232
+
233
+ default_options = " ".join(_java_options)
234
+
235
+ # Configure java.
236
+ config_java(options=self.java_options, verbose=verbose)
237
+
238
+ # Windows is incompatible with NamedTemporaryFile() without passing in delete=False.
239
+ with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file:
240
+ # Write the actual sentences to the temporary input file
241
+ if isinstance(input_, str) and encoding:
242
+ input_ = input_.encode(encoding)
243
+ input_file.write(input_)
244
+ input_file.flush()
245
+
246
+ # Run the tagger and get the output.
247
+ if self._USE_STDIN:
248
+ input_file.seek(0)
249
+ stdout, stderr = java(
250
+ cmd,
251
+ classpath=self._classpath,
252
+ stdin=input_file,
253
+ stdout=PIPE,
254
+ stderr=PIPE,
255
+ )
256
+ else:
257
+ cmd.append(input_file.name)
258
+ stdout, stderr = java(
259
+ cmd, classpath=self._classpath, stdout=PIPE, stderr=PIPE
260
+ )
261
+
262
+ stdout = stdout.replace(b"\xc2\xa0", b" ")
263
+ stdout = stdout.replace(b"\x00\xa0", b" ")
264
+ stdout = stdout.decode(encoding)
265
+
266
+ os.unlink(input_file.name)
267
+
268
+ # Return java configurations to their default values.
269
+ config_java(options=default_options, verbose=False)
270
+
271
+ return stdout
272
+
273
+
274
+ class StanfordParser(GenericStanfordParser):
275
+ """
276
+ >>> parser=StanfordParser(
277
+ ... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
278
+ ... ) # doctest: +SKIP
279
+
280
+ >>> list(parser.raw_parse("the quick brown fox jumps over the lazy dog")) # doctest: +NORMALIZE_WHITESPACE +SKIP
281
+ [Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
282
+ Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']),
283
+ Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])])]
284
+
285
+ >>> sum([list(dep_graphs) for dep_graphs in parser.raw_parse_sents((
286
+ ... "the quick brown fox jumps over the lazy dog",
287
+ ... "the quick grey wolf jumps over the lazy fox"
288
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
289
+ [Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
290
+ Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']),
291
+ Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])]), Tree('ROOT', [Tree('NP',
292
+ [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['grey']), Tree('NN', ['wolf'])]), Tree('NP',
293
+ [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), Tree('NP', [Tree('DT', ['the']),
294
+ Tree('JJ', ['lazy']), Tree('NN', ['fox'])])])])])])]
295
+
296
+ >>> sum([list(dep_graphs) for dep_graphs in parser.parse_sents((
297
+ ... "I 'm a dog".split(),
298
+ ... "This is my friends ' cat ( the tabby )".split(),
299
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
300
+ [Tree('ROOT', [Tree('S', [Tree('NP', [Tree('PRP', ['I'])]), Tree('VP', [Tree('VBP', ["'m"]),
301
+ Tree('NP', [Tree('DT', ['a']), Tree('NN', ['dog'])])])])]), Tree('ROOT', [Tree('S', [Tree('NP',
302
+ [Tree('DT', ['This'])]), Tree('VP', [Tree('VBZ', ['is']), Tree('NP', [Tree('NP', [Tree('NP', [Tree('PRP$', ['my']),
303
+ Tree('NNS', ['friends']), Tree('POS', ["'"])]), Tree('NN', ['cat'])]), Tree('PRN', [Tree('-LRB-', [Tree('', []),
304
+ Tree('NP', [Tree('DT', ['the']), Tree('NN', ['tabby'])]), Tree('-RRB-', [])])])])])])])]
305
+
306
+ >>> sum([list(dep_graphs) for dep_graphs in parser.tagged_parse_sents((
307
+ ... (
308
+ ... ("The", "DT"),
309
+ ... ("quick", "JJ"),
310
+ ... ("brown", "JJ"),
311
+ ... ("fox", "NN"),
312
+ ... ("jumped", "VBD"),
313
+ ... ("over", "IN"),
314
+ ... ("the", "DT"),
315
+ ... ("lazy", "JJ"),
316
+ ... ("dog", "NN"),
317
+ ... (".", "."),
318
+ ... ),
319
+ ... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP
320
+ [Tree('ROOT', [Tree('S', [Tree('NP', [Tree('DT', ['The']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
321
+ Tree('NN', ['fox'])]), Tree('VP', [Tree('VBD', ['jumped']), Tree('PP', [Tree('IN', ['over']), Tree('NP',
322
+ [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])]), Tree('.', ['.'])])])]
323
+ """
324
+
325
+ _OUTPUT_FORMAT = "penn"
326
+
327
+ def __init__(self, *args, **kwargs):
328
+ warnings.warn(
329
+ "The StanfordParser will be deprecated\n"
330
+ "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.",
331
+ DeprecationWarning,
332
+ stacklevel=2,
333
+ )
334
+
335
+ super().__init__(*args, **kwargs)
336
+
337
+ def _make_tree(self, result):
338
+ return Tree.fromstring(result)
339
+
340
+
341
+ class StanfordDependencyParser(GenericStanfordParser):
342
+
343
+ """
344
+ >>> dep_parser=StanfordDependencyParser(
345
+ ... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
346
+ ... ) # doctest: +SKIP
347
+
348
+ >>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
349
+ [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])])]
350
+
351
+ >>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
352
+ [[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')),
353
+ ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')),
354
+ ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')),
355
+ ((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]]
356
+
357
+ >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents((
358
+ ... "The quick brown fox jumps over the lazy dog.",
359
+ ... "The quick grey wolf jumps over the lazy fox."
360
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
361
+ [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])]),
362
+ Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']), Tree('fox', ['over', 'the', 'lazy'])])]
363
+
364
+ >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents((
365
+ ... "I 'm a dog".split(),
366
+ ... "This is my friends ' cat ( the tabby )".split(),
367
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
368
+ [Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends', ['my', "'"]), Tree('tabby', ['the'])])]
369
+
370
+ >>> sum([[list(parse.triples()) for parse in dep_graphs] for dep_graphs in dep_parser.tagged_parse_sents((
371
+ ... (
372
+ ... ("The", "DT"),
373
+ ... ("quick", "JJ"),
374
+ ... ("brown", "JJ"),
375
+ ... ("fox", "NN"),
376
+ ... ("jumped", "VBD"),
377
+ ... ("over", "IN"),
378
+ ... ("the", "DT"),
379
+ ... ("lazy", "JJ"),
380
+ ... ("dog", "NN"),
381
+ ... (".", "."),
382
+ ... ),
383
+ ... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP
384
+ [[((u'jumped', u'VBD'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')),
385
+ ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')),
386
+ ((u'jumped', u'VBD'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')),
387
+ ((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]]
388
+
389
+ """
390
+
391
+ _OUTPUT_FORMAT = "conll2007"
392
+
393
+ def __init__(self, *args, **kwargs):
394
+ warnings.warn(
395
+ "The StanfordDependencyParser will be deprecated\n"
396
+ "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.",
397
+ DeprecationWarning,
398
+ stacklevel=2,
399
+ )
400
+
401
+ super().__init__(*args, **kwargs)
402
+
403
+ def _make_tree(self, result):
404
+ return DependencyGraph(result, top_relation_label="root")
405
+
406
+
407
+ class StanfordNeuralDependencyParser(GenericStanfordParser):
408
+ """
409
+ >>> from nltk.parse.stanford import StanfordNeuralDependencyParser # doctest: +SKIP
410
+ >>> dep_parser=StanfordNeuralDependencyParser(java_options='-mx4g')# doctest: +SKIP
411
+
412
+ >>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
413
+ [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy']), '.'])]
414
+
415
+ >>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
416
+ [[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det',
417
+ (u'The', u'DT')), ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'),
418
+ u'amod', (u'brown', u'JJ')), ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')),
419
+ ((u'dog', u'NN'), u'case', (u'over', u'IN')), ((u'dog', u'NN'), u'det',
420
+ (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ')), ((u'jumps', u'VBZ'),
421
+ u'punct', (u'.', u'.'))]]
422
+
423
+ >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents((
424
+ ... "The quick brown fox jumps over the lazy dog.",
425
+ ... "The quick grey wolf jumps over the lazy fox."
426
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
427
+ [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over',
428
+ 'the', 'lazy']), '.']), Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']),
429
+ Tree('fox', ['over', 'the', 'lazy']), '.'])]
430
+
431
+ >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents((
432
+ ... "I 'm a dog".split(),
433
+ ... "This is my friends ' cat ( the tabby )".split(),
434
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
435
+ [Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends',
436
+ ['my', "'"]), Tree('tabby', ['-LRB-', 'the', '-RRB-'])])]
437
+ """
438
+
439
+ _OUTPUT_FORMAT = "conll"
440
+ _MAIN_CLASS = "edu.stanford.nlp.pipeline.StanfordCoreNLP"
441
+ _JAR = r"stanford-corenlp-(\d+)(\.(\d+))+\.jar"
442
+ _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)(\.(\d+))+-models\.jar"
443
+ _USE_STDIN = True
444
+ _DOUBLE_SPACED_OUTPUT = True
445
+
446
+ def __init__(self, *args, **kwargs):
447
+ warnings.warn(
448
+ "The StanfordNeuralDependencyParser will be deprecated\n"
449
+ "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.",
450
+ DeprecationWarning,
451
+ stacklevel=2,
452
+ )
453
+
454
+ super().__init__(*args, **kwargs)
455
+ self.corenlp_options += "-annotators tokenize,ssplit,pos,depparse"
456
+
457
+ def tagged_parse_sents(self, sentences, verbose=False):
458
+ """
459
+ Currently unimplemented because the neural dependency parser (and
460
+ the StanfordCoreNLP pipeline class) doesn't support passing in pre-
461
+ tagged tokens.
462
+ """
463
+ raise NotImplementedError(
464
+ "tagged_parse[_sents] is not supported by "
465
+ "StanfordNeuralDependencyParser; use "
466
+ "parse[_sents] or raw_parse[_sents] instead."
467
+ )
468
+
469
+ def _make_tree(self, result):
470
+ return DependencyGraph(result, top_relation_label="ROOT")
venv/lib/python3.10/site-packages/nltk/parse/transitionparser.py ADDED
@@ -0,0 +1,794 @@
1
+ # Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers
2
+ #
3
+ # Author: Long Duong <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import pickle
10
+ import tempfile
11
+ from copy import deepcopy
12
+ from operator import itemgetter
13
+ from os import remove
14
+
15
+ try:
16
+ from numpy import array
17
+ from scipy import sparse
18
+ from sklearn import svm
19
+ from sklearn.datasets import load_svmlight_file
20
+ except ImportError:
21
+ pass
22
+
23
+ from nltk.parse import DependencyEvaluator, DependencyGraph, ParserI
24
+
25
+
26
+ class Configuration:
27
+ """
28
+ Class for holding a configuration, which is the partial analysis of the input sentence.
29
+ The transition-based parser aims at finding a set of operators that transfer the initial
30
+ configuration to the terminal configuration.
31
+
32
+ The configuration includes:
33
+ - Stack: for storing partially processed words
34
+ - Buffer: for storing remaining input words
35
+ - Set of arcs: for storing partially built dependency tree
36
+
37
+ This class also provides a method to represent a configuration as a list of features.
38
+ """
39
+
40
+ def __init__(self, dep_graph):
41
+ """
42
+ :param dep_graph: the representation of an input in the form of dependency graph.
43
+ :type dep_graph: DependencyGraph where the dependencies are not specified.
44
+ """
45
+ # dep_graph.nodes contain list of token for a sentence
46
+ self.stack = [0] # The root element
47
+ self.buffer = list(range(1, len(dep_graph.nodes))) # The rest is in the buffer
48
+ self.arcs = [] # empty set of arc
49
+ self._tokens = dep_graph.nodes
50
+ self._max_address = len(self.buffer)
51
+
52
+ def __str__(self):
53
+ return (
54
+ "Stack : "
55
+ + str(self.stack)
56
+ + " Buffer : "
57
+ + str(self.buffer)
58
+ + " Arcs : "
59
+ + str(self.arcs)
60
+ )
61
+
62
+ def _check_informative(self, feat, flag=False):
63
+ """
64
+ Check whether a feature is informative
65
+ The flag controls whether "_" is treated as informative or not
66
+ """
67
+ if feat is None:
68
+ return False
69
+ if feat == "":
70
+ return False
71
+ if flag is False:
72
+ if feat == "_":
73
+ return False
74
+ return True
75
+
76
+ def extract_features(self):
77
+ """
78
+ Extract the set of features for the current configuration. Implements the standard features described in
79
+ Table 3.2 (page 31) of the Dependency Parsing book by Sandra Kubler, Ryan McDonald, and Joakim Nivre.
80
+ Please note that these features are very basic.
81
+ :return: list(str)
82
+ """
83
+ result = []
84
+ # Todo : can come up with more complicated features set for better
85
+ # performance.
86
+ if len(self.stack) > 0:
87
+ # Stack 0
88
+ stack_idx0 = self.stack[len(self.stack) - 1]
89
+ token = self._tokens[stack_idx0]
90
+ if self._check_informative(token["word"], True):
91
+ result.append("STK_0_FORM_" + token["word"])
92
+ if "lemma" in token and self._check_informative(token["lemma"]):
93
+ result.append("STK_0_LEMMA_" + token["lemma"])
94
+ if self._check_informative(token["tag"]):
95
+ result.append("STK_0_POS_" + token["tag"])
96
+ if "feats" in token and self._check_informative(token["feats"]):
97
+ feats = token["feats"].split("|")
98
+ for feat in feats:
99
+ result.append("STK_0_FEATS_" + feat)
100
+ # Stack 1
101
+ if len(self.stack) > 1:
102
+ stack_idx1 = self.stack[len(self.stack) - 2]
103
+ token = self._tokens[stack_idx1]
104
+ if self._check_informative(token["tag"]):
105
+ result.append("STK_1_POS_" + token["tag"])
106
+
107
+ # Left most, right most dependency of stack[0]
108
+ left_most = 1000000
109
+ right_most = -1
110
+ dep_left_most = ""
111
+ dep_right_most = ""
112
+ for (wi, r, wj) in self.arcs:
113
+ if wi == stack_idx0:
114
+ if (wj > wi) and (wj > right_most):
115
+ right_most = wj
116
+ dep_right_most = r
117
+ if (wj < wi) and (wj < left_most):
118
+ left_most = wj
119
+ dep_left_most = r
120
+ if self._check_informative(dep_left_most):
121
+ result.append("STK_0_LDEP_" + dep_left_most)
122
+ if self._check_informative(dep_right_most):
123
+ result.append("STK_0_RDEP_" + dep_right_most)
124
+
125
+ # Check Buffered 0
126
+ if len(self.buffer) > 0:
127
+ # Buffer 0
128
+ buffer_idx0 = self.buffer[0]
129
+ token = self._tokens[buffer_idx0]
130
+ if self._check_informative(token["word"], True):
131
+ result.append("BUF_0_FORM_" + token["word"])
132
+ if "lemma" in token and self._check_informative(token["lemma"]):
133
+ result.append("BUF_0_LEMMA_" + token["lemma"])
134
+ if self._check_informative(token["tag"]):
135
+ result.append("BUF_0_POS_" + token["tag"])
136
+ if "feats" in token and self._check_informative(token["feats"]):
137
+ feats = token["feats"].split("|")
138
+ for feat in feats:
139
+ result.append("BUF_0_FEATS_" + feat)
140
+ # Buffer 1
141
+ if len(self.buffer) > 1:
142
+ buffer_idx1 = self.buffer[1]
143
+ token = self._tokens[buffer_idx1]
144
+ if self._check_informative(token["word"], True):
145
+ result.append("BUF_1_FORM_" + token["word"])
146
+ if self._check_informative(token["tag"]):
147
+ result.append("BUF_1_POS_" + token["tag"])
148
+ if len(self.buffer) > 2:
149
+ buffer_idx2 = self.buffer[2]
150
+ token = self._tokens[buffer_idx2]
151
+ if self._check_informative(token["tag"]):
152
+ result.append("BUF_2_POS_" + token["tag"])
153
+ if len(self.buffer) > 3:
154
+ buffer_idx3 = self.buffer[3]
155
+ token = self._tokens[buffer_idx3]
156
+ if self._check_informative(token["tag"]):
157
+ result.append("BUF_3_POS_" + token["tag"])
158
+ # Left most, right most dependency of buffer[0]
159
+ left_most = 1000000
160
+ right_most = -1
161
+ dep_left_most = ""
162
+ dep_right_most = ""
163
+ for (wi, r, wj) in self.arcs:
164
+ if wi == buffer_idx0:
165
+ if (wj > wi) and (wj > right_most):
166
+ right_most = wj
167
+ dep_right_most = r
168
+ if (wj < wi) and (wj < left_most):
169
+ left_most = wj
170
+ dep_left_most = r
171
+ if self._check_informative(dep_left_most):
172
+ result.append("BUF_0_LDEP_" + dep_left_most)
173
+ if self._check_informative(dep_right_most):
174
+ result.append("BUF_0_RDEP_" + dep_right_most)
175
+
176
+ return result
177
+
178
+
179
+ class Transition:
180
+ """
181
+ This class defines the set of transitions which are applied to a configuration to obtain another configuration.
182
+ Note that the available transitions differ between parsing algorithms.
183
+ """
184
+
185
+ # Define set of transitions
186
+ LEFT_ARC = "LEFTARC"
187
+ RIGHT_ARC = "RIGHTARC"
188
+ SHIFT = "SHIFT"
189
+ REDUCE = "REDUCE"
190
+
191
+ def __init__(self, alg_option):
192
+ """
193
+ :param alg_option: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm
194
+ :type alg_option: str
195
+ """
196
+ self._algo = alg_option
197
+ if alg_option not in [
198
+ TransitionParser.ARC_STANDARD,
199
+ TransitionParser.ARC_EAGER,
200
+ ]:
201
+ raise ValueError(
202
+ " Currently we only support %s and %s "
203
+ % (TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER)
204
+ )
205
+
206
+ def left_arc(self, conf, relation):
207
+ """
208
+ Note that the left-arc algorithm is quite similar for arc-standard and arc-eager, differing only in the precondition.
209
+
210
+ :param conf: the current configuration
211
+ :return: A new configuration or -1 if the pre-condition is not satisfied
212
+ """
213
+ if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
214
+ return -1
215
+ if conf.buffer[0] == 0:
216
+ # here is the Root element
217
+ return -1
218
+
219
+ idx_wi = conf.stack[len(conf.stack) - 1]
220
+
221
+ flag = True
222
+ if self._algo == TransitionParser.ARC_EAGER:
223
+ for (idx_parent, r, idx_child) in conf.arcs:
224
+ if idx_child == idx_wi:
225
+ flag = False
226
+
227
+ if flag:
228
+ conf.stack.pop()
229
+ idx_wj = conf.buffer[0]
230
+ conf.arcs.append((idx_wj, relation, idx_wi))
231
+ else:
232
+ return -1
233
+
234
+ def right_arc(self, conf, relation):
235
+ """
236
+ Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager
237
+
238
+ :param conf: the current configuration
239
+ :return: A new configuration or -1 if the pre-condition is not satisfied
240
+ """
241
+ if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
242
+ return -1
243
+ if self._algo == TransitionParser.ARC_STANDARD:
244
+ idx_wi = conf.stack.pop()
245
+ idx_wj = conf.buffer[0]
246
+ conf.buffer[0] = idx_wi
247
+ conf.arcs.append((idx_wi, relation, idx_wj))
248
+ else: # arc-eager
249
+ idx_wi = conf.stack[len(conf.stack) - 1]
250
+ idx_wj = conf.buffer.pop(0)
251
+ conf.stack.append(idx_wj)
252
+ conf.arcs.append((idx_wi, relation, idx_wj))
253
+
254
+ def reduce(self, conf):
255
+ """
256
+ Note that the algorithm for reduce is only available for arc-eager
257
+
258
+ :param conf: the current configuration
259
+ :return: A new configuration or -1 if the pre-condition is not satisfied
260
+ """
261
+
262
+ if self._algo != TransitionParser.ARC_EAGER:
263
+ return -1
264
+ if len(conf.stack) <= 0:
265
+ return -1
266
+
267
+ idx_wi = conf.stack[len(conf.stack) - 1]
268
+ flag = False
269
+ for (idx_parent, r, idx_child) in conf.arcs:
270
+ if idx_child == idx_wi:
271
+ flag = True
272
+ if flag:
273
+ conf.stack.pop() # reduce it
274
+ else:
275
+ return -1
276
+
277
+ def shift(self, conf):
278
+ """
279
+ Note that the algorithm for shift is the SAME for arc-standard and arc-eager
280
+
281
+ :param conf: the current configuration
282
+ :return: A new configuration or -1 if the pre-condition is not satisfied
283
+ """
284
+ if len(conf.buffer) <= 0:
285
+ return -1
286
+ idx_wi = conf.buffer.pop(0)
287
+ conf.stack.append(idx_wi)
288
+
289
+
290
+ class TransitionParser(ParserI):
291
+
292
+ """
293
+ Class for a transition-based parser. Implements two algorithms: "arc-standard" and "arc-eager".
294
+ """
295
+
296
+ ARC_STANDARD = "arc-standard"
297
+ ARC_EAGER = "arc-eager"
298
+
299
+ def __init__(self, algorithm):
300
+ """
301
+ :param algorithm: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm
302
+ :type algorithm: str
303
+ """
304
+ if not (algorithm in [self.ARC_STANDARD, self.ARC_EAGER]):
305
+ raise ValueError(
306
+ " Currently we only support %s and %s "
307
+ % (self.ARC_STANDARD, self.ARC_EAGER)
308
+ )
309
+ self._algorithm = algorithm
310
+
311
+ self._dictionary = {}
312
+ self._transition = {}
313
+ self._match_transition = {}
314
+
315
+ def _get_dep_relation(self, idx_parent, idx_child, depgraph):
316
+ p_node = depgraph.nodes[idx_parent]
317
+ c_node = depgraph.nodes[idx_child]
318
+
319
+ if c_node["word"] is None:
320
+ return None # Root word
321
+
322
+ if c_node["head"] == p_node["address"]:
323
+ return c_node["rel"]
324
+ else:
325
+ return None
326
+
327
+ def _convert_to_binary_features(self, features):
328
+ """
329
+ :param features: list of feature string which is needed to convert to binary features
330
+ :type features: list(str)
331
+ :return : string of binary features in libsvm format which is 'featureID:value' pairs
332
+ """
333
+ unsorted_result = []
334
+ for feature in features:
335
+ self._dictionary.setdefault(feature, len(self._dictionary))
336
+ unsorted_result.append(self._dictionary[feature])
337
+
338
+ # Default value of each feature is 1.0
339
+ return " ".join(
340
+ str(featureID) + ":1.0" for featureID in sorted(unsorted_result)
341
+ )
342
+
343
+ def _is_projective(self, depgraph):
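# A dependency graph is projective if no two arcs cross: the check below collects every
# (head, dependent) pair and rejects the graph if any position strictly between an arc's
# endpoints is linked, in either direction, to a position outside that arc's span.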
344
+ arc_list = []
345
+ for key in depgraph.nodes:
346
+ node = depgraph.nodes[key]
347
+
348
+ if "head" in node:
349
+ childIdx = node["address"]
350
+ parentIdx = node["head"]
351
+ if parentIdx is not None:
352
+ arc_list.append((parentIdx, childIdx))
353
+
354
+ for (parentIdx, childIdx) in arc_list:
355
+ # Ensure that childIdx < parentIdx
356
+ if childIdx > parentIdx:
357
+ temp = childIdx
358
+ childIdx = parentIdx
359
+ parentIdx = temp
360
+ for k in range(childIdx + 1, parentIdx):
361
+ for m in range(len(depgraph.nodes)):
362
+ if (m < childIdx) or (m > parentIdx):
363
+ if (k, m) in arc_list:
364
+ return False
365
+ if (m, k) in arc_list:
366
+ return False
367
+ return True
368
+
369
+ def _write_to_file(self, key, binary_features, input_file):
370
+ """
371
+ write the binary features to input file and update the transition dictionary
372
+ """
373
+ self._transition.setdefault(key, len(self._transition) + 1)
374
+ self._match_transition[self._transition[key]] = key
375
+
376
+ input_str = str(self._transition[key]) + " " + binary_features + "\n"
377
+ input_file.write(input_str.encode("utf-8"))
378
+
379
+ def _create_training_examples_arc_std(self, depgraphs, input_file):
380
+ """
381
+ Create the training examples in libsvm format and write them to input_file.
382
+ Reference : Page 32, Chapter 3. Dependency Parsing by Sandra Kubler, Ryan McDonald and Joakim Nivre (2009)
383
+ """
384
+ operation = Transition(self.ARC_STANDARD)
385
+ count_proj = 0
386
+ training_seq = []
387
+
388
+ for depgraph in depgraphs:
389
+ if not self._is_projective(depgraph):
390
+ continue
391
+
392
+ count_proj += 1
393
+ conf = Configuration(depgraph)
394
+ while len(conf.buffer) > 0:
395
+ b0 = conf.buffer[0]
396
+ features = conf.extract_features()
397
+ binary_features = self._convert_to_binary_features(features)
398
+
399
+ if len(conf.stack) > 0:
400
+ s0 = conf.stack[len(conf.stack) - 1]
401
+ # Left-arc operation
402
+ rel = self._get_dep_relation(b0, s0, depgraph)
403
+ if rel is not None:
404
+ key = Transition.LEFT_ARC + ":" + rel
405
+ self._write_to_file(key, binary_features, input_file)
406
+ operation.left_arc(conf, rel)
407
+ training_seq.append(key)
408
+ continue
409
+
410
+ # Right-arc operation
411
+ rel = self._get_dep_relation(s0, b0, depgraph)
412
+ if rel is not None:
413
+ precondition = True
414
+ # Get the max-index of buffer
415
+ maxID = conf._max_address
416
+
417
+ for w in range(maxID + 1):
418
+ if w != b0:
419
+ relw = self._get_dep_relation(b0, w, depgraph)
420
+ if relw is not None:
421
+ if (b0, relw, w) not in conf.arcs:
422
+ precondition = False
423
+
424
+ if precondition:
425
+ key = Transition.RIGHT_ARC + ":" + rel
426
+ self._write_to_file(key, binary_features, input_file)
427
+ operation.right_arc(conf, rel)
428
+ training_seq.append(key)
429
+ continue
430
+
431
+ # Shift operation as the default
432
+ key = Transition.SHIFT
433
+ self._write_to_file(key, binary_features, input_file)
434
+ operation.shift(conf)
435
+ training_seq.append(key)
436
+
437
+ print(" Number of training examples : " + str(len(depgraphs)))
438
+ print(" Number of valid (projective) examples : " + str(count_proj))
439
+ return training_seq
440
+
441
+ def _create_training_examples_arc_eager(self, depgraphs, input_file):
442
+ """
443
+ Create the training examples in libsvm format and write them to input_file.
444
+ Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Yoav Goldberg and Joakim Nivre
445
+ """
446
+ operation = Transition(self.ARC_EAGER)
447
+ countProj = 0
448
+ training_seq = []
449
+
450
+ for depgraph in depgraphs:
451
+ if not self._is_projective(depgraph):
452
+ continue
453
+
454
+ countProj += 1
455
+ conf = Configuration(depgraph)
456
+ while len(conf.buffer) > 0:
457
+ b0 = conf.buffer[0]
458
+ features = conf.extract_features()
459
+ binary_features = self._convert_to_binary_features(features)
460
+
461
+ if len(conf.stack) > 0:
462
+ s0 = conf.stack[len(conf.stack) - 1]
463
+ # Left-arc operation
464
+ rel = self._get_dep_relation(b0, s0, depgraph)
465
+ if rel is not None:
466
+ key = Transition.LEFT_ARC + ":" + rel
467
+ self._write_to_file(key, binary_features, input_file)
468
+ operation.left_arc(conf, rel)
469
+ training_seq.append(key)
470
+ continue
471
+
472
+ # Right-arc operation
473
+ rel = self._get_dep_relation(s0, b0, depgraph)
474
+ if rel is not None:
475
+ key = Transition.RIGHT_ARC + ":" + rel
476
+ self._write_to_file(key, binary_features, input_file)
477
+ operation.right_arc(conf, rel)
478
+ training_seq.append(key)
479
+ continue
480
+
481
+ # reduce operation
482
+ flag = False
483
+ for k in range(s0):
484
+ if self._get_dep_relation(k, b0, depgraph) is not None:
485
+ flag = True
486
+ if self._get_dep_relation(b0, k, depgraph) is not None:
487
+ flag = True
488
+ if flag:
489
+ key = Transition.REDUCE
490
+ self._write_to_file(key, binary_features, input_file)
491
+ operation.reduce(conf)
492
+ training_seq.append(key)
493
+ continue
494
+
495
+ # Shift operation as the default
496
+ key = Transition.SHIFT
497
+ self._write_to_file(key, binary_features, input_file)
498
+ operation.shift(conf)
499
+ training_seq.append(key)
500
+
501
+ print(" Number of training examples : " + str(len(depgraphs)))
502
+ print(" Number of valid (projective) examples : " + str(countProj))
503
+ return training_seq
504
+
505
+ def train(self, depgraphs, modelfile, verbose=True):
506
+ """
507
+ :param depgraphs : list of DependencyGraph as the training data
508
+ :type depgraphs : list(DependencyGraph)
509
+ :param modelfile : file name to save the trained model
510
+ :type modelfile : str
511
+ """
512
+
513
+ try:
514
+ input_file = tempfile.NamedTemporaryFile(
515
+ prefix="transition_parse.train", dir=tempfile.gettempdir(), delete=False
516
+ )
517
+
518
+ if self._algorithm == self.ARC_STANDARD:
519
+ self._create_training_examples_arc_std(depgraphs, input_file)
520
+ else:
521
+ self._create_training_examples_arc_eager(depgraphs, input_file)
522
+
523
+ input_file.close()
524
+ # Using the temporary file to train the libsvm classifier
525
+ x_train, y_train = load_svmlight_file(input_file.name)
526
+ # The parameter is set according to the paper:
527
+ # Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre
528
+ # Todo : because of probability = True => very slow due to
529
+ # cross-validation. Need to improve the speed here
530
+ model = svm.SVC(
531
+ kernel="poly",
532
+ degree=2,
533
+ coef0=0,
534
+ gamma=0.2,
535
+ C=0.5,
536
+ verbose=verbose,
537
+ probability=True,
538
+ )
539
+
540
+ model.fit(x_train, y_train)
541
+ # Save the model to file name (as pickle)
542
+ pickle.dump(model, open(modelfile, "wb"))
543
+ finally:
544
+ remove(input_file.name)
545
+
546
+ def parse(self, depgraphs, modelFile):
547
+ """
548
+ :param depgraphs: the list of test sentences; each sentence is represented as a dependency graph where the 'head' information is a dummy value
549
+ :type depgraphs: list(DependencyGraph)
550
+ :param modelfile: the model file
551
+ :type modelfile: str
552
+ :return: list (DependencyGraph) with the 'head' and 'rel' information
553
+ """
554
+ result = []
555
+ # First load the model
556
+ model = pickle.load(open(modelFile, "rb"))
557
+ operation = Transition(self._algorithm)
558
+
559
+ for depgraph in depgraphs:
560
+ conf = Configuration(depgraph)
561
+ while len(conf.buffer) > 0:
562
+ features = conf.extract_features()
563
+ col = []
564
+ row = []
565
+ data = []
566
+ for feature in features:
567
+ if feature in self._dictionary:
568
+ col.append(self._dictionary[feature])
569
+ row.append(0)
570
+ data.append(1.0)
571
+ np_col = array(sorted(col)) # NB : index must be sorted
572
+ np_row = array(row)
573
+ np_data = array(data)
574
+
575
+ x_test = sparse.csr_matrix(
576
+ (np_data, (np_row, np_col)), shape=(1, len(self._dictionary))
577
+ )
578
+
579
+ # It would be best to use the decision function as follows, BUT it's not yet supported for sparse SVM
580
+ # Using decision function to build the votes array
581
+ # dec_func = model.decision_function(x_test)[0]
582
+ # votes = {}
583
+ # k = 0
584
+ # for i in range(len(model.classes_)):
585
+ # for j in range(i+1, len(model.classes_)):
586
+ # #if dec_func[k] > 0:
587
+ # votes.setdefault(i,0)
588
+ # votes[i] +=1
589
+ # else:
590
+ # votes.setdefault(j,0)
591
+ # votes[j] +=1
592
+ # k +=1
593
+ # Sort votes according to the values
594
+ # sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True)
595
+
596
+ # We will use predict_proba instead of decision_function
597
+ prob_dict = {}
598
+ pred_prob = model.predict_proba(x_test)[0]
599
+ for i in range(len(pred_prob)):
600
+ prob_dict[i] = pred_prob[i]
601
+ sorted_Prob = sorted(prob_dict.items(), key=itemgetter(1), reverse=True)
602
+
603
+ # Note that SHIFT is always a valid operation
604
+ for (y_pred_idx, confidence) in sorted_Prob:
605
+ # y_pred = model.predict(x_test)[0]
606
+ # From the prediction match to the operation
607
+ y_pred = model.classes_[y_pred_idx]
608
+
609
+ if y_pred in self._match_transition:
610
+ strTransition = self._match_transition[y_pred]
611
+ baseTransition = strTransition.split(":")[0]
612
+
613
+ if baseTransition == Transition.LEFT_ARC:
614
+ if (
615
+ operation.left_arc(conf, strTransition.split(":")[1])
616
+ != -1
617
+ ):
618
+ break
619
+ elif baseTransition == Transition.RIGHT_ARC:
620
+ if (
621
+ operation.right_arc(conf, strTransition.split(":")[1])
622
+ != -1
623
+ ):
624
+ break
625
+ elif baseTransition == Transition.REDUCE:
626
+ if operation.reduce(conf) != -1:
627
+ break
628
+ elif baseTransition == Transition.SHIFT:
629
+ if operation.shift(conf) != -1:
630
+ break
631
+ else:
632
+ raise ValueError(
633
+ "The predicted transition is not recognized, expected errors"
634
+ )
635
+
636
+ # Finished with the operations; build the dependency graph from conf.arcs
637
+
638
+ new_depgraph = deepcopy(depgraph)
639
+ for key in new_depgraph.nodes:
640
+ node = new_depgraph.nodes[key]
641
+ node["rel"] = ""
642
+ # By default, all the tokens depend on the Root
643
+ node["head"] = 0
644
+ for (head, rel, child) in conf.arcs:
645
+ c_node = new_depgraph.nodes[child]
646
+ c_node["head"] = head
647
+ c_node["rel"] = rel
648
+ result.append(new_depgraph)
649
+
650
+ return result
651
+
652
+
653
+ def demo():
654
+ """
655
+ >>> from nltk.parse import DependencyGraph, DependencyEvaluator
656
+ >>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition
657
+ >>> gold_sent = DependencyGraph(\"""
658
+ ... Economic JJ 2 ATT
659
+ ... news NN 3 SBJ
660
+ ... has VBD 0 ROOT
661
+ ... little JJ 5 ATT
662
+ ... effect NN 3 OBJ
663
+ ... on IN 5 ATT
664
+ ... financial JJ 8 ATT
665
+ ... markets NNS 6 PC
666
+ ... . . 3 PU
667
+ ... \""")
668
+
669
+ >>> conf = Configuration(gold_sent)
670
+
671
+ ###################### Check the Initial Feature ########################
672
+
673
+ >>> print(', '.join(conf.extract_features()))
674
+ STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ
675
+
676
+ ###################### Check The Transition #######################
677
+ Check the Initialized Configuration
678
+ >>> print(conf)
679
+ Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : []
680
+
681
+ A. Do some transition checks for ARC-STANDARD
682
+
683
+ >>> operation = Transition('arc-standard')
684
+ >>> operation.shift(conf)
685
+ >>> operation.left_arc(conf, "ATT")
686
+ >>> operation.shift(conf)
687
+ >>> operation.left_arc(conf,"SBJ")
688
+ >>> operation.shift(conf)
689
+ >>> operation.shift(conf)
690
+ >>> operation.left_arc(conf, "ATT")
691
+ >>> operation.shift(conf)
692
+ >>> operation.shift(conf)
693
+ >>> operation.shift(conf)
694
+ >>> operation.left_arc(conf, "ATT")
695
+
696
+ Middle Configuration and Features Check
697
+ >>> print(conf)
698
+ Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)]
699
+
700
+ >>> print(', '.join(conf.extract_features()))
701
+ STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT
702
+
703
+ >>> operation.right_arc(conf, "PC")
704
+ >>> operation.right_arc(conf, "ATT")
705
+ >>> operation.right_arc(conf, "OBJ")
706
+ >>> operation.shift(conf)
707
+ >>> operation.right_arc(conf, "PU")
708
+ >>> operation.right_arc(conf, "ROOT")
709
+ >>> operation.shift(conf)
710
+
711
+ Terminated Configuration Check
712
+ >>> print(conf)
713
+ Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)]
714
+
715
+
716
+ B. Do some transition checks for ARC-EAGER
717
+
718
+ >>> conf = Configuration(gold_sent)
719
+ >>> operation = Transition('arc-eager')
720
+ >>> operation.shift(conf)
721
+ >>> operation.left_arc(conf,'ATT')
722
+ >>> operation.shift(conf)
723
+ >>> operation.left_arc(conf,'SBJ')
724
+ >>> operation.right_arc(conf,'ROOT')
725
+ >>> operation.shift(conf)
726
+ >>> operation.left_arc(conf,'ATT')
727
+ >>> operation.right_arc(conf,'OBJ')
728
+ >>> operation.right_arc(conf,'ATT')
729
+ >>> operation.shift(conf)
730
+ >>> operation.left_arc(conf,'ATT')
731
+ >>> operation.right_arc(conf,'PC')
732
+ >>> operation.reduce(conf)
733
+ >>> operation.reduce(conf)
734
+ >>> operation.reduce(conf)
735
+ >>> operation.right_arc(conf,'PU')
736
+ >>> print(conf)
737
+ Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)]
738
+
739
+ ###################### Check The Training Function #######################
740
+
741
+ A. Check the ARC-STANDARD training
742
+ >>> import tempfile
743
+ >>> import os
744
+ >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False)
745
+
746
+ >>> parser_std = TransitionParser('arc-standard')
747
+ >>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file)))
748
+ Number of training examples : 1
749
+ Number of valid (projective) examples : 1
750
+ SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT
751
+
752
+ >>> parser_std.train([gold_sent],'temp.arcstd.model', verbose=False)
753
+ Number of training examples : 1
754
+ Number of valid (projective) examples : 1
755
+ >>> input_file.close()
756
+ >>> remove(input_file.name)
757
+
758
+ B. Check the ARC-EAGER training
759
+
760
+ >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False)
761
+ >>> parser_eager = TransitionParser('arc-eager')
762
+ >>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file)))
763
+ Number of training examples : 1
764
+ Number of valid (projective) examples : 1
765
+ SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU
766
+
767
+ >>> parser_eager.train([gold_sent],'temp.arceager.model', verbose=False)
768
+ Number of training examples : 1
769
+ Number of valid (projective) examples : 1
770
+
771
+ >>> input_file.close()
772
+ >>> remove(input_file.name)
773
+
774
+ ###################### Check The Parsing Function ########################
775
+
776
+ A. Check the ARC-STANDARD parser
777
+
778
+ >>> result = parser_std.parse([gold_sent], 'temp.arcstd.model')
779
+ >>> de = DependencyEvaluator(result, [gold_sent])
780
+ >>> de.eval() >= (0, 0)
781
+ True
782
+
783
+ B. Check the ARC-EAGER parser
784
+ >>> result = parser_eager.parse([gold_sent], 'temp.arceager.model')
785
+ >>> de = DependencyEvaluator(result, [gold_sent])
786
+ >>> de.eval() >= (0, 0)
787
+ True
788
+
789
+ Remove test temporary files
790
+ >>> remove('temp.arceager.model')
791
+ >>> remove('temp.arcstd.model')
792
+
793
+ Note that the result is very poor because there is only one training example.
794
+ """
venv/lib/python3.10/site-packages/nltk/parse/viterbi.py ADDED
@@ -0,0 +1,453 @@
1
+ # Natural Language Toolkit: Viterbi Probabilistic Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from functools import reduce
10
+
11
+ from nltk.parse.api import ParserI
12
+ from nltk.tree import ProbabilisticTree, Tree
13
+
14
+ ##//////////////////////////////////////////////////////
15
+ ## Viterbi PCFG Parser
16
+ ##//////////////////////////////////////////////////////
17
+
18
+
19
+ class ViterbiParser(ParserI):
20
+ """
21
+ A bottom-up ``PCFG`` parser that uses dynamic programming to find
22
+ the single most likely parse for a text. The ``ViterbiParser`` parser
23
+ parses texts by filling in a "most likely constituent table".
24
+ This table records the most probable tree representation for any
25
+ given span and node value. In particular, it has an entry for
26
+ every start index, end index, and node value, recording the most
27
+ likely subtree that spans from the start index to the end index,
28
+ and has the given node value.
29
+
30
+ The ``ViterbiParser`` parser fills in this table incrementally. It starts
31
+ by filling in all entries for constituents that span one element
32
+ of text (i.e., entries where the end index is one greater than the
33
+ start index). After it has filled in all table entries for
34
+ constituents that span one element of text, it fills in the
35
+ entries for constituents that span two elements of text. It
36
+ continues filling in the entries for constituents spanning larger
37
+ and larger portions of the text, until the entire table has been
38
+ filled. Finally, it returns the table entry for a constituent
39
+ spanning the entire text, whose node value is the grammar's start
40
+ symbol.
41
+
42
+ In order to find the most likely constituent with a given span and
43
+ node value, the ``ViterbiParser`` parser considers all productions that
44
+ could produce that node value. For each production, it finds all
45
+ children that collectively cover the span and have the node values
46
+ specified by the production's right hand side. If the probability
47
+ of the tree formed by applying the production to the children is
48
+ greater than the probability of the current entry in the table,
49
+ then the table is updated with this new tree.
50
+
51
+ A pseudo-code description of the algorithm used by
52
+ ``ViterbiParser`` is:
53
+
54
+ | Create an empty most likely constituent table, *MLC*.
55
+ | For width in 1...len(text):
56
+ | For start in 1...len(text)-width:
57
+ | For prod in grammar.productions:
58
+ | For each sequence of subtrees [t[1], t[2], ..., t[n]] in MLC,
59
+ | where t[i].label()==prod.rhs[i],
60
+ | and the sequence covers [start:start+width]:
61
+ | old_p = MLC[start, start+width, prod.lhs]
62
+ | new_p = P(t[1])P(t[2])...P(t[n])P(prod)
63
+ | if new_p > old_p:
64
+ | new_tree = Tree(prod.lhs, t[1], t[2], ..., t[n])
65
+ | MLC[start, start+width, prod.lhs] = new_tree
66
+ | Return MLC[0, len(text), start_symbol]
67
+
68
+ :type _grammar: PCFG
69
+ :ivar _grammar: The grammar used to parse sentences.
70
+ :type _trace: int
71
+ :ivar _trace: The level of tracing output that should be generated
72
+ when parsing a text.
73
+ """
74
+
75
+ def __init__(self, grammar, trace=0):
76
+ """
77
+ Create a new ``ViterbiParser`` parser, that uses ``grammar`` to
78
+ parse texts.
79
+
80
+ :type grammar: PCFG
81
+ :param grammar: The grammar used to parse texts.
82
+ :type trace: int
83
+ :param trace: The level of tracing that should be used when
84
+ parsing a text. ``0`` will generate no tracing output;
85
+ and higher numbers will produce more verbose tracing
86
+ output.
87
+ """
88
+ self._grammar = grammar
89
+ self._trace = trace
90
+
91
+ def grammar(self):
92
+ return self._grammar
93
+
94
+ def trace(self, trace=2):
95
+ """
96
+ Set the level of tracing output that should be generated when
97
+ parsing a text.
98
+
99
+ :type trace: int
100
+ :param trace: The trace level. A trace level of ``0`` will
101
+ generate no tracing output; and higher trace levels will
102
+ produce more verbose tracing output.
103
+ :rtype: None
104
+ """
105
+ self._trace = trace
106
+
107
+ def parse(self, tokens):
108
+ # Inherit docs from ParserI
109
+
110
+ tokens = list(tokens)
111
+ self._grammar.check_coverage(tokens)
112
+
113
+ # The most likely constituent table. This table specifies the
114
+ # most likely constituent for a given span and type.
115
+ # Constituents can be either Trees or tokens. For Trees,
116
+ # the "type" is the Nonterminal for the tree's root node
117
+ # value. For Tokens, the "type" is the token's type.
118
+ # The table is stored as a dictionary, since it is sparse.
119
+ constituents = {}
120
+
121
+ # Initialize the constituents dictionary with the words from
122
+ # the text.
123
+ if self._trace:
124
+ print("Inserting tokens into the most likely" + " constituents table...")
125
+ for index in range(len(tokens)):
126
+ token = tokens[index]
127
+ constituents[index, index + 1, token] = token
128
+ if self._trace > 1:
129
+ self._trace_lexical_insertion(token, index, len(tokens))
130
+
131
+ # Consider each span of length 1, 2, ..., n; and add any trees
132
+ # that might cover that span to the constituents dictionary.
133
+ for length in range(1, len(tokens) + 1):
134
+ if self._trace:
135
+ print(
136
+ "Finding the most likely constituents"
137
+ + " spanning %d text elements..." % length
138
+ )
139
+ for start in range(len(tokens) - length + 1):
140
+ span = (start, start + length)
141
+ self._add_constituents_spanning(span, constituents, tokens)
142
+
143
+ # Return the tree that spans the entire text & has the right category
144
+ tree = constituents.get((0, len(tokens), self._grammar.start()))
145
+ if tree is not None:
146
+ yield tree
147
+
148
+ def _add_constituents_spanning(self, span, constituents, tokens):
149
+ """
150
+ Find any constituents that might cover ``span``, and add them
151
+ to the most likely constituents table.
152
+
153
+ :rtype: None
154
+ :type span: tuple(int, int)
155
+ :param span: The section of the text for which we are
156
+ trying to find possible constituents. The span is
157
+ specified as a pair of integers, where the first integer
158
+ is the index of the first token that should be included in
159
+ the constituent; and the second integer is the index of
160
+ the first token that should not be included in the
161
+ constituent. I.e., the constituent should cover
162
+ ``text[span[0]:span[1]]``, where ``text`` is the text
163
+ that we are parsing.
164
+
165
+ :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree)
166
+ :param constituents: The most likely constituents table. This
167
+ table records the most probable tree representation for
168
+ any given span and node value. In particular,
169
+ ``constituents(s,e,nv)`` is the most likely
170
+ ``ProbabilisticTree`` that covers ``text[s:e]``
171
+ and has a node value ``nv.symbol()``, where ``text``
172
+ is the text that we are parsing. When
173
+ ``_add_constituents_spanning`` is called, ``constituents``
174
+ should contain all possible constituents that are shorter
175
+ than ``span``.
176
+
177
+ :type tokens: list of tokens
178
+ :param tokens: The text we are parsing. This is only used for
179
+ trace output.
180
+ """
181
+ # Since some of the grammar productions may be unary, we need to
182
+ # repeatedly try all of the productions until none of them add any
183
+ # new constituents.
184
+ changed = True
185
+ while changed:
186
+ changed = False
187
+
188
+ # Find all instantiations of the grammar productions that
189
+ # cover the span.
190
+ instantiations = self._find_instantiations(span, constituents)
191
+
192
+ # For each production instantiation, add a new
193
+ # ProbabilisticTree whose probability is the product
194
+ # of the children's probabilities and the production's
195
+ # probability.
196
+ for (production, children) in instantiations:
197
+ subtrees = [c for c in children if isinstance(c, Tree)]
198
+ p = reduce(lambda pr, t: pr * t.prob(), subtrees, production.prob())
199
+ node = production.lhs().symbol()
200
+ tree = ProbabilisticTree(node, children, prob=p)
201
+
202
+ # If it's a new constituent, then add it to the
203
+ # constituents dictionary.
204
+ c = constituents.get((span[0], span[1], production.lhs()))
205
+ if self._trace > 1:
206
+ if c is None or c != tree:
207
+ if c is None or c.prob() < tree.prob():
208
+ print(" Insert:", end=" ")
209
+ else:
210
+ print(" Discard:", end=" ")
211
+ self._trace_production(production, p, span, len(tokens))
212
+ if c is None or c.prob() < tree.prob():
213
+ constituents[span[0], span[1], production.lhs()] = tree
214
+ changed = True
215
+
216
+ def _find_instantiations(self, span, constituents):
217
+ """
218
+ :return: a list of the production instantiations that cover a
219
+ given span of the text. A "production instantiation" is
220
+ a tuple containing a production and a list of children,
221
+ where the production's right hand side matches the list of
222
+ children; and the children cover ``span``.
223
+ :rtype: list(tuple(Production, list(ProbabilisticTree or token)))
224
+
225
+
226
+ :type span: tuple(int, int)
227
+ :param span: The section of the text for which we are
228
+ trying to find production instantiations. The span is
229
+ specified as a pair of integers, where the first integer
230
+ is the index of the first token that should be covered by
231
+ the production instantiation; and the second integer is
232
+ the index of the first token that should not be covered by
233
+ the production instantiation.
234
+ :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree)
235
+ :param constituents: The most likely constituents table. This
236
+ table records the most probable tree representation for
237
+ any given span and node value. See the module
238
+ documentation for more information.
239
+ """
240
+ rv = []
241
+ for production in self._grammar.productions():
242
+ childlists = self._match_rhs(production.rhs(), span, constituents)
243
+
244
+ for childlist in childlists:
245
+ rv.append((production, childlist))
246
+ return rv
247
+
248
+ def _match_rhs(self, rhs, span, constituents):
249
+ """
250
+ :return: a set of all the lists of children that cover ``span``
251
+ and that match ``rhs``.
252
+ :rtype: list(list(ProbabilisticTree or token))
253
+
254
+ :type rhs: list(Nonterminal or any)
255
+ :param rhs: The list specifying what kinds of children need to
256
+ cover ``span``. Each nonterminal in ``rhs`` specifies
257
+ that the corresponding child should be a tree whose node
258
+ value is that nonterminal's symbol. Each terminal in ``rhs``
259
+ specifies that the corresponding child should be a token
260
+ whose type is that terminal.
261
+ :type span: tuple(int, int)
262
+ :param span: The section of the text for which we are
263
+ trying to find child lists. The span is specified as a
264
+ pair of integers, where the first integer is the index of
265
+ the first token that should be covered by the child list;
266
+ and the second integer is the index of the first token
267
+ that should not be covered by the child list.
268
+ :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree)
269
+ :param constituents: The most likely constituents table. This
270
+ table records the most probable tree representation for
271
+ any given span and node value. See the module
272
+ documentation for more information.
273
+ """
274
+ (start, end) = span
275
+
276
+ # Base case
277
+ if start >= end and rhs == ():
278
+ return [[]]
279
+ if start >= end or rhs == ():
280
+ return []
281
+
282
+ # Find everything that matches the 1st symbol of the RHS
283
+ childlists = []
284
+ for split in range(start, end + 1):
285
+ l = constituents.get((start, split, rhs[0]))
286
+ if l is not None:
287
+ rights = self._match_rhs(rhs[1:], (split, end), constituents)
288
+ childlists += [[l] + r for r in rights]
289
+
290
+ return childlists
291
+
292
+ def _trace_production(self, production, p, span, width):
293
+ """
294
+ Print trace output indicating that a given production has been
295
+ applied at a given location.
296
+
297
+ :param production: The production that has been applied
298
+ :type production: Production
299
+ :param p: The probability of the tree produced by the production.
300
+ :type p: float
301
+ :param span: The span of the production
302
+ :type span: tuple
303
+ :rtype: None
304
+ """
305
+
306
+ str = "|" + "." * span[0]
307
+ str += "=" * (span[1] - span[0])
308
+ str += "." * (width - span[1]) + "| "
309
+ str += "%s" % production
310
+ if self._trace > 2:
311
+ str = f"{str:<40} {p:12.10f} "
312
+
313
+ print(str)
314
+
315
+ def _trace_lexical_insertion(self, token, index, width):
316
+ str = " Insert: |" + "." * index + "=" + "." * (width - index - 1) + "| "
317
+ str += f"{token}"
318
+ print(str)
319
+
320
+ def __repr__(self):
321
+ return "<ViterbiParser for %r>" % self._grammar
322
+
323
+
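A minimal, self-contained usage sketch (an editorial illustration, not part of the NLTK source; the toy grammar is made up, while PCFG.fromstring and ViterbiParser.parse are the same APIs exercised by demo() below):

    from nltk.grammar import PCFG
    from nltk.parse.viterbi import ViterbiParser

    toy = PCFG.fromstring("""
    S -> NP VP [1.0]
    NP -> 'I' [0.4] | Det N [0.6]
    Det -> 'the' [1.0]
    N -> 'man' [1.0]
    VP -> V NP [1.0]
    V -> 'saw' [1.0]
    """)

    parser = ViterbiParser(toy)
    # parse() yields at most one tree: the single most likely parse of the token sequence.
    for tree in parser.parse("I saw the man".split()):
        print(tree)  # best parse, probability 1.0 * 0.4 * 1.0 * 0.6 * 1.0 * 1.0 = 0.24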
324
+ ##//////////////////////////////////////////////////////
325
+ ## Test Code
326
+ ##//////////////////////////////////////////////////////
327
+
328
+
329
+ def demo():
330
+ """
331
+ A demonstration of the probabilistic parsers. The user is
332
+ prompted to select which demo to run, and how many parses should
333
+ be found; and then each parser is run on the same demo, and a
334
+ summary of the results is displayed.
335
+ """
336
+ import sys
337
+ import time
338
+
339
+ from nltk import tokenize
340
+ from nltk.grammar import PCFG
341
+ from nltk.parse import ViterbiParser
342
+
343
+ toy_pcfg1 = PCFG.fromstring(
344
+ """
345
+ S -> NP VP [1.0]
346
+ NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
347
+ Det -> 'the' [0.8] | 'my' [0.2]
348
+ N -> 'man' [0.5] | 'telescope' [0.5]
349
+ VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
350
+ V -> 'ate' [0.35] | 'saw' [0.65]
351
+ PP -> P NP [1.0]
352
+ P -> 'with' [0.61] | 'under' [0.39]
353
+ """
354
+ )
355
+
356
+ toy_pcfg2 = PCFG.fromstring(
357
+ """
358
+ S -> NP VP [1.0]
359
+ VP -> V NP [.59]
360
+ VP -> V [.40]
361
+ VP -> VP PP [.01]
362
+ NP -> Det N [.41]
363
+ NP -> Name [.28]
364
+ NP -> NP PP [.31]
365
+ PP -> P NP [1.0]
366
+ V -> 'saw' [.21]
367
+ V -> 'ate' [.51]
368
+ V -> 'ran' [.28]
369
+ N -> 'boy' [.11]
370
+ N -> 'cookie' [.12]
371
+ N -> 'table' [.13]
372
+ N -> 'telescope' [.14]
373
+ N -> 'hill' [.5]
374
+ Name -> 'Jack' [.52]
375
+ Name -> 'Bob' [.48]
376
+ P -> 'with' [.61]
377
+ P -> 'under' [.39]
378
+ Det -> 'the' [.41]
379
+ Det -> 'a' [.31]
380
+ Det -> 'my' [.28]
381
+ """
382
+ )
383
+
384
+ # Define two demos. Each demo has a sentence and a grammar.
385
+ demos = [
386
+ ("I saw the man with my telescope", toy_pcfg1),
387
+ ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2),
388
+ ]
389
+
390
+ # Ask the user which demo they want to use.
391
+ print()
392
+ for i in range(len(demos)):
393
+ print(f"{i + 1:>3}: {demos[i][0]}")
394
+ print(" %r" % demos[i][1])
395
+ print()
396
+ print("Which demo (%d-%d)? " % (1, len(demos)), end=" ")
397
+ try:
398
+ snum = int(sys.stdin.readline().strip()) - 1
399
+ sent, grammar = demos[snum]
400
+ except:
401
+ print("Bad sentence number")
402
+ return
403
+
404
+ # Tokenize the sentence.
405
+ tokens = sent.split()
406
+
407
+ parser = ViterbiParser(grammar)
408
+ all_parses = {}
409
+
410
+ print(f"\nsent: {sent}\nparser: {parser}\ngrammar: {grammar}")
411
+ parser.trace(3)
412
+ t = time.time()
413
+ parses = parser.parse_all(tokens)
414
+ time = time.time() - t
415
+ average = (
416
+ reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0
417
+ )
418
+ num_parses = len(parses)
419
+ for p in parses:
420
+ all_parses[p.freeze()] = 1
421
+
422
+ # Print some summary statistics
423
+ print()
424
+ print("Time (secs) # Parses Average P(parse)")
425
+ print("-----------------------------------------")
426
+ print("%11.4f%11d%19.14f" % (time, num_parses, average))
427
+ parses = all_parses.keys()
428
+ if parses:
429
+ p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses)
430
+ else:
431
+ p = 0
432
+ print("------------------------------------------")
433
+ print("%11s%11d%19.14f" % ("n/a", len(parses), p))
434
+
435
+ # Ask the user if we should draw the parses.
436
+ print()
437
+ print("Draw parses (y/n)? ", end=" ")
438
+ if sys.stdin.readline().strip().lower().startswith("y"):
439
+ from nltk.draw.tree import draw_trees
440
+
441
+ print(" please wait...")
442
+ draw_trees(*parses)
443
+
444
+ # Ask the user if we should print the parses.
445
+ print()
446
+ print("Print parses (y/n)? ", end=" ")
447
+ if sys.stdin.readline().strip().lower().startswith("y"):
448
+ for parse in parses:
449
+ print(parse)
450
+
451
+
452
+ if __name__ == "__main__":
453
+ demo()
venv/lib/python3.10/site-packages/nltk/sem/__init__.py ADDED
@@ -0,0 +1,75 @@
1
+ # Natural Language Toolkit: Semantic Interpretation
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ NLTK Semantic Interpretation Package
10
+
11
+ This package contains classes for representing semantic structure in
12
+ formulas of first-order logic and for evaluating such formulas in
13
+ set-theoretic models.
14
+
15
+ >>> from nltk.sem import logic
16
+ >>> logic._counter._value = 0
17
+
18
+ The package has two main components:
19
+
20
+ - ``logic`` provides support for analyzing expressions of First
21
+ Order Logic (FOL).
22
+ - ``evaluate`` allows users to recursively determine truth in a
23
+ model for formulas of FOL.
24
+
25
+ A model consists of a domain of discourse and a valuation function,
26
+ which assigns values to non-logical constants. We assume that entities
27
+ in the domain are represented as strings such as ``'b1'``, ``'g1'``,
28
+ etc. A ``Valuation`` is initialized with a list of (symbol, value)
29
+ pairs, where values are entities, sets of entities or sets of tuples
30
+ of entities.
31
+ The domain of discourse can be inferred from the valuation, and a model
32
+ is then created with domain and valuation as parameters.
33
+
34
+ >>> from nltk.sem import Valuation, Model
35
+ >>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
36
+ ... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
37
+ ... ('dog', set(['d1'])),
38
+ ... ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
39
+ >>> val = Valuation(v)
40
+ >>> dom = val.domain
41
+ >>> m = Model(dom, val)
42
+ """
43
+
44
+ from nltk.sem.boxer import Boxer
45
+ from nltk.sem.drt import DRS, DrtExpression
46
+ from nltk.sem.evaluate import (
47
+ Assignment,
48
+ Model,
49
+ Undefined,
50
+ Valuation,
51
+ arity,
52
+ is_rel,
53
+ read_valuation,
54
+ set2rel,
55
+ )
56
+ from nltk.sem.lfg import FStructure
57
+ from nltk.sem.logic import (
58
+ ApplicationExpression,
59
+ Expression,
60
+ LogicalExpressionException,
61
+ Variable,
62
+ binding_ops,
63
+ boolean_ops,
64
+ equality_preds,
65
+ read_logic,
66
+ )
67
+ from nltk.sem.relextract import clause, extract_rels, rtuple
68
+ from nltk.sem.skolemize import skolemize
69
+ from nltk.sem.util import evaluate_sents, interpret_sents, parse_sents, root_semrep
70
+
71
+ # from nltk.sem.glue import Glue
72
+ # from nltk.sem.hole import HoleSemantics
73
+ # from nltk.sem.cooper_storage import CooperStore
74
+
75
+ # don't import chat80 as its names are too generic
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.36 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/boxer.cpython-310.pyc ADDED
Binary file (45.3 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/chat80.cpython-310.pyc ADDED
Binary file (21.2 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/cooper_storage.cpython-310.pyc ADDED
Binary file (3.85 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/drt.cpython-310.pyc ADDED
Binary file (44.8 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/evaluate.cpython-310.pyc ADDED
Binary file (21.8 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/glue.cpython-310.pyc ADDED
Binary file (19.7 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/hole.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/lfg.cpython-310.pyc ADDED
Binary file (5.98 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/linearlogic.cpython-310.pyc ADDED
Binary file (17 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/logic.cpython-310.pyc ADDED
Binary file (64 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/relextract.cpython-310.pyc ADDED
Binary file (13.3 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/skolemize.cpython-310.pyc ADDED
Binary file (2.34 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/__pycache__/util.cpython-310.pyc ADDED
Binary file (8.02 kB). View file
 
venv/lib/python3.10/site-packages/nltk/sem/boxer.py ADDED
@@ -0,0 +1,1605 @@
1
+ # Natural Language Toolkit: Interface to Boxer
2
+ # <http://svn.ask.it.usyd.edu.au/trac/candc/wiki/boxer>
3
+ #
4
+ # Author: Dan Garrette <[email protected]>
5
+ #
6
+ # Copyright (C) 2001-2023 NLTK Project
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ An interface to Boxer.
12
+
13
+ This interface relies on the latest development (subversion) version of
14
+ C&C and Boxer.
15
+
16
+ Usage
17
+ =====
18
+
19
+ Set the environment variable CANDC to the bin directory of your CandC installation.
20
+ The models directory should be in the CandC root directory.
21
+ For example::
22
+
23
+ /path/to/candc/
24
+ bin/
25
+ candc
26
+ boxer
27
+ models/
28
+ boxer/
29
+ """
30
+
31
+ import operator
32
+ import os
33
+ import re
34
+ import subprocess
35
+ import tempfile
36
+ from functools import reduce
37
+ from optparse import OptionParser
38
+
39
+ from nltk.internals import find_binary
40
+ from nltk.sem.drt import (
41
+ DRS,
42
+ DrtApplicationExpression,
43
+ DrtEqualityExpression,
44
+ DrtNegatedExpression,
45
+ DrtOrExpression,
46
+ DrtParser,
47
+ DrtProposition,
48
+ DrtTokens,
49
+ DrtVariableExpression,
50
+ )
51
+ from nltk.sem.logic import (
52
+ ExpectedMoreTokensException,
53
+ LogicalExpressionException,
54
+ UnexpectedTokenException,
55
+ Variable,
56
+ )
57
+
58
+
59
+ class Boxer:
60
+ """
61
+ This class is an interface to Johan Bos's program Boxer, a wide-coverage
62
+ semantic parser that produces Discourse Representation Structures (DRSs).
63
+ """
64
+
65
+ def __init__(
66
+ self,
67
+ boxer_drs_interpreter=None,
68
+ elimeq=False,
69
+ bin_dir=None,
70
+ verbose=False,
71
+ resolve=True,
72
+ ):
73
+ """
74
+ :param boxer_drs_interpreter: A class that converts from the
75
+ ``AbstractBoxerDrs`` object hierarchy to a different object. The
76
+ default is ``NltkDrtBoxerDrsInterpreter``, which converts to the NLTK
77
+ DRT hierarchy.
78
+ :param elimeq: When set to true, Boxer removes all equalities from the
79
+ DRSs and discourse referents standing in the equality relation are
80
+ unified, but only if this can be done in a meaning-preserving manner.
81
+ :param resolve: When set to true, Boxer will resolve all anaphoric DRSs and perform merge-reduction.
82
+ Resolution follows Van der Sandt's theory of binding and accommodation.
83
+ """
84
+ if boxer_drs_interpreter is None:
85
+ boxer_drs_interpreter = NltkDrtBoxerDrsInterpreter()
86
+ self._boxer_drs_interpreter = boxer_drs_interpreter
87
+
88
+ self._resolve = resolve
89
+ self._elimeq = elimeq
90
+
91
+ self.set_bin_dir(bin_dir, verbose)
92
+
93
+ def set_bin_dir(self, bin_dir, verbose=False):
94
+ self._candc_bin = self._find_binary("candc", bin_dir, verbose)
95
+ self._candc_models_path = os.path.normpath(
96
+ os.path.join(self._candc_bin[:-5], "../models")
97
+ )
98
+ self._boxer_bin = self._find_binary("boxer", bin_dir, verbose)
99
+
100
+ def interpret(self, input, discourse_id=None, question=False, verbose=False):
101
+ """
102
+ Use Boxer to give a first order representation.
103
+
104
+ :param input: str Input sentence to parse
105
+ :param occur_index: bool Should predicates be occurrence indexed?
106
+ :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate.
107
+ :return: ``drt.DrtExpression``
108
+ """
109
+ discourse_ids = [discourse_id] if discourse_id is not None else None
110
+ (d,) = self.interpret_multi_sents([[input]], discourse_ids, question, verbose)
111
+ if not d:
112
+ raise Exception(f'Unable to interpret: "{input}"')
113
+ return d
114
+
115
+ def interpret_multi(self, input, discourse_id=None, question=False, verbose=False):
116
+ """
117
+ Use Boxer to give a first order representation.
118
+
119
+ :param input: list of str Input sentences to parse as a single discourse
120
+ :param occur_index: bool Should predicates be occurrence indexed?
121
+ :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate.
122
+ :return: ``drt.DrtExpression``
123
+ """
124
+ discourse_ids = [discourse_id] if discourse_id is not None else None
125
+ (d,) = self.interpret_multi_sents([input], discourse_ids, question, verbose)
126
+ if not d:
127
+ raise Exception(f'Unable to interpret: "{input}"')
128
+ return d
129
+
130
+ def interpret_sents(
131
+ self, inputs, discourse_ids=None, question=False, verbose=False
132
+ ):
133
+ """
134
+ Use Boxer to give a first order representation.
135
+
136
+ :param inputs: list of str Input sentences to parse as individual discourses
137
+ :param occur_index: bool Should predicates be occurrence indexed?
138
+ :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
139
+ :return: list of ``drt.DrtExpression``
140
+ """
141
+ return self.interpret_multi_sents(
142
+ [[input] for input in inputs], discourse_ids, question, verbose
143
+ )
144
+
145
+ def interpret_multi_sents(
146
+ self, inputs, discourse_ids=None, question=False, verbose=False
147
+ ):
148
+ """
149
+ Use Boxer to give a first order representation.
150
+
151
+ :param inputs: list of list of str Input discourses to parse
152
+ :param occur_index: bool Should predicates be occurrence indexed?
153
+ :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
154
+ :return: list of ``drt.DrtExpression``
155
+ """
156
+ if discourse_ids is not None:
157
+ assert len(inputs) == len(discourse_ids)
158
+ assert reduce(operator.and_, (id is not None for id in discourse_ids))
159
+ use_disc_id = True
160
+ else:
161
+ discourse_ids = list(map(str, range(len(inputs))))
162
+ use_disc_id = False
163
+
164
+ candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose)
165
+ boxer_out = self._call_boxer(candc_out, verbose=verbose)
166
+
167
+ # if 'ERROR: input file contains no ccg/2 terms.' in boxer_out:
168
+ # raise UnparseableInputException('Could not parse with candc: "%s"' % input_str)
169
+
170
+ drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id)
171
+ return [drs_dict.get(id, None) for id in discourse_ids]
172
+
173
+ def _call_candc(self, inputs, discourse_ids, question, verbose=False):
174
+ """
175
+ Call the ``candc`` binary with the given input.
176
+
177
+ :param inputs: list of list of str Input discourses to parse
178
+ :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
179
+ :param filename: str A filename for the output file
180
+ :return: stdout
181
+ """
182
+ args = [
183
+ "--models",
184
+ os.path.join(self._candc_models_path, ["boxer", "questions"][question]),
185
+ "--candc-printer",
186
+ "boxer",
187
+ ]
188
+ return self._call(
189
+ "\n".join(
190
+ sum(
191
+ ([f"<META>'{id}'"] + d for d, id in zip(inputs, discourse_ids)),
192
+ [],
193
+ )
194
+ ),
195
+ self._candc_bin,
196
+ args,
197
+ verbose,
198
+ )
199
+
200
+ def _call_boxer(self, candc_out, verbose=False):
201
+ """
202
+ Call the ``boxer`` binary with the given input.
203
+
204
+ :param candc_out: str output from C&C parser
205
+ :return: stdout
206
+ """
207
+ f = None
208
+ try:
209
+ fd, temp_filename = tempfile.mkstemp(
210
+ prefix="boxer-", suffix=".in", text=True
211
+ )
212
+ f = os.fdopen(fd, "w")
213
+ f.write(candc_out.decode("utf-8"))
214
+ finally:
215
+ if f:
216
+ f.close()
217
+
218
+ args = [
219
+ "--box",
220
+ "false",
221
+ "--semantics",
222
+ "drs",
223
+ #'--flat', 'false', # removed from boxer
224
+ "--resolve",
225
+ ["false", "true"][self._resolve],
226
+ "--elimeq",
227
+ ["false", "true"][self._elimeq],
228
+ "--format",
229
+ "prolog",
230
+ "--instantiate",
231
+ "true",
232
+ "--input",
233
+ temp_filename,
234
+ ]
235
+ stdout = self._call(None, self._boxer_bin, args, verbose)
236
+ os.remove(temp_filename)
237
+ return stdout
238
+
239
+ def _find_binary(self, name, bin_dir, verbose=False):
240
+ return find_binary(
241
+ name,
242
+ path_to_bin=bin_dir,
243
+ env_vars=["CANDC"],
244
+ url="http://svn.ask.it.usyd.edu.au/trac/candc/",
245
+ binary_names=[name, name + ".exe"],
246
+ verbose=verbose,
247
+ )
248
+
249
+ def _call(self, input_str, binary, args=[], verbose=False):
250
+ """
251
+ Call the binary with the given input.
252
+
253
+ :param input_str: A string whose contents are used as stdin.
254
+ :param binary: The location of the binary to call
255
+ :param args: A list of command-line arguments.
256
+ :return: stdout
257
+ """
258
+ if verbose:
259
+ print("Calling:", binary)
260
+ print("Args:", args)
261
+ print("Input:", input_str)
262
+ print("Command:", binary + " " + " ".join(args))
263
+
264
+ # Call via a subprocess
265
+ if input_str is None:
266
+ cmd = [binary] + args
267
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
268
+ else:
269
+ cmd = 'echo "{}" | {} {}'.format(input_str, binary, " ".join(args))
270
+ p = subprocess.Popen(
271
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
272
+ )
273
+ stdout, stderr = p.communicate()
274
+
275
+ if verbose:
276
+ print("Return code:", p.returncode)
277
+ if stdout:
278
+ print("stdout:\n", stdout, "\n")
279
+ if stderr:
280
+ print("stderr:\n", stderr, "\n")
281
+ if p.returncode != 0:
282
+ raise Exception(
283
+ "ERROR CALLING: {} {}\nReturncode: {}\n{}".format(
284
+ binary, " ".join(args), p.returncode, stderr
285
+ )
286
+ )
287
+
288
+ return stdout
289
+
290
+ def _parse_to_drs_dict(self, boxer_out, use_disc_id):
291
+ lines = boxer_out.decode("utf-8").split("\n")
292
+ drs_dict = {}
293
+ i = 0
294
+ while i < len(lines):
295
+ line = lines[i]
296
+ if line.startswith("id("):
297
+ comma_idx = line.index(",")
298
+ discourse_id = line[3:comma_idx]
299
+ if discourse_id[0] == "'" and discourse_id[-1] == "'":
300
+ discourse_id = discourse_id[1:-1]
301
+ drs_id = line[comma_idx + 1 : line.index(")")]
302
+ i += 1
303
+ line = lines[i]
304
+ assert line.startswith(f"sem({drs_id},")
305
+ if line[-4:] == "').'":
306
+ line = line[:-4] + ")."
307
+ assert line.endswith(")."), f"can't parse line: {line}"
308
+
309
+ search_start = len(f"sem({drs_id},[")
310
+ brace_count = 1
311
+ drs_start = -1
312
+ for j, c in enumerate(line[search_start:]):
313
+ if c == "[":
314
+ brace_count += 1
315
+ if c == "]":
316
+ brace_count -= 1
317
+ if brace_count == 0:
318
+ drs_start = search_start + j + 1
319
+ if line[drs_start : drs_start + 3] == "','":
320
+ drs_start = drs_start + 3
321
+ else:
322
+ drs_start = drs_start + 1
323
+ break
324
+ assert drs_start > -1
325
+
326
+ drs_input = line[drs_start:-2].strip()
327
+ parsed = self._parse_drs(drs_input, discourse_id, use_disc_id)
328
+ drs_dict[discourse_id] = self._boxer_drs_interpreter.interpret(parsed)
329
+ i += 1
330
+ return drs_dict
331
+
332
+ def _parse_drs(self, drs_string, discourse_id, use_disc_id):
333
+ return BoxerOutputDrsParser([None, discourse_id][use_disc_id]).parse(drs_string)
334
+
335
+
336
+ class BoxerOutputDrsParser(DrtParser):
337
+ def __init__(self, discourse_id=None):
338
+ """
339
+ This class is used to parse the Prolog DRS output from Boxer into a
340
+ hierarchy of python objects.
341
+ """
342
+ DrtParser.__init__(self)
343
+ self.discourse_id = discourse_id
344
+ self.sentence_id_offset = None
345
+ self.quote_chars = [("'", "'", "\\", False)]
346
+
347
+ def parse(self, data, signature=None):
348
+ return DrtParser.parse(self, data, signature)
349
+
350
+ def get_all_symbols(self):
351
+ return ["(", ")", ",", "[", "]", ":"]
352
+
353
+ def handle(self, tok, context):
354
+ return self.handle_drs(tok)
355
+
356
+ def attempt_adjuncts(self, expression, context):
357
+ return expression
358
+
359
+ def parse_condition(self, indices):
360
+ """
361
+ Parse a DRS condition
362
+
363
+ :return: list of ``DrtExpression``
364
+ """
365
+ tok = self.token()
366
+ accum = self.handle_condition(tok, indices)
367
+ if accum is None:
368
+ raise UnexpectedTokenException(tok)
369
+ return accum
370
+
371
+ def handle_drs(self, tok):
372
+ if tok == "drs":
373
+ return self.parse_drs()
374
+ elif tok in ["merge", "smerge"]:
375
+ return self._handle_binary_expression(self._make_merge_expression)(None, [])
376
+ elif tok in ["alfa"]:
377
+ return self._handle_alfa(self._make_merge_expression)(None, [])
378
+
379
+ def handle_condition(self, tok, indices):
380
+ """
381
+ Handle a DRS condition
382
+
383
+ :param indices: list of int
384
+ :return: list of ``DrtExpression``
385
+ """
386
+ if tok == "not":
387
+ return [self._handle_not()]
388
+
389
+ if tok == "or":
390
+ conds = [self._handle_binary_expression(self._make_or_expression)]
391
+ elif tok == "imp":
392
+ conds = [self._handle_binary_expression(self._make_imp_expression)]
393
+ elif tok == "eq":
394
+ conds = [self._handle_eq()]
395
+ elif tok == "prop":
396
+ conds = [self._handle_prop()]
397
+
398
+ elif tok == "pred":
399
+ conds = [self._handle_pred()]
400
+ elif tok == "named":
401
+ conds = [self._handle_named()]
402
+ elif tok == "rel":
403
+ conds = [self._handle_rel()]
404
+ elif tok == "timex":
405
+ conds = self._handle_timex()
406
+ elif tok == "card":
407
+ conds = [self._handle_card()]
408
+
409
+ elif tok == "whq":
410
+ conds = [self._handle_whq()]
411
+ elif tok == "duplex":
412
+ conds = [self._handle_duplex()]
413
+
414
+ else:
415
+ conds = []
416
+
417
+ return sum(
418
+ (
419
+ [cond(sent_index, word_indices) for cond in conds]
420
+ for sent_index, word_indices in self._sent_and_word_indices(indices)
421
+ ),
422
+ [],
423
+ )
424
+
425
+ def _handle_not(self):
426
+ self.assertToken(self.token(), "(")
427
+ drs = self.process_next_expression(None)
428
+ self.assertToken(self.token(), ")")
429
+ return BoxerNot(drs)
430
+
431
+ def _handle_pred(self):
432
+ # pred(_G3943, dog, n, 0)
433
+ self.assertToken(self.token(), "(")
434
+ variable = self.parse_variable()
435
+ self.assertToken(self.token(), ",")
436
+ name = self.token()
437
+ self.assertToken(self.token(), ",")
438
+ pos = self.token()
439
+ self.assertToken(self.token(), ",")
440
+ sense = int(self.token())
441
+ self.assertToken(self.token(), ")")
442
+
443
+ def _handle_pred_f(sent_index, word_indices):
444
+ return BoxerPred(
445
+ self.discourse_id, sent_index, word_indices, variable, name, pos, sense
446
+ )
447
+
448
+ return _handle_pred_f
449
+
450
+ def _handle_duplex(self):
451
+ # duplex(whq, drs(...), var, drs(...))
452
+ self.assertToken(self.token(), "(")
453
+ # self.assertToken(self.token(), '[')
454
+ ans_types = []
455
+ # while self.token(0) != ']':
456
+ # cat = self.token()
457
+ # self.assertToken(self.token(), ':')
458
+ # if cat == 'des':
459
+ # ans_types.append(self.token())
460
+ # elif cat == 'num':
461
+ # ans_types.append('number')
462
+ # typ = self.token()
463
+ # if typ == 'cou':
464
+ # ans_types.append('count')
465
+ # else:
466
+ # ans_types.append(typ)
467
+ # else:
468
+ # ans_types.append(self.token())
469
+ # self.token() #swallow the ']'
470
+
471
+ self.assertToken(self.token(), "whq")
472
+ self.assertToken(self.token(), ",")
473
+ d1 = self.process_next_expression(None)
474
+ self.assertToken(self.token(), ",")
475
+ ref = self.parse_variable()
476
+ self.assertToken(self.token(), ",")
477
+ d2 = self.process_next_expression(None)
478
+ self.assertToken(self.token(), ")")
479
+ return lambda sent_index, word_indices: BoxerWhq(
480
+ self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2
481
+ )
482
+
483
+ def _handle_named(self):
484
+ # named(x0, john, per, 0)
485
+ self.assertToken(self.token(), "(")
486
+ variable = self.parse_variable()
487
+ self.assertToken(self.token(), ",")
488
+ name = self.token()
489
+ self.assertToken(self.token(), ",")
490
+ type = self.token()
491
+ self.assertToken(self.token(), ",")
492
+ sense = self.token() # as per boxer rev 2554
493
+ self.assertToken(self.token(), ")")
494
+ return lambda sent_index, word_indices: BoxerNamed(
495
+ self.discourse_id, sent_index, word_indices, variable, name, type, sense
496
+ )
497
+
498
+ def _handle_rel(self):
499
+ # rel(_G3993, _G3943, agent, 0)
500
+ self.assertToken(self.token(), "(")
501
+ var1 = self.parse_variable()
502
+ self.assertToken(self.token(), ",")
503
+ var2 = self.parse_variable()
504
+ self.assertToken(self.token(), ",")
505
+ rel = self.token()
506
+ self.assertToken(self.token(), ",")
507
+ sense = int(self.token())
508
+ self.assertToken(self.token(), ")")
509
+ return lambda sent_index, word_indices: BoxerRel(
510
+ self.discourse_id, sent_index, word_indices, var1, var2, rel, sense
511
+ )
512
+
513
+ def _handle_timex(self):
514
+ # timex(_G18322, date([]: (+), []:'XXXX', [1004]:'04', []:'XX'))
515
+ self.assertToken(self.token(), "(")
516
+ arg = self.parse_variable()
517
+ self.assertToken(self.token(), ",")
518
+ new_conds = self._handle_time_expression(arg)
519
+ self.assertToken(self.token(), ")")
520
+ return new_conds
521
+
522
+ def _handle_time_expression(self, arg):
523
+ # date([]: (+), []:'XXXX', [1004]:'04', []:'XX')
524
+ tok = self.token()
525
+ self.assertToken(self.token(), "(")
526
+ if tok == "date":
527
+ conds = self._handle_date(arg)
528
+ elif tok == "time":
529
+ conds = self._handle_time(arg)
530
+ else:
531
+ return None
532
+ self.assertToken(self.token(), ")")
533
+ return [
534
+ lambda sent_index, word_indices: BoxerPred(
535
+ self.discourse_id, sent_index, word_indices, arg, tok, "n", 0
536
+ )
537
+ ] + [lambda sent_index, word_indices: cond for cond in conds]
538
+
539
+ def _handle_date(self, arg):
540
+ # []: (+), []:'XXXX', [1004]:'04', []:'XX'
541
+ conds = []
542
+ ((sent_index, word_indices),) = self._sent_and_word_indices(
543
+ self._parse_index_list()
544
+ )
545
+ self.assertToken(self.token(), "(")
546
+ pol = self.token()
547
+ self.assertToken(self.token(), ")")
548
+ conds.append(
549
+ BoxerPred(
550
+ self.discourse_id,
551
+ sent_index,
552
+ word_indices,
553
+ arg,
554
+ f"date_pol_{pol}",
555
+ "a",
556
+ 0,
557
+ )
558
+ )
559
+ self.assertToken(self.token(), ",")
560
+
561
+ ((sent_index, word_indices),) = self._sent_and_word_indices(
562
+ self._parse_index_list()
563
+ )
564
+ year = self.token()
565
+ if year != "XXXX":
566
+ year = year.replace(":", "_")
567
+ conds.append(
568
+ BoxerPred(
569
+ self.discourse_id,
570
+ sent_index,
571
+ word_indices,
572
+ arg,
573
+ f"date_year_{year}",
574
+ "a",
575
+ 0,
576
+ )
577
+ )
578
+ self.assertToken(self.token(), ",")
579
+
580
+ ((sent_index, word_indices),) = self._sent_and_word_indices(
581
+ self._parse_index_list()
582
+ )
583
+ month = self.token()
584
+ if month != "XX":
585
+ conds.append(
586
+ BoxerPred(
587
+ self.discourse_id,
588
+ sent_index,
589
+ word_indices,
590
+ arg,
591
+ f"date_month_{month}",
592
+ "a",
593
+ 0,
594
+ )
595
+ )
596
+ self.assertToken(self.token(), ",")
597
+
598
+ ((sent_index, word_indices),) = self._sent_and_word_indices(
599
+ self._parse_index_list()
600
+ )
601
+ day = self.token()
602
+ if day != "XX":
603
+ conds.append(
604
+ BoxerPred(
605
+ self.discourse_id,
606
+ sent_index,
607
+ word_indices,
608
+ arg,
609
+ f"date_day_{day}",
610
+ "a",
611
+ 0,
612
+ )
613
+ )
614
+
615
+ return conds
616
+
617
+ def _handle_time(self, arg):
618
+ # time([1018]:'18', []:'XX', []:'XX')
619
+ conds = []
620
+ self._parse_index_list()
621
+ hour = self.token()
622
+ if hour != "XX":
623
+ conds.append(self._make_atom("r_hour_2", arg, hour))
624
+ self.assertToken(self.token(), ",")
625
+
626
+ self._parse_index_list()
627
+ min = self.token()
628
+ if min != "XX":
629
+ conds.append(self._make_atom("r_min_2", arg, min))
630
+ self.assertToken(self.token(), ",")
631
+
632
+ self._parse_index_list()
633
+ sec = self.token()
634
+ if sec != "XX":
635
+ conds.append(self._make_atom("r_sec_2", arg, sec))
636
+
637
+ return conds
638
+
639
+ def _handle_card(self):
640
+ # card(_G18535, 28, ge)
641
+ self.assertToken(self.token(), "(")
642
+ variable = self.parse_variable()
643
+ self.assertToken(self.token(), ",")
644
+ value = self.token()
645
+ self.assertToken(self.token(), ",")
646
+ type = self.token()
647
+ self.assertToken(self.token(), ")")
648
+ return lambda sent_index, word_indices: BoxerCard(
649
+ self.discourse_id, sent_index, word_indices, variable, value, type
650
+ )
651
+
652
+ def _handle_prop(self):
653
+ # prop(_G15949, drs(...))
654
+ self.assertToken(self.token(), "(")
655
+ variable = self.parse_variable()
656
+ self.assertToken(self.token(), ",")
657
+ drs = self.process_next_expression(None)
658
+ self.assertToken(self.token(), ")")
659
+ return lambda sent_index, word_indices: BoxerProp(
660
+ self.discourse_id, sent_index, word_indices, variable, drs
661
+ )
662
+
663
+ def _parse_index_list(self):
664
+ # [1001,1002]:
665
+ indices = []
666
+ self.assertToken(self.token(), "[")
667
+ while self.token(0) != "]":
668
+ indices.append(self.parse_index())
669
+ if self.token(0) == ",":
670
+ self.token() # swallow ','
671
+ self.token() # swallow ']'
672
+ self.assertToken(self.token(), ":")
673
+ return indices
674
+
675
+ def parse_drs(self):
676
+ # drs([[1001]:_G3943],
677
+ # [[1002]:pred(_G3943, dog, n, 0)]
678
+ # )
679
+ self.assertToken(self.token(), "(")
680
+ self.assertToken(self.token(), "[")
681
+ refs = set()
682
+ while self.token(0) != "]":
683
+ indices = self._parse_index_list()
684
+ refs.add(self.parse_variable())
685
+ if self.token(0) == ",":
686
+ self.token() # swallow ','
687
+ self.token() # swallow ']'
688
+ self.assertToken(self.token(), ",")
689
+ self.assertToken(self.token(), "[")
690
+ conds = []
691
+ while self.token(0) != "]":
692
+ indices = self._parse_index_list()
693
+ conds.extend(self.parse_condition(indices))
694
+ if self.token(0) == ",":
695
+ self.token() # swallow ','
696
+ self.token() # swallow ']'
697
+ self.assertToken(self.token(), ")")
698
+ return BoxerDrs(list(refs), conds)
699
+
700
+ def _handle_binary_expression(self, make_callback):
701
+ self.assertToken(self.token(), "(")
702
+ drs1 = self.process_next_expression(None)
703
+ self.assertToken(self.token(), ",")
704
+ drs2 = self.process_next_expression(None)
705
+ self.assertToken(self.token(), ")")
706
+ return lambda sent_index, word_indices: make_callback(
707
+ sent_index, word_indices, drs1, drs2
708
+ )
709
+
710
+ def _handle_alfa(self, make_callback):
711
+ self.assertToken(self.token(), "(")
712
+ type = self.token()
713
+ self.assertToken(self.token(), ",")
714
+ drs1 = self.process_next_expression(None)
715
+ self.assertToken(self.token(), ",")
716
+ drs2 = self.process_next_expression(None)
717
+ self.assertToken(self.token(), ")")
718
+ return lambda sent_index, word_indices: make_callback(
719
+ sent_index, word_indices, drs1, drs2
720
+ )
721
+
722
+ def _handle_eq(self):
723
+ self.assertToken(self.token(), "(")
724
+ var1 = self.parse_variable()
725
+ self.assertToken(self.token(), ",")
726
+ var2 = self.parse_variable()
727
+ self.assertToken(self.token(), ")")
728
+ return lambda sent_index, word_indices: BoxerEq(
729
+ self.discourse_id, sent_index, word_indices, var1, var2
730
+ )
731
+
732
+ def _handle_whq(self):
733
+ self.assertToken(self.token(), "(")
734
+ self.assertToken(self.token(), "[")
735
+ ans_types = []
736
+ while self.token(0) != "]":
737
+ cat = self.token()
738
+ self.assertToken(self.token(), ":")
739
+ if cat == "des":
740
+ ans_types.append(self.token())
741
+ elif cat == "num":
742
+ ans_types.append("number")
743
+ typ = self.token()
744
+ if typ == "cou":
745
+ ans_types.append("count")
746
+ else:
747
+ ans_types.append(typ)
748
+ else:
749
+ ans_types.append(self.token())
750
+ self.token() # swallow the ']'
751
+
752
+ self.assertToken(self.token(), ",")
753
+ d1 = self.process_next_expression(None)
754
+ self.assertToken(self.token(), ",")
755
+ ref = self.parse_variable()
756
+ self.assertToken(self.token(), ",")
757
+ d2 = self.process_next_expression(None)
758
+ self.assertToken(self.token(), ")")
759
+ return lambda sent_index, word_indices: BoxerWhq(
760
+ self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2
761
+ )
762
+
763
+ def _make_merge_expression(self, sent_index, word_indices, drs1, drs2):
764
+ return BoxerDrs(drs1.refs + drs2.refs, drs1.conds + drs2.conds)
765
+
766
+ def _make_or_expression(self, sent_index, word_indices, drs1, drs2):
767
+ return BoxerOr(self.discourse_id, sent_index, word_indices, drs1, drs2)
768
+
769
+ def _make_imp_expression(self, sent_index, word_indices, drs1, drs2):
770
+ return BoxerDrs(drs1.refs, drs1.conds, drs2)
771
+
772
+ def parse_variable(self):
773
+ var = self.token()
774
+ assert re.match(r"^[exps]\d+$", var), var
775
+ return var
776
+
777
+ def parse_index(self):
778
+ return int(self.token())
779
+
780
+ def _sent_and_word_indices(self, indices):
781
+ """
782
+ :return: list of (sent_index, word_indices) tuples
783
+ """
784
+ sent_indices = {(i / 1000) - 1 for i in indices if i >= 0}
785
+ if sent_indices:
786
+ pairs = []
787
+ for sent_index in sent_indices:
788
+ word_indices = [
789
+ (i % 1000) - 1 for i in indices if sent_index == (i / 1000) - 1
790
+ ]
791
+ pairs.append((sent_index, word_indices))
792
+ return pairs
793
+ else:
794
+ word_indices = [(i % 1000) - 1 for i in indices]
795
+ return [(None, word_indices)]
796
+
797
+
798
+ class BoxerDrsParser(DrtParser):
799
+ """
800
+ Reparse the str form of subclasses of ``AbstractBoxerDrs``
801
+ """
802
+
803
+ def __init__(self, discourse_id=None):
804
+ DrtParser.__init__(self)
805
+ self.discourse_id = discourse_id
806
+
807
+ def get_all_symbols(self):
808
+ return [
809
+ DrtTokens.OPEN,
810
+ DrtTokens.CLOSE,
811
+ DrtTokens.COMMA,
812
+ DrtTokens.OPEN_BRACKET,
813
+ DrtTokens.CLOSE_BRACKET,
814
+ ]
815
+
816
+ def attempt_adjuncts(self, expression, context):
817
+ return expression
818
+
819
+ def handle(self, tok, context):
820
+ try:
821
+ # if tok == 'drs':
822
+ # self.assertNextToken(DrtTokens.OPEN)
823
+ # label = int(self.token())
824
+ # self.assertNextToken(DrtTokens.COMMA)
825
+ # refs = list(map(int, self.handle_refs()))
826
+ # self.assertNextToken(DrtTokens.COMMA)
827
+ # conds = self.handle_conds(None)
828
+ # self.assertNextToken(DrtTokens.CLOSE)
829
+ # return BoxerDrs(label, refs, conds)
830
+ if tok == "pred":
831
+ self.assertNextToken(DrtTokens.OPEN)
832
+ disc_id = (
833
+ self.discourse_id if self.discourse_id is not None else self.token()
834
+ )
835
+ self.assertNextToken(DrtTokens.COMMA)
836
+ sent_id = self.nullableIntToken()
837
+ self.assertNextToken(DrtTokens.COMMA)
838
+ word_ids = list(map(int, self.handle_refs()))
839
+ self.assertNextToken(DrtTokens.COMMA)
840
+ variable = int(self.token())
841
+ self.assertNextToken(DrtTokens.COMMA)
842
+ name = self.token()
843
+ self.assertNextToken(DrtTokens.COMMA)
844
+ pos = self.token()
845
+ self.assertNextToken(DrtTokens.COMMA)
846
+ sense = int(self.token())
847
+ self.assertNextToken(DrtTokens.CLOSE)
848
+ return BoxerPred(disc_id, sent_id, word_ids, variable, name, pos, sense)
849
+ elif tok == "named":
850
+ self.assertNextToken(DrtTokens.OPEN)
851
+ disc_id = (
852
+ self.discourse_id if self.discourse_id is not None else self.token()
853
+ )
854
+ self.assertNextToken(DrtTokens.COMMA)
855
+ sent_id = int(self.token())
856
+ self.assertNextToken(DrtTokens.COMMA)
857
+ word_ids = map(int, self.handle_refs())
858
+ self.assertNextToken(DrtTokens.COMMA)
859
+ variable = int(self.token())
860
+ self.assertNextToken(DrtTokens.COMMA)
861
+ name = self.token()
862
+ self.assertNextToken(DrtTokens.COMMA)
863
+ type = self.token()
864
+ self.assertNextToken(DrtTokens.COMMA)
865
+ sense = int(self.token())
866
+ self.assertNextToken(DrtTokens.CLOSE)
867
+ return BoxerNamed(
868
+ disc_id, sent_id, word_ids, variable, name, type, sense
869
+ )
870
+ elif tok == "rel":
871
+ self.assertNextToken(DrtTokens.OPEN)
872
+ disc_id = (
873
+ self.discourse_id if self.discourse_id is not None else self.token()
874
+ )
875
+ self.assertNextToken(DrtTokens.COMMA)
876
+ sent_id = self.nullableIntToken()
877
+ self.assertNextToken(DrtTokens.COMMA)
878
+ word_ids = list(map(int, self.handle_refs()))
879
+ self.assertNextToken(DrtTokens.COMMA)
880
+ var1 = int(self.token())
881
+ self.assertNextToken(DrtTokens.COMMA)
882
+ var2 = int(self.token())
883
+ self.assertNextToken(DrtTokens.COMMA)
884
+ rel = self.token()
885
+ self.assertNextToken(DrtTokens.COMMA)
886
+ sense = int(self.token())
887
+ self.assertNextToken(DrtTokens.CLOSE)
888
+ return BoxerRel(disc_id, sent_id, word_ids, var1, var2, rel, sense)
889
+ elif tok == "prop":
890
+ self.assertNextToken(DrtTokens.OPEN)
891
+ disc_id = (
892
+ self.discourse_id if self.discourse_id is not None else self.token()
893
+ )
894
+ self.assertNextToken(DrtTokens.COMMA)
895
+ sent_id = int(self.token())
896
+ self.assertNextToken(DrtTokens.COMMA)
897
+ word_ids = list(map(int, self.handle_refs()))
898
+ self.assertNextToken(DrtTokens.COMMA)
899
+ variable = int(self.token())
900
+ self.assertNextToken(DrtTokens.COMMA)
901
+ drs = self.process_next_expression(None)
902
+ self.assertNextToken(DrtTokens.CLOSE)
903
+ return BoxerProp(disc_id, sent_id, word_ids, variable, drs)
904
+ elif tok == "not":
905
+ self.assertNextToken(DrtTokens.OPEN)
906
+ drs = self.process_next_expression(None)
907
+ self.assertNextToken(DrtTokens.CLOSE)
908
+ return BoxerNot(drs)
909
+ elif tok == "imp":
910
+ self.assertNextToken(DrtTokens.OPEN)
911
+ drs1 = self.process_next_expression(None)
912
+ self.assertNextToken(DrtTokens.COMMA)
913
+ drs2 = self.process_next_expression(None)
914
+ self.assertNextToken(DrtTokens.CLOSE)
915
+ return BoxerDrs(drs1.refs, drs1.conds, drs2)
916
+ elif tok == "or":
917
+ self.assertNextToken(DrtTokens.OPEN)
918
+ disc_id = (
919
+ self.discourse_id if self.discourse_id is not None else self.token()
920
+ )
921
+ self.assertNextToken(DrtTokens.COMMA)
922
+ sent_id = self.nullableIntToken()
923
+ self.assertNextToken(DrtTokens.COMMA)
924
+ word_ids = map(int, self.handle_refs())
925
+ self.assertNextToken(DrtTokens.COMMA)
926
+ drs1 = self.process_next_expression(None)
927
+ self.assertNextToken(DrtTokens.COMMA)
928
+ drs2 = self.process_next_expression(None)
929
+ self.assertNextToken(DrtTokens.CLOSE)
930
+ return BoxerOr(disc_id, sent_id, word_ids, drs1, drs2)
931
+ elif tok == "eq":
932
+ self.assertNextToken(DrtTokens.OPEN)
933
+ disc_id = (
934
+ self.discourse_id if self.discourse_id is not None else self.token()
935
+ )
936
+ self.assertNextToken(DrtTokens.COMMA)
937
+ sent_id = self.nullableIntToken()
938
+ self.assertNextToken(DrtTokens.COMMA)
939
+ word_ids = list(map(int, self.handle_refs()))
940
+ self.assertNextToken(DrtTokens.COMMA)
941
+ var1 = int(self.token())
942
+ self.assertNextToken(DrtTokens.COMMA)
943
+ var2 = int(self.token())
944
+ self.assertNextToken(DrtTokens.CLOSE)
945
+ return BoxerEq(disc_id, sent_id, word_ids, var1, var2)
946
+ elif tok == "card":
947
+ self.assertNextToken(DrtTokens.OPEN)
948
+ disc_id = (
949
+ self.discourse_id if self.discourse_id is not None else self.token()
950
+ )
951
+ self.assertNextToken(DrtTokens.COMMA)
952
+ sent_id = self.nullableIntToken()
953
+ self.assertNextToken(DrtTokens.COMMA)
954
+ word_ids = map(int, self.handle_refs())
955
+ self.assertNextToken(DrtTokens.COMMA)
956
+ var = int(self.token())
957
+ self.assertNextToken(DrtTokens.COMMA)
958
+ value = self.token()
959
+ self.assertNextToken(DrtTokens.COMMA)
960
+ type = self.token()
961
+ self.assertNextToken(DrtTokens.CLOSE)
962
+ return BoxerCard(disc_id, sent_id, word_ids, var, value, type)
963
+ elif tok == "whq":
964
+ self.assertNextToken(DrtTokens.OPEN)
965
+ disc_id = (
966
+ self.discourse_id if self.discourse_id is not None else self.token()
967
+ )
968
+ self.assertNextToken(DrtTokens.COMMA)
969
+ sent_id = self.nullableIntToken()
970
+ self.assertNextToken(DrtTokens.COMMA)
971
+ word_ids = list(map(int, self.handle_refs()))
972
+ self.assertNextToken(DrtTokens.COMMA)
973
+ ans_types = self.handle_refs()
974
+ self.assertNextToken(DrtTokens.COMMA)
975
+ drs1 = self.process_next_expression(None)
976
+ self.assertNextToken(DrtTokens.COMMA)
977
+ var = int(self.token())
978
+ self.assertNextToken(DrtTokens.COMMA)
979
+ drs2 = self.process_next_expression(None)
980
+ self.assertNextToken(DrtTokens.CLOSE)
981
+ return BoxerWhq(disc_id, sent_id, word_ids, ans_types, drs1, var, drs2)
982
+ except Exception as e:
983
+ raise LogicalExpressionException(self._currentIndex, str(e)) from e
984
+ assert False, repr(tok)
985
+
986
+ def nullableIntToken(self):
987
+ t = self.token()
988
+ return int(t) if t != "None" else None
989
+
990
+ def get_next_token_variable(self, description):
991
+ try:
992
+ return self.token()
993
+ except ExpectedMoreTokensException as e:
994
+ raise ExpectedMoreTokensException(e.index, "Variable expected.") from e
995
+
996
+
997
+ class AbstractBoxerDrs:
998
+ def variables(self):
999
+ """
1000
+ :return: (set<variables>, set<events>, set<propositions>)
1001
+ """
1002
+ variables, events, propositions = self._variables()
1003
+ return (variables - (events | propositions), events, propositions - events)
1004
+
1005
+ def variable_types(self):
1006
+ vartypes = {}
1007
+ for t, vars in zip(("z", "e", "p"), self.variables()):
1008
+ for v in vars:
1009
+ vartypes[v] = t
1010
+ return vartypes
1011
+
1012
+ def _variables(self):
1013
+ """
1014
+ :return: (set<variables>, set<events>, set<propositions>)
1015
+ """
1016
+ return (set(), set(), set())
1017
+
1018
+ def atoms(self):
1019
+ return set()
1020
+
1021
+ def clean(self):
1022
+ return self
1023
+
1024
+ def _clean_name(self, name):
1025
+ return name.replace("-", "_").replace("'", "_")
1026
+
1027
+ def renumber_sentences(self, f):
1028
+ return self
1029
+
1030
+ def __hash__(self):
1031
+ return hash(f"{self}")
1032
+
1033
+
1034
+ class BoxerDrs(AbstractBoxerDrs):
1035
+ def __init__(self, refs, conds, consequent=None):
1036
+ AbstractBoxerDrs.__init__(self)
1037
+ self.refs = refs
1038
+ self.conds = conds
1039
+ self.consequent = consequent
1040
+
1041
+ def _variables(self):
1042
+ variables = (set(), set(), set())
1043
+ for cond in self.conds:
1044
+ for s, v in zip(variables, cond._variables()):
1045
+ s.update(v)
1046
+ if self.consequent is not None:
1047
+ for s, v in zip(variables, self.consequent._variables()):
1048
+ s.update(v)
1049
+ return variables
1050
+
1051
+ def atoms(self):
1052
+ atoms = reduce(operator.or_, (cond.atoms() for cond in self.conds), set())
1053
+ if self.consequent is not None:
1054
+ atoms.update(self.consequent.atoms())
1055
+ return atoms
1056
+
1057
+ def clean(self):
1058
+ consequent = self.consequent.clean() if self.consequent else None
1059
+ return BoxerDrs(self.refs, [c.clean() for c in self.conds], consequent)
1060
+
1061
+ def renumber_sentences(self, f):
1062
+ consequent = self.consequent.renumber_sentences(f) if self.consequent else None
1063
+ return BoxerDrs(
1064
+ self.refs, [c.renumber_sentences(f) for c in self.conds], consequent
1065
+ )
1066
+
1067
+ def __repr__(self):
1068
+ s = "drs([{}], [{}])".format(
1069
+ ", ".join("%s" % r for r in self.refs),
1070
+ ", ".join("%s" % c for c in self.conds),
1071
+ )
1072
+ if self.consequent is not None:
1073
+ s = f"imp({s}, {self.consequent})"
1074
+ return s
1075
+
1076
+ def __eq__(self, other):
1077
+ return (
1078
+ self.__class__ == other.__class__
1079
+ and self.refs == other.refs
1080
+ and len(self.conds) == len(other.conds)
1081
+ and reduce(
1082
+ operator.and_, (c1 == c2 for c1, c2 in zip(self.conds, other.conds))
1083
+ )
1084
+ and self.consequent == other.consequent
1085
+ )
1086
+
1087
+ def __ne__(self, other):
1088
+ return not self == other
1089
+
1090
+ __hash__ = AbstractBoxerDrs.__hash__
1091
+
1092
+
1093
+ class BoxerNot(AbstractBoxerDrs):
1094
+ def __init__(self, drs):
1095
+ AbstractBoxerDrs.__init__(self)
1096
+ self.drs = drs
1097
+
1098
+ def _variables(self):
1099
+ return self.drs._variables()
1100
+
1101
+ def atoms(self):
1102
+ return self.drs.atoms()
1103
+
1104
+ def clean(self):
1105
+ return BoxerNot(self.drs.clean())
1106
+
1107
+ def renumber_sentences(self, f):
1108
+ return BoxerNot(self.drs.renumber_sentences(f))
1109
+
1110
+ def __repr__(self):
1111
+ return "not(%s)" % (self.drs)
1112
+
1113
+ def __eq__(self, other):
1114
+ return self.__class__ == other.__class__ and self.drs == other.drs
1115
+
1116
+ def __ne__(self, other):
1117
+ return not self == other
1118
+
1119
+ __hash__ = AbstractBoxerDrs.__hash__
1120
+
1121
+
1122
+ class BoxerIndexed(AbstractBoxerDrs):
1123
+ def __init__(self, discourse_id, sent_index, word_indices):
1124
+ AbstractBoxerDrs.__init__(self)
1125
+ self.discourse_id = discourse_id
1126
+ self.sent_index = sent_index
1127
+ self.word_indices = word_indices
1128
+
1129
+ def atoms(self):
1130
+ return {self}
1131
+
1132
+ def __eq__(self, other):
1133
+ return (
1134
+ self.__class__ == other.__class__
1135
+ and self.discourse_id == other.discourse_id
1136
+ and self.sent_index == other.sent_index
1137
+ and self.word_indices == other.word_indices
1138
+ and reduce(operator.and_, (s == o for s, o in zip(self, other)))
1139
+ )
1140
+
1141
+ def __ne__(self, other):
1142
+ return not self == other
1143
+
1144
+ __hash__ = AbstractBoxerDrs.__hash__
1145
+
1146
+ def __repr__(self):
1147
+ s = "{}({}, {}, [{}]".format(
1148
+ self._pred(),
1149
+ self.discourse_id,
1150
+ self.sent_index,
1151
+ ", ".join("%s" % wi for wi in self.word_indices),
1152
+ )
1153
+ for v in self:
1154
+ s += ", %s" % v
1155
+ return s + ")"
1156
+
1157
+
1158
+ class BoxerPred(BoxerIndexed):
1159
+ def __init__(self, discourse_id, sent_index, word_indices, var, name, pos, sense):
1160
+ BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
1161
+ self.var = var
1162
+ self.name = name
1163
+ self.pos = pos
1164
+ self.sense = sense
1165
+
1166
+ def _variables(self):
1167
+ return ({self.var}, set(), set())
1168
+
1169
+ def change_var(self, var):
1170
+ return BoxerPred(
1171
+ self.discourse_id,
1172
+ self.sent_index,
1173
+ self.word_indices,
1174
+ var,
1175
+ self.name,
1176
+ self.pos,
1177
+ self.sense,
1178
+ )
1179
+
1180
+ def clean(self):
1181
+ return BoxerPred(
1182
+ self.discourse_id,
1183
+ self.sent_index,
1184
+ self.word_indices,
1185
+ self.var,
1186
+ self._clean_name(self.name),
1187
+ self.pos,
1188
+ self.sense,
1189
+ )
1190
+
1191
+ def renumber_sentences(self, f):
1192
+ new_sent_index = f(self.sent_index)
1193
+ return BoxerPred(
1194
+ self.discourse_id,
1195
+ new_sent_index,
1196
+ self.word_indices,
1197
+ self.var,
1198
+ self.name,
1199
+ self.pos,
1200
+ self.sense,
1201
+ )
1202
+
1203
+ def __iter__(self):
1204
+ return iter((self.var, self.name, self.pos, self.sense))
1205
+
1206
+ def _pred(self):
1207
+ return "pred"
1208
+
1209
+
1210
+ class BoxerNamed(BoxerIndexed):
1211
+ def __init__(self, discourse_id, sent_index, word_indices, var, name, type, sense):
1212
+ BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
1213
+ self.var = var
1214
+ self.name = name
1215
+ self.type = type
1216
+ self.sense = sense
1217
+
1218
+ def _variables(self):
1219
+ return ({self.var}, set(), set())
1220
+
1221
+ def change_var(self, var):
1222
+ return BoxerNamed(
1223
+ self.discourse_id,
1224
+ self.sent_index,
1225
+ self.word_indices,
1226
+ var,
1227
+ self.name,
1228
+ self.type,
1229
+ self.sense,
1230
+ )
1231
+
1232
+ def clean(self):
1233
+ return BoxerNamed(
1234
+ self.discourse_id,
1235
+ self.sent_index,
1236
+ self.word_indices,
1237
+ self.var,
1238
+ self._clean_name(self.name),
1239
+ self.type,
1240
+ self.sense,
1241
+ )
1242
+
1243
+ def renumber_sentences(self, f):
1244
+ return BoxerNamed(
1245
+ self.discourse_id,
1246
+ f(self.sent_index),
1247
+ self.word_indices,
1248
+ self.var,
1249
+ self.name,
1250
+ self.type,
1251
+ self.sense,
1252
+ )
1253
+
1254
+ def __iter__(self):
1255
+ return iter((self.var, self.name, self.type, self.sense))
1256
+
1257
+ def _pred(self):
1258
+ return "named"
1259
+
1260
+
1261
+ class BoxerRel(BoxerIndexed):
1262
+ def __init__(self, discourse_id, sent_index, word_indices, var1, var2, rel, sense):
1263
+ BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
1264
+ self.var1 = var1
1265
+ self.var2 = var2
1266
+ self.rel = rel
1267
+ self.sense = sense
1268
+
1269
+ def _variables(self):
1270
+ return ({self.var1, self.var2}, set(), set())
1271
+
1272
+ def clean(self):
1273
+ return BoxerRel(
1274
+ self.discourse_id,
1275
+ self.sent_index,
1276
+ self.word_indices,
1277
+ self.var1,
1278
+ self.var2,
1279
+ self._clean_name(self.rel),
1280
+ self.sense,
1281
+ )
1282
+
1283
+ def renumber_sentences(self, f):
1284
+ return BoxerRel(
1285
+ self.discourse_id,
1286
+ f(self.sent_index),
1287
+ self.word_indices,
1288
+ self.var1,
1289
+ self.var2,
1290
+ self.rel,
1291
+ self.sense,
1292
+ )
1293
+
1294
+ def __iter__(self):
1295
+ return iter((self.var1, self.var2, self.rel, self.sense))
1296
+
1297
+ def _pred(self):
1298
+ return "rel"
1299
+
1300
+
1301
+ class BoxerProp(BoxerIndexed):
1302
+ def __init__(self, discourse_id, sent_index, word_indices, var, drs):
1303
+ BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
1304
+ self.var = var
1305
+ self.drs = drs
1306
+
1307
+ def _variables(self):
1308
+ return tuple(
1309
+ map(operator.or_, (set(), set(), {self.var}), self.drs._variables())
1310
+ )
1311
+
1312
+ def referenced_labels(self):
1313
+ return {self.drs}
1314
+
1315
+ def atoms(self):
1316
+ return self.drs.atoms()
1317
+
1318
+ def clean(self):
1319
+ return BoxerProp(
1320
+ self.discourse_id,
1321
+ self.sent_index,
1322
+ self.word_indices,
1323
+ self.var,
1324
+ self.drs.clean(),
1325
+ )
1326
+
1327
+ def renumber_sentences(self, f):
1328
+ return BoxerProp(
1329
+ self.discourse_id,
1330
+ f(self.sent_index),
1331
+ self.word_indices,
1332
+ self.var,
1333
+ self.drs.renumber_sentences(f),
1334
+ )
1335
+
1336
+ def __iter__(self):
1337
+ return iter((self.var, self.drs))
1338
+
1339
+ def _pred(self):
1340
+ return "prop"
1341
+
1342
+
1343
+ class BoxerEq(BoxerIndexed):
1344
+ def __init__(self, discourse_id, sent_index, word_indices, var1, var2):
1345
+ BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
1346
+ self.var1 = var1
1347
+ self.var2 = var2
1348
+
1349
+ def _variables(self):
1350
+ return ({self.var1, self.var2}, set(), set())
1351
+
1352
+ def atoms(self):
1353
+ return set()
1354
+
1355
+ def renumber_sentences(self, f):
1356
+ return BoxerEq(
1357
+ self.discourse_id,
1358
+ f(self.sent_index),
1359
+ self.word_indices,
1360
+ self.var1,
1361
+ self.var2,
1362
+ )
1363
+
1364
+ def __iter__(self):
1365
+ return iter((self.var1, self.var2))
1366
+
1367
+ def _pred(self):
1368
+ return "eq"
1369
+
1370
+
1371
+ class BoxerCard(BoxerIndexed):
1372
+ def __init__(self, discourse_id, sent_index, word_indices, var, value, type):
1373
+ BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
1374
+ self.var = var
1375
+ self.value = value
1376
+ self.type = type
1377
+
1378
+ def _variables(self):
1379
+ return ({self.var}, set(), set())
1380
+
1381
+ def renumber_sentences(self, f):
1382
+ return BoxerCard(
1383
+ self.discourse_id,
1384
+ f(self.sent_index),
1385
+ self.word_indices,
1386
+ self.var,
1387
+ self.value,
1388
+ self.type,
1389
+ )
1390
+
1391
+ def __iter__(self):
1392
+ return iter((self.var, self.value, self.type))
1393
+
1394
+ def _pred(self):
1395
+ return "card"
1396
+
1397
+
1398
+ class BoxerOr(BoxerIndexed):
1399
+ def __init__(self, discourse_id, sent_index, word_indices, drs1, drs2):
1400
+ BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
1401
+ self.drs1 = drs1
1402
+ self.drs2 = drs2
1403
+
1404
+ def _variables(self):
1405
+ return tuple(map(operator.or_, self.drs1._variables(), self.drs2._variables()))
1406
+
1407
+ def atoms(self):
1408
+ return self.drs1.atoms() | self.drs2.atoms()
1409
+
1410
+ def clean(self):
1411
+ return BoxerOr(
1412
+ self.discourse_id,
1413
+ self.sent_index,
1414
+ self.word_indices,
1415
+ self.drs1.clean(),
1416
+ self.drs2.clean(),
1417
+ )
1418
+
1419
+ def renumber_sentences(self, f):
1420
+ return BoxerOr(
1421
+ self.discourse_id,
1422
+ f(self.sent_index),
1423
+ self.word_indices,
1424
+ self.drs1,
1425
+ self.drs2,
1426
+ )
1427
+
1428
+ def __iter__(self):
1429
+ return iter((self.drs1, self.drs2))
1430
+
1431
+ def _pred(self):
1432
+ return "or"
1433
+
1434
+
1435
+ class BoxerWhq(BoxerIndexed):
1436
+ def __init__(
1437
+ self, discourse_id, sent_index, word_indices, ans_types, drs1, variable, drs2
1438
+ ):
1439
+ BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices)
1440
+ self.ans_types = ans_types
1441
+ self.drs1 = drs1
1442
+ self.variable = variable
1443
+ self.drs2 = drs2
1444
+
1445
+ def _variables(self):
1446
+ return tuple(
1447
+ map(
1448
+ operator.or_,
1449
+ ({self.variable}, set(), set()),
1450
+ self.drs1._variables(),
1451
+ self.drs2._variables(),
1452
+ )
1453
+ )
1454
+
1455
+ def atoms(self):
1456
+ return self.drs1.atoms() | self.drs2.atoms()
1457
+
1458
+ def clean(self):
1459
+ return BoxerWhq(
1460
+ self.discourse_id,
1461
+ self.sent_index,
1462
+ self.word_indices,
1463
+ self.ans_types,
1464
+ self.drs1.clean(),
1465
+ self.variable,
1466
+ self.drs2.clean(),
1467
+ )
1468
+
1469
+ def renumber_sentences(self, f):
1470
+ return BoxerWhq(
1471
+ self.discourse_id,
1472
+ f(self.sent_index),
1473
+ self.word_indices,
1474
+ self.ans_types,
1475
+ self.drs1,
1476
+ self.variable,
1477
+ self.drs2,
1478
+ )
1479
+
1480
+ def __iter__(self):
1481
+ return iter(
1482
+ ("[" + ",".join(self.ans_types) + "]", self.drs1, self.variable, self.drs2)
1483
+ )
1484
+
1485
+ def _pred(self):
1486
+ return "whq"
1487
+
1488
+
1489
+ class PassthroughBoxerDrsInterpreter:
1490
+ def interpret(self, ex):
1491
+ return ex
1492
+
1493
+
1494
+ class NltkDrtBoxerDrsInterpreter:
1495
+ def __init__(self, occur_index=False):
1496
+ self._occur_index = occur_index
1497
+
1498
+ def interpret(self, ex):
1499
+ """
1500
+ :param ex: ``AbstractBoxerDrs``
1501
+ :return: ``DrtExpression``
1502
+ """
1503
+ if isinstance(ex, BoxerDrs):
1504
+ drs = DRS(
1505
+ [Variable(r) for r in ex.refs], list(map(self.interpret, ex.conds))
1506
+ )
1507
+ if ex.consequent is not None:
1508
+ drs.consequent = self.interpret(ex.consequent)
1509
+ return drs
1510
+ elif isinstance(ex, BoxerNot):
1511
+ return DrtNegatedExpression(self.interpret(ex.drs))
1512
+ elif isinstance(ex, BoxerPred):
1513
+ pred = self._add_occur_indexing(f"{ex.pos}_{ex.name}", ex)
1514
+ return self._make_atom(pred, ex.var)
1515
+ elif isinstance(ex, BoxerNamed):
1516
+ pred = self._add_occur_indexing(f"ne_{ex.type}_{ex.name}", ex)
1517
+ return self._make_atom(pred, ex.var)
1518
+ elif isinstance(ex, BoxerRel):
1519
+ pred = self._add_occur_indexing("%s" % (ex.rel), ex)
1520
+ return self._make_atom(pred, ex.var1, ex.var2)
1521
+ elif isinstance(ex, BoxerProp):
1522
+ return DrtProposition(Variable(ex.var), self.interpret(ex.drs))
1523
+ elif isinstance(ex, BoxerEq):
1524
+ return DrtEqualityExpression(
1525
+ DrtVariableExpression(Variable(ex.var1)),
1526
+ DrtVariableExpression(Variable(ex.var2)),
1527
+ )
1528
+ elif isinstance(ex, BoxerCard):
1529
+ pred = self._add_occur_indexing(f"card_{ex.type}_{ex.value}", ex)
1530
+ return self._make_atom(pred, ex.var)
1531
+ elif isinstance(ex, BoxerOr):
1532
+ return DrtOrExpression(self.interpret(ex.drs1), self.interpret(ex.drs2))
1533
+ elif isinstance(ex, BoxerWhq):
1534
+ drs1 = self.interpret(ex.drs1)
1535
+ drs2 = self.interpret(ex.drs2)
1536
+ return DRS(drs1.refs + drs2.refs, drs1.conds + drs2.conds)
1537
+ assert False, f"{ex.__class__.__name__}: {ex}"
1538
+
1539
+ def _make_atom(self, pred, *args):
1540
+ accum = DrtVariableExpression(Variable(pred))
1541
+ for arg in args:
1542
+ accum = DrtApplicationExpression(
1543
+ accum, DrtVariableExpression(Variable(arg))
1544
+ )
1545
+ return accum
1546
+
1547
+ def _add_occur_indexing(self, base, ex):
1548
+ if self._occur_index and ex.sent_index is not None:
1549
+ if ex.discourse_id:
1550
+ base += "_%s" % ex.discourse_id
1551
+ base += "_s%s" % ex.sent_index
1552
+ base += "_w%s" % sorted(ex.word_indices)[0]
1553
+ return base
1554
+
1555
+
1556
+ class UnparseableInputException(Exception):
1557
+ pass
1558
+
1559
+
1560
+ if __name__ == "__main__":
1561
+ opts = OptionParser("usage: %prog TEXT [options]")
1562
+ opts.add_option(
1563
+ "--verbose",
1564
+ "-v",
1565
+ help="display verbose logs",
1566
+ action="store_true",
1567
+ default=False,
1568
+ dest="verbose",
1569
+ )
1570
+ opts.add_option(
1571
+ "--fol", "-f", help="output FOL", action="store_true", default=False, dest="fol"
1572
+ )
1573
+ opts.add_option(
1574
+ "--question",
1575
+ "-q",
1576
+ help="input is a question",
1577
+ action="store_true",
1578
+ default=False,
1579
+ dest="question",
1580
+ )
1581
+ opts.add_option(
1582
+ "--occur",
1583
+ "-o",
1584
+ help="occurrence index",
1585
+ action="store_true",
1586
+ default=False,
1587
+ dest="occur_index",
1588
+ )
1589
+ (options, args) = opts.parse_args()
1590
+
1591
+ if len(args) != 1:
1592
+ opts.error("incorrect number of arguments")
1593
+
1594
+ interpreter = NltkDrtBoxerDrsInterpreter(occur_index=options.occur_index)
1595
+ drs = Boxer(interpreter).interpret_multi(
1596
+ args[0].split(r"\n"), question=options.question, verbose=options.verbose
1597
+ )
1598
+ if drs is None:
1599
+ print(None)
1600
+ else:
1601
+ drs = drs.simplify().eliminate_equality()
1602
+ if options.fol:
1603
+ print(drs.fol().normalize())
1604
+ else:
1605
+ drs.pretty_print()
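
To ground the interface added above, here is a minimal usage sketch (not part of the committed boxer.py). It assumes a working local C&C/Boxer installation with the CANDC environment variable pointing at its bin directory, as the module docstring describes; the install path and example sentences below are placeholders.

    import os
    from nltk.sem.boxer import Boxer, NltkDrtBoxerDrsInterpreter

    # Assumption: placeholder path to a local C&C/Boxer installation's bin directory.
    os.environ["CANDC"] = "/path/to/candc/bin"

    # Interpret one sentence into an NLTK DRT expression (a DRS).
    boxer = Boxer(NltkDrtBoxerDrsInterpreter(occur_index=False), resolve=True)
    drs = boxer.interpret("John saw a dog.")
    drs.pretty_print()

    # Several sentences can be parsed as a single discourse and converted to FOL.
    discourse = boxer.interpret_multi(["A dog barks.", "It runs."])
    print(discourse.simplify().fol().normalize())

Both calls shell out to the external candc and boxer binaries, so the sketch only runs where those executables are installed and reachable via CANDC.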
venv/lib/python3.10/site-packages/nltk/sem/chat80.py ADDED
@@ -0,0 +1,857 @@
1
+ # Natural Language Toolkit: Chat-80 KB Reader
2
+ # See https://www.w3.org/TR/swbp-skos-core-guide/
3
+ #
4
+ # Copyright (C) 2001-2023 NLTK Project
5
+ # Author: Ewan Klein <[email protected]>,
6
+ # URL: <https://www.nltk.org>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ r"""
10
+ Overview
11
+ ========
12
+
13
+ Chat-80 was a natural language system which allowed the user to
14
+ interrogate a Prolog knowledge base in the domain of world
15
+ geography. It was developed in the early '80s by Warren and Pereira; see
16
+ ``https://www.aclweb.org/anthology/J82-3002.pdf`` for a description and
17
+ ``http://www.cis.upenn.edu/~pereira/oldies.html`` for the source
18
+ files.
19
+
20
+ This module contains functions to extract data from the Chat-80
21
+ relation files ('the world database'), and convert them into a format
22
+ that can be incorporated in the FOL models of
23
+ ``nltk.sem.evaluate``. The code assumes that the Prolog
24
+ input files are available in the NLTK corpora directory.
25
+
26
+ The Chat-80 World Database consists of the following files::
27
+
28
+ world0.pl
29
+ rivers.pl
30
+ cities.pl
31
+ countries.pl
32
+ contain.pl
33
+ borders.pl
34
+
35
+ This module uses a slightly modified version of ``world0.pl``, in which
36
+ a set of Prolog rules have been omitted. The modified file is named
37
+ ``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since
38
+ it uses a list rather than a string in the second field.
39
+
40
+ Reading Chat-80 Files
41
+ =====================
42
+
43
+ Chat-80 relations are like tables in a relational database. The
44
+ relation acts as the name of the table; the first argument acts as the
45
+ 'primary key'; and subsequent arguments are further fields in the
46
+ table. In general, the name of the table provides a label for a unary
47
+ predicate whose extension is all the primary keys. For example,
48
+ relations in ``cities.pl`` are of the following form::
49
+
50
+ 'city(athens,greece,1368).'
51
+
52
+ Here, ``'athens'`` is the key, and will be mapped to a member of the
53
+ unary predicate *city*.
54
+
55
+ The fields in the table are mapped to binary predicates. The first
56
+ argument of the predicate is the primary key, while the second
57
+ argument is the data in the relevant field. Thus, in the above
58
+ example, the third field is mapped to the binary predicate
59
+ *population_of*, whose extension is a set of pairs such as
60
+ ``'(athens, 1368)'``.
61
+
62
+ An exception to this general framework is required by the relations in
63
+ the files ``borders.pl`` and ``contains.pl``. These contain facts of the
64
+ following form::
65
+
66
+ 'borders(albania,greece).'
67
+
68
+ 'contains0(africa,central_africa).'
69
+
70
+ We do not want to form a unary concept out of the element in
71
+ the first field of these records, and we want the label of the binary
72
+ relation just to be ``'border'``/``'contain'`` respectively.
73
+
74
+ In order to drive the extraction process, we use 'relation metadata bundles'
75
+ which are Python dictionaries such as the following::
76
+
77
+ city = {'label': 'city',
78
+ 'closures': [],
79
+ 'schema': ['city', 'country', 'population'],
80
+ 'filename': 'cities.pl'}
81
+
82
+ According to this, the file ``city['filename']`` contains a list of
83
+ relational tuples (or more accurately, the corresponding strings in
84
+ Prolog form) whose predicate symbol is ``city['label']`` and whose
85
+ relational schema is ``city['schema']``. The notion of a ``closure`` is
86
+ discussed in the next section.
87
+
88
+ Concepts
89
+ ========
90
+ In order to encapsulate the results of the extraction, a class of
91
+ ``Concept`` objects is introduced. A ``Concept`` object has a number of
92
+ attributes, in particular a ``prefLabel`` and ``extension``, which make
93
+ it easier to inspect the output of the extraction. In addition, the
94
+ ``extension`` can be further processed: in the case of the ``'border'``
95
+ relation, we check that the relation is symmetric, and in the case
96
+ of the ``'contain'`` relation, we carry out the transitive
97
+ closure. The closure properties associated with a concept are
99
+ indicated in the relation metadata, as noted earlier.
99
+
100
+ The ``extension`` of a ``Concept`` object is then incorporated into a
101
+ ``Valuation`` object.
102
+
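+ For instance, the symmetric closure can be exercised directly on a toy
+ two-place ``Concept`` (the label and the single pair below are purely
+ illustrative)::
+
+     from nltk.sem.chat80 import Concept
+
+     c = Concept('border', arity=2, closures=['symmetric'],
+                 extension={('albania', 'greece')})
+     c.close()
+     print(c.extension)
+     # [('albania', 'greece'), ('greece', 'albania')]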
103
+ Persistence
104
+ ===========
105
+ The functions ``val_dump`` and ``val_load`` are provided to allow a
106
+ valuation to be stored in a persistent database and re-loaded, rather
107
+ than having to be re-computed each time.
108
+
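+ A minimal sketch of the round trip (the database name ``'world'`` is just
+ an example; the Chat-80 corpus files must be installed)::
+
+     from nltk.sem import chat80
+
+     chat80.val_dump(chat80.rels, 'world')   # writes 'world.db'
+     val = chat80.val_load('world')          # re-loads the Valuation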
109
+ Individuals and Lexical Items
110
+ =============================
111
+ As well as deriving relations from the Chat-80 data, we also create a
112
+ set of individual constants, one for each entity in the domain. The
113
+ individual constants are string-identical to the entities. For
114
+ example, given a data item such as ``'zloty'``, we add to the valuation
115
+ a pair ``('zloty', 'zloty')``. In order to parse English sentences that
116
+ refer to these entities, we also create a lexical item such as the
117
+ following for each individual constant::
118
+
119
+ PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty'
120
+
121
+ The set of rules is written to the file ``chat_pnames.cfg`` in the
122
+ current directory.
123
+
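+ A small sketch of the rule generation (``make_lex`` is defined later in
+ this module; ``'zloty'`` is the example entity from above)::
+
+     from nltk.sem import chat80
+
+     rules = chat80.make_lex(['zloty'])
+     # the last entry is the rule:
+     # PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty'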
124
+ """
125
+
126
+ import os
127
+ import re
128
+ import shelve
129
+ import sys
130
+
131
+ import nltk.data
132
+
133
+ ###########################################################################
134
+ # Chat-80 relation metadata bundles needed to build the valuation
135
+ ###########################################################################
136
+
137
+ borders = {
138
+ "rel_name": "borders",
139
+ "closures": ["symmetric"],
140
+ "schema": ["region", "border"],
141
+ "filename": "borders.pl",
142
+ }
143
+
144
+ contains = {
145
+ "rel_name": "contains0",
146
+ "closures": ["transitive"],
147
+ "schema": ["region", "contain"],
148
+ "filename": "contain.pl",
149
+ }
150
+
151
+ city = {
152
+ "rel_name": "city",
153
+ "closures": [],
154
+ "schema": ["city", "country", "population"],
155
+ "filename": "cities.pl",
156
+ }
157
+
158
+ country = {
159
+ "rel_name": "country",
160
+ "closures": [],
161
+ "schema": [
162
+ "country",
163
+ "region",
164
+ "latitude",
165
+ "longitude",
166
+ "area",
167
+ "population",
168
+ "capital",
169
+ "currency",
170
+ ],
171
+ "filename": "countries.pl",
172
+ }
173
+
174
+ circle_of_lat = {
175
+ "rel_name": "circle_of_latitude",
176
+ "closures": [],
177
+ "schema": ["circle_of_latitude", "degrees"],
178
+ "filename": "world1.pl",
179
+ }
180
+
181
+ circle_of_long = {
182
+ "rel_name": "circle_of_longitude",
183
+ "closures": [],
184
+ "schema": ["circle_of_longitude", "degrees"],
185
+ "filename": "world1.pl",
186
+ }
187
+
188
+ continent = {
189
+ "rel_name": "continent",
190
+ "closures": [],
191
+ "schema": ["continent"],
192
+ "filename": "world1.pl",
193
+ }
194
+
195
+ region = {
196
+ "rel_name": "in_continent",
197
+ "closures": [],
198
+ "schema": ["region", "continent"],
199
+ "filename": "world1.pl",
200
+ }
201
+
202
+ ocean = {
203
+ "rel_name": "ocean",
204
+ "closures": [],
205
+ "schema": ["ocean"],
206
+ "filename": "world1.pl",
207
+ }
208
+
209
+ sea = {"rel_name": "sea", "closures": [], "schema": ["sea"], "filename": "world1.pl"}
210
+
211
+
212
+ items = [
213
+ "borders",
214
+ "contains",
215
+ "city",
216
+ "country",
217
+ "circle_of_lat",
218
+ "circle_of_long",
219
+ "continent",
220
+ "region",
221
+ "ocean",
222
+ "sea",
223
+ ]
224
+ items = tuple(sorted(items))
225
+
226
+ item_metadata = {
227
+ "borders": borders,
228
+ "contains": contains,
229
+ "city": city,
230
+ "country": country,
231
+ "circle_of_lat": circle_of_lat,
232
+ "circle_of_long": circle_of_long,
233
+ "continent": continent,
234
+ "region": region,
235
+ "ocean": ocean,
236
+ "sea": sea,
237
+ }
238
+
239
+ rels = item_metadata.values()
240
+
241
+ not_unary = ["borders.pl", "contain.pl"]
242
+
243
+ ###########################################################################
244
+
245
+
246
+ class Concept:
247
+ """
248
+ A Concept class, loosely based on SKOS
249
+ (https://www.w3.org/TR/swbp-skos-core-guide/).
250
+ """
251
+
252
+ def __init__(self, prefLabel, arity, altLabels=[], closures=[], extension=set()):
253
+ """
254
+ :param prefLabel: the preferred label for the concept
255
+ :type prefLabel: str
256
+ :param arity: the arity of the concept
257
+ :type arity: int
258
+ :param altLabels: other (related) labels
259
+ :type altLabels: list
260
+ :param closures: closure properties of the extension
261
+ (list items can be ``symmetric``, ``reflexive``, ``transitive``)
262
+ :type closures: list
263
+ :param extension: the extensional value of the concept
264
+ :type extension: set
265
+ """
266
+ self.prefLabel = prefLabel
267
+ self.arity = arity
268
+ self.altLabels = altLabels
269
+ self.closures = closures
270
+ # keep _extension internally as a set
271
+ self._extension = extension
272
+ # public access is via a list (for slicing)
273
+ self.extension = sorted(list(extension))
274
+
275
+ def __str__(self):
276
+ # _extension = ''
277
+ # for element in sorted(self.extension):
278
+ # if isinstance(element, tuple):
279
+ # element = '(%s, %s)' % (element)
280
+ # _extension += element + ', '
281
+ # _extension = _extension[:-1]
282
+
283
+ return "Label = '{}'\nArity = {}\nExtension = {}".format(
284
+ self.prefLabel,
285
+ self.arity,
286
+ self.extension,
287
+ )
288
+
289
+ def __repr__(self):
290
+ return "Concept('%s')" % self.prefLabel
291
+
292
+ def augment(self, data):
293
+ """
294
+ Add more data to the ``Concept``'s extension set.
295
+
296
+ :param data: a new semantic value
297
+ :type data: string or pair of strings
298
+ :rtype: set
299
+
300
+ """
301
+ self._extension.add(data)
302
+ self.extension = sorted(list(self._extension))
303
+ return self._extension
304
+
305
+ def _make_graph(self, s):
306
+ """
307
+ Convert a set of pairs into an adjacency linked list encoding of a graph.
308
+ """
309
+ g = {}
310
+ for (x, y) in s:
311
+ if x in g:
312
+ g[x].append(y)
313
+ else:
314
+ g[x] = [y]
315
+ return g
316
+
317
+ def _transclose(self, g):
318
+ """
319
+ Compute the transitive closure of a graph represented as a linked list.
320
+ """
321
+ for x in g:
322
+ for adjacent in g[x]:
323
+ # check that adjacent is a key
324
+ if adjacent in g:
325
+ for y in g[adjacent]:
326
+ if y not in g[x]:
327
+ g[x].append(y)
328
+ return g
329
+
330
+ def _make_pairs(self, g):
331
+ """
332
+ Convert an adjacency linked list back into a set of pairs.
333
+ """
334
+ pairs = []
335
+ for node in g:
336
+ for adjacent in g[node]:
337
+ pairs.append((node, adjacent))
338
+ return set(pairs)
339
+
340
+ def close(self):
341
+ """
342
+ Close a binary relation in the ``Concept``'s extension set.
343
+
344
+ :return: a new extension for the ``Concept`` in which the
345
+ relation is closed under a given property
346
+ """
347
+ from nltk.sem import is_rel
348
+
349
+ assert is_rel(self._extension)
350
+ if "symmetric" in self.closures:
351
+ pairs = []
352
+ for (x, y) in self._extension:
353
+ pairs.append((y, x))
354
+ sym = set(pairs)
355
+ self._extension = self._extension.union(sym)
356
+ if "transitive" in self.closures:
357
+ graph = self._make_graph(self._extension)
358
+ closed = self._transclose(graph)
359
+ trans = self._make_pairs(closed)
360
+ self._extension = self._extension.union(trans)
361
+ self.extension = sorted(list(self._extension))
362
+
363
+
364
+ def clause2concepts(filename, rel_name, schema, closures=[]):
365
+ """
366
+ Convert a file of Prolog clauses into a list of ``Concept`` objects.
367
+
368
+ :param filename: filename containing the relations
369
+ :type filename: str
370
+ :param rel_name: name of the relation
371
+ :type rel_name: str
372
+ :param schema: the schema used in a set of relational tuples
373
+ :type schema: list
374
+ :param closures: closure properties for the extension of the concept
375
+ :type closures: list
376
+ :return: a list of ``Concept`` objects
377
+ :rtype: list
378
+ """
379
+ concepts = []
380
+ # position of the subject of a binary relation
381
+ subj = 0
382
+ # label of the 'primary key'
383
+ pkey = schema[0]
384
+ # fields other than the primary key
385
+ fields = schema[1:]
386
+
387
+ # convert a file into a list of lists
388
+ records = _str2records(filename, rel_name)
389
+
390
+ # add a unary concept corresponding to the set of entities
391
+ # in the primary key position
392
+ # relations in 'not_unary' are more like ordinary binary relations
393
+ if filename not in not_unary:
394
+ concepts.append(unary_concept(pkey, subj, records))
395
+
396
+ # add a binary concept for each non-key field
397
+ for field in fields:
398
+ obj = schema.index(field)
399
+ concepts.append(binary_concept(field, closures, subj, obj, records))
400
+
401
+ return concepts
402
+
403
+
404
+ def cities2table(filename, rel_name, dbname, verbose=False, setup=False):
405
+ """
406
+ Convert a file of Prolog clauses into a database table.
407
+
408
+ This is not generic, since it doesn't allow arbitrary
409
+ schemas to be set as a parameter.
410
+
411
+ Intended usage::
412
+
413
+ cities2table('cities.pl', 'city', 'city.db', verbose=True, setup=True)
414
+
415
+ :param filename: filename containing the relations
416
+ :type filename: str
417
+ :param rel_name: name of the relation
418
+ :type rel_name: str
419
+ :param dbname: filename of persistent store
420
+ :type dbname: str
421
+ """
422
+ import sqlite3
423
+
424
+ records = _str2records(filename, rel_name)
425
+ connection = sqlite3.connect(dbname)
426
+ cur = connection.cursor()
427
+ if setup:
428
+ cur.execute(
429
+ """CREATE TABLE city_table
430
+ (City text, Country text, Population int)"""
431
+ )
432
+
433
+ table_name = "city_table"
434
+ for t in records:
435
+ cur.execute("insert into %s values (?,?,?)" % table_name, t)
436
+ if verbose:
437
+ print("inserting values into %s: " % table_name, t)
438
+ connection.commit()
439
+ if verbose:
440
+ print("Committing update to %s" % dbname)
441
+ cur.close()
442
+
443
+
444
+ def sql_query(dbname, query):
445
+ """
446
+ Execute an SQL query over a database.
447
+ :param dbname: filename of persistent store
448
+ :type dbname: str
449
+ :param query: SQL query
450
+ :type query: str
451
+ """
452
+ import sqlite3
453
+
454
+ try:
455
+ path = nltk.data.find(dbname)
456
+ connection = sqlite3.connect(str(path))
457
+ cur = connection.cursor()
458
+ return cur.execute(query)
459
+ except (ValueError, sqlite3.OperationalError):
460
+ import warnings
461
+
462
+ warnings.warn(
463
+ "Make sure the database file %s is installed and uncompressed." % dbname
464
+ )
465
+ raise
466
+
467
+
468
+ def _str2records(filename, rel):
469
+ """
470
+ Read a file into memory and convert each relation clause into a list.
471
+ """
472
+ recs = []
473
+ contents = nltk.data.load("corpora/chat80/%s" % filename, format="text")
474
+ for line in contents.splitlines():
475
+ if line.startswith(rel):
476
+ line = re.sub(rel + r"\(", "", line)
477
+ line = re.sub(r"\)\.$", "", line)
478
+ record = line.split(",")
479
+ recs.append(record)
480
+ return recs
481
+
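+ # Illustrative sketch only (not part of the original module): how a single
+ # Chat-80 clause string is turned into a record by the substitutions above.
+ def _str2records_example():
+     line = "city(athens,greece,1368)."
+     line = re.sub(r"city\(", "", line)  # strip the predicate name and '('
+     line = re.sub(r"\)\.$", "", line)   # strip the trailing ').'
+     return line.split(",")              # ['athens', 'greece', '1368']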
482
+
483
+ def unary_concept(label, subj, records):
484
+ """
485
+ Make a unary concept out of the primary key in a record.
486
+
487
+ A record is a list of entities in some relation, such as
488
+ ``['france', 'paris']``, where ``'france'`` is acting as the primary
489
+ key.
490
+
491
+ :param label: the preferred label for the concept
492
+ :type label: string
493
+ :param subj: position in the record of the subject of the predicate
494
+ :type subj: int
495
+ :param records: a list of records
496
+ :type records: list of lists
497
+ :return: ``Concept`` of arity 1
498
+ :rtype: Concept
499
+ """
500
+ c = Concept(label, arity=1, extension=set())
501
+ for record in records:
502
+ c.augment(record[subj])
503
+ return c
504
+
505
+
506
+ def binary_concept(label, closures, subj, obj, records):
507
+ """
508
+ Make a binary concept out of the primary key and another field in a record.
509
+
510
+ A record is a list of entities in some relation, such as
511
+ ``['france', 'paris']``, where ``'france'`` is acting as the primary
512
+ key, and ``'paris'`` stands in the ``'capital_of'`` relation to
513
+ ``'france'``.
514
+
515
+ More generally, given a record such as ``['a', 'b', 'c']``, where
516
+ label is bound to ``'B'``, and ``obj`` bound to 1, the derived
517
+ binary concept will have label ``'B_of'``, and its extension will
518
+ be a set of pairs such as ``('a', 'b')``.
519
+
520
+
521
+ :param label: the base part of the preferred label for the concept
522
+ :type label: str
523
+ :param closures: closure properties for the extension of the concept
524
+ :type closures: list
525
+ :param subj: position in the record of the subject of the predicate
526
+ :type subj: int
527
+ :param obj: position in the record of the object of the predicate
528
+ :type obj: int
529
+ :param records: a list of records
530
+ :type records: list of lists
531
+ :return: ``Concept`` of arity 2
532
+ :rtype: Concept
533
+ """
534
+ if label not in ("border", "contain"):
535
+ label = label + "_of"
536
+ c = Concept(label, arity=2, closures=closures, extension=set())
537
+ for record in records:
538
+ c.augment((record[subj], record[obj]))
539
+ # close the concept's extension according to the properties in closures
540
+ c.close()
541
+ return c
542
+
543
+
544
+ def process_bundle(rels):
545
+ """
546
+ Given a list of relation metadata bundles, make a corresponding
547
+ dictionary of concepts, indexed by the relation name.
548
+
549
+ :param rels: bundle of metadata needed for constructing a concept
550
+ :type rels: list(dict)
551
+ :return: a dictionary of concepts, indexed by the relation name.
552
+ :rtype: dict(str): Concept
553
+ """
554
+ concepts = {}
555
+ for rel in rels:
556
+ rel_name = rel["rel_name"]
557
+ closures = rel["closures"]
558
+ schema = rel["schema"]
559
+ filename = rel["filename"]
560
+
561
+ concept_list = clause2concepts(filename, rel_name, schema, closures)
562
+ for c in concept_list:
563
+ label = c.prefLabel
564
+ if label in concepts:
565
+ for data in c.extension:
566
+ concepts[label].augment(data)
567
+ concepts[label].close()
568
+ else:
569
+ concepts[label] = c
570
+ return concepts
571
+
572
+
573
+ def make_valuation(concepts, read=False, lexicon=False):
574
+ """
575
+ Convert a list of ``Concept`` objects into a list of (label, extension) pairs;
576
+ optionally create a ``Valuation`` object.
577
+
578
+ :param concepts: concepts
579
+ :type concepts: list(Concept)
580
+ :param read: if ``True``, ``(symbol, set)`` pairs are read into a ``Valuation``
581
+ :type read: bool
+ :param lexicon: if ``True``, also write out proper-name lexical rules
+ for the individual constants (this implies ``read=True``)
+ :type lexicon: bool
582
+ :rtype: list or Valuation
583
+ """
584
+ vals = []
585
+
586
+ for c in concepts:
587
+ vals.append((c.prefLabel, c.extension))
588
+ if lexicon:
589
+ read = True
590
+ if read:
591
+ from nltk.sem import Valuation
592
+
593
+ val = Valuation({})
594
+ val.update(vals)
595
+ # add labels for individuals
596
+ val = label_indivs(val, lexicon=lexicon)
597
+ return val
598
+ else:
599
+ return vals
600
+
601
+
602
+ def val_dump(rels, db):
603
+ """
604
+ Make a ``Valuation`` from a list of relation metadata bundles and dump to
605
+ persistent database.
606
+
607
+ :param rels: bundle of metadata needed for constructing a concept
608
+ :type rels: list of dict
609
+ :param db: name of file to which data is written.
610
+ The suffix '.db' will be automatically appended.
611
+ :type db: str
612
+ """
613
+ concepts = process_bundle(rels).values()
614
+ valuation = make_valuation(concepts, read=True)
615
+ db_out = shelve.open(db, "n")
616
+
617
+ db_out.update(valuation)
618
+
619
+ db_out.close()
620
+
621
+
622
+ def val_load(db):
623
+ """
624
+ Load a ``Valuation`` from a persistent database.
625
+
626
+ :param db: name of file from which data is read.
627
+ The suffix '.db' should be omitted from the name.
628
+ :type db: str
629
+ """
630
+ dbname = db + ".db"
631
+
632
+ if not os.access(dbname, os.R_OK):
633
+ sys.exit("Cannot read file: %s" % dbname)
634
+ else:
635
+ db_in = shelve.open(db)
636
+ from nltk.sem import Valuation
637
+
638
+ val = Valuation(db_in)
639
+ # val.read(db_in.items())
640
+ return val
641
+
642
+
643
+ # def alpha(str):
644
+ # """
645
+ # Utility to filter out non-alphabetic constants.
646
+
647
+ #:param str: candidate constant
648
+ #:type str: string
649
+ #:rtype: bool
650
+ # """
651
+ # try:
652
+ # int(str)
653
+ # return False
654
+ # except ValueError:
655
+ ## some unknown values in records are labeled '?'
656
+ # if not str == '?':
657
+ # return True
658
+
659
+
660
+ def label_indivs(valuation, lexicon=False):
661
+ """
662
+ Assign individual constants to the individuals in the domain of a ``Valuation``.
663
+
664
+ Given a valuation with an entry of the form ``{'rel': {'a': True}}``,
665
+ add a new entry ``{'a': 'a'}``.
666
+
667
+ :type valuation: Valuation
668
+ :rtype: Valuation
669
+ """
670
+ # collect all the individuals into a domain
671
+ domain = valuation.domain
672
+ # pair each individual with itself,
673
+ # use the same string as a label
674
+ pairs = [(e, e) for e in domain]
675
+ if lexicon:
676
+ lex = make_lex(domain)
677
+ with open("chat_pnames.cfg", "w") as outfile:
678
+ outfile.writelines(lex)
679
+ # read the pairs into the valuation
680
+ valuation.update(pairs)
681
+ return valuation
682
+
683
+
684
+ def make_lex(symbols):
685
+ """
686
+ Create lexical CFG rules for each individual symbol.
687
+
688
+ Given a valuation with an entry of the form ``{'zloty': 'zloty'}``,
689
+ create a lexical rule for the proper name 'Zloty'.
690
+
691
+ :param symbols: a list of individual constants in the semantic representation
692
+ :type symbols: sequence -- set(str)
693
+ :rtype: list(str)
694
+ """
695
+ lex = []
696
+ header = """
697
+ ##################################################################
698
+ # Lexical rules automatically generated by running 'chat80.py -x'.
699
+ ##################################################################
700
+
701
+ """
702
+ lex.append(header)
703
+ template = r"PropN[num=sg, sem=<\P.(P %s)>] -> '%s'\n"
704
+
705
+ for s in symbols:
706
+ parts = s.split("_")
707
+ caps = [p.capitalize() for p in parts]
708
+ pname = "_".join(caps)
709
+ rule = template % (s, pname)
710
+ lex.append(rule)
711
+ return lex
712
+
713
+
714
+ ###########################################################################
715
+ # Interface function to emulate other corpus readers
716
+ ###########################################################################
717
+
718
+
719
+ def concepts(items=items):
720
+ """
721
+ Build a list of concepts corresponding to the relation names in ``items``.
722
+
723
+ :param items: names of the Chat-80 relations to extract
724
+ :type items: list(str)
725
+ :return: the ``Concept`` objects which are extracted from the relations
726
+ :rtype: list(Concept)
727
+ """
728
+ if isinstance(items, str):
729
+ items = (items,)
730
+
731
+ rels = [item_metadata[r] for r in items]
732
+
733
+ concept_map = process_bundle(rels)
734
+ return concept_map.values()
735
+
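+ # The following demo function is an illustrative sketch, not part of the
+ # original interface; it assumes the Chat-80 corpus files are installed.
+ def concepts_demo():
+     """Print the label and arity of the concepts extracted for two relations."""
+     for c in concepts(["city", "country"]):
+         print(c.prefLabel, c.arity)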
736
+
737
+ ###########################################################################
738
+
739
+
740
+ def main():
741
+ import sys
742
+ from optparse import OptionParser
743
+
744
+ description = """
745
+ Extract data from the Chat-80 Prolog files and convert them into a
746
+ Valuation object for use in the NLTK semantics package.
747
+ """
748
+
749
+ opts = OptionParser(description=description)
750
+ opts.set_defaults(verbose=True, lex=False, vocab=False)
751
+ opts.add_option(
752
+ "-s", "--store", dest="outdb", help="store a valuation in DB", metavar="DB"
753
+ )
754
+ opts.add_option(
755
+ "-l",
756
+ "--load",
757
+ dest="indb",
758
+ help="load a stored valuation from DB",
759
+ metavar="DB",
760
+ )
761
+ opts.add_option(
762
+ "-c",
763
+ "--concepts",
764
+ action="store_true",
765
+ help="print concepts instead of a valuation",
766
+ )
767
+ opts.add_option(
768
+ "-r",
769
+ "--relation",
770
+ dest="label",
771
+ help="print concept with label REL (check possible labels with '-v' option)",
772
+ metavar="REL",
773
+ )
774
+ opts.add_option(
775
+ "-q",
776
+ "--quiet",
777
+ action="store_false",
778
+ dest="verbose",
779
+ help="don't print out progress info",
780
+ )
781
+ opts.add_option(
782
+ "-x",
783
+ "--lex",
784
+ action="store_true",
785
+ dest="lex",
786
+ help="write a file of lexical entries for country names, then exit",
787
+ )
788
+ opts.add_option(
789
+ "-v",
790
+ "--vocab",
791
+ action="store_true",
792
+ dest="vocab",
793
+ help="print out the vocabulary of concept labels and their arity, then exit",
794
+ )
795
+
796
+ (options, args) = opts.parse_args()
797
+ if options.outdb and options.indb:
798
+ opts.error("Options --store and --load are mutually exclusive")
799
+
800
+ if options.outdb:
801
+ # write the valuation to a persistent database
802
+ if options.verbose:
803
+ outdb = options.outdb + ".db"
804
+ print("Dumping a valuation to %s" % outdb)
805
+ val_dump(rels, options.outdb)
806
+ sys.exit(0)
807
+ else:
808
+ # try to read in a valuation from a database
809
+ if options.indb is not None:
810
+ dbname = options.indb + ".db"
811
+ if not os.access(dbname, os.R_OK):
812
+ sys.exit("Cannot read file: %s" % dbname)
813
+ else:
814
+ valuation = val_load(options.indb)
815
+ # we need to create the valuation from scratch
816
+ else:
817
+ # build some concepts
818
+ concept_map = process_bundle(rels)
819
+ concepts = concept_map.values()
820
+ # just print out the vocabulary
821
+ if options.vocab:
822
+ items = sorted((c.arity, c.prefLabel) for c in concepts)
823
+ for (arity, label) in items:
824
+ print(label, arity)
825
+ sys.exit(0)
826
+ # show all the concepts
827
+ if options.concepts:
828
+ for c in concepts:
829
+ print(c)
830
+ print()
831
+ if options.label:
832
+ print(concept_map[options.label])
833
+ sys.exit(0)
834
+ else:
835
+ # turn the concepts into a Valuation
836
+ if options.lex:
837
+ if options.verbose:
838
+ print("Writing out lexical rules")
839
+ make_valuation(concepts, lexicon=True)
840
+ else:
841
+ valuation = make_valuation(concepts, read=True)
842
+ print(valuation)
843
+
844
+
845
+ def sql_demo():
846
+ """
847
+ Print out every row from the 'city.db' database.
848
+ """
849
+ print()
850
+ print("Using SQL to extract rows from 'city.db' RDB.")
851
+ for row in sql_query("corpora/city_database/city.db", "SELECT * FROM city_table"):
852
+ print(row)
853
+
854
+
855
+ if __name__ == "__main__":
856
+ main()
857
+ sql_demo()
venv/lib/python3.10/site-packages/nltk/sem/cooper_storage.py ADDED
@@ -0,0 +1,124 @@
1
+ # Natural Language Toolkit: Cooper storage for Quantifier Ambiguity
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from nltk.parse import load_parser
9
+ from nltk.parse.featurechart import InstantiateVarsChart
10
+ from nltk.sem.logic import ApplicationExpression, LambdaExpression, Variable
11
+
12
+
13
+ class CooperStore:
14
+ """
15
+ A container for handling quantifier ambiguity via Cooper storage.
16
+ """
17
+
18
+ def __init__(self, featstruct):
19
+ """
20
+ :param featstruct: The value of the ``SEM`` node in a tree from
21
+ ``parse_with_bindops()``
22
+ :type featstruct: FeatStruct (with features ``CORE`` and ``STORE``)
23
+
24
+ """
25
+ self.featstruct = featstruct
26
+ self.readings = []
27
+ try:
28
+ self.core = featstruct["CORE"]
29
+ self.store = featstruct["STORE"]
30
+ except KeyError:
31
+ print("%s is not a Cooper storage structure" % featstruct)
32
+
33
+ def _permute(self, lst):
34
+ """
35
+ :return: An iterator over the permutations of the input list
36
+ :type lst: list
37
+ :rtype: iter
38
+ """
39
+ remove = lambda lst0, index: lst0[:index] + lst0[index + 1 :]
40
+ if lst:
41
+ for index, x in enumerate(lst):
42
+ for y in self._permute(remove(lst, index)):
43
+ yield (x,) + y
44
+ else:
45
+ yield ()
46
+
47
+ def s_retrieve(self, trace=False):
48
+ r"""
49
+ Carry out S-Retrieval of binding operators in store (a hand-worked
+ sketch of a single step appears after this class).
51
+
52
+ Each permutation of the store (i.e. list of binding operators) is
53
+ taken to be a possible scoping of quantifiers. We iterate through the
54
+ binding operators in each permutation, and successively apply them to
55
+ the current term, starting with the core semantic representation,
56
+ working from the inside out.
57
+
58
+ Binding operators are of the form::
59
+
60
+ bo(\P.all x.(man(x) -> P(x)),z1)
61
+ """
62
+ for perm, store_perm in enumerate(self._permute(self.store)):
63
+ if trace:
64
+ print("Permutation %s" % (perm + 1))
65
+ term = self.core
66
+ for bindop in store_perm:
67
+ # we just want the arguments that are wrapped by the 'bo' predicate
68
+ quant, varex = tuple(bindop.args)
69
+ # use var to make an abstraction over the current term and then
70
+ # apply the quantifier to it
71
+ term = ApplicationExpression(
72
+ quant, LambdaExpression(varex.variable, term)
73
+ )
74
+ if trace:
75
+ print(" ", term)
76
+ term = term.simplify()
77
+ self.readings.append(term)
78
+
79
+
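+ # An illustrative, hand-worked version of a single retrieval step (a sketch
+ # only; the formulas below are made-up examples, not fixtures from NLTK).
+ def _retrieval_step_example():
+     from nltk.sem.logic import Expression
+
+     # a stored quantifier and the core representation it scopes over
+     quant = Expression.fromstring(r"\P.exists y.(dog(y) & P(y))")
+     core = Expression.fromstring("chase(x,y)")
+     # abstract the core over the bound variable, then apply the quantifier
+     term = ApplicationExpression(quant, LambdaExpression(Variable("y"), core))
+     return term.simplify()  # exists y.(dog(y) & chase(x,y))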
80
+ def parse_with_bindops(sentence, grammar=None, trace=0):
81
+ """
82
+ Use a grammar with Binding Operators to parse a sentence.
83
+ """
84
+ if not grammar:
85
+ grammar = "grammars/book_grammars/storage.fcfg"
86
+ parser = load_parser(grammar, trace=trace, chart_class=InstantiateVarsChart)
87
+ # Parse the sentence.
88
+ tokens = sentence.split()
89
+ return list(parser.parse(tokens))
90
+
91
+
92
+ def demo():
93
+ from nltk.sem import cooper_storage as cs
94
+
95
+ sentence = "every girl chases a dog"
96
+ # sentence = "a man gives a bone to every dog"
97
+ print()
98
+ print("Analysis of sentence '%s'" % sentence)
99
+ print("=" * 50)
100
+ trees = cs.parse_with_bindops(sentence, trace=0)
101
+ for tree in trees:
102
+ semrep = cs.CooperStore(tree.label()["SEM"])
103
+ print()
104
+ print("Binding operators:")
105
+ print("-" * 15)
106
+ for s in semrep.store:
107
+ print(s)
108
+ print()
109
+ print("Core:")
110
+ print("-" * 15)
111
+ print(semrep.core)
112
+ print()
113
+ print("S-Retrieval:")
114
+ print("-" * 15)
115
+ semrep.s_retrieve(trace=True)
116
+ print("Readings:")
117
+ print("-" * 15)
118
+
119
+ for i, reading in enumerate(semrep.readings):
120
+ print(f"{i + 1}: {reading}")
121
+
122
+
123
+ if __name__ == "__main__":
124
+ demo()
venv/lib/python3.10/site-packages/nltk/sem/drt.py ADDED
@@ -0,0 +1,1460 @@
1
+ # Natural Language Toolkit: Discourse Representation Theory (DRT)
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import operator
10
+ from functools import reduce
11
+ from itertools import chain
12
+
13
+ from nltk.sem.logic import (
14
+ APP,
15
+ AbstractVariableExpression,
16
+ AllExpression,
17
+ AndExpression,
18
+ ApplicationExpression,
19
+ BinaryExpression,
20
+ BooleanExpression,
21
+ ConstantExpression,
22
+ EqualityExpression,
23
+ EventVariableExpression,
24
+ ExistsExpression,
25
+ Expression,
26
+ FunctionVariableExpression,
27
+ ImpExpression,
28
+ IndividualVariableExpression,
29
+ LambdaExpression,
30
+ LogicParser,
31
+ NegatedExpression,
32
+ OrExpression,
33
+ Tokens,
34
+ Variable,
35
+ is_eventvar,
36
+ is_funcvar,
37
+ is_indvar,
38
+ unique_variable,
39
+ )
40
+
41
+ # Import Tkinter-based modules if they are available
42
+ try:
43
+ from tkinter import Canvas, Tk
44
+ from tkinter.font import Font
45
+
46
+ from nltk.util import in_idle
47
+
48
+ except ImportError:
49
+ # No need to print a warning here, nltk.draw has already printed one.
50
+ pass
51
+
52
+
53
+ class DrtTokens(Tokens):
54
+ DRS = "DRS"
55
+ DRS_CONC = "+"
56
+ PRONOUN = "PRO"
57
+ OPEN_BRACKET = "["
58
+ CLOSE_BRACKET = "]"
59
+ COLON = ":"
60
+
61
+ PUNCT = [DRS_CONC, OPEN_BRACKET, CLOSE_BRACKET, COLON]
62
+
63
+ SYMBOLS = Tokens.SYMBOLS + PUNCT
64
+
65
+ TOKENS = Tokens.TOKENS + [DRS] + PUNCT
66
+
67
+
68
+ class DrtParser(LogicParser):
69
+ """A lambda calculus expression parser."""
70
+
71
+ def __init__(self):
72
+ LogicParser.__init__(self)
73
+
74
+ self.operator_precedence = dict(
75
+ [(x, 1) for x in DrtTokens.LAMBDA_LIST]
76
+ + [(x, 2) for x in DrtTokens.NOT_LIST]
77
+ + [(APP, 3)]
78
+ + [(x, 4) for x in DrtTokens.EQ_LIST + Tokens.NEQ_LIST]
79
+ + [(DrtTokens.COLON, 5)]
80
+ + [(DrtTokens.DRS_CONC, 6)]
81
+ + [(x, 7) for x in DrtTokens.OR_LIST]
82
+ + [(x, 8) for x in DrtTokens.IMP_LIST]
83
+ + [(None, 9)]
84
+ )
85
+
86
+ def get_all_symbols(self):
87
+ """This method exists to be overridden"""
88
+ return DrtTokens.SYMBOLS
89
+
90
+ def isvariable(self, tok):
91
+ return tok not in DrtTokens.TOKENS
92
+
93
+ def handle(self, tok, context):
94
+ """This method is intended to be overridden for logics that
95
+ use different operators or expressions"""
96
+ if tok in DrtTokens.NOT_LIST:
97
+ return self.handle_negation(tok, context)
98
+
99
+ elif tok in DrtTokens.LAMBDA_LIST:
100
+ return self.handle_lambda(tok, context)
101
+
102
+ elif tok == DrtTokens.OPEN:
103
+ if self.inRange(0) and self.token(0) == DrtTokens.OPEN_BRACKET:
104
+ return self.handle_DRS(tok, context)
105
+ else:
106
+ return self.handle_open(tok, context)
107
+
108
+ elif tok.upper() == DrtTokens.DRS:
109
+ self.assertNextToken(DrtTokens.OPEN)
110
+ return self.handle_DRS(tok, context)
111
+
112
+ elif self.isvariable(tok):
113
+ if self.inRange(0) and self.token(0) == DrtTokens.COLON:
114
+ return self.handle_prop(tok, context)
115
+ else:
116
+ return self.handle_variable(tok, context)
117
+
118
+ def make_NegatedExpression(self, expression):
119
+ return DrtNegatedExpression(expression)
120
+
121
+ def handle_DRS(self, tok, context):
122
+ # a DRS
123
+ refs = self.handle_refs()
124
+ if (
125
+ self.inRange(0) and self.token(0) == DrtTokens.COMMA
126
+ ): # if there is a comma (it's optional)
127
+ self.token() # swallow the comma
128
+ conds = self.handle_conds(context)
129
+ self.assertNextToken(DrtTokens.CLOSE)
130
+ return DRS(refs, conds, None)
131
+
132
+ def handle_refs(self):
133
+ self.assertNextToken(DrtTokens.OPEN_BRACKET)
134
+ refs = []
135
+ while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET:
136
+ # Support expressions like: DRS([x y],C) == DRS([x,y],C)
137
+ if refs and self.token(0) == DrtTokens.COMMA:
138
+ self.token() # swallow the comma
139
+ refs.append(self.get_next_token_variable("quantified"))
140
+ self.assertNextToken(DrtTokens.CLOSE_BRACKET)
141
+ return refs
142
+
143
+ def handle_conds(self, context):
144
+ self.assertNextToken(DrtTokens.OPEN_BRACKET)
145
+ conds = []
146
+ while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET:
147
+ # Support expressions like: DRS([x y],C) == DRS([x, y],C)
148
+ if conds and self.token(0) == DrtTokens.COMMA:
149
+ self.token() # swallow the comma
150
+ conds.append(self.process_next_expression(context))
151
+ self.assertNextToken(DrtTokens.CLOSE_BRACKET)
152
+ return conds
153
+
154
+ def handle_prop(self, tok, context):
155
+ variable = self.make_VariableExpression(tok)
156
+ self.assertNextToken(":")
157
+ drs = self.process_next_expression(DrtTokens.COLON)
158
+ return DrtProposition(variable, drs)
159
+
160
+ def make_EqualityExpression(self, first, second):
161
+ """This method serves as a hook for other logic parsers that
162
+ have different equality expression classes"""
163
+ return DrtEqualityExpression(first, second)
164
+
165
+ def get_BooleanExpression_factory(self, tok):
166
+ """This method serves as a hook for other logic parsers that
167
+ have different boolean operators"""
168
+ if tok == DrtTokens.DRS_CONC:
169
+ return lambda first, second: DrtConcatenation(first, second, None)
170
+ elif tok in DrtTokens.OR_LIST:
171
+ return DrtOrExpression
172
+ elif tok in DrtTokens.IMP_LIST:
173
+
174
+ def make_imp_expression(first, second):
175
+ if isinstance(first, DRS):
176
+ return DRS(first.refs, first.conds, second)
177
+ if isinstance(first, DrtConcatenation):
178
+ return DrtConcatenation(first.first, first.second, second)
179
+ raise Exception("Antecedent of implication must be a DRS")
180
+
181
+ return make_imp_expression
182
+ else:
183
+ return None
184
+
185
+ def make_BooleanExpression(self, factory, first, second):
186
+ return factory(first, second)
187
+
188
+ def make_ApplicationExpression(self, function, argument):
189
+ return DrtApplicationExpression(function, argument)
190
+
191
+ def make_VariableExpression(self, name):
192
+ return DrtVariableExpression(Variable(name))
193
+
194
+ def make_LambdaExpression(self, variables, term):
195
+ return DrtLambdaExpression(variables, term)
196
+
197
+
198
+ class DrtExpression:
199
+ """
200
+ This is the base abstract DRT Expression from which every DRT
201
+ Expression extends.
202
+ """
203
+
204
+ _drt_parser = DrtParser()
205
+
206
+ @classmethod
207
+ def fromstring(cls, s):
208
+ return cls._drt_parser.parse(s)
209
+
210
+ def applyto(self, other):
211
+ return DrtApplicationExpression(self, other)
212
+
213
+ def __neg__(self):
214
+ return DrtNegatedExpression(self)
215
+
216
+ def __and__(self, other):
217
+ return NotImplemented
218
+
219
+ def __or__(self, other):
220
+ assert isinstance(other, DrtExpression)
221
+ return DrtOrExpression(self, other)
222
+
223
+ def __gt__(self, other):
224
+ assert isinstance(other, DrtExpression)
225
+ if isinstance(self, DRS):
226
+ return DRS(self.refs, self.conds, other)
227
+ if isinstance(self, DrtConcatenation):
228
+ return DrtConcatenation(self.first, self.second, other)
229
+ raise Exception("Antecedent of implication must be a DRS")
230
+
231
+ def equiv(self, other, prover=None):
232
+ """
233
+ Check for logical equivalence.
234
+ Pass the expression (self <-> other) to the theorem prover.
235
+ If the prover says it is valid, then self and other are equal.
236
+
237
+ :param other: a ``DrtExpression`` to check equality against
238
+ :param prover: a ``nltk.inference.api.Prover``
239
+ """
240
+ assert isinstance(other, DrtExpression)
241
+
242
+ f1 = self.simplify().fol()
243
+ f2 = other.simplify().fol()
244
+ return f1.equiv(f2, prover)
245
+
246
+ @property
247
+ def type(self):
248
+ raise AttributeError(
249
+ "'%s' object has no attribute 'type'" % self.__class__.__name__
250
+ )
251
+
252
+ def typecheck(self, signature=None):
253
+ raise NotImplementedError()
254
+
255
+ def __add__(self, other):
256
+ return DrtConcatenation(self, other, None)
257
+
258
+ def get_refs(self, recursive=False):
259
+ """
260
+ Return the set of discourse referents in this DRS.
261
+ :param recursive: bool Also find discourse referents in subterms?
262
+ :return: list of ``Variable`` objects
263
+ """
264
+ raise NotImplementedError()
265
+
266
+ def is_pronoun_function(self):
267
+ """Is self of the form "PRO(x)"?"""
268
+ return (
269
+ isinstance(self, DrtApplicationExpression)
270
+ and isinstance(self.function, DrtAbstractVariableExpression)
271
+ and self.function.variable.name == DrtTokens.PRONOUN
272
+ and isinstance(self.argument, DrtIndividualVariableExpression)
273
+ )
274
+
275
+ def make_EqualityExpression(self, first, second):
276
+ return DrtEqualityExpression(first, second)
277
+
278
+ def make_VariableExpression(self, variable):
279
+ return DrtVariableExpression(variable)
280
+
281
+ def resolve_anaphora(self):
282
+ return resolve_anaphora(self)
283
+
284
+ def eliminate_equality(self):
285
+ return self.visit_structured(lambda e: e.eliminate_equality(), self.__class__)
286
+
287
+ def pretty_format(self):
288
+ """
289
+ Pretty-format the DRS
290
+ :return: the pretty print string
291
+ """
292
+ return "\n".join(self._pretty())
293
+
294
+ def pretty_print(self):
295
+ print(self.pretty_format())
296
+
297
+ def draw(self):
298
+ DrsDrawer(self).draw()
299
+
300
+
301
+ class DRS(DrtExpression, Expression):
302
+ """A Discourse Representation Structure."""
303
+
304
+ def __init__(self, refs, conds, consequent=None):
305
+ """
306
+ :param refs: list of ``DrtIndividualVariableExpression`` for the
307
+ discourse referents
308
+ :param conds: list of ``Expression`` for the conditions
309
+ """
310
+ self.refs = refs
311
+ self.conds = conds
312
+ self.consequent = consequent
313
+
314
+ def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
315
+ """Replace all instances of variable v with expression E in self,
316
+ where v is free in self."""
317
+ if variable in self.refs:
318
+ # if a bound variable is the thing being replaced
319
+ if not replace_bound:
320
+ return self
321
+ else:
322
+ i = self.refs.index(variable)
323
+ if self.consequent:
324
+ consequent = self.consequent.replace(
325
+ variable, expression, True, alpha_convert
326
+ )
327
+ else:
328
+ consequent = None
329
+ return DRS(
330
+ self.refs[:i] + [expression.variable] + self.refs[i + 1 :],
331
+ [
332
+ cond.replace(variable, expression, True, alpha_convert)
333
+ for cond in self.conds
334
+ ],
335
+ consequent,
336
+ )
337
+ else:
338
+ if alpha_convert:
339
+ # any bound variable that appears in the expression must
340
+ # be alpha converted to avoid a conflict
341
+ for ref in set(self.refs) & expression.free():
342
+ newvar = unique_variable(ref)
343
+ newvarex = DrtVariableExpression(newvar)
344
+ i = self.refs.index(ref)
345
+ if self.consequent:
346
+ consequent = self.consequent.replace(
347
+ ref, newvarex, True, alpha_convert
348
+ )
349
+ else:
350
+ consequent = None
351
+ self = DRS(
352
+ self.refs[:i] + [newvar] + self.refs[i + 1 :],
353
+ [
354
+ cond.replace(ref, newvarex, True, alpha_convert)
355
+ for cond in self.conds
356
+ ],
357
+ consequent,
358
+ )
359
+
360
+ # replace in the conditions
361
+ if self.consequent:
362
+ consequent = self.consequent.replace(
363
+ variable, expression, replace_bound, alpha_convert
364
+ )
365
+ else:
366
+ consequent = None
367
+ return DRS(
368
+ self.refs,
369
+ [
370
+ cond.replace(variable, expression, replace_bound, alpha_convert)
371
+ for cond in self.conds
372
+ ],
373
+ consequent,
374
+ )
375
+
376
+ def free(self):
377
+ """:see: Expression.free()"""
378
+ conds_free = reduce(operator.or_, [c.free() for c in self.conds], set())
379
+ if self.consequent:
380
+ conds_free.update(self.consequent.free())
381
+ return conds_free - set(self.refs)
382
+
383
+ def get_refs(self, recursive=False):
384
+ """:see: AbstractExpression.get_refs()"""
385
+ if recursive:
386
+ conds_refs = self.refs + list(
387
+ chain.from_iterable(c.get_refs(True) for c in self.conds)
388
+ )
389
+ if self.consequent:
390
+ conds_refs.extend(self.consequent.get_refs(True))
391
+ return conds_refs
392
+ else:
393
+ return self.refs
394
+
395
+ def visit(self, function, combinator):
396
+ """:see: Expression.visit()"""
397
+ parts = list(map(function, self.conds))
398
+ if self.consequent:
399
+ parts.append(function(self.consequent))
400
+ return combinator(parts)
401
+
402
+ def visit_structured(self, function, combinator):
403
+ """:see: Expression.visit_structured()"""
404
+ consequent = function(self.consequent) if self.consequent else None
405
+ return combinator(self.refs, list(map(function, self.conds)), consequent)
406
+
407
+ def eliminate_equality(self):
408
+ drs = self
409
+ i = 0
410
+ while i < len(drs.conds):
411
+ cond = drs.conds[i]
412
+ if (
413
+ isinstance(cond, EqualityExpression)
414
+ and isinstance(cond.first, AbstractVariableExpression)
415
+ and isinstance(cond.second, AbstractVariableExpression)
416
+ ):
417
+ drs = DRS(
418
+ list(set(drs.refs) - {cond.second.variable}),
419
+ drs.conds[:i] + drs.conds[i + 1 :],
420
+ drs.consequent,
421
+ )
422
+ if cond.second.variable != cond.first.variable:
423
+ drs = drs.replace(cond.second.variable, cond.first, False, False)
424
+ i = 0
425
+ i -= 1
426
+ i += 1
427
+
428
+ conds = []
429
+ for cond in drs.conds:
430
+ new_cond = cond.eliminate_equality()
431
+ new_cond_simp = new_cond.simplify()
432
+ if (
433
+ not isinstance(new_cond_simp, DRS)
434
+ or new_cond_simp.refs
435
+ or new_cond_simp.conds
436
+ or new_cond_simp.consequent
437
+ ):
438
+ conds.append(new_cond)
439
+
440
+ consequent = drs.consequent.eliminate_equality() if drs.consequent else None
441
+ return DRS(drs.refs, conds, consequent)
442
+
443
+ def fol(self):
444
+ if self.consequent:
445
+ accum = None
446
+ if self.conds:
447
+ accum = reduce(AndExpression, [c.fol() for c in self.conds])
448
+
449
+ if accum:
450
+ accum = ImpExpression(accum, self.consequent.fol())
451
+ else:
452
+ accum = self.consequent.fol()
453
+
454
+ for ref in self.refs[::-1]:
455
+ accum = AllExpression(ref, accum)
456
+
457
+ return accum
458
+
459
+ else:
460
+ if not self.conds:
461
+ raise Exception("Cannot convert DRS with no conditions to FOL.")
462
+ accum = reduce(AndExpression, [c.fol() for c in self.conds])
463
+ for ref in map(Variable, self._order_ref_strings(self.refs)[::-1]):
464
+ accum = ExistsExpression(ref, accum)
465
+ return accum
466
+
467
+ def _pretty(self):
468
+ refs_line = " ".join(self._order_ref_strings(self.refs))
469
+
470
+ cond_lines = [
471
+ cond
472
+ for cond_line in [
473
+ filter(lambda s: s.strip(), cond._pretty()) for cond in self.conds
474
+ ]
475
+ for cond in cond_line
476
+ ]
477
+ length = max([len(refs_line)] + list(map(len, cond_lines)))
478
+ drs = (
479
+ [
480
+ " _" + "_" * length + "_ ",
481
+ "| " + refs_line.ljust(length) + " |",
482
+ "|-" + "-" * length + "-|",
483
+ ]
484
+ + ["| " + line.ljust(length) + " |" for line in cond_lines]
485
+ + ["|_" + "_" * length + "_|"]
486
+ )
487
+ if self.consequent:
488
+ return DrtBinaryExpression._assemble_pretty(
489
+ drs, DrtTokens.IMP, self.consequent._pretty()
490
+ )
491
+ return drs
492
+
493
+ def _order_ref_strings(self, refs):
494
+ strings = ["%s" % ref for ref in refs]
495
+ ind_vars = []
496
+ func_vars = []
497
+ event_vars = []
498
+ other_vars = []
499
+ for s in strings:
500
+ if is_indvar(s):
501
+ ind_vars.append(s)
502
+ elif is_funcvar(s):
503
+ func_vars.append(s)
504
+ elif is_eventvar(s):
505
+ event_vars.append(s)
506
+ else:
507
+ other_vars.append(s)
508
+ return (
509
+ sorted(other_vars)
510
+ + sorted(event_vars, key=lambda v: int([v[2:], -1][len(v[2:]) == 0]))
511
+ + sorted(func_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0])))
512
+ + sorted(ind_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0])))
513
+ )
514
+
515
+ def __eq__(self, other):
516
+ r"""Defines equality modulo alphabetic variance.
517
+ If we are comparing \x.M and \y.N, then check equality of M and N[x/y]."""
518
+ if isinstance(other, DRS):
519
+ if len(self.refs) == len(other.refs):
520
+ converted_other = other
521
+ for (r1, r2) in zip(self.refs, converted_other.refs):
522
+ varex = self.make_VariableExpression(r1)
523
+ converted_other = converted_other.replace(r2, varex, True)
524
+ if self.consequent == converted_other.consequent and len(
525
+ self.conds
526
+ ) == len(converted_other.conds):
527
+ for c1, c2 in zip(self.conds, converted_other.conds):
528
+ if not (c1 == c2):
529
+ return False
530
+ return True
531
+ return False
532
+
533
+ def __ne__(self, other):
534
+ return not self == other
535
+
536
+ __hash__ = Expression.__hash__
537
+
538
+ def __str__(self):
539
+ drs = "([{}],[{}])".format(
540
+ ",".join(self._order_ref_strings(self.refs)),
541
+ ", ".join("%s" % cond for cond in self.conds),
542
+ ) # map(str, self.conds)))
543
+ if self.consequent:
544
+ return (
545
+ DrtTokens.OPEN
546
+ + drs
547
+ + " "
548
+ + DrtTokens.IMP
549
+ + " "
550
+ + "%s" % self.consequent
551
+ + DrtTokens.CLOSE
552
+ )
553
+ return drs
554
+
555
+
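+ # Illustrative sketch (not part of the original module): how a simple DRS
+ # is translated to first-order logic by ``DRS.fol()``.
+ def _drs_fol_example():
+     drs = DrtExpression.fromstring("([x],[dog(x), walk(x)])")
+     return drs.fol()  # exists x.(dog(x) & walk(x))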
556
+ def DrtVariableExpression(variable):
557
+ """
558
+ This is a factory method that instantiates and returns a subtype of
559
+ ``DrtAbstractVariableExpression`` appropriate for the given variable.
560
+ """
561
+ if is_indvar(variable.name):
562
+ return DrtIndividualVariableExpression(variable)
563
+ elif is_funcvar(variable.name):
564
+ return DrtFunctionVariableExpression(variable)
565
+ elif is_eventvar(variable.name):
566
+ return DrtEventVariableExpression(variable)
567
+ else:
568
+ return DrtConstantExpression(variable)
569
+
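+ # Illustrative dispatch sketch (assumes the usual NLTK naming conventions
+ # from nltk.sem.logic: single lower-case letters for individuals, 'e'-initial
+ # names for events, upper-case initials for function variables, and anything
+ # else a constant).
+ def _variable_dispatch_example():
+     names = ["x", "e1", "P", "john"]
+     return [type(DrtVariableExpression(Variable(n))).__name__ for n in names]
+     # ['DrtIndividualVariableExpression', 'DrtEventVariableExpression',
+     #  'DrtFunctionVariableExpression', 'DrtConstantExpression']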
570
+
571
+ class DrtAbstractVariableExpression(DrtExpression, AbstractVariableExpression):
572
+ def fol(self):
573
+ return self
574
+
575
+ def get_refs(self, recursive=False):
576
+ """:see: AbstractExpression.get_refs()"""
577
+ return []
578
+
579
+ def _pretty(self):
580
+ s = "%s" % self
581
+ blank = " " * len(s)
582
+ return [blank, blank, s, blank]
583
+
584
+ def eliminate_equality(self):
585
+ return self
586
+
587
+
588
+ class DrtIndividualVariableExpression(
589
+ DrtAbstractVariableExpression, IndividualVariableExpression
590
+ ):
591
+ pass
592
+
593
+
594
+ class DrtFunctionVariableExpression(
595
+ DrtAbstractVariableExpression, FunctionVariableExpression
596
+ ):
597
+ pass
598
+
599
+
600
+ class DrtEventVariableExpression(
601
+ DrtIndividualVariableExpression, EventVariableExpression
602
+ ):
603
+ pass
604
+
605
+
606
+ class DrtConstantExpression(DrtAbstractVariableExpression, ConstantExpression):
607
+ pass
608
+
609
+
610
+ class DrtProposition(DrtExpression, Expression):
611
+ def __init__(self, variable, drs):
612
+ self.variable = variable
613
+ self.drs = drs
614
+
615
+ def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
616
+ if self.variable == variable:
617
+ assert isinstance(
618
+ expression, DrtAbstractVariableExpression
619
+ ), "Can only replace a proposition label with a variable"
620
+ return DrtProposition(
621
+ expression.variable,
622
+ self.drs.replace(variable, expression, replace_bound, alpha_convert),
623
+ )
624
+ else:
625
+ return DrtProposition(
626
+ self.variable,
627
+ self.drs.replace(variable, expression, replace_bound, alpha_convert),
628
+ )
629
+
630
+ def eliminate_equality(self):
631
+ return DrtProposition(self.variable, self.drs.eliminate_equality())
632
+
633
+ def get_refs(self, recursive=False):
634
+ return self.drs.get_refs(True) if recursive else []
635
+
636
+ def __eq__(self, other):
637
+ return (
638
+ self.__class__ == other.__class__
639
+ and self.variable == other.variable
640
+ and self.drs == other.drs
641
+ )
642
+
643
+ def __ne__(self, other):
644
+ return not self == other
645
+
646
+ __hash__ = Expression.__hash__
647
+
648
+ def fol(self):
649
+ return self.drs.fol()
650
+
651
+ def _pretty(self):
652
+ drs_s = self.drs._pretty()
653
+ blank = " " * len("%s" % self.variable)
654
+ return (
655
+ [blank + " " + line for line in drs_s[:1]]
656
+ + ["%s" % self.variable + ":" + line for line in drs_s[1:2]]
657
+ + [blank + " " + line for line in drs_s[2:]]
658
+ )
659
+
660
+ def visit(self, function, combinator):
661
+ """:see: Expression.visit()"""
662
+ return combinator([function(self.drs)])
663
+
664
+ def visit_structured(self, function, combinator):
665
+ """:see: Expression.visit_structured()"""
666
+ return combinator(self.variable, function(self.drs))
667
+
668
+ def __str__(self):
669
+ return f"prop({self.variable}, {self.drs})"
670
+
671
+
672
+ class DrtNegatedExpression(DrtExpression, NegatedExpression):
673
+ def fol(self):
674
+ return NegatedExpression(self.term.fol())
675
+
676
+ def get_refs(self, recursive=False):
677
+ """:see: AbstractExpression.get_refs()"""
678
+ return self.term.get_refs(recursive)
679
+
680
+ def _pretty(self):
681
+ term_lines = self.term._pretty()
682
+ return (
683
+ [" " + line for line in term_lines[:2]]
684
+ + ["__ " + line for line in term_lines[2:3]]
685
+ + [" | " + line for line in term_lines[3:4]]
686
+ + [" " + line for line in term_lines[4:]]
687
+ )
688
+
689
+
690
+ class DrtLambdaExpression(DrtExpression, LambdaExpression):
691
+ def alpha_convert(self, newvar):
692
+ """Rename all occurrences of the variable introduced by this variable
693
+ binder in the expression to ``newvar``.
694
+ :param newvar: ``Variable``, for the new variable
695
+ """
696
+ return self.__class__(
697
+ newvar,
698
+ self.term.replace(self.variable, DrtVariableExpression(newvar), True),
699
+ )
700
+
701
+ def fol(self):
702
+ return LambdaExpression(self.variable, self.term.fol())
703
+
704
+ def _pretty(self):
705
+ variables = [self.variable]
706
+ term = self.term
707
+ while term.__class__ == self.__class__:
708
+ variables.append(term.variable)
709
+ term = term.term
710
+ var_string = " ".join("%s" % v for v in variables) + DrtTokens.DOT
711
+ term_lines = term._pretty()
712
+ blank = " " * len(var_string)
713
+ return (
714
+ [" " + blank + line for line in term_lines[:1]]
715
+ + [r" \ " + blank + line for line in term_lines[1:2]]
716
+ + [r" /\ " + var_string + line for line in term_lines[2:3]]
717
+ + [" " + blank + line for line in term_lines[3:]]
718
+ )
719
+
720
+ def get_refs(self, recursive=False):
721
+ """:see: AbstractExpression.get_refs()"""
722
+ return (
723
+ [self.variable] + self.term.get_refs(True) if recursive else [self.variable]
724
+ )
725
+
726
+
727
+ class DrtBinaryExpression(DrtExpression, BinaryExpression):
728
+ def get_refs(self, recursive=False):
729
+ """:see: AbstractExpression.get_refs()"""
730
+ return (
731
+ self.first.get_refs(True) + self.second.get_refs(True) if recursive else []
732
+ )
733
+
734
+ def _pretty(self):
735
+ return DrtBinaryExpression._assemble_pretty(
736
+ self._pretty_subex(self.first),
737
+ self.getOp(),
738
+ self._pretty_subex(self.second),
739
+ )
740
+
741
+ @staticmethod
742
+ def _assemble_pretty(first_lines, op, second_lines):
743
+ max_lines = max(len(first_lines), len(second_lines))
744
+ first_lines = _pad_vertically(first_lines, max_lines)
745
+ second_lines = _pad_vertically(second_lines, max_lines)
746
+ blank = " " * len(op)
747
+ first_second_lines = list(zip(first_lines, second_lines))
748
+ return (
749
+ [
750
+ " " + first_line + " " + blank + " " + second_line + " "
751
+ for first_line, second_line in first_second_lines[:2]
752
+ ]
753
+ + [
754
+ "(" + first_line + " " + op + " " + second_line + ")"
755
+ for first_line, second_line in first_second_lines[2:3]
756
+ ]
757
+ + [
758
+ " " + first_line + " " + blank + " " + second_line + " "
759
+ for first_line, second_line in first_second_lines[3:]
760
+ ]
761
+ )
762
+
763
+ def _pretty_subex(self, subex):
764
+ return subex._pretty()
765
+
766
+
767
+ class DrtBooleanExpression(DrtBinaryExpression, BooleanExpression):
768
+ pass
769
+
770
+
771
+ class DrtOrExpression(DrtBooleanExpression, OrExpression):
772
+ def fol(self):
773
+ return OrExpression(self.first.fol(), self.second.fol())
774
+
775
+ def _pretty_subex(self, subex):
776
+ if isinstance(subex, DrtOrExpression):
777
+ return [line[1:-1] for line in subex._pretty()]
778
+ return DrtBooleanExpression._pretty_subex(self, subex)
779
+
780
+
781
+ class DrtEqualityExpression(DrtBinaryExpression, EqualityExpression):
782
+ def fol(self):
783
+ return EqualityExpression(self.first.fol(), self.second.fol())
784
+
785
+
786
+ class DrtConcatenation(DrtBooleanExpression):
787
+ """DRS of the form '(DRS + DRS)'"""
788
+
789
+ def __init__(self, first, second, consequent=None):
790
+ DrtBooleanExpression.__init__(self, first, second)
791
+ self.consequent = consequent
792
+
793
+ def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
794
+ """Replace all instances of variable v with expression E in self,
795
+ where v is free in self."""
796
+ first = self.first
797
+ second = self.second
798
+ consequent = self.consequent
799
+
800
+ # If variable is bound
801
+ if variable in self.get_refs():
802
+ if replace_bound:
803
+ first = first.replace(
804
+ variable, expression, replace_bound, alpha_convert
805
+ )
806
+ second = second.replace(
807
+ variable, expression, replace_bound, alpha_convert
808
+ )
809
+ if consequent:
810
+ consequent = consequent.replace(
811
+ variable, expression, replace_bound, alpha_convert
812
+ )
813
+ else:
814
+ if alpha_convert:
815
+ # alpha convert every ref that is free in 'expression'
816
+ for ref in set(self.get_refs(True)) & expression.free():
817
+ v = DrtVariableExpression(unique_variable(ref))
818
+ first = first.replace(ref, v, True, alpha_convert)
819
+ second = second.replace(ref, v, True, alpha_convert)
820
+ if consequent:
821
+ consequent = consequent.replace(ref, v, True, alpha_convert)
822
+
823
+ first = first.replace(variable, expression, replace_bound, alpha_convert)
824
+ second = second.replace(variable, expression, replace_bound, alpha_convert)
825
+ if consequent:
826
+ consequent = consequent.replace(
827
+ variable, expression, replace_bound, alpha_convert
828
+ )
829
+
830
+ return self.__class__(first, second, consequent)
831
+
832
+ def eliminate_equality(self):
833
+ # TODO: at some point. for now, simplify.
834
+ drs = self.simplify()
835
+ assert not isinstance(drs, DrtConcatenation)
836
+ return drs.eliminate_equality()
837
+
838
+ def simplify(self):
839
+ first = self.first.simplify()
840
+ second = self.second.simplify()
841
+ consequent = self.consequent.simplify() if self.consequent else None
842
+
843
+ if isinstance(first, DRS) and isinstance(second, DRS):
844
+ # For any ref that is in both 'first' and 'second'
845
+ for ref in set(first.get_refs(True)) & set(second.get_refs(True)):
846
+ # alpha convert the ref in 'second' to prevent collision
847
+ newvar = DrtVariableExpression(unique_variable(ref))
848
+ second = second.replace(ref, newvar, True)
849
+
850
+ return DRS(first.refs + second.refs, first.conds + second.conds, consequent)
851
+ else:
852
+ return self.__class__(first, second, consequent)
853
+
854
+ def get_refs(self, recursive=False):
855
+ """:see: AbstractExpression.get_refs()"""
856
+ refs = self.first.get_refs(recursive) + self.second.get_refs(recursive)
857
+ if self.consequent and recursive:
858
+ refs.extend(self.consequent.get_refs(True))
859
+ return refs
860
+
861
+ def getOp(self):
862
+ return DrtTokens.DRS_CONC
863
+
864
+ def __eq__(self, other):
865
+ r"""Defines equality modulo alphabetic variance.
866
+ If we are comparing \x.M and \y.N, then check equality of M and N[x/y]."""
867
+ if isinstance(other, DrtConcatenation):
868
+ self_refs = self.get_refs()
869
+ other_refs = other.get_refs()
870
+ if len(self_refs) == len(other_refs):
871
+ converted_other = other
872
+ for (r1, r2) in zip(self_refs, other_refs):
873
+ varex = self.make_VariableExpression(r1)
874
+ converted_other = converted_other.replace(r2, varex, True)
875
+ return (
876
+ self.first == converted_other.first
877
+ and self.second == converted_other.second
878
+ and self.consequent == converted_other.consequent
879
+ )
880
+ return False
881
+
882
+ def __ne__(self, other):
883
+ return not self == other
884
+
885
+ __hash__ = DrtBooleanExpression.__hash__
886
+
887
+ def fol(self):
888
+ e = AndExpression(self.first.fol(), self.second.fol())
889
+ if self.consequent:
890
+ e = ImpExpression(e, self.consequent.fol())
891
+ return e
892
+
893
+ def _pretty(self):
894
+ drs = DrtBinaryExpression._assemble_pretty(
895
+ self._pretty_subex(self.first),
896
+ self.getOp(),
897
+ self._pretty_subex(self.second),
898
+ )
899
+ if self.consequent:
900
+ drs = DrtBinaryExpression._assemble_pretty(
901
+ drs, DrtTokens.IMP, self.consequent._pretty()
902
+ )
903
+ return drs
904
+
905
+ def _pretty_subex(self, subex):
906
+ if isinstance(subex, DrtConcatenation):
907
+ return [line[1:-1] for line in subex._pretty()]
908
+ return DrtBooleanExpression._pretty_subex(self, subex)
909
+
910
+ def visit(self, function, combinator):
911
+ """:see: Expression.visit()"""
912
+ if self.consequent:
913
+ return combinator(
914
+ [function(self.first), function(self.second), function(self.consequent)]
915
+ )
916
+ else:
917
+ return combinator([function(self.first), function(self.second)])
918
+
919
+ def __str__(self):
920
+ first = self._str_subex(self.first)
921
+ second = self._str_subex(self.second)
922
+ drs = Tokens.OPEN + first + " " + self.getOp() + " " + second + Tokens.CLOSE
923
+ if self.consequent:
924
+ return (
925
+ DrtTokens.OPEN
926
+ + drs
927
+ + " "
928
+ + DrtTokens.IMP
929
+ + " "
930
+ + "%s" % self.consequent
931
+ + DrtTokens.CLOSE
932
+ )
933
+ return drs
934
+
935
+ def _str_subex(self, subex):
936
+ s = "%s" % subex
937
+ if isinstance(subex, DrtConcatenation) and subex.consequent is None:
938
+ return s[1:-1]
939
+ return s
940
+
941
+
942
+ class DrtApplicationExpression(DrtExpression, ApplicationExpression):
943
+ def fol(self):
944
+ return ApplicationExpression(self.function.fol(), self.argument.fol())
945
+
946
+ def get_refs(self, recursive=False):
947
+ """:see: AbstractExpression.get_refs()"""
948
+ return (
949
+ self.function.get_refs(True) + self.argument.get_refs(True)
950
+ if recursive
951
+ else []
952
+ )
953
+
954
+ def _pretty(self):
955
+ function, args = self.uncurry()
956
+ function_lines = function._pretty()
957
+ args_lines = [arg._pretty() for arg in args]
958
+ max_lines = max(map(len, [function_lines] + args_lines))
959
+ function_lines = _pad_vertically(function_lines, max_lines)
960
+ args_lines = [_pad_vertically(arg_lines, max_lines) for arg_lines in args_lines]
961
+ func_args_lines = list(zip(function_lines, list(zip(*args_lines))))
962
+ return (
963
+ [
964
+ func_line + " " + " ".join(args_line) + " "
965
+ for func_line, args_line in func_args_lines[:2]
966
+ ]
967
+ + [
968
+ func_line + "(" + ",".join(args_line) + ")"
969
+ for func_line, args_line in func_args_lines[2:3]
970
+ ]
971
+ + [
972
+ func_line + " " + " ".join(args_line) + " "
973
+ for func_line, args_line in func_args_lines[3:]
974
+ ]
975
+ )
976
+
977
+
978
+ def _pad_vertically(lines, max_lines):
979
+ pad_line = [" " * len(lines[0])]
980
+ return lines + pad_line * (max_lines - len(lines))
981
+
982
+
983
+ class PossibleAntecedents(list, DrtExpression, Expression):
984
+ def free(self):
985
+ """Set of free variables."""
986
+ return set(self)
987
+
988
+ def replace(self, variable, expression, replace_bound=False, alpha_convert=True):
989
+ """Replace all instances of variable v with expression E in self,
990
+ where v is free in self."""
991
+ result = PossibleAntecedents()
992
+ for item in self:
993
+ if item == variable:
994
+ result.append(expression)
995
+ else:
996
+ result.append(item)
997
+ return result
998
+
999
+ def _pretty(self):
1000
+ s = "%s" % self
1001
+ blank = " " * len(s)
1002
+ return [blank, blank, s]
1003
+
1004
+ def __str__(self):
1005
+ return "[" + ",".join("%s" % it for it in self) + "]"
1006
+
1007
+
1008
+ class AnaphoraResolutionException(Exception):
1009
+ pass
1010
+
1011
+
1012
+ def resolve_anaphora(expression, trail=[]):
1013
+ if isinstance(expression, ApplicationExpression):
1014
+ if expression.is_pronoun_function():
1015
+ possible_antecedents = PossibleAntecedents()
1016
+ for ancestor in trail:
1017
+ for ref in ancestor.get_refs():
1018
+ refex = expression.make_VariableExpression(ref)
1019
+
1020
+ # ==========================================================
1021
+ # Don't allow resolution to itself or other types
1022
+ # ==========================================================
1023
+ if refex.__class__ == expression.argument.__class__ and not (
1024
+ refex == expression.argument
1025
+ ):
1026
+ possible_antecedents.append(refex)
1027
+
1028
+ if len(possible_antecedents) == 1:
1029
+ resolution = possible_antecedents[0]
1030
+ else:
1031
+ resolution = possible_antecedents
1032
+ return expression.make_EqualityExpression(expression.argument, resolution)
1033
+ else:
1034
+ r_function = resolve_anaphora(expression.function, trail + [expression])
1035
+ r_argument = resolve_anaphora(expression.argument, trail + [expression])
1036
+ return expression.__class__(r_function, r_argument)
1037
+
1038
+ elif isinstance(expression, DRS):
1039
+ r_conds = []
1040
+ for cond in expression.conds:
1041
+ r_cond = resolve_anaphora(cond, trail + [expression])
1042
+
1043
+ # if the condition is of the form '(x = [])' then raise exception
1044
+ if isinstance(r_cond, EqualityExpression):
1045
+ if isinstance(r_cond.first, PossibleAntecedents):
1046
+ # Reverse the order so that the variable is on the left
1047
+ temp = r_cond.first
1048
+ r_cond.first = r_cond.second
1049
+ r_cond.second = temp
1050
+ if isinstance(r_cond.second, PossibleAntecedents):
1051
+ if not r_cond.second:
1052
+ raise AnaphoraResolutionException(
1053
+ "Variable '%s' does not "
1054
+ "resolve to anything." % r_cond.first
1055
+ )
1056
+
1057
+ r_conds.append(r_cond)
1058
+ if expression.consequent:
1059
+ consequent = resolve_anaphora(expression.consequent, trail + [expression])
1060
+ else:
1061
+ consequent = None
1062
+ return expression.__class__(expression.refs, r_conds, consequent)
1063
+
1064
+ elif isinstance(expression, AbstractVariableExpression):
1065
+ return expression
1066
+
1067
+ elif isinstance(expression, NegatedExpression):
1068
+ return expression.__class__(
1069
+ resolve_anaphora(expression.term, trail + [expression])
1070
+ )
1071
+
1072
+ elif isinstance(expression, DrtConcatenation):
1073
+ if expression.consequent:
1074
+ consequent = resolve_anaphora(expression.consequent, trail + [expression])
1075
+ else:
1076
+ consequent = None
1077
+ return expression.__class__(
1078
+ resolve_anaphora(expression.first, trail + [expression]),
1079
+ resolve_anaphora(expression.second, trail + [expression]),
1080
+ consequent,
1081
+ )
1082
+
1083
+ elif isinstance(expression, BinaryExpression):
1084
+ return expression.__class__(
1085
+ resolve_anaphora(expression.first, trail + [expression]),
1086
+ resolve_anaphora(expression.second, trail + [expression]),
1087
+ )
1088
+
1089
+ elif isinstance(expression, LambdaExpression):
1090
+ return expression.__class__(
1091
+ expression.variable, resolve_anaphora(expression.term, trail + [expression])
1092
+ )
1093
+
1094
+
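For reference, a minimal sketch of how resolve_anaphora is typically invoked, mirroring the demo() at the bottom of this file; it assumes an installed nltk and the example output is indicative only:

    from nltk.sem.drt import DrtExpression, resolve_anaphora

    # PRO(z) marks a pronoun; resolution replaces it with an equality between z
    # and the accessible referents collected along the trail (here x and y).
    drs = DrtExpression.fromstring(r"([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])")
    print(resolve_anaphora(drs))
    # e.g. ([x,y,z],[dog(x), cat(y), walks(z), (z = [x,y])])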
1095
+ class DrsDrawer:
1096
+ BUFFER = 3 # Space between elements
1097
+ TOPSPACE = 10 # Space above whole DRS
1098
+ OUTERSPACE = 6 # Space to the left, right, and bottom of the whole DRS
1099
+
1100
+ def __init__(self, drs, size_canvas=True, canvas=None):
1101
+ """
1102
+ :param drs: ``DrtExpression``, The DRS to be drawn
1103
+ :param size_canvas: bool, True if the canvas size should be the exact size of the DRS
1104
+ :param canvas: ``Canvas`` The canvas on which to draw the DRS. If none is given, create a new canvas.
1105
+ """
1106
+ master = None
1107
+ if not canvas:
1108
+ master = Tk()
1109
+ master.title("DRT")
1110
+
1111
+ font = Font(family="helvetica", size=12)
1112
+
1113
+ if size_canvas:
1114
+ canvas = Canvas(master, width=0, height=0)
1115
+ canvas.font = font
1116
+ self.canvas = canvas
1117
+ (right, bottom) = self._visit(drs, self.OUTERSPACE, self.TOPSPACE)
1118
+
1119
+ width = max(right + self.OUTERSPACE, 100)
1120
+ height = bottom + self.OUTERSPACE
1121
+ canvas = Canvas(master, width=width, height=height) # , bg='white')
1122
+ else:
1123
+ canvas = Canvas(master, width=300, height=300)
1124
+
1125
+ canvas.pack()
1126
+ canvas.font = font
1127
+
1128
+ self.canvas = canvas
1129
+ self.drs = drs
1130
+ self.master = master
1131
+
1132
+ def _get_text_height(self):
1133
+ """Get the height of a line of text"""
1134
+ return self.canvas.font.metrics("linespace")
1135
+
1136
+ def draw(self, x=OUTERSPACE, y=TOPSPACE):
1137
+ """Draw the DRS"""
1138
+ self._handle(self.drs, self._draw_command, x, y)
1139
+
1140
+ if self.master and not in_idle():
1141
+ self.master.mainloop()
1142
+ else:
1143
+ return self._visit(self.drs, x, y)
1144
+
1145
+ def _visit(self, expression, x, y):
1146
+ """
1147
+ Return the bottom-rightmost point without actually drawing the item
1148
+
1149
+ :param expression: the item to visit
1150
+ :param x: the left side of the current drawing area
1151
+ :param y: the top of the current drawing area
1152
+ :return: the bottom-rightmost point
1153
+ """
1154
+ return self._handle(expression, self._visit_command, x, y)
1155
+
1156
+ def _draw_command(self, item, x, y):
1157
+ """
1158
+ Draw the given item at the given location
1159
+
1160
+ :param item: the item to draw
1161
+ :param x: the left side of the current drawing area
1162
+ :param y: the top of the current drawing area
1163
+ :return: the bottom-rightmost point
1164
+ """
1165
+ if isinstance(item, str):
1166
+ self.canvas.create_text(x, y, anchor="nw", font=self.canvas.font, text=item)
1167
+ elif isinstance(item, tuple):
1168
+ # item is the lower-right of a box
1169
+ (right, bottom) = item
1170
+ self.canvas.create_rectangle(x, y, right, bottom)
1171
+ horiz_line_y = (
1172
+ y + self._get_text_height() + (self.BUFFER * 2)
1173
+ ) # the line separating refs from conds
1174
+ self.canvas.create_line(x, horiz_line_y, right, horiz_line_y)
1175
+
1176
+ return self._visit_command(item, x, y)
1177
+
1178
+ def _visit_command(self, item, x, y):
1179
+ """
1180
+ Return the bottom-rightmost point without actually drawing the item
1181
+
1182
+ :param item: the item to visit
1183
+ :param x: the left side of the current drawing area
1184
+ :param y: the top of the current drawing area
1185
+ :return: the bottom-rightmost point
1186
+ """
1187
+ if isinstance(item, str):
1188
+ return (x + self.canvas.font.measure(item), y + self._get_text_height())
1189
+ elif isinstance(item, tuple):
1190
+ return item
1191
+
1192
+ def _handle(self, expression, command, x=0, y=0):
1193
+ """
1194
+ :param expression: the expression to handle
1195
+ :param command: the function to apply, either _draw_command or _visit_command
1196
+ :param x: the left side of the current drawing area
1197
+ :param y: the top of the current drawing area
1198
+ :return: the bottom-rightmost point
1199
+ """
1200
+ if command == self._visit_command:
1201
+ # if we don't need to draw the item, then we can use the cached values
1202
+ try:
1203
+ # attempt to retrieve cached values
1204
+ right = expression._drawing_width + x
1205
+ bottom = expression._drawing_height + y
1206
+ return (right, bottom)
1207
+ except AttributeError:
1208
+ # the values have not been cached yet, so compute them
1209
+ pass
1210
+
1211
+ if isinstance(expression, DrtAbstractVariableExpression):
1212
+ factory = self._handle_VariableExpression
1213
+ elif isinstance(expression, DRS):
1214
+ factory = self._handle_DRS
1215
+ elif isinstance(expression, DrtNegatedExpression):
1216
+ factory = self._handle_NegatedExpression
1217
+ elif isinstance(expression, DrtLambdaExpression):
1218
+ factory = self._handle_LambdaExpression
1219
+ elif isinstance(expression, BinaryExpression):
1220
+ factory = self._handle_BinaryExpression
1221
+ elif isinstance(expression, DrtApplicationExpression):
1222
+ factory = self._handle_ApplicationExpression
1223
+ elif isinstance(expression, PossibleAntecedents):
1224
+ factory = self._handle_VariableExpression
1225
+ elif isinstance(expression, DrtProposition):
1226
+ factory = self._handle_DrtProposition
1227
+ else:
1228
+ raise Exception(expression.__class__.__name__)
1229
+
1230
+ (right, bottom) = factory(expression, command, x, y)
1231
+
1232
+ # cache the values
1233
+ expression._drawing_width = right - x
1234
+ expression._drawing_height = bottom - y
1235
+
1236
+ return (right, bottom)
1237
+
1238
+ def _handle_VariableExpression(self, expression, command, x, y):
1239
+ return command("%s" % expression, x, y)
1240
+
1241
+ def _handle_NegatedExpression(self, expression, command, x, y):
1242
+ # Find the width of the negation symbol
1243
+ right = self._visit_command(DrtTokens.NOT, x, y)[0]
1244
+
1245
+ # Handle term
1246
+ (right, bottom) = self._handle(expression.term, command, right, y)
1247
+
1248
+ # Handle the negation symbol now that we know the y-coordinate
1249
+ command(
1250
+ DrtTokens.NOT,
1251
+ x,
1252
+ self._get_centered_top(y, bottom - y, self._get_text_height()),
1253
+ )
1254
+
1255
+ return (right, bottom)
1256
+
1257
+ def _handle_DRS(self, expression, command, x, y):
1258
+ left = x + self.BUFFER # indent the left side
1259
+ bottom = y + self.BUFFER # indent the top
1260
+
1261
+ # Handle Discourse Referents
1262
+ if expression.refs:
1263
+ refs = " ".join("%s" % r for r in expression.refs)
1264
+ else:
1265
+ refs = " "
1266
+ (max_right, bottom) = command(refs, left, bottom)
1267
+ bottom += self.BUFFER * 2
1268
+
1269
+ # Handle Conditions
1270
+ if expression.conds:
1271
+ for cond in expression.conds:
1272
+ (right, bottom) = self._handle(cond, command, left, bottom)
1273
+ max_right = max(max_right, right)
1274
+ bottom += self.BUFFER
1275
+ else:
1276
+ bottom += self._get_text_height() + self.BUFFER
1277
+
1278
+ # Handle Box
1279
+ max_right += self.BUFFER
1280
+ return command((max_right, bottom), x, y)
1281
+
1282
+ def _handle_ApplicationExpression(self, expression, command, x, y):
1283
+ function, args = expression.uncurry()
1284
+ if not isinstance(function, DrtAbstractVariableExpression):
1285
+ # It's not a predicate expression ("P(x,y)"), so leave arguments curried
1286
+ function = expression.function
1287
+ args = [expression.argument]
1288
+
1289
+ # Get the max bottom of any element on the line
1290
+ function_bottom = self._visit(function, x, y)[1]
1291
+ max_bottom = max(
1292
+ [function_bottom] + [self._visit(arg, x, y)[1] for arg in args]
1293
+ )
1294
+
1295
+ line_height = max_bottom - y
1296
+
1297
+ # Handle 'function'
1298
+ function_drawing_top = self._get_centered_top(
1299
+ y, line_height, function._drawing_height
1300
+ )
1301
+ right = self._handle(function, command, x, function_drawing_top)[0]
1302
+
1303
+ # Handle open paren
1304
+ centred_string_top = self._get_centered_top(
1305
+ y, line_height, self._get_text_height()
1306
+ )
1307
+ right = command(DrtTokens.OPEN, right, centred_string_top)[0]
1308
+
1309
+ # Handle each arg
1310
+ for (i, arg) in enumerate(args):
1311
+ arg_drawing_top = self._get_centered_top(
1312
+ y, line_height, arg._drawing_height
1313
+ )
1314
+ right = self._handle(arg, command, right, arg_drawing_top)[0]
1315
+
1316
+ if i + 1 < len(args):
1317
+ # since it's not the last arg, add a comma
1318
+ right = command(DrtTokens.COMMA + " ", right, centred_string_top)[0]
1319
+
1320
+ # Handle close paren
1321
+ right = command(DrtTokens.CLOSE, right, centred_string_top)[0]
1322
+
1323
+ return (right, max_bottom)
1324
+
1325
+ def _handle_LambdaExpression(self, expression, command, x, y):
1326
+ # Find the width of the lambda symbol and abstracted variables
1327
+ variables = DrtTokens.LAMBDA + "%s" % expression.variable + DrtTokens.DOT
1328
+ right = self._visit_command(variables, x, y)[0]
1329
+
1330
+ # Handle term
1331
+ (right, bottom) = self._handle(expression.term, command, right, y)
1332
+
1333
+ # Handle variables now that we know the y-coordinate
1334
+ command(
1335
+ variables, x, self._get_centered_top(y, bottom - y, self._get_text_height())
1336
+ )
1337
+
1338
+ return (right, bottom)
1339
+
1340
+ def _handle_BinaryExpression(self, expression, command, x, y):
1341
+ # Get the full height of the line, based on the operands
1342
+ first_height = self._visit(expression.first, 0, 0)[1]
1343
+ second_height = self._visit(expression.second, 0, 0)[1]
1344
+ line_height = max(first_height, second_height)
1345
+
1346
+ # Handle open paren
1347
+ centred_string_top = self._get_centered_top(
1348
+ y, line_height, self._get_text_height()
1349
+ )
1350
+ right = command(DrtTokens.OPEN, x, centred_string_top)[0]
1351
+
1352
+ # Handle the first operand
1353
+ first_height = expression.first._drawing_height
1354
+ (right, first_bottom) = self._handle(
1355
+ expression.first,
1356
+ command,
1357
+ right,
1358
+ self._get_centered_top(y, line_height, first_height),
1359
+ )
1360
+
1361
+ # Handle the operator
1362
+ right = command(" %s " % expression.getOp(), right, centred_string_top)[0]
1363
+
1364
+ # Handle the second operand
1365
+ second_height = expression.second._drawing_height
1366
+ (right, second_bottom) = self._handle(
1367
+ expression.second,
1368
+ command,
1369
+ right,
1370
+ self._get_centered_top(y, line_height, second_height),
1371
+ )
1372
+
1373
+ # Handle close paren
1374
+ right = command(DrtTokens.CLOSE, right, centred_string_top)[0]
1375
+
1376
+ return (right, max(first_bottom, second_bottom))
1377
+
1378
+ def _handle_DrtProposition(self, expression, command, x, y):
1379
+ # Handle the proposition variable and find its width
1380
+ right = command(expression.variable, x, y)[0]
1381
+
1382
+ # Handle term
1383
+ (right, bottom) = self._handle(expression.term, command, right, y)
1384
+
1385
+ return (right, bottom)
1386
+
1387
+ def _get_centered_top(self, top, full_height, item_height):
1388
+ """Get the y-coordinate of the point that a figure should start at if
1389
+ its height is 'item_height' and it needs to be centered in an area that
1390
+ starts at 'top' and is 'full_height' tall."""
1391
+ return top + (full_height - item_height) / 2
1392
+
1393
+
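For reference, a minimal sketch of rendering a DRS with the drawer above, in the same spirit as test_draw() below; it assumes a working tkinter display:

    from nltk.sem.drt import DrsDrawer, DrtExpression

    # Opens a Tk window with the DRS box: referents above the separator line,
    # conditions below it.
    DrsDrawer(DrtExpression.fromstring(r"([x],[man(x), walks(x)])")).draw()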
1394
+ def demo():
1395
+ print("=" * 20 + "TEST PARSE" + "=" * 20)
1396
+ dexpr = DrtExpression.fromstring
1397
+ print(dexpr(r"([x,y],[sees(x,y)])"))
1398
+ print(dexpr(r"([x],[man(x), walks(x)])"))
1399
+ print(dexpr(r"\x.\y.([],[sees(x,y)])"))
1400
+ print(dexpr(r"\x.([],[walks(x)])(john)"))
1401
+ print(dexpr(r"(([x],[walks(x)]) + ([y],[runs(y)]))"))
1402
+ print(dexpr(r"(([],[walks(x)]) -> ([],[runs(x)]))"))
1403
+ print(dexpr(r"([x],[PRO(x), sees(John,x)])"))
1404
+ print(dexpr(r"([x],[man(x), -([],[walks(x)])])"))
1405
+ print(dexpr(r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])"))
1406
+
1407
+ print("=" * 20 + "Test fol()" + "=" * 20)
1408
+ print(dexpr(r"([x,y],[sees(x,y)])").fol())
1409
+
1410
+ print("=" * 20 + "Test alpha conversion and lambda expression equality" + "=" * 20)
1411
+ e1 = dexpr(r"\x.([],[P(x)])")
1412
+ print(e1)
1413
+ e2 = e1.alpha_convert(Variable("z"))
1414
+ print(e2)
1415
+ print(e1 == e2)
1416
+
1417
+ print("=" * 20 + "Test resolve_anaphora()" + "=" * 20)
1418
+ print(resolve_anaphora(dexpr(r"([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])")))
1419
+ print(
1420
+ resolve_anaphora(dexpr(r"([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])"))
1421
+ )
1422
+ print(resolve_anaphora(dexpr(r"(([x,y],[]) + ([],[PRO(x)]))")))
1423
+
1424
+ print("=" * 20 + "Test pretty_print()" + "=" * 20)
1425
+ dexpr(r"([],[])").pretty_print()
1426
+ dexpr(
1427
+ r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])"
1428
+ ).pretty_print()
1429
+ dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pretty_print()
1430
+ dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pretty_print()
1431
+ dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pretty_print()
1432
+
1433
+
1434
+ def test_draw():
1435
+ try:
1436
+ from tkinter import Tk
1437
+ except ImportError as e:
1438
+ raise ValueError("tkinter is required, but it's not available.") from e
1439
+
1440
+ expressions = [
1441
+ r"x",
1442
+ r"([],[])",
1443
+ r"([x],[])",
1444
+ r"([x],[man(x)])",
1445
+ r"([x,y],[sees(x,y)])",
1446
+ r"([x],[man(x), walks(x)])",
1447
+ r"\x.([],[man(x), walks(x)])",
1448
+ r"\x y.([],[sees(x,y)])",
1449
+ r"([],[(([],[walks(x)]) + ([],[runs(x)]))])",
1450
+ r"([x],[man(x), -([],[walks(x)])])",
1451
+ r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])",
1452
+ ]
1453
+
1454
+ for e in expressions:
1455
+ d = DrtExpression.fromstring(e)
1456
+ d.draw()
1457
+
1458
+
1459
+ if __name__ == "__main__":
1460
+ demo()
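Before the next file, a short sketch of the parse/FOL round trip that demo() above exercises; the expected output is indicative and assumes an installed nltk:

    from nltk.sem.drt import DrtExpression

    drs = DrtExpression.fromstring(r"([x,y],[sees(x,y)])")
    print(drs.fol())       # e.g. exists x y.sees(x,y)
    drs.pretty_print()     # renders the same DRS as an ASCII box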
venv/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py ADDED
@@ -0,0 +1,553 @@
1
+ # Natural Language Toolkit: GUI Demo for Glue Semantics with Discourse
2
+ # Representation Theory (DRT) as meaning language
3
+ #
4
+ # Author: Dan Garrette <[email protected]>
5
+ #
6
+ # Copyright (C) 2001-2023 NLTK Project
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ try:
11
+ from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk
12
+ from tkinter.font import Font
13
+
14
+ from nltk.draw.util import CanvasFrame, ShowText
15
+
16
+ except ImportError:
17
+ """Ignore ImportError because tkinter might not be available."""
18
+
19
+ from nltk.parse import MaltParser
20
+ from nltk.sem.drt import DrsDrawer, DrtVariableExpression
21
+ from nltk.sem.glue import DrtGlue
22
+ from nltk.sem.logic import Variable
23
+ from nltk.tag import RegexpTagger
24
+ from nltk.util import in_idle
25
+
26
+
27
+ class DrtGlueDemo:
28
+ def __init__(self, examples):
29
+ # Set up the main window.
30
+ self._top = Tk()
31
+ self._top.title("DRT Glue Demo")
32
+
33
+ # Set up key bindings.
34
+ self._init_bindings()
35
+
36
+ # Initialize the fonts.
37
+ self._init_fonts(self._top)
38
+
39
+ self._examples = examples
40
+ self._readingCache = [None for example in examples]
41
+
42
+ # The user can hide the grammar.
43
+ self._show_grammar = IntVar(self._top)
44
+ self._show_grammar.set(1)
45
+
46
+ # Set the data to None
47
+ self._curExample = -1
48
+ self._readings = []
49
+ self._drs = None
50
+ self._drsWidget = None
51
+ self._error = None
52
+
53
+ self._init_glue()
54
+
55
+ # Create the basic frames.
56
+ self._init_menubar(self._top)
57
+ self._init_buttons(self._top)
58
+ self._init_exampleListbox(self._top)
59
+ self._init_readingListbox(self._top)
60
+ self._init_canvas(self._top)
61
+
62
+ # Resize callback
63
+ self._canvas.bind("<Configure>", self._configure)
64
+
65
+ #########################################
66
+ ## Initialization Helpers
67
+ #########################################
68
+
69
+ def _init_glue(self):
70
+ tagger = RegexpTagger(
71
+ [
72
+ ("^(David|Mary|John)$", "NNP"),
73
+ (
74
+ "^(walks|sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$",
75
+ "VB",
76
+ ),
77
+ ("^(go|order|vanish|find|approach)$", "VB"),
78
+ ("^(a)$", "ex_quant"),
79
+ ("^(every)$", "univ_quant"),
80
+ ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"),
81
+ ("^(big|gray|former)$", "JJ"),
82
+ ("^(him|himself)$", "PRP"),
83
+ ]
84
+ )
85
+
86
+ depparser = MaltParser(tagger=tagger)
87
+ self._glue = DrtGlue(depparser=depparser, remove_duplicates=False)
88
+
89
+ def _init_fonts(self, root):
90
+ # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
91
+ self._sysfont = Font(font=Button()["font"])
92
+ root.option_add("*Font", self._sysfont)
93
+
94
+ # What's our font size (default=same as sysfont)
95
+ self._size = IntVar(root)
96
+ self._size.set(self._sysfont.cget("size"))
97
+
98
+ self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get())
99
+ self._font = Font(family="helvetica", size=self._size.get())
100
+ if self._size.get() < 0:
101
+ big = self._size.get() - 2
102
+ else:
103
+ big = self._size.get() + 2
104
+ self._bigfont = Font(family="helvetica", weight="bold", size=big)
105
+
106
+ def _init_exampleListbox(self, parent):
107
+ self._exampleFrame = listframe = Frame(parent)
108
+ self._exampleFrame.pack(fill="both", side="left", padx=2)
109
+ self._exampleList_label = Label(
110
+ self._exampleFrame, font=self._boldfont, text="Examples"
111
+ )
112
+ self._exampleList_label.pack()
113
+ self._exampleList = Listbox(
114
+ self._exampleFrame,
115
+ selectmode="single",
116
+ relief="groove",
117
+ background="white",
118
+ foreground="#909090",
119
+ font=self._font,
120
+ selectforeground="#004040",
121
+ selectbackground="#c0f0c0",
122
+ )
123
+
124
+ self._exampleList.pack(side="right", fill="both", expand=1)
125
+
126
+ for example in self._examples:
127
+ self._exampleList.insert("end", (" %s" % example))
128
+ self._exampleList.config(height=min(len(self._examples), 25), width=40)
129
+
130
+ # Add a scrollbar if there are more than 25 examples.
131
+ if len(self._examples) > 25:
132
+ listscroll = Scrollbar(self._exampleFrame, orient="vertical")
133
+ self._exampleList.config(yscrollcommand=listscroll.set)
134
+ listscroll.config(command=self._exampleList.yview)
135
+ listscroll.pack(side="left", fill="y")
136
+
137
+ # If they select a example, apply it.
138
+ self._exampleList.bind("<<ListboxSelect>>", self._exampleList_select)
139
+
140
+ def _init_readingListbox(self, parent):
141
+ self._readingFrame = listframe = Frame(parent)
142
+ self._readingFrame.pack(fill="both", side="left", padx=2)
143
+ self._readingList_label = Label(
144
+ self._readingFrame, font=self._boldfont, text="Readings"
145
+ )
146
+ self._readingList_label.pack()
147
+ self._readingList = Listbox(
148
+ self._readingFrame,
149
+ selectmode="single",
150
+ relief="groove",
151
+ background="white",
152
+ foreground="#909090",
153
+ font=self._font,
154
+ selectforeground="#004040",
155
+ selectbackground="#c0f0c0",
156
+ )
157
+
158
+ self._readingList.pack(side="right", fill="both", expand=1)
159
+
160
+ # Add a scrollbar if there are more than 25 examples.
161
+ listscroll = Scrollbar(self._readingFrame, orient="vertical")
162
+ self._readingList.config(yscrollcommand=listscroll.set)
163
+ listscroll.config(command=self._readingList.yview)
164
+ listscroll.pack(side="right", fill="y")
165
+
166
+ self._populate_readingListbox()
167
+
168
+ def _populate_readingListbox(self):
169
+ # Populate the listbox with integers
170
+ self._readingList.delete(0, "end")
171
+ for i in range(len(self._readings)):
172
+ self._readingList.insert("end", (" %s" % (i + 1)))
173
+ self._readingList.config(height=min(len(self._readings), 25), width=5)
174
+
175
+ # If they select a example, apply it.
176
+ self._readingList.bind("<<ListboxSelect>>", self._readingList_select)
177
+
178
+ def _init_bindings(self):
179
+ # Key bindings are a good thing.
180
+ self._top.bind("<Control-q>", self.destroy)
181
+ self._top.bind("<Control-x>", self.destroy)
182
+ self._top.bind("<Escape>", self.destroy)
183
+ self._top.bind("n", self.next)
184
+ self._top.bind("<space>", self.next)
185
+ self._top.bind("p", self.prev)
186
+ self._top.bind("<BackSpace>", self.prev)
187
+
188
+ def _init_buttons(self, parent):
189
+ # Set up the frames.
190
+ self._buttonframe = buttonframe = Frame(parent)
191
+ buttonframe.pack(fill="none", side="bottom", padx=3, pady=2)
192
+ Button(
193
+ buttonframe,
194
+ text="Prev",
195
+ background="#90c0d0",
196
+ foreground="black",
197
+ command=self.prev,
198
+ ).pack(side="left")
199
+ Button(
200
+ buttonframe,
201
+ text="Next",
202
+ background="#90c0d0",
203
+ foreground="black",
204
+ command=self.next,
205
+ ).pack(side="left")
206
+
207
+ def _configure(self, event):
208
+ self._autostep = 0
209
+ (x1, y1, x2, y2) = self._cframe.scrollregion()
210
+ y2 = event.height - 6
211
+ self._canvas["scrollregion"] = "%d %d %d %d" % (x1, y1, x2, y2)
212
+ self._redraw()
213
+
214
+ def _init_canvas(self, parent):
215
+ self._cframe = CanvasFrame(
216
+ parent,
217
+ background="white",
218
+ # width=525, height=250,
219
+ closeenough=10,
220
+ border=2,
221
+ relief="sunken",
222
+ )
223
+ self._cframe.pack(expand=1, fill="both", side="top", pady=2)
224
+ canvas = self._canvas = self._cframe.canvas()
225
+
226
+ # Initially, there's no tree or text
227
+ self._tree = None
228
+ self._textwidgets = []
229
+ self._textline = None
230
+
231
+ def _init_menubar(self, parent):
232
+ menubar = Menu(parent)
233
+
234
+ filemenu = Menu(menubar, tearoff=0)
235
+ filemenu.add_command(
236
+ label="Exit", underline=1, command=self.destroy, accelerator="q"
237
+ )
238
+ menubar.add_cascade(label="File", underline=0, menu=filemenu)
239
+
240
+ actionmenu = Menu(menubar, tearoff=0)
241
+ actionmenu.add_command(
242
+ label="Next", underline=0, command=self.next, accelerator="n, Space"
243
+ )
244
+ actionmenu.add_command(
245
+ label="Previous", underline=0, command=self.prev, accelerator="p, Backspace"
246
+ )
247
+ menubar.add_cascade(label="Action", underline=0, menu=actionmenu)
248
+
249
+ optionmenu = Menu(menubar, tearoff=0)
250
+ optionmenu.add_checkbutton(
251
+ label="Remove Duplicates",
252
+ underline=0,
253
+ variable=self._glue.remove_duplicates,
254
+ command=self._toggle_remove_duplicates,
255
+ accelerator="r",
256
+ )
257
+ menubar.add_cascade(label="Options", underline=0, menu=optionmenu)
258
+
259
+ viewmenu = Menu(menubar, tearoff=0)
260
+ viewmenu.add_radiobutton(
261
+ label="Tiny",
262
+ variable=self._size,
263
+ underline=0,
264
+ value=10,
265
+ command=self.resize,
266
+ )
267
+ viewmenu.add_radiobutton(
268
+ label="Small",
269
+ variable=self._size,
270
+ underline=0,
271
+ value=12,
272
+ command=self.resize,
273
+ )
274
+ viewmenu.add_radiobutton(
275
+ label="Medium",
276
+ variable=self._size,
277
+ underline=0,
278
+ value=14,
279
+ command=self.resize,
280
+ )
281
+ viewmenu.add_radiobutton(
282
+ label="Large",
283
+ variable=self._size,
284
+ underline=0,
285
+ value=18,
286
+ command=self.resize,
287
+ )
288
+ viewmenu.add_radiobutton(
289
+ label="Huge",
290
+ variable=self._size,
291
+ underline=0,
292
+ value=24,
293
+ command=self.resize,
294
+ )
295
+ menubar.add_cascade(label="View", underline=0, menu=viewmenu)
296
+
297
+ helpmenu = Menu(menubar, tearoff=0)
298
+ helpmenu.add_command(label="About", underline=0, command=self.about)
299
+ menubar.add_cascade(label="Help", underline=0, menu=helpmenu)
300
+
301
+ parent.config(menu=menubar)
302
+
303
+ #########################################
304
+ ## Main draw procedure
305
+ #########################################
306
+
307
+ def _redraw(self):
308
+ canvas = self._canvas
309
+
310
+ # Delete the old DRS, widgets, etc.
311
+ if self._drsWidget is not None:
312
+ self._drsWidget.clear()
313
+
314
+ if self._drs:
315
+ self._drsWidget = DrsWidget(self._canvas, self._drs)
316
+ self._drsWidget.draw()
317
+
318
+ if self._error:
319
+ self._drsWidget = DrsWidget(self._canvas, self._error)
320
+ self._drsWidget.draw()
321
+
322
+ #########################################
323
+ ## Button Callbacks
324
+ #########################################
325
+
326
+ def destroy(self, *e):
327
+ self._autostep = 0
328
+ if self._top is None:
329
+ return
330
+ self._top.destroy()
331
+ self._top = None
332
+
333
+ def prev(self, *e):
334
+ selection = self._readingList.curselection()
335
+ readingListSize = self._readingList.size()
336
+
337
+ # there are readings
338
+ if readingListSize > 0:
339
+ # if one reading is currently selected
340
+ if len(selection) == 1:
341
+ index = int(selection[0])
342
+
343
+ # if it's on (or before) the first item
344
+ if index <= 0:
345
+ self._select_previous_example()
346
+ else:
347
+ self._readingList_store_selection(index - 1)
348
+
349
+ else:
350
+ # select its last reading
351
+ self._readingList_store_selection(readingListSize - 1)
352
+
353
+ else:
354
+ self._select_previous_example()
355
+
356
+ def _select_previous_example(self):
357
+ # if the current example is not the first example
358
+ if self._curExample > 0:
359
+ self._exampleList_store_selection(self._curExample - 1)
360
+ else:
361
+ # go to the last example
362
+ self._exampleList_store_selection(len(self._examples) - 1)
363
+
364
+ def next(self, *e):
365
+ selection = self._readingList.curselection()
366
+ readingListSize = self._readingList.size()
367
+
368
+ # if there are readings
369
+ if readingListSize > 0:
370
+ # if one reading is currently selected
371
+ if len(selection) == 1:
372
+ index = int(selection[0])
373
+
374
+ # if it's on (or past) the last item
375
+ if index >= (readingListSize - 1):
376
+ self._select_next_example()
377
+ else:
378
+ self._readingList_store_selection(index + 1)
379
+
380
+ else:
381
+ # select its first reading
382
+ self._readingList_store_selection(0)
383
+
384
+ else:
385
+ self._select_next_example()
386
+
387
+ def _select_next_example(self):
388
+ # if the current example is not the last example
389
+ if self._curExample < len(self._examples) - 1:
390
+ self._exampleList_store_selection(self._curExample + 1)
391
+ else:
392
+ # go to the first example
393
+ self._exampleList_store_selection(0)
394
+
395
+ def about(self, *e):
396
+ ABOUT = (
397
+ "NLTK Discourse Representation Theory (DRT) Glue Semantics Demo\n"
398
+ + "Written by Daniel H. Garrette"
399
+ )
400
+ TITLE = "About: NLTK DRT Glue Demo"
401
+ try:
402
+ from tkinter.messagebox import Message
403
+
404
+ Message(message=ABOUT, title=TITLE).show()
405
+ except:
406
+ ShowText(self._top, TITLE, ABOUT)
407
+
408
+ def postscript(self, *e):
409
+ self._autostep = 0
410
+ self._cframe.print_to_file()
411
+
412
+ def mainloop(self, *args, **kwargs):
413
+ """
414
+ Enter the Tkinter mainloop. This function must be called if
415
+ this demo is created from a non-interactive program (e.g.
416
+ from a script); otherwise, the demo will close as soon as
417
+ the script completes.
418
+ """
419
+ if in_idle():
420
+ return
421
+ self._top.mainloop(*args, **kwargs)
422
+
423
+ def resize(self, size=None):
424
+ if size is not None:
425
+ self._size.set(size)
426
+ size = self._size.get()
427
+ self._font.configure(size=-(abs(size)))
428
+ self._boldfont.configure(size=-(abs(size)))
429
+ self._sysfont.configure(size=-(abs(size)))
430
+ self._bigfont.configure(size=-(abs(size + 2)))
431
+ self._redraw()
432
+
433
+ def _toggle_remove_duplicates(self):
434
+ self._glue.remove_duplicates = not self._glue.remove_duplicates
435
+
436
+ self._exampleList.selection_clear(0, "end")
437
+ self._readings = []
438
+ self._populate_readingListbox()
439
+ self._readingCache = [None for ex in self._examples]
440
+ self._curExample = -1
441
+ self._error = None
442
+
443
+ self._drs = None
444
+ self._redraw()
445
+
446
+ def _exampleList_select(self, event):
447
+ selection = self._exampleList.curselection()
448
+ if len(selection) != 1:
449
+ return
450
+ self._exampleList_store_selection(int(selection[0]))
451
+
452
+ def _exampleList_store_selection(self, index):
453
+ self._curExample = index
454
+ example = self._examples[index]
455
+
456
+ self._exampleList.selection_clear(0, "end")
457
+ if example:
458
+ cache = self._readingCache[index]
459
+ if cache:
460
+ if isinstance(cache, list):
461
+ self._readings = cache
462
+ self._error = None
463
+ else:
464
+ self._readings = []
465
+ self._error = cache
466
+ else:
467
+ try:
468
+ self._readings = self._glue.parse_to_meaning(example)
469
+ self._error = None
470
+ self._readingCache[index] = self._readings
471
+ except Exception as e:
472
+ self._readings = []
473
+ self._error = DrtVariableExpression(Variable("Error: " + str(e)))
474
+ self._readingCache[index] = self._error
475
+
476
+ # add a star to the end of the example
477
+ self._exampleList.delete(index)
478
+ self._exampleList.insert(index, (" %s *" % example))
479
+ self._exampleList.config(
480
+ height=min(len(self._examples), 25), width=40
481
+ )
482
+
483
+ self._populate_readingListbox()
484
+
485
+ self._exampleList.selection_set(index)
486
+
487
+ self._drs = None
488
+ self._redraw()
489
+
490
+ def _readingList_select(self, event):
491
+ selection = self._readingList.curselection()
492
+ if len(selection) != 1:
493
+ return
494
+ self._readingList_store_selection(int(selection[0]))
495
+
496
+ def _readingList_store_selection(self, index):
497
+ reading = self._readings[index]
498
+
499
+ self._readingList.selection_clear(0, "end")
500
+ if reading:
501
+ self._readingList.selection_set(index)
502
+
503
+ self._drs = reading.simplify().normalize().resolve_anaphora()
504
+
505
+ self._redraw()
506
+
507
+
508
+ class DrsWidget:
509
+ def __init__(self, canvas, drs, **attribs):
510
+ self._drs = drs
511
+ self._canvas = canvas
512
+ canvas.font = Font(
513
+ font=canvas.itemcget(canvas.create_text(0, 0, text=""), "font")
514
+ )
515
+ canvas._BUFFER = 3
516
+ self.bbox = (0, 0, 0, 0)
517
+
518
+ def draw(self):
519
+ (right, bottom) = DrsDrawer(self._drs, canvas=self._canvas).draw()
520
+ self.bbox = (0, 0, right + 1, bottom + 1)
521
+
522
+ def clear(self):
523
+ self._canvas.create_rectangle(self.bbox, fill="white", width="0")
524
+
525
+
526
+ def demo():
527
+ examples = [
528
+ "John walks",
529
+ "David sees Mary",
530
+ "David eats a sandwich",
531
+ "every man chases a dog",
532
+ # 'every man believes a dog yawns',
533
+ # 'John gives David a sandwich',
534
+ "John chases himself",
535
+ # 'John persuades David to order a pizza',
536
+ # 'John tries to go',
537
+ # 'John tries to find a unicorn',
538
+ # 'John seems to vanish',
539
+ # 'a unicorn seems to approach',
540
+ # 'every big cat leaves',
541
+ # 'every gray cat leaves',
542
+ # 'every big gray cat leaves',
543
+ # 'a former senator leaves',
544
+ # 'John likes a cat',
545
+ # 'John likes every cat',
546
+ # 'he walks',
547
+ # 'John walks and he leaves'
548
+ ]
549
+ DrtGlueDemo(examples).mainloop()
550
+
551
+
552
+ if __name__ == "__main__":
553
+ demo()
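A minimal launcher sketch for the GUI above, equivalent to calling demo() with a shorter example list; it assumes tkinter is available and that MaltParser can be located on the system, since DrtGlue builds its dependency parses with it:

    from nltk.sem.drt_glue_demo import DrtGlueDemo

    # Opens the demo window; readings are computed lazily when an example is selected.
    DrtGlueDemo(["John walks", "David sees Mary"]).mainloop()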
venv/lib/python3.10/site-packages/nltk/sem/evaluate.py ADDED
@@ -0,0 +1,829 @@
1
+ # Natural Language Toolkit: Models for first-order languages with lambda
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Ewan Klein <[email protected]>,
5
+ # URL: <https://www.nltk.org>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ # TODO:
9
+ # - fix tracing
10
+ # - fix iterator-based approach to existentials
11
+
12
+ """
13
+ This module provides data structures for representing first-order
14
+ models.
15
+ """
16
+
17
+ import inspect
18
+ import re
19
+ import sys
20
+ import textwrap
21
+ from pprint import pformat
22
+
23
+ from nltk.decorators import decorator # this is used in code that is commented out
24
+ from nltk.sem.logic import (
25
+ AbstractVariableExpression,
26
+ AllExpression,
27
+ AndExpression,
28
+ ApplicationExpression,
29
+ EqualityExpression,
30
+ ExistsExpression,
31
+ Expression,
32
+ IffExpression,
33
+ ImpExpression,
34
+ IndividualVariableExpression,
35
+ IotaExpression,
36
+ LambdaExpression,
37
+ NegatedExpression,
38
+ OrExpression,
39
+ Variable,
40
+ is_indvar,
41
+ )
42
+
43
+
44
+ class Error(Exception):
45
+ pass
46
+
47
+
48
+ class Undefined(Error):
49
+ pass
50
+
51
+
52
+ def trace(f, *args, **kw):
53
+ argspec = inspect.getfullargspec(f)
54
+ d = dict(zip(argspec[0], args))
55
+ if d.pop("trace", None):
56
+ print()
57
+ for item in d.items():
58
+ print("%s => %s" % item)
59
+ return f(*args, **kw)
60
+
61
+
62
+ def is_rel(s):
63
+ """
64
+ Check whether a set represents a relation (of any arity).
65
+
66
+ :param s: a set containing tuples of str elements
67
+ :type s: set
68
+ :rtype: bool
69
+ """
70
+ # we have the empty relation, i.e. set()
71
+ if len(s) == 0:
72
+ return True
73
+ # all the elements are tuples of the same length
74
+ elif all(isinstance(el, tuple) for el in s) and len(max(s)) == len(min(s)):
75
+ return True
76
+ else:
77
+ raise ValueError("Set %r contains sequences of different lengths" % s)
78
+
79
+
80
+ def set2rel(s):
81
+ """
82
+ Convert a set containing individuals (strings or numbers) into a set of
83
+ unary tuples. Any tuples of strings already in the set are passed through
84
+ unchanged.
85
+
86
+ For example:
87
+ - set(['a', 'b']) => set([('a',), ('b',)])
88
+ - set([3, 27]) => set([('3',), ('27',)])
89
+
90
+ :type s: set
91
+ :rtype: set of tuple of str
92
+ """
93
+ new = set()
94
+ for elem in s:
95
+ if isinstance(elem, str):
96
+ new.add((elem,))
97
+ elif isinstance(elem, int):
98
+ new.add((str(elem),))
99
+ else:
100
+ new.add(elem)
101
+ return new
102
+
103
+
104
+ def arity(rel):
105
+ """
106
+ Check the arity of a relation.
107
+ :type rel: set of tuples
108
+ :rtype: int
109
+ """
110
+ if len(rel) == 0:
111
+ return 0
112
+ return len(list(rel)[0])
113
+
114
+
115
+ class Valuation(dict):
116
+ """
117
+ A dictionary which represents a model-theoretic Valuation of non-logical constants.
118
+ Keys are strings representing the constants to be interpreted, and values correspond
119
+ to individuals (represented as strings) and n-ary relations (represented as sets of tuples
120
+ of strings).
121
+
122
+ An instance of ``Valuation`` will raise a KeyError exception (i.e.,
123
+ just behave like a standard dictionary) if indexed with an expression that
124
+ is not in its list of symbols.
125
+ """
126
+
127
+ def __init__(self, xs):
128
+ """
129
+ :param xs: a list of (symbol, value) pairs.
130
+ """
131
+ super().__init__()
132
+ for (sym, val) in xs:
133
+ if isinstance(val, str) or isinstance(val, bool):
134
+ self[sym] = val
135
+ elif isinstance(val, set):
136
+ self[sym] = set2rel(val)
137
+ else:
138
+ msg = textwrap.fill(
139
+ "Error in initializing Valuation. "
140
+ "Unrecognized value for symbol '%s':\n%s" % (sym, val),
141
+ width=66,
142
+ )
143
+
144
+ raise ValueError(msg)
145
+
146
+ def __getitem__(self, key):
147
+ if key in self:
148
+ return dict.__getitem__(self, key)
149
+ else:
150
+ raise Undefined("Unknown expression: '%s'" % key)
151
+
152
+ def __str__(self):
153
+ return pformat(self)
154
+
155
+ @property
156
+ def domain(self):
157
+ """Set-theoretic domain of the value-space of a Valuation."""
158
+ dom = []
159
+ for val in self.values():
160
+ if isinstance(val, str):
161
+ dom.append(val)
162
+ elif not isinstance(val, bool):
163
+ dom.extend(
164
+ [elem for tuple_ in val for elem in tuple_ if elem is not None]
165
+ )
166
+ return set(dom)
167
+
168
+ @property
169
+ def symbols(self):
170
+ """The non-logical constants which the Valuation recognizes."""
171
+ return sorted(self.keys())
172
+
173
+ @classmethod
174
+ def fromstring(cls, s):
175
+ return read_valuation(s)
176
+
177
+
178
+ ##########################################
179
+ # REs used by the _read_valuation function
180
+ ##########################################
181
+ _VAL_SPLIT_RE = re.compile(r"\s*=+>\s*")
182
+ _ELEMENT_SPLIT_RE = re.compile(r"\s*,\s*")
183
+ _TUPLES_RE = re.compile(
184
+ r"""\s*
185
+ (\([^)]+\)) # tuple-expression
186
+ \s*""",
187
+ re.VERBOSE,
188
+ )
189
+
190
+
191
+ def _read_valuation_line(s):
192
+ """
193
+ Read a line in a valuation file.
194
+
195
+ Lines are expected to be of the form::
196
+
197
+ noosa => n
198
+ girl => {g1, g2}
199
+ chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}
200
+
201
+ :param s: input line
202
+ :type s: str
203
+ :return: a pair (symbol, value)
204
+ :rtype: tuple
205
+ """
206
+ pieces = _VAL_SPLIT_RE.split(s)
207
+ symbol = pieces[0]
208
+ value = pieces[1]
209
+ # check whether the value is meant to be a set
210
+ if value.startswith("{"):
211
+ value = value[1:-1]
212
+ tuple_strings = _TUPLES_RE.findall(value)
213
+ # are the set elements tuples?
214
+ if tuple_strings:
215
+ set_elements = []
216
+ for ts in tuple_strings:
217
+ ts = ts[1:-1]
218
+ element = tuple(_ELEMENT_SPLIT_RE.split(ts))
219
+ set_elements.append(element)
220
+ else:
221
+ set_elements = _ELEMENT_SPLIT_RE.split(value)
222
+ value = set(set_elements)
223
+ return symbol, value
224
+
225
+
226
+ def read_valuation(s, encoding=None):
227
+ """
228
+ Convert a valuation string into a valuation.
229
+
230
+ :param s: a valuation string
231
+ :type s: str
232
+ :param encoding: the encoding of the input string, if it is binary
233
+ :type encoding: str
234
+ :return: a ``nltk.sem`` valuation
235
+ :rtype: Valuation
236
+ """
237
+ if encoding is not None:
238
+ s = s.decode(encoding)
239
+ statements = []
240
+ for linenum, line in enumerate(s.splitlines()):
241
+ line = line.strip()
242
+ if line.startswith("#") or line == "":
243
+ continue
244
+ try:
245
+ statements.append(_read_valuation_line(line))
246
+ except ValueError as e:
247
+ raise ValueError(f"Unable to parse line {linenum}: {line}") from e
248
+ return Valuation(statements)
249
+
250
+
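For reference, a minimal sketch of the valuation format documented above, parsed through Valuation.fromstring; set ordering in the printed output may vary:

    from nltk.sem.evaluate import Valuation

    val = Valuation.fromstring("""
    john => j
    girl => {g1, g2}
    chase => {(j, g1), (g2, g1)}
    """)
    print(val["girl"])   # e.g. {('g1',), ('g2',)}
    print(val.domain)    # e.g. {'j', 'g1', 'g2'}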
251
+ class Assignment(dict):
252
+ r"""
253
+ A dictionary which represents an assignment of values to variables.
254
+
255
+ An assignment can only assign values from its domain.
256
+
257
+ If an unknown expression *a* is passed to a model *M*\ 's
258
+ interpretation function *i*, *i* will first check whether *M*\ 's
259
+ valuation assigns an interpretation to *a* as a constant, and if
260
+ this fails, *i* will delegate the interpretation of *a* to
261
+ *g*. *g* only assigns values to individual variables (i.e.,
262
+ members of the class ``IndividualVariableExpression`` in the ``logic``
263
+ module). If a variable is not assigned a value by *g*, it will raise
264
+ an ``Undefined`` exception.
265
+
266
+ A variable *Assignment* is a mapping from individual variables to
267
+ entities in the domain. Individual variables are usually indicated
268
+ with the letters ``'x'``, ``'y'``, ``'w'`` and ``'z'``, optionally
269
+ followed by an integer (e.g., ``'x0'``, ``'y332'``). Assignments are
270
+ created using the ``Assignment`` constructor, which also takes the
271
+ domain as a parameter.
272
+
273
+ >>> from nltk.sem.evaluate import Assignment
274
+ >>> dom = set(['u1', 'u2', 'u3', 'u4'])
275
+ >>> g3 = Assignment(dom, [('x', 'u1'), ('y', 'u2')])
276
+ >>> g3 == {'x': 'u1', 'y': 'u2'}
277
+ True
278
+
279
+ There is also a ``print`` format for assignments which uses a notation
280
+ closer to that in logic textbooks:
281
+
282
+ >>> print(g3)
283
+ g[u1/x][u2/y]
284
+
285
+ It is also possible to update an assignment using the ``add`` method:
286
+
287
+ >>> dom = set(['u1', 'u2', 'u3', 'u4'])
288
+ >>> g4 = Assignment(dom)
289
+ >>> g4.add('x', 'u1')
290
+ {'x': 'u1'}
291
+
292
+ With no arguments, ``purge()`` is equivalent to ``clear()`` on a dictionary:
293
+
294
+ >>> g4.purge()
295
+ >>> g4
296
+ {}
297
+
298
+ :param domain: the domain of discourse
299
+ :type domain: set
300
+ :param assign: a list of (varname, value) associations
301
+ :type assign: list
302
+ """
303
+
304
+ def __init__(self, domain, assign=None):
305
+ super().__init__()
306
+ self.domain = domain
307
+ if assign:
308
+ for (var, val) in assign:
309
+ assert val in self.domain, "'{}' is not in the domain: {}".format(
310
+ val,
311
+ self.domain,
312
+ )
313
+ assert is_indvar(var), (
314
+ "Wrong format for an Individual Variable: '%s'" % var
315
+ )
316
+ self[var] = val
317
+ self.variant = None
318
+ self._addvariant()
319
+
320
+ def __getitem__(self, key):
321
+ if key in self:
322
+ return dict.__getitem__(self, key)
323
+ else:
324
+ raise Undefined("Not recognized as a variable: '%s'" % key)
325
+
326
+ def copy(self):
327
+ new = Assignment(self.domain)
328
+ new.update(self)
329
+ return new
330
+
331
+ def purge(self, var=None):
332
+ """
333
+ Remove one or all keys (i.e. logic variables) from an
334
+ assignment, and update ``self.variant``.
335
+
336
+ :param var: a Variable acting as a key for the assignment.
337
+ """
338
+ if var:
339
+ del self[var]
340
+ else:
341
+ self.clear()
342
+ self._addvariant()
343
+ return None
344
+
345
+ def __str__(self):
346
+ """
347
+ Pretty printing for assignments. {'x': 'u'} appears as 'g[u/x]'
348
+ """
349
+ gstring = "g"
350
+ # Deterministic output for unit testing.
351
+ variant = sorted(self.variant)
352
+ for (val, var) in variant:
353
+ gstring += f"[{val}/{var}]"
354
+ return gstring
355
+
356
+ def _addvariant(self):
357
+ """
358
+ Create a more pretty-printable version of the assignment.
359
+ """
360
+ list_ = []
361
+ for item in self.items():
362
+ pair = (item[1], item[0])
363
+ list_.append(pair)
364
+ self.variant = list_
365
+ return None
366
+
367
+ def add(self, var, val):
368
+ """
369
+ Add a new variable-value pair to the assignment, and update
370
+ ``self.variant``.
371
+
372
+ """
373
+ assert val in self.domain, f"{val} is not in the domain {self.domain}"
374
+ assert is_indvar(var), "Wrong format for an Individual Variable: '%s'" % var
375
+ self[var] = val
376
+ self._addvariant()
377
+ return self
378
+
379
+
380
+ class Model:
381
+ """
382
+ A first order model is a domain *D* of discourse and a valuation *V*.
383
+
384
+ A domain *D* is a set, and a valuation *V* is a map that associates
385
+ expressions with values in the model.
386
+ The domain of *V* should be a subset of *D*.
387
+
388
+ Construct a new ``Model``.
389
+
390
+ :type domain: set
391
+ :param domain: A set of entities representing the domain of discourse of the model.
392
+ :type valuation: Valuation
393
+ :param valuation: the valuation of the model.
394
+ :param prop: If this is set, then we are building a propositional\
395
+ model and don't require the domain of *V* to be subset of *D*.
396
+ """
397
+
398
+ def __init__(self, domain, valuation):
399
+ assert isinstance(domain, set)
400
+ self.domain = domain
401
+ self.valuation = valuation
402
+ if not domain.issuperset(valuation.domain):
403
+ raise Error(
404
+ "The valuation domain, %s, must be a subset of the model's domain, %s"
405
+ % (valuation.domain, domain)
406
+ )
407
+
408
+ def __repr__(self):
409
+ return f"({self.domain!r}, {self.valuation!r})"
410
+
411
+ def __str__(self):
412
+ return f"Domain = {self.domain},\nValuation = \n{self.valuation}"
413
+
414
+ def evaluate(self, expr, g, trace=None):
415
+ """
416
+ Read input expressions, and provide a handler for ``satisfy``
417
+ that blocks further propagation of the ``Undefined`` error.
418
+ :param expr: An expression of ``logic``, given as a string.
419
+ :type g: Assignment
420
+ :param g: an assignment to individual variables.
421
+ :rtype: bool or 'Undefined'
422
+ """
423
+ try:
424
+ parsed = Expression.fromstring(expr)
425
+ value = self.satisfy(parsed, g, trace=trace)
426
+ if trace:
427
+ print()
428
+ print(f"'{expr}' evaluates to {value} under M, {g}")
429
+ return value
430
+ except Undefined:
431
+ if trace:
432
+ print()
433
+ print(f"'{expr}' is undefined under M, {g}")
434
+ return "Undefined"
435
+
436
+ def satisfy(self, parsed, g, trace=None):
437
+ """
438
+ Recursive interpretation function for a formula of first-order logic.
439
+
440
+ Raises an ``Undefined`` error when ``parsed`` is an atomic string
441
+ but is not a symbol or an individual variable.
442
+
443
+ :return: Returns a truth value or ``Undefined`` if ``parsed`` is\
444
+ complex, and calls the interpretation function ``i`` if ``parsed``\
445
+ is atomic.
446
+
447
+ :param parsed: An expression of ``logic``.
448
+ :type g: Assignment
449
+ :param g: an assignment to individual variables.
450
+ """
451
+
452
+ if isinstance(parsed, ApplicationExpression):
453
+ function, arguments = parsed.uncurry()
454
+ if isinstance(function, AbstractVariableExpression):
455
+ # It's a predicate expression ("P(x,y)"), so use uncurried arguments
456
+ funval = self.satisfy(function, g)
457
+ argvals = tuple(self.satisfy(arg, g) for arg in arguments)
458
+ return argvals in funval
459
+ else:
460
+ # It must be a lambda expression, so use curried form
461
+ funval = self.satisfy(parsed.function, g)
462
+ argval = self.satisfy(parsed.argument, g)
463
+ return funval[argval]
464
+ elif isinstance(parsed, NegatedExpression):
465
+ return not self.satisfy(parsed.term, g)
466
+ elif isinstance(parsed, AndExpression):
467
+ return self.satisfy(parsed.first, g) and self.satisfy(parsed.second, g)
468
+ elif isinstance(parsed, OrExpression):
469
+ return self.satisfy(parsed.first, g) or self.satisfy(parsed.second, g)
470
+ elif isinstance(parsed, ImpExpression):
471
+ return (not self.satisfy(parsed.first, g)) or self.satisfy(parsed.second, g)
472
+ elif isinstance(parsed, IffExpression):
473
+ return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g)
474
+ elif isinstance(parsed, EqualityExpression):
475
+ return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g)
476
+ elif isinstance(parsed, AllExpression):
477
+ new_g = g.copy()
478
+ for u in self.domain:
479
+ new_g.add(parsed.variable.name, u)
480
+ if not self.satisfy(parsed.term, new_g):
481
+ return False
482
+ return True
483
+ elif isinstance(parsed, ExistsExpression):
484
+ new_g = g.copy()
485
+ for u in self.domain:
486
+ new_g.add(parsed.variable.name, u)
487
+ if self.satisfy(parsed.term, new_g):
488
+ return True
489
+ return False
490
+ elif isinstance(parsed, IotaExpression):
491
+ new_g = g.copy()
492
+ for u in self.domain:
493
+ new_g.add(parsed.variable.name, u)
494
+ if self.satisfy(parsed.term, new_g):
495
+ return True
496
+ return False
497
+ elif isinstance(parsed, LambdaExpression):
498
+ cf = {}
499
+ var = parsed.variable.name
500
+ for u in self.domain:
501
+ val = self.satisfy(parsed.term, g.add(var, u))
502
+ # NB the dict would be a lot smaller if we do this:
503
+ # if val: cf[u] = val
504
+ # But then need to deal with cases where f(a) should yield
505
+ # a function rather than just False.
506
+ cf[u] = val
507
+ return cf
508
+ else:
509
+ return self.i(parsed, g, trace)
510
+
511
+ # @decorator(trace_eval)
512
+ def i(self, parsed, g, trace=False):
513
+ """
514
+ An interpretation function.
515
+
516
+ Assuming that ``parsed`` is atomic:
517
+
518
+ - if ``parsed`` is a non-logical constant, calls the valuation *V*
519
+ - else if ``parsed`` is an individual variable, calls assignment *g*
520
+ - else returns ``Undefined``.
521
+
522
+ :param parsed: an ``Expression`` of ``logic``.
523
+ :type g: Assignment
524
+ :param g: an assignment to individual variables.
525
+ :return: a semantic value
526
+ """
527
+ # If parsed is a propositional letter 'p', 'q', etc, it could be in valuation.symbols
528
+ # and also be an IndividualVariableExpression. We want to catch this first case.
529
+ # So there is a procedural consequence to the ordering of clauses here:
530
+ if parsed.variable.name in self.valuation.symbols:
531
+ return self.valuation[parsed.variable.name]
532
+ elif isinstance(parsed, IndividualVariableExpression):
533
+ return g[parsed.variable.name]
534
+
535
+ else:
536
+ raise Undefined("Can't find a value for %s" % parsed)
537
+
538
+ def satisfiers(self, parsed, varex, g, trace=None, nesting=0):
539
+ """
540
+ Generate the entities from the model's domain that satisfy an open formula.
541
+
542
+ :param parsed: an open formula
543
+ :type parsed: Expression
544
+ :param varex: the relevant free individual variable in ``parsed``.
545
+ :type varex: VariableExpression or str
546
+ :param g: a variable assignment
547
+ :type g: Assignment
548
+ :return: a set of the entities that satisfy ``parsed``.
549
+ """
550
+
551
+ spacer = " "
552
+ indent = spacer + (spacer * nesting)
553
+ candidates = []
554
+
555
+ if isinstance(varex, str):
556
+ var = Variable(varex)
557
+ else:
558
+ var = varex
559
+
560
+ if var in parsed.free():
561
+ if trace:
562
+ print()
563
+ print(
564
+ (spacer * nesting)
565
+ + f"Open formula is '{parsed}' with assignment {g}"
566
+ )
567
+ for u in self.domain:
568
+ new_g = g.copy()
569
+ new_g.add(var.name, u)
570
+ if trace and trace > 1:
571
+ lowtrace = trace - 1
572
+ else:
573
+ lowtrace = 0
574
+ value = self.satisfy(parsed, new_g, lowtrace)
575
+
576
+ if trace:
577
+ print(indent + "(trying assignment %s)" % new_g)
578
+
579
+ # parsed == False under g[u/var]?
580
+ if value == False:
581
+ if trace:
582
+ print(indent + f"value of '{parsed}' under {new_g} is False")
583
+
584
+ # so g[u/var] is a satisfying assignment
585
+ else:
586
+ candidates.append(u)
587
+ if trace:
588
+ print(indent + f"value of '{parsed}' under {new_g} is {value}")
589
+
590
+ result = {c for c in candidates}
591
+ # var isn't free in parsed
592
+ else:
593
+ raise Undefined(f"{var.name} is not free in {parsed}")
594
+
595
+ return result
596
+
597
+
598
+ # //////////////////////////////////////////////////////////////////////
599
+ # Demo..
600
+ # //////////////////////////////////////////////////////////////////////
601
+ # number of spacer chars
602
+ mult = 30
603
+
604
+ # Demo 1: Propositional Logic
605
+ #################
606
+ def propdemo(trace=None):
607
+ """Example of a propositional model."""
608
+
609
+ global val1, dom1, m1, g1
610
+ val1 = Valuation([("P", True), ("Q", True), ("R", False)])
611
+ dom1 = set()
612
+ m1 = Model(dom1, val1)
613
+ g1 = Assignment(dom1)
614
+
615
+ print()
616
+ print("*" * mult)
617
+ print("Propositional Formulas Demo")
618
+ print("*" * mult)
619
+ print("(Propositional constants treated as nullary predicates)")
620
+ print()
621
+ print("Model m1:\n", m1)
622
+ print("*" * mult)
623
+ sentences = [
624
+ "(P & Q)",
625
+ "(P & R)",
626
+ "- P",
627
+ "- R",
628
+ "- - P",
629
+ "- (P & R)",
630
+ "(P | R)",
631
+ "(R | P)",
632
+ "(R | R)",
633
+ "(- P | R)",
634
+ "(P | - P)",
635
+ "(P -> Q)",
636
+ "(P -> R)",
637
+ "(R -> P)",
638
+ "(P <-> P)",
639
+ "(R <-> R)",
640
+ "(P <-> R)",
641
+ ]
642
+
643
+ for sent in sentences:
644
+ if trace:
645
+ print()
646
+ m1.evaluate(sent, g1, trace)
647
+ else:
648
+ print(f"The value of '{sent}' is: {m1.evaluate(sent, g1)}")
649
+
650
+
651
+ # Demo 2: FOL Model
652
+ #############
653
+
654
+
655
+ def folmodel(quiet=False, trace=None):
656
+ """Example of a first-order model."""
657
+
658
+ global val2, v2, dom2, m2, g2
659
+
660
+ v2 = [
661
+ ("adam", "b1"),
662
+ ("betty", "g1"),
663
+ ("fido", "d1"),
664
+ ("girl", {"g1", "g2"}),
665
+ ("boy", {"b1", "b2"}),
666
+ ("dog", {"d1"}),
667
+ ("love", {("b1", "g1"), ("b2", "g2"), ("g1", "b1"), ("g2", "b1")}),
668
+ ]
669
+ val2 = Valuation(v2)
670
+ dom2 = val2.domain
671
+ m2 = Model(dom2, val2)
672
+ g2 = Assignment(dom2, [("x", "b1"), ("y", "g2")])
673
+
674
+ if not quiet:
675
+ print()
676
+ print("*" * mult)
677
+ print("Models Demo")
678
+ print("*" * mult)
679
+ print("Model m2:\n", "-" * 14, "\n", m2)
680
+ print("Variable assignment = ", g2)
681
+
682
+ exprs = ["adam", "boy", "love", "walks", "x", "y", "z"]
683
+ parsed_exprs = [Expression.fromstring(e) for e in exprs]
684
+
685
+ print()
686
+ for parsed in parsed_exprs:
687
+ try:
688
+ print(
689
+ "The interpretation of '%s' in m2 is %s"
690
+ % (parsed, m2.i(parsed, g2))
691
+ )
692
+ except Undefined:
693
+ print("The interpretation of '%s' in m2 is Undefined" % parsed)
694
+
695
+ applications = [
696
+ ("boy", ("adam",)),
697
+ ("walks", ("adam",)),
698
+ ("love", ("adam", "y")),
699
+ ("love", ("y", "adam")),
700
+ ]
701
+
702
+ for (fun, args) in applications:
703
+ try:
704
+ funval = m2.i(Expression.fromstring(fun), g2)
705
+ argsval = tuple(m2.i(Expression.fromstring(arg), g2) for arg in args)
706
+ print(f"{fun}({args}) evaluates to {argsval in funval}")
707
+ except Undefined:
708
+ print(f"{fun}({args}) evaluates to Undefined")
709
+
710
+
711
+ # Demo 3: FOL
712
+ #########
713
+
714
+
715
+ def foldemo(trace=None):
716
+ """
717
+ Interpretation of closed expressions in a first-order model.
718
+ """
719
+ folmodel(quiet=True)
720
+
721
+ print()
722
+ print("*" * mult)
723
+ print("FOL Formulas Demo")
724
+ print("*" * mult)
725
+
726
+ formulas = [
727
+ "love (adam, betty)",
728
+ "(adam = mia)",
729
+ "\\x. (boy(x) | girl(x))",
730
+ "\\x. boy(x)(adam)",
731
+ "\\x y. love(x, y)",
732
+ "\\x y. love(x, y)(adam)(betty)",
733
+ "\\x y. love(x, y)(adam, betty)",
734
+ "\\x y. (boy(x) & love(x, y))",
735
+ "\\x. exists y. (boy(x) & love(x, y))",
736
+ "exists z1. boy(z1)",
737
+ "exists x. (boy(x) & -(x = adam))",
738
+ "exists x. (boy(x) & all y. love(y, x))",
739
+ "all x. (boy(x) | girl(x))",
740
+ "all x. (girl(x) -> exists y. boy(y) & love(x, y))", # Every girl loves some boy.
741
+ "exists x. (boy(x) & all y. (girl(y) -> love(y, x)))", # There is some boy that every girl loves.
742
+ "exists x. (boy(x) & all y. (girl(y) -> love(x, y)))", # Some boy loves every girl.
743
+ "all x. (dog(x) -> - girl(x))",
744
+ "exists x. exists y. (love(x, y) & love(x, y))",
745
+ ]
746
+
747
+ for fmla in formulas:
748
+ g2.purge()
749
+ if trace:
750
+ m2.evaluate(fmla, g2, trace)
751
+ else:
752
+ print(f"The value of '{fmla}' is: {m2.evaluate(fmla, g2)}")
753
+
754
+
755
+ # Demo 4: Satisfaction
756
+ #############
757
+
758
+
759
+ def satdemo(trace=None):
760
+ """Satisfiers of an open formula in a first order model."""
761
+
762
+ print()
763
+ print("*" * mult)
764
+ print("Satisfiers Demo")
765
+ print("*" * mult)
766
+
767
+ folmodel(quiet=True)
768
+
769
+ formulas = [
770
+ "boy(x)",
771
+ "(x = x)",
772
+ "(boy(x) | girl(x))",
773
+ "(boy(x) & girl(x))",
774
+ "love(adam, x)",
775
+ "love(x, adam)",
776
+ "-(x = adam)",
777
+ "exists z22. love(x, z22)",
778
+ "exists y. love(y, x)",
779
+ "all y. (girl(y) -> love(x, y))",
780
+ "all y. (girl(y) -> love(y, x))",
781
+ "all y. (girl(y) -> (boy(x) & love(y, x)))",
782
+ "(boy(x) & all y. (girl(y) -> love(x, y)))",
783
+ "(boy(x) & all y. (girl(y) -> love(y, x)))",
784
+ "(boy(x) & exists y. (girl(y) & love(y, x)))",
785
+ "(girl(x) -> dog(x))",
786
+ "all y. (dog(y) -> (x = y))",
787
+ "exists y. love(y, x)",
788
+ "exists y. (love(adam, y) & love(y, x))",
789
+ ]
790
+
791
+ if trace:
792
+ print(m2)
793
+
794
+ for fmla in formulas:
795
+ print(fmla)
796
+ Expression.fromstring(fmla)
797
+
798
+ parsed = [Expression.fromstring(fmla) for fmla in formulas]
799
+
800
+ for p in parsed:
801
+ g2.purge()
802
+ print(
803
+ "The satisfiers of '{}' are: {}".format(p, m2.satisfiers(p, "x", g2, trace))
804
+ )
805
+
806
+
807
+ def demo(num=0, trace=None):
808
+ """
809
+ Run some demos.
810
+
811
+ - num = 1: propositional logic demo
812
+ - num = 2: first order model demo (only if trace is set)
813
+ - num = 3: first order sentences demo
814
+ - num = 4: satisfaction of open formulas demo
815
+ - any other value: run all the demos
816
+
817
+ :param trace: trace = 1, or trace = 2 for more verbose tracing
818
+ """
819
+ demos = {1: propdemo, 2: folmodel, 3: foldemo, 4: satdemo}
820
+
821
+ try:
822
+ demos[num](trace=trace)
823
+ except KeyError:
824
+ for num in demos:
825
+ demos[num](trace=trace)
826
+
827
+
828
+ if __name__ == "__main__":
829
+ demo(2, trace=0)
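A minimal usage sketch for the Valuation/Assignment/Model API defined above; the entities, predicates and formulas below are illustrative assumptions, not part of the module:

from nltk.sem.evaluate import Assignment, Model, Valuation
from nltk.sem.logic import Expression

# Toy first-order model: one boy, one girl, and a single 'love' pair.
val = Valuation(
    [
        ("adam", "b1"),
        ("betty", "g1"),
        ("boy", {"b1"}),
        ("girl", {"g1"}),
        ("love", {("b1", "g1")}),
    ]
)
dom = val.domain        # {'b1', 'g1'}
g = Assignment(dom)     # empty variable assignment
m = Model(dom, val)

print(m.evaluate("love(adam, betty)", g))            # True
print(m.evaluate("all x. (boy(x) -> girl(x))", g))   # False
print(m.satisfiers(Expression.fromstring("boy(x)"), "x", g))  # {'b1'}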
venv/lib/python3.10/site-packages/nltk/sem/glue.py ADDED
@@ -0,0 +1,835 @@
1
+ # Natural Language Toolkit: Glue Semantics
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import os
10
+ from itertools import chain
11
+
12
+ import nltk
13
+ from nltk.internals import Counter
14
+ from nltk.sem import drt, linearlogic
15
+ from nltk.sem.logic import (
16
+ AbstractVariableExpression,
17
+ Expression,
18
+ LambdaExpression,
19
+ Variable,
20
+ VariableExpression,
21
+ )
22
+ from nltk.tag import BigramTagger, RegexpTagger, TrigramTagger, UnigramTagger
23
+
24
+ SPEC_SEMTYPES = {
25
+ "a": "ex_quant",
26
+ "an": "ex_quant",
27
+ "every": "univ_quant",
28
+ "the": "def_art",
29
+ "no": "no_quant",
30
+ "default": "ex_quant",
31
+ }
32
+
33
+ OPTIONAL_RELATIONSHIPS = ["nmod", "vmod", "punct"]
34
+
35
+
36
+ class GlueFormula:
37
+ def __init__(self, meaning, glue, indices=None):
38
+ if not indices:
39
+ indices = set()
40
+
41
+ if isinstance(meaning, str):
42
+ self.meaning = Expression.fromstring(meaning)
43
+ elif isinstance(meaning, Expression):
44
+ self.meaning = meaning
45
+ else:
46
+ raise RuntimeError(
47
+ "Meaning term neither string nor expression: %s, %s"
48
+ % (meaning, meaning.__class__)
49
+ )
50
+
51
+ if isinstance(glue, str):
52
+ self.glue = linearlogic.LinearLogicParser().parse(glue)
53
+ elif isinstance(glue, linearlogic.Expression):
54
+ self.glue = glue
55
+ else:
56
+ raise RuntimeError(
57
+ "Glue term neither string nor expression: %s, %s"
58
+ % (glue, glue.__class__)
59
+ )
60
+
61
+ self.indices = indices
62
+
63
+ def applyto(self, arg):
64
+ """self = (\\x.(walk x), (subj -o f))
65
+ arg = (john , subj)
66
+ returns ((walk john), f)
67
+ """
68
+ if self.indices & arg.indices: # if the sets are NOT disjoint
69
+ raise linearlogic.LinearLogicApplicationException(
70
+ f"'{self}' applied to '{arg}'. Indices are not disjoint."
71
+ )
72
+ else: # if the sets ARE disjoint
73
+ return_indices = self.indices | arg.indices
74
+
75
+ try:
76
+ return_glue = linearlogic.ApplicationExpression(
77
+ self.glue, arg.glue, arg.indices
78
+ )
79
+ except linearlogic.LinearLogicApplicationException as e:
80
+ raise linearlogic.LinearLogicApplicationException(
81
+ f"'{self.simplify()}' applied to '{arg.simplify()}'"
82
+ ) from e
83
+
84
+ arg_meaning_abstracted = arg.meaning
85
+ if return_indices:
86
+ for dep in self.glue.simplify().antecedent.dependencies[
87
+ ::-1
88
+ ]: # if self.glue is (A -o B), dep is in A.dependencies
89
+ arg_meaning_abstracted = self.make_LambdaExpression(
90
+ Variable("v%s" % dep), arg_meaning_abstracted
91
+ )
92
+ return_meaning = self.meaning.applyto(arg_meaning_abstracted)
93
+
94
+ return self.__class__(return_meaning, return_glue, return_indices)
95
+
96
+ def make_VariableExpression(self, name):
97
+ return VariableExpression(name)
98
+
99
+ def make_LambdaExpression(self, variable, term):
100
+ return LambdaExpression(variable, term)
101
+
102
+ def lambda_abstract(self, other):
103
+ assert isinstance(other, GlueFormula)
104
+ assert isinstance(other.meaning, AbstractVariableExpression)
105
+ return self.__class__(
106
+ self.make_LambdaExpression(other.meaning.variable, self.meaning),
107
+ linearlogic.ImpExpression(other.glue, self.glue),
108
+ )
109
+
110
+ def compile(self, counter=None):
111
+ """From Iddo Lev's PhD Dissertation p108-109"""
112
+ if not counter:
113
+ counter = Counter()
114
+ (compiled_glue, new_forms) = self.glue.simplify().compile_pos(
115
+ counter, self.__class__
116
+ )
117
+ return new_forms + [
118
+ self.__class__(self.meaning, compiled_glue, {counter.get()})
119
+ ]
120
+
121
+ def simplify(self):
122
+ return self.__class__(
123
+ self.meaning.simplify(), self.glue.simplify(), self.indices
124
+ )
125
+
126
+ def __eq__(self, other):
127
+ return (
128
+ self.__class__ == other.__class__
129
+ and self.meaning == other.meaning
130
+ and self.glue == other.glue
131
+ )
132
+
133
+ def __ne__(self, other):
134
+ return not self == other
135
+
136
+ # sorting for use in doctests which must be deterministic
137
+ def __lt__(self, other):
138
+ return str(self) < str(other)
139
+
140
+ def __str__(self):
141
+ assert isinstance(self.indices, set)
142
+ accum = f"{self.meaning} : {self.glue}"
143
+ if self.indices:
144
+ accum += (
145
+ " : {" + ", ".join(str(index) for index in sorted(self.indices)) + "}"
146
+ )
147
+ return accum
148
+
149
+ def __repr__(self):
150
+ return "%s" % self
151
+
152
+
153
+ class GlueDict(dict):
154
+ def __init__(self, filename, encoding=None):
155
+ self.filename = filename
156
+ self.file_encoding = encoding
157
+ self.read_file()
158
+
159
+ def read_file(self, empty_first=True):
160
+ if empty_first:
161
+ self.clear()
162
+
163
+ try:
164
+ contents = nltk.data.load(
165
+ self.filename, format="text", encoding=self.file_encoding
166
+ )
167
+ # TODO: the above can't handle zip files, but this should anyway be fixed in nltk.data.load()
168
+ except LookupError as e:
169
+ try:
170
+ contents = nltk.data.load(
171
+ "file:" + self.filename, format="text", encoding=self.file_encoding
172
+ )
173
+ except LookupError:
174
+ raise e
175
+ lines = contents.splitlines()
176
+
177
+ for line in lines: # example: 'n : (\\x.(<word> x), (v-or))'
178
+ # lambdacalc -^ linear logic -^
179
+ line = line.strip() # remove trailing newline
180
+ if not len(line):
181
+ continue # skip empty lines
182
+ if line[0] == "#":
183
+ continue # skip commented out lines
184
+
185
+ parts = line.split(
186
+ " : ", 2
187
+ ) # ['verb', '(\\x.(<word> x), ( subj -o f ))', '[subj]']
188
+
189
+ glue_formulas = []
190
+ paren_count = 0
191
+ tuple_start = 0
192
+ tuple_comma = 0
193
+
194
+ relationships = None
195
+
196
+ if len(parts) > 1:
197
+ for (i, c) in enumerate(parts[1]):
198
+ if c == "(":
199
+ if paren_count == 0: # if it's the first '(' of a tuple
200
+ tuple_start = i + 1 # then save the index
201
+ paren_count += 1
202
+ elif c == ")":
203
+ paren_count -= 1
204
+ if paren_count == 0: # if it's the last ')' of a tuple
205
+ meaning_term = parts[1][
206
+ tuple_start:tuple_comma
207
+ ] # '\\x.(<word> x)'
208
+ glue_term = parts[1][tuple_comma + 1 : i] # '(v-r)'
209
+ glue_formulas.append(
210
+ [meaning_term, glue_term]
211
+ ) # add the GlueFormula to the list
212
+ elif c == ",":
213
+ if (
214
+ paren_count == 1
215
+ ): # if it's a comma separating the parts of the tuple
216
+ tuple_comma = i # then save the index
217
+ elif c == "#": # skip comments at the ends of lines
218
+ if (
219
+ paren_count != 0
220
+ ): # if the line hasn't parsed correctly so far
221
+ raise RuntimeError(
222
+ "Formula syntax is incorrect for entry " + line
223
+ )
224
+ break # break to the next line
225
+
226
+ if len(parts) > 2: # if there is a relationship entry at the end
227
+ rel_start = parts[2].index("[") + 1
228
+ rel_end = parts[2].index("]")
229
+ if rel_start == rel_end:
230
+ relationships = frozenset()
231
+ else:
232
+ relationships = frozenset(
233
+ r.strip() for r in parts[2][rel_start:rel_end].split(",")
234
+ )
235
+
236
+ try:
237
+ start_inheritance = parts[0].index("(")
238
+ end_inheritance = parts[0].index(")")
239
+ sem = parts[0][:start_inheritance].strip()
240
+ supertype = parts[0][start_inheritance + 1 : end_inheritance]
241
+ except:
242
+ sem = parts[0].strip()
243
+ supertype = None
244
+
245
+ if sem not in self:
246
+ self[sem] = {}
247
+
248
+ if (
249
+ relationships is None
250
+ ): # if not specified for a specific relationship set
251
+ # add all relationship entries for parents
252
+ if supertype:
253
+ for rels in self[supertype]:
254
+ if rels not in self[sem]:
255
+ self[sem][rels] = []
256
+ glue = self[supertype][rels]
257
+ self[sem][rels].extend(glue)
258
+ self[sem][rels].extend(
259
+ glue_formulas
260
+ ) # add the glue formulas to every rel entry
261
+ else:
262
+ if None not in self[sem]:
263
+ self[sem][None] = []
264
+ self[sem][None].extend(
265
+ glue_formulas
266
+ ) # add the glue formulas to every rel entry
267
+ else:
268
+ if relationships not in self[sem]:
269
+ self[sem][relationships] = []
270
+ if supertype:
271
+ self[sem][relationships].extend(self[supertype][relationships])
272
+ self[sem][relationships].extend(
273
+ glue_formulas
274
+ ) # add the glue entry to the dictionary
275
+
276
+ def __str__(self):
277
+ accum = ""
278
+ for pos in self:
279
+ str_pos = "%s" % pos
280
+ for relset in self[pos]:
281
+ i = 1
282
+ for gf in self[pos][relset]:
283
+ if i == 1:
284
+ accum += str_pos + ": "
285
+ else:
286
+ accum += " " * (len(str_pos) + 2)
287
+ accum += "%s" % gf
288
+ if relset and i == len(self[pos][relset]):
289
+ accum += " : %s" % relset
290
+ accum += "\n"
291
+ i += 1
292
+ return accum
293
+
294
+ def to_glueformula_list(self, depgraph, node=None, counter=None, verbose=False):
295
+ if node is None:
296
+ # TODO: should it be depgraph.root? Is this code tested?
297
+ top = depgraph.nodes[0]
298
+ depList = list(chain.from_iterable(top["deps"].values()))
299
+ root = depgraph.nodes[depList[0]]
300
+
301
+ return self.to_glueformula_list(depgraph, root, Counter(), verbose)
302
+
303
+ glueformulas = self.lookup(node, depgraph, counter)
304
+ for dep_idx in chain.from_iterable(node["deps"].values()):
305
+ dep = depgraph.nodes[dep_idx]
306
+ glueformulas.extend(
307
+ self.to_glueformula_list(depgraph, dep, counter, verbose)
308
+ )
309
+ return glueformulas
310
+
311
+ def lookup(self, node, depgraph, counter):
312
+ semtype_names = self.get_semtypes(node)
313
+
314
+ semtype = None
315
+ for name in semtype_names:
316
+ if name in self:
317
+ semtype = self[name]
318
+ break
319
+ if semtype is None:
320
+ # raise KeyError, "There is no GlueDict entry for sem type '%s' (for '%s')" % (sem, word)
321
+ return []
322
+
323
+ self.add_missing_dependencies(node, depgraph)
324
+
325
+ lookup = self._lookup_semtype_option(semtype, node, depgraph)
326
+
327
+ if not len(lookup):
328
+ raise KeyError(
329
+ "There is no GlueDict entry for sem type of '%s' "
330
+ "with tag '%s', and rel '%s'" % (node["word"], node["tag"], node["rel"])
331
+ )
332
+
333
+ return self.get_glueformulas_from_semtype_entry(
334
+ lookup, node["word"], node, depgraph, counter
335
+ )
336
+
337
+ def add_missing_dependencies(self, node, depgraph):
338
+ rel = node["rel"].lower()
339
+
340
+ if rel == "main":
341
+ headnode = depgraph.nodes[node["head"]]
342
+ subj = self.lookup_unique("subj", headnode, depgraph)
343
+ relation = subj["rel"]
344
+ node["deps"].setdefault(relation, [])
345
+ node["deps"][relation].append(subj["address"])
346
+ # node['deps'].append(subj['address'])
347
+
348
+ def _lookup_semtype_option(self, semtype, node, depgraph):
349
+ relationships = frozenset(
350
+ depgraph.nodes[dep]["rel"].lower()
351
+ for dep in chain.from_iterable(node["deps"].values())
352
+ if depgraph.nodes[dep]["rel"].lower() not in OPTIONAL_RELATIONSHIPS
353
+ )
354
+
355
+ try:
356
+ lookup = semtype[relationships]
357
+ except KeyError:
358
+ # An exact match is not found, so find the best match where
359
+ # 'best' is defined as the glue entry whose relationship set has the
360
+ # most relations of any possible relationship set that is a subset
361
+ # of the actual depgraph
362
+ best_match = frozenset()
363
+ for relset_option in set(semtype) - {None}:
364
+ if (
365
+ len(relset_option) > len(best_match)
366
+ and relset_option < relationships
367
+ ):
368
+ best_match = relset_option
369
+ if not best_match:
370
+ if None in semtype:
371
+ best_match = None
372
+ else:
373
+ return None
374
+ lookup = semtype[best_match]
375
+
376
+ return lookup
377
+
378
+ def get_semtypes(self, node):
379
+ """
380
+ Based on the node, return a list of plausible semtypes in order of
381
+ plausibility.
382
+ """
383
+ rel = node["rel"].lower()
384
+ word = node["word"].lower()
385
+
386
+ if rel == "spec":
387
+ if word in SPEC_SEMTYPES:
388
+ return [SPEC_SEMTYPES[word]]
389
+ else:
390
+ return [SPEC_SEMTYPES["default"]]
391
+ elif rel in ["nmod", "vmod"]:
392
+ return [node["tag"], rel]
393
+ else:
394
+ return [node["tag"]]
395
+
396
+ def get_glueformulas_from_semtype_entry(
397
+ self, lookup, word, node, depgraph, counter
398
+ ):
399
+ glueformulas = []
400
+
401
+ glueFormulaFactory = self.get_GlueFormula_factory()
402
+ for meaning, glue in lookup:
403
+ gf = glueFormulaFactory(self.get_meaning_formula(meaning, word), glue)
404
+ if not len(glueformulas):
405
+ gf.word = word
406
+ else:
407
+ gf.word = f"{word}{len(glueformulas) + 1}"
408
+
409
+ gf.glue = self.initialize_labels(gf.glue, node, depgraph, counter.get())
410
+
411
+ glueformulas.append(gf)
412
+ return glueformulas
413
+
414
+ def get_meaning_formula(self, generic, word):
415
+ """
416
+ :param generic: A meaning formula string containing the
417
+ parameter "<word>"
418
+ :param word: The actual word to be replace "<word>"
419
+ """
420
+ word = word.replace(".", "")
421
+ return generic.replace("<word>", word)
422
+
423
+ def initialize_labels(self, expr, node, depgraph, unique_index):
424
+ if isinstance(expr, linearlogic.AtomicExpression):
425
+ name = self.find_label_name(expr.name.lower(), node, depgraph, unique_index)
426
+ if name[0].isupper():
427
+ return linearlogic.VariableExpression(name)
428
+ else:
429
+ return linearlogic.ConstantExpression(name)
430
+ else:
431
+ return linearlogic.ImpExpression(
432
+ self.initialize_labels(expr.antecedent, node, depgraph, unique_index),
433
+ self.initialize_labels(expr.consequent, node, depgraph, unique_index),
434
+ )
435
+
436
+ def find_label_name(self, name, node, depgraph, unique_index):
437
+ try:
438
+ dot = name.index(".")
439
+
440
+ before_dot = name[:dot]
441
+ after_dot = name[dot + 1 :]
442
+ if before_dot == "super":
443
+ return self.find_label_name(
444
+ after_dot, depgraph.nodes[node["head"]], depgraph, unique_index
445
+ )
446
+ else:
447
+ return self.find_label_name(
448
+ after_dot,
449
+ self.lookup_unique(before_dot, node, depgraph),
450
+ depgraph,
451
+ unique_index,
452
+ )
453
+ except ValueError:
454
+ lbl = self.get_label(node)
455
+ if name == "f":
456
+ return lbl
457
+ elif name == "v":
458
+ return "%sv" % lbl
459
+ elif name == "r":
460
+ return "%sr" % lbl
461
+ elif name == "super":
462
+ return self.get_label(depgraph.nodes[node["head"]])
463
+ elif name == "var":
464
+ return f"{lbl.upper()}{unique_index}"
465
+ elif name == "a":
466
+ return self.get_label(self.lookup_unique("conja", node, depgraph))
467
+ elif name == "b":
468
+ return self.get_label(self.lookup_unique("conjb", node, depgraph))
469
+ else:
470
+ return self.get_label(self.lookup_unique(name, node, depgraph))
471
+
472
+ def get_label(self, node):
473
+ """
474
+ Pick an alphabetic character as identifier for an entity in the model.
475
+
476
+ :param node: the dependency-graph node whose address indexes into the list of characters
477
+ :type node: dict
478
+ """
479
+ value = node["address"]
480
+
481
+ letter = [
482
+ "f",
483
+ "g",
484
+ "h",
485
+ "i",
486
+ "j",
487
+ "k",
488
+ "l",
489
+ "m",
490
+ "n",
491
+ "o",
492
+ "p",
493
+ "q",
494
+ "r",
495
+ "s",
496
+ "t",
497
+ "u",
498
+ "v",
499
+ "w",
500
+ "x",
501
+ "y",
502
+ "z",
503
+ "a",
504
+ "b",
505
+ "c",
506
+ "d",
507
+ "e",
508
+ ][value - 1]
509
+ num = int(value) // 26
510
+ if num > 0:
511
+ return letter + str(num)
512
+ else:
513
+ return letter
514
+
515
+ def lookup_unique(self, rel, node, depgraph):
516
+ """
517
+ Look up the dependent of ``node`` whose relation is ``rel``. There should be exactly one such item.
518
+ """
519
+ deps = [
520
+ depgraph.nodes[dep]
521
+ for dep in chain.from_iterable(node["deps"].values())
522
+ if depgraph.nodes[dep]["rel"].lower() == rel.lower()
523
+ ]
524
+
525
+ if len(deps) == 0:
526
+ raise KeyError(
527
+ "'{}' doesn't contain a feature '{}'".format(node["word"], rel)
528
+ )
529
+ elif len(deps) > 1:
530
+ raise KeyError(
531
+ "'{}' should only have one feature '{}'".format(node["word"], rel)
532
+ )
533
+ else:
534
+ return deps[0]
535
+
536
+ def get_GlueFormula_factory(self):
537
+ return GlueFormula
538
+
539
+
540
+ class Glue:
541
+ def __init__(
542
+ self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False
543
+ ):
544
+ self.verbose = verbose
545
+ self.remove_duplicates = remove_duplicates
546
+ self.depparser = depparser
547
+
548
+ from nltk import Prover9
549
+
550
+ self.prover = Prover9()
551
+
552
+ if semtype_file:
553
+ self.semtype_file = semtype_file
554
+ else:
555
+ self.semtype_file = os.path.join(
556
+ "grammars", "sample_grammars", "glue.semtype"
557
+ )
558
+
559
+ def train_depparser(self, depgraphs=None):
560
+ if depgraphs:
561
+ self.depparser.train(depgraphs)
562
+ else:
563
+ self.depparser.train_from_file(
564
+ nltk.data.find(
565
+ os.path.join("grammars", "sample_grammars", "glue_train.conll")
566
+ )
567
+ )
568
+
569
+ def parse_to_meaning(self, sentence):
570
+ readings = []
571
+ for agenda in self.parse_to_compiled(sentence):
572
+ readings.extend(self.get_readings(agenda))
573
+ return readings
574
+
575
+ def get_readings(self, agenda):
576
+ readings = []
577
+ agenda_length = len(agenda)
578
+ atomics = dict()
579
+ nonatomics = dict()
580
+ while agenda: # is not empty
581
+ cur = agenda.pop()
582
+ glue_simp = cur.glue.simplify()
583
+ if isinstance(
584
+ glue_simp, linearlogic.ImpExpression
585
+ ): # if cur.glue is non-atomic
586
+ for key in atomics:
587
+ try:
588
+ if isinstance(cur.glue, linearlogic.ApplicationExpression):
589
+ bindings = cur.glue.bindings
590
+ else:
591
+ bindings = linearlogic.BindingDict()
592
+ glue_simp.antecedent.unify(key, bindings)
593
+ for atomic in atomics[key]:
594
+ if not (
595
+ cur.indices & atomic.indices
596
+ ): # if the sets of indices are disjoint
597
+ try:
598
+ agenda.append(cur.applyto(atomic))
599
+ except linearlogic.LinearLogicApplicationException:
600
+ pass
601
+ except linearlogic.UnificationException:
602
+ pass
603
+ try:
604
+ nonatomics[glue_simp.antecedent].append(cur)
605
+ except KeyError:
606
+ nonatomics[glue_simp.antecedent] = [cur]
607
+
608
+ else: # else cur.glue is atomic
609
+ for key in nonatomics:
610
+ for nonatomic in nonatomics[key]:
611
+ try:
612
+ if isinstance(
613
+ nonatomic.glue, linearlogic.ApplicationExpression
614
+ ):
615
+ bindings = nonatomic.glue.bindings
616
+ else:
617
+ bindings = linearlogic.BindingDict()
618
+ glue_simp.unify(key, bindings)
619
+ if not (
620
+ cur.indices & nonatomic.indices
621
+ ): # if the sets of indices are disjoint
622
+ try:
623
+ agenda.append(nonatomic.applyto(cur))
624
+ except linearlogic.LinearLogicApplicationException:
625
+ pass
626
+ except linearlogic.UnificationException:
627
+ pass
628
+ try:
629
+ atomics[glue_simp].append(cur)
630
+ except KeyError:
631
+ atomics[glue_simp] = [cur]
632
+
633
+ for entry in atomics:
634
+ for gf in atomics[entry]:
635
+ if len(gf.indices) == agenda_length:
636
+ self._add_to_reading_list(gf, readings)
637
+ for entry in nonatomics:
638
+ for gf in nonatomics[entry]:
639
+ if len(gf.indices) == agenda_length:
640
+ self._add_to_reading_list(gf, readings)
641
+ return readings
642
+
643
+ def _add_to_reading_list(self, glueformula, reading_list):
644
+ add_reading = True
645
+ if self.remove_duplicates:
646
+ for reading in reading_list:
647
+ try:
648
+ if reading.equiv(glueformula.meaning, self.prover):
649
+ add_reading = False
650
+ break
651
+ except Exception as e:
652
+ # if there is an exception, the syntax of the formula
653
+ # may not be understandable by the prover, so don't
654
+ # throw out the reading.
655
+ print("Error when checking logical equality of statements", e)
656
+
657
+ if add_reading:
658
+ reading_list.append(glueformula.meaning)
659
+
660
+ def parse_to_compiled(self, sentence):
661
+ gfls = [self.depgraph_to_glue(dg) for dg in self.dep_parse(sentence)]
662
+ return [self.gfl_to_compiled(gfl) for gfl in gfls]
663
+
664
+ def dep_parse(self, sentence):
665
+ """
666
+ Return a dependency graph for the sentence.
667
+
668
+ :param sentence: the sentence to be parsed
669
+ :type sentence: list(str)
670
+ :rtype: DependencyGraph
671
+ """
672
+
673
+ # Lazy-initialize the depparser
674
+ if self.depparser is None:
675
+ from nltk.parse import MaltParser
676
+
677
+ self.depparser = MaltParser(tagger=self.get_pos_tagger())
678
+ if not self.depparser._trained:
679
+ self.train_depparser()
680
+ return self.depparser.parse(sentence, verbose=self.verbose)
681
+
682
+ def depgraph_to_glue(self, depgraph):
683
+ return self.get_glue_dict().to_glueformula_list(depgraph)
684
+
685
+ def get_glue_dict(self):
686
+ return GlueDict(self.semtype_file)
687
+
688
+ def gfl_to_compiled(self, gfl):
689
+ index_counter = Counter()
690
+ return_list = []
691
+ for gf in gfl:
692
+ return_list.extend(gf.compile(index_counter))
693
+
694
+ if self.verbose:
695
+ print("Compiled Glue Premises:")
696
+ for cgf in return_list:
697
+ print(cgf)
698
+
699
+ return return_list
700
+
701
+ def get_pos_tagger(self):
702
+ from nltk.corpus import brown
703
+
704
+ regexp_tagger = RegexpTagger(
705
+ [
706
+ (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers
707
+ (r"(The|the|A|a|An|an)$", "AT"), # articles
708
+ (r".*able$", "JJ"), # adjectives
709
+ (r".*ness$", "NN"), # nouns formed from adjectives
710
+ (r".*ly$", "RB"), # adverbs
711
+ (r".*s$", "NNS"), # plural nouns
712
+ (r".*ing$", "VBG"), # gerunds
713
+ (r".*ed$", "VBD"), # past tense verbs
714
+ (r".*", "NN"), # nouns (default)
715
+ ]
716
+ )
717
+ brown_train = brown.tagged_sents(categories="news")
718
+ unigram_tagger = UnigramTagger(brown_train, backoff=regexp_tagger)
719
+ bigram_tagger = BigramTagger(brown_train, backoff=unigram_tagger)
720
+ trigram_tagger = TrigramTagger(brown_train, backoff=bigram_tagger)
721
+
722
+ # Override particular words
723
+ main_tagger = RegexpTagger(
724
+ [(r"(A|a|An|an)$", "ex_quant"), (r"(Every|every|All|all)$", "univ_quant")],
725
+ backoff=trigram_tagger,
726
+ )
727
+
728
+ return main_tagger
729
+
730
+
731
+ class DrtGlueFormula(GlueFormula):
732
+ def __init__(self, meaning, glue, indices=None):
733
+ if not indices:
734
+ indices = set()
735
+
736
+ if isinstance(meaning, str):
737
+ self.meaning = drt.DrtExpression.fromstring(meaning)
738
+ elif isinstance(meaning, drt.DrtExpression):
739
+ self.meaning = meaning
740
+ else:
741
+ raise RuntimeError(
742
+ "Meaning term neither string nor expression: %s, %s"
743
+ % (meaning, meaning.__class__)
744
+ )
745
+
746
+ if isinstance(glue, str):
747
+ self.glue = linearlogic.LinearLogicParser().parse(glue)
748
+ elif isinstance(glue, linearlogic.Expression):
749
+ self.glue = glue
750
+ else:
751
+ raise RuntimeError(
752
+ "Glue term neither string nor expression: %s, %s"
753
+ % (glue, glue.__class__)
754
+ )
755
+
756
+ self.indices = indices
757
+
758
+ def make_VariableExpression(self, name):
759
+ return drt.DrtVariableExpression(name)
760
+
761
+ def make_LambdaExpression(self, variable, term):
762
+ return drt.DrtLambdaExpression(variable, term)
763
+
764
+
765
+ class DrtGlueDict(GlueDict):
766
+ def get_GlueFormula_factory(self):
767
+ return DrtGlueFormula
768
+
769
+
770
+ class DrtGlue(Glue):
771
+ def __init__(
772
+ self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False
773
+ ):
774
+ if not semtype_file:
775
+ semtype_file = os.path.join(
776
+ "grammars", "sample_grammars", "drt_glue.semtype"
777
+ )
778
+ Glue.__init__(self, semtype_file, remove_duplicates, depparser, verbose)
779
+
780
+ def get_glue_dict(self):
781
+ return DrtGlueDict(self.semtype_file)
782
+
783
+
784
+ def demo(show_example=-1):
785
+ from nltk.parse import MaltParser
786
+
787
+ examples = [
788
+ "David sees Mary",
789
+ "David eats a sandwich",
790
+ "every man chases a dog",
791
+ "every man believes a dog sleeps",
792
+ "John gives David a sandwich",
793
+ "John chases himself",
794
+ ]
795
+ # 'John persuades David to order a pizza',
796
+ # 'John tries to go',
797
+ # 'John tries to find a unicorn',
798
+ # 'John seems to vanish',
799
+ # 'a unicorn seems to approach',
800
+ # 'every big cat leaves',
801
+ # 'every gray cat leaves',
802
+ # 'every big gray cat leaves',
803
+ # 'a former senator leaves',
804
+
805
+ print("============== DEMO ==============")
806
+
807
+ tagger = RegexpTagger(
808
+ [
809
+ ("^(David|Mary|John)$", "NNP"),
810
+ (
811
+ "^(sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$",
812
+ "VB",
813
+ ),
814
+ ("^(go|order|vanish|find|approach)$", "VB"),
815
+ ("^(a)$", "ex_quant"),
816
+ ("^(every)$", "univ_quant"),
817
+ ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"),
818
+ ("^(big|gray|former)$", "JJ"),
819
+ ("^(him|himself)$", "PRP"),
820
+ ]
821
+ )
822
+
823
+ depparser = MaltParser(tagger=tagger)
824
+ glue = Glue(depparser=depparser, verbose=False)
825
+
826
+ for (i, sentence) in enumerate(examples):
827
+ if i == show_example or show_example == -1:
828
+ print(f"[[[Example {i}]]] {sentence}")
829
+ for reading in glue.parse_to_meaning(sentence.split()):
830
+ print(reading.simplify())
831
+ print("")
832
+
833
+
834
+ if __name__ == "__main__":
835
+ demo()
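A minimal sketch of the premise-combination step that GlueFormula.applyto implements above; the meaning and glue strings are illustrative and require no dependency parser:

from nltk.sem.glue import GlueFormula

# A predicate premise and an argument premise, written as meaning : glue.
walks = GlueFormula(r"\x.walks(x)", "(g -o f)")
john = GlueFormula("john", "g")

# Application consumes the linear-logic resource g and yields resource f,
# while beta-reducing the meaning side.
print(walks.applyto(john).simplify())  # walks(john) : f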
venv/lib/python3.10/site-packages/nltk/sem/hole.py ADDED
@@ -0,0 +1,395 @@
1
+ # Natural Language Toolkit: Hole Semantics
2
+ #
3
+ # Author: Peter Wang
4
+ # Updated by: Dan Garrette <[email protected]>
5
+ #
6
+ # Copyright (C) 2001-2023 NLTK Project
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ An implementation of the Hole Semantics model, following Blackburn and Bos,
12
+ Representation and Inference for Natural Language (CSLI, 2005).
13
+
14
+ The semantic representations are built by the grammar hole.fcfg.
15
+ This module contains driver code to read in sentences and parse them
16
+ according to a hole semantics grammar.
17
+
18
+ After parsing, the semantic representation is in the form of an underspecified
19
+ representation that is not easy to read. We use a "plugging" algorithm to
20
+ convert that representation into first-order logic formulas.
21
+ """
22
+
23
+ from functools import reduce
24
+
25
+ from nltk.parse import load_parser
26
+ from nltk.sem.logic import (
27
+ AllExpression,
28
+ AndExpression,
29
+ ApplicationExpression,
30
+ ExistsExpression,
31
+ IffExpression,
32
+ ImpExpression,
33
+ LambdaExpression,
34
+ NegatedExpression,
35
+ OrExpression,
36
+ )
37
+ from nltk.sem.skolemize import skolemize
38
+
39
+ # Note that in this code there may be multiple types of trees being referred to:
40
+ #
41
+ # 1. parse trees
42
+ # 2. the underspecified representation
43
+ # 3. first-order logic formula trees
44
+ # 4. the search space when plugging (search tree)
45
+ #
46
+
47
+
48
+ class Constants:
49
+ ALL = "ALL"
50
+ EXISTS = "EXISTS"
51
+ NOT = "NOT"
52
+ AND = "AND"
53
+ OR = "OR"
54
+ IMP = "IMP"
55
+ IFF = "IFF"
56
+ PRED = "PRED"
57
+ LEQ = "LEQ"
58
+ HOLE = "HOLE"
59
+ LABEL = "LABEL"
60
+
61
+ MAP = {
62
+ ALL: lambda v, e: AllExpression(v.variable, e),
63
+ EXISTS: lambda v, e: ExistsExpression(v.variable, e),
64
+ NOT: NegatedExpression,
65
+ AND: AndExpression,
66
+ OR: OrExpression,
67
+ IMP: ImpExpression,
68
+ IFF: IffExpression,
69
+ PRED: ApplicationExpression,
70
+ }
71
+
72
+
73
+ class HoleSemantics:
74
+ """
75
+ This class holds the broken-down components of a hole semantics, i.e. it
76
+ extracts the holes, labels, logic formula fragments and constraints out of
77
+ a big conjunction of such as produced by the hole semantics grammar. It
78
+ then provides some operations on the semantics dealing with holes, labels
79
+ and finding legal ways to plug holes with labels.
80
+ """
81
+
82
+ def __init__(self, usr):
83
+ """
84
+ Constructor. `usr' is a ``sem.Expression`` representing an
85
+ Underspecified Representation Structure (USR). A USR has the following
86
+ special predicates:
87
+ ALL(l,v,n),
88
+ EXISTS(l,v,n),
89
+ AND(l,n,n),
90
+ OR(l,n,n),
91
+ IMP(l,n,n),
92
+ IFF(l,n,n),
93
+ PRED(l,v,n,v[,v]*) where the brackets and star indicate zero or more repetitions,
94
+ LEQ(n,n),
95
+ HOLE(n),
96
+ LABEL(n)
97
+ where l is the label of the node described by the predicate, n is either
98
+ a label or a hole, and v is a variable.
99
+ """
100
+ self.holes = set()
101
+ self.labels = set()
102
+ self.fragments = {} # mapping of label -> formula fragment
103
+ self.constraints = set() # set of Constraints
104
+ self._break_down(usr)
105
+ self.top_most_labels = self._find_top_most_labels()
106
+ self.top_hole = self._find_top_hole()
107
+
108
+ def is_node(self, x):
109
+ """
110
+ Return true if x is a node (label or hole) in this semantic
111
+ representation.
112
+ """
113
+ return x in (self.labels | self.holes)
114
+
115
+ def _break_down(self, usr):
116
+ """
117
+ Extract holes, labels, formula fragments and constraints from the hole
118
+ semantics underspecified representation (USR).
119
+ """
120
+ if isinstance(usr, AndExpression):
121
+ self._break_down(usr.first)
122
+ self._break_down(usr.second)
123
+ elif isinstance(usr, ApplicationExpression):
124
+ func, args = usr.uncurry()
125
+ if func.variable.name == Constants.LEQ:
126
+ self.constraints.add(Constraint(args[0], args[1]))
127
+ elif func.variable.name == Constants.HOLE:
128
+ self.holes.add(args[0])
129
+ elif func.variable.name == Constants.LABEL:
130
+ self.labels.add(args[0])
131
+ else:
132
+ label = args[0]
133
+ assert label not in self.fragments
134
+ self.fragments[label] = (func, args[1:])
135
+ else:
136
+ raise ValueError(usr.label())
137
+
138
+ def _find_top_nodes(self, node_list):
139
+ top_nodes = node_list.copy()
140
+ for f in self.fragments.values():
141
+ # the label is the first argument of the predicate
142
+ args = f[1]
143
+ for arg in args:
144
+ if arg in node_list:
145
+ top_nodes.discard(arg)
146
+ return top_nodes
147
+
148
+ def _find_top_most_labels(self):
149
+ """
150
+ Return the set of labels which are not referenced directly as part of
151
+ another formula fragment. These will be the top-most labels for the
152
+ subtree that they are part of.
153
+ """
154
+ return self._find_top_nodes(self.labels)
155
+
156
+ def _find_top_hole(self):
157
+ """
158
+ Return the hole that will be the top of the formula tree.
159
+ """
160
+ top_holes = self._find_top_nodes(self.holes)
161
+ assert len(top_holes) == 1 # it must be unique
162
+ return top_holes.pop()
163
+
164
+ def pluggings(self):
165
+ """
166
+ Calculate and return all the legal pluggings (mappings of labels to
167
+ holes) of this semantics given the constraints.
168
+ """
169
+ record = []
170
+ self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {}, record)
171
+ return record
172
+
173
+ def _plug_nodes(self, queue, potential_labels, plug_acc, record):
174
+ """
175
+ Plug the nodes in `queue' with the labels in `potential_labels'.
176
+
177
+ Each element of `queue' is a tuple of the node to plug and the list of
178
+ ancestor holes from the root of the graph to that node.
179
+
180
+ `potential_labels' is a set of the labels which are still available for
181
+ plugging.
182
+
183
+ `plug_acc' is the incomplete mapping of holes to labels made on the
184
+ current branch of the search tree so far.
185
+
186
+ `record' is a list of all the complete pluggings that we have found in
187
+ total so far. It is the only parameter that is destructively updated.
188
+ """
189
+ if queue != []:
190
+ (node, ancestors) = queue[0]
191
+ if node in self.holes:
192
+ # The node is a hole, try to plug it.
193
+ self._plug_hole(
194
+ node, ancestors, queue[1:], potential_labels, plug_acc, record
195
+ )
196
+ else:
197
+ assert node in self.labels
198
+ # The node is a label. Replace it in the queue by the holes and
199
+ # labels in the formula fragment named by that label.
200
+ args = self.fragments[node][1]
201
+ head = [(a, ancestors) for a in args if self.is_node(a)]
202
+ self._plug_nodes(head + queue[1:], potential_labels, plug_acc, record)
203
+ else:
204
+ raise Exception("queue empty")
205
+
206
+ def _plug_hole(self, hole, ancestors0, queue, potential_labels0, plug_acc0, record):
207
+ """
208
+ Try all possible ways of plugging a single hole.
209
+ See _plug_nodes for the meanings of the parameters.
210
+ """
211
+ # Add the current hole we're trying to plug into the list of ancestors.
212
+ assert hole not in ancestors0
213
+ ancestors = [hole] + ancestors0
214
+
215
+ # Try each potential label in this hole in turn.
216
+ for l in potential_labels0:
217
+ # Is the label valid in this hole?
218
+ if self._violates_constraints(l, ancestors):
219
+ continue
220
+
221
+ plug_acc = plug_acc0.copy()
222
+ plug_acc[hole] = l
223
+ potential_labels = potential_labels0.copy()
224
+ potential_labels.remove(l)
225
+
226
+ if len(potential_labels) == 0:
227
+ # No more potential labels. That must mean all the holes have
228
+ # been filled so we have found a legal plugging so remember it.
229
+ #
230
+ # Note that the queue might not be empty because there might
231
+ # be labels on there that point to formula fragments with
232
+ # no holes in them. _sanity_check_plugging will make sure
233
+ # all holes are filled.
234
+ self._sanity_check_plugging(plug_acc, self.top_hole, [])
235
+ record.append(plug_acc)
236
+ else:
237
+ # Recursively try to fill in the rest of the holes in the
238
+ # queue. The label we just plugged into the hole could have
239
+ # holes of its own, so it goes at the end of the queue. Putting it on
240
+ # the end of the queue gives us a breadth-first search, so that
241
+ # all the holes at level i of the formula tree are filled
242
+ # before filling level i+1.
243
+ # A depth-first search would work as well since the trees must
244
+ # be finite but the bookkeeping would be harder.
245
+ self._plug_nodes(
246
+ queue + [(l, ancestors)], potential_labels, plug_acc, record
247
+ )
248
+
249
+ def _violates_constraints(self, label, ancestors):
250
+ """
251
+ Return True if the `label' cannot be placed underneath the holes given
252
+ by the set `ancestors' because it would violate the constraints imposed
253
+ on it.
254
+ """
255
+ for c in self.constraints:
256
+ if c.lhs == label:
257
+ if c.rhs not in ancestors:
258
+ return True
259
+ return False
260
+
261
+ def _sanity_check_plugging(self, plugging, node, ancestors):
262
+ """
263
+ Make sure that a given plugging is legal. We recursively go through
264
+ each node and make sure that no constraints are violated.
265
+ We also check that all holes have been filled.
266
+ """
267
+ if node in self.holes:
268
+ ancestors = [node] + ancestors
269
+ label = plugging[node]
270
+ else:
271
+ label = node
272
+ assert label in self.labels
273
+ for c in self.constraints:
274
+ if c.lhs == label:
275
+ assert c.rhs in ancestors
276
+ args = self.fragments[label][1]
277
+ for arg in args:
278
+ if self.is_node(arg):
279
+ self._sanity_check_plugging(plugging, arg, [label] + ancestors)
280
+
281
+ def formula_tree(self, plugging):
282
+ """
283
+ Return the first-order logic formula tree for this underspecified
284
+ representation using the plugging given.
285
+ """
286
+ return self._formula_tree(plugging, self.top_hole)
287
+
288
+ def _formula_tree(self, plugging, node):
289
+ if node in plugging:
290
+ return self._formula_tree(plugging, plugging[node])
291
+ elif node in self.fragments:
292
+ pred, args = self.fragments[node]
293
+ children = [self._formula_tree(plugging, arg) for arg in args]
294
+ return reduce(Constants.MAP[pred.variable.name], children)
295
+ else:
296
+ return node
297
+
298
+
299
+ class Constraint:
300
+ """
301
+ This class represents a constraint of the form (L =< N),
302
+ where L is a label and N is a node (a label or a hole).
303
+ """
304
+
305
+ def __init__(self, lhs, rhs):
306
+ self.lhs = lhs
307
+ self.rhs = rhs
308
+
309
+ def __eq__(self, other):
310
+ if self.__class__ == other.__class__:
311
+ return self.lhs == other.lhs and self.rhs == other.rhs
312
+ else:
313
+ return False
314
+
315
+ def __ne__(self, other):
316
+ return not (self == other)
317
+
318
+ def __hash__(self):
319
+ return hash(repr(self))
320
+
321
+ def __repr__(self):
322
+ return f"({self.lhs} < {self.rhs})"
323
+
324
+
325
+ def hole_readings(sentence, grammar_filename=None, verbose=False):
326
+ if not grammar_filename:
327
+ grammar_filename = "grammars/sample_grammars/hole.fcfg"
328
+
329
+ if verbose:
330
+ print("Reading grammar file", grammar_filename)
331
+
332
+ parser = load_parser(grammar_filename)
333
+
334
+ # Parse the sentence.
335
+ tokens = sentence.split()
336
+ trees = list(parser.parse(tokens))
337
+ if verbose:
338
+ print("Got %d different parses" % len(trees))
339
+
340
+ all_readings = []
341
+ for tree in trees:
342
+ # Get the semantic feature from the top of the parse tree.
343
+ sem = tree.label()["SEM"].simplify()
344
+
345
+ # Print the raw semantic representation.
346
+ if verbose:
347
+ print("Raw: ", sem)
348
+
349
+ # Skolemize away all quantifiers. All variables become unique.
350
+ while isinstance(sem, LambdaExpression):
351
+ sem = sem.term
352
+ skolemized = skolemize(sem)
353
+
354
+ if verbose:
355
+ print("Skolemized:", skolemized)
356
+
357
+ # Break the hole semantics representation down into its components
358
+ # i.e. holes, labels, formula fragments and constraints.
359
+ hole_sem = HoleSemantics(skolemized)
360
+
361
+ # Maybe show the details of the semantic representation.
362
+ if verbose:
363
+ print("Holes: ", hole_sem.holes)
364
+ print("Labels: ", hole_sem.labels)
365
+ print("Constraints: ", hole_sem.constraints)
366
+ print("Top hole: ", hole_sem.top_hole)
367
+ print("Top labels: ", hole_sem.top_most_labels)
368
+ print("Fragments:")
369
+ for l, f in hole_sem.fragments.items():
370
+ print(f"\t{l}: {f}")
371
+
372
+ # Find all the possible ways to plug the formulas together.
373
+ pluggings = hole_sem.pluggings()
374
+
375
+ # Build FOL formula trees using the pluggings.
376
+ readings = list(map(hole_sem.formula_tree, pluggings))
377
+
378
+ # Print out the formulas in a textual format.
379
+ if verbose:
380
+ for i, r in enumerate(readings):
381
+ print()
382
+ print("%d. %s" % (i, r))
383
+ print()
384
+
385
+ all_readings.extend(readings)
386
+
387
+ return all_readings
388
+
389
+
390
+ if __name__ == "__main__":
391
+ for r in hole_readings("a dog barks"):
392
+ print(r)
393
+ print()
394
+ for r in hole_readings("every girl chases a dog"):
395
+ print(r)
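A sketch of the typical entry point for the driver above; it assumes the NLTK sample grammars (which include hole.fcfg) are available, e.g. after nltk.download('sample_grammars'):

from nltk.sem.hole import hole_readings

# The sentence is scope-ambiguous, so the plugging search normally yields
# one first-order reading per quantifier scoping.
for reading in hole_readings("every girl chases a dog"):
    print(reading)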
venv/lib/python3.10/site-packages/nltk/sem/lfg.py ADDED
@@ -0,0 +1,261 @@
1
+ # Natural Language Toolkit: Lexical Functional Grammar
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from itertools import chain
10
+
11
+ from nltk.internals import Counter
12
+
13
+
14
+ class FStructure(dict):
15
+ def safeappend(self, key, item):
16
+ """
17
+ Append 'item' to the list at 'key'. If no list exists for 'key', then
18
+ construct one.
19
+ """
20
+ if key not in self:
21
+ self[key] = []
22
+ self[key].append(item)
23
+
24
+ def __setitem__(self, key, value):
25
+ dict.__setitem__(self, key.lower(), value)
26
+
27
+ def __getitem__(self, key):
28
+ return dict.__getitem__(self, key.lower())
29
+
30
+ def __contains__(self, key):
31
+ return dict.__contains__(self, key.lower())
32
+
33
+ def to_glueformula_list(self, glue_dict):
34
+ depgraph = self.to_depgraph()
35
+ return glue_dict.to_glueformula_list(depgraph)
36
+
37
+ def to_depgraph(self, rel=None):
38
+ from nltk.parse.dependencygraph import DependencyGraph
39
+
40
+ depgraph = DependencyGraph()
41
+ nodes = depgraph.nodes
42
+
43
+ self._to_depgraph(nodes, 0, "ROOT")
44
+
45
+ # Add all the dependencies for all the nodes
46
+ for address, node in nodes.items():
47
+ for n2 in (n for n in nodes.values() if n["rel"] != "TOP"):
48
+ if n2["head"] == address:
49
+ relation = n2["rel"]
50
+ node["deps"].setdefault(relation, [])
51
+ node["deps"][relation].append(n2["address"])
52
+
53
+ depgraph.root = nodes[1]
54
+
55
+ return depgraph
56
+
57
+ def _to_depgraph(self, nodes, head, rel):
58
+ index = len(nodes)
59
+
60
+ nodes[index].update(
61
+ {
62
+ "address": index,
63
+ "word": self.pred[0],
64
+ "tag": self.pred[1],
65
+ "head": head,
66
+ "rel": rel,
67
+ }
68
+ )
69
+
70
+ for feature in sorted(self):
71
+ for item in sorted(self[feature]):
72
+ if isinstance(item, FStructure):
73
+ item._to_depgraph(nodes, index, feature)
74
+ elif isinstance(item, tuple):
75
+ new_index = len(nodes)
76
+ nodes[new_index].update(
77
+ {
78
+ "address": new_index,
79
+ "word": item[0],
80
+ "tag": item[1],
81
+ "head": index,
82
+ "rel": feature,
83
+ }
84
+ )
85
+ elif isinstance(item, list):
86
+ for n in item:
87
+ n._to_depgraph(nodes, index, feature)
88
+ else:
89
+ raise Exception(
90
+ "feature %s is not an FStruct, a list, or a tuple" % feature
91
+ )
92
+
93
+ @staticmethod
94
+ def read_depgraph(depgraph):
95
+ return FStructure._read_depgraph(depgraph.root, depgraph)
96
+
97
+ @staticmethod
98
+ def _read_depgraph(node, depgraph, label_counter=None, parent=None):
99
+ if not label_counter:
100
+ label_counter = Counter()
101
+
102
+ if node["rel"].lower() in ["spec", "punct"]:
103
+ # the value of a 'spec' entry is a word, not an FStructure
104
+ return (node["word"], node["tag"])
105
+
106
+ else:
107
+ fstruct = FStructure()
108
+ fstruct.pred = None
109
+ fstruct.label = FStructure._make_label(label_counter.get())
110
+
111
+ fstruct.parent = parent
112
+
113
+ word, tag = node["word"], node["tag"]
114
+ if tag[:2] == "VB":
115
+ if tag[2:3] == "D":
116
+ fstruct.safeappend("tense", ("PAST", "tense"))
117
+ fstruct.pred = (word, tag[:2])
118
+
119
+ if not fstruct.pred:
120
+ fstruct.pred = (word, tag)
121
+
122
+ children = [
123
+ depgraph.nodes[idx]
124
+ for idx in chain.from_iterable(node["deps"].values())
125
+ ]
126
+ for child in children:
127
+ fstruct.safeappend(
128
+ child["rel"],
129
+ FStructure._read_depgraph(child, depgraph, label_counter, fstruct),
130
+ )
131
+
132
+ return fstruct
133
+
134
+ @staticmethod
135
+ def _make_label(value):
136
+ """
137
+ Pick an alphabetic character as identifier for an entity in the model.
138
+
139
+ :param value: where to index into the list of characters
140
+ :type value: int
141
+ """
142
+ letter = [
143
+ "f",
144
+ "g",
145
+ "h",
146
+ "i",
147
+ "j",
148
+ "k",
149
+ "l",
150
+ "m",
151
+ "n",
152
+ "o",
153
+ "p",
154
+ "q",
155
+ "r",
156
+ "s",
157
+ "t",
158
+ "u",
159
+ "v",
160
+ "w",
161
+ "x",
162
+ "y",
163
+ "z",
164
+ "a",
165
+ "b",
166
+ "c",
167
+ "d",
168
+ "e",
169
+ ][value - 1]
170
+ num = int(value) // 26
171
+ if num > 0:
172
+ return letter + str(num)
173
+ else:
174
+ return letter
175
+
176
+ def __repr__(self):
177
+ return self.__str__().replace("\n", "")
178
+
179
+ def __str__(self):
180
+ return self.pretty_format()
181
+
182
+ def pretty_format(self, indent=3):
183
+ try:
184
+ accum = "%s:[" % self.label
185
+ except NameError:
186
+ accum = "["
187
+ try:
188
+ accum += "pred '%s'" % (self.pred[0])
189
+ except NameError:
190
+ pass
191
+
192
+ for feature in sorted(self):
193
+ for item in self[feature]:
194
+ if isinstance(item, FStructure):
195
+ next_indent = indent + len(feature) + 3 + len(self.label)
196
+ accum += "\n{}{} {}".format(
197
+ " " * (indent),
198
+ feature,
199
+ item.pretty_format(next_indent),
200
+ )
201
+ elif isinstance(item, tuple):
202
+ accum += "\n{}{} '{}'".format(" " * (indent), feature, item[0])
203
+ elif isinstance(item, list):
204
+ accum += "\n{}{} {{{}}}".format(
205
+ " " * (indent),
206
+ feature,
207
+ ("\n%s" % (" " * (indent + len(feature) + 2))).join(item),
208
+ )
209
+ else: # ERROR
210
+ raise Exception(
211
+ "feature %s is not an FStruct, a list, or a tuple" % feature
212
+ )
213
+ return accum + "]"
214
+
215
+
216
+ def demo_read_depgraph():
217
+ from nltk.parse.dependencygraph import DependencyGraph
218
+
219
+ dg1 = DependencyGraph(
220
+ """\
221
+ Esso NNP 2 SUB
222
+ said VBD 0 ROOT
223
+ the DT 5 NMOD
224
+ Whiting NNP 5 NMOD
225
+ field NN 6 SUB
226
+ started VBD 2 VMOD
227
+ production NN 6 OBJ
228
+ Tuesday NNP 6 VMOD
229
+ """
230
+ )
231
+ dg2 = DependencyGraph(
232
+ """\
233
+ John NNP 2 SUB
234
+ sees VBP 0 ROOT
235
+ Mary NNP 2 OBJ
236
+ """
237
+ )
238
+ dg3 = DependencyGraph(
239
+ """\
240
+ a DT 2 SPEC
241
+ man NN 3 SUBJ
242
+ walks VB 0 ROOT
243
+ """
244
+ )
245
+ dg4 = DependencyGraph(
246
+ """\
247
+ every DT 2 SPEC
248
+ girl NN 3 SUBJ
249
+ chases VB 0 ROOT
250
+ a DT 5 SPEC
251
+ dog NN 3 OBJ
252
+ """
253
+ )
254
+
255
+ depgraphs = [dg1, dg2, dg3, dg4]
256
+ for dg in depgraphs:
257
+ print(FStructure.read_depgraph(dg))
258
+
259
+
260
+ if __name__ == "__main__":
261
+ demo_read_depgraph()
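A minimal usage sketch of the f-structure reader above, assuming this file is importable as nltk.sem.lfg (the expected nesting follows from _read_depgraph, not from verified output):

from nltk.parse.dependencygraph import DependencyGraph
from nltk.sem.lfg import FStructure  # assumed module path for this file

dg = DependencyGraph(
    """\
John NNP 2 SUB
sees VBP 0 ROOT
Mary NNP 2 OBJ
"""
)
fstruct = FStructure.read_depgraph(dg)
# Each dependency relation (SUB, OBJ, ...) becomes a feature whose value is a
# nested FStructure (or a (word, tag) tuple for 'spec'/'punct' relations), and
# pretty_format() renders the nesting with labels f, g, h, ... from _make_label.
print(fstruct.pretty_format())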
venv/lib/python3.10/site-packages/nltk/sem/linearlogic.py ADDED
@@ -0,0 +1,482 @@
1
+ # Natural Language Toolkit: Linear Logic
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from nltk.internals import Counter
10
+ from nltk.sem.logic import APP, LogicParser
11
+
12
+ _counter = Counter()
13
+
14
+
15
+ class Tokens:
16
+ # Punctuation
17
+ OPEN = "("
18
+ CLOSE = ")"
19
+
20
+ # Operations
21
+ IMP = "-o"
22
+
23
+ PUNCT = [OPEN, CLOSE]
24
+ TOKENS = PUNCT + [IMP]
25
+
26
+
27
+ class LinearLogicParser(LogicParser):
28
+ """A linear logic expression parser."""
29
+
30
+ def __init__(self):
31
+ LogicParser.__init__(self)
32
+
33
+ self.operator_precedence = {APP: 1, Tokens.IMP: 2, None: 3}
34
+ self.right_associated_operations += [Tokens.IMP]
35
+
36
+ def get_all_symbols(self):
37
+ return Tokens.TOKENS
38
+
39
+ def handle(self, tok, context):
40
+ if tok not in Tokens.TOKENS:
41
+ return self.handle_variable(tok, context)
42
+ elif tok == Tokens.OPEN:
43
+ return self.handle_open(tok, context)
44
+
45
+ def get_BooleanExpression_factory(self, tok):
46
+ if tok == Tokens.IMP:
47
+ return ImpExpression
48
+ else:
49
+ return None
50
+
51
+ def make_BooleanExpression(self, factory, first, second):
52
+ return factory(first, second)
53
+
54
+ def attempt_ApplicationExpression(self, expression, context):
55
+ """Attempt to make an application expression. If the next tokens
56
+ are an argument in parens, then the argument expression is a
57
+ function being applied to the arguments. Otherwise, return the
58
+ argument expression."""
59
+ if self.has_priority(APP, context):
60
+ if self.inRange(0) and self.token(0) == Tokens.OPEN:
61
+ self.token()  # swallow the open paren
62
+ argument = self.process_next_expression(APP)
63
+ self.assertNextToken(Tokens.CLOSE)
64
+ expression = ApplicationExpression(expression, argument, None)
65
+ return expression
66
+
67
+ def make_VariableExpression(self, name):
68
+ if name[0].isupper():
69
+ return VariableExpression(name)
70
+ else:
71
+ return ConstantExpression(name)
72
+
73
+
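Because Tokens.IMP is registered as right-associative in the parser above, an unparenthesised chain of implications groups to the right. A quick hedged check, using Expression.fromstring (defined just below) and assuming the module is importable as nltk.sem.linearlogic:

from nltk.sem.linearlogic import Expression  # assumed module path

print(Expression.fromstring(r"g -o h -o f"))    # expected: (g -o (h -o f))
print(Expression.fromstring(r"(g -o h) -o f"))  # explicit grouping preserved: ((g -o h) -o f)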
74
+ class Expression:
75
+
76
+ _linear_logic_parser = LinearLogicParser()
77
+
78
+ @classmethod
79
+ def fromstring(cls, s):
80
+ return cls._linear_logic_parser.parse(s)
81
+
82
+ def applyto(self, other, other_indices=None):
83
+ return ApplicationExpression(self, other, other_indices)
84
+
85
+ def __call__(self, other):
86
+ return self.applyto(other)
87
+
88
+ def __repr__(self):
89
+ return f"<{self.__class__.__name__} {self}>"
90
+
91
+
92
+ class AtomicExpression(Expression):
93
+ def __init__(self, name, dependencies=None):
94
+ """
95
+ :param name: str for the constant name
96
+ :param dependencies: list of int for the indices on which this atom is dependent
97
+ """
98
+ assert isinstance(name, str)
99
+ self.name = name
100
+
101
+ if not dependencies:
102
+ dependencies = []
103
+ self.dependencies = dependencies
104
+
105
+ def simplify(self, bindings=None):
106
+ """
107
+ If 'self' is bound in 'bindings', return the atomic expression to which it is bound.
108
+ Otherwise, return self.
109
+
110
+ :param bindings: ``BindingDict`` A dictionary of bindings used to simplify
111
+ :return: ``AtomicExpression``
112
+ """
113
+ if bindings and self in bindings:
114
+ return bindings[self]
115
+ else:
116
+ return self
117
+
118
+ def compile_pos(self, index_counter, glueFormulaFactory):
119
+ """
120
+ From Iddo Lev's PhD Dissertation p108-109
121
+
122
+ :param index_counter: ``Counter`` for unique indices
123
+ :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas
124
+ :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas
125
+ """
126
+ self.dependencies = []
127
+ return (self, [])
128
+
129
+ def compile_neg(self, index_counter, glueFormulaFactory):
130
+ """
131
+ From Iddo Lev's PhD Dissertation p108-109
132
+
133
+ :param index_counter: ``Counter`` for unique indices
134
+ :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas
135
+ :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas
136
+ """
137
+ self.dependencies = []
138
+ return (self, [])
139
+
140
+ def initialize_labels(self, fstruct):
141
+ self.name = fstruct.initialize_label(self.name.lower())
142
+
143
+ def __eq__(self, other):
144
+ return self.__class__ == other.__class__ and self.name == other.name
145
+
146
+ def __ne__(self, other):
147
+ return not self == other
148
+
149
+ def __str__(self):
150
+ accum = self.name
151
+ if self.dependencies:
152
+ accum += "%s" % self.dependencies
153
+ return accum
154
+
155
+ def __hash__(self):
156
+ return hash(self.name)
157
+
158
+
159
+ class ConstantExpression(AtomicExpression):
160
+ def unify(self, other, bindings):
161
+ """
162
+ If 'other' is a constant, then it must be equal to 'self'. If 'other' is a variable,
163
+ then it must not be bound to anything other than 'self'.
164
+
165
+ :param other: ``Expression``
166
+ :param bindings: ``BindingDict`` A dictionary of all current bindings
167
+ :return: ``BindingDict`` A new combined dictionary of 'bindings' and any new binding
168
+ :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings'
169
+ """
170
+ assert isinstance(other, Expression)
171
+ if isinstance(other, VariableExpression):
172
+ try:
173
+ return bindings + BindingDict([(other, self)])
174
+ except VariableBindingException:
175
+ pass
176
+ elif self == other:
177
+ return bindings
178
+ raise UnificationException(self, other, bindings)
179
+
180
+
181
+ class VariableExpression(AtomicExpression):
182
+ def unify(self, other, bindings):
183
+ """
184
+ 'self' must not be bound to anything other than 'other'.
185
+
186
+ :param other: ``Expression``
187
+ :param bindings: ``BindingDict`` A dictionary of all current bindings
188
+ :return: ``BindingDict`` A new combined dictionary of 'bindings' and the new binding
189
+ :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings'
190
+ """
191
+ assert isinstance(other, Expression)
192
+ try:
193
+ if self == other:
194
+ return bindings
195
+ else:
196
+ return bindings + BindingDict([(self, other)])
197
+ except VariableBindingException as e:
198
+ raise UnificationException(self, other, bindings) from e
199
+
200
+
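A small sketch of how the two unify methods above behave against an empty BindingDict (module path assumed; BindingDict and UnificationException are defined later in this file):

from nltk.sem.linearlogic import BindingDict, Expression, UnificationException

g = Expression.fromstring(r"g")   # lower-case -> ConstantExpression
H = Expression.fromstring(r"H")   # upper-case -> VariableExpression

print(g.unify(H, BindingDict()))  # expected: {H: g} (the variable is bound to the constant)
print(H.unify(g, BindingDict()))  # expected: {H: g}
try:
    g.unify(Expression.fromstring(r"f"), BindingDict())
except UnificationException as e:
    print(e)                      # two distinct constants cannot unify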
201
+ class ImpExpression(Expression):
202
+ def __init__(self, antecedent, consequent):
203
+ """
204
+ :param antecedent: ``Expression`` for the antecedent
205
+ :param consequent: ``Expression`` for the consequent
206
+ """
207
+ assert isinstance(antecedent, Expression)
208
+ assert isinstance(consequent, Expression)
209
+ self.antecedent = antecedent
210
+ self.consequent = consequent
211
+
212
+ def simplify(self, bindings=None):
213
+ return self.__class__(
214
+ self.antecedent.simplify(bindings), self.consequent.simplify(bindings)
215
+ )
216
+
217
+ def unify(self, other, bindings):
218
+ """
219
+ Both the antecedent and consequent of 'self' and 'other' must unify.
220
+
221
+ :param other: ``ImpExpression``
222
+ :param bindings: ``BindingDict`` A dictionary of all current bindings
223
+ :return: ``BindingDict`` A new combined dictionary of of 'bindings' and any new bindings
224
+ :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings'
225
+ """
226
+ assert isinstance(other, ImpExpression)
227
+ try:
228
+ return (
229
+ bindings
230
+ + self.antecedent.unify(other.antecedent, bindings)
231
+ + self.consequent.unify(other.consequent, bindings)
232
+ )
233
+ except VariableBindingException as e:
234
+ raise UnificationException(self, other, bindings) from e
235
+
236
+ def compile_pos(self, index_counter, glueFormulaFactory):
237
+ """
238
+ From Iddo Lev's PhD Dissertation p108-109
239
+
240
+ :param index_counter: ``Counter`` for unique indices
241
+ :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas
242
+ :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas
243
+ """
244
+ (a, a_new) = self.antecedent.compile_neg(index_counter, glueFormulaFactory)
245
+ (c, c_new) = self.consequent.compile_pos(index_counter, glueFormulaFactory)
246
+ return (ImpExpression(a, c), a_new + c_new)
247
+
248
+ def compile_neg(self, index_counter, glueFormulaFactory):
249
+ """
250
+ From Iddo Lev's PhD Dissertation p108-109
251
+
252
+ :param index_counter: ``Counter`` for unique indices
253
+ :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas
254
+ :return: (``Expression``,list of ``GlueFormula``) for the compiled linear logic and any newly created glue formulas
255
+ """
256
+ (a, a_new) = self.antecedent.compile_pos(index_counter, glueFormulaFactory)
257
+ (c, c_new) = self.consequent.compile_neg(index_counter, glueFormulaFactory)
258
+ fresh_index = index_counter.get()
259
+ c.dependencies.append(fresh_index)
260
+ new_v = glueFormulaFactory("v%s" % fresh_index, a, {fresh_index})
261
+ return (c, a_new + c_new + [new_v])
262
+
263
+ def initialize_labels(self, fstruct):
264
+ self.antecedent.initialize_labels(fstruct)
265
+ self.consequent.initialize_labels(fstruct)
266
+
267
+ def __eq__(self, other):
268
+ return (
269
+ self.__class__ == other.__class__
270
+ and self.antecedent == other.antecedent
271
+ and self.consequent == other.consequent
272
+ )
273
+
274
+ def __ne__(self, other):
275
+ return not self == other
276
+
277
+ def __str__(self):
278
+ return "{}{} {} {}{}".format(
279
+ Tokens.OPEN,
280
+ self.antecedent,
281
+ Tokens.IMP,
282
+ self.consequent,
283
+ Tokens.CLOSE,
284
+ )
285
+
286
+ def __hash__(self):
287
+ return hash(f"{hash(self.antecedent)}{Tokens.IMP}{hash(self.consequent)}")
288
+
289
+
290
+ class ApplicationExpression(Expression):
291
+ def __init__(self, function, argument, argument_indices=None):
292
+ """
293
+ :param function: ``Expression`` for the function
294
+ :param argument: ``Expression`` for the argument
295
+ :param argument_indices: set for the indices of the glue formula from which the argument came
296
+ :raise LinearLogicApplicationException: If 'function' cannot be applied to 'argument' given 'argument_indices'.
297
+ """
298
+ function_simp = function.simplify()
299
+ argument_simp = argument.simplify()
300
+
301
+ assert isinstance(function_simp, ImpExpression)
302
+ assert isinstance(argument_simp, Expression)
303
+
304
+ bindings = BindingDict()
305
+
306
+ try:
307
+ if isinstance(function, ApplicationExpression):
308
+ bindings += function.bindings
309
+ if isinstance(argument, ApplicationExpression):
310
+ bindings += argument.bindings
311
+ bindings += function_simp.antecedent.unify(argument_simp, bindings)
312
+ except UnificationException as e:
313
+ raise LinearLogicApplicationException(
314
+ f"Cannot apply {function_simp} to {argument_simp}. {e}"
315
+ ) from e
316
+
317
+ # If you are running it on compiled premises, more conditions apply
318
+ if argument_indices:
319
+ # A.dependencies of (A -o (B -o C)) must be a proper subset of argument_indices
320
+ if not set(function_simp.antecedent.dependencies) <= argument_indices:
321
+ raise LinearLogicApplicationException(
322
+ "Dependencies unfulfilled when attempting to apply Linear Logic formula %s to %s"
323
+ % (function_simp, argument_simp)
324
+ )
325
+ if set(function_simp.antecedent.dependencies) == argument_indices:
326
+ raise LinearLogicApplicationException(
327
+ "Dependencies not a proper subset of indices when attempting to apply Linear Logic formula %s to %s"
328
+ % (function_simp, argument_simp)
329
+ )
330
+
331
+ self.function = function
332
+ self.argument = argument
333
+ self.bindings = bindings
334
+
335
+ def simplify(self, bindings=None):
336
+ """
337
+ Since function is an implication, return its consequent. There should be
338
+ no need to check that the application is valid since the checking is done
339
+ by the constructor.
340
+
341
+ :param bindings: ``BindingDict`` A dictionary of bindings used to simplify
342
+ :return: ``Expression``
343
+ """
344
+ if not bindings:
345
+ bindings = self.bindings
346
+
347
+ return self.function.simplify(bindings).consequent
348
+
349
+ def __eq__(self, other):
350
+ return (
351
+ self.__class__ == other.__class__
352
+ and self.function == other.function
353
+ and self.argument == other.argument
354
+ )
355
+
356
+ def __ne__(self, other):
357
+ return not self == other
358
+
359
+ def __str__(self):
360
+ return "%s" % self.function + Tokens.OPEN + "%s" % self.argument + Tokens.CLOSE
361
+
362
+ def __hash__(self):
363
+ return hash(f"{hash(self.antecedent)}{Tokens.OPEN}{hash(self.consequent)}")
364
+
365
+
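A minimal sketch of the application machinery above; the expected results follow from the unification performed in the constructor (module path assumed, output not verified):

from nltk.sem.linearlogic import Expression  # assumed module path

func = Expression.fromstring(r"(H -o f)")    # implication with a variable antecedent
arg = Expression.fromstring(r"g")            # constant argument
app = func.applyto(arg)                      # the constructor unifies H with g
print(app.bindings)                          # expected: {H: g}
print(app.simplify())                        # expected: f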
366
+ class BindingDict:
367
+ def __init__(self, bindings=None):
368
+ """
369
+ :param bindings:
370
+ list [(``VariableExpression``, ``AtomicExpression``)] to initialize the dictionary
371
+ dict {``VariableExpression``: ``AtomicExpression``} to initialize the dictionary
372
+ """
373
+ self.d = {}
374
+
375
+ if isinstance(bindings, dict):
376
+ bindings = bindings.items()
377
+
378
+ if bindings:
379
+ for (v, b) in bindings:
380
+ self[v] = b
381
+
382
+ def __setitem__(self, variable, binding):
383
+ """
384
+ A binding is consistent with the dict if its variable is not already bound, OR if its
385
+ variable is already bound to its argument.
386
+
387
+ :param variable: ``VariableExpression`` The variable to bind
388
+ :param binding: ``Expression`` The expression to which 'variable' should be bound
389
+ :raise VariableBindingException: If the variable cannot be bound in this dictionary
390
+ """
391
+ assert isinstance(variable, VariableExpression)
392
+ assert isinstance(binding, Expression)
393
+
394
+ assert variable != binding
395
+
396
+ existing = self.d.get(variable, None)
397
+
398
+ if not existing or binding == existing:
399
+ self.d[variable] = binding
400
+ else:
401
+ raise VariableBindingException(
402
+ "Variable %s already bound to another value" % (variable)
403
+ )
404
+
405
+ def __getitem__(self, variable):
406
+ """
407
+ Return the expression to which 'variable' is bound
408
+ """
409
+ assert isinstance(variable, VariableExpression)
410
+
411
+ intermediate = self.d[variable]
412
+ while intermediate:
413
+ try:
414
+ intermediate = self.d[intermediate]
415
+ except KeyError:
416
+ return intermediate
417
+
418
+ def __contains__(self, item):
419
+ return item in self.d
420
+
421
+ def __add__(self, other):
422
+ """
423
+ :param other: ``BindingDict`` The dict with which to combine self
424
+ :return: ``BindingDict`` A new dict containing all the elements of both parameters
425
+ :raise VariableBindingException: If the parameter dictionaries are not consistent with each other
426
+ """
427
+ try:
428
+ combined = BindingDict()
429
+ for v in self.d:
430
+ combined[v] = self.d[v]
431
+ for v in other.d:
432
+ combined[v] = other.d[v]
433
+ return combined
434
+ except VariableBindingException as e:
435
+ raise VariableBindingException(
436
+ "Attempting to add two contradicting"
437
+ " VariableBindingsLists: %s, %s" % (self, other)
438
+ ) from e
439
+
440
+ def __ne__(self, other):
441
+ return not self == other
442
+
443
+ def __eq__(self, other):
444
+ if not isinstance(other, BindingDict):
445
+ raise TypeError
446
+ return self.d == other.d
447
+
448
+ def __str__(self):
449
+ return "{" + ", ".join(f"{v}: {self.d[v]}" for v in sorted(self.d.keys())) + "}"
450
+
451
+ def __repr__(self):
452
+ return "BindingDict: %s" % self
453
+
454
+
455
+ class VariableBindingException(Exception):
456
+ pass
457
+
458
+
459
+ class UnificationException(Exception):
460
+ def __init__(self, a, b, bindings):
461
+ Exception.__init__(self, f"Cannot unify {a} with {b} given {bindings}")
462
+
463
+
464
+ class LinearLogicApplicationException(Exception):
465
+ pass
466
+
467
+
468
+ def demo():
469
+ lexpr = Expression.fromstring
470
+
471
+ print(lexpr(r"f"))
472
+ print(lexpr(r"(g -o f)"))
473
+ print(lexpr(r"((g -o G) -o G)"))
474
+ print(lexpr(r"g -o h -o f"))
475
+ print(lexpr(r"(g -o f)(g)").simplify())
476
+ print(lexpr(r"(H -o f)(g)").simplify())
477
+ print(lexpr(r"((g -o G) -o G)((g -o f))").simplify())
478
+ print(lexpr(r"(H -o H)((g -o f))").simplify())
479
+
480
+
481
+ if __name__ == "__main__":
482
+ demo()
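Finally, a hedged sketch of how BindingDict enforces consistency when dictionaries are combined (module path assumed):

from nltk.sem.linearlogic import BindingDict, Expression, VariableBindingException

H = Expression.fromstring(r"H")
f = Expression.fromstring(r"f")
g = Expression.fromstring(r"g")

print(BindingDict([(H, f)]) + BindingDict([(H, f)]))  # consistent: {H: f}
try:
    BindingDict([(H, f)]) + BindingDict([(H, g)])     # H cannot be bound to both f and g
except VariableBindingException as e:
    print(e)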