applied-ai-018 committed on
Commit 9b2b726 · verified · 1 Parent(s): 0f7793d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/14.input_layernorm.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  8. ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  9. ckpts/universal/global_step40/zero/26.input_layernorm.weight/exp_avg.pt +3 -0
  10. ckpts/universal/global_step40/zero/26.input_layernorm.weight/exp_avg_sq.pt +3 -0
  11. ckpts/universal/global_step40/zero/26.input_layernorm.weight/fp32.pt +3 -0
  12. ckpts/universal/global_step40/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  13. venv/lib/python3.10/site-packages/nltk/cluster/util.py +300 -0
  14. venv/lib/python3.10/site-packages/nltk/parse/__init__.py +102 -0
  15. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/nltk/parse/api.py +72 -0
  36. venv/lib/python3.10/site-packages/nltk/parse/bllip.py +299 -0
  37. venv/lib/python3.10/site-packages/nltk/parse/chart.py +1848 -0
  38. venv/lib/python3.10/site-packages/nltk/parse/dependencygraph.py +799 -0
  39. venv/lib/python3.10/site-packages/nltk/parse/pchart.py +579 -0
  40. venv/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py +716 -0
  41. venv/lib/python3.10/site-packages/nltk/parse/recursivedescent.py +684 -0
  42. venv/lib/python3.10/site-packages/nltk/parse/shiftreduce.py +479 -0
  43. venv/lib/python3.10/site-packages/nltk/parse/util.py +234 -0
  44. venv/lib/python3.10/site-packages/nltk/test/__pycache__/__init__.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/nltk/test/__pycache__/all.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/nltk/test/__pycache__/childes_fixt.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/nltk/test/__pycache__/classify_fixt.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/nltk/test/__pycache__/conftest.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/nltk/test/__pycache__/gensim_fixt.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/nltk/test/__pycache__/gluesemantics_malt_fixt.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:919884b6d10ee17e5c5b639c1411394f84ef73524c379e454c1085f018f33fbd
+ size 33555612
ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e160f6140f0f3d71aeadb8db97bbc70be894f1f500c734db34a07ae43767beb6
+ size 9372
ckpts/universal/global_step40/zero/14.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75a6a8a597e0b1e79d66bd8ccab6e636a37d16a276ae9a4a8344c2dc0d95f20d
+ size 9293
ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ca5dea5f26ac963c29753d8d2242cb785ed77a3767bd3dd2828916dbb320e66
+ size 9372
ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa30388f624ced306c7f5a3cdaf4d28e4906a0c1912b2e88ed1faebd0e3e5e98
+ size 9387
ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a2bd8c6105d794a281cbb230e8aee689d14d9b4ba8120beec09fff26a8ca1a0
+ size 9293
ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86b9b6b5cc941b50ef0640fe548a91d8185b557c4f00c0adce54388dfa9a1344
+ size 33555627
ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:873e9dd677576eeae755c1200f9f0250b6b701a1bef6e4f4e66987a7d119a9d7
+ size 33555533
ckpts/universal/global_step40/zero/26.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15d60d7b9dd3fd75fb28d60475dac58910f82c24304f4c3d9b808544de715d5a
+ size 9372
ckpts/universal/global_step40/zero/26.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6dee10baea593f908f384d1a2eb90daac3f9120b2c3dbe4fcc7116b9678cc9b
+ size 9387
ckpts/universal/global_step40/zero/26.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d200745c9f640ae2de38d5e8f66ed52191c1a48862f2d49fcca6cc0e7e9b0472
+ size 9293
ckpts/universal/global_step40/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cac4041933538666ff1790170326d845ee32386e99f2db2cb80b920a39a4d750
+ size 33555612
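The checkpoint files above are Git LFS pointer stubs: only a sha256 and a byte size are stored in the repository, and the actual DeepSpeed optimizer/weight shards are fetched by git-lfs. As a hedged, illustrative sketch (not part of this commit), one shard could be inspected after running git lfs pull, assuming PyTorch is installed:

import torch

# Path copied from the diff above; map_location="cpu" avoids needing a GPU.
state = torch.load(
    "ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg.pt",
    map_location="cpu",
)
# The shard is expected to hold a tensor, but hedge in case it is a dict.
print(type(state), getattr(state, "shape", None))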
venv/lib/python3.10/site-packages/nltk/cluster/util.py ADDED
@@ -0,0 +1,300 @@
1
+ # Natural Language Toolkit: Clusterer Utilities
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # Contributor: J Richard Snape
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ import copy
9
+ from abc import abstractmethod
10
+ from math import sqrt
11
+ from sys import stdout
12
+
13
+ try:
14
+ import numpy
15
+ except ImportError:
16
+ pass
17
+
18
+ from nltk.cluster.api import ClusterI
19
+
20
+
21
+ class VectorSpaceClusterer(ClusterI):
22
+ """
23
+ Abstract clusterer which takes tokens and maps them into a vector space.
24
+ Optionally performs singular value decomposition to reduce the
25
+ dimensionality.
26
+ """
27
+
28
+ def __init__(self, normalise=False, svd_dimensions=None):
29
+ """
30
+ :param normalise: should vectors be normalised to length 1
31
+ :type normalise: boolean
32
+ :param svd_dimensions: number of dimensions to use in reducing vector
33
+ dimensionality with SVD
34
+ :type svd_dimensions: int
35
+ """
36
+ self._Tt = None
37
+ self._should_normalise = normalise
38
+ self._svd_dimensions = svd_dimensions
39
+
40
+ def cluster(self, vectors, assign_clusters=False, trace=False):
41
+ assert len(vectors) > 0
42
+
43
+ # normalise the vectors
44
+ if self._should_normalise:
45
+ vectors = list(map(self._normalise, vectors))
46
+
47
+ # use SVD to reduce the dimensionality
48
+ if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
49
+ [u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors)))
50
+ S = d[: self._svd_dimensions] * numpy.identity(
51
+ self._svd_dimensions, numpy.float64
52
+ )
53
+ T = u[:, : self._svd_dimensions]
54
+ Dt = vt[: self._svd_dimensions, :]
55
+ vectors = numpy.transpose(numpy.dot(S, Dt))
56
+ self._Tt = numpy.transpose(T)
57
+
58
+ # call abstract method to cluster the vectors
59
+ self.cluster_vectorspace(vectors, trace)
60
+
61
+ # assign the vectors to clusters
62
+ if assign_clusters:
63
+ return [self.classify(vector) for vector in vectors]
64
+
65
+ @abstractmethod
66
+ def cluster_vectorspace(self, vectors, trace):
67
+ """
68
+ Finds the clusters using the given set of vectors.
69
+ """
70
+
71
+ def classify(self, vector):
72
+ if self._should_normalise:
73
+ vector = self._normalise(vector)
74
+ if self._Tt is not None:
75
+ vector = numpy.dot(self._Tt, vector)
76
+ cluster = self.classify_vectorspace(vector)
77
+ return self.cluster_name(cluster)
78
+
79
+ @abstractmethod
80
+ def classify_vectorspace(self, vector):
81
+ """
82
+ Returns the index of the appropriate cluster for the vector.
83
+ """
84
+
85
+ def likelihood(self, vector, label):
86
+ if self._should_normalise:
87
+ vector = self._normalise(vector)
88
+ if self._Tt is not None:
89
+ vector = numpy.dot(self._Tt, vector)
90
+ return self.likelihood_vectorspace(vector, label)
91
+
92
+ def likelihood_vectorspace(self, vector, cluster):
93
+ """
94
+ Returns the likelihood of the vector belonging to the cluster.
95
+ """
96
+ predicted = self.classify_vectorspace(vector)
97
+ return 1.0 if cluster == predicted else 0.0
98
+
99
+ def vector(self, vector):
100
+ """
101
+ Returns the vector after normalisation and dimensionality reduction
102
+ """
103
+ if self._should_normalise:
104
+ vector = self._normalise(vector)
105
+ if self._Tt is not None:
106
+ vector = numpy.dot(self._Tt, vector)
107
+ return vector
108
+
109
+ def _normalise(self, vector):
110
+ """
111
+ Normalises the vector to unit length.
112
+ """
113
+ return vector / sqrt(numpy.dot(vector, vector))
114
+
115
+
116
+ def euclidean_distance(u, v):
117
+ """
118
+ Returns the euclidean distance between vectors u and v. This is equivalent
119
+ to the length of the vector (u - v).
120
+ """
121
+ diff = u - v
122
+ return sqrt(numpy.dot(diff, diff))
123
+
124
+
125
+ def cosine_distance(u, v):
126
+ """
127
+ Returns 1 minus the cosine of the angle between vectors v and u. This is
128
+ equal to ``1 - (u.v / |u||v|)``.
129
+ """
130
+ return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v))))
131
+
132
+
133
+ class _DendrogramNode:
134
+ """Tree node of a dendrogram."""
135
+
136
+ def __init__(self, value, *children):
137
+ self._value = value
138
+ self._children = children
139
+
140
+ def leaves(self, values=True):
141
+ if self._children:
142
+ leaves = []
143
+ for child in self._children:
144
+ leaves.extend(child.leaves(values))
145
+ return leaves
146
+ elif values:
147
+ return [self._value]
148
+ else:
149
+ return [self]
150
+
151
+ def groups(self, n):
152
+ queue = [(self._value, self)]
153
+
154
+ while len(queue) < n:
155
+ priority, node = queue.pop()
156
+ if not node._children:
157
+ queue.push((priority, node))
158
+ break
159
+ for child in node._children:
160
+ if child._children:
161
+ queue.append((child._value, child))
162
+ else:
163
+ queue.append((0, child))
164
+ # makes the earliest merges at the start, latest at the end
165
+ queue.sort()
166
+
167
+ groups = []
168
+ for priority, node in queue:
169
+ groups.append(node.leaves())
170
+ return groups
171
+
172
+ def __lt__(self, comparator):
173
+ return cosine_distance(self._value, comparator._value) < 0
174
+
175
+
176
+ class Dendrogram:
177
+ """
178
+ Represents a dendrogram, a tree with a specified branching order. This
179
+ must be initialised with the leaf items, then iteratively call merge for
180
+ each branch. This class constructs a tree representing the order of calls
181
+ to the merge function.
182
+ """
183
+
184
+ def __init__(self, items=[]):
185
+ """
186
+ :param items: the items at the leaves of the dendrogram
187
+ :type items: sequence of (any)
188
+ """
189
+ self._items = [_DendrogramNode(item) for item in items]
190
+ self._original_items = copy.copy(self._items)
191
+ self._merge = 1
192
+
193
+ def merge(self, *indices):
194
+ """
195
+ Merges nodes at given indices in the dendrogram. The nodes will be
196
+ combined which then replaces the first node specified. All other nodes
197
+ involved in the merge will be removed.
198
+
199
+ :param indices: indices of the items to merge (at least two)
200
+ :type indices: seq of int
201
+ """
202
+ assert len(indices) >= 2
203
+ node = _DendrogramNode(self._merge, *(self._items[i] for i in indices))
204
+ self._merge += 1
205
+ self._items[indices[0]] = node
206
+ for i in indices[1:]:
207
+ del self._items[i]
208
+
209
+ def groups(self, n):
210
+ """
211
+ Finds the n-groups of items (leaves) reachable from a cut at depth n.
212
+ :param n: number of groups
213
+ :type n: int
214
+ """
215
+ if len(self._items) > 1:
216
+ root = _DendrogramNode(self._merge, *self._items)
217
+ else:
218
+ root = self._items[0]
219
+ return root.groups(n)
220
+
221
+ def show(self, leaf_labels=[]):
222
+ """
223
+ Print the dendrogram in ASCII art to standard out.
224
+
225
+ :param leaf_labels: an optional list of strings to use for labeling the
226
+ leaves
227
+ :type leaf_labels: list
228
+ """
229
+
230
+ # ASCII rendering characters
231
+ JOIN, HLINK, VLINK = "+", "-", "|"
232
+
233
+ # find the root (or create one)
234
+ if len(self._items) > 1:
235
+ root = _DendrogramNode(self._merge, *self._items)
236
+ else:
237
+ root = self._items[0]
238
+ leaves = self._original_items
239
+
240
+ if leaf_labels:
241
+ last_row = leaf_labels
242
+ else:
243
+ last_row = ["%s" % leaf._value for leaf in leaves]
244
+
245
+ # find the bottom row and the best cell width
246
+ width = max(map(len, last_row)) + 1
247
+ lhalf = width // 2
248
+ rhalf = int(width - lhalf - 1)
249
+
250
+ # display functions
251
+ def format(centre, left=" ", right=" "):
252
+ return f"{lhalf * left}{centre}{right * rhalf}"
253
+
254
+ def display(str):
255
+ stdout.write(str)
256
+
257
+ # for each merge, top down
258
+ queue = [(root._value, root)]
259
+ verticals = [format(" ") for leaf in leaves]
260
+ while queue:
261
+ priority, node = queue.pop()
262
+ child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children))
263
+ indices = list(map(leaves.index, child_left_leaf))
264
+ if child_left_leaf:
265
+ min_idx = min(indices)
266
+ max_idx = max(indices)
267
+ for i in range(len(leaves)):
268
+ if leaves[i] in child_left_leaf:
269
+ if i == min_idx:
270
+ display(format(JOIN, " ", HLINK))
271
+ elif i == max_idx:
272
+ display(format(JOIN, HLINK, " "))
273
+ else:
274
+ display(format(JOIN, HLINK, HLINK))
275
+ verticals[i] = format(VLINK)
276
+ elif min_idx <= i <= max_idx:
277
+ display(format(HLINK, HLINK, HLINK))
278
+ else:
279
+ display(verticals[i])
280
+ display("\n")
281
+ for child in node._children:
282
+ if child._children:
283
+ queue.append((child._value, child))
284
+ queue.sort()
285
+
286
+ for vertical in verticals:
287
+ display(vertical)
288
+ display("\n")
289
+
290
+ # finally, display the last line
291
+ display("".join(item.center(width) for item in last_row))
292
+ display("\n")
293
+
294
+ def __repr__(self):
295
+ if len(self._items) > 1:
296
+ root = _DendrogramNode(self._merge, *self._items)
297
+ else:
298
+ root = self._items[0]
299
+ leaves = root.leaves(False)
300
+ return "<Dendrogram with %d leaves>" % len(leaves)
venv/lib/python3.10/site-packages/nltk/parse/__init__.py ADDED
@@ -0,0 +1,102 @@
+ # Natural Language Toolkit: Parsers
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Steven Bird <[email protected]>
+ # Edward Loper <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ #
+
+ """
+ NLTK Parsers
+
+ Classes and interfaces for producing tree structures that represent
+ the internal organization of a text. This task is known as "parsing"
+ the text, and the resulting tree structures are called the text's
+ "parses". Typically, the text is a single sentence, and the tree
+ structure represents the syntactic structure of the sentence.
+ However, parsers can also be used in other domains. For example,
+ parsers can be used to derive the morphological structure of the
+ morphemes that make up a word, or to derive the discourse structure
+ for a set of utterances.
+
+ Sometimes, a single piece of text can be represented by more than one
+ tree structure. Texts represented by more than one tree structure are
+ called "ambiguous" texts. Note that there are actually two ways in
+ which a text can be ambiguous:
+
+ - The text has multiple correct parses.
+ - There is not enough information to decide which of several
+ candidate parses is correct.
+
+ However, the parser module does *not* distinguish these two types of
+ ambiguity.
+
+ The parser module defines ``ParserI``, a standard interface for parsing
+ texts; and two simple implementations of that interface,
+ ``ShiftReduceParser`` and ``RecursiveDescentParser``. It also contains
+ three sub-modules for specialized kinds of parsing:
+
+ - ``nltk.parser.chart`` defines chart parsing, which uses dynamic
+ programming to efficiently parse texts.
+ - ``nltk.parser.probabilistic`` defines probabilistic parsing, which
+ associates a probability with each parse.
+ """
+
+ from nltk.parse.api import ParserI
+ from nltk.parse.bllip import BllipParser
+ from nltk.parse.chart import (
+ BottomUpChartParser,
+ BottomUpLeftCornerChartParser,
+ ChartParser,
+ LeftCornerChartParser,
+ SteppingChartParser,
+ TopDownChartParser,
+ )
+ from nltk.parse.corenlp import CoreNLPDependencyParser, CoreNLPParser
+ from nltk.parse.dependencygraph import DependencyGraph
+ from nltk.parse.earleychart import (
+ EarleyChartParser,
+ FeatureEarleyChartParser,
+ FeatureIncrementalBottomUpChartParser,
+ FeatureIncrementalBottomUpLeftCornerChartParser,
+ FeatureIncrementalChartParser,
+ FeatureIncrementalTopDownChartParser,
+ IncrementalBottomUpChartParser,
+ IncrementalBottomUpLeftCornerChartParser,
+ IncrementalChartParser,
+ IncrementalLeftCornerChartParser,
+ IncrementalTopDownChartParser,
+ )
+ from nltk.parse.evaluate import DependencyEvaluator
+ from nltk.parse.featurechart import (
+ FeatureBottomUpChartParser,
+ FeatureBottomUpLeftCornerChartParser,
+ FeatureChartParser,
+ FeatureTopDownChartParser,
+ )
+ from nltk.parse.malt import MaltParser
+ from nltk.parse.nonprojectivedependencyparser import (
+ NaiveBayesDependencyScorer,
+ NonprojectiveDependencyParser,
+ ProbabilisticNonprojectiveParser,
+ )
+ from nltk.parse.pchart import (
+ BottomUpProbabilisticChartParser,
+ InsideChartParser,
+ LongestChartParser,
+ RandomChartParser,
+ UnsortedChartParser,
+ )
+ from nltk.parse.projectivedependencyparser import (
+ ProbabilisticProjectiveDependencyParser,
+ ProjectiveDependencyParser,
+ )
+ from nltk.parse.recursivedescent import (
+ RecursiveDescentParser,
+ SteppingRecursiveDescentParser,
+ )
+ from nltk.parse.shiftreduce import ShiftReduceParser, SteppingShiftReduceParser
+ from nltk.parse.transitionparser import TransitionParser
+ from nltk.parse.util import TestGrammar, extract_test_sentences, load_parser
+ from nltk.parse.viterbi import ViterbiParser
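As a rough usage sketch (not part of the committed files), the chart parsers re-exported by nltk/parse/__init__.py above can be driven with a toy grammar along these lines, assuming a working nltk install; the grammar here is purely hypothetical:

from nltk import CFG
from nltk.parse import ChartParser

# Hypothetical toy grammar used only for illustration.
grammar = CFG.fromstring("""
S -> NP VP
NP -> Det N
VP -> V NP
Det -> 'the' | 'a'
N -> 'dog' | 'cat'
V -> 'chased'
""")

parser = ChartParser(grammar)  # uses the module's default parsing strategy
for tree in parser.parse("the dog chased a cat".split()):
    print(tree)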
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.93 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc ADDED
Binary file (2.64 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc ADDED
Binary file (7.15 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc ADDED
Binary file (53.9 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc ADDED
Binary file (26 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc ADDED
Binary file (29.6 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc ADDED
Binary file (15.7 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc ADDED
Binary file (4.04 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc ADDED
Binary file (18.8 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc ADDED
Binary file (2.14 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc ADDED
Binary file (10.1 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc ADDED
Binary file (21.6 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc ADDED
Binary file (17.2 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc ADDED
Binary file (18 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc ADDED
Binary file (23.7 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc ADDED
Binary file (15.5 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc ADDED
Binary file (17.4 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc ADDED
Binary file (21.3 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc ADDED
Binary file (7.37 kB).
venv/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc ADDED
Binary file (15.7 kB).
venv/lib/python3.10/site-packages/nltk/parse/api.py ADDED
@@ -0,0 +1,72 @@
+ # Natural Language Toolkit: Parser API
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Steven Bird <[email protected]>
+ # Edward Loper <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ #
+
+ import itertools
+
+ from nltk.internals import overridden
+
+
+ class ParserI:
+ """
+ A processing class for deriving trees that represent possible
+ structures for a sequence of tokens. These tree structures are
+ known as "parses". Typically, parsers are used to derive syntax
+ trees for sentences. But parsers can also be used to derive other
+ kinds of tree structure, such as morphological trees and discourse
+ structures.
+
+ Subclasses must define:
+ - at least one of: ``parse()``, ``parse_sents()``.
+
+ Subclasses may define:
+ - ``grammar()``
+ """
+
+ def grammar(self):
+ """
+ :return: The grammar used by this parser.
+ """
+ raise NotImplementedError()
+
+ def parse(self, sent, *args, **kwargs):
+ """
+ :return: An iterator that generates parse trees for the sentence.
+ When possible this list is sorted from most likely to least likely.
+
+ :param sent: The sentence to be parsed
+ :type sent: list(str)
+ :rtype: iter(Tree)
+ """
+ if overridden(self.parse_sents):
+ return next(self.parse_sents([sent], *args, **kwargs))
+ elif overridden(self.parse_one):
+ return (
+ tree
+ for tree in [self.parse_one(sent, *args, **kwargs)]
+ if tree is not None
+ )
+ elif overridden(self.parse_all):
+ return iter(self.parse_all(sent, *args, **kwargs))
+ else:
+ raise NotImplementedError()
+
+ def parse_sents(self, sents, *args, **kwargs):
+ """
+ Apply ``self.parse()`` to each element of ``sents``.
+ :rtype: iter(iter(Tree))
+ """
+ return (self.parse(sent, *args, **kwargs) for sent in sents)
+
+ def parse_all(self, sent, *args, **kwargs):
+ """:rtype: list(Tree)"""
+ return list(self.parse(sent, *args, **kwargs))
+
+ def parse_one(self, sent, *args, **kwargs):
+ """:rtype: Tree or None"""
+ return next(self.parse(sent, *args, **kwargs), None)
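To illustrate the contract documented in nltk/parse/api.py above (a sketch only, not part of this commit), a minimal hypothetical ParserI subclass needs to override just parse(); parse_sents(), parse_all() and parse_one() then work on top of it:

from nltk.parse.api import ParserI
from nltk.tree import Tree

class FlatParser(ParserI):
    """Toy parser: yields a single flat tree spanning all tokens."""

    def parse(self, sent, *args, **kwargs):
        # Exactly one "parse": every token is a direct child of one S node.
        yield Tree("S", list(sent))

parser = FlatParser()
print(parser.parse_one("a toy example".split()))  # (S a toy example)
print(parser.parse_all("a toy example".split()))  # list holding that one tree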
venv/lib/python3.10/site-packages/nltk/parse/bllip.py ADDED
@@ -0,0 +1,299 @@
1
+ # Natural Language Toolkit: Interface to BLLIP Parser
2
+ #
3
+ # Author: David McClosky <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from nltk.parse.api import ParserI
10
+ from nltk.tree import Tree
11
+
12
+ """
13
+ Interface for parsing with BLLIP Parser. Requires the Python
14
+ bllipparser module. BllipParser objects can be constructed with the
15
+ ``BllipParser.from_unified_model_dir`` class method or manually using the
16
+ ``BllipParser`` constructor. The former is generally easier if you have
17
+ a BLLIP Parser unified model directory -- a basic model can be obtained
18
+ from NLTK's downloader. More unified parsing models can be obtained with
19
+ BLLIP Parser's ModelFetcher (run ``python -m bllipparser.ModelFetcher``
20
+ or see docs for ``bllipparser.ModelFetcher.download_and_install_model``).
21
+
22
+ Basic usage::
23
+
24
+ # download and install a basic unified parsing model (Wall Street Journal)
25
+ # sudo python -m nltk.downloader bllip_wsj_no_aux
26
+
27
+ >>> from nltk.data import find
28
+ >>> model_dir = find('models/bllip_wsj_no_aux').path
29
+ >>> bllip = BllipParser.from_unified_model_dir(model_dir)
30
+
31
+ # 1-best parsing
32
+ >>> sentence1 = 'British left waffles on Falklands .'.split()
33
+ >>> top_parse = bllip.parse_one(sentence1)
34
+ >>> print(top_parse)
35
+ (S1
36
+ (S
37
+ (NP (JJ British) (NN left))
38
+ (VP (VBZ waffles) (PP (IN on) (NP (NNP Falklands))))
39
+ (. .)))
40
+
41
+ # n-best parsing
42
+ >>> sentence2 = 'Time flies'.split()
43
+ >>> all_parses = bllip.parse_all(sentence2)
44
+ >>> print(len(all_parses))
45
+ 50
46
+ >>> print(all_parses[0])
47
+ (S1 (S (NP (NNP Time)) (VP (VBZ flies))))
48
+
49
+ # incorporating external tagging constraints (None means unconstrained tag)
50
+ >>> constrained1 = bllip.tagged_parse([('Time', 'VB'), ('flies', 'NNS')])
51
+ >>> print(next(constrained1))
52
+ (S1 (NP (VB Time) (NNS flies)))
53
+ >>> constrained2 = bllip.tagged_parse([('Time', 'NN'), ('flies', None)])
54
+ >>> print(next(constrained2))
55
+ (S1 (NP (NN Time) (VBZ flies)))
56
+
57
+ References
58
+ ----------
59
+
60
+ - Charniak, Eugene. "A maximum-entropy-inspired parser." Proceedings of
61
+ the 1st North American chapter of the Association for Computational
62
+ Linguistics conference. Association for Computational Linguistics,
63
+ 2000.
64
+
65
+ - Charniak, Eugene, and Mark Johnson. "Coarse-to-fine n-best parsing
66
+ and MaxEnt discriminative reranking." Proceedings of the 43rd Annual
67
+ Meeting on Association for Computational Linguistics. Association
68
+ for Computational Linguistics, 2005.
69
+
70
+ Known issues
71
+ ------------
72
+
73
+ Note that BLLIP Parser is not currently threadsafe. Since this module
74
+ uses a SWIG interface, it is potentially unsafe to create multiple
75
+ ``BllipParser`` objects in the same process. BLLIP Parser currently
76
+ has issues with non-ASCII text and will raise an error if given any.
77
+
78
+ See https://pypi.python.org/pypi/bllipparser/ for more information
79
+ on BLLIP Parser's Python interface.
80
+ """
81
+
82
+ __all__ = ["BllipParser"]
83
+
84
+ # this block allows this module to be imported even if bllipparser isn't
85
+ # available
86
+ try:
87
+ from bllipparser import RerankingParser
88
+ from bllipparser.RerankingParser import get_unified_model_parameters
89
+
90
+ def _ensure_bllip_import_or_error():
91
+ pass
92
+
93
+ except ImportError as ie:
94
+
95
+ def _ensure_bllip_import_or_error(ie=ie):
96
+ raise ImportError("Couldn't import bllipparser module: %s" % ie)
97
+
98
+
99
+ def _ensure_ascii(words):
100
+ try:
101
+ for i, word in enumerate(words):
102
+ word.encode("ascii")
103
+ except UnicodeEncodeError as e:
104
+ raise ValueError(
105
+ f"Token {i} ({word!r}) is non-ASCII. BLLIP Parser "
106
+ "currently doesn't support non-ASCII inputs."
107
+ ) from e
108
+
109
+
110
+ def _scored_parse_to_nltk_tree(scored_parse):
111
+ return Tree.fromstring(str(scored_parse.ptb_parse))
112
+
113
+
114
+ class BllipParser(ParserI):
115
+ """
116
+ Interface for parsing with BLLIP Parser. BllipParser objects can be
117
+ constructed with the ``BllipParser.from_unified_model_dir`` class
118
+ method or manually using the ``BllipParser`` constructor.
119
+ """
120
+
121
+ def __init__(
122
+ self,
123
+ parser_model=None,
124
+ reranker_features=None,
125
+ reranker_weights=None,
126
+ parser_options=None,
127
+ reranker_options=None,
128
+ ):
129
+ """
130
+ Load a BLLIP Parser model from scratch. You'll typically want to
131
+ use the ``from_unified_model_dir()`` class method to construct
132
+ this object.
133
+
134
+ :param parser_model: Path to parser model directory
135
+ :type parser_model: str
136
+
137
+ :param reranker_features: Path the reranker model's features file
138
+ :type reranker_features: str
139
+
140
+ :param reranker_weights: Path the reranker model's weights file
141
+ :type reranker_weights: str
142
+
143
+ :param parser_options: optional dictionary of parser options, see
144
+ ``bllipparser.RerankingParser.RerankingParser.load_parser_options()``
145
+ for more information.
146
+ :type parser_options: dict(str)
147
+
148
+ :param reranker_options: optional
149
+ dictionary of reranker options, see
150
+ ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()``
151
+ for more information.
152
+ :type reranker_options: dict(str)
153
+ """
154
+ _ensure_bllip_import_or_error()
155
+
156
+ parser_options = parser_options or {}
157
+ reranker_options = reranker_options or {}
158
+
159
+ self.rrp = RerankingParser()
160
+ self.rrp.load_parser_model(parser_model, **parser_options)
161
+ if reranker_features and reranker_weights:
162
+ self.rrp.load_reranker_model(
163
+ features_filename=reranker_features,
164
+ weights_filename=reranker_weights,
165
+ **reranker_options,
166
+ )
167
+
168
+ def parse(self, sentence):
169
+ """
170
+ Use BLLIP Parser to parse a sentence. Takes a sentence as a list
171
+ of words; it will be automatically tagged with this BLLIP Parser
172
+ instance's tagger.
173
+
174
+ :return: An iterator that generates parse trees for the sentence
175
+ from most likely to least likely.
176
+
177
+ :param sentence: The sentence to be parsed
178
+ :type sentence: list(str)
179
+ :rtype: iter(Tree)
180
+ """
181
+ _ensure_ascii(sentence)
182
+ nbest_list = self.rrp.parse(sentence)
183
+ for scored_parse in nbest_list:
184
+ yield _scored_parse_to_nltk_tree(scored_parse)
185
+
186
+ def tagged_parse(self, word_and_tag_pairs):
187
+ """
188
+ Use BLLIP to parse a sentence. Takes a sentence as a list of
189
+ (word, tag) tuples; the sentence must have already been tokenized
190
+ and tagged. BLLIP will attempt to use the tags provided but may
191
+ use others if it can't come up with a complete parse subject
192
+ to those constraints. You may also specify a tag as ``None``
193
+ to leave a token's tag unconstrained.
194
+
195
+ :return: An iterator that generates parse trees for the sentence
196
+ from most likely to least likely.
197
+
198
+ :param sentence: Input sentence to parse as (word, tag) pairs
199
+ :type sentence: list(tuple(str, str))
200
+ :rtype: iter(Tree)
201
+ """
202
+ words = []
203
+ tag_map = {}
204
+ for i, (word, tag) in enumerate(word_and_tag_pairs):
205
+ words.append(word)
206
+ if tag is not None:
207
+ tag_map[i] = tag
208
+
209
+ _ensure_ascii(words)
210
+ nbest_list = self.rrp.parse_tagged(words, tag_map)
211
+ for scored_parse in nbest_list:
212
+ yield _scored_parse_to_nltk_tree(scored_parse)
213
+
214
+ @classmethod
215
+ def from_unified_model_dir(
216
+ cls, model_dir, parser_options=None, reranker_options=None
217
+ ):
218
+ """
219
+ Create a ``BllipParser`` object from a unified parsing model
220
+ directory. Unified parsing model directories are a standardized
221
+ way of storing BLLIP parser and reranker models together on disk.
222
+ See ``bllipparser.RerankingParser.get_unified_model_parameters()``
223
+ for more information about unified model directories.
224
+
225
+ :return: A ``BllipParser`` object using the parser and reranker
226
+ models in the model directory.
227
+
228
+ :param model_dir: Path to the unified model directory.
229
+ :type model_dir: str
230
+ :param parser_options: optional dictionary of parser options, see
231
+ ``bllipparser.RerankingParser.RerankingParser.load_parser_options()``
232
+ for more information.
233
+ :type parser_options: dict(str)
234
+ :param reranker_options: optional dictionary of reranker options, see
235
+ ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()``
236
+ for more information.
237
+ :type reranker_options: dict(str)
238
+ :rtype: BllipParser
239
+ """
240
+ (
241
+ parser_model_dir,
242
+ reranker_features_filename,
243
+ reranker_weights_filename,
244
+ ) = get_unified_model_parameters(model_dir)
245
+ return cls(
246
+ parser_model_dir,
247
+ reranker_features_filename,
248
+ reranker_weights_filename,
249
+ parser_options,
250
+ reranker_options,
251
+ )
252
+
253
+
254
+ def demo():
255
+ """This assumes the Python module bllipparser is installed."""
256
+
257
+ # download and install a basic unified parsing model (Wall Street Journal)
258
+ # sudo python -m nltk.downloader bllip_wsj_no_aux
259
+
260
+ from nltk.data import find
261
+
262
+ model_dir = find("models/bllip_wsj_no_aux").path
263
+
264
+ print("Loading BLLIP Parsing models...")
265
+ # the easiest way to get started is to use a unified model
266
+ bllip = BllipParser.from_unified_model_dir(model_dir)
267
+ print("Done.")
268
+
269
+ sentence1 = "British left waffles on Falklands .".split()
270
+ sentence2 = "I saw the man with the telescope .".split()
271
+ # this sentence is known to fail under the WSJ parsing model
272
+ fail1 = "# ! ? : -".split()
273
+ for sentence in (sentence1, sentence2, fail1):
274
+ print("Sentence: %r" % " ".join(sentence))
275
+ try:
276
+ tree = next(bllip.parse(sentence))
277
+ print(tree)
278
+ except StopIteration:
279
+ print("(parse failed)")
280
+
281
+ # n-best parsing demo
282
+ for i, parse in enumerate(bllip.parse(sentence1)):
283
+ print("parse %d:\n%s" % (i, parse))
284
+
285
+ # using external POS tag constraints
286
+ print(
287
+ "forcing 'tree' to be 'NN':",
288
+ next(bllip.tagged_parse([("A", None), ("tree", "NN")])),
289
+ )
290
+ print(
291
+ "forcing 'A' to be 'DT' and 'tree' to be 'NNP':",
292
+ next(bllip.tagged_parse([("A", "DT"), ("tree", "NNP")])),
293
+ )
294
+ # constraints don't have to make sense... (though on more complicated
295
+ # sentences, they may cause the parse to fail)
296
+ print(
297
+ "forcing 'A' to be 'NNP':",
298
+ next(bllip.tagged_parse([("A", "NNP"), ("tree", None)])),
299
+ )
venv/lib/python3.10/site-packages/nltk/parse/chart.py ADDED
@@ -0,0 +1,1848 @@
1
+ # Natural Language Toolkit: A Chart Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Jean Mark Gawron <[email protected]>
7
+ # Peter Ljunglöf <[email protected]>
8
+ # URL: <https://www.nltk.org/>
9
+ # For license information, see LICENSE.TXT
10
+
11
+ """
12
+ Data classes and parser implementations for "chart parsers", which
13
+ use dynamic programming to efficiently parse a text. A chart
14
+ parser derives parse trees for a text by iteratively adding "edges"
15
+ to a "chart." Each edge represents a hypothesis about the tree
16
+ structure for a subsequence of the text. The chart is a
17
+ "blackboard" for composing and combining these hypotheses.
18
+
19
+ When a chart parser begins parsing a text, it creates a new (empty)
20
+ chart, spanning the text. It then incrementally adds new edges to the
21
+ chart. A set of "chart rules" specifies the conditions under which
22
+ new edges should be added to the chart. Once the chart reaches a
23
+ stage where none of the chart rules adds any new edges, parsing is
24
+ complete.
25
+
26
+ Charts are encoded with the ``Chart`` class, and edges are encoded with
27
+ the ``TreeEdge`` and ``LeafEdge`` classes. The chart parser module
28
+ defines three chart parsers:
29
+
30
+ - ``ChartParser`` is a simple and flexible chart parser. Given a
31
+ set of chart rules, it will apply those rules to the chart until
32
+ no more edges are added.
33
+
34
+ - ``SteppingChartParser`` is a subclass of ``ChartParser`` that can
35
+ be used to step through the parsing process.
36
+ """
37
+
38
+ import itertools
39
+ import re
40
+ import warnings
41
+ from functools import total_ordering
42
+
43
+ from nltk.grammar import PCFG, is_nonterminal, is_terminal
44
+ from nltk.internals import raise_unorderable_types
45
+ from nltk.parse.api import ParserI
46
+ from nltk.tree import Tree
47
+ from nltk.util import OrderedDict
48
+
49
+ ########################################################################
50
+ ## Edges
51
+ ########################################################################
52
+
53
+
54
+ @total_ordering
55
+ class EdgeI:
56
+ """
57
+ A hypothesis about the structure of part of a sentence.
58
+ Each edge records the fact that a structure is (partially)
59
+ consistent with the sentence. An edge contains:
60
+
61
+ - A span, indicating what part of the sentence is
62
+ consistent with the hypothesized structure.
63
+ - A left-hand side, specifying what kind of structure is
64
+ hypothesized.
65
+ - A right-hand side, specifying the contents of the
66
+ hypothesized structure.
67
+ - A dot position, indicating how much of the hypothesized
68
+ structure is consistent with the sentence.
69
+
70
+ Every edge is either complete or incomplete:
71
+
72
+ - An edge is complete if its structure is fully consistent
73
+ with the sentence.
74
+ - An edge is incomplete if its structure is partially
75
+ consistent with the sentence. For every incomplete edge, the
76
+ span specifies a possible prefix for the edge's structure.
77
+
78
+ There are two kinds of edge:
79
+
80
+ - A ``TreeEdge`` records which trees have been found to
81
+ be (partially) consistent with the text.
82
+ - A ``LeafEdge`` records the tokens occurring in the text.
83
+
84
+ The ``EdgeI`` interface provides a common interface to both types
85
+ of edge, allowing chart parsers to treat them in a uniform manner.
86
+ """
87
+
88
+ def __init__(self):
89
+ if self.__class__ == EdgeI:
90
+ raise TypeError("Edge is an abstract interface")
91
+
92
+ # ////////////////////////////////////////////////////////////
93
+ # Span
94
+ # ////////////////////////////////////////////////////////////
95
+
96
+ def span(self):
97
+ """
98
+ Return a tuple ``(s, e)``, where ``tokens[s:e]`` is the
99
+ portion of the sentence that is consistent with this
100
+ edge's structure.
101
+
102
+ :rtype: tuple(int, int)
103
+ """
104
+ raise NotImplementedError()
105
+
106
+ def start(self):
107
+ """
108
+ Return the start index of this edge's span.
109
+
110
+ :rtype: int
111
+ """
112
+ raise NotImplementedError()
113
+
114
+ def end(self):
115
+ """
116
+ Return the end index of this edge's span.
117
+
118
+ :rtype: int
119
+ """
120
+ raise NotImplementedError()
121
+
122
+ def length(self):
123
+ """
124
+ Return the length of this edge's span.
125
+
126
+ :rtype: int
127
+ """
128
+ raise NotImplementedError()
129
+
130
+ # ////////////////////////////////////////////////////////////
131
+ # Left Hand Side
132
+ # ////////////////////////////////////////////////////////////
133
+
134
+ def lhs(self):
135
+ """
136
+ Return this edge's left-hand side, which specifies what kind
137
+ of structure is hypothesized by this edge.
138
+
139
+ :see: ``TreeEdge`` and ``LeafEdge`` for a description of
140
+ the left-hand side values for each edge type.
141
+ """
142
+ raise NotImplementedError()
143
+
144
+ # ////////////////////////////////////////////////////////////
145
+ # Right Hand Side
146
+ # ////////////////////////////////////////////////////////////
147
+
148
+ def rhs(self):
149
+ """
150
+ Return this edge's right-hand side, which specifies
151
+ the content of the structure hypothesized by this edge.
152
+
153
+ :see: ``TreeEdge`` and ``LeafEdge`` for a description of
154
+ the right-hand side values for each edge type.
155
+ """
156
+ raise NotImplementedError()
157
+
158
+ def dot(self):
159
+ """
160
+ Return this edge's dot position, which indicates how much of
161
+ the hypothesized structure is consistent with the
162
+ sentence. In particular, ``self.rhs[:dot]`` is consistent
163
+ with ``tokens[self.start():self.end()]``.
164
+
165
+ :rtype: int
166
+ """
167
+ raise NotImplementedError()
168
+
169
+ def nextsym(self):
170
+ """
171
+ Return the element of this edge's right-hand side that
172
+ immediately follows its dot.
173
+
174
+ :rtype: Nonterminal or terminal or None
175
+ """
176
+ raise NotImplementedError()
177
+
178
+ def is_complete(self):
179
+ """
180
+ Return True if this edge's structure is fully consistent
181
+ with the text.
182
+
183
+ :rtype: bool
184
+ """
185
+ raise NotImplementedError()
186
+
187
+ def is_incomplete(self):
188
+ """
189
+ Return True if this edge's structure is partially consistent
190
+ with the text.
191
+
192
+ :rtype: bool
193
+ """
194
+ raise NotImplementedError()
195
+
196
+ # ////////////////////////////////////////////////////////////
197
+ # Comparisons & hashing
198
+ # ////////////////////////////////////////////////////////////
199
+
200
+ def __eq__(self, other):
201
+ return (
202
+ self.__class__ is other.__class__
203
+ and self._comparison_key == other._comparison_key
204
+ )
205
+
206
+ def __ne__(self, other):
207
+ return not self == other
208
+
209
+ def __lt__(self, other):
210
+ if not isinstance(other, EdgeI):
211
+ raise_unorderable_types("<", self, other)
212
+ if self.__class__ is other.__class__:
213
+ return self._comparison_key < other._comparison_key
214
+ else:
215
+ return self.__class__.__name__ < other.__class__.__name__
216
+
217
+ def __hash__(self):
218
+ try:
219
+ return self._hash
220
+ except AttributeError:
221
+ self._hash = hash(self._comparison_key)
222
+ return self._hash
223
+
224
+
225
+ class TreeEdge(EdgeI):
226
+ """
227
+ An edge that records the fact that a tree is (partially)
228
+ consistent with the sentence. A tree edge consists of:
229
+
230
+ - A span, indicating what part of the sentence is
231
+ consistent with the hypothesized tree.
232
+ - A left-hand side, specifying the hypothesized tree's node
233
+ value.
234
+ - A right-hand side, specifying the hypothesized tree's
235
+ children. Each element of the right-hand side is either a
236
+ terminal, specifying a token with that terminal as its leaf
237
+ value; or a nonterminal, specifying a subtree with that
238
+ nonterminal's symbol as its node value.
239
+ - A dot position, indicating which children are consistent
240
+ with part of the sentence. In particular, if ``dot`` is the
241
+ dot position, ``rhs`` is the right-hand size, ``(start,end)``
242
+ is the span, and ``sentence`` is the list of tokens in the
243
+ sentence, then ``tokens[start:end]`` can be spanned by the
244
+ children specified by ``rhs[:dot]``.
245
+
246
+ For more information about edges, see the ``EdgeI`` interface.
247
+ """
248
+
249
+ def __init__(self, span, lhs, rhs, dot=0):
250
+ """
251
+ Construct a new ``TreeEdge``.
252
+
253
+ :type span: tuple(int, int)
254
+ :param span: A tuple ``(s, e)``, where ``tokens[s:e]`` is the
255
+ portion of the sentence that is consistent with the new
256
+ edge's structure.
257
+ :type lhs: Nonterminal
258
+ :param lhs: The new edge's left-hand side, specifying the
259
+ hypothesized tree's node value.
260
+ :type rhs: list(Nonterminal and str)
261
+ :param rhs: The new edge's right-hand side, specifying the
262
+ hypothesized tree's children.
263
+ :type dot: int
264
+ :param dot: The position of the new edge's dot. This position
265
+ specifies what prefix of the production's right hand side
266
+ is consistent with the text. In particular, if
267
+ ``sentence`` is the list of tokens in the sentence, then
268
+ ``tokens[span[0]:span[1]]`` can be spanned by the
269
+ children specified by ``rhs[:dot]``.
270
+ """
271
+ self._span = span
272
+ self._lhs = lhs
273
+ rhs = tuple(rhs)
274
+ self._rhs = rhs
275
+ self._dot = dot
276
+ self._comparison_key = (span, lhs, rhs, dot)
277
+
278
+ @staticmethod
279
+ def from_production(production, index):
280
+ """
281
+ Return a new ``TreeEdge`` formed from the given production.
282
+ The new edge's left-hand side and right-hand side will
283
+ be taken from ``production``; its span will be
284
+ ``(index,index)``; and its dot position will be ``0``.
285
+
286
+ :rtype: TreeEdge
287
+ """
288
+ return TreeEdge(
289
+ span=(index, index), lhs=production.lhs(), rhs=production.rhs(), dot=0
290
+ )
291
+
292
+ def move_dot_forward(self, new_end):
293
+ """
294
+ Return a new ``TreeEdge`` formed from this edge.
295
+ The new edge's dot position is increased by ``1``,
296
+ and its end index will be replaced by ``new_end``.
297
+
298
+ :param new_end: The new end index.
299
+ :type new_end: int
300
+ :rtype: TreeEdge
301
+ """
302
+ return TreeEdge(
303
+ span=(self._span[0], new_end),
304
+ lhs=self._lhs,
305
+ rhs=self._rhs,
306
+ dot=self._dot + 1,
307
+ )
308
+
309
+ # Accessors
310
+ def lhs(self):
311
+ return self._lhs
312
+
313
+ def span(self):
314
+ return self._span
315
+
316
+ def start(self):
317
+ return self._span[0]
318
+
319
+ def end(self):
320
+ return self._span[1]
321
+
322
+ def length(self):
323
+ return self._span[1] - self._span[0]
324
+
325
+ def rhs(self):
326
+ return self._rhs
327
+
328
+ def dot(self):
329
+ return self._dot
330
+
331
+ def is_complete(self):
332
+ return self._dot == len(self._rhs)
333
+
334
+ def is_incomplete(self):
335
+ return self._dot != len(self._rhs)
336
+
337
+ def nextsym(self):
338
+ if self._dot >= len(self._rhs):
339
+ return None
340
+ else:
341
+ return self._rhs[self._dot]
342
+
343
+ # String representation
344
+ def __str__(self):
345
+ str = f"[{self._span[0]}:{self._span[1]}] "
346
+ str += "%-2r ->" % (self._lhs,)
347
+
348
+ for i in range(len(self._rhs)):
349
+ if i == self._dot:
350
+ str += " *"
351
+ str += " %s" % repr(self._rhs[i])
352
+ if len(self._rhs) == self._dot:
353
+ str += " *"
354
+ return str
355
+
356
+ def __repr__(self):
357
+ return "[Edge: %s]" % self
358
+
359
+
360
+ class LeafEdge(EdgeI):
361
+ """
362
+ An edge that records the fact that a leaf value is consistent with
363
+ a word in the sentence. A leaf edge consists of:
364
+
365
+ - An index, indicating the position of the word.
366
+ - A leaf, specifying the word's content.
367
+
368
+ A leaf edge's left-hand side is its leaf value, and its right hand
369
+ side is ``()``. Its span is ``[index, index+1]``, and its dot
370
+ position is ``0``.
371
+ """
372
+
373
+ def __init__(self, leaf, index):
374
+ """
375
+ Construct a new ``LeafEdge``.
376
+
377
+ :param leaf: The new edge's leaf value, specifying the word
378
+ that is recorded by this edge.
379
+ :param index: The new edge's index, specifying the position of
380
+ the word that is recorded by this edge.
381
+ """
382
+ self._leaf = leaf
383
+ self._index = index
384
+ self._comparison_key = (leaf, index)
385
+
386
+ # Accessors
387
+ def lhs(self):
388
+ return self._leaf
389
+
390
+ def span(self):
391
+ return (self._index, self._index + 1)
392
+
393
+ def start(self):
394
+ return self._index
395
+
396
+ def end(self):
397
+ return self._index + 1
398
+
399
+ def length(self):
400
+ return 1
401
+
402
+ def rhs(self):
403
+ return ()
404
+
405
+ def dot(self):
406
+ return 0
407
+
408
+ def is_complete(self):
409
+ return True
410
+
411
+ def is_incomplete(self):
412
+ return False
413
+
414
+ def nextsym(self):
415
+ return None
416
+
417
+ # String representations
418
+ def __str__(self):
419
+ return f"[{self._index}:{self._index + 1}] {repr(self._leaf)}"
420
+
421
+ def __repr__(self):
422
+ return "[Edge: %s]" % (self)
423
+
424
+
425
+ ########################################################################
426
+ ## Chart
427
+ ########################################################################
428
+
429
+
430
+ class Chart:
431
+ """
432
+ A blackboard for hypotheses about the syntactic constituents of a
433
+ sentence. A chart contains a set of edges, and each edge encodes
434
+ a single hypothesis about the structure of some portion of the
435
+ sentence.
436
+
437
+ The ``select`` method can be used to select a specific collection
438
+ of edges. For example ``chart.select(is_complete=True, start=0)``
439
+ yields all complete edges whose start indices are 0. To ensure
440
+ the efficiency of these selection operations, ``Chart`` dynamically
441
+ creates and maintains an index for each set of attributes that
442
+ have been selected on.
443
+
444
+ In order to reconstruct the trees that are represented by an edge,
445
+ the chart associates each edge with a set of child pointer lists.
446
+ A child pointer list is a list of the edges that license an
447
+ edge's right-hand side.
448
+
449
+ :ivar _tokens: The sentence that the chart covers.
450
+ :ivar _num_leaves: The number of tokens.
451
+ :ivar _edges: A list of the edges in the chart
452
+ :ivar _edge_to_cpls: A dictionary mapping each edge to a set
453
+ of child pointer lists that are associated with that edge.
454
+ :ivar _indexes: A dictionary mapping tuples of edge attributes
455
+ to indices, where each index maps the corresponding edge
456
+ attribute values to lists of edges.
457
+ """
458
+
459
+ def __init__(self, tokens):
460
+ """
461
+ Construct a new chart for the given sentence. The chart starts
462
+ out empty; leaf edges are added later by rules such as ``LeafInitRule``.
463
+
464
+ :type tokens: list
465
+ :param tokens: The sentence that this chart will be used to parse.
466
+ """
467
+ # Record the sentence token and the sentence length.
468
+ self._tokens = tuple(tokens)
469
+ self._num_leaves = len(self._tokens)
470
+
471
+ # Initialise the chart.
472
+ self.initialize()
473
+
474
+ def initialize(self):
475
+ """
476
+ Clear the chart.
477
+ """
478
+ # A list of edges contained in this chart.
479
+ self._edges = []
480
+
481
+ # The set of child pointer lists associated with each edge.
482
+ self._edge_to_cpls = {}
483
+
484
+ # Indexes mapping attribute values to lists of edges
485
+ # (used by select()).
486
+ self._indexes = {}
487
+
488
+ # ////////////////////////////////////////////////////////////
489
+ # Sentence Access
490
+ # ////////////////////////////////////////////////////////////
491
+
492
+ def num_leaves(self):
493
+ """
494
+ Return the number of words in this chart's sentence.
495
+
496
+ :rtype: int
497
+ """
498
+ return self._num_leaves
499
+
500
+ def leaf(self, index):
501
+ """
502
+ Return the leaf value of the word at the given index.
503
+
504
+ :rtype: str
505
+ """
506
+ return self._tokens[index]
507
+
508
+ def leaves(self):
509
+ """
510
+ Return a list of the leaf values of each word in the
511
+ chart's sentence.
512
+
513
+ :rtype: list(str)
514
+ """
515
+ return self._tokens
516
+
517
+ # ////////////////////////////////////////////////////////////
518
+ # Edge access
519
+ # ////////////////////////////////////////////////////////////
520
+
521
+ def edges(self):
522
+ """
523
+ Return a list of all edges in this chart. New edges
524
+ that are added to the chart after the call to edges()
525
+ will *not* be contained in this list.
526
+
527
+ :rtype: list(EdgeI)
528
+ :see: ``iteredges``, ``select``
529
+ """
530
+ return self._edges[:]
531
+
532
+ def iteredges(self):
533
+ """
534
+ Return an iterator over the edges in this chart. It is
535
+ not guaranteed that new edges which are added to the
536
+ chart before the iterator is exhausted will also be generated.
537
+
538
+ :rtype: iter(EdgeI)
539
+ :see: ``edges``, ``select``
540
+ """
541
+ return iter(self._edges)
542
+
543
+ # Iterating over the chart yields its edges.
544
+ __iter__ = iteredges
545
+
546
+ def num_edges(self):
547
+ """
548
+ Return the number of edges contained in this chart.
549
+
550
+ :rtype: int
551
+ """
552
+ return len(self._edge_to_cpls)
553
+
554
+ def select(self, **restrictions):
555
+ """
556
+ Return an iterator over the edges in this chart. Any
557
+ new edges that are added to the chart before the iterator
558
+ is exhausted will also be generated. ``restrictions``
559
+ can be used to restrict the set of edges that will be
560
+ generated.
561
+
562
+ :param span: Only generate edges ``e`` where ``e.span()==span``
563
+ :param start: Only generate edges ``e`` where ``e.start()==start``
564
+ :param end: Only generate edges ``e`` where ``e.end()==end``
565
+ :param length: Only generate edges ``e`` where ``e.length()==length``
566
+ :param lhs: Only generate edges ``e`` where ``e.lhs()==lhs``
567
+ :param rhs: Only generate edges ``e`` where ``e.rhs()==rhs``
568
+ :param nextsym: Only generate edges ``e`` where
569
+ ``e.nextsym()==nextsym``
570
+ :param dot: Only generate edges ``e`` where ``e.dot()==dot``
571
+ :param is_complete: Only generate edges ``e`` where
572
+ ``e.is_complete()==is_complete``
573
+ :param is_incomplete: Only generate edges ``e`` where
574
+ ``e.is_incomplete()==is_incomplete``
575
+ :rtype: iter(EdgeI)
576
+ """
577
+ # If there are no restrictions, then return all edges.
578
+ if restrictions == {}:
579
+ return iter(self._edges)
580
+
581
+ # Find the index corresponding to the given restrictions.
582
+ restr_keys = sorted(restrictions.keys())
583
+ restr_keys = tuple(restr_keys)
584
+
585
+ # If it doesn't exist, then create it.
586
+ if restr_keys not in self._indexes:
587
+ self._add_index(restr_keys)
588
+
589
+ vals = tuple(restrictions[key] for key in restr_keys)
590
+ return iter(self._indexes[restr_keys].get(vals, []))
591
+
592
+ def _add_index(self, restr_keys):
593
+ """
594
+ A helper function for ``select``, which creates a new index for
595
+ a given set of attributes (aka restriction keys).
596
+ """
597
+ # Make sure it's a valid index.
598
+ for key in restr_keys:
599
+ if not hasattr(EdgeI, key):
600
+ raise ValueError("Bad restriction: %s" % key)
601
+
602
+ # Create the index.
603
+ index = self._indexes[restr_keys] = {}
604
+
605
+ # Add all existing edges to the index.
606
+ for edge in self._edges:
607
+ vals = tuple(getattr(edge, key)() for key in restr_keys)
608
+ index.setdefault(vals, []).append(edge)
609
+
610
+ def _register_with_indexes(self, edge):
611
+ """
612
+ A helper function for ``insert``, which registers the new
613
+ edge with all existing indexes.
614
+ """
615
+ for (restr_keys, index) in self._indexes.items():
616
+ vals = tuple(getattr(edge, key)() for key in restr_keys)
617
+ index.setdefault(vals, []).append(edge)
618
+
619
+ # ////////////////////////////////////////////////////////////
620
+ # Edge Insertion
621
+ # ////////////////////////////////////////////////////////////
622
+
623
+ def insert_with_backpointer(self, new_edge, previous_edge, child_edge):
624
+ """
625
+ Add a new edge to the chart, using a pointer to the previous edge.
626
+ """
627
+ cpls = self.child_pointer_lists(previous_edge)
628
+ new_cpls = [cpl + (child_edge,) for cpl in cpls]
629
+ return self.insert(new_edge, *new_cpls)
630
+
631
+ def insert(self, edge, *child_pointer_lists):
632
+ """
633
+ Add a new edge to the chart, and return True if this operation
634
+ modified the chart. In particular, return true iff the chart
635
+ did not already contain ``edge``, or if it did not already associate
636
+ ``child_pointer_lists`` with ``edge``.
637
+
638
+ :type edge: EdgeI
639
+ :param edge: The new edge
640
+ :type child_pointer_lists: sequence of tuple(EdgeI)
641
+ :param child_pointer_lists: A sequence of lists of the edges that
642
+ were used to form this edge. This list is used to reconstruct
643
+ the trees (or partial trees) that are associated with ``edge``.
644
+ :rtype: bool
645
+ """
646
+ # Is it a new edge?
647
+ if edge not in self._edge_to_cpls:
648
+ # Add it to the list of edges.
649
+ self._append_edge(edge)
650
+ # Register with indexes.
651
+ self._register_with_indexes(edge)
652
+
653
+ # Get the set of child pointer lists for this edge.
654
+ cpls = self._edge_to_cpls.setdefault(edge, OrderedDict())
655
+ chart_was_modified = False
656
+ for child_pointer_list in child_pointer_lists:
657
+ child_pointer_list = tuple(child_pointer_list)
658
+ if child_pointer_list not in cpls:
659
+ # It's a new CPL; register it, and return true.
660
+ cpls[child_pointer_list] = True
661
+ chart_was_modified = True
662
+ return chart_was_modified
663
+
664
+ def _append_edge(self, edge):
665
+ self._edges.append(edge)
666
+
667
+ # ////////////////////////////////////////////////////////////
668
+ # Tree extraction & child pointer lists
669
+ # ////////////////////////////////////////////////////////////
670
+
671
+ def parses(self, root, tree_class=Tree):
672
+ """
673
+ Return an iterator of the complete tree structures that span
674
+ the entire chart, and whose root node is ``root``.
675
+ """
676
+ for edge in self.select(start=0, end=self._num_leaves, lhs=root):
677
+ yield from self.trees(edge, tree_class=tree_class, complete=True)
678
+
679
+ def trees(self, edge, tree_class=Tree, complete=False):
680
+ """
681
+ Return an iterator of the tree structures that are associated
682
+ with ``edge``.
683
+
684
+ If ``edge`` is incomplete, then the unexpanded children will be
685
+ encoded as childless subtrees, whose node value is the
686
+ corresponding terminal or nonterminal.
687
+
688
+ :rtype: list(Tree)
689
+ :note: If two trees share a common subtree, then the same
690
+ Tree may be used to encode that subtree in
691
+ both trees. If you need to eliminate this subtree
692
+ sharing, then create a deep copy of each tree.
693
+ """
694
+ return iter(self._trees(edge, complete, memo={}, tree_class=tree_class))
695
+
696
+ def _trees(self, edge, complete, memo, tree_class):
697
+ """
698
+ A helper function for ``trees``.
699
+
700
+ :param memo: A dictionary used to record the trees that we've
701
+ generated for each edge, so that when we see an edge more
702
+ than once, we can reuse the same trees.
703
+ """
704
+ # If we've seen this edge before, then reuse our old answer.
705
+ if edge in memo:
706
+ return memo[edge]
707
+
708
+ # when we're reading trees off the chart, don't use incomplete edges
709
+ if complete and edge.is_incomplete():
710
+ return []
711
+
712
+ # Leaf edges.
713
+ if isinstance(edge, LeafEdge):
714
+ leaf = self._tokens[edge.start()]
715
+ memo[edge] = [leaf]
716
+ return [leaf]
717
+
718
+ # Until we're done computing the trees for edge, set
719
+ # memo[edge] to be empty. This has the effect of filtering
720
+ # out any cyclic trees (i.e., trees that contain themselves as
721
+ # descendants), because if we reach this edge via a cycle,
722
+ # then it will appear that the edge doesn't generate any trees.
723
+ memo[edge] = []
724
+ trees = []
725
+ lhs = edge.lhs().symbol()
726
+
727
+ # Each child pointer list can be used to form trees.
728
+ for cpl in self.child_pointer_lists(edge):
729
+ # Get the set of child choices for each child pointer.
730
+ # child_choices[i] is the set of choices for the tree's
731
+ # ith child.
732
+ child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl]
733
+
734
+ # For each combination of children, add a tree.
735
+ for children in itertools.product(*child_choices):
736
+ trees.append(tree_class(lhs, children))
737
+
738
+ # If the edge is incomplete, then extend it with "partial trees":
739
+ if edge.is_incomplete():
740
+ unexpanded = [tree_class(elt, []) for elt in edge.rhs()[edge.dot() :]]
741
+ for tree in trees:
742
+ tree.extend(unexpanded)
743
+
744
+ # Update the memoization dictionary.
745
+ memo[edge] = trees
746
+
747
+ # Return the list of trees.
748
+ return trees
749
+
750
+ def child_pointer_lists(self, edge):
751
+ """
752
+ Return the set of child pointer lists for the given edge.
753
+ Each child pointer list is a list of edges that have
754
+ been used to form this edge.
755
+
756
+ :rtype: list(list(EdgeI))
757
+ """
758
+ # Make a copy, in case they modify it.
759
+ return self._edge_to_cpls.get(edge, {}).keys()
760
+
761
+ # ////////////////////////////////////////////////////////////
762
+ # Display
763
+ # ////////////////////////////////////////////////////////////
764
+ def pretty_format_edge(self, edge, width=None):
765
+ """
766
+ Return a pretty-printed string representation of a given edge
767
+ in this chart.
768
+
769
+ :rtype: str
770
+ :param width: The number of characters allotted to each
771
+ index in the sentence.
772
+ """
773
+ if width is None:
774
+ width = 50 // (self.num_leaves() + 1)
775
+ (start, end) = (edge.start(), edge.end())
776
+
777
+ str = "|" + ("." + " " * (width - 1)) * start
778
+
779
+ # Zero-width edges are "#" if complete, ">" if incomplete
780
+ if start == end:
781
+ if edge.is_complete():
782
+ str += "#"
783
+ else:
784
+ str += ">"
785
+
786
+ # Spanning complete edges are "[===]"; Other edges are
787
+ # "[---]" if complete, "[--->" if incomplete
788
+ elif edge.is_complete() and edge.span() == (0, self._num_leaves):
789
+ str += "[" + ("=" * width) * (end - start - 1) + "=" * (width - 1) + "]"
790
+ elif edge.is_complete():
791
+ str += "[" + ("-" * width) * (end - start - 1) + "-" * (width - 1) + "]"
792
+ else:
793
+ str += "[" + ("-" * width) * (end - start - 1) + "-" * (width - 1) + ">"
794
+
795
+ str += (" " * (width - 1) + ".") * (self._num_leaves - end)
796
+ return str + "| %s" % edge
797
+
798
+ def pretty_format_leaves(self, width=None):
799
+ """
800
+ Return a pretty-printed string representation of this
801
+ chart's leaves. This string can be used as a header
802
+ for calls to ``pretty_format_edge``.
803
+ """
804
+ if width is None:
805
+ width = 50 // (self.num_leaves() + 1)
806
+
807
+ if self._tokens is not None and width > 1:
808
+ header = "|."
809
+ for tok in self._tokens:
810
+ header += tok[: width - 1].center(width - 1) + "."
811
+ header += "|"
812
+ else:
813
+ header = ""
814
+
815
+ return header
816
+
817
+ def pretty_format(self, width=None):
818
+ """
819
+ Return a pretty-printed string representation of this chart.
820
+
821
+ :param width: The number of characters allotted to each
822
+ index in the sentence.
823
+ :rtype: str
824
+ """
825
+ if width is None:
826
+ width = 50 // (self.num_leaves() + 1)
827
+ # sort edges: primary key=length, secondary key=start index.
828
+ # (and filter out the token edges)
829
+ edges = sorted((e.length(), e.start(), e) for e in self)
830
+ edges = [e for (_, _, e) in edges]
831
+
832
+ return (
833
+ self.pretty_format_leaves(width)
834
+ + "\n"
835
+ + "\n".join(self.pretty_format_edge(edge, width) for edge in edges)
836
+ )
837
+
838
+ # ////////////////////////////////////////////////////////////
839
+ # Display: Dot (AT&T Graphviz)
840
+ # ////////////////////////////////////////////////////////////
841
+
842
+ def dot_digraph(self):
843
+ # Header
844
+ s = "digraph nltk_chart {\n"
845
+ # s += ' size="5,5";\n'
846
+ s += " rankdir=LR;\n"
847
+ s += " node [height=0.1,width=0.1];\n"
848
+ s += ' node [style=filled, color="lightgray"];\n'
849
+
850
+ # Set up the nodes
851
+ for y in range(self.num_edges(), -1, -1):
852
+ if y == 0:
853
+ s += ' node [style=filled, color="black"];\n'
854
+ for x in range(self.num_leaves() + 1):
855
+ if y == 0 or (
856
+ x <= self._edges[y - 1].start() or x >= self._edges[y - 1].end()
857
+ ):
858
+ s += ' %04d.%04d [label=""];\n' % (x, y)
859
+
860
+ # Add a spacer
861
+ s += " x [style=invis]; x->0000.0000 [style=invis];\n"
862
+
863
+ # Declare ranks.
864
+ for x in range(self.num_leaves() + 1):
865
+ s += " {rank=same;"
866
+ for y in range(self.num_edges() + 1):
867
+ if y == 0 or (
868
+ x <= self._edges[y - 1].start() or x >= self._edges[y - 1].end()
869
+ ):
870
+ s += " %04d.%04d" % (x, y)
871
+ s += "}\n"
872
+
873
+ # Add the leaves
874
+ s += " edge [style=invis, weight=100];\n"
875
+ s += " node [shape=plaintext]\n"
876
+ s += " 0000.0000"
877
+ for x in range(self.num_leaves()):
878
+ s += "->%s->%04d.0000" % (self.leaf(x), x + 1)
879
+ s += ";\n\n"
880
+
881
+ # Add the edges
882
+ s += " edge [style=solid, weight=1];\n"
883
+ for y, edge in enumerate(self):
884
+ for x in range(edge.start()):
885
+ s += ' %04d.%04d -> %04d.%04d [style="invis"];\n' % (
886
+ x,
887
+ y + 1,
888
+ x + 1,
889
+ y + 1,
890
+ )
891
+ s += ' %04d.%04d -> %04d.%04d [label="%s"];\n' % (
892
+ edge.start(),
893
+ y + 1,
894
+ edge.end(),
895
+ y + 1,
896
+ edge,
897
+ )
898
+ for x in range(edge.end(), self.num_leaves()):
899
+ s += ' %04d.%04d -> %04d.%04d [style="invis"];\n' % (
900
+ x,
901
+ y + 1,
902
+ x + 1,
903
+ y + 1,
904
+ )
905
+ s += "}\n"
906
+ return s
907
+
908
+
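+ # Usage sketch (illustrative, not part of the original module; assumes the
+ # ``demo_grammar()`` helper defined near the end of this file):
+ #
+ #     chart = ChartParser(demo_grammar()).chart_parse("John saw a dog".split())
+ #     for edge in chart.select(is_complete=True, start=0):
+ #         print(chart.pretty_format_edge(edge))
+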
909
+ ########################################################################
910
+ ## Chart Rules
911
+ ########################################################################
912
+
913
+
914
+ class ChartRuleI:
915
+ """
916
+ A rule that specifies what new edges are licensed by any given set
917
+ of existing edges. Each chart rule expects a fixed number of
918
+ edges, as indicated by the class variable ``NUM_EDGES``. In
919
+ particular:
920
+
921
+ - A chart rule with ``NUM_EDGES=0`` specifies what new edges are
922
+ licensed, regardless of existing edges.
923
+ - A chart rule with ``NUM_EDGES=1`` specifies what new edges are
924
+ licensed by a single existing edge.
925
+ - A chart rule with ``NUM_EDGES=2`` specifies what new edges are
926
+ licensed by a pair of existing edges.
927
+
928
+ :type NUM_EDGES: int
929
+ :cvar NUM_EDGES: The number of existing edges that this rule uses
930
+ to license new edges. Typically, this number ranges from zero
931
+ to two.
932
+ """
933
+
934
+ def apply(self, chart, grammar, *edges):
935
+ """
936
+ Return a generator that will add edges licensed by this rule
937
+ and the given edges to the chart, one at a time. Each
938
+ time the generator is resumed, it will either add a new
939
+ edge and yield that edge; or return.
940
+
941
+ :type edges: list(EdgeI)
942
+ :param edges: A set of existing edges. The number of edges
943
+ that should be passed to ``apply()`` is specified by the
944
+ ``NUM_EDGES`` class variable.
945
+ :rtype: iter(EdgeI)
946
+ """
947
+ raise NotImplementedError()
948
+
949
+ def apply_everywhere(self, chart, grammar):
950
+ """
951
+ Return a generator that will add all edges licensed by
952
+ this rule, given the edges that are currently in the
953
+ chart, one at a time. Each time the generator is resumed,
954
+ it will either add a new edge and yield that edge; or return.
955
+
956
+ :rtype: iter(EdgeI)
957
+ """
958
+ raise NotImplementedError()
959
+
960
+
961
+ class AbstractChartRule(ChartRuleI):
962
+ """
963
+ An abstract base class for chart rules. ``AbstractChartRule``
964
+ provides:
965
+
966
+ - A default implementation for ``apply``.
967
+ - A default implementation for ``apply_everywhere``
968
+ (Currently, this implementation assumes that ``NUM_EDGES <= 3``.)
969
+ - A default implementation for ``__str__``, which returns a
970
+ name based on the rule's class name.
971
+ """
972
+
973
+ # Subclasses must define apply.
974
+ def apply(self, chart, grammar, *edges):
975
+ raise NotImplementedError()
976
+
977
+ # Default: loop through the given number of edges, and call
978
+ # self.apply() for each set of edges.
979
+ def apply_everywhere(self, chart, grammar):
980
+ if self.NUM_EDGES == 0:
981
+ yield from self.apply(chart, grammar)
982
+
983
+ elif self.NUM_EDGES == 1:
984
+ for e1 in chart:
985
+ yield from self.apply(chart, grammar, e1)
986
+
987
+ elif self.NUM_EDGES == 2:
988
+ for e1 in chart:
989
+ for e2 in chart:
990
+ yield from self.apply(chart, grammar, e1, e2)
991
+
992
+ elif self.NUM_EDGES == 3:
993
+ for e1 in chart:
994
+ for e2 in chart:
995
+ for e3 in chart:
996
+ yield from self.apply(chart, grammar, e1, e2, e3)
997
+
998
+ else:
999
+ raise AssertionError("NUM_EDGES>3 is not currently supported")
1000
+
1001
+ # Default: return a name based on the class name.
1002
+ def __str__(self):
1003
+ # Add spaces between InitialCapsWords.
1004
+ return re.sub("([a-z])([A-Z])", r"\1 \2", self.__class__.__name__)
1005
+
1006
+
1007
+ # ////////////////////////////////////////////////////////////
1008
+ # Fundamental Rule
1009
+ # ////////////////////////////////////////////////////////////
1010
+
1011
+
1012
+ class FundamentalRule(AbstractChartRule):
1013
+ r"""
1014
+ A rule that joins two adjacent edges to form a single combined
1015
+ edge. In particular, this rule specifies that any pair of edges
1016
+
1017
+ - ``[A -> alpha \* B beta][i:j]``
1018
+ - ``[B -> gamma \*][j:k]``
1019
+
1020
+ licenses the edge:
1021
+
1022
+ - ``[A -> alpha B \* beta][i:k]``
1023
+ """
1024
+
1025
+ NUM_EDGES = 2
1026
+
1027
+ def apply(self, chart, grammar, left_edge, right_edge):
1028
+ # Make sure the rule is applicable.
1029
+ if not (
1030
+ left_edge.is_incomplete()
1031
+ and right_edge.is_complete()
1032
+ and left_edge.end() == right_edge.start()
1033
+ and left_edge.nextsym() == right_edge.lhs()
1034
+ ):
1035
+ return
1036
+
1037
+ # Construct the new edge.
1038
+ new_edge = left_edge.move_dot_forward(right_edge.end())
1039
+
1040
+ # Insert it into the chart.
1041
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
1042
+ yield new_edge
1043
+
1044
+
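+ # Worked example (illustrative, using the toy grammar from ``demo_grammar()``
+ # below): given the incomplete edge [VP -> Verb * NP][1:2] and the complete
+ # edge [NP -> Det Noun *][2:4], ``FundamentalRule().apply(chart, grammar,
+ # left_edge, right_edge)`` yields and inserts the combined edge
+ # [VP -> Verb NP *][1:4].
+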
1045
+ class SingleEdgeFundamentalRule(FundamentalRule):
1046
+ r"""
1047
+ A rule that joins a given edge with adjacent edges in the chart,
1048
+ to form combined edges. In particular, this rule specifies that
1049
+ either of the edges:
1050
+
1051
+ - ``[A -> alpha \* B beta][i:j]``
1052
+ - ``[B -> gamma \*][j:k]``
1053
+
1054
+ licenses the edge:
1055
+
1056
+ - ``[A -> alpha B \* beta][i:k]``
1057
+
1058
+ if the other edge is already in the chart.
1059
+
1060
+ :note: This is basically ``FundamentalRule``, with one edge left
1061
+ unspecified.
1062
+ """
1063
+
1064
+ NUM_EDGES = 1
1065
+
1066
+ def apply(self, chart, grammar, edge):
1067
+ if edge.is_incomplete():
1068
+ yield from self._apply_incomplete(chart, grammar, edge)
1069
+ else:
1070
+ yield from self._apply_complete(chart, grammar, edge)
1071
+
1072
+ def _apply_complete(self, chart, grammar, right_edge):
1073
+ for left_edge in chart.select(
1074
+ end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()
1075
+ ):
1076
+ new_edge = left_edge.move_dot_forward(right_edge.end())
1077
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
1078
+ yield new_edge
1079
+
1080
+ def _apply_incomplete(self, chart, grammar, left_edge):
1081
+ for right_edge in chart.select(
1082
+ start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()
1083
+ ):
1084
+ new_edge = left_edge.move_dot_forward(right_edge.end())
1085
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
1086
+ yield new_edge
1087
+
1088
+
1089
+ # ////////////////////////////////////////////////////////////
1090
+ # Inserting Terminal Leaves
1091
+ # ////////////////////////////////////////////////////////////
1092
+
1093
+
1094
+ class LeafInitRule(AbstractChartRule):
1095
+ NUM_EDGES = 0
1096
+
1097
+ def apply(self, chart, grammar):
1098
+ for index in range(chart.num_leaves()):
1099
+ new_edge = LeafEdge(chart.leaf(index), index)
1100
+ if chart.insert(new_edge, ()):
1101
+ yield new_edge
1102
+
1103
+
1104
+ # ////////////////////////////////////////////////////////////
1105
+ # Top-Down Prediction
1106
+ # ////////////////////////////////////////////////////////////
1107
+
1108
+
1109
+ class TopDownInitRule(AbstractChartRule):
1110
+ r"""
1111
+ A rule licensing edges corresponding to the grammar productions for
1112
+ the grammar's start symbol. In particular, this rule specifies that
1113
+ ``[S -> \* alpha][0:i]`` is licensed for each grammar production
1114
+ ``S -> alpha``, where ``S`` is the grammar's start symbol.
1115
+ """
1116
+
1117
+ NUM_EDGES = 0
1118
+
1119
+ def apply(self, chart, grammar):
1120
+ for prod in grammar.productions(lhs=grammar.start()):
1121
+ new_edge = TreeEdge.from_production(prod, 0)
1122
+ if chart.insert(new_edge, ()):
1123
+ yield new_edge
1124
+
1125
+
1126
+ class TopDownPredictRule(AbstractChartRule):
1127
+ r"""
1128
+ A rule licensing edges corresponding to the grammar productions
1129
+ for the nonterminal following an incomplete edge's dot. In
1130
+ particular, this rule specifies that
1131
+ ``[A -> alpha \* B beta][i:j]`` licenses the edge
1132
+ ``[B -> \* gamma][j:j]`` for each grammar production ``B -> gamma``.
1133
+
1134
+ :note: This rule corresponds to the Predictor Rule in Earley parsing.
1135
+ """
1136
+
1137
+ NUM_EDGES = 1
1138
+
1139
+ def apply(self, chart, grammar, edge):
1140
+ if edge.is_complete():
1141
+ return
1142
+ for prod in grammar.productions(lhs=edge.nextsym()):
1143
+ new_edge = TreeEdge.from_production(prod, edge.end())
1144
+ if chart.insert(new_edge, ()):
1145
+ yield new_edge
1146
+
1147
+
1148
+ class CachedTopDownPredictRule(TopDownPredictRule):
1149
+ r"""
1150
+ A cached version of ``TopDownPredictRule``. After the first time
1151
+ this rule is applied to an edge with a given ``end`` and ``next``,
1152
+ it will not generate any more edges for edges with that ``end`` and
1153
+ ``next``.
1154
+
1155
+ If ``chart`` or ``grammar`` are changed, then the cache is flushed.
1156
+ """
1157
+
1158
+ def __init__(self):
1159
+ TopDownPredictRule.__init__(self)
1160
+ self._done = {}
1161
+
1162
+ def apply(self, chart, grammar, edge):
1163
+ if edge.is_complete():
1164
+ return
1165
+ nextsym, index = edge.nextsym(), edge.end()
1166
+ if not is_nonterminal(nextsym):
1167
+ return
1168
+
1169
+ # If we've already applied this rule to an edge with the same
1170
+ # next & end, and the chart & grammar have not changed, then
1171
+ # just return (no new edges to add).
1172
+ done = self._done.get((nextsym, index), (None, None))
1173
+ if done[0] is chart and done[1] is grammar:
1174
+ return
1175
+
1176
+ # Add all the edges indicated by the top down expand rule.
1177
+ for prod in grammar.productions(lhs=nextsym):
1178
+ # If the left corner in the predicted production is
1179
+ # leaf, it must match with the input.
1180
+ if prod.rhs():
1181
+ first = prod.rhs()[0]
1182
+ if is_terminal(first):
1183
+ if index >= chart.num_leaves() or first != chart.leaf(index):
1184
+ continue
1185
+
1186
+ new_edge = TreeEdge.from_production(prod, index)
1187
+ if chart.insert(new_edge, ()):
1188
+ yield new_edge
1189
+
1190
+ # Record the fact that we've applied this rule.
1191
+ self._done[nextsym, index] = (chart, grammar)
1192
+
1193
+
1194
+ # ////////////////////////////////////////////////////////////
1195
+ # Bottom-Up Prediction
1196
+ # ////////////////////////////////////////////////////////////
1197
+
1198
+
1199
+ class BottomUpPredictRule(AbstractChartRule):
1200
+ r"""
1201
+ A rule licensing any edge corresponding to a production whose
1202
+ right-hand side begins with a complete edge's left-hand side. In
1203
+ particular, this rule specifies that ``[A -> alpha \*]`` licenses
1204
+ the edge ``[B -> \* A beta]`` for each grammar production ``B -> A beta``.
1205
+ """
1206
+
1207
+ NUM_EDGES = 1
1208
+
1209
+ def apply(self, chart, grammar, edge):
1210
+ if edge.is_incomplete():
1211
+ return
1212
+ for prod in grammar.productions(rhs=edge.lhs()):
1213
+ new_edge = TreeEdge.from_production(prod, edge.start())
1214
+ if chart.insert(new_edge, ()):
1215
+ yield new_edge
1216
+
1217
+
1218
+ class BottomUpPredictCombineRule(BottomUpPredictRule):
1219
+ r"""
1220
+ A rule licensing any edge corresponding to a production whose
1221
+ right-hand side begins with a complete edge's left-hand side. In
1222
+ particular, this rule specifies that ``[A -> alpha \*]``
1223
+ licenses the edge ``[B -> A \* beta]`` for each grammar
1224
+ production ``B -> A beta``.
1225
+
1226
+ :note: This is like ``BottomUpPredictRule``, but it also applies
1227
+ the ``FundamentalRule`` to the resulting edge.
1228
+ """
1229
+
1230
+ NUM_EDGES = 1
1231
+
1232
+ def apply(self, chart, grammar, edge):
1233
+ if edge.is_incomplete():
1234
+ return
1235
+ for prod in grammar.productions(rhs=edge.lhs()):
1236
+ new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1)
1237
+ if chart.insert(new_edge, (edge,)):
1238
+ yield new_edge
1239
+
1240
+
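+ # Illustrative contrast (toy grammar from ``demo_grammar()`` below): from the
+ # complete edge [NP -> Det Noun *][2:4], ``BottomUpPredictRule`` predicts
+ # [S -> * NP VP][2:2], whereas this rule directly yields [S -> NP * VP][2:4],
+ # saving one application of the fundamental rule.
+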
1241
+ class EmptyPredictRule(AbstractChartRule):
1242
+ """
1243
+ A rule that inserts all empty productions as passive edges,
1244
+ in every position in the chart.
1245
+ """
1246
+
1247
+ NUM_EDGES = 0
1248
+
1249
+ def apply(self, chart, grammar):
1250
+ for prod in grammar.productions(empty=True):
1251
+ for index in range(chart.num_leaves() + 1):
1252
+ new_edge = TreeEdge.from_production(prod, index)
1253
+ if chart.insert(new_edge, ()):
1254
+ yield new_edge
1255
+
1256
+
1257
+ ########################################################################
1258
+ ## Filtered Bottom Up
1259
+ ########################################################################
1260
+
1261
+
1262
+ class FilteredSingleEdgeFundamentalRule(SingleEdgeFundamentalRule):
1263
+ def _apply_complete(self, chart, grammar, right_edge):
1264
+ end = right_edge.end()
1265
+ nexttoken = end < chart.num_leaves() and chart.leaf(end)
1266
+ for left_edge in chart.select(
1267
+ end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()
1268
+ ):
1269
+ if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()):
1270
+ new_edge = left_edge.move_dot_forward(right_edge.end())
1271
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
1272
+ yield new_edge
1273
+
1274
+ def _apply_incomplete(self, chart, grammar, left_edge):
1275
+ for right_edge in chart.select(
1276
+ start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()
1277
+ ):
1278
+ end = right_edge.end()
1279
+ nexttoken = end < chart.num_leaves() and chart.leaf(end)
1280
+ if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()):
1281
+ new_edge = left_edge.move_dot_forward(right_edge.end())
1282
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
1283
+ yield new_edge
1284
+
1285
+
1286
+ class FilteredBottomUpPredictCombineRule(BottomUpPredictCombineRule):
1287
+ def apply(self, chart, grammar, edge):
1288
+ if edge.is_incomplete():
1289
+ return
1290
+
1291
+ end = edge.end()
1292
+ nexttoken = end < chart.num_leaves() and chart.leaf(end)
1293
+ for prod in grammar.productions(rhs=edge.lhs()):
1294
+ if _bottomup_filter(grammar, nexttoken, prod.rhs()):
1295
+ new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1)
1296
+ if chart.insert(new_edge, (edge,)):
1297
+ yield new_edge
1298
+
1299
+
1300
+ def _bottomup_filter(grammar, nexttoken, rhs, dot=0):
1301
+ if len(rhs) <= dot + 1:
1302
+ return True
1303
+ _next = rhs[dot + 1]
1304
+ if is_terminal(_next):
1305
+ return nexttoken == _next
1306
+ else:
1307
+ return grammar.is_leftcorner(_next, nexttoken)
1308
+
1309
+
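+ # Sketch of the filter's effect (illustrative): for a proposed edge whose next
+ # unconsumed symbol is ``rhs[dot + 1]``, the extension is kept only if that
+ # symbol is a terminal matching the next input token, or a nonterminal for
+ # which the next token is a possible left corner (``grammar.is_leftcorner``);
+ # if there is no such symbol, the extension is always kept.
+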
1310
+ ########################################################################
1311
+ ## Generic Chart Parser
1312
+ ########################################################################
1313
+
1314
+ TD_STRATEGY = [
1315
+ LeafInitRule(),
1316
+ TopDownInitRule(),
1317
+ CachedTopDownPredictRule(),
1318
+ SingleEdgeFundamentalRule(),
1319
+ ]
1320
+ BU_STRATEGY = [
1321
+ LeafInitRule(),
1322
+ EmptyPredictRule(),
1323
+ BottomUpPredictRule(),
1324
+ SingleEdgeFundamentalRule(),
1325
+ ]
1326
+ BU_LC_STRATEGY = [
1327
+ LeafInitRule(),
1328
+ EmptyPredictRule(),
1329
+ BottomUpPredictCombineRule(),
1330
+ SingleEdgeFundamentalRule(),
1331
+ ]
1332
+
1333
+ LC_STRATEGY = [
1334
+ LeafInitRule(),
1335
+ FilteredBottomUpPredictCombineRule(),
1336
+ FilteredSingleEdgeFundamentalRule(),
1337
+ ]
1338
+
1339
+
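+ # A strategy is simply an ordered list of ``ChartRuleI`` instances; a custom
+ # mix is possible (illustrative sketch, not part of the original module):
+ #
+ #     MY_STRATEGY = [LeafInitRule(), TopDownInitRule(),
+ #                    TopDownPredictRule(), SingleEdgeFundamentalRule()]
+ #     parser = ChartParser(demo_grammar(), MY_STRATEGY)
+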
1340
+ class ChartParser(ParserI):
1341
+ """
1342
+ A generic chart parser. A "strategy", or list of
1343
+ ``ChartRuleI`` instances, is used to decide what edges to add to
1344
+ the chart. In particular, ``ChartParser`` uses the following
1345
+ algorithm to parse texts:
1346
+
1347
+ | Until no new edges are added:
1348
+ | For each *rule* in *strategy*:
1349
+ | Apply *rule* to any applicable edges in the chart.
1350
+ | Return any complete parses in the chart
1351
+ """
1352
+
1353
+ def __init__(
1354
+ self,
1355
+ grammar,
1356
+ strategy=BU_LC_STRATEGY,
1357
+ trace=0,
1358
+ trace_chart_width=50,
1359
+ use_agenda=True,
1360
+ chart_class=Chart,
1361
+ ):
1362
+ """
1363
+ Create a new chart parser, that uses ``grammar`` to parse
1364
+ texts.
1365
+
1366
+ :type grammar: CFG
1367
+ :param grammar: The grammar used to parse texts.
1368
+ :type strategy: list(ChartRuleI)
1369
+ :param strategy: A list of rules that should be used to decide
1370
+ what edges to add to the chart (bottom-up left-corner strategy by default).
1371
+ :type trace: int
1372
+ :param trace: The level of tracing that should be used when
1373
+ parsing a text. ``0`` will generate no tracing output;
1374
+ and higher numbers will produce more verbose tracing
1375
+ output.
1376
+ :type trace_chart_width: int
1377
+ :param trace_chart_width: The default total width reserved for
1378
+ the chart in trace output. The remainder of each line will
1379
+ be used to display edges.
1380
+ :type use_agenda: bool
1381
+ :param use_agenda: Use an optimized agenda-based algorithm,
1382
+ if possible.
1383
+ :param chart_class: The class that should be used to create
1384
+ the parse charts.
1385
+ """
1386
+ self._grammar = grammar
1387
+ self._strategy = strategy
1388
+ self._trace = trace
1389
+ self._trace_chart_width = trace_chart_width
1390
+ # If the strategy only consists of axioms (NUM_EDGES==0) and
1391
+ # inference rules (NUM_EDGES==1), we can use an agenda-based algorithm:
1392
+ self._use_agenda = use_agenda
1393
+ self._chart_class = chart_class
1394
+
1395
+ self._axioms = []
1396
+ self._inference_rules = []
1397
+ for rule in strategy:
1398
+ if rule.NUM_EDGES == 0:
1399
+ self._axioms.append(rule)
1400
+ elif rule.NUM_EDGES == 1:
1401
+ self._inference_rules.append(rule)
1402
+ else:
1403
+ self._use_agenda = False
1404
+
1405
+ def grammar(self):
1406
+ return self._grammar
1407
+
1408
+ def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width):
1409
+ if not trace:
1410
+ return
1411
+ print_rule_header = trace > 1
1412
+ for edge in new_edges:
1413
+ if print_rule_header:
1414
+ print("%s:" % rule)
1415
+ print_rule_header = False
1416
+ print(chart.pretty_format_edge(edge, edge_width))
1417
+
1418
+ def chart_parse(self, tokens, trace=None):
1419
+ """
1420
+ Return the final parse ``Chart`` from which all possible
1421
+ parse trees can be extracted.
1422
+
1423
+ :param tokens: The sentence to be parsed
1424
+ :type tokens: list(str)
1425
+ :rtype: Chart
1426
+ """
1427
+ if trace is None:
1428
+ trace = self._trace
1429
+ trace_new_edges = self._trace_new_edges
1430
+
1431
+ tokens = list(tokens)
1432
+ self._grammar.check_coverage(tokens)
1433
+ chart = self._chart_class(tokens)
1434
+ grammar = self._grammar
1435
+
1436
+ # Width, for printing trace edges.
1437
+ trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1)
1438
+ if trace:
1439
+ print(chart.pretty_format_leaves(trace_edge_width))
1440
+
1441
+ if self._use_agenda:
1442
+ # Use an agenda-based algorithm.
1443
+ for axiom in self._axioms:
1444
+ new_edges = list(axiom.apply(chart, grammar))
1445
+ trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
1446
+
1447
+ inference_rules = self._inference_rules
1448
+ agenda = chart.edges()
1449
+ # We reverse the initial agenda, since it is a stack
1450
+ # but chart.edges() functions as a queue.
1451
+ agenda.reverse()
1452
+ while agenda:
1453
+ edge = agenda.pop()
1454
+ for rule in inference_rules:
1455
+ new_edges = list(rule.apply(chart, grammar, edge))
1456
+ if trace:
1457
+ trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
1458
+ agenda += new_edges
1459
+
1460
+ else:
1461
+ # Do not use an agenda-based algorithm.
1462
+ edges_added = True
1463
+ while edges_added:
1464
+ edges_added = False
1465
+ for rule in self._strategy:
1466
+ new_edges = list(rule.apply_everywhere(chart, grammar))
1467
+ edges_added = len(new_edges)
1468
+ trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
1469
+
1470
+ # Return the final chart.
1471
+ return chart
1472
+
1473
+ def parse(self, tokens, tree_class=Tree):
1474
+ chart = self.chart_parse(tokens)
1475
+ return iter(chart.parses(self._grammar.start(), tree_class=tree_class))
1476
+
1477
+
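+ # Minimal usage sketch (illustrative; assumes the ``demo_grammar()`` helper
+ # defined below):
+ #
+ #     parser = ChartParser(demo_grammar(), BU_LC_STRATEGY, trace=0)
+ #     for tree in parser.parse("John saw a dog".split()):
+ #         print(tree)
+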
1478
+ class TopDownChartParser(ChartParser):
1479
+ """
1480
+ A ``ChartParser`` using a top-down parsing strategy.
1481
+ See ``ChartParser`` for more information.
1482
+ """
1483
+
1484
+ def __init__(self, grammar, **parser_args):
1485
+ ChartParser.__init__(self, grammar, TD_STRATEGY, **parser_args)
1486
+
1487
+
1488
+ class BottomUpChartParser(ChartParser):
1489
+ """
1490
+ A ``ChartParser`` using a bottom-up parsing strategy.
1491
+ See ``ChartParser`` for more information.
1492
+ """
1493
+
1494
+ def __init__(self, grammar, **parser_args):
1495
+ if isinstance(grammar, PCFG):
1496
+ warnings.warn(
1497
+ "BottomUpChartParser only works for CFG, "
1498
+ "use BottomUpProbabilisticChartParser instead",
1499
+ category=DeprecationWarning,
1500
+ )
1501
+ ChartParser.__init__(self, grammar, BU_STRATEGY, **parser_args)
1502
+
1503
+
1504
+ class BottomUpLeftCornerChartParser(ChartParser):
1505
+ """
1506
+ A ``ChartParser`` using a bottom-up left-corner parsing strategy.
1507
+ This strategy is often more efficient than standard bottom-up.
1508
+ See ``ChartParser`` for more information.
1509
+ """
1510
+
1511
+ def __init__(self, grammar, **parser_args):
1512
+ ChartParser.__init__(self, grammar, BU_LC_STRATEGY, **parser_args)
1513
+
1514
+
1515
+ class LeftCornerChartParser(ChartParser):
1516
+ def __init__(self, grammar, **parser_args):
1517
+ if not grammar.is_nonempty():
1518
+ raise ValueError(
1519
+ "LeftCornerParser only works for grammars " "without empty productions."
1520
+ )
1521
+ ChartParser.__init__(self, grammar, LC_STRATEGY, **parser_args)
1522
+
1523
+
1524
+ ########################################################################
1525
+ ## Stepping Chart Parser
1526
+ ########################################################################
1527
+
1528
+
1529
+ class SteppingChartParser(ChartParser):
1530
+ """
1531
+ A ``ChartParser`` that allows you to step through the parsing
1532
+ process, adding a single edge at a time. It also allows you to
1533
+ change the parser's strategy or grammar midway through parsing a
1534
+ text.
1535
+
1536
+ The ``initialize`` method is used to start parsing a text. ``step``
1537
+ adds a single edge to the chart. ``set_strategy`` changes the
1538
+ strategy used by the chart parser. ``parses`` returns the set of
1539
+ parses that has been found by the chart parser.
1540
+
1541
+ :ivar _restart: Records whether the parser's strategy, grammar,
1542
+ or chart has been changed. If so, then ``step`` must restart
1543
+ the parsing algorithm.
1544
+ """
1545
+
1546
+ def __init__(self, grammar, strategy=[], trace=0):
1547
+ self._chart = None
1548
+ self._current_chartrule = None
1549
+ self._restart = False
1550
+ ChartParser.__init__(self, grammar, strategy, trace)
1551
+
1552
+ # ////////////////////////////////////////////////////////////
1553
+ # Initialization
1554
+ # ////////////////////////////////////////////////////////////
1555
+
1556
+ def initialize(self, tokens):
1557
+ "Begin parsing the given tokens."
1558
+ self._chart = Chart(list(tokens))
1559
+ self._restart = True
1560
+
1561
+ # ////////////////////////////////////////////////////////////
1562
+ # Stepping
1563
+ # ////////////////////////////////////////////////////////////
1564
+
1565
+ def step(self):
1566
+ """
1567
+ Return a generator that adds edges to the chart, one at a
1568
+ time. Each time the generator is resumed, it adds a single
1569
+ edge and yields that edge. If no more edges can be added,
1570
+ then it yields None.
1571
+
1572
+ If the parser's strategy, grammar, or chart is changed, then
1573
+ the generator will continue adding edges using the new
1574
+ strategy, grammar, or chart.
1575
+
1576
+ Note that this generator never terminates, since the grammar
1577
+ or strategy might be changed to values that would add new
1578
+ edges. Instead, it yields None when no more edges can be
1579
+ added with the current strategy and grammar.
1580
+ """
1581
+ if self._chart is None:
1582
+ raise ValueError("Parser must be initialized first")
1583
+ while True:
1584
+ self._restart = False
1585
+ w = 50 // (self._chart.num_leaves() + 1)
1586
+
1587
+ for e in self._parse():
1588
+ if self._trace > 1:
1589
+ print(self._current_chartrule)
1590
+ if self._trace > 0:
1591
+ print(self._chart.pretty_format_edge(e, w))
1592
+ yield e
1593
+ if self._restart:
1594
+ break
1595
+ else:
1596
+ yield None # No more edges.
1597
+
1598
+ def _parse(self):
1599
+ """
1600
+ A generator that implements the actual parsing algorithm.
1601
+ ``step`` iterates through this generator, and restarts it
1602
+ whenever the parser's strategy, grammar, or chart is modified.
1603
+ """
1604
+ chart = self._chart
1605
+ grammar = self._grammar
1606
+ edges_added = 1
1607
+ while edges_added > 0:
1608
+ edges_added = 0
1609
+ for rule in self._strategy:
1610
+ self._current_chartrule = rule
1611
+ for e in rule.apply_everywhere(chart, grammar):
1612
+ edges_added += 1
1613
+ yield e
1614
+
1615
+ # ////////////////////////////////////////////////////////////
1616
+ # Accessors
1617
+ # ////////////////////////////////////////////////////////////
1618
+
1619
+ def strategy(self):
1620
+ "Return the strategy used by this parser."
1621
+ return self._strategy
1622
+
1623
+ def grammar(self):
1624
+ "Return the grammar used by this parser."
1625
+ return self._grammar
1626
+
1627
+ def chart(self):
1628
+ "Return the chart that is used by this parser."
1629
+ return self._chart
1630
+
1631
+ def current_chartrule(self):
1632
+ "Return the chart rule used to generate the most recent edge."
1633
+ return self._current_chartrule
1634
+
1635
+ def parses(self, tree_class=Tree):
1636
+ "Return the parse trees currently contained in the chart."
1637
+ return self._chart.parses(self._grammar.start(), tree_class)
1638
+
1639
+ # ////////////////////////////////////////////////////////////
1640
+ # Parser modification
1641
+ # ////////////////////////////////////////////////////////////
1642
+
1643
+ def set_strategy(self, strategy):
1644
+ """
1645
+ Change the strategy that the parser uses to decide which edges
1646
+ to add to the chart.
1647
+
1648
+ :type strategy: list(ChartRuleI)
1649
+ :param strategy: A list of rules that should be used to decide
1650
+ what edges to add to the chart.
1651
+ """
1652
+ if strategy == self._strategy:
1653
+ return
1654
+ self._strategy = strategy[:] # Make a copy.
1655
+ self._restart = True
1656
+
1657
+ def set_grammar(self, grammar):
1658
+ "Change the grammar used by the parser."
1659
+ if grammar is self._grammar:
1660
+ return
1661
+ self._grammar = grammar
1662
+ self._restart = True
1663
+
1664
+ def set_chart(self, chart):
1665
+ "Load a given chart into the chart parser."
1666
+ if chart is self._chart:
1667
+ return
1668
+ self._chart = chart
1669
+ self._restart = True
1670
+
1671
+ # ////////////////////////////////////////////////////////////
1672
+ # Standard parser methods
1673
+ # ////////////////////////////////////////////////////////////
1674
+
1675
+ def parse(self, tokens, tree_class=Tree):
1676
+ tokens = list(tokens)
1677
+ self._grammar.check_coverage(tokens)
1678
+
1679
+ # Initialize ourselves.
1680
+ self.initialize(tokens)
1681
+
1682
+ # Step until no more edges are generated.
1683
+ for e in self.step():
1684
+ if e is None:
1685
+ break
1686
+
1687
+ # Return an iterator of complete parses.
1688
+ return self.parses(tree_class=tree_class)
1689
+
1690
+
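+ # Stepping sketch (illustrative): the parser can be driven one edge at a time,
+ # and the strategy may be swapped mid-parse:
+ #
+ #     cp = SteppingChartParser(demo_grammar(), TD_STRATEGY)
+ #     cp.initialize("John saw a dog".split())
+ #     stepper = cp.step()
+ #     edge1 = next(stepper)          # adds a single edge (or yields None)
+ #     cp.set_strategy(BU_STRATEGY)   # stepping resumes with the new strategy
+ #     edge2 = next(stepper)
+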
1691
+ ########################################################################
1692
+ ## Demo Code
1693
+ ########################################################################
1694
+
1695
+
1696
+ def demo_grammar():
1697
+ from nltk.grammar import CFG
1698
+
1699
+ return CFG.fromstring(
1700
+ """
1701
+ S -> NP VP
1702
+ PP -> "with" NP
1703
+ NP -> NP PP
1704
+ VP -> VP PP
1705
+ VP -> Verb NP
1706
+ VP -> Verb
1707
+ NP -> Det Noun
1708
+ NP -> "John"
1709
+ NP -> "I"
1710
+ Det -> "the"
1711
+ Det -> "my"
1712
+ Det -> "a"
1713
+ Noun -> "dog"
1714
+ Noun -> "cookie"
1715
+ Verb -> "ate"
1716
+ Verb -> "saw"
1717
+ Prep -> "with"
1718
+ Prep -> "under"
1719
+ """
1720
+ )
1721
+
1722
+
1723
+ def demo(
1724
+ choice=None,
1725
+ print_times=True,
1726
+ print_grammar=False,
1727
+ print_trees=True,
1728
+ trace=2,
1729
+ sent="I saw John with a dog with my cookie",
1730
+ numparses=5,
1731
+ ):
1732
+ """
1733
+ A demonstration of the chart parsers.
1734
+ """
1735
+ import sys
1736
+ import time
1737
+
1738
+ from nltk import CFG, Production, nonterminals
1739
+
1740
+ # The grammar for ChartParser and SteppingChartParser:
1741
+ grammar = demo_grammar()
1742
+ if print_grammar:
1743
+ print("* Grammar")
1744
+ print(grammar)
1745
+
1746
+ # Tokenize the sample sentence.
1747
+ print("* Sentence:")
1748
+ print(sent)
1749
+ tokens = sent.split()
1750
+ print(tokens)
1751
+ print()
1752
+
1753
+ # Ask the user which parser to test,
1754
+ # if the parser wasn't provided as an argument
1755
+ if choice is None:
1756
+ print(" 1: Top-down chart parser")
1757
+ print(" 2: Bottom-up chart parser")
1758
+ print(" 3: Bottom-up left-corner chart parser")
1759
+ print(" 4: Left-corner chart parser with bottom-up filter")
1760
+ print(" 5: Stepping chart parser (alternating top-down & bottom-up)")
1761
+ print(" 6: All parsers")
1762
+ print("\nWhich parser (1-6)? ", end=" ")
1763
+ choice = sys.stdin.readline().strip()
1764
+ print()
1765
+
1766
+ choice = str(choice)
1767
+ if choice not in "123456":
1768
+ print("Bad parser number")
1769
+ return
1770
+
1771
+ # Keep track of how long each parser takes.
1772
+ times = {}
1773
+
1774
+ strategies = {
1775
+ "1": ("Top-down", TD_STRATEGY),
1776
+ "2": ("Bottom-up", BU_STRATEGY),
1777
+ "3": ("Bottom-up left-corner", BU_LC_STRATEGY),
1778
+ "4": ("Filtered left-corner", LC_STRATEGY),
1779
+ }
1780
+ choices = []
1781
+ if choice in strategies:
1782
+ choices = [choice]
1783
+ if choice == "6":
1784
+ choices = "1234"
1785
+
1786
+ # Run the requested chart parser(s), except the stepping parser.
1787
+ for strategy in choices:
1788
+ print("* Strategy: " + strategies[strategy][0])
1789
+ print()
1790
+ cp = ChartParser(grammar, strategies[strategy][1], trace=trace)
1791
+ t = time.time()
1792
+ chart = cp.chart_parse(tokens)
1793
+ parses = list(chart.parses(grammar.start()))
1794
+
1795
+ times[strategies[strategy][0]] = time.time() - t
1796
+ print("Nr edges in chart:", len(chart.edges()))
1797
+ if numparses:
1798
+ assert len(parses) == numparses, "Not all parses found"
1799
+ if print_trees:
1800
+ for tree in parses:
1801
+ print(tree)
1802
+ else:
1803
+ print("Nr trees:", len(parses))
1804
+ print()
1805
+
1806
+ # Run the stepping parser, if requested.
1807
+ if choice in "56":
1808
+ print("* Strategy: Stepping (top-down vs bottom-up)")
1809
+ print()
1810
+ t = time.time()
1811
+ cp = SteppingChartParser(grammar, trace=trace)
1812
+ cp.initialize(tokens)
1813
+ for i in range(5):
1814
+ print("*** SWITCH TO TOP DOWN")
1815
+ cp.set_strategy(TD_STRATEGY)
1816
+ for j, e in enumerate(cp.step()):
1817
+ if j > 20 or e is None:
1818
+ break
1819
+ print("*** SWITCH TO BOTTOM UP")
1820
+ cp.set_strategy(BU_STRATEGY)
1821
+ for j, e in enumerate(cp.step()):
1822
+ if j > 20 or e is None:
1823
+ break
1824
+ times["Stepping"] = time.time() - t
1825
+ print("Nr edges in chart:", len(cp.chart().edges()))
1826
+ if numparses:
1827
+ assert len(list(cp.parses())) == numparses, "Not all parses found"
1828
+ if print_trees:
1829
+ for tree in cp.parses():
1830
+ print(tree)
1831
+ else:
1832
+ print("Nr trees:", len(list(cp.parses())))
1833
+ print()
1834
+
1835
+ # Print the times of all parsers:
1836
+ if not (print_times and times):
1837
+ return
1838
+ print("* Parsing times")
1839
+ print()
1840
+ maxlen = max(len(key) for key in times)
1841
+ format = "%" + repr(maxlen) + "s parser: %6.3fsec"
1842
+ times_items = times.items()
1843
+ for (parser, t) in sorted(times_items, key=lambda a: a[1]):
1844
+ print(format % (parser, t))
1845
+
1846
+
1847
+ if __name__ == "__main__":
1848
+ demo()
venv/lib/python3.10/site-packages/nltk/parse/dependencygraph.py ADDED
@@ -0,0 +1,799 @@
1
+ # Natural Language Toolkit: Dependency Grammars
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Jason Narad <[email protected]>
5
+ # Steven Bird <[email protected]> (modifications)
6
+ #
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+ #
10
+
11
+ """
12
+ Tools for reading and writing dependency trees.
13
+ The input is assumed to be in Malt-TAB format
14
+ (https://stp.lingfil.uu.se/~nivre/research/MaltXML.html).
15
+ """
16
+
17
+ import subprocess
18
+ import warnings
19
+ from collections import defaultdict
20
+ from itertools import chain
21
+ from pprint import pformat
22
+
23
+ from nltk.internals import find_binary
24
+ from nltk.tree import Tree
25
+
26
+ #################################################################
27
+ # DependencyGraph Class
28
+ #################################################################
29
+
30
+
31
+ class DependencyGraph:
32
+ """
33
+ A container for the nodes and labelled edges of a dependency structure.
34
+ """
35
+
36
+ def __init__(
37
+ self,
38
+ tree_str=None,
39
+ cell_extractor=None,
40
+ zero_based=False,
41
+ cell_separator=None,
42
+ top_relation_label="ROOT",
43
+ ):
44
+ """Dependency graph.
45
+
46
+ We place a dummy `TOP` node with the index 0, since the root node is
47
+ often assigned 0 as its head. This also means that the indexing of the
48
+ nodes corresponds directly to the Malt-TAB format, which starts at 1.
49
+
50
+ If zero_based is True, the input is assumed to be Malt-TAB-like, with node numbers
51
+ starting at 0 and the root node assigned -1 (as produced by, e.g.,
52
+ zpar).
53
+
54
+ :param str cell_separator: the cell separator. If not provided, cells
55
+ are split by whitespace.
56
+
57
+ :param str top_relation_label: the label by which the top relation is
58
+ identified, for example, `ROOT`, `null` or `TOP`.
59
+ """
60
+ self.nodes = defaultdict(
61
+ lambda: {
62
+ "address": None,
63
+ "word": None,
64
+ "lemma": None,
65
+ "ctag": None,
66
+ "tag": None,
67
+ "feats": None,
68
+ "head": None,
69
+ "deps": defaultdict(list),
70
+ "rel": None,
71
+ }
72
+ )
73
+
74
+ self.nodes[0].update({"ctag": "TOP", "tag": "TOP", "address": 0})
75
+
76
+ self.root = None
77
+
78
+ if tree_str:
79
+ self._parse(
80
+ tree_str,
81
+ cell_extractor=cell_extractor,
82
+ zero_based=zero_based,
83
+ cell_separator=cell_separator,
84
+ top_relation_label=top_relation_label,
85
+ )
86
+
87
+ def remove_by_address(self, address):
88
+ """
89
+ Removes the node with the given address. References
90
+ to this node in others will still exist.
91
+ """
92
+ del self.nodes[address]
93
+
94
+ def redirect_arcs(self, originals, redirect):
95
+ """
96
+ Redirects arcs to any of the nodes in the originals list
97
+ to the redirect node address.
98
+ """
99
+ for node in self.nodes.values():
100
+ new_deps = []
101
+ for dep in node["deps"]:
102
+ if dep in originals:
103
+ new_deps.append(redirect)
104
+ else:
105
+ new_deps.append(dep)
106
+ node["deps"] = new_deps
107
+
108
+ def add_arc(self, head_address, mod_address):
109
+ """
110
+ Adds an arc from the node specified by head_address to the
111
+ node specified by the mod address.
112
+ """
113
+ relation = self.nodes[mod_address]["rel"]
114
+ self.nodes[head_address]["deps"].setdefault(relation, [])
115
+ self.nodes[head_address]["deps"][relation].append(mod_address)
116
+ # self.nodes[head_address]['deps'].append(mod_address)
117
+
118
+ def connect_graph(self):
119
+ """
120
+ Fully connects all non-root nodes. All nodes are set to be dependents
121
+ of the root node.
122
+ """
123
+ for node1 in self.nodes.values():
124
+ for node2 in self.nodes.values():
125
+ if node1["address"] != node2["address"] and node2["rel"] != "TOP":
126
+ relation = node2["rel"]
127
+ node1["deps"].setdefault(relation, [])
128
+ node1["deps"][relation].append(node2["address"])
129
+ # node1['deps'].append(node2['address'])
130
+
131
+ def get_by_address(self, node_address):
132
+ """Return the node with the given address."""
133
+ return self.nodes[node_address]
134
+
135
+ def contains_address(self, node_address):
136
+ """
137
+ Returns true if the graph contains a node with the given node
138
+ address, false otherwise.
139
+ """
140
+ return node_address in self.nodes
141
+
142
+ def to_dot(self):
143
+ """Return a dot representation suitable for using with Graphviz.
144
+
145
+ >>> dg = DependencyGraph(
146
+ ... 'John N 2\\n'
147
+ ... 'loves V 0\\n'
148
+ ... 'Mary N 2'
149
+ ... )
150
+ >>> print(dg.to_dot())
151
+ digraph G{
152
+ edge [dir=forward]
153
+ node [shape=plaintext]
154
+ <BLANKLINE>
155
+ 0 [label="0 (None)"]
156
+ 0 -> 2 [label="ROOT"]
157
+ 1 [label="1 (John)"]
158
+ 2 [label="2 (loves)"]
159
+ 2 -> 1 [label=""]
160
+ 2 -> 3 [label=""]
161
+ 3 [label="3 (Mary)"]
162
+ }
163
+
164
+ """
165
+ # Start the digraph specification
166
+ s = "digraph G{\n"
167
+ s += "edge [dir=forward]\n"
168
+ s += "node [shape=plaintext]\n"
169
+
170
+ # Draw the remaining nodes
171
+ for node in sorted(self.nodes.values(), key=lambda v: v["address"]):
172
+ s += '\n{} [label="{} ({})"]'.format(
173
+ node["address"],
174
+ node["address"],
175
+ node["word"],
176
+ )
177
+ for rel, deps in node["deps"].items():
178
+ for dep in deps:
179
+ if rel is not None:
180
+ s += '\n{} -> {} [label="{}"]'.format(node["address"], dep, rel)
181
+ else:
182
+ s += "\n{} -> {} ".format(node["address"], dep)
183
+ s += "\n}"
184
+
185
+ return s
186
+
187
+ def _repr_svg_(self):
188
+ """Show SVG representation of the transducer (IPython magic).
189
+ >>> from nltk.test.setup_fixt import check_binary
190
+ >>> check_binary('dot')
191
+ >>> dg = DependencyGraph(
192
+ ... 'John N 2\\n'
193
+ ... 'loves V 0\\n'
194
+ ... 'Mary N 2'
195
+ ... )
196
+ >>> dg._repr_svg_().split('\\n')[0]
197
+ '<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
198
+
199
+ """
200
+ dot_string = self.to_dot()
201
+ return dot2img(dot_string)
202
+
203
+ def __str__(self):
204
+ return pformat(self.nodes)
205
+
206
+ def __repr__(self):
207
+ return f"<DependencyGraph with {len(self.nodes)} nodes>"
208
+
209
+ @staticmethod
210
+ def load(
211
+ filename, zero_based=False, cell_separator=None, top_relation_label="ROOT"
212
+ ):
213
+ """
214
+ :param filename: a name of a file in Malt-TAB format
215
+ :param zero_based: nodes in the input file are numbered starting from 0
216
+ rather than 1 (as produced by, e.g., zpar)
217
+ :param str cell_separator: the cell separator. If not provided, cells
218
+ are split by whitespace.
219
+ :param str top_relation_label: the label by which the top relation is
220
+ identified, for example, `ROOT`, `null` or `TOP`.
221
+
222
+ :return: a list of DependencyGraphs
223
+
224
+ """
225
+ with open(filename) as infile:
226
+ return [
227
+ DependencyGraph(
228
+ tree_str,
229
+ zero_based=zero_based,
230
+ cell_separator=cell_separator,
231
+ top_relation_label=top_relation_label,
232
+ )
233
+ for tree_str in infile.read().split("\n\n")
234
+ ]
235
+
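+ # Usage sketch (illustrative; "sentences.malt" is a hypothetical file name):
+ # ``load`` reads one graph per blank-line-separated block of Malt-TAB lines:
+ #
+ #     graphs = DependencyGraph.load("sentences.malt")
+ #     print(graphs[0].root["word"])
+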
236
+ def left_children(self, node_index):
237
+ """
238
+ Returns the number of left children under the node specified
239
+ by the given address.
240
+ """
241
+ children = chain.from_iterable(self.nodes[node_index]["deps"].values())
242
+ index = self.nodes[node_index]["address"]
243
+ return sum(1 for c in children if c < index)
244
+
245
+ def right_children(self, node_index):
246
+ """
247
+ Returns the number of right children under the node specified
248
+ by the given address.
249
+ """
250
+ children = chain.from_iterable(self.nodes[node_index]["deps"].values())
251
+ index = self.nodes[node_index]["address"]
252
+ return sum(1 for c in children if c > index)
253
+
254
+ def add_node(self, node):
255
+ if not self.contains_address(node["address"]):
256
+ self.nodes[node["address"]].update(node)
257
+
258
+ def _parse(
259
+ self,
260
+ input_,
261
+ cell_extractor=None,
262
+ zero_based=False,
263
+ cell_separator=None,
264
+ top_relation_label="ROOT",
265
+ ):
266
+ """Parse a sentence.
267
+
268
+ :param cell_extractor: a function that, given a tuple of cells, returns a
269
+ 7-tuple, where the values are ``word, lemma, ctag, tag, feats, head,
270
+ rel``.
271
+
272
+ :param str cell_separator: the cell separator. If not provided, cells
273
+ are split by whitespace.
274
+
275
+ :param str top_relation_label: the label by which the top relation is
276
+ identified, for example, `ROOT`, `null` or `TOP`.
277
+
278
+ """
279
+
280
+ def extract_3_cells(cells, index):
281
+ word, tag, head = cells
282
+ return index, word, word, tag, tag, "", head, ""
283
+
284
+ def extract_4_cells(cells, index):
285
+ word, tag, head, rel = cells
286
+ return index, word, word, tag, tag, "", head, rel
287
+
288
+ def extract_7_cells(cells, index):
289
+ line_index, word, lemma, tag, _, head, rel = cells
290
+ try:
291
+ index = int(line_index)
292
+ except ValueError:
293
+ # index can't be parsed as an integer, use default
294
+ pass
295
+ return index, word, lemma, tag, tag, "", head, rel
296
+
297
+ def extract_10_cells(cells, index):
298
+ line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
299
+ try:
300
+ index = int(line_index)
301
+ except ValueError:
302
+ # index can't be parsed as an integer, use default
303
+ pass
304
+ return index, word, lemma, ctag, tag, feats, head, rel
305
+
306
+ extractors = {
307
+ 3: extract_3_cells,
308
+ 4: extract_4_cells,
309
+ 7: extract_7_cells,
310
+ 10: extract_10_cells,
311
+ }
312
+
313
+ if isinstance(input_, str):
314
+ input_ = (line for line in input_.split("\n"))
315
+
316
+ lines = (l.rstrip() for l in input_)
317
+ lines = (l for l in lines if l)
318
+
319
+ cell_number = None
320
+ for index, line in enumerate(lines, start=1):
321
+ cells = line.split(cell_separator)
322
+ if cell_number is None:
323
+ cell_number = len(cells)
324
+ else:
325
+ assert cell_number == len(cells)
326
+
327
+ if cell_extractor is None:
328
+ try:
329
+ cell_extractor = extractors[cell_number]
330
+ except KeyError as e:
331
+ raise ValueError(
332
+ "Number of tab-delimited fields ({}) not supported by "
333
+ "CoNLL(10) or Malt-Tab(4) format".format(cell_number)
334
+ ) from e
335
+
336
+ try:
337
+ index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(
338
+ cells, index
339
+ )
340
+ except (TypeError, ValueError):
341
+ # cell_extractor doesn't take 2 arguments or doesn't return 8
342
+ # values; assume the cell_extractor is an older external
343
+ # extractor and doesn't accept or return an index.
344
+ word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)
345
+
346
+ if head == "_":
347
+ continue
348
+
349
+ head = int(head)
350
+ if zero_based:
351
+ head += 1
352
+
353
+ self.nodes[index].update(
354
+ {
355
+ "address": index,
356
+ "word": word,
357
+ "lemma": lemma,
358
+ "ctag": ctag,
359
+ "tag": tag,
360
+ "feats": feats,
361
+ "head": head,
362
+ "rel": rel,
363
+ }
364
+ )
365
+
366
+ # Make sure that the fake root node has labeled dependencies.
367
+ if (cell_number == 3) and (head == 0):
368
+ rel = top_relation_label
369
+ self.nodes[head]["deps"][rel].append(index)
370
+
371
+ if self.nodes[0]["deps"][top_relation_label]:
372
+ root_address = self.nodes[0]["deps"][top_relation_label][0]
373
+ self.root = self.nodes[root_address]
374
+ self.top_relation_label = top_relation_label
375
+ else:
376
+ warnings.warn(
377
+ "The graph doesn't contain a node " "that depends on the root element."
378
+ )
379
+
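+ # Example of a custom ``cell_extractor`` (an illustrative sketch for a
+ # hypothetical two-column "word/tag head" layout, which is not one of the
+ # built-in 3/4/7/10-column formats handled above):
+ #
+ #     def slash_extractor(cells, index):
+ #         word_tag, head = cells
+ #         word, tag = word_tag.rsplit('/', 1)
+ #         rel = 'ROOT' if head == '0' else ''
+ #         return index, word, word, tag, tag, '', head, rel
+ #
+ #     dg = DependencyGraph('John/N 2\nloves/V 0\nMary/N 2',
+ #                          cell_extractor=slash_extractor)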
380
+ def _word(self, node, filter=True):
381
+ w = node["word"]
382
+ if filter:
383
+ if w != ",":
384
+ return w
385
+ return w
386
+
387
+ def _tree(self, i):
388
+ """Turn dependency graphs into NLTK trees.
389
+
390
+ :param int i: index of a node
391
+ :return: either a word (if the indexed node is a leaf) or a ``Tree``.
392
+ """
393
+ node = self.get_by_address(i)
394
+ word = node["word"]
395
+ deps = sorted(chain.from_iterable(node["deps"].values()))
396
+
397
+ if deps:
398
+ return Tree(word, [self._tree(dep) for dep in deps])
399
+ else:
400
+ return word
401
+
402
+ def tree(self):
403
+ """
404
+ Starting with the ``root`` node, build a dependency tree using the NLTK
405
+ ``Tree`` constructor. Dependency labels are omitted.
406
+ """
407
+ node = self.root
408
+
409
+ word = node["word"]
410
+ deps = sorted(chain.from_iterable(node["deps"].values()))
411
+ return Tree(word, [self._tree(dep) for dep in deps])
412
+
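+ # Example (illustrative; reuses the small graph from the to_dot doctest above):
+ #
+ #     dg = DependencyGraph('John N 2\nloves V 0\nMary N 2')
+ #     print(dg.tree())
+ #     # (loves John Mary)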
413
+ def triples(self, node=None):
414
+ """
415
+ Extract dependency triples of the form:
416
+ ((head word, head tag), rel, (dep word, dep tag))
417
+ """
418
+
419
+ if not node:
420
+ node = self.root
421
+
422
+ head = (node["word"], node["ctag"])
423
+ for i in sorted(chain.from_iterable(node["deps"].values())):
424
+ dep = self.get_by_address(i)
425
+ yield (head, dep["rel"], (dep["word"], dep["ctag"]))
426
+ yield from self.triples(node=dep)
427
+
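+ # Example (illustrative; for the same small graph, each triple pairs a head
+ # with one of its dependents):
+ #
+ #     dg = DependencyGraph('John N 2\nloves V 0\nMary N 2')
+ #     for triple in dg.triples():
+ #         print(triple)
+ #     # roughly: (('loves', 'V'), '', ('John', 'N'))
+ #     #          (('loves', 'V'), '', ('Mary', 'N'))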
428
+ def _hd(self, i):
429
+ try:
430
+ return self.nodes[i]["head"]
431
+ except IndexError:
432
+ return None
433
+
434
+ def _rel(self, i):
435
+ try:
436
+ return self.nodes[i]["rel"]
437
+ except IndexError:
438
+ return None
439
+
440
+ # what's the return type? Boolean or list?
441
+ def contains_cycle(self):
442
+ """Check whether there are cycles.
443
+
444
+ >>> dg = DependencyGraph(treebank_data)
445
+ >>> dg.contains_cycle()
446
+ False
447
+
448
+ >>> cyclic_dg = DependencyGraph()
449
+ >>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}
450
+ >>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}
451
+ >>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}
452
+ >>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}
453
+ >>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}
454
+ >>> cyclic_dg.nodes = {
455
+ ... 0: top,
456
+ ... 1: child1,
457
+ ... 2: child2,
458
+ ... 3: child3,
459
+ ... 4: child4,
460
+ ... }
461
+ >>> cyclic_dg.root = top
462
+
463
+ >>> cyclic_dg.contains_cycle()
464
+ [1, 2, 4, 3]
465
+
466
+ """
467
+ distances = {}
468
+
469
+ for node in self.nodes.values():
470
+ for dep in node["deps"]:
471
+ key = tuple([node["address"], dep])
472
+ distances[key] = 1
473
+
474
+ for _ in self.nodes:
475
+ new_entries = {}
476
+
477
+ for pair1 in distances:
478
+ for pair2 in distances:
479
+ if pair1[1] == pair2[0]:
480
+ key = tuple([pair1[0], pair2[1]])
481
+ new_entries[key] = distances[pair1] + distances[pair2]
482
+
483
+ for pair in new_entries:
484
+ distances[pair] = new_entries[pair]
485
+ if pair[0] == pair[1]:
486
+ path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
487
+ return path
488
+
489
+ return False # return []?
490
+
491
+ def get_cycle_path(self, curr_node, goal_node_index):
492
+ for dep in curr_node["deps"]:
493
+ if dep == goal_node_index:
494
+ return [curr_node["address"]]
495
+ for dep in curr_node["deps"]:
496
+ path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
497
+ if len(path) > 0:
498
+ path.insert(0, curr_node["address"])
499
+ return path
500
+ return []
501
+
502
+ def to_conll(self, style):
503
+ """
504
+ The dependency graph in CoNLL format.
505
+
506
+ :param style: the style to use for the format (3, 4, 10 columns)
507
+ :type style: int
508
+ :rtype: str
509
+ """
510
+
511
+ if style == 3:
512
+ template = "{word}\t{tag}\t{head}\n"
513
+ elif style == 4:
514
+ template = "{word}\t{tag}\t{head}\t{rel}\n"
515
+ elif style == 10:
516
+ template = (
517
+ "{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n"
518
+ )
519
+ else:
520
+ raise ValueError(
521
+ "Number of tab-delimited fields ({}) not supported by "
522
+ "CoNLL(10) or Malt-Tab(4) format".format(style)
523
+ )
524
+
525
+ return "".join(
526
+ template.format(i=i, **node)
527
+ for i, node in sorted(self.nodes.items())
528
+ if node["tag"] != "TOP"
529
+ )
530
+
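+ # Example (illustrative): to_conll(4) returns one tab-separated
+ # "word<TAB>tag<TAB>head<TAB>rel" line per token, e.g. for the small graph
+ # used in the doctests above:
+ #
+ #     dg = DependencyGraph('John N 2\nloves V 0\nMary N 2')
+ #     print(dg.to_conll(4))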
531
+ def nx_graph(self):
532
+ """Convert the data in a ``nodelist`` into a networkx labeled directed graph."""
533
+ import networkx
534
+
535
+ nx_nodelist = list(range(1, len(self.nodes)))
536
+ nx_edgelist = [
537
+ (n, self._hd(n), self._rel(n)) for n in nx_nodelist if self._hd(n)
538
+ ]
539
+ self.nx_labels = {}
540
+ for n in nx_nodelist:
541
+ self.nx_labels[n] = self.nodes[n]["word"]
542
+
543
+ g = networkx.MultiDiGraph()
544
+ g.add_nodes_from(nx_nodelist)
545
+ g.add_edges_from(nx_edgelist)
546
+
547
+ return g
548
+
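+ # Example (illustrative; requires the optional networkx dependency and assumes
+ # ``dg`` is a DependencyGraph built as in the doctests above):
+ #
+ #     g = dg.nx_graph()
+ #     print(g.number_of_nodes(), g.number_of_edges())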
549
+
550
+ def dot2img(dot_string, t="svg"):
551
+ """
552
+ Create image representation from dot_string, using the 'dot' program
553
+ from the Graphviz package.
554
+
555
+ Use the 't' argument to specify the image file format, for example 'jpeg', 'eps',
556
+ 'json', 'png' or 'webp' (Running 'dot -T:' lists all available formats).
557
+
558
+ Note that the "capture_output" option of subprocess.run() is only available
559
+ text formats (like svg), not for binary image formats (like png).
560
+ """
561
+
562
+ try:
563
+ find_binary("dot")
564
+ try:
565
+ if t in ["dot", "dot_json", "json", "svg"]:
566
+ proc = subprocess.run(
567
+ ["dot", "-T%s" % t],
568
+ capture_output=True,
569
+ input=dot_string,
570
+ text=True,
571
+ )
572
+ else:
573
+ proc = subprocess.run(
574
+ ["dot", "-T%s" % t],
575
+ input=bytes(dot_string, encoding="utf8"),
576
+ )
577
+ return proc.stdout
578
+ except:
579
+ raise Exception(
580
+ "Cannot create image representation by running dot from string: {}"
581
+ "".format(dot_string)
582
+ )
583
+ except OSError as e:
584
+ raise Exception("Cannot find the dot binary from Graphviz package") from e
585
+
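+ # Example (illustrative; assumes the Graphviz 'dot' binary is installed and on
+ # the PATH, and that ``dg`` is a DependencyGraph as in the doctests above):
+ #
+ #     svg = dot2img(dg.to_dot())  # SVG markup as a text string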
586
+
587
+ class DependencyGraphError(Exception):
588
+ """Dependency graph exception."""
589
+
590
+
591
+ def demo():
592
+ malt_demo()
593
+ conll_demo()
594
+ conll_file_demo()
595
+ cycle_finding_demo()
596
+
597
+
598
+ def malt_demo(nx=False):
599
+ """
600
+ A demonstration of the result of reading a dependency
601
+ version of the first sentence of the Penn Treebank.
602
+ """
603
+ dg = DependencyGraph(
604
+ """Pierre NNP 2 NMOD
605
+ Vinken NNP 8 SUB
606
+ , , 2 P
607
+ 61 CD 5 NMOD
608
+ years NNS 6 AMOD
609
+ old JJ 2 NMOD
610
+ , , 2 P
611
+ will MD 0 ROOT
612
+ join VB 8 VC
613
+ the DT 11 NMOD
614
+ board NN 9 OBJ
615
+ as IN 9 VMOD
616
+ a DT 15 NMOD
617
+ nonexecutive JJ 15 NMOD
618
+ director NN 12 PMOD
619
+ Nov. NNP 9 VMOD
620
+ 29 CD 16 NMOD
621
+ . . 9 VMOD
622
+ """
623
+ )
624
+ tree = dg.tree()
625
+ tree.pprint()
626
+ if nx:
627
+ # currently doesn't work
628
+ import networkx
629
+ from matplotlib import pylab
630
+
631
+ g = dg.nx_graph()
632
+ g.info()
633
+ pos = networkx.spring_layout(g, dim=1)
634
+ networkx.draw_networkx_nodes(g, pos, node_size=50)
635
+ # networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)
636
+ networkx.draw_networkx_labels(g, pos, dg.nx_labels)
637
+ pylab.xticks([])
638
+ pylab.yticks([])
639
+ pylab.savefig("tree.png")
640
+ pylab.show()
641
+
642
+
643
+ def conll_demo():
644
+ """
645
+ A demonstration of how to read a string representation of
646
+ a CoNLL format dependency tree.
647
+ """
648
+ dg = DependencyGraph(conll_data1)
649
+ tree = dg.tree()
650
+ tree.pprint()
651
+ print(dg)
652
+ print(dg.to_conll(4))
653
+
654
+
655
+ def conll_file_demo():
656
+ print("Mass conll_read demo...")
657
+ graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
658
+ for graph in graphs:
659
+ tree = graph.tree()
660
+ print("\n")
661
+ tree.pprint()
662
+
663
+
664
+ def cycle_finding_demo():
665
+ dg = DependencyGraph(treebank_data)
666
+ print(dg.contains_cycle())
667
+ cyclic_dg = DependencyGraph()
668
+ cyclic_dg.add_node({"word": None, "deps": [1], "rel": "TOP", "address": 0})
669
+ cyclic_dg.add_node({"word": None, "deps": [2], "rel": "NTOP", "address": 1})
670
+ cyclic_dg.add_node({"word": None, "deps": [4], "rel": "NTOP", "address": 2})
671
+ cyclic_dg.add_node({"word": None, "deps": [1], "rel": "NTOP", "address": 3})
672
+ cyclic_dg.add_node({"word": None, "deps": [3], "rel": "NTOP", "address": 4})
673
+ print(cyclic_dg.contains_cycle())
674
+
675
+
676
+ treebank_data = """Pierre NNP 2 NMOD
677
+ Vinken NNP 8 SUB
678
+ , , 2 P
679
+ 61 CD 5 NMOD
680
+ years NNS 6 AMOD
681
+ old JJ 2 NMOD
682
+ , , 2 P
683
+ will MD 0 ROOT
684
+ join VB 8 VC
685
+ the DT 11 NMOD
686
+ board NN 9 OBJ
687
+ as IN 9 VMOD
688
+ a DT 15 NMOD
689
+ nonexecutive JJ 15 NMOD
690
+ director NN 12 PMOD
691
+ Nov. NNP 9 VMOD
692
+ 29 CD 16 NMOD
693
+ . . 9 VMOD
694
+ """
695
+
696
+ conll_data1 = """
697
+ 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
698
+ 2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
699
+ 3 met met Prep Prep voor 8 mod _ _
700
+ 4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
701
+ 5 moeder moeder N N soort|ev|neut 3 obj1 _ _
702
+ 6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
703
+ 7 gaan ga V V hulp|inf 6 vc _ _
704
+ 8 winkelen winkel V V intrans|inf 11 cnj _ _
705
+ 9 , , Punc Punc komma 8 punct _ _
706
+ 10 zwemmen zwem V V intrans|inf 11 cnj _ _
707
+ 11 of of Conj Conj neven 7 vc _ _
708
+ 12 terrassen terras N N soort|mv|neut 11 cnj _ _
709
+ 13 . . Punc Punc punt 12 punct _ _
710
+ """
711
+
712
+ conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
713
+ 2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
714
+ 3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
715
+ 4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
716
+ 5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
717
+ 6 . . Punc Punc punt 5 punct _ _
718
+
719
+ 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
720
+ 2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
721
+ 3 met met Prep Prep voor 8 mod _ _
722
+ 4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
723
+ 5 moeder moeder N N soort|ev|neut 3 obj1 _ _
724
+ 6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
725
+ 7 gaan ga V V hulp|inf 6 vc _ _
726
+ 8 winkelen winkel V V intrans|inf 11 cnj _ _
727
+ 9 , , Punc Punc komma 8 punct _ _
728
+ 10 zwemmen zwem V V intrans|inf 11 cnj _ _
729
+ 11 of of Conj Conj neven 7 vc _ _
730
+ 12 terrassen terras N N soort|mv|neut 11 cnj _ _
731
+ 13 . . Punc Punc punt 12 punct _ _
732
+
733
+ 1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
734
+ 2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
735
+ 3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
736
+ 4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
737
+ 5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
738
+ 6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
739
+ 7 . . Punc Punc punt 6 punct _ _
740
+
741
+ 1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
742
+ 2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
743
+ 3 bij bij Prep Prep voor 2 ld _ _
744
+ 4 de de Art Art bep|zijdofmv|neut 6 det _ _
745
+ 5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
746
+ 6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
747
+ 7 die die Pron Pron betr|neut|zelfst 6 mod _ _
748
+ 8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
749
+ 9 ginds ginds Adv Adv gew|aanw 12 mod _ _
750
+ 10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
751
+ 11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
752
+ 12 gelaten laat V V trans|verldw|onverv 11 vc _ _
753
+ 13 . . Punc Punc punt 12 punct _ _
754
+
755
+ 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
756
+ 2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
757
+ 3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
758
+ 4 naast naast Prep Prep voor 11 mod _ _
759
+ 5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
760
+ 6 op op Prep Prep voor 11 ld _ _
761
+ 7 de de Art Art bep|zijdofmv|neut 8 det _ _
762
+ 8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
763
+ 9 kunnen kan V V hulp|inf 2 vc _ _
764
+ 10 gaan ga V V hulp|inf 9 vc _ _
765
+ 11 liggen lig V V intrans|inf 10 vc _ _
766
+ 12 . . Punc Punc punt 11 punct _ _
767
+
768
+ 1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
769
+ 2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
770
+ 3 mams mams N N soort|ev|neut 4 det _ _
771
+ 4 rug rug N N soort|ev|neut 5 obj1 _ _
772
+ 5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
773
+ 6 hebben heb V V hulp|inf 2 vc _ _
774
+ 7 en en Conj Conj neven 0 ROOT _ _
775
+ 8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
776
+ 9 de de Art Art bep|zijdofmv|neut 10 det _ _
777
+ 10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
778
+ 11 . . Punc Punc punt 10 punct _ _
779
+
780
+ 1 Of of Conj Conj onder|metfin 0 ROOT _ _
781
+ 2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
782
+ 3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
783
+ 4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
784
+ 5 met met Prep Prep voor 10 mod _ _
785
+ 6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
786
+ 7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
787
+ 8 rond rond Adv Adv deelv 10 svp _ _
788
+ 9 kunnen kan V V hulp|inf 3 vc _ _
789
+ 10 slenteren slenter V V intrans|inf 9 vc _ _
790
+ 11 in in Prep Prep voor 10 mod _ _
791
+ 12 de de Art Art bep|zijdofmv|neut 13 det _ _
792
+ 13 buurt buurt N N soort|ev|neut 11 obj1 _ _
793
+ 14 van van Prep Prep voor 13 mod _ _
794
+ 15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
795
+ 16 . . Punc Punc punt 15 punct _ _
796
+ """
797
+
798
+ if __name__ == "__main__":
799
+ demo()
venv/lib/python3.10/site-packages/nltk/parse/pchart.py ADDED
@@ -0,0 +1,579 @@
1
+ # Natural Language Toolkit: Probabilistic Chart Parsers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Classes and interfaces for associating probabilities with tree
11
+ structures that represent the internal organization of a text. The
12
+ probabilistic parser module defines ``BottomUpProbabilisticChartParser``.
13
+
14
+ ``BottomUpProbabilisticChartParser`` is an abstract class that implements
15
+ a bottom-up chart parser for ``PCFG`` grammars. It maintains a queue of edges,
16
+ and adds them to the chart one at a time. The ordering of this queue
17
+ is based on the probabilities associated with the edges, allowing the
18
+ parser to expand more likely edges before less likely ones. Each
19
+ subclass implements a different queue ordering, producing different
20
+ search strategies. Currently the following subclasses are defined:
21
+
22
+ - ``InsideChartParser`` searches edges in decreasing order of
23
+ their trees' inside probabilities.
24
+ - ``RandomChartParser`` searches edges in random order.
25
+ - ``LongestChartParser`` searches edges in decreasing order of their
26
+ location's length.
27
+
28
+ The ``BottomUpProbabilisticChartParser`` constructor has an optional
29
+ argument beam_size. If non-zero, this controls the size of the beam
30
+ (aka the edge queue). This option is most useful with InsideChartParser.
31
+ """
32
+
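+ # Example usage (an illustrative sketch; the toy grammar below is invented for
+ # demonstration purposes and is not part of this module):
+ #
+ #     from nltk.grammar import PCFG
+ #     from nltk.parse.pchart import InsideChartParser
+ #
+ #     grammar = PCFG.fromstring("""
+ #         S -> NP VP [1.0]
+ #         NP -> 'John' [0.5] | 'Mary' [0.5]
+ #         VP -> V NP [1.0]
+ #         V -> 'loves' [1.0]
+ #     """)
+ #     parser = InsideChartParser(grammar, beam_size=20)
+ #     for tree in parser.parse('John loves Mary'.split()):
+ #         print(tree.prob(), tree)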
33
+ ##//////////////////////////////////////////////////////
34
+ ## Bottom-Up PCFG Chart Parser
35
+ ##//////////////////////////////////////////////////////
36
+
37
+ # [XX] This might not be implemented quite right -- it would be better
38
+ # to associate probabilities with child pointer lists.
39
+
40
+ import random
41
+ from functools import reduce
42
+
43
+ from nltk.grammar import PCFG, Nonterminal
44
+ from nltk.parse.api import ParserI
45
+ from nltk.parse.chart import AbstractChartRule, Chart, LeafEdge, TreeEdge
46
+ from nltk.tree import ProbabilisticTree, Tree
47
+
48
+
49
+ # Probabilistic edges
50
+ class ProbabilisticLeafEdge(LeafEdge):
51
+ def prob(self):
52
+ return 1.0
53
+
54
+
55
+ class ProbabilisticTreeEdge(TreeEdge):
56
+ def __init__(self, prob, *args, **kwargs):
57
+ TreeEdge.__init__(self, *args, **kwargs)
58
+ self._prob = prob
59
+ # two edges with different probabilities are not equal.
60
+ self._comparison_key = (self._comparison_key, prob)
61
+
62
+ def prob(self):
63
+ return self._prob
64
+
65
+ @staticmethod
66
+ def from_production(production, index, p):
67
+ return ProbabilisticTreeEdge(
68
+ p, (index, index), production.lhs(), production.rhs(), 0
69
+ )
70
+
71
+
72
+ # Rules using probabilistic edges
73
+ class ProbabilisticBottomUpInitRule(AbstractChartRule):
74
+ NUM_EDGES = 0
75
+
76
+ def apply(self, chart, grammar):
77
+ for index in range(chart.num_leaves()):
78
+ new_edge = ProbabilisticLeafEdge(chart.leaf(index), index)
79
+ if chart.insert(new_edge, ()):
80
+ yield new_edge
81
+
82
+
83
+ class ProbabilisticBottomUpPredictRule(AbstractChartRule):
84
+ NUM_EDGES = 1
85
+
86
+ def apply(self, chart, grammar, edge):
87
+ if edge.is_incomplete():
88
+ return
89
+ for prod in grammar.productions():
90
+ if edge.lhs() == prod.rhs()[0]:
91
+ new_edge = ProbabilisticTreeEdge.from_production(
92
+ prod, edge.start(), prod.prob()
93
+ )
94
+ if chart.insert(new_edge, ()):
95
+ yield new_edge
96
+
97
+
98
+ class ProbabilisticFundamentalRule(AbstractChartRule):
99
+ NUM_EDGES = 2
100
+
101
+ def apply(self, chart, grammar, left_edge, right_edge):
102
+ # Make sure the rule is applicable.
103
+ if not (
104
+ left_edge.end() == right_edge.start()
105
+ and left_edge.nextsym() == right_edge.lhs()
106
+ and left_edge.is_incomplete()
107
+ and right_edge.is_complete()
108
+ ):
109
+ return
110
+
111
+ # Construct the new edge.
112
+ p = left_edge.prob() * right_edge.prob()
113
+ new_edge = ProbabilisticTreeEdge(
114
+ p,
115
+ span=(left_edge.start(), right_edge.end()),
116
+ lhs=left_edge.lhs(),
117
+ rhs=left_edge.rhs(),
118
+ dot=left_edge.dot() + 1,
119
+ )
120
+
121
+ # Add it to the chart, with appropriate child pointers.
122
+ changed_chart = False
123
+ for cpl1 in chart.child_pointer_lists(left_edge):
124
+ if chart.insert(new_edge, cpl1 + (right_edge,)):
125
+ changed_chart = True
126
+
127
+ # If we changed the chart, then generate the edge.
128
+ if changed_chart:
129
+ yield new_edge
130
+
131
+
132
+ class SingleEdgeProbabilisticFundamentalRule(AbstractChartRule):
133
+ NUM_EDGES = 1
134
+
135
+ _fundamental_rule = ProbabilisticFundamentalRule()
136
+
137
+ def apply(self, chart, grammar, edge1):
138
+ fr = self._fundamental_rule
139
+ if edge1.is_incomplete():
140
+ # edge1 = left_edge; edge2 = right_edge
141
+ for edge2 in chart.select(
142
+ start=edge1.end(), is_complete=True, lhs=edge1.nextsym()
143
+ ):
144
+ yield from fr.apply(chart, grammar, edge1, edge2)
145
+ else:
146
+ # edge2 = left_edge; edge1 = right_edge
147
+ for edge2 in chart.select(
148
+ end=edge1.start(), is_complete=False, nextsym=edge1.lhs()
149
+ ):
150
+ yield from fr.apply(chart, grammar, edge2, edge1)
151
+
152
+ def __str__(self):
153
+ return "Fundamental Rule"
154
+
155
+
156
+ class BottomUpProbabilisticChartParser(ParserI):
157
+ """
158
+ An abstract bottom-up parser for ``PCFG`` grammars that uses a ``Chart`` to
159
+ record partial results. ``BottomUpProbabilisticChartParser`` maintains
160
+ a queue of edges that can be added to the chart. This queue is
161
+ initialized with edges for each token in the text that is being
162
+ parsed. ``BottomUpProbabilisticChartParser`` inserts these edges into
163
+ the chart one at a time, starting with the most likely edges, and
164
+ proceeding to less likely edges. For each edge that is added to
165
+ the chart, it may become possible to insert additional edges into
166
+ the chart; these are added to the queue. This process continues
167
+ until enough complete parses have been generated, or until the
168
+ queue is empty.
169
+
170
+ The sorting order for the queue is not specified by
171
+ ``BottomUpProbabilisticChartParser``. Different sorting orders will
172
+ result in different search strategies. The sorting order for the
173
+ queue is defined by the method ``sort_queue``; subclasses are required
174
+ to provide a definition for this method.
175
+
176
+ :type _grammar: PCFG
177
+ :ivar _grammar: The grammar used to parse sentences.
178
+ :type _trace: int
179
+ :ivar _trace: The level of tracing output that should be generated
180
+ when parsing a text.
181
+ """
182
+
183
+ def __init__(self, grammar, beam_size=0, trace=0):
184
+ """
185
+ Create a new ``BottomUpProbabilisticChartParser``, that uses
186
+ ``grammar`` to parse texts.
187
+
188
+ :type grammar: PCFG
189
+ :param grammar: The grammar used to parse texts.
190
+ :type beam_size: int
191
+ :param beam_size: The maximum length for the parser's edge queue.
192
+ :type trace: int
193
+ :param trace: The level of tracing that should be used when
194
+ parsing a text. ``0`` will generate no tracing output;
195
+ and higher numbers will produce more verbose tracing
196
+ output.
197
+ """
198
+ if not isinstance(grammar, PCFG):
199
+ raise ValueError("The grammar must be probabilistic PCFG")
200
+ self._grammar = grammar
201
+ self.beam_size = beam_size
202
+ self._trace = trace
203
+
204
+ def grammar(self):
205
+ return self._grammar
206
+
207
+ def trace(self, trace=2):
208
+ """
209
+ Set the level of tracing output that should be generated when
210
+ parsing a text.
211
+
212
+ :type trace: int
213
+ :param trace: The trace level. A trace level of ``0`` will
214
+ generate no tracing output; and higher trace levels will
215
+ produce more verbose tracing output.
216
+ :rtype: None
217
+ """
218
+ self._trace = trace
219
+
220
+ # TODO: change this to conform more with the standard ChartParser
221
+ def parse(self, tokens):
222
+ self._grammar.check_coverage(tokens)
223
+ chart = Chart(list(tokens))
224
+ grammar = self._grammar
225
+
226
+ # Chart parser rules.
227
+ bu_init = ProbabilisticBottomUpInitRule()
228
+ bu = ProbabilisticBottomUpPredictRule()
229
+ fr = SingleEdgeProbabilisticFundamentalRule()
230
+
231
+ # Our queue
232
+ queue = []
233
+
234
+ # Initialize the chart.
235
+ for edge in bu_init.apply(chart, grammar):
236
+ if self._trace > 1:
237
+ print(
238
+ " %-50s [%s]"
239
+ % (chart.pretty_format_edge(edge, width=2), edge.prob())
240
+ )
241
+ queue.append(edge)
242
+
243
+ while len(queue) > 0:
244
+ # Re-sort the queue.
245
+ self.sort_queue(queue, chart)
246
+
247
+ # Prune the queue to the correct size if a beam was defined
248
+ if self.beam_size:
249
+ self._prune(queue, chart)
250
+
251
+ # Get the best edge.
252
+ edge = queue.pop()
253
+ if self._trace > 0:
254
+ print(
255
+ " %-50s [%s]"
256
+ % (chart.pretty_format_edge(edge, width=2), edge.prob())
257
+ )
258
+
259
+ # Apply BU & FR to it.
260
+ queue.extend(bu.apply(chart, grammar, edge))
261
+ queue.extend(fr.apply(chart, grammar, edge))
262
+
263
+ # Get a list of complete parses.
264
+ parses = list(chart.parses(grammar.start(), ProbabilisticTree))
265
+
266
+ # Assign probabilities to the trees.
267
+ prod_probs = {}
268
+ for prod in grammar.productions():
269
+ prod_probs[prod.lhs(), prod.rhs()] = prod.prob()
270
+ for parse in parses:
271
+ self._setprob(parse, prod_probs)
272
+
273
+ # Sort by probability
274
+ parses.sort(reverse=True, key=lambda tree: tree.prob())
275
+
276
+ return iter(parses)
277
+
278
+ def _setprob(self, tree, prod_probs):
279
+ if tree.prob() is not None:
280
+ return
281
+
282
+ # Get the prob of the CFG production.
283
+ lhs = Nonterminal(tree.label())
284
+ rhs = []
285
+ for child in tree:
286
+ if isinstance(child, Tree):
287
+ rhs.append(Nonterminal(child.label()))
288
+ else:
289
+ rhs.append(child)
290
+ prob = prod_probs[lhs, tuple(rhs)]
291
+
292
+ # Get the probs of children.
293
+ for child in tree:
294
+ if isinstance(child, Tree):
295
+ self._setprob(child, prod_probs)
296
+ prob *= child.prob()
297
+
298
+ tree.set_prob(prob)
299
+
300
+ def sort_queue(self, queue, chart):
301
+ """
302
+ Sort the given queue of ``Edge`` objects, placing the edge that should
303
+ be tried first at the beginning of the queue. This method
304
+ will be called after each ``Edge`` is added to the queue.
305
+
306
+ :param queue: The queue of ``Edge`` objects to sort. Each edge in
307
+ this queue is an edge that could be added to the chart by
308
+ the fundamental rule; but that has not yet been added.
309
+ :type queue: list(Edge)
310
+ :param chart: The chart being used to parse the text. This
311
+ chart can be used to provide extra information for sorting
312
+ the queue.
313
+ :type chart: Chart
314
+ :rtype: None
315
+ """
316
+ raise NotImplementedError()
317
+
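+ # A concrete subclass only needs to override sort_queue; for example (an
+ # illustrative sketch, not part of this module):
+ #
+ #     class ShortestChartParser(BottomUpProbabilisticChartParser):
+ #         # Try shorter edges before longer ones; the edge popped from the
+ #         # end of the queue is tried first.
+ #         def sort_queue(self, queue, chart):
+ #             queue.sort(key=lambda edge: -edge.length())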
318
+ def _prune(self, queue, chart):
319
+ """Discard items in the queue if the queue is longer than the beam."""
320
+ if len(queue) > self.beam_size:
321
+ split = len(queue) - self.beam_size
322
+ if self._trace > 2:
323
+ for edge in queue[:split]:
324
+ print(" %-50s [DISCARDED]" % chart.pretty_format_edge(edge, 2))
325
+ del queue[:split]
326
+
327
+
328
+ class InsideChartParser(BottomUpProbabilisticChartParser):
329
+ """
330
+ A bottom-up parser for ``PCFG`` grammars that tries edges in descending
331
+ order of the inside probabilities of their trees. The "inside
332
+ probability" of a tree is simply the
333
+ probability of the entire tree, ignoring its context. In
334
+ particular, the inside probability of a tree generated by
335
+ production *p* with children *c[1], c[2], ..., c[n]* is
336
+ *P(p)P(c[1])P(c[2])...P(c[n])*; and the inside
337
+ probability of a token is 1 if it is present in the text, and 0 if
338
+ it is absent.
339
+
340
+ This sorting order results in a type of lowest-cost-first search
341
+ strategy.
342
+ """
343
+
344
+ # Inherit constructor.
345
+ def sort_queue(self, queue, chart):
346
+ """
347
+ Sort the given queue of edges, in descending order of the
348
+ inside probabilities of the edges' trees.
349
+
350
+ :param queue: The queue of ``Edge`` objects to sort. Each edge in
351
+ this queue is an edge that could be added to the chart by
352
+ the fundamental rule; but that has not yet been added.
353
+ :type queue: list(Edge)
354
+ :param chart: The chart being used to parse the text. This
355
+ chart can be used to provide extra information for sorting
356
+ the queue.
357
+ :type chart: Chart
358
+ :rtype: None
359
+ """
360
+ queue.sort(key=lambda edge: edge.prob())
361
+
362
+
363
+ # Eventually, this will become some sort of inside-outside parser:
364
+ # class InsideOutsideParser(BottomUpProbabilisticChartParser):
365
+ # def __init__(self, grammar, trace=0):
366
+ # # Inherit docs.
367
+ # BottomUpProbabilisticChartParser.__init__(self, grammar, trace)
368
+ #
369
+ # # Find the best path from S to each nonterminal
370
+ # bestp = {}
371
+ # for production in grammar.productions(): bestp[production.lhs()]=0
372
+ # bestp[grammar.start()] = 1.0
373
+ #
374
+ # for i in range(len(grammar.productions())):
375
+ # for production in grammar.productions():
376
+ # lhs = production.lhs()
377
+ # for elt in production.rhs():
378
+ # bestp[elt] = max(bestp[lhs]*production.prob(),
379
+ # bestp.get(elt,0))
380
+ #
381
+ # self._bestp = bestp
382
+ # for (k,v) in self._bestp.items(): print(k,v)
383
+ #
384
+ # def _sortkey(self, edge):
385
+ # return edge.structure()[PROB] * self._bestp[edge.lhs()]
386
+ #
387
+ # def sort_queue(self, queue, chart):
388
+ # queue.sort(key=self._sortkey)
389
+
390
+
391
+ class RandomChartParser(BottomUpProbabilisticChartParser):
392
+ """
393
+ A bottom-up parser for ``PCFG`` grammars that tries edges in random order.
394
+ This sorting order results in a random search strategy.
395
+ """
396
+
397
+ # Inherit constructor
398
+ def sort_queue(self, queue, chart):
399
+ i = random.randint(0, len(queue) - 1)
400
+ (queue[-1], queue[i]) = (queue[i], queue[-1])
401
+
402
+
403
+ class UnsortedChartParser(BottomUpProbabilisticChartParser):
404
+ """
405
+ A bottom-up parser for ``PCFG`` grammars that tries edges in whatever order they appear in the queue.
406
+ """
407
+
408
+ # Inherit constructor
409
+ def sort_queue(self, queue, chart):
410
+ return
411
+
412
+
413
+ class LongestChartParser(BottomUpProbabilisticChartParser):
414
+ """
415
+ A bottom-up parser for ``PCFG`` grammars that tries longer edges before
416
+ shorter ones. This sorting order results in a type of best-first
417
+ search strategy.
418
+ """
419
+
420
+ # Inherit constructor
421
+ def sort_queue(self, queue, chart):
422
+ queue.sort(key=lambda edge: edge.length())
423
+
424
+
425
+ ##//////////////////////////////////////////////////////
426
+ ## Test Code
427
+ ##//////////////////////////////////////////////////////
428
+
429
+
430
+ def demo(choice=None, draw_parses=None, print_parses=None):
431
+ """
432
+ A demonstration of the probabilistic parsers. The user is
433
+ prompted to select which demo to run, and how many parses should
434
+ be found; and then each parser is run on the same demo, and a
435
+ summary of the results are displayed.
436
+ """
437
+ import sys
438
+ import time
439
+
440
+ from nltk import tokenize
441
+ from nltk.parse import pchart
442
+
443
+ # Define two demos. Each demo has a sentence and a grammar.
444
+ toy_pcfg1 = PCFG.fromstring(
445
+ """
446
+ S -> NP VP [1.0]
447
+ NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
448
+ Det -> 'the' [0.8] | 'my' [0.2]
449
+ N -> 'man' [0.5] | 'telescope' [0.5]
450
+ VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
451
+ V -> 'ate' [0.35] | 'saw' [0.65]
452
+ PP -> P NP [1.0]
453
+ P -> 'with' [0.61] | 'under' [0.39]
454
+ """
455
+ )
456
+
457
+ toy_pcfg2 = PCFG.fromstring(
458
+ """
459
+ S -> NP VP [1.0]
460
+ VP -> V NP [.59]
461
+ VP -> V [.40]
462
+ VP -> VP PP [.01]
463
+ NP -> Det N [.41]
464
+ NP -> Name [.28]
465
+ NP -> NP PP [.31]
466
+ PP -> P NP [1.0]
467
+ V -> 'saw' [.21]
468
+ V -> 'ate' [.51]
469
+ V -> 'ran' [.28]
470
+ N -> 'boy' [.11]
471
+ N -> 'cookie' [.12]
472
+ N -> 'table' [.13]
473
+ N -> 'telescope' [.14]
474
+ N -> 'hill' [.5]
475
+ Name -> 'Jack' [.52]
476
+ Name -> 'Bob' [.48]
477
+ P -> 'with' [.61]
478
+ P -> 'under' [.39]
479
+ Det -> 'the' [.41]
480
+ Det -> 'a' [.31]
481
+ Det -> 'my' [.28]
482
+ """
483
+ )
484
+
485
+ demos = [
486
+ ("I saw John with my telescope", toy_pcfg1),
487
+ ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2),
488
+ ]
489
+
490
+ if choice is None:
491
+ # Ask the user which demo they want to use.
492
+ print()
493
+ for i in range(len(demos)):
494
+ print(f"{i + 1:>3}: {demos[i][0]}")
495
+ print(" %r" % demos[i][1])
496
+ print()
497
+ print("Which demo (%d-%d)? " % (1, len(demos)), end=" ")
498
+ choice = int(sys.stdin.readline().strip()) - 1
499
+ try:
500
+ sent, grammar = demos[choice]
501
+ except:
502
+ print("Bad sentence number")
503
+ return
504
+
505
+ # Tokenize the sentence.
506
+ tokens = sent.split()
507
+
508
+ # Define a list of parsers. We'll use all parsers.
509
+ parsers = [
510
+ pchart.InsideChartParser(grammar),
511
+ pchart.RandomChartParser(grammar),
512
+ pchart.UnsortedChartParser(grammar),
513
+ pchart.LongestChartParser(grammar),
514
+ pchart.InsideChartParser(grammar, beam_size=len(tokens) + 1), # was BeamParser
515
+ ]
516
+
517
+ # Run the parsers on the tokenized sentence.
518
+ times = []
519
+ average_p = []
520
+ num_parses = []
521
+ all_parses = {}
522
+ for parser in parsers:
523
+ print(f"\ns: {sent}\nparser: {parser}\ngrammar: {grammar}")
524
+ parser.trace(3)
525
+ t = time.time()
526
+ parses = list(parser.parse(tokens))
527
+ times.append(time.time() - t)
528
+ p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0
529
+ average_p.append(p)
530
+ num_parses.append(len(parses))
531
+ for p in parses:
532
+ all_parses[p.freeze()] = 1
533
+
534
+ # Print some summary statistics
535
+ print()
536
+ print(" Parser Beam | Time (secs) # Parses Average P(parse)")
537
+ print("------------------------+------------------------------------------")
538
+ for i in range(len(parsers)):
539
+ print(
540
+ "%18s %4d |%11.4f%11d%19.14f"
541
+ % (
542
+ parsers[i].__class__.__name__,
543
+ parsers[i].beam_size,
544
+ times[i],
545
+ num_parses[i],
546
+ average_p[i],
547
+ )
548
+ )
549
+ parses = all_parses.keys()
550
+ if parses:
551
+ p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses)
552
+ else:
553
+ p = 0
554
+ print("------------------------+------------------------------------------")
555
+ print("%18s |%11s%11d%19.14f" % ("(All Parses)", "n/a", len(parses), p))
556
+
557
+ if draw_parses is None:
558
+ # Ask the user if we should draw the parses.
559
+ print()
560
+ print("Draw parses (y/n)? ", end=" ")
561
+ draw_parses = sys.stdin.readline().strip().lower().startswith("y")
562
+ if draw_parses:
563
+ from nltk.draw.tree import draw_trees
564
+
565
+ print(" please wait...")
566
+ draw_trees(*parses)
567
+
568
+ if print_parses is None:
569
+ # Ask the user if we should print the parses.
570
+ print()
571
+ print("Print parses (y/n)? ", end=" ")
572
+ print_parses = sys.stdin.readline().strip().lower().startswith("y")
573
+ if print_parses:
574
+ for parse in parses:
575
+ print(parse)
576
+
577
+
578
+ if __name__ == "__main__":
579
+ demo()
venv/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py ADDED
@@ -0,0 +1,716 @@
1
+ # Natural Language Toolkit: Dependency Grammars
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Jason Narad <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ #
9
+
10
+ from collections import defaultdict
11
+ from functools import total_ordering
12
+ from itertools import chain
13
+
14
+ from nltk.grammar import (
15
+ DependencyGrammar,
16
+ DependencyProduction,
17
+ ProbabilisticDependencyGrammar,
18
+ )
19
+ from nltk.internals import raise_unorderable_types
20
+ from nltk.parse.dependencygraph import DependencyGraph
21
+
22
+ #################################################################
23
+ # Dependency Span
24
+ #################################################################
25
+
26
+
27
+ @total_ordering
28
+ class DependencySpan:
29
+ """
30
+ A contiguous span over some part of the input string representing
31
+ dependency (head -> modifier) relationships amongst words. An atomic
32
+ span corresponds to only one word so it isn't a 'span' in the conventional
33
+ sense, as its _start_index = _end_index = _head_index for concatenation
34
+ purposes. All other spans are assumed to have arcs between all nodes
35
+ within the start and end indexes of the span, and one head index corresponding
36
+ to the head word for the entire span. This is the same as the root node if
37
+ the dependency structure were depicted as a graph.
38
+ """
39
+
40
+ def __init__(self, start_index, end_index, head_index, arcs, tags):
41
+ self._start_index = start_index
42
+ self._end_index = end_index
43
+ self._head_index = head_index
44
+ self._arcs = arcs
45
+ self._tags = tags
46
+ self._comparison_key = (start_index, end_index, head_index, tuple(arcs))
47
+ self._hash = hash(self._comparison_key)
48
+
49
+ def head_index(self):
50
+ """
51
+ :return: A value indexing the head of the entire ``DependencySpan``.
52
+ :rtype: int
53
+ """
54
+ return self._head_index
55
+
56
+ def __repr__(self):
57
+ """
58
+ :return: A concise string representation of the ``DependencySpan``.
59
+ :rtype: str.
60
+ """
61
+ return "Span %d-%d; Head Index: %d" % (
62
+ self._start_index,
63
+ self._end_index,
64
+ self._head_index,
65
+ )
66
+
67
+ def __str__(self):
68
+ """
69
+ :return: A verbose string representation of the ``DependencySpan``.
70
+ :rtype: str
71
+ """
72
+ str = "Span %d-%d; Head Index: %d" % (
73
+ self._start_index,
74
+ self._end_index,
75
+ self._head_index,
76
+ )
77
+ for i in range(len(self._arcs)):
78
+ str += "\n%d <- %d, %s" % (i, self._arcs[i], self._tags[i])
79
+ return str
80
+
81
+ def __eq__(self, other):
82
+ return (
83
+ type(self) == type(other) and self._comparison_key == other._comparison_key
84
+ )
85
+
86
+ def __ne__(self, other):
87
+ return not self == other
88
+
89
+ def __lt__(self, other):
90
+ if not isinstance(other, DependencySpan):
91
+ raise_unorderable_types("<", self, other)
92
+ return self._comparison_key < other._comparison_key
93
+
94
+ def __hash__(self):
95
+ """
96
+ :return: The hash value of this ``DependencySpan``.
97
+ """
98
+ return self._hash
99
+
100
+
101
+ #################################################################
102
+ # Chart Cell
103
+ #################################################################
104
+
105
+
106
+ class ChartCell:
107
+ """
108
+ A cell from the parse chart formed when performing the CYK algorithm.
109
+ Each cell keeps track of its x and y coordinates (though this will probably
110
+ be discarded), and a list of spans serving as the cell's entries.
111
+ """
112
+
113
+ def __init__(self, x, y):
114
+ """
115
+ :param x: This cell's x coordinate.
116
+ :type x: int.
117
+ :param y: This cell's y coordinate.
118
+ :type y: int.
119
+ """
120
+ self._x = x
121
+ self._y = y
122
+ self._entries = set()
123
+
124
+ def add(self, span):
125
+ """
126
+ Adds the given span to the set of spans
127
+ representing the chart cell's entries.
128
+
129
+ :param span: The span to add.
130
+ :type span: DependencySpan
131
+ """
132
+ self._entries.add(span)
133
+
134
+ def __str__(self):
135
+ """
136
+ :return: A verbose string representation of this ``ChartCell``.
137
+ :rtype: str.
138
+ """
139
+ return "CC[%d,%d]: %s" % (self._x, self._y, self._entries)
140
+
141
+ def __repr__(self):
142
+ """
143
+ :return: A concise string representation of this ``ChartCell``.
144
+ :rtype: str.
145
+ """
146
+ return "%s" % self
147
+
148
+
149
+ #################################################################
150
+ # Parsing with Dependency Grammars
151
+ #################################################################
152
+
153
+
154
+ class ProjectiveDependencyParser:
155
+ """
156
+ A projective, rule-based, dependency parser. A ProjectiveDependencyParser
157
+ is created with a DependencyGrammar, a set of productions specifying
158
+ word-to-word dependency relations. The parse() method will then
159
+ return the set of all parses, in tree representation, for a given input
160
+ sequence of tokens. Each parse must meet the requirements of both
161
+ the grammar and the projectivity constraint, which specifies that the
162
+ branches of the dependency tree are not allowed to cross. Alternatively,
163
+ this can be understood as stating that each parent node and its children
164
+ in the parse tree form a continuous substring of the input sequence.
165
+ """
166
+
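+ # Example usage (an illustrative sketch):
+ #
+ #     from nltk.grammar import DependencyGrammar
+ #
+ #     grammar = DependencyGrammar.fromstring("""
+ #         'scratch' -> 'cats' | 'walls'
+ #         'walls' -> 'the'
+ #         'cats' -> 'the'
+ #     """)
+ #     parser = ProjectiveDependencyParser(grammar)
+ #     for tree in parser.parse(['the', 'cats', 'scratch', 'the', 'walls']):
+ #         print(tree)
+ #     # e.g. (scratch (cats the) (walls the))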
167
+ def __init__(self, dependency_grammar):
168
+ """
169
+ Create a new ProjectiveDependencyParser, from a word-to-word
170
+ dependency grammar ``DependencyGrammar``.
171
+
172
+ :param dependency_grammar: A word-to-word relation dependencygrammar.
173
+ :type dependency_grammar: DependencyGrammar
174
+ """
175
+ self._grammar = dependency_grammar
176
+
177
+ def parse(self, tokens):
178
+ """
179
+ Performs a projective dependency parse on the list of tokens using
180
+ a chart-based, span-concatenation algorithm similar to Eisner (1996).
181
+
182
+ :param tokens: The list of input tokens.
183
+ :type tokens: list(str)
184
+ :return: An iterator over parse trees.
185
+ :rtype: iter(Tree)
186
+ """
187
+ self._tokens = list(tokens)
188
+ chart = []
189
+ for i in range(0, len(self._tokens) + 1):
190
+ chart.append([])
191
+ for j in range(0, len(self._tokens) + 1):
192
+ chart[i].append(ChartCell(i, j))
193
+ if i == j + 1:
194
+ chart[i][j].add(DependencySpan(i - 1, i, i - 1, [-1], ["null"]))
195
+
196
+ for i in range(1, len(self._tokens) + 1):
197
+ for j in range(i - 2, -1, -1):
198
+ for k in range(i - 1, j, -1):
199
+ for span1 in chart[k][j]._entries:
200
+ for span2 in chart[i][k]._entries:
201
+ for newspan in self.concatenate(span1, span2):
202
+ chart[i][j].add(newspan)
203
+
204
+ for parse in chart[len(self._tokens)][0]._entries:
205
+ conll_format = ""
206
+ # malt_format = ""
207
+ for i in range(len(tokens)):
208
+ # malt_format += '%s\t%s\t%d\t%s\n' % (tokens[i], 'null', parse._arcs[i] + 1, 'null')
209
+ # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], 'null', 'null', 'null', parse._arcs[i] + 1, 'null', '-', '-')
210
+ # Modified to comply with the new DependencyGraph requirement (there must be at least one ROOT element)
211
+ conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % (
212
+ i + 1,
213
+ tokens[i],
214
+ tokens[i],
215
+ "null",
216
+ "null",
217
+ "null",
218
+ parse._arcs[i] + 1,
219
+ "ROOT",
220
+ "-",
221
+ "-",
222
+ )
223
+ dg = DependencyGraph(conll_format)
224
+ # if self.meets_arity(dg):
225
+ yield dg.tree()
226
+
227
+ def concatenate(self, span1, span2):
228
+ """
229
+ Concatenates the two spans in whichever way possible. This
230
+ includes rightward concatenation (from the leftmost word of the
231
+ leftmost span to the rightmost word of the rightmost span) and
232
+ leftward concatenation (vice-versa) between adjacent spans. Unlike
233
+ Eisner's presentation of span concatenation, these spans do not
234
+ share or pivot on a particular word/word-index.
235
+
236
+ :return: A list of new spans formed through concatenation.
237
+ :rtype: list(DependencySpan)
238
+ """
239
+ spans = []
240
+ if span1._start_index == span2._start_index:
241
+ print("Error: Mismatched spans - replace this with thrown error")
242
+ if span1._start_index > span2._start_index:
243
+ temp_span = span1
244
+ span1 = span2
245
+ span2 = temp_span
246
+ # adjacent rightward covered concatenation
247
+ new_arcs = span1._arcs + span2._arcs
248
+ new_tags = span1._tags + span2._tags
249
+ if self._grammar.contains(
250
+ self._tokens[span1._head_index], self._tokens[span2._head_index]
251
+ ):
252
+ # print('Performing rightward cover %d to %d' % (span1._head_index, span2._head_index))
253
+ new_arcs[span2._head_index - span1._start_index] = span1._head_index
254
+ spans.append(
255
+ DependencySpan(
256
+ span1._start_index,
257
+ span2._end_index,
258
+ span1._head_index,
259
+ new_arcs,
260
+ new_tags,
261
+ )
262
+ )
263
+ # adjacent leftward covered concatenation
264
+ new_arcs = span1._arcs + span2._arcs
265
+ if self._grammar.contains(
266
+ self._tokens[span2._head_index], self._tokens[span1._head_index]
267
+ ):
268
+ # print('performing leftward cover %d to %d' % (span2._head_index, span1._head_index))
269
+ new_arcs[span1._head_index - span1._start_index] = span2._head_index
270
+ spans.append(
271
+ DependencySpan(
272
+ span1._start_index,
273
+ span2._end_index,
274
+ span2._head_index,
275
+ new_arcs,
276
+ new_tags,
277
+ )
278
+ )
279
+ return spans
280
+
281
+
282
+ #################################################################
283
+ # Parsing with Probabilistic Dependency Grammars
284
+ #################################################################
285
+
286
+
287
+ class ProbabilisticProjectiveDependencyParser:
288
+ """A probabilistic, projective dependency parser.
289
+
290
+ This parser returns the most probable projective parse derived from the
291
+ probabilistic dependency grammar induced by the train() method. The
292
+ probabilistic model is an implementation of Eisner's (1996) Model C, which
293
+ conditions on head-word, head-tag, child-word, and child-tag. The decoding
294
+ uses a bottom-up chart-based span concatenation algorithm that's identical
295
+ to the one utilized by the rule-based projective parser.
296
+
297
+ Usage example
298
+
299
+ >>> from nltk.parse.dependencygraph import conll_data2
300
+
301
+ >>> graphs = [
302
+ ... DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry
303
+ ... ]
304
+
305
+ >>> ppdp = ProbabilisticProjectiveDependencyParser()
306
+ >>> ppdp.train(graphs)
307
+
308
+ >>> sent = ['Cathy', 'zag', 'hen', 'wild', 'zwaaien', '.']
309
+ >>> list(ppdp.parse(sent))
310
+ [Tree('zag', ['Cathy', 'hen', Tree('zwaaien', ['wild', '.'])])]
311
+
312
+ """
313
+
314
+ def __init__(self):
315
+ """
316
+ Create a new probabilistic dependency parser. No additional
317
+ operations are necessary.
318
+ """
319
+
320
+ def parse(self, tokens):
321
+ """
322
+ Parses the list of tokens subject to the projectivity constraint
323
+ and the productions in the parser's grammar. This uses a method
324
+ similar to the span-concatenation algorithm defined in Eisner (1996).
325
+ It returns the most probable parse derived from the parser's
326
+ probabilistic dependency grammar.
327
+ """
328
+ self._tokens = list(tokens)
329
+ chart = []
330
+ for i in range(0, len(self._tokens) + 1):
331
+ chart.append([])
332
+ for j in range(0, len(self._tokens) + 1):
333
+ chart[i].append(ChartCell(i, j))
334
+ if i == j + 1:
335
+ if tokens[i - 1] in self._grammar._tags:
336
+ for tag in self._grammar._tags[tokens[i - 1]]:
337
+ chart[i][j].add(
338
+ DependencySpan(i - 1, i, i - 1, [-1], [tag])
339
+ )
340
+ else:
341
+ print(
342
+ "No tag found for input token '%s', parse is impossible."
343
+ % tokens[i - 1]
344
+ )
345
+ return []
346
+ for i in range(1, len(self._tokens) + 1):
347
+ for j in range(i - 2, -1, -1):
348
+ for k in range(i - 1, j, -1):
349
+ for span1 in chart[k][j]._entries:
350
+ for span2 in chart[i][k]._entries:
351
+ for newspan in self.concatenate(span1, span2):
352
+ chart[i][j].add(newspan)
353
+ trees = []
354
+ max_parse = None
355
+ max_score = 0
356
+ for parse in chart[len(self._tokens)][0]._entries:
357
+ conll_format = ""
358
+ malt_format = ""
359
+ for i in range(len(tokens)):
360
+ malt_format += "%s\t%s\t%d\t%s\n" % (
361
+ tokens[i],
362
+ "null",
363
+ parse._arcs[i] + 1,
364
+ "null",
365
+ )
366
+ # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], parse._tags[i], parse._tags[i], 'null', parse._arcs[i] + 1, 'null', '-', '-')
367
+ # Modified to comply with the recent change in DependencyGraph: there must be a ROOT element.
368
+ conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % (
369
+ i + 1,
370
+ tokens[i],
371
+ tokens[i],
372
+ parse._tags[i],
373
+ parse._tags[i],
374
+ "null",
375
+ parse._arcs[i] + 1,
376
+ "ROOT",
377
+ "-",
378
+ "-",
379
+ )
380
+ dg = DependencyGraph(conll_format)
381
+ score = self.compute_prob(dg)
382
+ trees.append((score, dg.tree()))
383
+ trees.sort()
384
+ return (tree for (score, tree) in trees)
385
+
386
+ def concatenate(self, span1, span2):
387
+ """
388
+ Concatenates the two spans in whichever way possible. This
389
+ includes rightward concatenation (from the leftmost word of the
390
+ leftmost span to the rightmost word of the rightmost span) and
391
+ leftward concatenation (vice-versa) between adjacent spans. Unlike
392
+ Eisner's presentation of span concatenation, these spans do not
393
+ share or pivot on a particular word/word-index.
394
+
395
+ :return: A list of new spans formed through concatenation.
396
+ :rtype: list(DependencySpan)
397
+ """
398
+ spans = []
399
+ if span1._start_index == span2._start_index:
400
+ print("Error: Mismatched spans - replace this with thrown error")
401
+ if span1._start_index > span2._start_index:
402
+ temp_span = span1
403
+ span1 = span2
404
+ span2 = temp_span
405
+ # adjacent rightward covered concatenation
406
+ new_arcs = span1._arcs + span2._arcs
407
+ new_tags = span1._tags + span2._tags
408
+ if self._grammar.contains(
409
+ self._tokens[span1._head_index], self._tokens[span2._head_index]
410
+ ):
411
+ new_arcs[span2._head_index - span1._start_index] = span1._head_index
412
+ spans.append(
413
+ DependencySpan(
414
+ span1._start_index,
415
+ span2._end_index,
416
+ span1._head_index,
417
+ new_arcs,
418
+ new_tags,
419
+ )
420
+ )
421
+ # adjacent leftward covered concatenation
422
+ new_arcs = span1._arcs + span2._arcs
423
+ new_tags = span1._tags + span2._tags
424
+ if self._grammar.contains(
425
+ self._tokens[span2._head_index], self._tokens[span1._head_index]
426
+ ):
427
+ new_arcs[span1._head_index - span1._start_index] = span2._head_index
428
+ spans.append(
429
+ DependencySpan(
430
+ span1._start_index,
431
+ span2._end_index,
432
+ span2._head_index,
433
+ new_arcs,
434
+ new_tags,
435
+ )
436
+ )
437
+ return spans
438
+
439
+ def train(self, graphs):
440
+ """
441
+ Trains a ProbabilisticDependencyGrammar based on the list of input
442
+ DependencyGraphs. This model is an implementation of Eisner's (1996)
443
+ Model C, which derives its statistics from head-word, head-tag,
444
+ child-word, and child-tag relationships.
445
+
446
+ :param graphs: A list of dependency graphs to train from.
447
+ :type graphs: list(DependencyGraph)
448
+ """
449
+ productions = []
450
+ events = defaultdict(int)
451
+ tags = {}
452
+ for dg in graphs:
453
+ for node_index in range(1, len(dg.nodes)):
454
+ # children = dg.nodes[node_index]['deps']
455
+ children = list(
456
+ chain.from_iterable(dg.nodes[node_index]["deps"].values())
457
+ )
458
+
459
+ nr_left_children = dg.left_children(node_index)
460
+ nr_right_children = dg.right_children(node_index)
461
+ nr_children = nr_left_children + nr_right_children
462
+ for child_index in range(
463
+ 0 - (nr_left_children + 1), nr_right_children + 2
464
+ ):
465
+ head_word = dg.nodes[node_index]["word"]
466
+ head_tag = dg.nodes[node_index]["tag"]
467
+ if head_word in tags:
468
+ tags[head_word].add(head_tag)
469
+ else:
470
+ tags[head_word] = {head_tag}
471
+ child = "STOP"
472
+ child_tag = "STOP"
473
+ prev_word = "START"
474
+ prev_tag = "START"
475
+ if child_index < 0:
476
+ array_index = child_index + nr_left_children
477
+ if array_index >= 0:
478
+ child = dg.nodes[children[array_index]]["word"]
479
+ child_tag = dg.nodes[children[array_index]]["tag"]
480
+ if child_index != -1:
481
+ prev_word = dg.nodes[children[array_index + 1]]["word"]
482
+ prev_tag = dg.nodes[children[array_index + 1]]["tag"]
483
+ if child != "STOP":
484
+ productions.append(DependencyProduction(head_word, [child]))
485
+ head_event = "(head ({} {}) (mods ({}, {}, {}) left))".format(
486
+ child,
487
+ child_tag,
488
+ prev_tag,
489
+ head_word,
490
+ head_tag,
491
+ )
492
+ mod_event = "(mods ({}, {}, {}) left))".format(
493
+ prev_tag,
494
+ head_word,
495
+ head_tag,
496
+ )
497
+ events[head_event] += 1
498
+ events[mod_event] += 1
499
+ elif child_index > 0:
500
+ array_index = child_index + nr_left_children - 1
501
+ if array_index < nr_children:
502
+ child = dg.nodes[children[array_index]]["word"]
503
+ child_tag = dg.nodes[children[array_index]]["tag"]
504
+ if child_index != 1:
505
+ prev_word = dg.nodes[children[array_index - 1]]["word"]
506
+ prev_tag = dg.nodes[children[array_index - 1]]["tag"]
507
+ if child != "STOP":
508
+ productions.append(DependencyProduction(head_word, [child]))
509
+ head_event = "(head ({} {}) (mods ({}, {}, {}) right))".format(
510
+ child,
511
+ child_tag,
512
+ prev_tag,
513
+ head_word,
514
+ head_tag,
515
+ )
516
+ mod_event = "(mods ({}, {}, {}) right))".format(
517
+ prev_tag,
518
+ head_word,
519
+ head_tag,
520
+ )
521
+ events[head_event] += 1
522
+ events[mod_event] += 1
523
+ self._grammar = ProbabilisticDependencyGrammar(productions, events, tags)
524
+
525
+ def compute_prob(self, dg):
526
+ """
527
+ Computes the probability of a dependency graph based
528
+ on the parser's probability model (defined by the parser's
529
+ statistical dependency grammar).
530
+
531
+ :param dg: A dependency graph to score.
532
+ :type dg: DependencyGraph
533
+ :return: The probability of the dependency graph.
534
+ :rtype: float
535
+ """
536
+ prob = 1.0
537
+ for node_index in range(1, len(dg.nodes)):
538
+ # children = dg.nodes[node_index]['deps']
539
+ children = list(chain.from_iterable(dg.nodes[node_index]["deps"].values()))
540
+
541
+ nr_left_children = dg.left_children(node_index)
542
+ nr_right_children = dg.right_children(node_index)
543
+ nr_children = nr_left_children + nr_right_children
544
+ for child_index in range(0 - (nr_left_children + 1), nr_right_children + 2):
545
+ head_word = dg.nodes[node_index]["word"]
546
+ head_tag = dg.nodes[node_index]["tag"]
547
+ child = "STOP"
548
+ child_tag = "STOP"
549
+ prev_word = "START"
550
+ prev_tag = "START"
551
+ if child_index < 0:
552
+ array_index = child_index + nr_left_children
553
+ if array_index >= 0:
554
+ child = dg.nodes[children[array_index]]["word"]
555
+ child_tag = dg.nodes[children[array_index]]["tag"]
556
+ if child_index != -1:
557
+ prev_word = dg.nodes[children[array_index + 1]]["word"]
558
+ prev_tag = dg.nodes[children[array_index + 1]]["tag"]
559
+ head_event = "(head ({} {}) (mods ({}, {}, {}) left))".format(
560
+ child,
561
+ child_tag,
562
+ prev_tag,
563
+ head_word,
564
+ head_tag,
565
+ )
566
+ mod_event = "(mods ({}, {}, {}) left))".format(
567
+ prev_tag,
568
+ head_word,
569
+ head_tag,
570
+ )
571
+ h_count = self._grammar._events[head_event]
572
+ m_count = self._grammar._events[mod_event]
573
+
574
+ # Back off to a tiny probability when the grammar does not cover this event
575
+ if m_count != 0:
576
+ prob *= h_count / m_count
577
+ else:
578
+ prob = 0.00000001 # Very small number
579
+
580
+ elif child_index > 0:
581
+ array_index = child_index + nr_left_children - 1
582
+ if array_index < nr_children:
583
+ child = dg.nodes[children[array_index]]["word"]
584
+ child_tag = dg.nodes[children[array_index]]["tag"]
585
+ if child_index != 1:
586
+ prev_word = dg.nodes[children[array_index - 1]]["word"]
587
+ prev_tag = dg.nodes[children[array_index - 1]]["tag"]
588
+ head_event = "(head ({} {}) (mods ({}, {}, {}) right))".format(
589
+ child,
590
+ child_tag,
591
+ prev_tag,
592
+ head_word,
593
+ head_tag,
594
+ )
595
+ mod_event = "(mods ({}, {}, {}) right))".format(
596
+ prev_tag,
597
+ head_word,
598
+ head_tag,
599
+ )
600
+ h_count = self._grammar._events[head_event]
601
+ m_count = self._grammar._events[mod_event]
602
+
603
+ if m_count != 0:
604
+ prob *= h_count / m_count
605
+ else:
606
+ prob = 0.00000001 # Very small number
607
+
608
+ return prob
609
+
610
+
611
+ #################################################################
612
+ # Demos
613
+ #################################################################
614
+
615
+
616
+ def demo():
617
+ projective_rule_parse_demo()
618
+ # arity_parse_demo()
619
+ projective_prob_parse_demo()
620
+
621
+
622
+ def projective_rule_parse_demo():
623
+ """
624
+ A demonstration showing the creation and use of a
625
+ ``DependencyGrammar`` to perform a projective dependency
626
+ parse.
627
+ """
628
+ grammar = DependencyGrammar.fromstring(
629
+ """
630
+ 'scratch' -> 'cats' | 'walls'
631
+ 'walls' -> 'the'
632
+ 'cats' -> 'the'
633
+ """
634
+ )
635
+ print(grammar)
636
+ pdp = ProjectiveDependencyParser(grammar)
637
+ trees = pdp.parse(["the", "cats", "scratch", "the", "walls"])
638
+ for tree in trees:
639
+ print(tree)
640
+
641
+
642
+ def arity_parse_demo():
643
+ """
644
+ A demonstration showing the creation of a ``DependencyGrammar``
645
+ in which a specific number of modifiers is listed for a given
646
+ head. This can further constrain the number of possible parses
647
+ created by a ``ProjectiveDependencyParser``.
648
+ """
649
+ print()
650
+ print("A grammar with no arity constraints. Each DependencyProduction")
651
+ print("specifies a relationship between one head word and only one")
652
+ print("modifier word.")
653
+ grammar = DependencyGrammar.fromstring(
654
+ """
655
+ 'fell' -> 'price' | 'stock'
656
+ 'price' -> 'of' | 'the'
657
+ 'of' -> 'stock'
658
+ 'stock' -> 'the'
659
+ """
660
+ )
661
+ print(grammar)
662
+
663
+ print()
664
+ print("For the sentence 'The price of the stock fell', this grammar")
665
+ print("will produce the following three parses:")
666
+ pdp = ProjectiveDependencyParser(grammar)
667
+ trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"])
668
+ for tree in trees:
669
+ print(tree)
670
+
671
+ print()
672
+ print("By contrast, the following grammar contains a ")
673
+ print("DependencyProduction that specifies a relationship")
674
+ print("between a single head word, 'price', and two modifier")
675
+ print("words, 'of' and 'the'.")
676
+ grammar = DependencyGrammar.fromstring(
677
+ """
678
+ 'fell' -> 'price' | 'stock'
679
+ 'price' -> 'of' 'the'
680
+ 'of' -> 'stock'
681
+ 'stock' -> 'the'
682
+ """
683
+ )
684
+ print(grammar)
685
+
686
+ print()
687
+ print(
688
+ "This constrains the number of possible parses to just one:"
689
+ ) # unimplemented, soon to replace
690
+ pdp = ProjectiveDependencyParser(grammar)
691
+ trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"])
692
+ for tree in trees:
693
+ print(tree)
694
+
695
+
696
+ def projective_prob_parse_demo():
697
+ """
698
+ A demo showing the training and use of a projective
699
+ dependency parser.
700
+ """
701
+ from nltk.parse.dependencygraph import conll_data2
702
+
703
+ graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
704
+ ppdp = ProbabilisticProjectiveDependencyParser()
705
+ print("Training Probabilistic Projective Dependency Parser...")
706
+ ppdp.train(graphs)
707
+
708
+ sent = ["Cathy", "zag", "hen", "wild", "zwaaien", "."]
709
+ print("Parsing '", " ".join(sent), "'...")
710
+ print("Parse:")
711
+ for tree in ppdp.parse(sent):
712
+ print(tree)
713
+
714
+
715
+ if __name__ == "__main__":
716
+ demo()
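A minimal usage sketch for the parser file above (an editorial illustration, not part of the committed nltk file): it trains ProbabilisticProjectiveDependencyParser on the bundled conll_data2 sample exactly as projective_prob_parse_demo does, then scores a gold graph with compute_prob and parses a raw sentence. The import paths follow the nltk.parse package layout added in this commit.

from nltk.parse.dependencygraph import DependencyGraph, conll_data2
from nltk.parse.projectivedependencyparser import ProbabilisticProjectiveDependencyParser

# Build training graphs and fit the head/modifier event counts (Eisner's Model C).
graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
ppdp = ProbabilisticProjectiveDependencyParser()
ppdp.train(graphs)

# Score one of the training graphs under the learned statistics ...
print(ppdp.compute_prob(graphs[0]))

# ... and parse a fresh token sequence; parse() yields candidate trees.
for tree in ppdp.parse(["Cathy", "zag", "hen", "wild", "zwaaien", "."]):
    print(tree)
    break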
venv/lib/python3.10/site-packages/nltk/parse/recursivedescent.py ADDED
@@ -0,0 +1,684 @@
1
+ # Natural Language Toolkit: Recursive Descent Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from nltk.grammar import Nonterminal
10
+ from nltk.parse.api import ParserI
11
+ from nltk.tree import ImmutableTree, Tree
12
+
13
+
14
+ ##//////////////////////////////////////////////////////
15
+ ## Recursive Descent Parser
16
+ ##//////////////////////////////////////////////////////
17
+ class RecursiveDescentParser(ParserI):
18
+ """
19
+ A simple top-down CFG parser that parses texts by recursively
20
+ expanding the fringe of a Tree, and matching it against a
21
+ text.
22
+
23
+ ``RecursiveDescentParser`` uses a list of tree locations called a
24
+ "frontier" to remember which subtrees have not yet been expanded
25
+ and which leaves have not yet been matched against the text. Each
26
+ tree location consists of a list of child indices specifying the
27
+ path from the root of the tree to a subtree or a leaf; see the
28
+ reference documentation for Tree for more information
29
+ about tree locations.
30
+
31
+ When the parser begins parsing a text, it constructs a tree
32
+ containing only the start symbol, and a frontier containing the
33
+ location of the tree's root node. It then extends the tree to
34
+ cover the text, using the following recursive procedure:
35
+
36
+ - If the frontier is empty, and the text is covered by the tree,
37
+ then return the tree as a possible parse.
38
+ - If the frontier is empty, and the text is not covered by the
39
+ tree, then return no parses.
40
+ - If the first element of the frontier is a subtree, then
41
+ use CFG productions to "expand" it. For each applicable
42
+ production, add the expanded subtree's children to the
43
+ frontier, and recursively find all parses that can be
44
+ generated by the new tree and frontier.
45
+ - If the first element of the frontier is a token, then "match"
46
+ it against the next token from the text. Remove the token
47
+ from the frontier, and recursively find all parses that can be
48
+ generated by the new tree and frontier.
49
+
50
+ :see: ``nltk.grammar``
51
+ """
52
+
53
+ def __init__(self, grammar, trace=0):
54
+ """
55
+ Create a new ``RecursiveDescentParser``, that uses ``grammar``
56
+ to parse texts.
57
+
58
+ :type grammar: CFG
59
+ :param grammar: The grammar used to parse texts.
60
+ :type trace: int
61
+ :param trace: The level of tracing that should be used when
62
+ parsing a text. ``0`` will generate no tracing output;
63
+ and higher numbers will produce more verbose tracing
64
+ output.
65
+ """
66
+ self._grammar = grammar
67
+ self._trace = trace
68
+
69
+ def grammar(self):
70
+ return self._grammar
71
+
72
+ def parse(self, tokens):
73
+ # Inherit docs from ParserI
74
+
75
+ tokens = list(tokens)
76
+ self._grammar.check_coverage(tokens)
77
+
78
+ # Start a recursive descent parse, with an initial tree
79
+ # containing just the start symbol.
80
+ start = self._grammar.start().symbol()
81
+ initial_tree = Tree(start, [])
82
+ frontier = [()]
83
+ if self._trace:
84
+ self._trace_start(initial_tree, frontier, tokens)
85
+ return self._parse(tokens, initial_tree, frontier)
86
+
87
+ def _parse(self, remaining_text, tree, frontier):
88
+ """
89
+ Recursively expand and match each element of ``tree``
90
+ specified by ``frontier``, to cover ``remaining_text``. Return
91
+ a list of all parses found.
92
+
93
+ :return: An iterator of all parses that can be generated by
94
+ matching and expanding the elements of ``tree``
95
+ specified by ``frontier``.
96
+ :rtype: iter(Tree)
97
+ :type tree: Tree
98
+ :param tree: A partial structure for the text that is
99
+ currently being parsed. The elements of ``tree``
100
+ that are specified by ``frontier`` have not yet been
101
+ expanded or matched.
102
+ :type remaining_text: list(str)
103
+ :param remaining_text: The portion of the text that is not yet
104
+ covered by ``tree``.
105
+ :type frontier: list(tuple(int))
106
+ :param frontier: A list of the locations within ``tree`` of
107
+ all subtrees that have not yet been expanded, and all
108
+ leaves that have not yet been matched. This list sorted
109
+ in left-to-right order of location within the tree.
110
+ """
111
+
112
+ # If the tree covers the text, and there's nothing left to
113
+ # expand, then we've found a complete parse; return it.
114
+ if len(remaining_text) == 0 and len(frontier) == 0:
115
+ if self._trace:
116
+ self._trace_succeed(tree, frontier)
117
+ yield tree
118
+
119
+ # If there's still text, but nothing left to expand, we failed.
120
+ elif len(frontier) == 0:
121
+ if self._trace:
122
+ self._trace_backtrack(tree, frontier)
123
+
124
+ # If the next element on the frontier is a tree, expand it.
125
+ elif isinstance(tree[frontier[0]], Tree):
126
+ yield from self._expand(remaining_text, tree, frontier)
127
+
128
+ # If the next element on the frontier is a token, match it.
129
+ else:
130
+ yield from self._match(remaining_text, tree, frontier)
131
+
132
+ def _match(self, rtext, tree, frontier):
133
+ """
134
+ :rtype: iter(Tree)
135
+ :return: an iterator of all parses that can be generated by
136
+ matching the first element of ``frontier`` against the
137
+ first token in ``rtext``. In particular, if the first
138
+ element of ``frontier`` has the same type as the first
139
+ token in ``rtext``, then substitute the token into
140
+ ``tree``; and return all parses that can be generated by
141
+ matching and expanding the remaining elements of
142
+ ``frontier``. If the first element of ``frontier`` does not
143
+ have the same type as the first token in ``rtext``, then
144
+ return an empty list.
145
+
146
+ :type tree: Tree
147
+ :param tree: A partial structure for the text that is
148
+ currently being parsed. The elements of ``tree``
149
+ that are specified by ``frontier`` have not yet been
150
+ expanded or matched.
151
+ :type rtext: list(str)
152
+ :param rtext: The portion of the text that is not yet
153
+ covered by ``tree``.
154
+ :type frontier: list of tuple of int
155
+ :param frontier: A list of the locations within ``tree`` of
156
+ all subtrees that have not yet been expanded, and all
157
+ leaves that have not yet been matched.
158
+ """
159
+
160
+ tree_leaf = tree[frontier[0]]
161
+ if len(rtext) > 0 and tree_leaf == rtext[0]:
162
+ # If it's a terminal that matches rtext[0], then substitute
163
+ # in the token, and continue parsing.
164
+ newtree = tree.copy(deep=True)
165
+ newtree[frontier[0]] = rtext[0]
166
+ if self._trace:
167
+ self._trace_match(newtree, frontier[1:], rtext[0])
168
+ yield from self._parse(rtext[1:], newtree, frontier[1:])
169
+ else:
170
+ # If it's a non-matching terminal, fail.
171
+ if self._trace:
172
+ self._trace_backtrack(tree, frontier, rtext[:1])
173
+
174
+ def _expand(self, remaining_text, tree, frontier, production=None):
175
+ """
176
+ :rtype: iter(Tree)
177
+ :return: An iterator of all parses that can be generated by
178
+ expanding the first element of ``frontier`` with
179
+ ``production``. In particular, if the first element of
180
+ ``frontier`` is a subtree whose node type is equal to
181
+ ``production``'s left hand side, then add a child to that
182
+ subtree for each element of ``production``'s right hand
183
+ side; and return all parses that can be generated by
184
+ matching and expanding the remaining elements of
185
+ ``frontier``. If the first element of ``frontier`` is not a
186
+ subtree whose node type is equal to ``production``'s left
187
+ hand side, then return an empty list. If ``production`` is
188
+ not specified, then return a list of all parses that can
189
+ be generated by expanding the first element of ``frontier``
190
+ with *any* CFG production.
191
+
192
+ :type tree: Tree
193
+ :param tree: A partial structure for the text that is
194
+ currently being parsed. The elements of ``tree``
195
+ that are specified by ``frontier`` have not yet been
196
+ expanded or matched.
197
+ :type remaining_text: list(str)
198
+ :param remaining_text: The portion of the text that is not yet
199
+ covered by ``tree``.
200
+ :type frontier: list(tuple(int))
201
+ :param frontier: A list of the locations within ``tree`` of
202
+ all subtrees that have not yet been expanded, and all
203
+ leaves that have not yet been matched.
204
+ """
205
+
206
+ if production is None:
207
+ productions = self._grammar.productions()
208
+ else:
209
+ productions = [production]
210
+
211
+ for production in productions:
212
+ lhs = production.lhs().symbol()
213
+ if lhs == tree[frontier[0]].label():
214
+ subtree = self._production_to_tree(production)
215
+ if frontier[0] == ():
216
+ newtree = subtree
217
+ else:
218
+ newtree = tree.copy(deep=True)
219
+ newtree[frontier[0]] = subtree
220
+ new_frontier = [
221
+ frontier[0] + (i,) for i in range(len(production.rhs()))
222
+ ]
223
+ if self._trace:
224
+ self._trace_expand(newtree, new_frontier, production)
225
+ yield from self._parse(
226
+ remaining_text, newtree, new_frontier + frontier[1:]
227
+ )
228
+
229
+ def _production_to_tree(self, production):
230
+ """
231
+ :rtype: Tree
232
+ :return: The Tree that is licensed by ``production``.
233
+ In particular, given the production ``[lhs -> elt[1] ... elt[n]]``
234
+ return a tree that has a node ``lhs.symbol``, and
235
+ ``n`` children. For each nonterminal element
236
+ ``elt[i]`` in the production, the tree token has a
237
+ childless subtree with node value ``elt[i].symbol``; and
238
+ for each terminal element ``elt[j]``, the tree token has
239
+ a leaf token with type ``elt[j]``.
240
+
241
+ :param production: The CFG production that licenses the tree
242
+ token that should be returned.
243
+ :type production: Production
244
+ """
245
+ children = []
246
+ for elt in production.rhs():
247
+ if isinstance(elt, Nonterminal):
248
+ children.append(Tree(elt.symbol(), []))
249
+ else:
250
+ # This will be matched.
251
+ children.append(elt)
252
+ return Tree(production.lhs().symbol(), children)
253
+
254
+ def trace(self, trace=2):
255
+ """
256
+ Set the level of tracing output that should be generated when
257
+ parsing a text.
258
+
259
+ :type trace: int
260
+ :param trace: The trace level. A trace level of ``0`` will
261
+ generate no tracing output; and higher trace levels will
262
+ produce more verbose tracing output.
263
+ :rtype: None
264
+ """
265
+ self._trace = trace
266
+
267
+ def _trace_fringe(self, tree, treeloc=None):
268
+ """
269
+ Print trace output displaying the fringe of ``tree``. The
270
+ fringe of ``tree`` consists of all of its leaves and all of
271
+ its childless subtrees.
272
+
273
+ :rtype: None
274
+ """
275
+
276
+ if treeloc == ():
277
+ print("*", end=" ")
278
+ if isinstance(tree, Tree):
279
+ if len(tree) == 0:
280
+ print(repr(Nonterminal(tree.label())), end=" ")
281
+ for i in range(len(tree)):
282
+ if treeloc is not None and i == treeloc[0]:
283
+ self._trace_fringe(tree[i], treeloc[1:])
284
+ else:
285
+ self._trace_fringe(tree[i])
286
+ else:
287
+ print(repr(tree), end=" ")
288
+
289
+ def _trace_tree(self, tree, frontier, operation):
290
+ """
291
+ Print trace output displaying the parser's current state.
292
+
293
+ :param operation: A character identifying the operation that
294
+ generated the current state.
295
+ :rtype: None
296
+ """
297
+ if self._trace == 2:
298
+ print(" %c [" % operation, end=" ")
299
+ else:
300
+ print(" [", end=" ")
301
+ if len(frontier) > 0:
302
+ self._trace_fringe(tree, frontier[0])
303
+ else:
304
+ self._trace_fringe(tree)
305
+ print("]")
306
+
307
+ def _trace_start(self, tree, frontier, text):
308
+ print("Parsing %r" % " ".join(text))
309
+ if self._trace > 2:
310
+ print("Start:")
311
+ if self._trace > 1:
312
+ self._trace_tree(tree, frontier, " ")
313
+
314
+ def _trace_expand(self, tree, frontier, production):
315
+ if self._trace > 2:
316
+ print("Expand: %s" % production)
317
+ if self._trace > 1:
318
+ self._trace_tree(tree, frontier, "E")
319
+
320
+ def _trace_match(self, tree, frontier, tok):
321
+ if self._trace > 2:
322
+ print("Match: %r" % tok)
323
+ if self._trace > 1:
324
+ self._trace_tree(tree, frontier, "M")
325
+
326
+ def _trace_succeed(self, tree, frontier):
327
+ if self._trace > 2:
328
+ print("GOOD PARSE:")
329
+ if self._trace == 1:
330
+ print("Found a parse:\n%s" % tree)
331
+ if self._trace > 1:
332
+ self._trace_tree(tree, frontier, "+")
333
+
334
+ def _trace_backtrack(self, tree, frontier, toks=None):
335
+ if self._trace > 2:
336
+ if toks:
337
+ print("Backtrack: %r match failed" % toks[0])
338
+ else:
339
+ print("Backtrack")
340
+
341
+
342
+ ##//////////////////////////////////////////////////////
343
+ ## Stepping Recursive Descent Parser
344
+ ##//////////////////////////////////////////////////////
345
+ class SteppingRecursiveDescentParser(RecursiveDescentParser):
346
+ """
347
+ A ``RecursiveDescentParser`` that allows you to step through the
348
+ parsing process, performing a single operation at a time.
349
+
350
+ The ``initialize`` method is used to start parsing a text.
351
+ ``expand`` expands the first element on the frontier using a single
352
+ CFG production, and ``match`` matches the first element on the
353
+ frontier against the next text token. ``backtrack`` undoes the most
354
+ recent expand or match operation. ``step`` performs a single
355
+ expand, match, or backtrack operation. ``parses`` returns the set
356
+ of parses that have been found by the parser.
357
+
358
+ :ivar _history: A list of ``(rtext, tree, frontier)`` triples,
359
+ containing the previous states of the parser. This history is
360
+ used to implement the ``backtrack`` operation.
361
+ :ivar _tried_e: A record of all productions that have been tried
362
+ for a given tree. This record is used by ``expand`` to perform
363
+ the next untried production.
364
+ :ivar _tried_m: A record of what tokens have been matched for a
365
+ given tree. This record is used by ``step`` to decide whether
366
+ or not to match a token.
367
+ :see: ``nltk.grammar``
368
+ """
369
+
370
+ def __init__(self, grammar, trace=0):
371
+ super().__init__(grammar, trace)
372
+ self._rtext = None
373
+ self._tree = None
374
+ self._frontier = [()]
375
+ self._tried_e = {}
376
+ self._tried_m = {}
377
+ self._history = []
378
+ self._parses = []
379
+
380
+ # [XX] TEMPORARY HACK WARNING! This should be replaced with
381
+ # something nicer when we get the chance.
382
+ def _freeze(self, tree):
383
+ c = tree.copy()
384
+ # for pos in c.treepositions('leaves'):
385
+ # c[pos] = c[pos].freeze()
386
+ return ImmutableTree.convert(c)
387
+
388
+ def parse(self, tokens):
389
+ tokens = list(tokens)
390
+ self.initialize(tokens)
391
+ while self.step() is not None:
392
+ pass
393
+ return self.parses()
394
+
395
+ def initialize(self, tokens):
396
+ """
397
+ Start parsing a given text. This sets the parser's tree to
398
+ the start symbol, its frontier to the root node, and its
399
+ remaining text to ``tokens``.
400
+ """
401
+
402
+ self._rtext = tokens
403
+ start = self._grammar.start().symbol()
404
+ self._tree = Tree(start, [])
405
+ self._frontier = [()]
406
+ self._tried_e = {}
407
+ self._tried_m = {}
408
+ self._history = []
409
+ self._parses = []
410
+ if self._trace:
411
+ self._trace_start(self._tree, self._frontier, self._rtext)
412
+
413
+ def remaining_text(self):
414
+ """
415
+ :return: The portion of the text that is not yet covered by the
416
+ tree.
417
+ :rtype: list(str)
418
+ """
419
+ return self._rtext
420
+
421
+ def frontier(self):
422
+ """
423
+ :return: A list of the tree locations of all subtrees that
424
+ have not yet been expanded, and all leaves that have not
425
+ yet been matched.
426
+ :rtype: list(tuple(int))
427
+ """
428
+ return self._frontier
429
+
430
+ def tree(self):
431
+ """
432
+ :return: A partial structure for the text that is
433
+ currently being parsed. The elements specified by the
434
+ frontier have not yet been expanded or matched.
435
+ :rtype: Tree
436
+ """
437
+ return self._tree
438
+
439
+ def step(self):
440
+ """
441
+ Perform a single parsing operation. If an untried match is
442
+ possible, then perform the match, and return the matched
443
+ token. If an untried expansion is possible, then perform the
444
+ expansion, and return the production that it is based on. If
445
+ backtracking is possible, then backtrack, and return True.
446
+ Otherwise, return None.
447
+
448
+ :return: None if no operation was performed; a token if a match
449
+ was performed; a production if an expansion was performed;
450
+ and True if a backtrack operation was performed.
451
+ :rtype: Production or String or bool
452
+ """
453
+ # Try matching (if we haven't already)
454
+ if self.untried_match():
455
+ token = self.match()
456
+ if token is not None:
457
+ return token
458
+
459
+ # Try expanding.
460
+ production = self.expand()
461
+ if production is not None:
462
+ return production
463
+
464
+ # Try backtracking
465
+ if self.backtrack():
466
+ self._trace_backtrack(self._tree, self._frontier)
467
+ return True
468
+
469
+ # Nothing left to do.
470
+ return None
471
+
472
+ def expand(self, production=None):
473
+ """
474
+ Expand the first element of the frontier. In particular, if
475
+ the first element of the frontier is a subtree whose node type
476
+ is equal to ``production``'s left hand side, then add a child
477
+ to that subtree for each element of ``production``'s right hand
478
+ side. If ``production`` is not specified, then use the first
479
+ untried expandable production. If all expandable productions
480
+ have been tried, do nothing.
481
+
482
+ :return: The production used to expand the frontier, if an
483
+ expansion was performed. If no expansion was performed,
484
+ return None.
485
+ :rtype: Production or None
486
+ """
487
+
488
+ # Make sure we *can* expand.
489
+ if len(self._frontier) == 0:
490
+ return None
491
+ if not isinstance(self._tree[self._frontier[0]], Tree):
492
+ return None
493
+
494
+ # If they didn't specify a production, check all untried ones.
495
+ if production is None:
496
+ productions = self.untried_expandable_productions()
497
+ else:
498
+ productions = [production]
499
+
500
+ parses = []
501
+ for prod in productions:
502
+ # Record that we've tried this production now.
503
+ self._tried_e.setdefault(self._freeze(self._tree), []).append(prod)
504
+
505
+ # Try expanding.
506
+ for _result in self._expand(self._rtext, self._tree, self._frontier, prod):
507
+ return prod
508
+
509
+ # We didn't expand anything.
510
+ return None
511
+
512
+ def match(self):
513
+ """
514
+ Match the first element of the frontier. In particular, if
515
+ the first element of the frontier has the same type as the
516
+ next text token, then substitute the text token into the tree.
517
+
518
+ :return: The token matched, if a match operation was
519
+ performed. If no match was performed, return None
520
+ :rtype: str or None
521
+ """
522
+
523
+ # Record that we've tried matching this token.
524
+ tok = self._rtext[0]
525
+ self._tried_m.setdefault(self._freeze(self._tree), []).append(tok)
526
+
527
+ # Make sure we *can* match.
528
+ if len(self._frontier) == 0:
529
+ return None
530
+ if isinstance(self._tree[self._frontier[0]], Tree):
531
+ return None
532
+
533
+ for _result in self._match(self._rtext, self._tree, self._frontier):
534
+ # Return the token we just matched.
535
+ return self._history[-1][0][0]
536
+ return None
537
+
538
+ def backtrack(self):
539
+ """
540
+ Return the parser to its state before the most recent
541
+ match or expand operation. Calling ``backtrack`` repeatedly returns
542
+ the parser to successively earlier states. If no match or
543
+ expand operations have been performed, ``backtrack`` will make no
544
+ changes.
545
+
546
+ :return: true if an operation was successfully undone.
547
+ :rtype: bool
548
+ """
549
+ if len(self._history) == 0:
550
+ return False
551
+ (self._rtext, self._tree, self._frontier) = self._history.pop()
552
+ return True
553
+
554
+ def expandable_productions(self):
555
+ """
556
+ :return: A list of all the productions for which expansions
557
+ are available for the current parser state.
558
+ :rtype: list(Production)
559
+ """
560
+ # Make sure we *can* expand.
561
+ if len(self._frontier) == 0:
562
+ return []
563
+ frontier_child = self._tree[self._frontier[0]]
564
+ if len(self._frontier) == 0 or not isinstance(frontier_child, Tree):
565
+ return []
566
+
567
+ return [
568
+ p
569
+ for p in self._grammar.productions()
570
+ if p.lhs().symbol() == frontier_child.label()
571
+ ]
572
+
573
+ def untried_expandable_productions(self):
574
+ """
575
+ :return: A list of all the untried productions for which
576
+ expansions are available for the current parser state.
577
+ :rtype: list(Production)
578
+ """
579
+
580
+ tried_expansions = self._tried_e.get(self._freeze(self._tree), [])
581
+ return [p for p in self.expandable_productions() if p not in tried_expansions]
582
+
583
+ def untried_match(self):
584
+ """
585
+ :return: Whether the first element of the frontier is a token
586
+ that has not yet been matched.
587
+ :rtype: bool
588
+ """
589
+
590
+ if len(self._rtext) == 0:
591
+ return False
592
+ tried_matches = self._tried_m.get(self._freeze(self._tree), [])
593
+ return self._rtext[0] not in tried_matches
594
+
595
+ def currently_complete(self):
596
+ """
597
+ :return: Whether the parser's current state represents a
598
+ complete parse.
599
+ :rtype: bool
600
+ """
601
+ return len(self._frontier) == 0 and len(self._rtext) == 0
602
+
603
+ def _parse(self, remaining_text, tree, frontier):
604
+ """
605
+ A stub version of ``_parse`` that sets the parser's current
606
+ state to the given arguments. In ``RecursiveDescentParser``,
607
+ the ``_parse`` method is used to recursively continue parsing a
608
+ text. ``SteppingRecursiveDescentParser`` overrides it to
609
+ capture these recursive calls. It records the parser's old
610
+ state in the history (to allow for backtracking), and updates
611
+ the parser's new state using the given arguments. Finally, it
612
+ returns ``[1]``, which is used by ``match`` and ``expand`` to
613
+ detect whether their operations were successful.
614
+
615
+ :return: ``[1]``
616
+ :rtype: list of int
617
+ """
618
+ self._history.append((self._rtext, self._tree, self._frontier))
619
+ self._rtext = remaining_text
620
+ self._tree = tree
621
+ self._frontier = frontier
622
+
623
+ # Is it a good parse? If so, record it.
624
+ if len(frontier) == 0 and len(remaining_text) == 0:
625
+ self._parses.append(tree)
626
+ self._trace_succeed(self._tree, self._frontier)
627
+
628
+ return [1]
629
+
630
+ def parses(self):
631
+ """
632
+ :return: An iterator of the parses that have been found by this
633
+ parser so far.
634
+ :rtype: iter(Tree)
635
+ """
636
+ return iter(self._parses)
637
+
638
+ def set_grammar(self, grammar):
639
+ """
640
+ Change the grammar used to parse texts.
641
+
642
+ :param grammar: The new grammar.
643
+ :type grammar: CFG
644
+ """
645
+ self._grammar = grammar
646
+
647
+
648
+ ##//////////////////////////////////////////////////////
649
+ ## Demonstration Code
650
+ ##//////////////////////////////////////////////////////
651
+
652
+
653
+ def demo():
654
+ """
655
+ A demonstration of the recursive descent parser.
656
+ """
657
+
658
+ from nltk import CFG, parse
659
+
660
+ grammar = CFG.fromstring(
661
+ """
662
+ S -> NP VP
663
+ NP -> Det N | Det N PP
664
+ VP -> V NP | V NP PP
665
+ PP -> P NP
666
+ NP -> 'I'
667
+ N -> 'man' | 'park' | 'telescope' | 'dog'
668
+ Det -> 'the' | 'a'
669
+ P -> 'in' | 'with'
670
+ V -> 'saw'
671
+ """
672
+ )
673
+
674
+ for prod in grammar.productions():
675
+ print(prod)
676
+
677
+ sent = "I saw a man in the park".split()
678
+ parser = parse.RecursiveDescentParser(grammar, trace=2)
679
+ for p in parser.parse(sent):
680
+ print(p)
681
+
682
+
683
+ if __name__ == "__main__":
684
+ demo()
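The stepping interface documented above (initialize, step, expand, match, backtrack, parses) can be driven by hand. The sketch below is an editorial example with an assumed toy grammar, not part of the committed file.

from nltk import CFG
from nltk.parse.recursivedescent import SteppingRecursiveDescentParser

grammar = CFG.fromstring("""
S -> NP VP
NP -> Det N
VP -> V NP
NP -> 'I'
Det -> 'a'
N -> 'dog'
V -> 'saw'
""")

parser = SteppingRecursiveDescentParser(grammar)
parser.initialize("I saw a dog".split())

# step() matches, expands, or backtracks; it returns None once nothing is left to try.
while parser.step() is not None:
    pass

for tree in parser.parses():
    print(tree)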
venv/lib/python3.10/site-packages/nltk/parse/shiftreduce.py ADDED
@@ -0,0 +1,479 @@
1
+ # Natural Language Toolkit: Shift-Reduce Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from nltk.grammar import Nonterminal
10
+ from nltk.parse.api import ParserI
11
+ from nltk.tree import Tree
12
+
13
+
14
+ ##//////////////////////////////////////////////////////
15
+ ## Shift/Reduce Parser
16
+ ##//////////////////////////////////////////////////////
17
+ class ShiftReduceParser(ParserI):
18
+ """
19
+ A simple bottom-up CFG parser that uses two operations, "shift"
20
+ and "reduce", to find a single parse for a text.
21
+
22
+ ``ShiftReduceParser`` maintains a stack, which records the
23
+ structure of a portion of the text. This stack is a list of
24
+ strings and Trees that collectively cover a portion of
25
+ the text. For example, while parsing the sentence "the dog saw
26
+ the man" with a typical grammar, ``ShiftReduceParser`` will produce
27
+ the following stack, which covers "the dog saw"::
28
+
29
+ [(NP: (Det: 'the') (N: 'dog')), (V: 'saw')]
30
+
31
+ ``ShiftReduceParser`` attempts to extend the stack to cover the
32
+ entire text, and to combine the stack elements into a single tree,
33
+ producing a complete parse for the sentence.
34
+
35
+ Initially, the stack is empty. It is extended to cover the text,
36
+ from left to right, by repeatedly applying two operations:
37
+
38
+ - "shift" moves a token from the beginning of the text to the
39
+ end of the stack.
40
+ - "reduce" uses a CFG production to combine the rightmost stack
41
+ elements into a single Tree.
42
+
43
+ Often, more than one operation can be performed on a given stack.
44
+ In this case, ``ShiftReduceParser`` uses the following heuristics
45
+ to decide which operation to perform:
46
+
47
+ - Only shift if no reductions are available.
48
+ - If multiple reductions are available, then apply the reduction
49
+ whose CFG production is listed earliest in the grammar.
50
+
51
+ Note that these heuristics are not guaranteed to choose an
52
+ operation that leads to a parse of the text. Also, if multiple
53
+ parses exist, ``ShiftReduceParser`` will return at most one of
54
+ them.
55
+
56
+ :see: ``nltk.grammar``
57
+ """
58
+
59
+ def __init__(self, grammar, trace=0):
60
+ """
61
+ Create a new ``ShiftReduceParser``, that uses ``grammar`` to
62
+ parse texts.
63
+
64
+ :type grammar: Grammar
65
+ :param grammar: The grammar used to parse texts.
66
+ :type trace: int
67
+ :param trace: The level of tracing that should be used when
68
+ parsing a text. ``0`` will generate no tracing output;
69
+ and higher numbers will produce more verbose tracing
70
+ output.
71
+ """
72
+ self._grammar = grammar
73
+ self._trace = trace
74
+ self._check_grammar()
75
+
76
+ def grammar(self):
77
+ return self._grammar
78
+
79
+ def parse(self, tokens):
80
+ tokens = list(tokens)
81
+ self._grammar.check_coverage(tokens)
82
+
83
+ # initialize the stack.
84
+ stack = []
85
+ remaining_text = tokens
86
+
87
+ # Trace output.
88
+ if self._trace:
89
+ print("Parsing %r" % " ".join(tokens))
90
+ self._trace_stack(stack, remaining_text)
91
+
92
+ # iterate through the text, pushing the token onto
93
+ # the stack, then reducing the stack.
94
+ while len(remaining_text) > 0:
95
+ self._shift(stack, remaining_text)
96
+ while self._reduce(stack, remaining_text):
97
+ pass
98
+
99
+ # Did we reduce everything?
100
+ if len(stack) == 1:
101
+ # Did we end up with the right category?
102
+ if stack[0].label() == self._grammar.start().symbol():
103
+ yield stack[0]
104
+
105
+ def _shift(self, stack, remaining_text):
106
+ """
107
+ Move a token from the beginning of ``remaining_text`` to the
108
+ end of ``stack``.
109
+
110
+ :type stack: list(str and Tree)
111
+ :param stack: A list of strings and Trees, encoding
112
+ the structure of the text that has been parsed so far.
113
+ :type remaining_text: list(str)
114
+ :param remaining_text: The portion of the text that is not yet
115
+ covered by ``stack``.
116
+ :rtype: None
117
+ """
118
+ stack.append(remaining_text[0])
119
+ remaining_text.remove(remaining_text[0])
120
+ if self._trace:
121
+ self._trace_shift(stack, remaining_text)
122
+
123
+ def _match_rhs(self, rhs, rightmost_stack):
124
+ """
125
+ :rtype: bool
126
+ :return: true if the right hand side of a CFG production
127
+ matches the rightmost elements of the stack. ``rhs``
128
+ matches ``rightmost_stack`` if they are the same length,
129
+ and each element of ``rhs`` matches the corresponding
130
+ element of ``rightmost_stack``. A nonterminal element of
131
+ ``rhs`` matches any Tree whose node value is equal
132
+ to the nonterminal's symbol. A terminal element of ``rhs``
133
+ matches any string whose type is equal to the terminal.
134
+ :type rhs: list(terminal and Nonterminal)
135
+ :param rhs: The right hand side of a CFG production.
136
+ :type rightmost_stack: list(string and Tree)
137
+ :param rightmost_stack: The rightmost elements of the parser's
138
+ stack.
139
+ """
140
+
141
+ if len(rightmost_stack) != len(rhs):
142
+ return False
143
+ for i in range(len(rightmost_stack)):
144
+ if isinstance(rightmost_stack[i], Tree):
145
+ if not isinstance(rhs[i], Nonterminal):
146
+ return False
147
+ if rightmost_stack[i].label() != rhs[i].symbol():
148
+ return False
149
+ else:
150
+ if isinstance(rhs[i], Nonterminal):
151
+ return False
152
+ if rightmost_stack[i] != rhs[i]:
153
+ return False
154
+ return True
155
+
156
+ def _reduce(self, stack, remaining_text, production=None):
157
+ """
158
+ Find a CFG production whose right hand side matches the
159
+ rightmost stack elements; and combine those stack elements
160
+ into a single Tree, with the node specified by the
161
+ production's left-hand side. If more than one CFG production
162
+ matches the stack, then use the production that is listed
163
+ earliest in the grammar. The new Tree replaces the
164
+ elements in the stack.
165
+
166
+ :rtype: Production or None
167
+ :return: If a reduction is performed, then return the CFG
168
+ production that the reduction is based on; otherwise,
169
+ return None.
170
+ :type stack: list(string and Tree)
171
+ :param stack: A list of strings and Trees, encoding
172
+ the structure of the text that has been parsed so far.
173
+ :type remaining_text: list(str)
174
+ :param remaining_text: The portion of the text that is not yet
175
+ covered by ``stack``.
176
+ """
177
+ if production is None:
178
+ productions = self._grammar.productions()
179
+ else:
180
+ productions = [production]
181
+
182
+ # Try each production, in order.
183
+ for production in productions:
184
+ rhslen = len(production.rhs())
185
+
186
+ # check if the RHS of a production matches the top of the stack
187
+ if self._match_rhs(production.rhs(), stack[-rhslen:]):
188
+
189
+ # combine the tree to reflect the reduction
190
+ tree = Tree(production.lhs().symbol(), stack[-rhslen:])
191
+ stack[-rhslen:] = [tree]
192
+
193
+ # We reduced something
194
+ if self._trace:
195
+ self._trace_reduce(stack, production, remaining_text)
196
+ return production
197
+
198
+ # We didn't reduce anything
199
+ return None
200
+
201
+ def trace(self, trace=2):
202
+ """
203
+ Set the level of tracing output that should be generated when
204
+ parsing a text.
205
+
206
+ :type trace: int
207
+ :param trace: The trace level. A trace level of ``0`` will
208
+ generate no tracing output; and higher trace levels will
209
+ produce more verbose tracing output.
210
+ :rtype: None
211
+ """
212
+ # 1: just show shifts.
213
+ # 2: show shifts & reduces
214
+ # 3: display which tokens & productions are shifted/reduced
215
+ self._trace = trace
216
+
217
+ def _trace_stack(self, stack, remaining_text, marker=" "):
218
+ """
219
+ Print trace output displaying the given stack and text.
220
+
221
+ :rtype: None
222
+ :param marker: A character that is printed to the left of the
223
+ stack. This is used with trace level 2 to print 'S'
224
+ before shifted stacks and 'R' before reduced stacks.
225
+ """
226
+ s = " " + marker + " [ "
227
+ for elt in stack:
228
+ if isinstance(elt, Tree):
229
+ s += repr(Nonterminal(elt.label())) + " "
230
+ else:
231
+ s += repr(elt) + " "
232
+ s += "* " + " ".join(remaining_text) + "]"
233
+ print(s)
234
+
235
+ def _trace_shift(self, stack, remaining_text):
236
+ """
237
+ Print trace output displaying that a token has been shifted.
238
+
239
+ :rtype: None
240
+ """
241
+ if self._trace > 2:
242
+ print("Shift %r:" % stack[-1])
243
+ if self._trace == 2:
244
+ self._trace_stack(stack, remaining_text, "S")
245
+ elif self._trace > 0:
246
+ self._trace_stack(stack, remaining_text)
247
+
248
+ def _trace_reduce(self, stack, production, remaining_text):
249
+ """
250
+ Print trace output displaying that ``production`` was used to
251
+ reduce ``stack``.
252
+
253
+ :rtype: None
254
+ """
255
+ if self._trace > 2:
256
+ rhs = " ".join(production.rhs())
257
+ print(f"Reduce {production.lhs()!r} <- {rhs}")
258
+ if self._trace == 2:
259
+ self._trace_stack(stack, remaining_text, "R")
260
+ elif self._trace > 1:
261
+ self._trace_stack(stack, remaining_text)
262
+
263
+ def _check_grammar(self):
264
+ """
265
+ Check to make sure that all of the CFG productions are
266
+ potentially useful. If any productions can never be used,
267
+ then print a warning.
268
+
269
+ :rtype: None
270
+ """
271
+ productions = self._grammar.productions()
272
+
273
+ # Any production whose RHS is an extension of another production's RHS
274
+ # will never be used.
275
+ for i in range(len(productions)):
276
+ for j in range(i + 1, len(productions)):
277
+ rhs1 = productions[i].rhs()
278
+ rhs2 = productions[j].rhs()
279
+ if rhs1[: len(rhs2)] == rhs2:
280
+ print("Warning: %r will never be used" % productions[i])
281
+
282
+
283
+ ##//////////////////////////////////////////////////////
284
+ ## Stepping Shift/Reduce Parser
285
+ ##//////////////////////////////////////////////////////
286
+ class SteppingShiftReduceParser(ShiftReduceParser):
287
+ """
288
+ A ``ShiftReduceParser`` that allows you to step through the parsing
289
+ process, performing a single operation at a time. It also allows
290
+ you to change the parser's grammar midway through parsing a text.
291
+
292
+ The ``initialize`` method is used to start parsing a text.
293
+ ``shift`` performs a single shift operation, and ``reduce`` performs
294
+ a single reduce operation. ``step`` will perform a single reduce
295
+ operation if possible; otherwise, it will perform a single shift
296
+ operation. ``parses`` returns the set of parses that have been
297
+ found by the parser.
298
+
299
+ :ivar _history: A list of ``(stack, remaining_text)`` pairs,
300
+ containing all of the previous states of the parser. This
301
+ history is used to implement the ``undo`` operation.
302
+ :see: ``nltk.grammar``
303
+ """
304
+
305
+ def __init__(self, grammar, trace=0):
306
+ super().__init__(grammar, trace)
307
+ self._stack = None
308
+ self._remaining_text = None
309
+ self._history = []
310
+
311
+ def parse(self, tokens):
312
+ tokens = list(tokens)
313
+ self.initialize(tokens)
314
+ while self.step():
315
+ pass
316
+ return self.parses()
317
+
318
+ def stack(self):
319
+ """
320
+ :return: The parser's stack.
321
+ :rtype: list(str and Tree)
322
+ """
323
+ return self._stack
324
+
325
+ def remaining_text(self):
326
+ """
327
+ :return: The portion of the text that is not yet covered by the
328
+ stack.
329
+ :rtype: list(str)
330
+ """
331
+ return self._remaining_text
332
+
333
+ def initialize(self, tokens):
334
+ """
335
+ Start parsing a given text. This sets the parser's stack to
336
+ ``[]`` and sets its remaining text to ``tokens``.
337
+ """
338
+ self._stack = []
339
+ self._remaining_text = tokens
340
+ self._history = []
341
+
342
+ def step(self):
343
+ """
344
+ Perform a single parsing operation. If a reduction is
345
+ possible, then perform that reduction, and return the
346
+ production that it is based on. Otherwise, if a shift is
347
+ possible, then perform it, and return True. Otherwise,
348
+ return False.
349
+
350
+ :return: False if no operation was performed; True if a shift was
351
+ performed; and the CFG production used to reduce if a
352
+ reduction was performed.
353
+ :rtype: Production or bool
354
+ """
355
+ return self.reduce() or self.shift()
356
+
357
+ def shift(self):
358
+ """
359
+ Move a token from the beginning of the remaining text to the
360
+ end of the stack. If there are no more tokens in the
361
+ remaining text, then do nothing.
362
+
363
+ :return: True if the shift operation was successful.
364
+ :rtype: bool
365
+ """
366
+ if len(self._remaining_text) == 0:
367
+ return False
368
+ self._history.append((self._stack[:], self._remaining_text[:]))
369
+ self._shift(self._stack, self._remaining_text)
370
+ return True
371
+
372
+ def reduce(self, production=None):
373
+ """
374
+ Use ``production`` to combine the rightmost stack elements into
375
+ a single Tree. If ``production`` does not match the
376
+ rightmost stack elements, then do nothing.
377
+
378
+ :return: The production used to reduce the stack, if a
379
+ reduction was performed. If no reduction was performed,
380
+ return None.
381
+
382
+ :rtype: Production or None
383
+ """
384
+ self._history.append((self._stack[:], self._remaining_text[:]))
385
+ return_val = self._reduce(self._stack, self._remaining_text, production)
386
+
387
+ if not return_val:
388
+ self._history.pop()
389
+ return return_val
390
+
391
+ def undo(self):
392
+ """
393
+ Return the parser to its state before the most recent
394
+ shift or reduce operation. Calling ``undo`` repeatedly returns
395
+ the parser to successively earlier states. If no shift or
396
+ reduce operations have been performed, ``undo`` will make no
397
+ changes.
398
+
399
+ :return: true if an operation was successfully undone.
400
+ :rtype: bool
401
+ """
402
+ if len(self._history) == 0:
403
+ return False
404
+ (self._stack, self._remaining_text) = self._history.pop()
405
+ return True
406
+
407
+ def reducible_productions(self):
408
+ """
409
+ :return: A list of the productions for which reductions are
410
+ available for the current parser state.
411
+ :rtype: list(Production)
412
+ """
413
+ productions = []
414
+ for production in self._grammar.productions():
415
+ rhslen = len(production.rhs())
416
+ if self._match_rhs(production.rhs(), self._stack[-rhslen:]):
417
+ productions.append(production)
418
+ return productions
419
+
420
+ def parses(self):
421
+ """
422
+ :return: An iterator of the parses that have been found by this
423
+ parser so far.
424
+ :rtype: iter(Tree)
425
+ """
426
+ if (
427
+ len(self._remaining_text) == 0
428
+ and len(self._stack) == 1
429
+ and self._stack[0].label() == self._grammar.start().symbol()
430
+ ):
431
+ yield self._stack[0]
432
+
433
+ # copied from nltk.parser
434
+
435
+ def set_grammar(self, grammar):
436
+ """
437
+ Change the grammar used to parse texts.
438
+
439
+ :param grammar: The new grammar.
440
+ :type grammar: CFG
441
+ """
442
+ self._grammar = grammar
443
+
444
+
445
+ ##//////////////////////////////////////////////////////
446
+ ## Demonstration Code
447
+ ##//////////////////////////////////////////////////////
448
+
449
+
450
+ def demo():
451
+ """
452
+ A demonstration of the shift-reduce parser.
453
+ """
454
+
455
+ from nltk import CFG, parse
456
+
457
+ grammar = CFG.fromstring(
458
+ """
459
+ S -> NP VP
460
+ NP -> Det N | Det N PP
461
+ VP -> V NP | V NP PP
462
+ PP -> P NP
463
+ NP -> 'I'
464
+ N -> 'man' | 'park' | 'telescope' | 'dog'
465
+ Det -> 'the' | 'a'
466
+ P -> 'in' | 'with'
467
+ V -> 'saw'
468
+ """
469
+ )
470
+
471
+ sent = "I saw a man in the park".split()
472
+
473
+ parser = parse.ShiftReduceParser(grammar, trace=2)
474
+ for p in parser.parse(sent):
475
+ print(p)
476
+
477
+
478
+ if __name__ == "__main__":
479
+ demo()
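Likewise for the stepping shift-reduce parser above: a hedged editorial sketch that assumes a small grammar for which the shift/reduce heuristics succeed; it is not part of the committed file.

from nltk import CFG
from nltk.parse.shiftreduce import SteppingShiftReduceParser

grammar = CFG.fromstring("""
S -> NP VP
NP -> Det N
VP -> V NP
Det -> 'the'
N -> 'dog' | 'man'
V -> 'saw'
""")

parser = SteppingShiftReduceParser(grammar)
parser.initialize("the dog saw the man".split())

# step() reduces when possible, otherwise shifts; it returns False when neither applies.
while parser.step():
    pass

for tree in parser.parses():
    print(tree)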
venv/lib/python3.10/site-packages/nltk/parse/util.py ADDED
@@ -0,0 +1,234 @@
1
+ # Natural Language Toolkit: Parser Utility Functions
2
+ #
3
+ # Author: Ewan Klein <[email protected]>
4
+ # Tom Aarsen <>
5
+ #
6
+ # Copyright (C) 2001-2023 NLTK Project
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+
11
+ """
12
+ Utility functions for parsers.
13
+ """
14
+
15
+ from nltk.data import load
16
+ from nltk.grammar import CFG, PCFG, FeatureGrammar
17
+ from nltk.parse.chart import Chart, ChartParser
18
+ from nltk.parse.featurechart import FeatureChart, FeatureChartParser
19
+ from nltk.parse.pchart import InsideChartParser
20
+
21
+
22
+ def load_parser(
23
+ grammar_url, trace=0, parser=None, chart_class=None, beam_size=0, **load_args
24
+ ):
25
+ """
26
+ Load a grammar from a file, and build a parser based on that grammar.
27
+ The parser depends on the grammar format, and might also depend
28
+ on properties of the grammar itself.
29
+
30
+ The following grammar formats are currently supported:
31
+ - ``'cfg'`` (CFGs: ``CFG``)
32
+ - ``'pcfg'`` (probabilistic CFGs: ``PCFG``)
33
+ - ``'fcfg'`` (feature-based CFGs: ``FeatureGrammar``)
34
+
35
+ :type grammar_url: str
36
+ :param grammar_url: A URL specifying where the grammar is located.
37
+ The default protocol is ``"nltk:"``, which searches for the file
38
+ in the NLTK data package.
39
+ :type trace: int
40
+ :param trace: The level of tracing that should be used when
41
+ parsing a text. ``0`` will generate no tracing output;
42
+ and higher numbers will produce more verbose tracing output.
43
+ :param parser: The class used for parsing; should be ``ChartParser``
44
+ or a subclass.
45
+ If None, the class depends on the grammar format.
46
+ :param chart_class: The class used for storing the chart;
47
+ should be ``Chart`` or a subclass.
48
+ Only used for CFGs and feature CFGs.
49
+ If None, the chart class depends on the grammar format.
50
+ :type beam_size: int
51
+ :param beam_size: The maximum length for the parser's edge queue.
52
+ Only used for probabilistic CFGs.
53
+ :param load_args: Keyword parameters used when loading the grammar.
54
+ See ``data.load`` for more information.
55
+ """
56
+ grammar = load(grammar_url, **load_args)
57
+ if not isinstance(grammar, CFG):
58
+ raise ValueError("The grammar must be a CFG, " "or a subclass thereof.")
59
+ if isinstance(grammar, PCFG):
60
+ if parser is None:
61
+ parser = InsideChartParser
62
+ return parser(grammar, trace=trace, beam_size=beam_size)
63
+
64
+ elif isinstance(grammar, FeatureGrammar):
65
+ if parser is None:
66
+ parser = FeatureChartParser
67
+ if chart_class is None:
68
+ chart_class = FeatureChart
69
+ return parser(grammar, trace=trace, chart_class=chart_class)
70
+
71
+ else: # Plain CFG.
72
+ if parser is None:
73
+ parser = ChartParser
74
+ if chart_class is None:
75
+ chart_class = Chart
76
+ return parser(grammar, trace=trace, chart_class=chart_class)
77
+
78
+
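Because load_parser dispatches on the loaded grammar's type, a small editorial sketch may help; it assumes the ``book_grammars`` resource from nltk_data is installed, and the grammar path and sentence are illustrative only.

from nltk.parse.util import load_parser

# feat0.fcfg is a feature grammar, so load_parser returns a FeatureChartParser here.
cp = load_parser('grammars/book_grammars/feat0.fcfg', trace=0)
for tree in cp.parse("Kim likes children".split()):
    print(tree)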
79
+ def taggedsent_to_conll(sentence):
80
+ """
81
+ A function to convert a single POS tagged sentence into CONLL format.
82
+
83
+ >>> from nltk import word_tokenize, pos_tag
84
+ >>> text = "This is a foobar sentence."
85
+ >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE
86
+ ... print(line, end="")
87
+ 1 This _ DT DT _ 0 a _ _
88
+ 2 is _ VBZ VBZ _ 0 a _ _
89
+ 3 a _ DT DT _ 0 a _ _
90
+ 4 foobar _ JJ JJ _ 0 a _ _
91
+ 5 sentence _ NN NN _ 0 a _ _
92
+ 6 . _ . . _ 0 a _ _
93
+
94
+ :param sentence: A single input sentence to parse
95
+ :type sentence: list(tuple(str, str))
96
+ :rtype: iter(str)
97
+ :return: a generator yielding a single sentence in CONLL format.
98
+ """
99
+ for (i, (word, tag)) in enumerate(sentence, start=1):
100
+ input_str = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"]
101
+ input_str = "\t".join(input_str) + "\n"
102
+ yield input_str
103
+
104
+
105
+ def taggedsents_to_conll(sentences):
106
+ """
107
+ A function to convert a POS tagged document stream
108
+ (i.e. list of list of tuples, a list of sentences) and yield lines
109
+ in CONLL format. It yields one line per word and two newlines
110
+ for end of sentence.
111
+
112
+ >>> from nltk import word_tokenize, sent_tokenize, pos_tag
113
+ >>> text = "This is a foobar sentence. Is that right?"
114
+ >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)]
115
+ >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE
116
+ ... if line:
117
+ ... print(line, end="")
118
+ 1 This _ DT DT _ 0 a _ _
119
+ 2 is _ VBZ VBZ _ 0 a _ _
120
+ 3 a _ DT DT _ 0 a _ _
121
+ 4 foobar _ JJ JJ _ 0 a _ _
122
+ 5 sentence _ NN NN _ 0 a _ _
123
+ 6 . _ . . _ 0 a _ _
124
+ <BLANKLINE>
125
+ <BLANKLINE>
126
+ 1 Is _ VBZ VBZ _ 0 a _ _
127
+ 2 that _ IN IN _ 0 a _ _
128
+ 3 right _ NN NN _ 0 a _ _
129
+ 4 ? _ . . _ 0 a _ _
130
+ <BLANKLINE>
131
+ <BLANKLINE>
132
+
133
+ :param sentences: Input sentences to parse
134
+ :type sentences: list(list(tuple(str, str)))
135
+ :rtype: iter(str)
136
+ :return: a generator yielding sentences in CONLL format.
137
+ """
138
+ for sentence in sentences:
139
+ yield from taggedsent_to_conll(sentence)
140
+ yield "\n\n"
141
+
142
+
143
+ ######################################################################
144
+ # { Test Suites
145
+ ######################################################################
146
+
147
+
148
+ class TestGrammar:
149
+ """
150
+ Unit tests for CFG.
151
+ """
152
+
153
+ def __init__(self, grammar, suite, accept=None, reject=None):
154
+ self.test_grammar = grammar
155
+
156
+ self.cp = load_parser(grammar, trace=0)
157
+ self.suite = suite
158
+ self._accept = accept
159
+ self._reject = reject
160
+
161
+ def run(self, show_trees=False):
162
+ """
163
+ Sentences in the test suite are divided into two classes:
164
+
165
+ - grammatical (``accept``) and
166
+ - ungrammatical (``reject``).
167
+
168
+ If a sentence should parse according to the grammar, the value of
169
+ ``trees`` will be a non-empty list. If a sentence should be rejected
170
+ according to the grammar, then the value of ``trees`` will be None.
171
+ """
172
+ for test in self.suite:
173
+ print(test["doc"] + ":", end=" ")
174
+ for key in ["accept", "reject"]:
175
+ for sent in test[key]:
176
+ tokens = sent.split()
177
+ trees = list(self.cp.parse(tokens))
178
+ if show_trees and trees:
179
+ print()
180
+ print(sent)
181
+ for tree in trees:
182
+ print(tree)
183
+ if key == "accept":
184
+ if trees == []:
185
+ raise ValueError("Sentence '%s' failed to parse'" % sent)
186
+ else:
187
+ accepted = True
188
+ else:
189
+ if trees:
190
+ raise ValueError("Sentence '%s' received a parse'" % sent)
191
+ else:
192
+ rejected = True
193
+ if accepted and rejected:
194
+ print("All tests passed!")
195
+
196
+
197
+ def extract_test_sentences(string, comment_chars="#%;", encoding=None):
198
+ """
199
+ Parses a string with one test sentence per line.
200
+ Lines can optionally begin with:
201
+
202
+ - a bool, saying if the sentence is grammatical or not, or
203
+ - an int, giving the number of parse trees it should have,
204
+
205
+ The result information is followed by a colon, and then the sentence.
206
+ Empty lines and lines beginning with a comment char are ignored.
207
+
208
+ :return: a list of tuple of sentences and expected results,
209
+ where a sentence is a list of str,
210
+ and a result is None, or bool, or int
211
+
212
+ :param comment_chars: ``str`` of possible comment characters.
213
+ :param encoding: the encoding of the string, if it is binary
214
+ """
215
+ if encoding is not None:
216
+ string = string.decode(encoding)
217
+ sentences = []
218
+ for sentence in string.split("\n"):
219
+ if sentence == "" or sentence[0] in comment_chars:
220
+ continue
221
+ split_info = sentence.split(":", 1)
222
+ result = None
223
+ if len(split_info) == 2:
224
+ if split_info[0] in ["True", "true", "False", "false"]:
225
+ result = split_info[0] in ["True", "true"]
226
+ sentence = split_info[1]
227
+ else:
228
+ result = int(split_info[0])
229
+ sentence = split_info[1]
230
+ tokens = sentence.split()
231
+ if tokens == []:
232
+ continue
233
+ sentences += [(tokens, result)]
234
+ return sentences
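A short editorial example of extract_test_sentences on an inline suite string; the sentences and expected values are invented for illustration only.

from nltk.parse.util import extract_test_sentences

suite = """
# comment lines and blank lines are ignored
True: the dog saw the man
False: dog the the saw man
2: I saw the man with the telescope
the price of the stock fell
"""

# Each entry is (tokens, expected), where expected is a bool, an int, or None.
for tokens, expected in extract_test_sentences(suite):
    print(expected, tokens)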
venv/lib/python3.10/site-packages/nltk/test/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (456 Bytes)
venv/lib/python3.10/site-packages/nltk/test/__pycache__/all.cpython-310.pyc ADDED
Binary file (1.1 kB)
venv/lib/python3.10/site-packages/nltk/test/__pycache__/childes_fixt.cpython-310.pyc ADDED
Binary file (605 Bytes)
venv/lib/python3.10/site-packages/nltk/test/__pycache__/classify_fixt.cpython-310.pyc ADDED
Binary file (337 Bytes)
venv/lib/python3.10/site-packages/nltk/test/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (1.02 kB)
venv/lib/python3.10/site-packages/nltk/test/__pycache__/gensim_fixt.cpython-310.pyc ADDED
Binary file (336 Bytes)
venv/lib/python3.10/site-packages/nltk/test/__pycache__/gluesemantics_malt_fixt.cpython-310.pyc ADDED
Binary file (521 Bytes)