applied-ai-018 committed on
Commit 0479309 · verified · 1 Parent(s): fd6d886

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/nltk/parse/__init__.py +102 -0
  2. env-llmeval/lib/python3.10/site-packages/nltk/parse/bllip.py +299 -0
  3. env-llmeval/lib/python3.10/site-packages/nltk/parse/chart.py +1848 -0
  4. env-llmeval/lib/python3.10/site-packages/nltk/parse/dependencygraph.py +799 -0
  5. env-llmeval/lib/python3.10/site-packages/nltk/parse/earleychart.py +552 -0
  6. env-llmeval/lib/python3.10/site-packages/nltk/parse/evaluate.py +129 -0
  7. env-llmeval/lib/python3.10/site-packages/nltk/parse/featurechart.py +674 -0
  8. env-llmeval/lib/python3.10/site-packages/nltk/parse/generate.py +85 -0
  9. env-llmeval/lib/python3.10/site-packages/nltk/parse/shiftreduce.py +479 -0
  10. env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/all.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/childes_fixt.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/conftest.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/gluesemantics_malt_fixt.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/probability_fixt.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/__init__.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_aline.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_bllip.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_brill.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfd_mutation.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfg2chomsky.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_chunk.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_classify.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_collocations.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_concordance.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corenlp.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpora.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpus_views.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_data.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_disagreement.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_downloader.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_freqdist.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_hmm.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json2csv_corpus.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json_serialization.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_metrics.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_naivebayes.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_nombank.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pl196x.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pos_tag.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_rte_classify.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_seekable_unicode_stream_reader.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_senna.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_stem.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tag.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tgrep.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tokenize.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_twitter_auth.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_util.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/nltk/parse/__init__.py ADDED
@@ -0,0 +1,102 @@
+ # Natural Language Toolkit: Parsers
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Steven Bird <[email protected]>
+ #         Edward Loper <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ #
+
+ """
+ NLTK Parsers
+
+ Classes and interfaces for producing tree structures that represent
+ the internal organization of a text. This task is known as "parsing"
+ the text, and the resulting tree structures are called the text's
+ "parses". Typically, the text is a single sentence, and the tree
+ structure represents the syntactic structure of the sentence.
+ However, parsers can also be used in other domains. For example,
+ parsers can be used to derive the morphological structure of the
+ morphemes that make up a word, or to derive the discourse structure
+ for a set of utterances.
+
+ Sometimes, a single piece of text can be represented by more than one
+ tree structure. Texts represented by more than one tree structure are
+ called "ambiguous" texts. Note that there are actually two ways in
+ which a text can be ambiguous:
+
+     - The text has multiple correct parses.
+     - There is not enough information to decide which of several
+       candidate parses is correct.
+
+ However, the parser module does *not* distinguish these two types of
+ ambiguity.
+
+ The parser module defines ``ParserI``, a standard interface for parsing
+ texts; and two simple implementations of that interface,
+ ``ShiftReduceParser`` and ``RecursiveDescentParser``. It also contains
+ two sub-modules for specialized kinds of parsing:
+
+     - ``nltk.parser.chart`` defines chart parsing, which uses dynamic
+       programming to efficiently parse texts.
+     - ``nltk.parser.probabilistic`` defines probabilistic parsing, which
+       associates a probability with each parse.
+ """
+
+ from nltk.parse.api import ParserI
+ from nltk.parse.bllip import BllipParser
+ from nltk.parse.chart import (
+     BottomUpChartParser,
+     BottomUpLeftCornerChartParser,
+     ChartParser,
+     LeftCornerChartParser,
+     SteppingChartParser,
+     TopDownChartParser,
+ )
+ from nltk.parse.corenlp import CoreNLPDependencyParser, CoreNLPParser
+ from nltk.parse.dependencygraph import DependencyGraph
+ from nltk.parse.earleychart import (
+     EarleyChartParser,
+     FeatureEarleyChartParser,
+     FeatureIncrementalBottomUpChartParser,
+     FeatureIncrementalBottomUpLeftCornerChartParser,
+     FeatureIncrementalChartParser,
+     FeatureIncrementalTopDownChartParser,
+     IncrementalBottomUpChartParser,
+     IncrementalBottomUpLeftCornerChartParser,
+     IncrementalChartParser,
+     IncrementalLeftCornerChartParser,
+     IncrementalTopDownChartParser,
+ )
+ from nltk.parse.evaluate import DependencyEvaluator
+ from nltk.parse.featurechart import (
+     FeatureBottomUpChartParser,
+     FeatureBottomUpLeftCornerChartParser,
+     FeatureChartParser,
+     FeatureTopDownChartParser,
+ )
+ from nltk.parse.malt import MaltParser
+ from nltk.parse.nonprojectivedependencyparser import (
+     NaiveBayesDependencyScorer,
+     NonprojectiveDependencyParser,
+     ProbabilisticNonprojectiveParser,
+ )
+ from nltk.parse.pchart import (
+     BottomUpProbabilisticChartParser,
+     InsideChartParser,
+     LongestChartParser,
+     RandomChartParser,
+     UnsortedChartParser,
+ )
+ from nltk.parse.projectivedependencyparser import (
+     ProbabilisticProjectiveDependencyParser,
+     ProjectiveDependencyParser,
+ )
+ from nltk.parse.recursivedescent import (
+     RecursiveDescentParser,
+     SteppingRecursiveDescentParser,
+ )
+ from nltk.parse.shiftreduce import ShiftReduceParser, SteppingShiftReduceParser
+ from nltk.parse.transitionparser import TransitionParser
+ from nltk.parse.util import TestGrammar, extract_test_sentences, load_parser
+ from nltk.parse.viterbi import ViterbiParser
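
As a usage sketch (assuming a toy grammar; nothing below is part of the uploaded files), the ``ChartParser`` re-exported above can be driven like this:

    # Sketch: parse a sentence with the re-exported ChartParser.
    # The toy CFG and sentence are illustrative assumptions.
    import nltk
    from nltk.parse import ChartParser

    grammar = nltk.CFG.fromstring(
        """
        S -> NP VP
        NP -> Det N
        VP -> V NP
        Det -> 'the'
        N -> 'dog' | 'cat'
        V -> 'saw'
        """
    )

    parser = ChartParser(grammar)
    for tree in parser.parse("the dog saw the cat".split()):
        print(tree)
    # (S (NP (Det the) (N dog)) (VP (V saw) (NP (Det the) (N cat))))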
env-llmeval/lib/python3.10/site-packages/nltk/parse/bllip.py ADDED
@@ -0,0 +1,299 @@
+ # Natural Language Toolkit: Interface to BLLIP Parser
+ #
+ # Author: David McClosky <[email protected]>
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ from nltk.parse.api import ParserI
+ from nltk.tree import Tree
+
+ """
+ Interface for parsing with BLLIP Parser. Requires the Python
+ bllipparser module. BllipParser objects can be constructed with the
+ ``BllipParser.from_unified_model_dir`` class method or manually using the
+ ``BllipParser`` constructor. The former is generally easier if you have
+ a BLLIP Parser unified model directory -- a basic model can be obtained
+ from NLTK's downloader. More unified parsing models can be obtained with
+ BLLIP Parser's ModelFetcher (run ``python -m bllipparser.ModelFetcher``
+ or see docs for ``bllipparser.ModelFetcher.download_and_install_model``).
+
+ Basic usage::
+
+     # download and install a basic unified parsing model (Wall Street Journal)
+     # sudo python -m nltk.downloader bllip_wsj_no_aux
+
+     >>> from nltk.data import find
+     >>> model_dir = find('models/bllip_wsj_no_aux').path
+     >>> bllip = BllipParser.from_unified_model_dir(model_dir)
+
+     # 1-best parsing
+     >>> sentence1 = 'British left waffles on Falklands .'.split()
+     >>> top_parse = bllip.parse_one(sentence1)
+     >>> print(top_parse)
+     (S1
+       (S
+         (NP (JJ British) (NN left))
+         (VP (VBZ waffles) (PP (IN on) (NP (NNP Falklands))))
+         (. .)))
+
+     # n-best parsing
+     >>> sentence2 = 'Time flies'.split()
+     >>> all_parses = bllip.parse_all(sentence2)
+     >>> print(len(all_parses))
+     50
+     >>> print(all_parses[0])
+     (S1 (S (NP (NNP Time)) (VP (VBZ flies))))
+
+     # incorporating external tagging constraints (None means unconstrained tag)
+     >>> constrained1 = bllip.tagged_parse([('Time', 'VB'), ('flies', 'NNS')])
+     >>> print(next(constrained1))
+     (S1 (NP (VB Time) (NNS flies)))
+     >>> constrained2 = bllip.tagged_parse([('Time', 'NN'), ('flies', None)])
+     >>> print(next(constrained2))
+     (S1 (NP (NN Time) (VBZ flies)))
+
+ References
+ ----------
+
+ - Charniak, Eugene. "A maximum-entropy-inspired parser." Proceedings of
+   the 1st North American chapter of the Association for Computational
+   Linguistics conference. Association for Computational Linguistics,
+   2000.
+
+ - Charniak, Eugene, and Mark Johnson. "Coarse-to-fine n-best parsing
+   and MaxEnt discriminative reranking." Proceedings of the 43rd Annual
+   Meeting on Association for Computational Linguistics. Association
+   for Computational Linguistics, 2005.
+
+ Known issues
+ ------------
+
+ Note that BLLIP Parser is not currently threadsafe. Since this module
+ uses a SWIG interface, it is potentially unsafe to create multiple
+ ``BllipParser`` objects in the same process. BLLIP Parser currently
+ has issues with non-ASCII text and will raise an error if given any.
+
+ See https://pypi.python.org/pypi/bllipparser/ for more information
+ on BLLIP Parser's Python interface.
+ """
+
+ __all__ = ["BllipParser"]
+
+ # this block allows this module to be imported even if bllipparser isn't
+ # available
+ try:
+     from bllipparser import RerankingParser
+     from bllipparser.RerankingParser import get_unified_model_parameters
+
+     def _ensure_bllip_import_or_error():
+         pass
+
+ except ImportError as ie:
+
+     def _ensure_bllip_import_or_error(ie=ie):
+         raise ImportError("Couldn't import bllipparser module: %s" % ie)
+
+
+ def _ensure_ascii(words):
+     try:
+         for i, word in enumerate(words):
+             word.encode("ascii")
+     except UnicodeEncodeError as e:
+         raise ValueError(
+             f"Token {i} ({word!r}) is non-ASCII. BLLIP Parser "
+             "currently doesn't support non-ASCII inputs."
+         ) from e
+
+
+ def _scored_parse_to_nltk_tree(scored_parse):
+     return Tree.fromstring(str(scored_parse.ptb_parse))
+
+
+ class BllipParser(ParserI):
+     """
+     Interface for parsing with BLLIP Parser. BllipParser objects can be
+     constructed with the ``BllipParser.from_unified_model_dir`` class
+     method or manually using the ``BllipParser`` constructor.
+     """
+
+     def __init__(
+         self,
+         parser_model=None,
+         reranker_features=None,
+         reranker_weights=None,
+         parser_options=None,
+         reranker_options=None,
+     ):
+         """
+         Load a BLLIP Parser model from scratch. You'll typically want to
+         use the ``from_unified_model_dir()`` class method to construct
+         this object.
+
+         :param parser_model: Path to parser model directory
+         :type parser_model: str
+
+         :param reranker_features: Path to the reranker model's features file
+         :type reranker_features: str
+
+         :param reranker_weights: Path to the reranker model's weights file
+         :type reranker_weights: str
+
+         :param parser_options: optional dictionary of parser options, see
+             ``bllipparser.RerankingParser.RerankingParser.load_parser_options()``
+             for more information.
+         :type parser_options: dict(str)
+
+         :param reranker_options: optional
+             dictionary of reranker options, see
+             ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()``
+             for more information.
+         :type reranker_options: dict(str)
+         """
+         _ensure_bllip_import_or_error()
+
+         parser_options = parser_options or {}
+         reranker_options = reranker_options or {}
+
+         self.rrp = RerankingParser()
+         self.rrp.load_parser_model(parser_model, **parser_options)
+         if reranker_features and reranker_weights:
+             self.rrp.load_reranker_model(
+                 features_filename=reranker_features,
+                 weights_filename=reranker_weights,
+                 **reranker_options,
+             )
+
+     def parse(self, sentence):
+         """
+         Use BLLIP Parser to parse a sentence. Takes a sentence as a list
+         of words; it will be automatically tagged with this BLLIP Parser
+         instance's tagger.
+
+         :return: An iterator that generates parse trees for the sentence
+             from most likely to least likely.
+
+         :param sentence: The sentence to be parsed
+         :type sentence: list(str)
+         :rtype: iter(Tree)
+         """
+         _ensure_ascii(sentence)
+         nbest_list = self.rrp.parse(sentence)
+         for scored_parse in nbest_list:
+             yield _scored_parse_to_nltk_tree(scored_parse)
+
+     def tagged_parse(self, word_and_tag_pairs):
+         """
+         Use BLLIP to parse a sentence. Takes a sentence as a list of
+         (word, tag) tuples; the sentence must have already been tokenized
+         and tagged. BLLIP will attempt to use the tags provided but may
+         use others if it can't come up with a complete parse subject
+         to those constraints. You may also specify a tag as ``None``
+         to leave a token's tag unconstrained.
+
+         :return: An iterator that generates parse trees for the sentence
+             from most likely to least likely.
+
+         :param word_and_tag_pairs: Input sentence to parse as (word, tag) pairs
+         :type word_and_tag_pairs: list(tuple(str, str))
+         :rtype: iter(Tree)
+         """
+         words = []
+         tag_map = {}
+         for i, (word, tag) in enumerate(word_and_tag_pairs):
+             words.append(word)
+             if tag is not None:
+                 tag_map[i] = tag
+
+         _ensure_ascii(words)
+         nbest_list = self.rrp.parse_tagged(words, tag_map)
+         for scored_parse in nbest_list:
+             yield _scored_parse_to_nltk_tree(scored_parse)
+
+     @classmethod
+     def from_unified_model_dir(
+         cls, model_dir, parser_options=None, reranker_options=None
+     ):
+         """
+         Create a ``BllipParser`` object from a unified parsing model
+         directory. Unified parsing model directories are a standardized
+         way of storing BLLIP parser and reranker models together on disk.
+         See ``bllipparser.RerankingParser.get_unified_model_parameters()``
+         for more information about unified model directories.
+
+         :return: A ``BllipParser`` object using the parser and reranker
+             models in the model directory.
+
+         :param model_dir: Path to the unified model directory.
+         :type model_dir: str
+         :param parser_options: optional dictionary of parser options, see
+             ``bllipparser.RerankingParser.RerankingParser.load_parser_options()``
+             for more information.
+         :type parser_options: dict(str)
+         :param reranker_options: optional dictionary of reranker options, see
+             ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()``
+             for more information.
+         :type reranker_options: dict(str)
+         :rtype: BllipParser
+         """
+         (
+             parser_model_dir,
+             reranker_features_filename,
+             reranker_weights_filename,
+         ) = get_unified_model_parameters(model_dir)
+         return cls(
+             parser_model_dir,
+             reranker_features_filename,
+             reranker_weights_filename,
+             parser_options,
+             reranker_options,
+         )
+
+
+ def demo():
+     """This assumes the Python module bllipparser is installed."""
+
+     # download and install a basic unified parsing model (Wall Street Journal)
+     # sudo python -m nltk.downloader bllip_wsj_no_aux
+
+     from nltk.data import find
+
+     model_dir = find("models/bllip_wsj_no_aux").path
+
+     print("Loading BLLIP Parsing models...")
+     # the easiest way to get started is to use a unified model
+     bllip = BllipParser.from_unified_model_dir(model_dir)
+     print("Done.")
+
+     sentence1 = "British left waffles on Falklands .".split()
+     sentence2 = "I saw the man with the telescope .".split()
+     # this sentence is known to fail under the WSJ parsing model
+     fail1 = "# ! ? : -".split()
+     for sentence in (sentence1, sentence2, fail1):
+         print("Sentence: %r" % " ".join(sentence))
+         try:
+             tree = next(bllip.parse(sentence))
+             print(tree)
+         except StopIteration:
+             print("(parse failed)")
+
+     # n-best parsing demo
+     for i, parse in enumerate(bllip.parse(sentence1)):
+         print("parse %d:\n%s" % (i, parse))
+
+     # using external POS tag constraints
+     print(
+         "forcing 'tree' to be 'NN':",
+         next(bllip.tagged_parse([("A", None), ("tree", "NN")])),
+     )
+     print(
+         "forcing 'A' to be 'DT' and 'tree' to be 'NNP':",
+         next(bllip.tagged_parse([("A", "DT"), ("tree", "NNP")])),
+     )
+     # constraints don't have to make sense... (though on more complicated
+     # sentences, they may cause the parse to fail)
+     print(
+         "forcing 'A' to be 'NNP':",
+         next(bllip.tagged_parse([("A", "NNP"), ("tree", None)])),
+     )
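
The try/except block at the top of bllip.py is a standard pattern for optional dependencies: the module always imports cleanly, and a missing ``bllipparser`` only raises once a parser is actually constructed. A self-contained sketch of the same pattern follows (``somelib`` and ``use_somelib`` are hypothetical names, not part of the uploaded files):

    # Optional-dependency guard, as in bllip.py's _ensure_bllip_import_or_error.
    # "somelib" / "use_somelib" are hypothetical illustration names.
    try:
        import somelib  # the optional third-party dependency

        def _ensure_somelib_or_error():
            pass  # import succeeded; nothing to verify at call time

    except ImportError as ie:

        def _ensure_somelib_or_error(ie=ie):
            # The default argument captures the original exception, because
            # Python unbinds the except-clause name once the handler exits.
            raise ImportError("Couldn't import somelib: %s" % ie)


    def use_somelib():
        _ensure_somelib_or_error()  # raises here, not at module import time
        return somelib.do_something()

Deferring the error this way is what lets nltk/parse/__init__.py re-export ``BllipParser`` unconditionally on systems that never installed the extra dependency.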
env-llmeval/lib/python3.10/site-packages/nltk/parse/chart.py ADDED
@@ -0,0 +1,1848 @@
1
+ # Natural Language Toolkit: A Chart Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # Jean Mark Gawron <[email protected]>
7
+ # Peter Ljunglöf <[email protected]>
8
+ # URL: <https://www.nltk.org/>
9
+ # For license information, see LICENSE.TXT
10
+
11
+ """
12
+ Data classes and parser implementations for "chart parsers", which
13
+ use dynamic programming to efficiently parse a text. A chart
14
+ parser derives parse trees for a text by iteratively adding "edges"
15
+ to a "chart." Each edge represents a hypothesis about the tree
16
+ structure for a subsequence of the text. The chart is a
17
+ "blackboard" for composing and combining these hypotheses.
18
+
19
+ When a chart parser begins parsing a text, it creates a new (empty)
20
+ chart, spanning the text. It then incrementally adds new edges to the
21
+ chart. A set of "chart rules" specifies the conditions under which
22
+ new edges should be added to the chart. Once the chart reaches a
23
+ stage where none of the chart rules adds any new edges, parsing is
24
+ complete.
25
+
26
+ Charts are encoded with the ``Chart`` class, and edges are encoded with
27
+ the ``TreeEdge`` and ``LeafEdge`` classes. The chart parser module
28
+ defines three chart parsers:
29
+
30
+ - ``ChartParser`` is a simple and flexible chart parser. Given a
31
+ set of chart rules, it will apply those rules to the chart until
32
+ no more edges are added.
33
+
34
+ - ``SteppingChartParser`` is a subclass of ``ChartParser`` that can
35
+ be used to step through the parsing process.
36
+ """
37
+
38
+ import itertools
39
+ import re
40
+ import warnings
41
+ from functools import total_ordering
42
+
43
+ from nltk.grammar import PCFG, is_nonterminal, is_terminal
44
+ from nltk.internals import raise_unorderable_types
45
+ from nltk.parse.api import ParserI
46
+ from nltk.tree import Tree
47
+ from nltk.util import OrderedDict
48
+
49
+ ########################################################################
50
+ ## Edges
51
+ ########################################################################
52
+
53
+
54
+ @total_ordering
55
+ class EdgeI:
56
+ """
57
+ A hypothesis about the structure of part of a sentence.
58
+ Each edge records the fact that a structure is (partially)
59
+ consistent with the sentence. An edge contains:
60
+
61
+ - A span, indicating what part of the sentence is
62
+ consistent with the hypothesized structure.
63
+ - A left-hand side, specifying what kind of structure is
64
+ hypothesized.
65
+ - A right-hand side, specifying the contents of the
66
+ hypothesized structure.
67
+ - A dot position, indicating how much of the hypothesized
68
+ structure is consistent with the sentence.
69
+
70
+ Every edge is either complete or incomplete:
71
+
72
+ - An edge is complete if its structure is fully consistent
73
+ with the sentence.
74
+ - An edge is incomplete if its structure is partially
75
+ consistent with the sentence. For every incomplete edge, the
76
+ span specifies a possible prefix for the edge's structure.
77
+
78
+ There are two kinds of edge:
79
+
80
+ - A ``TreeEdge`` records which trees have been found to
81
+ be (partially) consistent with the text.
82
+ - A ``LeafEdge`` records the tokens occurring in the text.
83
+
84
+ The ``EdgeI`` interface provides a common interface to both types
85
+ of edge, allowing chart parsers to treat them in a uniform manner.
86
+ """
87
+
88
+ def __init__(self):
89
+ if self.__class__ == EdgeI:
90
+ raise TypeError("Edge is an abstract interface")
91
+
92
+ # ////////////////////////////////////////////////////////////
93
+ # Span
94
+ # ////////////////////////////////////////////////////////////
95
+
96
+ def span(self):
97
+ """
98
+ Return a tuple ``(s, e)``, where ``tokens[s:e]`` is the
99
+ portion of the sentence that is consistent with this
100
+ edge's structure.
101
+
102
+ :rtype: tuple(int, int)
103
+ """
104
+ raise NotImplementedError()
105
+
106
+ def start(self):
107
+ """
108
+ Return the start index of this edge's span.
109
+
110
+ :rtype: int
111
+ """
112
+ raise NotImplementedError()
113
+
114
+ def end(self):
115
+ """
116
+ Return the end index of this edge's span.
117
+
118
+ :rtype: int
119
+ """
120
+ raise NotImplementedError()
121
+
122
+ def length(self):
123
+ """
124
+ Return the length of this edge's span.
125
+
126
+ :rtype: int
127
+ """
128
+ raise NotImplementedError()
129
+
130
+ # ////////////////////////////////////////////////////////////
131
+ # Left Hand Side
132
+ # ////////////////////////////////////////////////////////////
133
+
134
+ def lhs(self):
135
+ """
136
+ Return this edge's left-hand side, which specifies what kind
137
+ of structure is hypothesized by this edge.
138
+
139
+ :see: ``TreeEdge`` and ``LeafEdge`` for a description of
140
+ the left-hand side values for each edge type.
141
+ """
142
+ raise NotImplementedError()
143
+
144
+ # ////////////////////////////////////////////////////////////
145
+ # Right Hand Side
146
+ # ////////////////////////////////////////////////////////////
147
+
148
+ def rhs(self):
149
+ """
150
+ Return this edge's right-hand side, which specifies
151
+ the content of the structure hypothesized by this edge.
152
+
153
+ :see: ``TreeEdge`` and ``LeafEdge`` for a description of
154
+ the right-hand side values for each edge type.
155
+ """
156
+ raise NotImplementedError()
157
+
158
+ def dot(self):
159
+ """
160
+ Return this edge's dot position, which indicates how much of
161
+ the hypothesized structure is consistent with the
162
+ sentence. In particular, ``self.rhs[:dot]`` is consistent
163
+ with ``tokens[self.start():self.end()]``.
164
+
165
+ :rtype: int
166
+ """
167
+ raise NotImplementedError()
168
+
169
+ def nextsym(self):
170
+ """
171
+ Return the element of this edge's right-hand side that
172
+ immediately follows its dot.
173
+
174
+ :rtype: Nonterminal or terminal or None
175
+ """
176
+ raise NotImplementedError()
177
+
178
+ def is_complete(self):
179
+ """
180
+ Return True if this edge's structure is fully consistent
181
+ with the text.
182
+
183
+ :rtype: bool
184
+ """
185
+ raise NotImplementedError()
186
+
187
+ def is_incomplete(self):
188
+ """
189
+ Return True if this edge's structure is partially consistent
190
+ with the text.
191
+
192
+ :rtype: bool
193
+ """
194
+ raise NotImplementedError()
195
+
196
+ # ////////////////////////////////////////////////////////////
197
+ # Comparisons & hashing
198
+ # ////////////////////////////////////////////////////////////
199
+
200
+ def __eq__(self, other):
201
+ return (
202
+ self.__class__ is other.__class__
203
+ and self._comparison_key == other._comparison_key
204
+ )
205
+
206
+ def __ne__(self, other):
207
+ return not self == other
208
+
209
+ def __lt__(self, other):
210
+ if not isinstance(other, EdgeI):
211
+ raise_unorderable_types("<", self, other)
212
+ if self.__class__ is other.__class__:
213
+ return self._comparison_key < other._comparison_key
214
+ else:
215
+ return self.__class__.__name__ < other.__class__.__name__
216
+
217
+ def __hash__(self):
218
+ try:
219
+ return self._hash
220
+ except AttributeError:
221
+ self._hash = hash(self._comparison_key)
222
+ return self._hash
223
+
224
+
225
+ class TreeEdge(EdgeI):
226
+ """
227
+ An edge that records the fact that a tree is (partially)
228
+ consistent with the sentence. A tree edge consists of:
229
+
230
+ - A span, indicating what part of the sentence is
231
+ consistent with the hypothesized tree.
232
+ - A left-hand side, specifying the hypothesized tree's node
233
+ value.
234
+ - A right-hand side, specifying the hypothesized tree's
235
+ children. Each element of the right-hand side is either a
236
+ terminal, specifying a token with that terminal as its leaf
237
+ value; or a nonterminal, specifying a subtree with that
238
+ nonterminal's symbol as its node value.
239
+ - A dot position, indicating which children are consistent
240
+ with part of the sentence. In particular, if ``dot`` is the
241
+ dot position, ``rhs`` is the right-hand size, ``(start,end)``
242
+ is the span, and ``sentence`` is the list of tokens in the
243
+ sentence, then ``tokens[start:end]`` can be spanned by the
244
+ children specified by ``rhs[:dot]``.
245
+
246
+ For more information about edges, see the ``EdgeI`` interface.
247
+ """
248
+
249
+ def __init__(self, span, lhs, rhs, dot=0):
250
+ """
251
+ Construct a new ``TreeEdge``.
252
+
253
+ :type span: tuple(int, int)
254
+ :param span: A tuple ``(s, e)``, where ``tokens[s:e]`` is the
255
+ portion of the sentence that is consistent with the new
256
+ edge's structure.
257
+ :type lhs: Nonterminal
258
+ :param lhs: The new edge's left-hand side, specifying the
259
+ hypothesized tree's node value.
260
+ :type rhs: list(Nonterminal and str)
261
+ :param rhs: The new edge's right-hand side, specifying the
262
+ hypothesized tree's children.
263
+ :type dot: int
264
+ :param dot: The position of the new edge's dot. This position
265
+ specifies what prefix of the production's right hand side
266
+ is consistent with the text. In particular, if
267
+ ``sentence`` is the list of tokens in the sentence, then
268
+ ``okens[span[0]:span[1]]`` can be spanned by the
269
+ children specified by ``rhs[:dot]``.
270
+ """
271
+ self._span = span
272
+ self._lhs = lhs
273
+ rhs = tuple(rhs)
274
+ self._rhs = rhs
275
+ self._dot = dot
276
+ self._comparison_key = (span, lhs, rhs, dot)
277
+
278
+ @staticmethod
279
+ def from_production(production, index):
280
+ """
281
+ Return a new ``TreeEdge`` formed from the given production.
282
+ The new edge's left-hand side and right-hand side will
283
+ be taken from ``production``; its span will be
284
+ ``(index,index)``; and its dot position will be ``0``.
285
+
286
+ :rtype: TreeEdge
287
+ """
288
+ return TreeEdge(
289
+ span=(index, index), lhs=production.lhs(), rhs=production.rhs(), dot=0
290
+ )
291
+
292
+ def move_dot_forward(self, new_end):
293
+ """
294
+ Return a new ``TreeEdge`` formed from this edge.
295
+ The new edge's dot position is increased by ``1``,
296
+ and its end index will be replaced by ``new_end``.
297
+
298
+ :param new_end: The new end index.
299
+ :type new_end: int
300
+ :rtype: TreeEdge
301
+ """
302
+ return TreeEdge(
303
+ span=(self._span[0], new_end),
304
+ lhs=self._lhs,
305
+ rhs=self._rhs,
306
+ dot=self._dot + 1,
307
+ )
308
+
309
+ # Accessors
310
+ def lhs(self):
311
+ return self._lhs
312
+
313
+ def span(self):
314
+ return self._span
315
+
316
+ def start(self):
317
+ return self._span[0]
318
+
319
+ def end(self):
320
+ return self._span[1]
321
+
322
+ def length(self):
323
+ return self._span[1] - self._span[0]
324
+
325
+ def rhs(self):
326
+ return self._rhs
327
+
328
+ def dot(self):
329
+ return self._dot
330
+
331
+ def is_complete(self):
332
+ return self._dot == len(self._rhs)
333
+
334
+ def is_incomplete(self):
335
+ return self._dot != len(self._rhs)
336
+
337
+ def nextsym(self):
338
+ if self._dot >= len(self._rhs):
339
+ return None
340
+ else:
341
+ return self._rhs[self._dot]
342
+
343
+ # String representation
344
+ def __str__(self):
345
+ str = f"[{self._span[0]}:{self._span[1]}] "
346
+ str += "%-2r ->" % (self._lhs,)
347
+
348
+ for i in range(len(self._rhs)):
349
+ if i == self._dot:
350
+ str += " *"
351
+ str += " %s" % repr(self._rhs[i])
352
+ if len(self._rhs) == self._dot:
353
+ str += " *"
354
+ return str
355
+
356
+ def __repr__(self):
357
+ return "[Edge: %s]" % self
358
+
359
+
360
+ class LeafEdge(EdgeI):
361
+ """
362
+ An edge that records the fact that a leaf value is consistent with
363
+ a word in the sentence. A leaf edge consists of:
364
+
365
+ - An index, indicating the position of the word.
366
+ - A leaf, specifying the word's content.
367
+
368
+ A leaf edge's left-hand side is its leaf value, and its right hand
369
+ side is ``()``. Its span is ``[index, index+1]``, and its dot
370
+ position is ``0``.
371
+ """
372
+
373
+ def __init__(self, leaf, index):
374
+ """
375
+ Construct a new ``LeafEdge``.
376
+
377
+ :param leaf: The new edge's leaf value, specifying the word
378
+ that is recorded by this edge.
379
+ :param index: The new edge's index, specifying the position of
380
+ the word that is recorded by this edge.
381
+ """
382
+ self._leaf = leaf
383
+ self._index = index
384
+ self._comparison_key = (leaf, index)
385
+
386
+ # Accessors
387
+ def lhs(self):
388
+ return self._leaf
389
+
390
+ def span(self):
391
+ return (self._index, self._index + 1)
392
+
393
+ def start(self):
394
+ return self._index
395
+
396
+ def end(self):
397
+ return self._index + 1
398
+
399
+ def length(self):
400
+ return 1
401
+
402
+ def rhs(self):
403
+ return ()
404
+
405
+ def dot(self):
406
+ return 0
407
+
408
+ def is_complete(self):
409
+ return True
410
+
411
+ def is_incomplete(self):
412
+ return False
413
+
414
+ def nextsym(self):
415
+ return None
416
+
417
+ # String representations
418
+ def __str__(self):
419
+ return f"[{self._index}:{self._index + 1}] {repr(self._leaf)}"
420
+
421
+ def __repr__(self):
422
+ return "[Edge: %s]" % (self)
423
+
424
+
425
+ ########################################################################
426
+ ## Chart
427
+ ########################################################################
428
+
429
+
430
+ class Chart:
431
+ """
432
+ A blackboard for hypotheses about the syntactic constituents of a
433
+ sentence. A chart contains a set of edges, and each edge encodes
434
+ a single hypothesis about the structure of some portion of the
435
+ sentence.
436
+
437
+ The ``select`` method can be used to select a specific collection
438
+ of edges. For example ``chart.select(is_complete=True, start=0)``
439
+ yields all complete edges whose start indices are 0. To ensure
440
+ the efficiency of these selection operations, ``Chart`` dynamically
441
+ creates and maintains an index for each set of attributes that
442
+ have been selected on.
443
+
444
+ In order to reconstruct the trees that are represented by an edge,
445
+ the chart associates each edge with a set of child pointer lists.
446
+ A child pointer list is a list of the edges that license an
447
+ edge's right-hand side.
448
+
449
+ :ivar _tokens: The sentence that the chart covers.
450
+ :ivar _num_leaves: The number of tokens.
451
+ :ivar _edges: A list of the edges in the chart
452
+ :ivar _edge_to_cpls: A dictionary mapping each edge to a set
453
+ of child pointer lists that are associated with that edge.
454
+ :ivar _indexes: A dictionary mapping tuples of edge attributes
455
+ to indices, where each index maps the corresponding edge
456
+ attribute values to lists of edges.
457
+ """
458
+
459
+ def __init__(self, tokens):
460
+ """
461
+ Construct a new chart. The chart is initialized with the
462
+ leaf edges corresponding to the terminal leaves.
463
+
464
+ :type tokens: list
465
+ :param tokens: The sentence that this chart will be used to parse.
466
+ """
467
+ # Record the sentence token and the sentence length.
468
+ self._tokens = tuple(tokens)
469
+ self._num_leaves = len(self._tokens)
470
+
471
+ # Initialise the chart.
472
+ self.initialize()
473
+
474
+ def initialize(self):
475
+ """
476
+ Clear the chart.
477
+ """
478
+ # A list of edges contained in this chart.
479
+ self._edges = []
480
+
481
+ # The set of child pointer lists associated with each edge.
482
+ self._edge_to_cpls = {}
483
+
484
+ # Indexes mapping attribute values to lists of edges
485
+ # (used by select()).
486
+ self._indexes = {}
487
+
488
+ # ////////////////////////////////////////////////////////////
489
+ # Sentence Access
490
+ # ////////////////////////////////////////////////////////////
491
+
492
+ def num_leaves(self):
493
+ """
494
+ Return the number of words in this chart's sentence.
495
+
496
+ :rtype: int
497
+ """
498
+ return self._num_leaves
499
+
500
+ def leaf(self, index):
501
+ """
502
+ Return the leaf value of the word at the given index.
503
+
504
+ :rtype: str
505
+ """
506
+ return self._tokens[index]
507
+
508
+ def leaves(self):
509
+ """
510
+ Return a list of the leaf values of each word in the
511
+ chart's sentence.
512
+
513
+ :rtype: list(str)
514
+ """
515
+ return self._tokens
516
+
517
+ # ////////////////////////////////////////////////////////////
518
+ # Edge access
519
+ # ////////////////////////////////////////////////////////////
520
+
521
+ def edges(self):
522
+ """
523
+ Return a list of all edges in this chart. New edges
524
+ that are added to the chart after the call to edges()
525
+ will *not* be contained in this list.
526
+
527
+ :rtype: list(EdgeI)
528
+ :see: ``iteredges``, ``select``
529
+ """
530
+ return self._edges[:]
531
+
532
+ def iteredges(self):
533
+ """
534
+ Return an iterator over the edges in this chart. It is
535
+ not guaranteed that new edges which are added to the
536
+ chart before the iterator is exhausted will also be generated.
537
+
538
+ :rtype: iter(EdgeI)
539
+ :see: ``edges``, ``select``
540
+ """
541
+ return iter(self._edges)
542
+
543
+ # Iterating over the chart yields its edges.
544
+ __iter__ = iteredges
545
+
546
+ def num_edges(self):
547
+ """
548
+ Return the number of edges contained in this chart.
549
+
550
+ :rtype: int
551
+ """
552
+ return len(self._edge_to_cpls)
553
+
554
+ def select(self, **restrictions):
555
+ """
556
+ Return an iterator over the edges in this chart. Any
557
+ new edges that are added to the chart before the iterator
558
+ is exahusted will also be generated. ``restrictions``
559
+ can be used to restrict the set of edges that will be
560
+ generated.
561
+
562
+ :param span: Only generate edges ``e`` where ``e.span()==span``
563
+ :param start: Only generate edges ``e`` where ``e.start()==start``
564
+ :param end: Only generate edges ``e`` where ``e.end()==end``
565
+ :param length: Only generate edges ``e`` where ``e.length()==length``
566
+ :param lhs: Only generate edges ``e`` where ``e.lhs()==lhs``
567
+ :param rhs: Only generate edges ``e`` where ``e.rhs()==rhs``
568
+ :param nextsym: Only generate edges ``e`` where
569
+ ``e.nextsym()==nextsym``
570
+ :param dot: Only generate edges ``e`` where ``e.dot()==dot``
571
+ :param is_complete: Only generate edges ``e`` where
572
+ ``e.is_complete()==is_complete``
573
+ :param is_incomplete: Only generate edges ``e`` where
574
+ ``e.is_incomplete()==is_incomplete``
575
+ :rtype: iter(EdgeI)
576
+ """
577
+ # If there are no restrictions, then return all edges.
578
+ if restrictions == {}:
579
+ return iter(self._edges)
580
+
581
+ # Find the index corresponding to the given restrictions.
582
+ restr_keys = sorted(restrictions.keys())
583
+ restr_keys = tuple(restr_keys)
584
+
585
+ # If it doesn't exist, then create it.
586
+ if restr_keys not in self._indexes:
587
+ self._add_index(restr_keys)
588
+
589
+ vals = tuple(restrictions[key] for key in restr_keys)
590
+ return iter(self._indexes[restr_keys].get(vals, []))
591
+
592
+ def _add_index(self, restr_keys):
593
+ """
594
+ A helper function for ``select``, which creates a new index for
595
+ a given set of attributes (aka restriction keys).
596
+ """
597
+ # Make sure it's a valid index.
598
+ for key in restr_keys:
599
+ if not hasattr(EdgeI, key):
600
+ raise ValueError("Bad restriction: %s" % key)
601
+
602
+ # Create the index.
603
+ index = self._indexes[restr_keys] = {}
604
+
605
+ # Add all existing edges to the index.
606
+ for edge in self._edges:
607
+ vals = tuple(getattr(edge, key)() for key in restr_keys)
608
+ index.setdefault(vals, []).append(edge)
609
+
610
+ def _register_with_indexes(self, edge):
611
+ """
612
+ A helper function for ``insert``, which registers the new
613
+ edge with all existing indexes.
614
+ """
615
+ for (restr_keys, index) in self._indexes.items():
616
+ vals = tuple(getattr(edge, key)() for key in restr_keys)
617
+ index.setdefault(vals, []).append(edge)
618
+
619
+ # ////////////////////////////////////////////////////////////
620
+ # Edge Insertion
621
+ # ////////////////////////////////////////////////////////////
622
+
623
+ def insert_with_backpointer(self, new_edge, previous_edge, child_edge):
624
+ """
625
+ Add a new edge to the chart, using a pointer to the previous edge.
626
+ """
627
+ cpls = self.child_pointer_lists(previous_edge)
628
+ new_cpls = [cpl + (child_edge,) for cpl in cpls]
629
+ return self.insert(new_edge, *new_cpls)
630
+
631
+ def insert(self, edge, *child_pointer_lists):
632
+ """
633
+ Add a new edge to the chart, and return True if this operation
634
+ modified the chart. In particular, return true iff the chart
635
+ did not already contain ``edge``, or if it did not already associate
636
+ ``child_pointer_lists`` with ``edge``.
637
+
638
+ :type edge: EdgeI
639
+ :param edge: The new edge
640
+ :type child_pointer_lists: sequence of tuple(EdgeI)
641
+ :param child_pointer_lists: A sequence of lists of the edges that
642
+ were used to form this edge. This list is used to reconstruct
643
+ the trees (or partial trees) that are associated with ``edge``.
644
+ :rtype: bool
645
+ """
646
+ # Is it a new edge?
647
+ if edge not in self._edge_to_cpls:
648
+ # Add it to the list of edges.
649
+ self._append_edge(edge)
650
+ # Register with indexes.
651
+ self._register_with_indexes(edge)
652
+
653
+ # Get the set of child pointer lists for this edge.
654
+ cpls = self._edge_to_cpls.setdefault(edge, OrderedDict())
655
+ chart_was_modified = False
656
+ for child_pointer_list in child_pointer_lists:
657
+ child_pointer_list = tuple(child_pointer_list)
658
+ if child_pointer_list not in cpls:
659
+ # It's a new CPL; register it, and return true.
660
+ cpls[child_pointer_list] = True
661
+ chart_was_modified = True
662
+ return chart_was_modified
663
+
664
+ def _append_edge(self, edge):
665
+ self._edges.append(edge)
666
+
667
+ # ////////////////////////////////////////////////////////////
668
+ # Tree extraction & child pointer lists
669
+ # ////////////////////////////////////////////////////////////
670
+
671
+ def parses(self, root, tree_class=Tree):
672
+ """
673
+ Return an iterator of the complete tree structures that span
674
+ the entire chart, and whose root node is ``root``.
675
+ """
676
+ for edge in self.select(start=0, end=self._num_leaves, lhs=root):
677
+ yield from self.trees(edge, tree_class=tree_class, complete=True)
678
+
679
+ def trees(self, edge, tree_class=Tree, complete=False):
680
+ """
681
+ Return an iterator of the tree structures that are associated
682
+ with ``edge``.
683
+
684
+ If ``edge`` is incomplete, then the unexpanded children will be
685
+ encoded as childless subtrees, whose node value is the
686
+ corresponding terminal or nonterminal.
687
+
688
+ :rtype: list(Tree)
689
+ :note: If two trees share a common subtree, then the same
690
+ Tree may be used to encode that subtree in
691
+ both trees. If you need to eliminate this subtree
692
+ sharing, then create a deep copy of each tree.
693
+ """
694
+ return iter(self._trees(edge, complete, memo={}, tree_class=tree_class))
695
+
696
+ def _trees(self, edge, complete, memo, tree_class):
697
+ """
698
+ A helper function for ``trees``.
699
+
700
+ :param memo: A dictionary used to record the trees that we've
701
+ generated for each edge, so that when we see an edge more
702
+ than once, we can reuse the same trees.
703
+ """
704
+ # If we've seen this edge before, then reuse our old answer.
705
+ if edge in memo:
706
+ return memo[edge]
707
+
708
+ # when we're reading trees off the chart, don't use incomplete edges
709
+ if complete and edge.is_incomplete():
710
+ return []
711
+
712
+ # Leaf edges.
713
+ if isinstance(edge, LeafEdge):
714
+ leaf = self._tokens[edge.start()]
715
+ memo[edge] = [leaf]
716
+ return [leaf]
717
+
718
+ # Until we're done computing the trees for edge, set
719
+ # memo[edge] to be empty. This has the effect of filtering
720
+ # out any cyclic trees (i.e., trees that contain themselves as
721
+ # descendants), because if we reach this edge via a cycle,
722
+ # then it will appear that the edge doesn't generate any trees.
723
+ memo[edge] = []
724
+ trees = []
725
+ lhs = edge.lhs().symbol()
726
+
727
+ # Each child pointer list can be used to form trees.
728
+ for cpl in self.child_pointer_lists(edge):
729
+ # Get the set of child choices for each child pointer.
730
+ # child_choices[i] is the set of choices for the tree's
731
+ # ith child.
732
+ child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl]
733
+
734
+ # For each combination of children, add a tree.
735
+ for children in itertools.product(*child_choices):
736
+ trees.append(tree_class(lhs, children))
737
+
738
+ # If the edge is incomplete, then extend it with "partial trees":
739
+ if edge.is_incomplete():
740
+ unexpanded = [tree_class(elt, []) for elt in edge.rhs()[edge.dot() :]]
741
+ for tree in trees:
742
+ tree.extend(unexpanded)
743
+
744
+ # Update the memoization dictionary.
745
+ memo[edge] = trees
746
+
747
+ # Return the list of trees.
748
+ return trees
749
+
750
+ def child_pointer_lists(self, edge):
751
+ """
752
+ Return the set of child pointer lists for the given edge.
753
+ Each child pointer list is a list of edges that have
754
+ been used to form this edge.
755
+
756
+ :rtype: list(list(EdgeI))
757
+ """
758
+ # Make a copy, in case they modify it.
759
+ return self._edge_to_cpls.get(edge, {}).keys()
760
+
761
+ # ////////////////////////////////////////////////////////////
762
+ # Display
763
+ # ////////////////////////////////////////////////////////////
764
+ def pretty_format_edge(self, edge, width=None):
765
+ """
766
+ Return a pretty-printed string representation of a given edge
767
+ in this chart.
768
+
769
+ :rtype: str
770
+ :param width: The number of characters allotted to each
771
+ index in the sentence.
772
+ """
773
+ if width is None:
774
+ width = 50 // (self.num_leaves() + 1)
775
+ (start, end) = (edge.start(), edge.end())
776
+
777
+ str = "|" + ("." + " " * (width - 1)) * start
778
+
779
+ # Zero-width edges are "#" if complete, ">" if incomplete
780
+ if start == end:
781
+ if edge.is_complete():
782
+ str += "#"
783
+ else:
784
+ str += ">"
785
+
786
+ # Spanning complete edges are "[===]"; Other edges are
787
+ # "[---]" if complete, "[--->" if incomplete
788
+ elif edge.is_complete() and edge.span() == (0, self._num_leaves):
789
+ str += "[" + ("=" * width) * (end - start - 1) + "=" * (width - 1) + "]"
790
+ elif edge.is_complete():
791
+ str += "[" + ("-" * width) * (end - start - 1) + "-" * (width - 1) + "]"
792
+ else:
793
+ str += "[" + ("-" * width) * (end - start - 1) + "-" * (width - 1) + ">"
794
+
795
+ str += (" " * (width - 1) + ".") * (self._num_leaves - end)
796
+ return str + "| %s" % edge
797
+
798
+ def pretty_format_leaves(self, width=None):
799
+ """
800
+ Return a pretty-printed string representation of this
801
+ chart's leaves. This string can be used as a header
802
+ for calls to ``pretty_format_edge``.
803
+ """
804
+ if width is None:
805
+ width = 50 // (self.num_leaves() + 1)
806
+
807
+ if self._tokens is not None and width > 1:
808
+ header = "|."
809
+ for tok in self._tokens:
810
+ header += tok[: width - 1].center(width - 1) + "."
811
+ header += "|"
812
+ else:
813
+ header = ""
814
+
815
+ return header
816
+
817
+ def pretty_format(self, width=None):
818
+ """
819
+ Return a pretty-printed string representation of this chart.
820
+
821
+ :param width: The number of characters allotted to each
822
+ index in the sentence.
823
+ :rtype: str
824
+ """
825
+ if width is None:
826
+ width = 50 // (self.num_leaves() + 1)
827
+ # sort edges: primary key=length, secondary key=start index.
828
+ # (and filter out the token edges)
829
+ edges = sorted((e.length(), e.start(), e) for e in self)
830
+ edges = [e for (_, _, e) in edges]
831
+
832
+ return (
833
+ self.pretty_format_leaves(width)
834
+ + "\n"
835
+ + "\n".join(self.pretty_format_edge(edge, width) for edge in edges)
836
+ )
837
+
838
+ # ////////////////////////////////////////////////////////////
839
+ # Display: Dot (AT&T Graphviz)
840
+ # ////////////////////////////////////////////////////////////
841
+
842
+ def dot_digraph(self):
843
+ # Header
844
+ s = "digraph nltk_chart {\n"
845
+ # s += ' size="5,5";\n'
846
+ s += " rankdir=LR;\n"
847
+ s += " node [height=0.1,width=0.1];\n"
848
+ s += ' node [style=filled, color="lightgray"];\n'
849
+
850
+ # Set up the nodes
851
+ for y in range(self.num_edges(), -1, -1):
852
+ if y == 0:
853
+ s += ' node [style=filled, color="black"];\n'
854
+ for x in range(self.num_leaves() + 1):
855
+ if y == 0 or (
856
+ x <= self._edges[y - 1].start() or x >= self._edges[y - 1].end()
857
+ ):
858
+ s += ' %04d.%04d [label=""];\n' % (x, y)
859
+
860
+ # Add a spacer
861
+ s += " x [style=invis]; x->0000.0000 [style=invis];\n"
862
+
863
+ # Declare ranks.
864
+ for x in range(self.num_leaves() + 1):
865
+ s += " {rank=same;"
866
+ for y in range(self.num_edges() + 1):
867
+ if y == 0 or (
868
+ x <= self._edges[y - 1].start() or x >= self._edges[y - 1].end()
869
+ ):
870
+ s += " %04d.%04d" % (x, y)
871
+ s += "}\n"
872
+
873
+ # Add the leaves
874
+ s += " edge [style=invis, weight=100];\n"
875
+ s += " node [shape=plaintext]\n"
876
+ s += " 0000.0000"
877
+ for x in range(self.num_leaves()):
878
+ s += "->%s->%04d.0000" % (self.leaf(x), x + 1)
879
+ s += ";\n\n"
880
+
881
+ # Add the edges
882
+ s += " edge [style=solid, weight=1];\n"
883
+ for y, edge in enumerate(self):
884
+ for x in range(edge.start()):
885
+ s += ' %04d.%04d -> %04d.%04d [style="invis"];\n' % (
886
+ x,
887
+ y + 1,
888
+ x + 1,
889
+ y + 1,
890
+ )
891
+ s += ' %04d.%04d -> %04d.%04d [label="%s"];\n' % (
892
+ edge.start(),
893
+ y + 1,
894
+ edge.end(),
895
+ y + 1,
896
+ edge,
897
+ )
898
+ for x in range(edge.end(), self.num_leaves()):
899
+ s += ' %04d.%04d -> %04d.%04d [style="invis"];\n' % (
900
+ x,
901
+ y + 1,
902
+ x + 1,
903
+ y + 1,
904
+ )
905
+ s += "}\n"
906
+ return s
907
+
908
+
909
+ ########################################################################
910
+ ## Chart Rules
911
+ ########################################################################
912
+
913
+
914
+ class ChartRuleI:
915
+ """
916
+ A rule that specifies what new edges are licensed by any given set
917
+ of existing edges. Each chart rule expects a fixed number of
918
+ edges, as indicated by the class variable ``NUM_EDGES``. In
919
+ particular:
920
+
921
+ - A chart rule with ``NUM_EDGES=0`` specifies what new edges are
922
+ licensed, regardless of existing edges.
923
+ - A chart rule with ``NUM_EDGES=1`` specifies what new edges are
924
+ licensed by a single existing edge.
925
+ - A chart rule with ``NUM_EDGES=2`` specifies what new edges are
926
+ licensed by a pair of existing edges.
927
+
928
+ :type NUM_EDGES: int
929
+ :cvar NUM_EDGES: The number of existing edges that this rule uses
930
+ to license new edges. Typically, this number ranges from zero
931
+ to two.
932
+ """
933
+
934
+ def apply(self, chart, grammar, *edges):
935
+ """
936
+ Return a generator that will add edges licensed by this rule
937
+ and the given edges to the chart, one at a time. Each
938
+ time the generator is resumed, it will either add a new
939
+ edge and yield that edge; or return.
940
+
941
+ :type edges: list(EdgeI)
942
+ :param edges: A set of existing edges. The number of edges
943
+ that should be passed to ``apply()`` is specified by the
944
+ ``NUM_EDGES`` class variable.
945
+ :rtype: iter(EdgeI)
946
+ """
947
+ raise NotImplementedError()
948
+
949
+ def apply_everywhere(self, chart, grammar):
950
+ """
951
+ Return a generator that will add all edges licensed by
952
+ this rule, given the edges that are currently in the
953
+ chart, one at a time. Each time the generator is resumed,
954
+ it will either add a new edge and yield that edge; or return.
955
+
956
+ :rtype: iter(EdgeI)
957
+ """
958
+ raise NotImplementedError()
959
+
960
+
961
+ class AbstractChartRule(ChartRuleI):
962
+ """
963
+ An abstract base class for chart rules. ``AbstractChartRule``
964
+ provides:
965
+
966
+ - A default implementation for ``apply``.
967
+ - A default implementation for ``apply_everywhere``.
968
+ (Currently, this implementation assumes that ``NUM_EDGES <= 3``.)
969
+ - A default implementation for ``__str__``, which returns a
970
+ name based on the rule's class name.
971
+ """
972
+
973
+ # Subclasses must define apply.
974
+ def apply(self, chart, grammar, *edges):
975
+ raise NotImplementedError()
976
+
977
+ # Default: loop through the given number of edges, and call
978
+ # self.apply() for each set of edges.
979
+ def apply_everywhere(self, chart, grammar):
980
+ if self.NUM_EDGES == 0:
981
+ yield from self.apply(chart, grammar)
982
+
983
+ elif self.NUM_EDGES == 1:
984
+ for e1 in chart:
985
+ yield from self.apply(chart, grammar, e1)
986
+
987
+ elif self.NUM_EDGES == 2:
988
+ for e1 in chart:
989
+ for e2 in chart:
990
+ yield from self.apply(chart, grammar, e1, e2)
991
+
992
+ elif self.NUM_EDGES == 3:
993
+ for e1 in chart:
994
+ for e2 in chart:
995
+ for e3 in chart:
996
+ yield from self.apply(chart, grammar, e1, e2, e3)
997
+
998
+ else:
999
+ raise AssertionError("NUM_EDGES>3 is not currently supported")
1000
+
1001
+ # Default: return a name based on the class name.
1002
+ def __str__(self):
1003
+ # Add spaces between InitialCapsWords.
1004
+ return re.sub("([a-z])([A-Z])", r"\1 \2", self.__class__.__name__)
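+
+ # An illustrative sketch (not part of the original module): the smallest
+ # rule that satisfies this interface. Set NUM_EDGES, write apply() as a
+ # generator, and apply_everywhere() and __str__() are inherited for free.
+ class _DemoNoOpRule(AbstractChartRule):
+     NUM_EDGES = 1
+
+     def apply(self, chart, grammar, edge):
+         # A real rule would call chart.insert(...) here and yield each
+         # edge whose insertion actually changed the chart.
+         return
+         yield  # unreachable; it just makes apply() a generator function
+
+ # str(_DemoNoOpRule()) -> "_Demo No Op Rule", via the default __str__.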
1005
+
1006
+
1007
+ # ////////////////////////////////////////////////////////////
1008
+ # Fundamental Rule
1009
+ # ////////////////////////////////////////////////////////////
1010
+
1011
+
1012
+ class FundamentalRule(AbstractChartRule):
1013
+ r"""
1014
+ A rule that joins two adjacent edges to form a single combined
1015
+ edge. In particular, this rule specifies that any pair of edges
1016
+
1017
+ - ``[A -> alpha \* B beta][i:j]``
1018
+ - ``[B -> gamma \*][j:k]``
1019
+
1020
+ licenses the edge:
1021
+
1022
+ - ``[A -> alpha B \* beta][i:k]``
1023
+ """
1024
+
1025
+ NUM_EDGES = 2
1026
+
1027
+ def apply(self, chart, grammar, left_edge, right_edge):
1028
+ # Make sure the rule is applicable.
1029
+ if not (
1030
+ left_edge.is_incomplete()
1031
+ and right_edge.is_complete()
1032
+ and left_edge.end() == right_edge.start()
1033
+ and left_edge.nextsym() == right_edge.lhs()
1034
+ ):
1035
+ return
1036
+
1037
+ # Construct the new edge.
1038
+ new_edge = left_edge.move_dot_forward(right_edge.end())
1039
+
1040
+ # Insert it into the chart.
1041
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
1042
+ yield new_edge
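+
+ # An illustrative sketch (not part of the original module): drive the rule
+ # by hand on a two-token chart. apply() ignores the grammar argument, so
+ # None is passed for it here.
+ def _demo_fundamental_rule():
+     from nltk.grammar import Nonterminal
+     NP, Det, Noun = (Nonterminal(s) for s in ("NP", "Det", "Noun"))
+     chart = Chart(["the", "dog"])
+     left = TreeEdge((0, 1), NP, (Det, Noun), dot=1)   # [NP -> Det * Noun][0:1]
+     right = TreeEdge((1, 2), Noun, ("dog",), dot=1)   # [Noun -> 'dog' *][1:2]
+     chart.insert(left, ())
+     chart.insert(right, ())
+     for new_edge in FundamentalRule().apply(chart, None, left, right):
+         print(new_edge)   # something like [NP -> Det Noun *][0:2]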
1043
+
1044
+
1045
+ class SingleEdgeFundamentalRule(FundamentalRule):
1046
+ r"""
1047
+ A rule that joins a given edge with adjacent edges in the chart,
1048
+ to form combined edges. In particular, this rule specifies that
1049
+ either of the edges:
1050
+
1051
+ - ``[A -> alpha \* B beta][i:j]``
1052
+ - ``[B -> gamma \*][j:k]``
1053
+
1054
+ licenses the edge:
1055
+
1056
+ - ``[A -> alpha B \* beta][i:k]``
1057
+
1058
+ if the other edge is already in the chart.
1059
+
1060
+ :note: This is basically ``FundamentalRule``, with one edge left
1061
+ unspecified.
1062
+ """
1063
+
1064
+ NUM_EDGES = 1
1065
+
1066
+ def apply(self, chart, grammar, edge):
1067
+ if edge.is_incomplete():
1068
+ yield from self._apply_incomplete(chart, grammar, edge)
1069
+ else:
1070
+ yield from self._apply_complete(chart, grammar, edge)
1071
+
1072
+ def _apply_complete(self, chart, grammar, right_edge):
1073
+ for left_edge in chart.select(
1074
+ end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()
1075
+ ):
1076
+ new_edge = left_edge.move_dot_forward(right_edge.end())
1077
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
1078
+ yield new_edge
1079
+
1080
+ def _apply_incomplete(self, chart, grammar, left_edge):
1081
+ for right_edge in chart.select(
1082
+ start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()
1083
+ ):
1084
+ new_edge = left_edge.move_dot_forward(right_edge.end())
1085
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
1086
+ yield new_edge
1087
+
1088
+
1089
+ # ////////////////////////////////////////////////////////////
1090
+ # Inserting Terminal Leafs
1091
+ # ////////////////////////////////////////////////////////////
1092
+
1093
+
1094
+ class LeafInitRule(AbstractChartRule):
1095
+ NUM_EDGES = 0
1096
+
1097
+ def apply(self, chart, grammar):
1098
+ for index in range(chart.num_leaves()):
1099
+ new_edge = LeafEdge(chart.leaf(index), index)
1100
+ if chart.insert(new_edge, ()):
1101
+ yield new_edge
1102
+
1103
+
1104
+ # ////////////////////////////////////////////////////////////
1105
+ # Top-Down Prediction
1106
+ # ////////////////////////////////////////////////////////////
1107
+
1108
+
1109
+ class TopDownInitRule(AbstractChartRule):
1110
+ r"""
1111
+ A rule licensing edges corresponding to the grammar productions for
1112
+ the grammar's start symbol. In particular, this rule specifies that
1113
+ ``[S -> \* alpha][0:0]`` is licensed for each grammar production
1114
+ ``S -> alpha``, where ``S`` is the grammar's start symbol.
1115
+ """
1116
+
1117
+ NUM_EDGES = 0
1118
+
1119
+ def apply(self, chart, grammar):
1120
+ for prod in grammar.productions(lhs=grammar.start()):
1121
+ new_edge = TreeEdge.from_production(prod, 0)
1122
+ if chart.insert(new_edge, ()):
1123
+ yield new_edge
1124
+
1125
+
1126
+ class TopDownPredictRule(AbstractChartRule):
1127
+ r"""
1128
+ A rule licensing edges corresponding to the grammar productions
1129
+ for the nonterminal following an incomplete edge's dot. In
1130
+ particular, this rule specifies that
1131
+ ``[A -> alpha \* B beta][i:j]`` licenses the edge
1132
+ ``[B -> \* gamma][j:j]`` for each grammar production ``B -> gamma``.
1133
+
1134
+ :note: This rule corresponds to the Predictor Rule in Earley parsing.
1135
+ """
1136
+
1137
+ NUM_EDGES = 1
1138
+
1139
+ def apply(self, chart, grammar, edge):
1140
+ if edge.is_complete():
1141
+ return
1142
+ for prod in grammar.productions(lhs=edge.nextsym()):
1143
+ new_edge = TreeEdge.from_production(prod, edge.end())
1144
+ if chart.insert(new_edge, ()):
1145
+ yield new_edge
1146
+
1147
+
1148
+ class CachedTopDownPredictRule(TopDownPredictRule):
1149
+ r"""
1150
+ A cached version of ``TopDownPredictRule``. After the first time
1151
+ this rule is applied to an edge with a given ``end`` and ``next``,
1152
+ it will not generate any more edges for edges with that ``end`` and
1153
+ ``next``.
1154
+
1155
+ If ``chart`` or ``grammar`` are changed, then the cache is flushed.
1156
+ """
1157
+
1158
+ def __init__(self):
1159
+ TopDownPredictRule.__init__(self)
1160
+ self._done = {}
1161
+
1162
+ def apply(self, chart, grammar, edge):
1163
+ if edge.is_complete():
1164
+ return
1165
+ nextsym, index = edge.nextsym(), edge.end()
1166
+ if not is_nonterminal(nextsym):
1167
+ return
1168
+
1169
+ # If we've already applied this rule to an edge with the same
1170
+ # next & end, and the chart & grammar have not changed, then
1171
+ # just return (no new edges to add).
1172
+ done = self._done.get((nextsym, index), (None, None))
1173
+ if done[0] is chart and done[1] is grammar:
1174
+ return
1175
+
1176
+ # Add all the edges indicated by the top down expand rule.
1177
+ for prod in grammar.productions(lhs=nextsym):
1178
+ # If the left corner of the predicted production is a
1179
+ # leaf (i.e. a terminal), it must match the next input token.
1180
+ if prod.rhs():
1181
+ first = prod.rhs()[0]
1182
+ if is_terminal(first):
1183
+ if index >= chart.num_leaves() or first != chart.leaf(index):
1184
+ continue
1185
+
1186
+ new_edge = TreeEdge.from_production(prod, index)
1187
+ if chart.insert(new_edge, ()):
1188
+ yield new_edge
1189
+
1190
+ # Record the fact that we've applied this rule.
1191
+ self._done[nextsym, index] = (chart, grammar)
1192
+
1193
+
1194
+ # ////////////////////////////////////////////////////////////
1195
+ # Bottom-Up Prediction
1196
+ # ////////////////////////////////////////////////////////////
1197
+
1198
+
1199
+ class BottomUpPredictRule(AbstractChartRule):
1200
+ r"""
1201
+ A rule licensing any edge corresponding to a production whose
1202
+ right-hand side begins with a complete edge's left-hand side. In
1203
+ particular, this rule specifies that ``[A -> alpha \*]`` licenses
1204
+ the edge ``[B -> \* A beta]`` for each grammar production ``B -> A beta``.
1205
+ """
1206
+
1207
+ NUM_EDGES = 1
1208
+
1209
+ def apply(self, chart, grammar, edge):
1210
+ if edge.is_incomplete():
1211
+ return
1212
+ for prod in grammar.productions(rhs=edge.lhs()):
1213
+ new_edge = TreeEdge.from_production(prod, edge.start())
1214
+ if chart.insert(new_edge, ()):
1215
+ yield new_edge
1216
+
1217
+
1218
+ class BottomUpPredictCombineRule(BottomUpPredictRule):
1219
+ r"""
1220
+ A rule licensing any edge corresponding to a production whose
1221
+ right-hand side begins with a complete edge's left-hand side. In
1222
+ particular, this rule specifies that ``[A -> alpha \*]``
1223
+ licenses the edge ``[B -> A \* beta]`` for each grammar
1224
+ production ``B -> A beta``.
1225
+
1226
+ :note: This is like ``BottomUpPredictRule``, but it also applies
1227
+ the ``FundamentalRule`` to the resulting edge.
1228
+ """
1229
+
1230
+ NUM_EDGES = 1
1231
+
1232
+ def apply(self, chart, grammar, edge):
1233
+ if edge.is_incomplete():
1234
+ return
1235
+ for prod in grammar.productions(rhs=edge.lhs()):
1236
+ new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1)
1237
+ if chart.insert(new_edge, (edge,)):
1238
+ yield new_edge
1239
+
1240
+
1241
+ class EmptyPredictRule(AbstractChartRule):
1242
+ """
1243
+ A rule that inserts all empty productions as passive edges,
1244
+ in every position in the chart.
1245
+ """
1246
+
1247
+ NUM_EDGES = 0
1248
+
1249
+ def apply(self, chart, grammar):
1250
+ for prod in grammar.productions(empty=True):
1251
+ for index in range(chart.num_leaves() + 1):
1252
+ new_edge = TreeEdge.from_production(prod, index)
1253
+ if chart.insert(new_edge, ()):
1254
+ yield new_edge
1255
+
1256
+
1257
+ ########################################################################
1258
+ ## Filtered Bottom Up
1259
+ ########################################################################
1260
+
1261
+
1262
+ class FilteredSingleEdgeFundamentalRule(SingleEdgeFundamentalRule):
1263
+ def _apply_complete(self, chart, grammar, right_edge):
1264
+ end = right_edge.end()
1265
+ nexttoken = end < chart.num_leaves() and chart.leaf(end)
1266
+ for left_edge in chart.select(
1267
+ end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()
1268
+ ):
1269
+ if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()):
1270
+ new_edge = left_edge.move_dot_forward(right_edge.end())
1271
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
1272
+ yield new_edge
1273
+
1274
+ def _apply_incomplete(self, chart, grammar, left_edge):
1275
+ for right_edge in chart.select(
1276
+ start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()
1277
+ ):
1278
+ end = right_edge.end()
1279
+ nexttoken = end < chart.num_leaves() and chart.leaf(end)
1280
+ if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()):
1281
+ new_edge = left_edge.move_dot_forward(right_edge.end())
1282
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
1283
+ yield new_edge
1284
+
1285
+
1286
+ class FilteredBottomUpPredictCombineRule(BottomUpPredictCombineRule):
1287
+ def apply(self, chart, grammar, edge):
1288
+ if edge.is_incomplete():
1289
+ return
1290
+
1291
+ end = edge.end()
1292
+ nexttoken = end < chart.num_leaves() and chart.leaf(end)
1293
+ for prod in grammar.productions(rhs=edge.lhs()):
1294
+ if _bottomup_filter(grammar, nexttoken, prod.rhs()):
1295
+ new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1)
1296
+ if chart.insert(new_edge, (edge,)):
1297
+ yield new_edge
1298
+
1299
+
1300
+ def _bottomup_filter(grammar, nexttoken, rhs, dot=0):
1301
+ if len(rhs) <= dot + 1:
1302
+ return True
1303
+ _next = rhs[dot + 1]
1304
+ if is_terminal(_next):
1305
+ return nexttoken == _next
1306
+ else:
1307
+ return grammar.is_leftcorner(_next, nexttoken)
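+
+ # An illustrative sketch (not part of the original module): the filter keeps
+ # an edge only when the next input token can begin the symbol that has to be
+ # parsed next (rhs[dot + 1]), per the grammar's left-corner relation.
+ def _demo_bottomup_filter():
+     from nltk import CFG
+     from nltk.grammar import Nonterminal
+     grammar = CFG.fromstring(
+         """
+         S -> NP VP
+         NP -> 'the' N
+         N -> 'dog'
+         VP -> 'barks'
+         """
+     )
+     rhs = (Nonterminal("NP"), Nonterminal("VP"))
+     print(_bottomup_filter(grammar, "barks", rhs, dot=0))  # True; 'barks' can begin VP
+     print(_bottomup_filter(grammar, "dog", rhs, dot=0))    # False; 'dog' cannot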
1308
+
1309
+
1310
+ ########################################################################
1311
+ ## Generic Chart Parser
1312
+ ########################################################################
1313
+
1314
+ TD_STRATEGY = [
1315
+ LeafInitRule(),
1316
+ TopDownInitRule(),
1317
+ CachedTopDownPredictRule(),
1318
+ SingleEdgeFundamentalRule(),
1319
+ ]
1320
+ BU_STRATEGY = [
1321
+ LeafInitRule(),
1322
+ EmptyPredictRule(),
1323
+ BottomUpPredictRule(),
1324
+ SingleEdgeFundamentalRule(),
1325
+ ]
1326
+ BU_LC_STRATEGY = [
1327
+ LeafInitRule(),
1328
+ EmptyPredictRule(),
1329
+ BottomUpPredictCombineRule(),
1330
+ SingleEdgeFundamentalRule(),
1331
+ ]
1332
+
1333
+ LC_STRATEGY = [
1334
+ LeafInitRule(),
1335
+ FilteredBottomUpPredictCombineRule(),
1336
+ FilteredSingleEdgeFundamentalRule(),
1337
+ ]
1338
+
1339
+
1340
+ class ChartParser(ParserI):
1341
+ """
1342
+ A generic chart parser. A "strategy", or list of
1343
+ ``ChartRuleI`` instances, is used to decide what edges to add to
1344
+ the chart. In particular, ``ChartParser`` uses the following
1345
+ algorithm to parse texts:
1346
+
1347
+ | Until no new edges are added:
1348
+ | For each *rule* in *strategy*:
1349
+ | Apply *rule* to any applicable edges in the chart.
1350
+ | Return any complete parses in the chart
1351
+ """
1352
+
1353
+ def __init__(
1354
+ self,
1355
+ grammar,
1356
+ strategy=BU_LC_STRATEGY,
1357
+ trace=0,
1358
+ trace_chart_width=50,
1359
+ use_agenda=True,
1360
+ chart_class=Chart,
1361
+ ):
1362
+ """
1363
+ Create a new chart parser, that uses ``grammar`` to parse
1364
+ texts.
1365
+
1366
+ :type grammar: CFG
1367
+ :param grammar: The grammar used to parse texts.
1368
+ :type strategy: list(ChartRuleI)
1369
+ :param strategy: A list of rules that should be used to decide
1370
+ what edges to add to the chart (bottom-up left-corner strategy by default).
1371
+ :type trace: int
1372
+ :param trace: The level of tracing that should be used when
1373
+ parsing a text. ``0`` will generate no tracing output;
1374
+ and higher numbers will produce more verbose tracing
1375
+ output.
1376
+ :type trace_chart_width: int
1377
+ :param trace_chart_width: The default total width reserved for
1378
+ the chart in trace output. The remainder of each line will
1379
+ be used to display edges.
1380
+ :type use_agenda: bool
1381
+ :param use_agenda: Use an optimized agenda-based algorithm,
1382
+ if possible.
1383
+ :param chart_class: The class that should be used to create
1384
+ the parse charts.
1385
+ """
1386
+ self._grammar = grammar
1387
+ self._strategy = strategy
1388
+ self._trace = trace
1389
+ self._trace_chart_width = trace_chart_width
1390
+ # If the strategy only consists of axioms (NUM_EDGES==0) and
1391
+ # inference rules (NUM_EDGES==1), we can use an agenda-based algorithm:
1392
+ self._use_agenda = use_agenda
1393
+ self._chart_class = chart_class
1394
+
1395
+ self._axioms = []
1396
+ self._inference_rules = []
1397
+ for rule in strategy:
1398
+ if rule.NUM_EDGES == 0:
1399
+ self._axioms.append(rule)
1400
+ elif rule.NUM_EDGES == 1:
1401
+ self._inference_rules.append(rule)
1402
+ else:
1403
+ self._use_agenda = False
1404
+
1405
+ def grammar(self):
1406
+ return self._grammar
1407
+
1408
+ def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width):
1409
+ if not trace:
1410
+ return
1411
+ print_rule_header = trace > 1
1412
+ for edge in new_edges:
1413
+ if print_rule_header:
1414
+ print("%s:" % rule)
1415
+ print_rule_header = False
1416
+ print(chart.pretty_format_edge(edge, edge_width))
1417
+
1418
+ def chart_parse(self, tokens, trace=None):
1419
+ """
1420
+ Return the final parse ``Chart`` from which all possible
1421
+ parse trees can be extracted.
1422
+
1423
+ :param tokens: The sentence to be parsed
1424
+ :type tokens: list(str)
1425
+ :rtype: Chart
1426
+ """
1427
+ if trace is None:
1428
+ trace = self._trace
1429
+ trace_new_edges = self._trace_new_edges
1430
+
1431
+ tokens = list(tokens)
1432
+ self._grammar.check_coverage(tokens)
1433
+ chart = self._chart_class(tokens)
1434
+ grammar = self._grammar
1435
+
1436
+ # Width, for printing trace edges.
1437
+ trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1)
1438
+ if trace:
1439
+ print(chart.pretty_format_leaves(trace_edge_width))
1440
+
1441
+ if self._use_agenda:
1442
+ # Use an agenda-based algorithm.
1443
+ for axiom in self._axioms:
1444
+ new_edges = list(axiom.apply(chart, grammar))
1445
+ trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
1446
+
1447
+ inference_rules = self._inference_rules
1448
+ agenda = chart.edges()
1449
+ # We reverse the initial agenda, since it is a stack
1450
+ # but chart.edges() functions as a queue.
1451
+ agenda.reverse()
1452
+ while agenda:
1453
+ edge = agenda.pop()
1454
+ for rule in inference_rules:
1455
+ new_edges = list(rule.apply(chart, grammar, edge))
1456
+ if trace:
1457
+ trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
1458
+ agenda += new_edges
1459
+
1460
+ else:
1461
+ # Do not use an agenda-based algorithm.
1462
+ edges_added = True
1463
+ while edges_added:
1464
+ edges_added = 0
1465
+ for rule in self._strategy:
1466
+ new_edges = list(rule.apply_everywhere(chart, grammar))
1467
+ # Accumulate across all rules so the loop only stops after a
+ # full pass over the strategy adds no new edges.
+ edges_added += len(new_edges)
1468
+ trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
1469
+
1470
+ # Return the final chart.
1471
+ return chart
1472
+
1473
+ def parse(self, tokens, tree_class=Tree):
1474
+ chart = self.chart_parse(tokens)
1475
+ return iter(chart.parses(self._grammar.start(), tree_class=tree_class))
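+
+ # An illustrative usage sketch (module-level, not part of the class): parse
+ # with the default bottom-up left-corner strategy, then with TD_STRATEGY.
+ def _demo_chart_parser():
+     from nltk import CFG
+     grammar = CFG.fromstring(
+         """
+         S -> NP VP
+         NP -> 'John' | 'Mary'
+         VP -> V NP
+         V -> 'loves'
+         """
+     )
+     for tree in ChartParser(grammar).parse("John loves Mary".split()):
+         print(tree)
+     # Any of the strategy lists defined above can be swapped in:
+     top_down = ChartParser(grammar, strategy=TD_STRATEGY)
+     print(len(list(top_down.parse("John loves Mary".split()))))   # 1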
1476
+
1477
+
1478
+ class TopDownChartParser(ChartParser):
1479
+ """
1480
+ A ``ChartParser`` using a top-down parsing strategy.
1481
+ See ``ChartParser`` for more information.
1482
+ """
1483
+
1484
+ def __init__(self, grammar, **parser_args):
1485
+ ChartParser.__init__(self, grammar, TD_STRATEGY, **parser_args)
1486
+
1487
+
1488
+ class BottomUpChartParser(ChartParser):
1489
+ """
1490
+ A ``ChartParser`` using a bottom-up parsing strategy.
1491
+ See ``ChartParser`` for more information.
1492
+ """
1493
+
1494
+ def __init__(self, grammar, **parser_args):
1495
+ if isinstance(grammar, PCFG):
1496
+ warnings.warn(
1497
+ "BottomUpChartParser only works for CFG, "
1498
+ "use BottomUpProbabilisticChartParser instead",
1499
+ category=DeprecationWarning,
1500
+ )
1501
+ ChartParser.__init__(self, grammar, BU_STRATEGY, **parser_args)
1502
+
1503
+
1504
+ class BottomUpLeftCornerChartParser(ChartParser):
1505
+ """
1506
+ A ``ChartParser`` using a bottom-up left-corner parsing strategy.
1507
+ This strategy is often more efficient than standard bottom-up.
1508
+ See ``ChartParser`` for more information.
1509
+ """
1510
+
1511
+ def __init__(self, grammar, **parser_args):
1512
+ ChartParser.__init__(self, grammar, BU_LC_STRATEGY, **parser_args)
1513
+
1514
+
1515
+ class LeftCornerChartParser(ChartParser):
1516
+ def __init__(self, grammar, **parser_args):
1517
+ if not grammar.is_nonempty():
1518
+ raise ValueError(
1519
+ "LeftCornerParser only works for grammars " "without empty productions."
1520
+ )
1521
+ ChartParser.__init__(self, grammar, LC_STRATEGY, **parser_args)
1522
+
1523
+
1524
+ ########################################################################
1525
+ ## Stepping Chart Parser
1526
+ ########################################################################
1527
+
1528
+
1529
+ class SteppingChartParser(ChartParser):
1530
+ """
1531
+ A ``ChartParser`` that allows you to step through the parsing
1532
+ process, adding a single edge at a time. It also allows you to
1533
+ change the parser's strategy or grammar midway through parsing a
1534
+ text.
1535
+
1536
+ The ``initialize`` method is used to start parsing a text. ``step``
1537
+ adds a single edge to the chart. ``set_strategy`` changes the
1538
+ strategy used by the chart parser. ``parses`` returns the set of
1539
+ parses that have been found by the chart parser.
1540
+
1541
+ :ivar _restart: Records whether the parser's strategy, grammar,
1542
+ or chart has been changed. If so, then ``step`` must restart
1543
+ the parsing algorithm.
1544
+ """
1545
+
1546
+ def __init__(self, grammar, strategy=[], trace=0):
1547
+ self._chart = None
1548
+ self._current_chartrule = None
1549
+ self._restart = False
1550
+ ChartParser.__init__(self, grammar, strategy, trace)
1551
+
1552
+ # ////////////////////////////////////////////////////////////
1553
+ # Initialization
1554
+ # ////////////////////////////////////////////////////////////
1555
+
1556
+ def initialize(self, tokens):
1557
+ "Begin parsing the given tokens."
1558
+ self._chart = Chart(list(tokens))
1559
+ self._restart = True
1560
+
1561
+ # ////////////////////////////////////////////////////////////
1562
+ # Stepping
1563
+ # ////////////////////////////////////////////////////////////
1564
+
1565
+ def step(self):
1566
+ """
1567
+ Return a generator that adds edges to the chart, one at a
1568
+ time. Each time the generator is resumed, it adds a single
1569
+ edge and yields that edge. If no more edges can be added,
1570
+ then it yields None.
1571
+
1572
+ If the parser's strategy, grammar, or chart is changed, then
1573
+ the generator will continue adding edges using the new
1574
+ strategy, grammar, or chart.
1575
+
1576
+ Note that this generator never terminates, since the grammar
1577
+ or strategy might be changed to values that would add new
1578
+ edges. Instead, it yields None when no more edges can be
1579
+ added with the current strategy and grammar.
1580
+ """
1581
+ if self._chart is None:
1582
+ raise ValueError("Parser must be initialized first")
1583
+ while True:
1584
+ self._restart = False
1585
+ w = 50 // (self._chart.num_leaves() + 1)
1586
+
1587
+ for e in self._parse():
1588
+ if self._trace > 1:
1589
+ print(self._current_chartrule)
1590
+ if self._trace > 0:
1591
+ print(self._chart.pretty_format_edge(e, w))
1592
+ yield e
1593
+ if self._restart:
1594
+ break
1595
+ else:
1596
+ yield None # No more edges.
1597
+
1598
+ def _parse(self):
1599
+ """
1600
+ A generator that implements the actual parsing algorithm.
1601
+ ``step`` iterates through this generator, and restarts it
1602
+ whenever the parser's strategy, grammar, or chart is modified.
1603
+ """
1604
+ chart = self._chart
1605
+ grammar = self._grammar
1606
+ edges_added = 1
1607
+ while edges_added > 0:
1608
+ edges_added = 0
1609
+ for rule in self._strategy:
1610
+ self._current_chartrule = rule
1611
+ for e in rule.apply_everywhere(chart, grammar):
1612
+ edges_added += 1
1613
+ yield e
1614
+
1615
+ # ////////////////////////////////////////////////////////////
1616
+ # Accessors
1617
+ # ////////////////////////////////////////////////////////////
1618
+
1619
+ def strategy(self):
1620
+ "Return the strategy used by this parser."
1621
+ return self._strategy
1622
+
1623
+ def grammar(self):
1624
+ "Return the grammar used by this parser."
1625
+ return self._grammar
1626
+
1627
+ def chart(self):
1628
+ "Return the chart that is used by this parser."
1629
+ return self._chart
1630
+
1631
+ def current_chartrule(self):
1632
+ "Return the chart rule used to generate the most recent edge."
1633
+ return self._current_chartrule
1634
+
1635
+ def parses(self, tree_class=Tree):
1636
+ "Return the parse trees currently contained in the chart."
1637
+ return self._chart.parses(self._grammar.start(), tree_class)
1638
+
1639
+ # ////////////////////////////////////////////////////////////
1640
+ # Parser modification
1641
+ # ////////////////////////////////////////////////////////////
1642
+
1643
+ def set_strategy(self, strategy):
1644
+ """
1645
+ Change the strategy that the parser uses to decide which edges
1646
+ to add to the chart.
1647
+
1648
+ :type strategy: list(ChartRuleI)
1649
+ :param strategy: A list of rules that should be used to decide
1650
+ what edges to add to the chart.
1651
+ """
1652
+ if strategy == self._strategy:
1653
+ return
1654
+ self._strategy = strategy[:] # Make a copy.
1655
+ self._restart = True
1656
+
1657
+ def set_grammar(self, grammar):
1658
+ "Change the grammar used by the parser."
1659
+ if grammar is self._grammar:
1660
+ return
1661
+ self._grammar = grammar
1662
+ self._restart = True
1663
+
1664
+ def set_chart(self, chart):
1665
+ "Load a given chart into the chart parser."
1666
+ if chart is self._chart:
1667
+ return
1668
+ self._chart = chart
1669
+ self._restart = True
1670
+
1671
+ # ////////////////////////////////////////////////////////////
1672
+ # Standard parser methods
1673
+ # ////////////////////////////////////////////////////////////
1674
+
1675
+ def parse(self, tokens, tree_class=Tree):
1676
+ tokens = list(tokens)
1677
+ self._grammar.check_coverage(tokens)
1678
+
1679
+ # Initialize ourselves.
1680
+ self.initialize(tokens)
1681
+
1682
+ # Step until no more edges are generated.
1683
+ for e in self.step():
1684
+ if e is None:
1685
+ break
1686
+
1687
+ # Return an iterator of complete parses.
1688
+ return self.parses(tree_class=tree_class)
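+
+ # An illustrative sketch (not part of the original module): pull edges one
+ # at a time and stop early, which a plain ChartParser cannot do.
+ # demo_grammar() is the sample grammar defined near the end of this module.
+ def _demo_stepping_parser():
+     parser = SteppingChartParser(demo_grammar(), strategy=BU_LC_STRATEGY)
+     parser.initialize("I saw John".split())
+     for count, edge in enumerate(parser.step()):
+         if edge is None or count >= 10:
+             break
+         print(edge)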
1689
+
1690
+
1691
+ ########################################################################
1692
+ ## Demo Code
1693
+ ########################################################################
1694
+
1695
+
1696
+ def demo_grammar():
1697
+ from nltk.grammar import CFG
1698
+
1699
+ return CFG.fromstring(
1700
+ """
1701
+ S -> NP VP
1702
+ PP -> "with" NP
1703
+ NP -> NP PP
1704
+ VP -> VP PP
1705
+ VP -> Verb NP
1706
+ VP -> Verb
1707
+ NP -> Det Noun
1708
+ NP -> "John"
1709
+ NP -> "I"
1710
+ Det -> "the"
1711
+ Det -> "my"
1712
+ Det -> "a"
1713
+ Noun -> "dog"
1714
+ Noun -> "cookie"
1715
+ Verb -> "ate"
1716
+ Verb -> "saw"
1717
+ Prep -> "with"
1718
+ Prep -> "under"
1719
+ """
1720
+ )
1721
+
1722
+
1723
+ def demo(
1724
+ choice=None,
1725
+ print_times=True,
1726
+ print_grammar=False,
1727
+ print_trees=True,
1728
+ trace=2,
1729
+ sent="I saw John with a dog with my cookie",
1730
+ numparses=5,
1731
+ ):
1732
+ """
1733
+ A demonstration of the chart parsers.
1734
+ """
1735
+ import sys
1736
+ import time
1737
+
1738
+ from nltk import CFG, Production, nonterminals
1739
+
1740
+ # The grammar for ChartParser and SteppingChartParser:
1741
+ grammar = demo_grammar()
1742
+ if print_grammar:
1743
+ print("* Grammar")
1744
+ print(grammar)
1745
+
1746
+ # Tokenize the sample sentence.
1747
+ print("* Sentence:")
1748
+ print(sent)
1749
+ tokens = sent.split()
1750
+ print(tokens)
1751
+ print()
1752
+
1753
+ # Ask the user which parser to test,
1754
+ # if the parser wasn't provided as an argument
1755
+ if choice is None:
1756
+ print(" 1: Top-down chart parser")
1757
+ print(" 2: Bottom-up chart parser")
1758
+ print(" 3: Bottom-up left-corner chart parser")
1759
+ print(" 4: Left-corner chart parser with bottom-up filter")
1760
+ print(" 5: Stepping chart parser (alternating top-down & bottom-up)")
1761
+ print(" 6: All parsers")
1762
+ print("\nWhich parser (1-6)? ", end=" ")
1763
+ choice = sys.stdin.readline().strip()
1764
+ print()
1765
+
1766
+ choice = str(choice)
1767
+ if choice not in "123456":
1768
+ print("Bad parser number")
1769
+ return
1770
+
1771
+ # Keep track of how long each parser takes.
1772
+ times = {}
1773
+
1774
+ strategies = {
1775
+ "1": ("Top-down", TD_STRATEGY),
1776
+ "2": ("Bottom-up", BU_STRATEGY),
1777
+ "3": ("Bottom-up left-corner", BU_LC_STRATEGY),
1778
+ "4": ("Filtered left-corner", LC_STRATEGY),
1779
+ }
1780
+ choices = []
1781
+ if choice in strategies:
1782
+ choices = [choice]
1783
+ if choice == "6":
1784
+ choices = "1234"
1785
+
1786
+ # Run the requested chart parser(s), except the stepping parser.
1787
+ for strategy in choices:
1788
+ print("* Strategy: " + strategies[strategy][0])
1789
+ print()
1790
+ cp = ChartParser(grammar, strategies[strategy][1], trace=trace)
1791
+ t = time.time()
1792
+ chart = cp.chart_parse(tokens)
1793
+ parses = list(chart.parses(grammar.start()))
1794
+
1795
+ times[strategies[strategy][0]] = time.time() - t
1796
+ print("Nr edges in chart:", len(chart.edges()))
1797
+ if numparses:
1798
+ assert len(parses) == numparses, "Not all parses found"
1799
+ if print_trees:
1800
+ for tree in parses:
1801
+ print(tree)
1802
+ else:
1803
+ print("Nr trees:", len(parses))
1804
+ print()
1805
+
1806
+ # Run the stepping parser, if requested.
1807
+ if choice in "56":
1808
+ print("* Strategy: Stepping (top-down vs bottom-up)")
1809
+ print()
1810
+ t = time.time()
1811
+ cp = SteppingChartParser(grammar, trace=trace)
1812
+ cp.initialize(tokens)
1813
+ for i in range(5):
1814
+ print("*** SWITCH TO TOP DOWN")
1815
+ cp.set_strategy(TD_STRATEGY)
1816
+ for j, e in enumerate(cp.step()):
1817
+ if j > 20 or e is None:
1818
+ break
1819
+ print("*** SWITCH TO BOTTOM UP")
1820
+ cp.set_strategy(BU_STRATEGY)
1821
+ for j, e in enumerate(cp.step()):
1822
+ if j > 20 or e is None:
1823
+ break
1824
+ times["Stepping"] = time.time() - t
1825
+ print("Nr edges in chart:", len(cp.chart().edges()))
1826
+ if numparses:
1827
+ assert len(list(cp.parses())) == numparses, "Not all parses found"
1828
+ if print_trees:
1829
+ for tree in cp.parses():
1830
+ print(tree)
1831
+ else:
1832
+ print("Nr trees:", len(list(cp.parses())))
1833
+ print()
1834
+
1835
+ # Print the times of all parsers:
1836
+ if not (print_times and times):
1837
+ return
1838
+ print("* Parsing times")
1839
+ print()
1840
+ maxlen = max(len(key) for key in times)
1841
+ format = "%" + repr(maxlen) + "s parser: %6.3fsec"
1842
+ times_items = times.items()
1843
+ for (parser, t) in sorted(times_items, key=lambda a: a[1]):
1844
+ print(format % (parser, t))
1845
+
1846
+
1847
+ if __name__ == "__main__":
1848
+ demo()
env-llmeval/lib/python3.10/site-packages/nltk/parse/dependencygraph.py ADDED
@@ -0,0 +1,799 @@
1
+ # Natural Language Toolkit: Dependency Grammars
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Jason Narad <[email protected]>
5
+ # Steven Bird <[email protected]> (modifications)
6
+ #
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+ #
10
+
11
+ """
12
+ Tools for reading and writing dependency trees.
13
+ The input is assumed to be in Malt-TAB format
14
+ (https://stp.lingfil.uu.se/~nivre/research/MaltXML.html).
15
+ """
16
+
17
+ import subprocess
18
+ import warnings
19
+ from collections import defaultdict
20
+ from itertools import chain
21
+ from pprint import pformat
22
+
23
+ from nltk.internals import find_binary
24
+ from nltk.tree import Tree
25
+
26
+ #################################################################
27
+ # DependencyGraph Class
28
+ #################################################################
29
+
30
+
31
+ class DependencyGraph:
32
+ """
33
+ A container for the nodes and labelled edges of a dependency structure.
34
+ """
35
+
36
+ def __init__(
37
+ self,
38
+ tree_str=None,
39
+ cell_extractor=None,
40
+ zero_based=False,
41
+ cell_separator=None,
42
+ top_relation_label="ROOT",
43
+ ):
44
+ """Dependency graph.
45
+
46
+ We place a dummy `TOP` node with the index 0, since the root node is
47
+ often assigned 0 as its head. This also means that the indexing of the
48
+ nodes corresponds directly to the Malt-TAB format, which starts at 1.
49
+
50
+ If zero-based is True, then Malt-TAB-like input with node numbers
51
+ starting at 0 and the root node assigned -1 (as produced by, e.g.,
52
+ zpar).
53
+
54
+ :param str cell_separator: the cell separator. If not provided, cells
55
+ are split by whitespace.
56
+
57
+ :param str top_relation_label: the label by which the top relation is
58
+ identified, for example, `ROOT`, `null` or `TOP`.
59
+ """
60
+ self.nodes = defaultdict(
61
+ lambda: {
62
+ "address": None,
63
+ "word": None,
64
+ "lemma": None,
65
+ "ctag": None,
66
+ "tag": None,
67
+ "feats": None,
68
+ "head": None,
69
+ "deps": defaultdict(list),
70
+ "rel": None,
71
+ }
72
+ )
73
+
74
+ self.nodes[0].update({"ctag": "TOP", "tag": "TOP", "address": 0})
75
+
76
+ self.root = None
77
+
78
+ if tree_str:
79
+ self._parse(
80
+ tree_str,
81
+ cell_extractor=cell_extractor,
82
+ zero_based=zero_based,
83
+ cell_separator=cell_separator,
84
+ top_relation_label=top_relation_label,
85
+ )
86
+
87
+ def remove_by_address(self, address):
88
+ """
89
+ Removes the node with the given address. References
90
+ to this node in others will still exist.
91
+ """
92
+ del self.nodes[address]
93
+
94
+ def redirect_arcs(self, originals, redirect):
95
+ """
96
+ Redirects arcs to any of the nodes in the originals list
97
+ to the redirect node address.
98
+ """
99
+ for node in self.nodes.values():
100
+ new_deps = []
101
+ for dep in node["deps"]:
102
+ if dep in originals:
103
+ new_deps.append(redirect)
104
+ else:
105
+ new_deps.append(dep)
106
+ node["deps"] = new_deps
107
+
108
+ def add_arc(self, head_address, mod_address):
109
+ """
110
+ Adds an arc from the node specified by head_address to the
111
+ node specified by the mod address.
112
+ """
113
+ relation = self.nodes[mod_address]["rel"]
114
+ self.nodes[head_address]["deps"].setdefault(relation, [])
115
+ self.nodes[head_address]["deps"][relation].append(mod_address)
116
+ # self.nodes[head_address]['deps'].append(mod_address)
117
+
118
+ def connect_graph(self):
119
+ """
120
+ Fully connects all non-root nodes. All nodes are set to be dependents
121
+ of the root node.
122
+ """
123
+ for node1 in self.nodes.values():
124
+ for node2 in self.nodes.values():
125
+ if node1["address"] != node2["address"] and node2["rel"] != "TOP":
126
+ relation = node2["rel"]
127
+ node1["deps"].setdefault(relation, [])
128
+ node1["deps"][relation].append(node2["address"])
129
+ # node1['deps'].append(node2['address'])
130
+
131
+ def get_by_address(self, node_address):
132
+ """Return the node with the given address."""
133
+ return self.nodes[node_address]
134
+
135
+ def contains_address(self, node_address):
136
+ """
137
+ Returns true if the graph contains a node with the given node
138
+ address, false otherwise.
139
+ """
140
+ return node_address in self.nodes
141
+
142
+ def to_dot(self):
143
+ """Return a dot representation suitable for using with Graphviz.
144
+
145
+ >>> dg = DependencyGraph(
146
+ ... 'John N 2\\n'
147
+ ... 'loves V 0\\n'
148
+ ... 'Mary N 2'
149
+ ... )
150
+ >>> print(dg.to_dot())
151
+ digraph G{
152
+ edge [dir=forward]
153
+ node [shape=plaintext]
154
+ <BLANKLINE>
155
+ 0 [label="0 (None)"]
156
+ 0 -> 2 [label="ROOT"]
157
+ 1 [label="1 (John)"]
158
+ 2 [label="2 (loves)"]
159
+ 2 -> 1 [label=""]
160
+ 2 -> 3 [label=""]
161
+ 3 [label="3 (Mary)"]
162
+ }
163
+
164
+ """
165
+ # Start the digraph specification
166
+ s = "digraph G{\n"
167
+ s += "edge [dir=forward]\n"
168
+ s += "node [shape=plaintext]\n"
169
+
170
+ # Draw the remaining nodes
171
+ for node in sorted(self.nodes.values(), key=lambda v: v["address"]):
172
+ s += '\n{} [label="{} ({})"]'.format(
173
+ node["address"],
174
+ node["address"],
175
+ node["word"],
176
+ )
177
+ for rel, deps in node["deps"].items():
178
+ for dep in deps:
179
+ if rel is not None:
180
+ s += '\n{} -> {} [label="{}"]'.format(node["address"], dep, rel)
181
+ else:
182
+ s += "\n{} -> {} ".format(node["address"], dep)
183
+ s += "\n}"
184
+
185
+ return s
186
+
187
+ def _repr_svg_(self):
188
+ """Show SVG representation of the dependency graph (IPython magic).
189
+ >>> from nltk.test.setup_fixt import check_binary
190
+ >>> check_binary('dot')
191
+ >>> dg = DependencyGraph(
192
+ ... 'John N 2\\n'
193
+ ... 'loves V 0\\n'
194
+ ... 'Mary N 2'
195
+ ... )
196
+ >>> dg._repr_svg_().split('\\n')[0]
197
+ '<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
198
+
199
+ """
200
+ dot_string = self.to_dot()
201
+ return dot2img(dot_string)
202
+
203
+ def __str__(self):
204
+ return pformat(self.nodes)
205
+
206
+ def __repr__(self):
207
+ return f"<DependencyGraph with {len(self.nodes)} nodes>"
208
+
209
+ @staticmethod
210
+ def load(
211
+ filename, zero_based=False, cell_separator=None, top_relation_label="ROOT"
212
+ ):
213
+ """
214
+ :param filename: a name of a file in Malt-TAB format
215
+ :param zero_based: nodes in the input file are numbered starting from 0
216
+ rather than 1 (as produced by, e.g., zpar)
217
+ :param str cell_separator: the cell separator. If not provided, cells
218
+ are split by whitespace.
219
+ :param str top_relation_label: the label by which the top relation is
220
+ identified, for example, `ROOT`, `null` or `TOP`.
221
+
222
+ :return: a list of DependencyGraphs
223
+
224
+ """
225
+ with open(filename) as infile:
226
+ return [
227
+ DependencyGraph(
228
+ tree_str,
229
+ zero_based=zero_based,
230
+ cell_separator=cell_separator,
231
+ top_relation_label=top_relation_label,
232
+ )
233
+ for tree_str in infile.read().split("\n\n")
234
+ ]
235
+
236
+ def left_children(self, node_index):
237
+ """
238
+ Returns the number of left children under the node specified
239
+ by the given address.
240
+ """
241
+ children = chain.from_iterable(self.nodes[node_index]["deps"].values())
242
+ index = self.nodes[node_index]["address"]
243
+ return sum(1 for c in children if c < index)
244
+
245
+ def right_children(self, node_index):
246
+ """
247
+ Returns the number of right children under the node specified
248
+ by the given address.
249
+ """
250
+ children = chain.from_iterable(self.nodes[node_index]["deps"].values())
251
+ index = self.nodes[node_index]["address"]
252
+ return sum(1 for c in children if c > index)
253
+
254
+ def add_node(self, node):
255
+ if not self.contains_address(node["address"]):
256
+ self.nodes[node["address"]].update(node)
257
+
258
+ def _parse(
259
+ self,
260
+ input_,
261
+ cell_extractor=None,
262
+ zero_based=False,
263
+ cell_separator=None,
264
+ top_relation_label="ROOT",
265
+ ):
266
+ """Parse a sentence.
267
+
268
+ :param extractor: a function that given a tuple of cells returns a
269
+ 7-tuple, where the values are ``word, lemma, ctag, tag, feats, head,
270
+ rel``.
271
+
272
+ :param str cell_separator: the cell separator. If not provided, cells
273
+ are split by whitespace.
274
+
275
+ :param str top_relation_label: the label by which the top relation is
276
+ identified, for example, `ROOT`, `null` or `TOP`.
277
+
278
+ """
279
+
280
+ def extract_3_cells(cells, index):
281
+ word, tag, head = cells
282
+ return index, word, word, tag, tag, "", head, ""
283
+
284
+ def extract_4_cells(cells, index):
285
+ word, tag, head, rel = cells
286
+ return index, word, word, tag, tag, "", head, rel
287
+
288
+ def extract_7_cells(cells, index):
289
+ line_index, word, lemma, tag, _, head, rel = cells
290
+ try:
291
+ index = int(line_index)
292
+ except ValueError:
293
+ # index can't be parsed as an integer, use default
294
+ pass
295
+ return index, word, lemma, tag, tag, "", head, rel
296
+
297
+ def extract_10_cells(cells, index):
298
+ line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
299
+ try:
300
+ index = int(line_index)
301
+ except ValueError:
302
+ # index can't be parsed as an integer, use default
303
+ pass
304
+ return index, word, lemma, ctag, tag, feats, head, rel
305
+
306
+ extractors = {
307
+ 3: extract_3_cells,
308
+ 4: extract_4_cells,
309
+ 7: extract_7_cells,
310
+ 10: extract_10_cells,
311
+ }
312
+
313
+ if isinstance(input_, str):
314
+ input_ = (line for line in input_.split("\n"))
315
+
316
+ lines = (l.rstrip() for l in input_)
317
+ lines = (l for l in lines if l)
318
+
319
+ cell_number = None
320
+ for index, line in enumerate(lines, start=1):
321
+ cells = line.split(cell_separator)
322
+ if cell_number is None:
323
+ cell_number = len(cells)
324
+ else:
325
+ assert cell_number == len(cells)
326
+
327
+ if cell_extractor is None:
328
+ try:
329
+ cell_extractor = extractors[cell_number]
330
+ except KeyError as e:
331
+ raise ValueError(
332
+ "Number of tab-delimited fields ({}) not supported by "
333
+ "CoNLL(10) or Malt-Tab(4) format".format(cell_number)
334
+ ) from e
335
+
336
+ try:
337
+ index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(
338
+ cells, index
339
+ )
340
+ except (TypeError, ValueError):
341
+ # cell_extractor doesn't take 2 arguments or doesn't return 8
342
+ # values; assume the cell_extractor is an older external
343
+ # extractor and doesn't accept or return an index.
344
+ word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)
345
+
346
+ if head == "_":
347
+ continue
348
+
349
+ head = int(head)
350
+ if zero_based:
351
+ head += 1
352
+
353
+ self.nodes[index].update(
354
+ {
355
+ "address": index,
356
+ "word": word,
357
+ "lemma": lemma,
358
+ "ctag": ctag,
359
+ "tag": tag,
360
+ "feats": feats,
361
+ "head": head,
362
+ "rel": rel,
363
+ }
364
+ )
365
+
366
+ # Make sure that the fake root node has labeled dependencies.
367
+ if (cell_number == 3) and (head == 0):
368
+ rel = top_relation_label
369
+ self.nodes[head]["deps"][rel].append(index)
370
+
371
+ if self.nodes[0]["deps"][top_relation_label]:
372
+ root_address = self.nodes[0]["deps"][top_relation_label][0]
373
+ self.root = self.nodes[root_address]
374
+ self.top_relation_label = top_relation_label
375
+ else:
376
+ warnings.warn(
377
+ "The graph doesn't contain a node " "that depends on the root element."
378
+ )
379
+
380
+ def _word(self, node, filter=True):
381
+ w = node["word"]
382
+ if filter:
383
+ if w != ",":
384
+ return w
385
+ return w
386
+
387
+ def _tree(self, i):
388
+ """Turn dependency graphs into NLTK trees.
389
+
390
+ :param int i: index of a node
391
+ :return: either a word (if the indexed node is a leaf) or a ``Tree``.
392
+ """
393
+ node = self.get_by_address(i)
394
+ word = node["word"]
395
+ deps = sorted(chain.from_iterable(node["deps"].values()))
396
+
397
+ if deps:
398
+ return Tree(word, [self._tree(dep) for dep in deps])
399
+ else:
400
+ return word
401
+
402
+ def tree(self):
403
+ """
404
+ Starting with the ``root`` node, build a dependency tree using the NLTK
405
+ ``Tree`` constructor. Dependency labels are omitted.
406
+ """
407
+ node = self.root
408
+
409
+ word = node["word"]
410
+ deps = sorted(chain.from_iterable(node["deps"].values()))
411
+ return Tree(word, [self._tree(dep) for dep in deps])
412
+
413
+ def triples(self, node=None):
414
+ """
415
+ Extract dependency triples of the form:
416
+ ((head word, head tag), rel, (dep word, dep tag))
417
+ """
418
+
419
+ if not node:
420
+ node = self.root
421
+
422
+ head = (node["word"], node["ctag"])
423
+ for i in sorted(chain.from_iterable(node["deps"].values())):
424
+ dep = self.get_by_address(i)
425
+ yield (head, dep["rel"], (dep["word"], dep["ctag"]))
426
+ yield from self.triples(node=dep)
427
+
428
+ def _hd(self, i):
429
+ try:
430
+ return self.nodes[i]["head"]
431
+ except IndexError:
432
+ return None
433
+
434
+ def _rel(self, i):
435
+ try:
436
+ return self.nodes[i]["rel"]
437
+ except IndexError:
438
+ return None
439
+
440
+ # Returns False if the graph is acyclic, otherwise a list of node
+ # addresses forming a cycle.
441
+ def contains_cycle(self):
442
+ """Check whether there are cycles.
443
+
444
+ >>> dg = DependencyGraph(treebank_data)
445
+ >>> dg.contains_cycle()
446
+ False
447
+
448
+ >>> cyclic_dg = DependencyGraph()
449
+ >>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}
450
+ >>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}
451
+ >>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}
452
+ >>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}
453
+ >>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}
454
+ >>> cyclic_dg.nodes = {
455
+ ... 0: top,
456
+ ... 1: child1,
457
+ ... 2: child2,
458
+ ... 3: child3,
459
+ ... 4: child4,
460
+ ... }
461
+ >>> cyclic_dg.root = top
462
+
463
+ >>> cyclic_dg.contains_cycle()
464
+ [1, 2, 4, 3]
465
+
466
+ """
467
+ distances = {}
468
+
469
+ for node in self.nodes.values():
470
+ for dep in node["deps"]:
471
+ key = tuple([node["address"], dep])
472
+ distances[key] = 1
473
+
474
+ for _ in self.nodes:
475
+ new_entries = {}
476
+
477
+ for pair1 in distances:
478
+ for pair2 in distances:
479
+ if pair1[1] == pair2[0]:
480
+ key = tuple([pair1[0], pair2[1]])
481
+ new_entries[key] = distances[pair1] + distances[pair2]
482
+
483
+ for pair in new_entries:
484
+ distances[pair] = new_entries[pair]
485
+ if pair[0] == pair[1]:
486
+ path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
487
+ return path
488
+
489
+ return False # no cycle was found
490
+
491
+ def get_cycle_path(self, curr_node, goal_node_index):
492
+ for dep in curr_node["deps"]:
493
+ if dep == goal_node_index:
494
+ return [curr_node["address"]]
495
+ for dep in curr_node["deps"]:
496
+ path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
497
+ if len(path) > 0:
498
+ path.insert(0, curr_node["address"])
499
+ return path
500
+ return []
501
+
502
+ def to_conll(self, style):
503
+ """
504
+ The dependency graph in CoNLL format.
505
+
506
+ :param style: the style to use for the format (3, 4, 10 columns)
507
+ :type style: int
508
+ :rtype: str
509
+ """
510
+
511
+ if style == 3:
512
+ template = "{word}\t{tag}\t{head}\n"
513
+ elif style == 4:
514
+ template = "{word}\t{tag}\t{head}\t{rel}\n"
515
+ elif style == 10:
516
+ template = (
517
+ "{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n"
518
+ )
519
+ else:
520
+ raise ValueError(
521
+ "Number of tab-delimited fields ({}) not supported by "
522
+ "CoNLL(10) or Malt-Tab(4) format".format(style)
523
+ )
524
+
525
+ return "".join(
526
+ template.format(i=i, **node)
527
+ for i, node in sorted(self.nodes.items())
528
+ if node["tag"] != "TOP"
529
+ )
530
+
531
+ def nx_graph(self):
532
+ """Convert the data in a ``nodelist`` into a networkx labeled directed graph."""
533
+ import networkx
534
+
535
+ nx_nodelist = list(range(1, len(self.nodes)))
536
+ nx_edgelist = [
537
+ (n, self._hd(n), self._rel(n)) for n in nx_nodelist if self._hd(n)
538
+ ]
539
+ self.nx_labels = {}
540
+ for n in nx_nodelist:
541
+ self.nx_labels[n] = self.nodes[n]["word"]
542
+
543
+ g = networkx.MultiDiGraph()
544
+ g.add_nodes_from(nx_nodelist)
545
+ g.add_edges_from(nx_edgelist)
546
+
547
+ return g
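+
+ # An illustrative sketch (not part of the original module): build a graph
+ # from 4-cell Malt-TAB input, then inspect it as a tree, as
+ # head/relation/dependent triples, and as CoNLL text.
+ def _demo_dependency_graph():
+     dg = DependencyGraph(
+         "John N 2 SUB\n"
+         "loves V 0 ROOT\n"
+         "Mary N 2 OBJ"
+     )
+     print(dg.tree())              # (loves John Mary)
+     for triple in dg.triples():   # e.g. (('loves', 'V'), 'SUB', ('John', 'N'))
+         print(triple)
+     print(dg.to_conll(4), end="")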
548
+
549
+
550
+ def dot2img(dot_string, t="svg"):
551
+ """
552
+ Create an image representation from dot_string, using the 'dot' program
553
+ from the Graphviz package.
554
+
555
+ Use the 't' argument to specify the image file format, e.g. 'jpeg', 'eps',
556
+ 'json', 'png' or 'webp' (Running 'dot -T:' lists all available formats).
557
+
558
+ Note that the "capture_output" option of subprocess.run() is only available
559
+ with text formats (like svg), but not with binary image formats (like png).
560
+ """
561
+
562
+ try:
563
+ find_binary("dot")
564
+ try:
565
+ if t in ["dot", "dot_json", "json", "svg"]:
566
+ proc = subprocess.run(
567
+ ["dot", "-T%s" % t],
568
+ capture_output=True,
569
+ input=dot_string,
570
+ text=True,
571
+ )
572
+ else:
573
+ proc = subprocess.run(
574
+ ["dot", "-T%s" % t],
575
+ input=bytes(dot_string, encoding="utf8"),
576
+ )
577
+ return proc.stdout
578
+ except Exception as err:
579
+ raise Exception(
580
+ "Cannot create image representation by running dot from string: {}"
581
+ "".format(dot_string)
582
+ ) from err
583
+ except OSError as e:
584
+ raise Exception("Cannot find the dot binary from Graphviz package") from e
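+
+ # An illustrative usage sketch (not part of the original module); it assumes
+ # the Graphviz 'dot' binary is installed and on the PATH.
+ def _demo_dot2img():
+     dg = DependencyGraph("John N 2\nloves V 0\nMary N 2")
+     svg = dot2img(dg.to_dot(), t="svg")
+     print(svg.splitlines()[0])    # the XML declaration of the SVG document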
585
+
586
+
587
+ class DependencyGraphError(Exception):
588
+ """Dependency graph exception."""
589
+
590
+
591
+ def demo():
592
+ malt_demo()
593
+ conll_demo()
594
+ conll_file_demo()
595
+ cycle_finding_demo()
596
+
597
+
598
+ def malt_demo(nx=False):
599
+ """
600
+ A demonstration of the result of reading a dependency
601
+ version of the first sentence of the Penn Treebank.
602
+ """
603
+ dg = DependencyGraph(
604
+ """Pierre NNP 2 NMOD
605
+ Vinken NNP 8 SUB
606
+ , , 2 P
607
+ 61 CD 5 NMOD
608
+ years NNS 6 AMOD
609
+ old JJ 2 NMOD
610
+ , , 2 P
611
+ will MD 0 ROOT
612
+ join VB 8 VC
613
+ the DT 11 NMOD
614
+ board NN 9 OBJ
615
+ as IN 9 VMOD
616
+ a DT 15 NMOD
617
+ nonexecutive JJ 15 NMOD
618
+ director NN 12 PMOD
619
+ Nov. NNP 9 VMOD
620
+ 29 CD 16 NMOD
621
+ . . 9 VMOD
622
+ """
623
+ )
624
+ tree = dg.tree()
625
+ tree.pprint()
626
+ if nx:
627
+ # currently doesn't work
628
+ import networkx
629
+ from matplotlib import pylab
630
+
631
+ g = dg.nx_graph()
632
+ g.info()
633
+ pos = networkx.spring_layout(g, dim=1)
634
+ networkx.draw_networkx_nodes(g, pos, node_size=50)
635
+ # networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)
636
+ networkx.draw_networkx_labels(g, pos, dg.nx_labels)
637
+ pylab.xticks([])
638
+ pylab.yticks([])
639
+ pylab.savefig("tree.png")
640
+ pylab.show()
641
+
642
+
643
+ def conll_demo():
644
+ """
645
+ A demonstration of how to read a string representation of
646
+ a CoNLL format dependency tree.
647
+ """
648
+ dg = DependencyGraph(conll_data1)
649
+ tree = dg.tree()
650
+ tree.pprint()
651
+ print(dg)
652
+ print(dg.to_conll(4))
653
+
654
+
655
+ def conll_file_demo():
656
+ print("Mass conll_read demo...")
657
+ graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
658
+ for graph in graphs:
659
+ tree = graph.tree()
660
+ print("\n")
661
+ tree.pprint()
662
+
663
+
664
+ def cycle_finding_demo():
665
+ dg = DependencyGraph(treebank_data)
666
+ print(dg.contains_cycle())
667
+ cyclic_dg = DependencyGraph()
668
+ cyclic_dg.add_node({"word": None, "deps": [1], "rel": "TOP", "address": 0})
669
+ cyclic_dg.add_node({"word": None, "deps": [2], "rel": "NTOP", "address": 1})
670
+ cyclic_dg.add_node({"word": None, "deps": [4], "rel": "NTOP", "address": 2})
671
+ cyclic_dg.add_node({"word": None, "deps": [1], "rel": "NTOP", "address": 3})
672
+ cyclic_dg.add_node({"word": None, "deps": [3], "rel": "NTOP", "address": 4})
673
+ print(cyclic_dg.contains_cycle())
674
+
675
+
676
+ treebank_data = """Pierre NNP 2 NMOD
677
+ Vinken NNP 8 SUB
678
+ , , 2 P
679
+ 61 CD 5 NMOD
680
+ years NNS 6 AMOD
681
+ old JJ 2 NMOD
682
+ , , 2 P
683
+ will MD 0 ROOT
684
+ join VB 8 VC
685
+ the DT 11 NMOD
686
+ board NN 9 OBJ
687
+ as IN 9 VMOD
688
+ a DT 15 NMOD
689
+ nonexecutive JJ 15 NMOD
690
+ director NN 12 PMOD
691
+ Nov. NNP 9 VMOD
692
+ 29 CD 16 NMOD
693
+ . . 9 VMOD
694
+ """
695
+
696
+ conll_data1 = """
697
+ 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
698
+ 2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
699
+ 3 met met Prep Prep voor 8 mod _ _
700
+ 4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
701
+ 5 moeder moeder N N soort|ev|neut 3 obj1 _ _
702
+ 6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
703
+ 7 gaan ga V V hulp|inf 6 vc _ _
704
+ 8 winkelen winkel V V intrans|inf 11 cnj _ _
705
+ 9 , , Punc Punc komma 8 punct _ _
706
+ 10 zwemmen zwem V V intrans|inf 11 cnj _ _
707
+ 11 of of Conj Conj neven 7 vc _ _
708
+ 12 terrassen terras N N soort|mv|neut 11 cnj _ _
709
+ 13 . . Punc Punc punt 12 punct _ _
710
+ """
711
+
712
+ conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
713
+ 2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
714
+ 3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
715
+ 4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
716
+ 5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
717
+ 6 . . Punc Punc punt 5 punct _ _
718
+
719
+ 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
720
+ 2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
721
+ 3 met met Prep Prep voor 8 mod _ _
722
+ 4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
723
+ 5 moeder moeder N N soort|ev|neut 3 obj1 _ _
724
+ 6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
725
+ 7 gaan ga V V hulp|inf 6 vc _ _
726
+ 8 winkelen winkel V V intrans|inf 11 cnj _ _
727
+ 9 , , Punc Punc komma 8 punct _ _
728
+ 10 zwemmen zwem V V intrans|inf 11 cnj _ _
729
+ 11 of of Conj Conj neven 7 vc _ _
730
+ 12 terrassen terras N N soort|mv|neut 11 cnj _ _
731
+ 13 . . Punc Punc punt 12 punct _ _
732
+
733
+ 1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
734
+ 2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
735
+ 3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
736
+ 4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
737
+ 5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
738
+ 6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
739
+ 7 . . Punc Punc punt 6 punct _ _
740
+
741
+ 1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
742
+ 2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
743
+ 3 bij bij Prep Prep voor 2 ld _ _
744
+ 4 de de Art Art bep|zijdofmv|neut 6 det _ _
745
+ 5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
746
+ 6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
747
+ 7 die die Pron Pron betr|neut|zelfst 6 mod _ _
748
+ 8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
749
+ 9 ginds ginds Adv Adv gew|aanw 12 mod _ _
750
+ 10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
751
+ 11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
752
+ 12 gelaten laat V V trans|verldw|onverv 11 vc _ _
753
+ 13 . . Punc Punc punt 12 punct _ _
754
+
755
+ 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
756
+ 2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
757
+ 3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
758
+ 4 naast naast Prep Prep voor 11 mod _ _
759
+ 5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
760
+ 6 op op Prep Prep voor 11 ld _ _
761
+ 7 de de Art Art bep|zijdofmv|neut 8 det _ _
762
+ 8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
763
+ 9 kunnen kan V V hulp|inf 2 vc _ _
764
+ 10 gaan ga V V hulp|inf 9 vc _ _
765
+ 11 liggen lig V V intrans|inf 10 vc _ _
766
+ 12 . . Punc Punc punt 11 punct _ _
767
+
768
+ 1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
769
+ 2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
770
+ 3 mams mams N N soort|ev|neut 4 det _ _
771
+ 4 rug rug N N soort|ev|neut 5 obj1 _ _
772
+ 5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
773
+ 6 hebben heb V V hulp|inf 2 vc _ _
774
+ 7 en en Conj Conj neven 0 ROOT _ _
775
+ 8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
776
+ 9 de de Art Art bep|zijdofmv|neut 10 det _ _
777
+ 10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
778
+ 11 . . Punc Punc punt 10 punct _ _
779
+
780
+ 1 Of of Conj Conj onder|metfin 0 ROOT _ _
781
+ 2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
782
+ 3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
783
+ 4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
784
+ 5 met met Prep Prep voor 10 mod _ _
785
+ 6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
786
+ 7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
787
+ 8 rond rond Adv Adv deelv 10 svp _ _
788
+ 9 kunnen kan V V hulp|inf 3 vc _ _
789
+ 10 slenteren slenter V V intrans|inf 9 vc _ _
790
+ 11 in in Prep Prep voor 10 mod _ _
791
+ 12 de de Art Art bep|zijdofmv|neut 13 det _ _
792
+ 13 buurt buurt N N soort|ev|neut 11 obj1 _ _
793
+ 14 van van Prep Prep voor 13 mod _ _
794
+ 15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
795
+ 16 . . Punc Punc punt 15 punct _ _
796
+ """
797
+
798
+ if __name__ == "__main__":
799
+ demo()
env-llmeval/lib/python3.10/site-packages/nltk/parse/earleychart.py ADDED
@@ -0,0 +1,552 @@
1
+ # Natural Language Toolkit: An Incremental Earley Chart Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Peter Ljunglöf <[email protected]>
5
+ # Rob Speer <[email protected]>
6
+ # Edward Loper <[email protected]>
7
+ # Steven Bird <[email protected]>
8
+ # Jean Mark Gawron <[email protected]>
9
+ # URL: <https://www.nltk.org/>
10
+ # For license information, see LICENSE.TXT
11
+
12
+ """
13
+ Data classes and parser implementations for *incremental* chart
14
+ parsers, which use dynamic programming to efficiently parse a text.
15
+ A "chart parser" derives parse trees for a text by iteratively adding
16
+ \"edges\" to a \"chart\". Each "edge" represents a hypothesis about the tree
17
+ structure for a subsequence of the text. The "chart" is a
18
+ \"blackboard\" for composing and combining these hypotheses.
19
+
20
+ A parser is "incremental", if it guarantees that for all i, j where i < j,
21
+ all edges ending at i are built before any edges ending at j.
22
+ This is appealing for, say, speech recognizer hypothesis filtering.
23
+
24
+ The main parser class is ``EarleyChartParser``, which is a top-down
25
+ algorithm, originally formulated by Jay Earley (1970).
26
+ """
27
+
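+ # A minimal usage sketch (assumes the sample CFG from nltk.parse.chart;
+ # any CFG covering the tokens works the same way):
+ #
+ # >>> from nltk.parse.chart import demo_grammar
+ # >>> from nltk.parse.earleychart import EarleyChartParser
+ # >>> parser = EarleyChartParser(demo_grammar())
+ # >>> for tree in parser.parse("I saw John".split()):
+ # ...     print(tree)
+ # (S (NP I) (VP (Verb saw) (NP John)))
+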
28
+ from time import perf_counter
29
+
30
+ from nltk.parse.chart import (
31
+ BottomUpPredictCombineRule,
32
+ BottomUpPredictRule,
33
+ CachedTopDownPredictRule,
34
+ Chart,
35
+ ChartParser,
36
+ EdgeI,
37
+ EmptyPredictRule,
38
+ FilteredBottomUpPredictCombineRule,
39
+ FilteredSingleEdgeFundamentalRule,
40
+ LeafEdge,
41
+ LeafInitRule,
42
+ SingleEdgeFundamentalRule,
43
+ TopDownInitRule,
44
+ )
45
+ from nltk.parse.featurechart import (
46
+ FeatureBottomUpPredictCombineRule,
47
+ FeatureBottomUpPredictRule,
48
+ FeatureChart,
49
+ FeatureChartParser,
50
+ FeatureEmptyPredictRule,
51
+ FeatureSingleEdgeFundamentalRule,
52
+ FeatureTopDownInitRule,
53
+ FeatureTopDownPredictRule,
54
+ )
55
+
56
+ # ////////////////////////////////////////////////////////////
57
+ # Incremental Chart
58
+ # ////////////////////////////////////////////////////////////
59
+
60
+
61
+ class IncrementalChart(Chart):
62
+ def initialize(self):
63
+ # A sequence of edge lists contained in this chart.
64
+ self._edgelists = tuple([] for x in self._positions())
65
+
66
+ # The set of child pointer lists associated with each edge.
67
+ self._edge_to_cpls = {}
68
+
69
+ # Indexes mapping attribute values to lists of edges
70
+ # (used by select()).
71
+ self._indexes = {}
72
+
73
+ def edges(self):
74
+ return list(self.iteredges())
75
+
76
+ def iteredges(self):
77
+ return (edge for edgelist in self._edgelists for edge in edgelist)
78
+
79
+ def select(self, end, **restrictions):
80
+ edgelist = self._edgelists[end]
81
+
82
+ # If there are no restrictions, then return all edges.
83
+ if restrictions == {}:
84
+ return iter(edgelist)
85
+
86
+ # Find the index corresponding to the given restrictions.
87
+ restr_keys = sorted(restrictions.keys())
88
+ restr_keys = tuple(restr_keys)
89
+
90
+ # If it doesn't exist, then create it.
91
+ if restr_keys not in self._indexes:
92
+ self._add_index(restr_keys)
93
+
94
+ vals = tuple(restrictions[key] for key in restr_keys)
95
+ return iter(self._indexes[restr_keys][end].get(vals, []))
96
+
97
+ def _add_index(self, restr_keys):
98
+ # Make sure it's a valid index.
99
+ for key in restr_keys:
100
+ if not hasattr(EdgeI, key):
101
+ raise ValueError("Bad restriction: %s" % key)
102
+
103
+ # Create the index.
104
+ index = self._indexes[restr_keys] = tuple({} for x in self._positions())
105
+
106
+ # Add all existing edges to the index.
107
+ for end, edgelist in enumerate(self._edgelists):
108
+ this_index = index[end]
109
+ for edge in edgelist:
110
+ vals = tuple(getattr(edge, key)() for key in restr_keys)
111
+ this_index.setdefault(vals, []).append(edge)
112
+
113
+ def _register_with_indexes(self, edge):
114
+ end = edge.end()
115
+ for (restr_keys, index) in self._indexes.items():
116
+ vals = tuple(getattr(edge, key)() for key in restr_keys)
117
+ index[end].setdefault(vals, []).append(edge)
118
+
119
+ def _append_edge(self, edge):
120
+ self._edgelists[edge.end()].append(edge)
121
+
122
+ def _positions(self):
123
+ return range(self.num_leaves() + 1)
124
+
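+ # A small sketch of the incremental indexing: edges are grouped by their
+ # end position, so select() takes a mandatory ``end`` argument.
+ #
+ # >>> chart = IncrementalChart("the dog barks".split())
+ # >>> list(chart.select(end=0))    # no edges yet at position 0
+ # []
+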
125
+
126
+ class FeatureIncrementalChart(IncrementalChart, FeatureChart):
127
+ def select(self, end, **restrictions):
128
+ edgelist = self._edgelists[end]
129
+
130
+ # If there are no restrictions, then return all edges.
131
+ if restrictions == {}:
132
+ return iter(edgelist)
133
+
134
+ # Find the index corresponding to the given restrictions.
135
+ restr_keys = sorted(restrictions.keys())
136
+ restr_keys = tuple(restr_keys)
137
+
138
+ # If it doesn't exist, then create it.
139
+ if restr_keys not in self._indexes:
140
+ self._add_index(restr_keys)
141
+
142
+ vals = tuple(
143
+ self._get_type_if_possible(restrictions[key]) for key in restr_keys
144
+ )
145
+ return iter(self._indexes[restr_keys][end].get(vals, []))
146
+
147
+ def _add_index(self, restr_keys):
148
+ # Make sure it's a valid index.
149
+ for key in restr_keys:
150
+ if not hasattr(EdgeI, key):
151
+ raise ValueError("Bad restriction: %s" % key)
152
+
153
+ # Create the index.
154
+ index = self._indexes[restr_keys] = tuple({} for x in self._positions())
155
+
156
+ # Add all existing edges to the index.
157
+ for end, edgelist in enumerate(self._edgelists):
158
+ this_index = index[end]
159
+ for edge in edgelist:
160
+ vals = tuple(
161
+ self._get_type_if_possible(getattr(edge, key)())
162
+ for key in restr_keys
163
+ )
164
+ this_index.setdefault(vals, []).append(edge)
165
+
166
+ def _register_with_indexes(self, edge):
167
+ end = edge.end()
168
+ for (restr_keys, index) in self._indexes.items():
169
+ vals = tuple(
170
+ self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys
171
+ )
172
+ index[end].setdefault(vals, []).append(edge)
173
+
174
+
175
+ # ////////////////////////////////////////////////////////////
176
+ # Incremental CFG Rules
177
+ # ////////////////////////////////////////////////////////////
178
+
179
+
180
+ class CompleteFundamentalRule(SingleEdgeFundamentalRule):
181
+ def _apply_incomplete(self, chart, grammar, left_edge):
182
+ end = left_edge.end()
183
+ # When the chart is incremental, we only have to look for
184
+ # empty complete edges here.
185
+ for right_edge in chart.select(
186
+ start=end, end=end, is_complete=True, lhs=left_edge.nextsym()
187
+ ):
188
+ new_edge = left_edge.move_dot_forward(right_edge.end())
189
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
190
+ yield new_edge
191
+
192
+
193
+ class CompleterRule(CompleteFundamentalRule):
194
+ _fundamental_rule = CompleteFundamentalRule()
195
+
196
+ def apply(self, chart, grammar, edge):
197
+ if not isinstance(edge, LeafEdge):
198
+ yield from self._fundamental_rule.apply(chart, grammar, edge)
199
+
200
+
201
+ class ScannerRule(CompleteFundamentalRule):
202
+ _fundamental_rule = CompleteFundamentalRule()
203
+
204
+ def apply(self, chart, grammar, edge):
205
+ if isinstance(edge, LeafEdge):
206
+ yield from self._fundamental_rule.apply(chart, grammar, edge)
207
+
208
+
209
+ class PredictorRule(CachedTopDownPredictRule):
210
+ pass
211
+
212
+
213
+ class FilteredCompleteFundamentalRule(FilteredSingleEdgeFundamentalRule):
214
+ def apply(self, chart, grammar, edge):
215
+ # Since the Filtered rule only works for grammars without empty productions,
216
+ # we only have to bother with complete edges here.
217
+ if edge.is_complete():
218
+ yield from self._apply_complete(chart, grammar, edge)
219
+
220
+
221
+ # ////////////////////////////////////////////////////////////
222
+ # Incremental FCFG Rules
223
+ # ////////////////////////////////////////////////////////////
224
+
225
+
226
+ class FeatureCompleteFundamentalRule(FeatureSingleEdgeFundamentalRule):
227
+ def _apply_incomplete(self, chart, grammar, left_edge):
228
+ fr = self._fundamental_rule
229
+ end = left_edge.end()
230
+ # When the chart is incremental, we only have to look for
231
+ # empty complete edges here.
232
+ for right_edge in chart.select(
233
+ start=end, end=end, is_complete=True, lhs=left_edge.nextsym()
234
+ ):
235
+ yield from fr.apply(chart, grammar, left_edge, right_edge)
236
+
237
+
238
+ class FeatureCompleterRule(CompleterRule):
239
+ _fundamental_rule = FeatureCompleteFundamentalRule()
240
+
241
+
242
+ class FeatureScannerRule(ScannerRule):
243
+ _fundamental_rule = FeatureCompleteFundamentalRule()
244
+
245
+
246
+ class FeaturePredictorRule(FeatureTopDownPredictRule):
247
+ pass
248
+
249
+
250
+ # ////////////////////////////////////////////////////////////
251
+ # Incremental CFG Chart Parsers
252
+ # ////////////////////////////////////////////////////////////
253
+
254
+ EARLEY_STRATEGY = [
255
+ LeafInitRule(),
256
+ TopDownInitRule(),
257
+ CompleterRule(),
258
+ ScannerRule(),
259
+ PredictorRule(),
260
+ ]
261
+ TD_INCREMENTAL_STRATEGY = [
262
+ LeafInitRule(),
263
+ TopDownInitRule(),
264
+ CachedTopDownPredictRule(),
265
+ CompleteFundamentalRule(),
266
+ ]
267
+ BU_INCREMENTAL_STRATEGY = [
268
+ LeafInitRule(),
269
+ EmptyPredictRule(),
270
+ BottomUpPredictRule(),
271
+ CompleteFundamentalRule(),
272
+ ]
273
+ BU_LC_INCREMENTAL_STRATEGY = [
274
+ LeafInitRule(),
275
+ EmptyPredictRule(),
276
+ BottomUpPredictCombineRule(),
277
+ CompleteFundamentalRule(),
278
+ ]
279
+
280
+ LC_INCREMENTAL_STRATEGY = [
281
+ LeafInitRule(),
282
+ FilteredBottomUpPredictCombineRule(),
283
+ FilteredCompleteFundamentalRule(),
284
+ ]
285
+
286
+
287
+ class IncrementalChartParser(ChartParser):
288
+ """
289
+ An *incremental* chart parser implementing Jay Earley's
290
+ parsing algorithm:
291
+
292
+ | For each index end in [0, 1, ..., N]:
293
+ | For each edge such that edge.end = end:
294
+ | If edge is incomplete and edge.next is not a part of speech:
295
+ | Apply PredictorRule to edge
296
+ | If edge is incomplete and edge.next is a part of speech:
297
+ | Apply ScannerRule to edge
298
+ | If edge is complete:
299
+ | Apply CompleterRule to edge
300
+ | Return any complete parses in the chart
301
+ """
302
+
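+ # A usage sketch: any of the strategies defined above can be plugged in
+ # (demo_grammar is the sample CFG from nltk.parse.chart).
+ #
+ # >>> from nltk.parse.chart import demo_grammar
+ # >>> grammar = demo_grammar()
+ # >>> parser = IncrementalChartParser(grammar, TD_INCREMENTAL_STRATEGY)
+ # >>> chart = parser.chart_parse("I saw John".split())
+ # >>> sum(1 for _ in chart.parses(grammar.start()))
+ # 1
+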
303
+ def __init__(
304
+ self,
305
+ grammar,
306
+ strategy=BU_LC_INCREMENTAL_STRATEGY,
307
+ trace=0,
308
+ trace_chart_width=50,
309
+ chart_class=IncrementalChart,
310
+ ):
311
+ """
312
+ Create a new Earley chart parser, that uses ``grammar`` to
313
+ parse texts.
314
+
315
+ :type grammar: CFG
316
+ :param grammar: The grammar used to parse texts.
317
+ :type trace: int
318
+ :param trace: The level of tracing that should be used when
319
+ parsing a text. ``0`` will generate no tracing output;
320
+ and higher numbers will produce more verbose tracing
321
+ output.
322
+ :type trace_chart_width: int
323
+ :param trace_chart_width: The default total width reserved for
324
+ the chart in trace output. The remainder of each line will
325
+ be used to display edges.
326
+ :param chart_class: The class that should be used to create
327
+ the charts used by this parser.
328
+ """
329
+ self._grammar = grammar
330
+ self._trace = trace
331
+ self._trace_chart_width = trace_chart_width
332
+ self._chart_class = chart_class
333
+
334
+ self._axioms = []
335
+ self._inference_rules = []
336
+ for rule in strategy:
337
+ if rule.NUM_EDGES == 0:
338
+ self._axioms.append(rule)
339
+ elif rule.NUM_EDGES == 1:
340
+ self._inference_rules.append(rule)
341
+ else:
342
+ raise ValueError(
343
+ "Incremental inference rules must have " "NUM_EDGES == 0 or 1"
344
+ )
345
+
346
+ def chart_parse(self, tokens, trace=None):
347
+ if trace is None:
348
+ trace = self._trace
349
+ trace_new_edges = self._trace_new_edges
350
+
351
+ tokens = list(tokens)
352
+ self._grammar.check_coverage(tokens)
353
+ chart = self._chart_class(tokens)
354
+ grammar = self._grammar
355
+
356
+ # Width, for printing trace edges.
357
+ trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1)
358
+ if trace:
359
+ print(chart.pretty_format_leaves(trace_edge_width))
360
+
361
+ for axiom in self._axioms:
362
+ new_edges = list(axiom.apply(chart, grammar))
363
+ trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
364
+
365
+ inference_rules = self._inference_rules
366
+ for end in range(chart.num_leaves() + 1):
367
+ if trace > 1:
368
+ print("\n* Processing queue:", end, "\n")
369
+ agenda = list(chart.select(end=end))
370
+ while agenda:
371
+ edge = agenda.pop()
372
+ for rule in inference_rules:
373
+ new_edges = list(rule.apply(chart, grammar, edge))
374
+ trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
375
+ for new_edge in new_edges:
376
+ if new_edge.end() == end:
377
+ agenda.append(new_edge)
378
+
379
+ return chart
380
+
381
+
382
+ class EarleyChartParser(IncrementalChartParser):
383
+ def __init__(self, grammar, **parser_args):
384
+ IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args)
385
+
386
+
387
+ class IncrementalTopDownChartParser(IncrementalChartParser):
388
+ def __init__(self, grammar, **parser_args):
389
+ IncrementalChartParser.__init__(
390
+ self, grammar, TD_INCREMENTAL_STRATEGY, **parser_args
391
+ )
392
+
393
+
394
+ class IncrementalBottomUpChartParser(IncrementalChartParser):
395
+ def __init__(self, grammar, **parser_args):
396
+ IncrementalChartParser.__init__(
397
+ self, grammar, BU_INCREMENTAL_STRATEGY, **parser_args
398
+ )
399
+
400
+
401
+ class IncrementalBottomUpLeftCornerChartParser(IncrementalChartParser):
402
+ def __init__(self, grammar, **parser_args):
403
+ IncrementalChartParser.__init__(
404
+ self, grammar, BU_LC_INCREMENTAL_STRATEGY, **parser_args
405
+ )
406
+
407
+
408
+ class IncrementalLeftCornerChartParser(IncrementalChartParser):
409
+ def __init__(self, grammar, **parser_args):
410
+ if not grammar.is_nonempty():
411
+ raise ValueError(
412
+ "IncrementalLeftCornerParser only works for grammars "
413
+ "without empty productions."
414
+ )
415
+ IncrementalChartParser.__init__(
416
+ self, grammar, LC_INCREMENTAL_STRATEGY, **parser_args
417
+ )
418
+
419
+
420
+ # ////////////////////////////////////////////////////////////
421
+ # Incremental FCFG Chart Parsers
422
+ # ////////////////////////////////////////////////////////////
423
+
424
+ EARLEY_FEATURE_STRATEGY = [
425
+ LeafInitRule(),
426
+ FeatureTopDownInitRule(),
427
+ FeatureCompleterRule(),
428
+ FeatureScannerRule(),
429
+ FeaturePredictorRule(),
430
+ ]
431
+ TD_INCREMENTAL_FEATURE_STRATEGY = [
432
+ LeafInitRule(),
433
+ FeatureTopDownInitRule(),
434
+ FeatureTopDownPredictRule(),
435
+ FeatureCompleteFundamentalRule(),
436
+ ]
437
+ BU_INCREMENTAL_FEATURE_STRATEGY = [
438
+ LeafInitRule(),
439
+ FeatureEmptyPredictRule(),
440
+ FeatureBottomUpPredictRule(),
441
+ FeatureCompleteFundamentalRule(),
442
+ ]
443
+ BU_LC_INCREMENTAL_FEATURE_STRATEGY = [
444
+ LeafInitRule(),
445
+ FeatureEmptyPredictRule(),
446
+ FeatureBottomUpPredictCombineRule(),
447
+ FeatureCompleteFundamentalRule(),
448
+ ]
449
+
450
+
451
+ class FeatureIncrementalChartParser(IncrementalChartParser, FeatureChartParser):
452
+ def __init__(
453
+ self,
454
+ grammar,
455
+ strategy=BU_LC_INCREMENTAL_FEATURE_STRATEGY,
456
+ trace_chart_width=20,
457
+ chart_class=FeatureIncrementalChart,
458
+ **parser_args
459
+ ):
460
+ IncrementalChartParser.__init__(
461
+ self,
462
+ grammar,
463
+ strategy=strategy,
464
+ trace_chart_width=trace_chart_width,
465
+ chart_class=chart_class,
466
+ **parser_args
467
+ )
468
+
469
+
470
+ class FeatureEarleyChartParser(FeatureIncrementalChartParser):
471
+ def __init__(self, grammar, **parser_args):
472
+ FeatureIncrementalChartParser.__init__(
473
+ self, grammar, EARLEY_FEATURE_STRATEGY, **parser_args
474
+ )
475
+
476
+
477
+ class FeatureIncrementalTopDownChartParser(FeatureIncrementalChartParser):
478
+ def __init__(self, grammar, **parser_args):
479
+ FeatureIncrementalChartParser.__init__(
480
+ self, grammar, TD_INCREMENTAL_FEATURE_STRATEGY, **parser_args
481
+ )
482
+
483
+
484
+ class FeatureIncrementalBottomUpChartParser(FeatureIncrementalChartParser):
485
+ def __init__(self, grammar, **parser_args):
486
+ FeatureIncrementalChartParser.__init__(
487
+ self, grammar, BU_INCREMENTAL_FEATURE_STRATEGY, **parser_args
488
+ )
489
+
490
+
491
+ class FeatureIncrementalBottomUpLeftCornerChartParser(FeatureIncrementalChartParser):
492
+ def __init__(self, grammar, **parser_args):
493
+ FeatureIncrementalChartParser.__init__(
494
+ self, grammar, BU_LC_INCREMENTAL_FEATURE_STRATEGY, **parser_args
495
+ )
496
+
497
+
498
+ # ////////////////////////////////////////////////////////////
499
+ # Demonstration
500
+ # ////////////////////////////////////////////////////////////
501
+
502
+
503
+ def demo(
504
+ print_times=True,
505
+ print_grammar=False,
506
+ print_trees=True,
507
+ trace=2,
508
+ sent="I saw John with a dog with my cookie",
509
+ numparses=5,
510
+ ):
511
+ """
512
+ A demonstration of the Earley parsers.
513
+ """
514
+ import sys
515
+ import time
516
+
517
+ from nltk.parse.chart import demo_grammar
518
+
519
+ # The grammar for ChartParser and SteppingChartParser:
520
+ grammar = demo_grammar()
521
+ if print_grammar:
522
+ print("* Grammar")
523
+ print(grammar)
524
+
525
+ # Tokenize the sample sentence.
526
+ print("* Sentence:")
527
+ print(sent)
528
+ tokens = sent.split()
529
+ print(tokens)
530
+ print()
531
+
532
+ # Do the parsing.
533
+ earley = EarleyChartParser(grammar, trace=trace)
534
+ t = perf_counter()
535
+ chart = earley.chart_parse(tokens)
536
+ parses = list(chart.parses(grammar.start()))
537
+ t = perf_counter() - t
538
+
539
+ # Print results.
540
+ if numparses:
541
+ assert len(parses) == numparses, "Not all parses found"
542
+ if print_trees:
543
+ for tree in parses:
544
+ print(tree)
545
+ else:
546
+ print("Nr trees:", len(parses))
547
+ if print_times:
548
+ print("Time:", t)
549
+
550
+
551
+ if __name__ == "__main__":
552
+ demo()
env-llmeval/lib/python3.10/site-packages/nltk/parse/evaluate.py ADDED
@@ -0,0 +1,129 @@
1
+ # Natural Language Toolkit: evaluation of dependency parser
2
+ #
3
+ # Author: Long Duong <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import unicodedata
10
+
11
+
12
+ class DependencyEvaluator:
13
+ """
14
+ Class for measuring labelled and unlabelled attachment score for
15
+ dependency parsing. Note that the evaluation ignores punctuation.
16
+
17
+ >>> from nltk.parse import DependencyGraph, DependencyEvaluator
18
+
19
+ >>> gold_sent = DependencyGraph(\"""
20
+ ... Pierre NNP 2 NMOD
21
+ ... Vinken NNP 8 SUB
22
+ ... , , 2 P
23
+ ... 61 CD 5 NMOD
24
+ ... years NNS 6 AMOD
25
+ ... old JJ 2 NMOD
26
+ ... , , 2 P
27
+ ... will MD 0 ROOT
28
+ ... join VB 8 VC
29
+ ... the DT 11 NMOD
30
+ ... board NN 9 OBJ
31
+ ... as IN 9 VMOD
32
+ ... a DT 15 NMOD
33
+ ... nonexecutive JJ 15 NMOD
34
+ ... director NN 12 PMOD
35
+ ... Nov. NNP 9 VMOD
36
+ ... 29 CD 16 NMOD
37
+ ... . . 9 VMOD
38
+ ... \""")
39
+
40
+ >>> parsed_sent = DependencyGraph(\"""
41
+ ... Pierre NNP 8 NMOD
42
+ ... Vinken NNP 1 SUB
43
+ ... , , 3 P
44
+ ... 61 CD 6 NMOD
45
+ ... years NNS 6 AMOD
46
+ ... old JJ 2 NMOD
47
+ ... , , 3 AMOD
48
+ ... will MD 0 ROOT
49
+ ... join VB 8 VC
50
+ ... the DT 11 AMOD
51
+ ... board NN 9 OBJECT
52
+ ... as IN 9 NMOD
53
+ ... a DT 15 NMOD
54
+ ... nonexecutive JJ 15 NMOD
55
+ ... director NN 12 PMOD
56
+ ... Nov. NNP 9 VMOD
57
+ ... 29 CD 16 NMOD
58
+ ... . . 9 VMOD
59
+ ... \""")
60
+
61
+ >>> de = DependencyEvaluator([parsed_sent],[gold_sent])
62
+ >>> las, uas = de.eval()
63
+ >>> las
64
+ 0.6
65
+ >>> uas
66
+ 0.8
67
+ >>> abs(uas - 0.8) < 0.00001
68
+ True
69
+ """
70
+
71
+ def __init__(self, parsed_sents, gold_sents):
72
+ """
73
+ :param parsed_sents: the parsed sentences, as output by a parser
+ :type parsed_sents: list(DependencyGraph)
+ :param gold_sents: the corresponding gold-standard sentences
+ :type gold_sents: list(DependencyGraph)
75
+ """
76
+ self._parsed_sents = parsed_sents
77
+ self._gold_sents = gold_sents
78
+
79
+ def _remove_punct(self, inStr):
80
+ """
81
+ Function to remove punctuation from Unicode string.
82
+ :param inStr: the input string
83
+ :return: the Unicode string with all punctuation removed
84
+ """
85
+ punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"}
86
+ return "".join(x for x in inStr if unicodedata.category(x) not in punc_cat)
87
+
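+ # A quick sanity check of the punctuation filter (sketch):
+ #
+ # >>> DependencyEvaluator([], [])._remove_punct("Hello, world!")
+ # 'Hello world'
+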
88
+ def eval(self):
89
+ """
90
+ Return the Labeled Attachment Score (LAS) and Unlabeled Attachment Score (UAS)
91
+
92
+ :return: tuple(float, float)
93
+ """
94
+ if len(self._parsed_sents) != len(self._gold_sents):
95
+ raise ValueError(
96
+ " Number of parsed sentence is different with number of gold sentence."
97
+ )
98
+
99
+ corr = 0
100
+ corrL = 0
101
+ total = 0
102
+
103
+ for i in range(len(self._parsed_sents)):
104
+ parsed_sent_nodes = self._parsed_sents[i].nodes
105
+ gold_sent_nodes = self._gold_sents[i].nodes
106
+
107
+ if len(parsed_sent_nodes) != len(gold_sent_nodes):
108
+ raise ValueError("Sentences must have equal length.")
109
+
110
+ for parsed_node_address, parsed_node in parsed_sent_nodes.items():
111
+ gold_node = gold_sent_nodes[parsed_node_address]
112
+
113
+ if parsed_node["word"] is None:
114
+ continue
115
+ if parsed_node["word"] != gold_node["word"]:
116
+ raise ValueError("Sentence sequence is not matched.")
117
+
118
+ # Ignore if word is punctuation by default
119
+ # if (parsed_sent[j]["word"] in string.punctuation):
120
+ if self._remove_punct(parsed_node["word"]) == "":
121
+ continue
122
+
123
+ total += 1
124
+ if parsed_node["head"] == gold_node["head"]:
125
+ corr += 1
126
+ if parsed_node["rel"] == gold_node["rel"]:
127
+ corrL += 1
128
+
129
+ return corrL / total, corr / total
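+
+ # A typical evaluation run (file names here are hypothetical;
+ # DependencyGraph.load reads a list of graphs from a CoNLL-style file):
+ #
+ # >>> from nltk.parse import DependencyGraph, DependencyEvaluator
+ # >>> parsed = DependencyGraph.load("parser_output.conll")
+ # >>> gold = DependencyGraph.load("gold_standard.conll")
+ # >>> las, uas = DependencyEvaluator(parsed, gold).eval()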
env-llmeval/lib/python3.10/site-packages/nltk/parse/featurechart.py ADDED
@@ -0,0 +1,674 @@
1
+ # Natural Language Toolkit: Chart Parser for Feature-Based Grammars
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Rob Speer <[email protected]>
5
+ # Peter Ljunglöf <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Extension of chart parsing implementation to handle grammars with
11
+ feature structures as nodes.
12
+ """
13
+ from time import perf_counter
14
+
15
+ from nltk.featstruct import TYPE, FeatStruct, find_variables, unify
16
+ from nltk.grammar import (
17
+ CFG,
18
+ FeatStructNonterminal,
19
+ Nonterminal,
20
+ Production,
21
+ is_nonterminal,
22
+ is_terminal,
23
+ )
24
+ from nltk.parse.chart import (
25
+ BottomUpPredictCombineRule,
26
+ BottomUpPredictRule,
27
+ CachedTopDownPredictRule,
28
+ Chart,
29
+ ChartParser,
30
+ EdgeI,
31
+ EmptyPredictRule,
32
+ FundamentalRule,
33
+ LeafInitRule,
34
+ SingleEdgeFundamentalRule,
35
+ TopDownInitRule,
36
+ TreeEdge,
37
+ )
38
+ from nltk.sem import logic
39
+ from nltk.tree import Tree
40
+
41
+ # ////////////////////////////////////////////////////////////
42
+ # Tree Edge
43
+ # ////////////////////////////////////////////////////////////
44
+
45
+
46
+ class FeatureTreeEdge(TreeEdge):
47
+ """
48
+ A specialized tree edge that allows shared variable bindings
49
+ between nonterminals on the left-hand side and right-hand side.
50
+
51
+ Each ``FeatureTreeEdge`` contains a set of ``bindings``, i.e., a
52
+ dictionary mapping from variables to values. If the edge is not
53
+ complete, then these bindings are simply stored. However, if the
54
+ edge is complete, then the constructor applies these bindings to
55
+ every nonterminal in the edge whose symbol implements the
56
+ interface ``SubstituteBindingsI``.
57
+ """
58
+
59
+ def __init__(self, span, lhs, rhs, dot=0, bindings=None):
60
+ """
61
+ Construct a new edge. If the edge is incomplete (i.e., if
62
+ ``dot<len(rhs)``), then store the bindings as-is. If the edge
63
+ is complete (i.e., if ``dot==len(rhs)``), then apply the
64
+ bindings to all nonterminals in ``lhs`` and ``rhs``, and then
65
+ clear the bindings. See ``TreeEdge`` for a description of
66
+ the other arguments.
67
+ """
68
+ if bindings is None:
69
+ bindings = {}
70
+
71
+ # If the edge is complete, then substitute in the bindings,
72
+ # and then throw them away. (If we didn't throw them away, we
73
+ # might think that 2 complete edges are different just because
74
+ # they have different bindings, even though all bindings have
75
+ # already been applied.)
76
+ if dot == len(rhs) and bindings:
77
+ lhs = self._bind(lhs, bindings)
78
+ rhs = [self._bind(elt, bindings) for elt in rhs]
79
+ bindings = {}
80
+
81
+ # Initialize the edge.
82
+ TreeEdge.__init__(self, span, lhs, rhs, dot)
83
+ self._bindings = bindings
84
+ self._comparison_key = (self._comparison_key, tuple(sorted(bindings.items())))
85
+
86
+ @staticmethod
87
+ def from_production(production, index):
88
+ """
89
+ :return: A new ``TreeEdge`` formed from the given production.
90
+ The new edge's left-hand side and right-hand side will
91
+ be taken from ``production``; its span will be
92
+ ``(index,index)``; and its dot position will be ``0``.
93
+ :rtype: TreeEdge
94
+ """
95
+ return FeatureTreeEdge(
96
+ span=(index, index), lhs=production.lhs(), rhs=production.rhs(), dot=0
97
+ )
98
+
99
+ def move_dot_forward(self, new_end, bindings=None):
100
+ """
101
+ :return: A new ``FeatureTreeEdge`` formed from this edge.
102
+ The new edge's dot position is increased by ``1``,
103
+ and its end index will be replaced by ``new_end``.
104
+ :rtype: FeatureTreeEdge
105
+ :param new_end: The new end index.
106
+ :type new_end: int
107
+ :param bindings: Bindings for the new edge.
108
+ :type bindings: dict
109
+ """
110
+ return FeatureTreeEdge(
111
+ span=(self._span[0], new_end),
112
+ lhs=self._lhs,
113
+ rhs=self._rhs,
114
+ dot=self._dot + 1,
115
+ bindings=bindings,
116
+ )
117
+
118
+ def _bind(self, nt, bindings):
119
+ if not isinstance(nt, FeatStructNonterminal):
120
+ return nt
121
+ return nt.substitute_bindings(bindings)
122
+
123
+ def next_with_bindings(self):
124
+ return self._bind(self.nextsym(), self._bindings)
125
+
126
+ def bindings(self):
127
+ """
128
+ Return a copy of this edge's bindings dictionary.
129
+ """
130
+ return self._bindings.copy()
131
+
132
+ def variables(self):
133
+ """
134
+ :return: The set of variables used by this edge.
135
+ :rtype: set(Variable)
136
+ """
137
+ return find_variables(
138
+ [self._lhs]
139
+ + list(self._rhs)
140
+ + list(self._bindings.keys())
141
+ + list(self._bindings.values()),
142
+ fs_class=FeatStruct,
143
+ )
144
+
145
+ def __str__(self):
146
+ if self.is_complete():
147
+ return super().__str__()
148
+ else:
149
+ bindings = "{%s}" % ", ".join(
150
+ "%s: %r" % item for item in sorted(self._bindings.items())
151
+ )
152
+ return f"{super().__str__()} {bindings}"
153
+
154
+
155
+ # ////////////////////////////////////////////////////////////
156
+ # A specialized Chart for feature grammars
157
+ # ////////////////////////////////////////////////////////////
158
+
159
+ # TODO: subsumes check when adding new edges
160
+
161
+
162
+ class FeatureChart(Chart):
163
+ """
164
+ A Chart for feature grammars.
165
+ :see: ``Chart`` for more information.
166
+ """
167
+
168
+ def select(self, **restrictions):
169
+ """
170
+ Returns an iterator over the edges in this chart.
171
+ See ``Chart.select`` for more information about the
172
+ ``restrictions`` on the edges.
173
+ """
174
+ # If there are no restrictions, then return all edges.
175
+ if restrictions == {}:
176
+ return iter(self._edges)
177
+
178
+ # Find the index corresponding to the given restrictions.
179
+ restr_keys = sorted(restrictions.keys())
180
+ restr_keys = tuple(restr_keys)
181
+
182
+ # If it doesn't exist, then create it.
183
+ if restr_keys not in self._indexes:
184
+ self._add_index(restr_keys)
185
+
186
+ vals = tuple(
187
+ self._get_type_if_possible(restrictions[key]) for key in restr_keys
188
+ )
189
+ return iter(self._indexes[restr_keys].get(vals, []))
190
+
191
+ def _add_index(self, restr_keys):
192
+ """
193
+ A helper function for ``select``, which creates a new index for
194
+ a given set of attributes (aka restriction keys).
195
+ """
196
+ # Make sure it's a valid index.
197
+ for key in restr_keys:
198
+ if not hasattr(EdgeI, key):
199
+ raise ValueError("Bad restriction: %s" % key)
200
+
201
+ # Create the index.
202
+ index = self._indexes[restr_keys] = {}
203
+
204
+ # Add all existing edges to the index.
205
+ for edge in self._edges:
206
+ vals = tuple(
207
+ self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys
208
+ )
209
+ index.setdefault(vals, []).append(edge)
210
+
211
+ def _register_with_indexes(self, edge):
212
+ """
213
+ A helper function for ``insert``, which registers the new
214
+ edge with all existing indexes.
215
+ """
216
+ for (restr_keys, index) in self._indexes.items():
217
+ vals = tuple(
218
+ self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys
219
+ )
220
+ index.setdefault(vals, []).append(edge)
221
+
222
+ def _get_type_if_possible(self, item):
223
+ """
224
+ Helper function which returns the ``TYPE`` feature of the ``item``,
225
+ if it exists, otherwise it returns the ``item`` itself
226
+ """
227
+ if isinstance(item, dict) and TYPE in item:
228
+ return item[TYPE]
229
+ else:
230
+ return item
231
+
232
+ def parses(self, start, tree_class=Tree):
233
+ for edge in self.select(start=0, end=self._num_leaves):
234
+ if (
235
+ (isinstance(edge, FeatureTreeEdge))
236
+ and (edge.lhs()[TYPE] == start[TYPE])
237
+ and (unify(edge.lhs(), start, rename_vars=True))
238
+ ):
239
+ yield from self.trees(edge, complete=True, tree_class=tree_class)
240
+
241
+
242
+ # ////////////////////////////////////////////////////////////
243
+ # Fundamental Rule
244
+ # ////////////////////////////////////////////////////////////
245
+
246
+
247
+ class FeatureFundamentalRule(FundamentalRule):
248
+ r"""
249
+ A specialized version of the fundamental rule that operates on
250
+ nonterminals whose symbols are ``FeatStructNonterminal``s. Rather
251
+ than simply comparing the nonterminals for equality, they are
252
+ unified. Variable bindings from these unifications are collected
253
+ and stored in the chart using a ``FeatureTreeEdge``. When a
254
+ complete edge is generated, these bindings are applied to all
255
+ nonterminals in the edge.
256
+
257
+ The fundamental rule states that:
258
+
259
+ - ``[A -> alpha \* B1 beta][i:j]``
260
+ - ``[B2 -> gamma \*][j:k]``
261
+
262
+ licenses the edge:
263
+
264
+ - ``[A -> alpha B3 \* beta][i:j]``
265
+
266
+ assuming that B1 and B2 can be unified to generate B3.
267
+ """
268
+
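+ # The unification underlying this rule is ordinary feature-structure
+ # unification; a tiny standalone sketch:
+ #
+ # >>> from nltk.featstruct import FeatStruct, unify
+ # >>> print(unify(FeatStruct("[num='sg']"), FeatStruct("[num=?n, gen='fem']")))
+ # [ gen = 'fem' ]
+ # [ num = 'sg'  ]
+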
269
+ def apply(self, chart, grammar, left_edge, right_edge):
270
+ # Make sure the rule is applicable.
271
+ if not (
272
+ left_edge.end() == right_edge.start()
273
+ and left_edge.is_incomplete()
274
+ and right_edge.is_complete()
275
+ and isinstance(left_edge, FeatureTreeEdge)
276
+ ):
277
+ return
278
+ found = right_edge.lhs()
279
+ nextsym = left_edge.nextsym()
280
+ if isinstance(right_edge, FeatureTreeEdge):
281
+ if not is_nonterminal(nextsym):
282
+ return
283
+ if left_edge.nextsym()[TYPE] != right_edge.lhs()[TYPE]:
284
+ return
285
+ # Create a copy of the bindings.
286
+ bindings = left_edge.bindings()
287
+ # We rename vars here, because we don't want variables
288
+ # from the two different productions to match.
289
+ found = found.rename_variables(used_vars=left_edge.variables())
290
+ # Unify B1 (left_edge.nextsym) with B2 (right_edge.lhs) to
291
+ # generate B3 (result).
292
+ result = unify(nextsym, found, bindings, rename_vars=False)
293
+ if result is None:
294
+ return
295
+ else:
296
+ if nextsym != found:
297
+ return
298
+ # Create a copy of the bindings.
299
+ bindings = left_edge.bindings()
300
+
301
+ # Construct the new edge.
302
+ new_edge = left_edge.move_dot_forward(right_edge.end(), bindings)
303
+
304
+ # Add it to the chart, with appropriate child pointers.
305
+ if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
306
+ yield new_edge
307
+
308
+
309
+ class FeatureSingleEdgeFundamentalRule(SingleEdgeFundamentalRule):
310
+ """
311
+ A specialized version of the completer / single edge fundamental rule
312
+ that operates on nonterminals whose symbols are ``FeatStructNonterminal``.
313
+ Rather than simply comparing the nonterminals for equality, they are
314
+ unified.
315
+ """
316
+
317
+ _fundamental_rule = FeatureFundamentalRule()
318
+
319
+ def _apply_complete(self, chart, grammar, right_edge):
320
+ fr = self._fundamental_rule
321
+ for left_edge in chart.select(
322
+ end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs()
323
+ ):
324
+ yield from fr.apply(chart, grammar, left_edge, right_edge)
325
+
326
+ def _apply_incomplete(self, chart, grammar, left_edge):
327
+ fr = self._fundamental_rule
328
+ for right_edge in chart.select(
329
+ start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym()
330
+ ):
331
+ yield from fr.apply(chart, grammar, left_edge, right_edge)
332
+
333
+
334
+ # ////////////////////////////////////////////////////////////
335
+ # Top-Down Prediction
336
+ # ////////////////////////////////////////////////////////////
337
+
338
+
339
+ class FeatureTopDownInitRule(TopDownInitRule):
340
+ def apply(self, chart, grammar):
341
+ for prod in grammar.productions(lhs=grammar.start()):
342
+ new_edge = FeatureTreeEdge.from_production(prod, 0)
343
+ if chart.insert(new_edge, ()):
344
+ yield new_edge
345
+
346
+
347
+ class FeatureTopDownPredictRule(CachedTopDownPredictRule):
348
+ r"""
349
+ A specialized version of the (cached) top down predict rule that operates
350
+ on nonterminals whose symbols are ``FeatStructNonterminal``. Rather
351
+ than simply comparing the nonterminals for equality, they are
352
+ unified.
353
+
354
+ The top down expand rule states that:
355
+
356
+ - ``[A -> alpha \* B1 beta][i:j]``
357
+
358
+ licenses the edge:
359
+
360
+ - ``[B2 -> \* gamma][j:j]``
361
+
362
+ for each grammar production ``B2 -> gamma``, assuming that B1
363
+ and B2 can be unified.
364
+ """
365
+
366
+ def apply(self, chart, grammar, edge):
367
+ if edge.is_complete():
368
+ return
369
+ nextsym, index = edge.nextsym(), edge.end()
370
+ if not is_nonterminal(nextsym):
371
+ return
372
+
373
+ # If we've already applied this rule to an edge with the same
374
+ # next & end, and the chart & grammar have not changed, then
375
+ # just return (no new edges to add).
376
+ nextsym_with_bindings = edge.next_with_bindings()
377
+ done = self._done.get((nextsym_with_bindings, index), (None, None))
378
+ if done[0] is chart and done[1] is grammar:
379
+ return
380
+
381
+ for prod in grammar.productions(lhs=nextsym):
382
+ # If the left corner in the predicted production is
383
+ # leaf, it must match with the input.
384
+ if prod.rhs():
385
+ first = prod.rhs()[0]
386
+ if is_terminal(first):
387
+ if index >= chart.num_leaves():
388
+ continue
389
+ if first != chart.leaf(index):
390
+ continue
391
+
392
+ # We rename vars here, because we don't want variables
393
+ # from the two different productions to match.
394
+ if unify(prod.lhs(), nextsym_with_bindings, rename_vars=True):
395
+ new_edge = FeatureTreeEdge.from_production(prod, edge.end())
396
+ if chart.insert(new_edge, ()):
397
+ yield new_edge
398
+
399
+ # Record the fact that we've applied this rule.
400
+ self._done[nextsym_with_bindings, index] = (chart, grammar)
401
+
402
+
403
+ # ////////////////////////////////////////////////////////////
404
+ # Bottom-Up Prediction
405
+ # ////////////////////////////////////////////////////////////
406
+
407
+
408
+ class FeatureBottomUpPredictRule(BottomUpPredictRule):
409
+ def apply(self, chart, grammar, edge):
410
+ if edge.is_incomplete():
411
+ return
412
+ for prod in grammar.productions(rhs=edge.lhs()):
413
+ if isinstance(edge, FeatureTreeEdge):
414
+ _next = prod.rhs()[0]
415
+ if not is_nonterminal(_next):
416
+ continue
417
+
418
+ new_edge = FeatureTreeEdge.from_production(prod, edge.start())
419
+ if chart.insert(new_edge, ()):
420
+ yield new_edge
421
+
422
+
423
+ class FeatureBottomUpPredictCombineRule(BottomUpPredictCombineRule):
424
+ def apply(self, chart, grammar, edge):
425
+ if edge.is_incomplete():
426
+ return
427
+ found = edge.lhs()
428
+ for prod in grammar.productions(rhs=found):
429
+ bindings = {}
430
+ if isinstance(edge, FeatureTreeEdge):
431
+ _next = prod.rhs()[0]
432
+ if not is_nonterminal(_next):
433
+ continue
434
+
435
+ # We rename vars here, because we don't want variables
436
+ # from the two different productions to match.
437
+ used_vars = find_variables(
438
+ (prod.lhs(),) + prod.rhs(), fs_class=FeatStruct
439
+ )
440
+ found = found.rename_variables(used_vars=used_vars)
441
+
442
+ result = unify(_next, found, bindings, rename_vars=False)
443
+ if result is None:
444
+ continue
445
+
446
+ new_edge = FeatureTreeEdge.from_production(
447
+ prod, edge.start()
448
+ ).move_dot_forward(edge.end(), bindings)
449
+ if chart.insert(new_edge, (edge,)):
450
+ yield new_edge
451
+
452
+
453
+ class FeatureEmptyPredictRule(EmptyPredictRule):
454
+ def apply(self, chart, grammar):
455
+ for prod in grammar.productions(empty=True):
456
+ for index in range(chart.num_leaves() + 1):
457
+ new_edge = FeatureTreeEdge.from_production(prod, index)
458
+ if chart.insert(new_edge, ()):
459
+ yield new_edge
460
+
461
+
462
+ # ////////////////////////////////////////////////////////////
463
+ # Feature Chart Parser
464
+ # ////////////////////////////////////////////////////////////
465
+
466
+ TD_FEATURE_STRATEGY = [
467
+ LeafInitRule(),
468
+ FeatureTopDownInitRule(),
469
+ FeatureTopDownPredictRule(),
470
+ FeatureSingleEdgeFundamentalRule(),
471
+ ]
472
+ BU_FEATURE_STRATEGY = [
473
+ LeafInitRule(),
474
+ FeatureEmptyPredictRule(),
475
+ FeatureBottomUpPredictRule(),
476
+ FeatureSingleEdgeFundamentalRule(),
477
+ ]
478
+ BU_LC_FEATURE_STRATEGY = [
479
+ LeafInitRule(),
480
+ FeatureEmptyPredictRule(),
481
+ FeatureBottomUpPredictCombineRule(),
482
+ FeatureSingleEdgeFundamentalRule(),
483
+ ]
484
+
485
+
486
+ class FeatureChartParser(ChartParser):
487
+ def __init__(
488
+ self,
489
+ grammar,
490
+ strategy=BU_LC_FEATURE_STRATEGY,
491
+ trace_chart_width=20,
492
+ chart_class=FeatureChart,
493
+ **parser_args,
494
+ ):
495
+ ChartParser.__init__(
496
+ self,
497
+ grammar,
498
+ strategy=strategy,
499
+ trace_chart_width=trace_chart_width,
500
+ chart_class=chart_class,
501
+ **parser_args,
502
+ )
503
+
504
+
505
+ class FeatureTopDownChartParser(FeatureChartParser):
506
+ def __init__(self, grammar, **parser_args):
507
+ FeatureChartParser.__init__(self, grammar, TD_FEATURE_STRATEGY, **parser_args)
508
+
509
+
510
+ class FeatureBottomUpChartParser(FeatureChartParser):
511
+ def __init__(self, grammar, **parser_args):
512
+ FeatureChartParser.__init__(self, grammar, BU_FEATURE_STRATEGY, **parser_args)
513
+
514
+
515
+ class FeatureBottomUpLeftCornerChartParser(FeatureChartParser):
516
+ def __init__(self, grammar, **parser_args):
517
+ FeatureChartParser.__init__(
518
+ self, grammar, BU_LC_FEATURE_STRATEGY, **parser_args
519
+ )
520
+
521
+
522
+ # ////////////////////////////////////////////////////////////
523
+ # Instantiate Variable Chart
524
+ # ////////////////////////////////////////////////////////////
525
+
526
+
527
+ class InstantiateVarsChart(FeatureChart):
528
+ """
529
+ A specialized chart that 'instantiates' variables whose names
530
+ start with '@', by replacing them with unique new variables.
531
+ In particular, whenever a complete edge is added to the chart, any
532
+ variables in the edge's ``lhs`` whose names start with '@' will be
533
+ replaced by unique new ``Variable`` objects.
534
+ """
535
+
536
+ def __init__(self, tokens):
537
+ FeatureChart.__init__(self, tokens)
538
+
539
+ def initialize(self):
540
+ self._instantiated = set()
541
+ FeatureChart.initialize(self)
542
+
543
+ def insert(self, edge, child_pointer_list):
544
+ if edge in self._instantiated:
545
+ return False
546
+ self.instantiate_edge(edge)
547
+ return FeatureChart.insert(self, edge, child_pointer_list)
548
+
549
+ def instantiate_edge(self, edge):
550
+ """
551
+ If the edge is a ``FeatureTreeEdge``, and it is complete,
552
+ then instantiate all variables whose names start with '@',
553
+ by replacing them with unique new variables.
554
+
555
+ Note that instantiation is done in-place, since the
556
+ parsing algorithms might already hold a reference to
557
+ the edge for future use.
558
+ """
559
+ # If the edge is a leaf, or is not complete, or is
560
+ # already in the chart, then just return it as-is.
561
+ if not isinstance(edge, FeatureTreeEdge):
562
+ return
563
+ if not edge.is_complete():
564
+ return
565
+ if edge in self._edge_to_cpls:
566
+ return
567
+
568
+ # Get a list of variables that need to be instantiated.
569
+ # If there are none, then return as-is.
570
+ inst_vars = self.inst_vars(edge)
571
+ if not inst_vars:
572
+ return
573
+
574
+ # Instantiate the edge!
575
+ self._instantiated.add(edge)
576
+ edge._lhs = edge.lhs().substitute_bindings(inst_vars)
577
+
578
+ def inst_vars(self, edge):
579
+ return {
580
+ var: logic.unique_variable()
581
+ for var in edge.lhs().variables()
582
+ if var.name.startswith("@")
583
+ }
584
+
585
+
586
+ # ////////////////////////////////////////////////////////////
587
+ # Demo
588
+ # ////////////////////////////////////////////////////////////
589
+
590
+
591
+ def demo_grammar():
592
+ from nltk.grammar import FeatureGrammar
593
+
594
+ return FeatureGrammar.fromstring(
595
+ """
596
+ S -> NP VP
597
+ PP -> Prep NP
598
+ NP -> NP PP
599
+ VP -> VP PP
600
+ VP -> Verb NP
601
+ VP -> Verb
602
+ NP -> Det[pl=?x] Noun[pl=?x]
603
+ NP -> "John"
604
+ NP -> "I"
605
+ Det -> "the"
606
+ Det -> "my"
607
+ Det[-pl] -> "a"
608
+ Noun[-pl] -> "dog"
609
+ Noun[-pl] -> "cookie"
610
+ Verb -> "ate"
611
+ Verb -> "saw"
612
+ Prep -> "with"
613
+ Prep -> "under"
614
+ """
615
+ )
616
+
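+ # A quick sketch with the feature grammar above (the [pl=?x] features
+ # enforce determiner/noun agreement):
+ #
+ # >>> cp = FeatureChartParser(demo_grammar())
+ # >>> trees = list(cp.parse("I saw a dog".split()))
+ # >>> len(trees)
+ # 1
+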
617
+
618
+ def demo(
619
+ print_times=True,
620
+ print_grammar=True,
621
+ print_trees=True,
622
+ print_sentence=True,
623
+ trace=1,
624
+ parser=FeatureChartParser,
625
+ sent="I saw John with a dog with my cookie",
626
+ ):
627
+ import sys
628
+ import time
629
+
630
+ print()
631
+ grammar = demo_grammar()
632
+ if print_grammar:
633
+ print(grammar)
634
+ print()
635
+ print("*", parser.__name__)
636
+ if print_sentence:
637
+ print("Sentence:", sent)
638
+ tokens = sent.split()
639
+ t = perf_counter()
640
+ cp = parser(grammar, trace=trace)
641
+ chart = cp.chart_parse(tokens)
642
+ trees = list(chart.parses(grammar.start()))
643
+ if print_times:
644
+ print("Time: %s" % (perf_counter() - t))
645
+ if print_trees:
646
+ for tree in trees:
647
+ print(tree)
648
+ else:
649
+ print("Nr trees:", len(trees))
650
+
651
+
652
+ def run_profile():
653
+ import profile
654
+
655
+ profile.run("for i in range(1): demo()", "/tmp/profile.out")
656
+ import pstats
657
+
658
+ p = pstats.Stats("/tmp/profile.out")
659
+ p.strip_dirs().sort_stats("time", "cum").print_stats(60)
660
+ p.strip_dirs().sort_stats("cum", "time").print_stats(60)
661
+
662
+
663
+ if __name__ == "__main__":
664
+ from nltk.data import load
665
+
666
+ demo()
667
+ print()
668
+ grammar = load("grammars/book_grammars/feat0.fcfg")
669
+ cp = FeatureChartParser(grammar, trace=2)
670
+ sent = "Kim likes children"
671
+ tokens = sent.split()
672
+ trees = cp.parse(tokens)
673
+ for tree in trees:
674
+ print(tree)
env-llmeval/lib/python3.10/site-packages/nltk/parse/generate.py ADDED
@@ -0,0 +1,85 @@
1
+ # Natural Language Toolkit: Generating from a CFG
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Bird <[email protected]>
5
+ # Peter Ljunglöf <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ #
9
+
10
+ import itertools
11
+ import sys
12
+
13
+ from nltk.grammar import Nonterminal
14
+
15
+
16
+ def generate(grammar, start=None, depth=None, n=None):
17
+ """
18
+ Generates an iterator of all sentences from a CFG.
19
+
20
+ :param grammar: The Grammar used to generate sentences.
21
+ :param start: The Nonterminal from which to start generating sentences.
22
+ :param depth: The maximal depth of the generated tree.
23
+ :param n: The maximum number of sentences to return.
24
+ :return: An iterator of lists of terminal tokens.
25
+ """
26
+ if not start:
27
+ start = grammar.start()
28
+ if depth is None:
29
+ depth = sys.maxsize
30
+
31
+ sentences = _generate_all(grammar, [start], depth)  # avoid shadowing the builtin iter
32
+
33
+ if n:
34
+ sentences = itertools.islice(sentences, n)
35
+
36
+ return sentences
37
+
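+ # A quick sketch using the module's demo grammar (defined below):
+ #
+ # >>> from nltk.grammar import CFG
+ # >>> grammar = CFG.fromstring(demo_grammar)
+ # >>> for sentence in generate(grammar, n=3):
+ # ...     print(" ".join(sentence))
+ # the man slept
+ # the man saw the man
+ # the man saw the park
+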
38
+
39
+ def _generate_all(grammar, items, depth):
40
+ if items:
41
+ try:
42
+ for frag1 in _generate_one(grammar, items[0], depth):
43
+ for frag2 in _generate_all(grammar, items[1:], depth):
44
+ yield frag1 + frag2
45
+ except RecursionError as error:
46
+ # Helpful error message while still showing the recursion stack.
47
+ raise RuntimeError(
48
+ "The grammar has rule(s) that yield infinite recursion!"
49
+ ) from error
50
+ else:
51
+ yield []
52
+
53
+
54
+ def _generate_one(grammar, item, depth):
55
+ if depth > 0:
56
+ if isinstance(item, Nonterminal):
57
+ for prod in grammar.productions(lhs=item):
58
+ yield from _generate_all(grammar, prod.rhs(), depth - 1)
59
+ else:
60
+ yield [item]
61
+
62
+
63
+ demo_grammar = """
64
+ S -> NP VP
65
+ NP -> Det N
66
+ PP -> P NP
67
+ VP -> 'slept' | 'saw' NP | 'walked' PP
68
+ Det -> 'the' | 'a'
69
+ N -> 'man' | 'park' | 'dog'
70
+ P -> 'in' | 'with'
71
+ """
72
+
73
+
74
+ def demo(N=23):
75
+ from nltk.grammar import CFG
76
+
77
+ print("Generating the first %d sentences for demo grammar:" % (N,))
78
+ print(demo_grammar)
79
+ grammar = CFG.fromstring(demo_grammar)
80
+ for n, sent in enumerate(generate(grammar, n=N), 1):
81
+ print("%3d. %s" % (n, " ".join(sent)))
82
+
83
+
84
+ if __name__ == "__main__":
85
+ demo()
env-llmeval/lib/python3.10/site-packages/nltk/parse/shiftreduce.py ADDED
@@ -0,0 +1,479 @@
1
+ # Natural Language Toolkit: Shift-Reduce Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from nltk.grammar import Nonterminal
10
+ from nltk.parse.api import ParserI
11
+ from nltk.tree import Tree
12
+
13
+
14
+ ##//////////////////////////////////////////////////////
15
+ ## Shift/Reduce Parser
16
+ ##//////////////////////////////////////////////////////
17
+ class ShiftReduceParser(ParserI):
18
+ """
19
+ A simple bottom-up CFG parser that uses two operations, "shift"
20
+ and "reduce", to find a single parse for a text.
21
+
22
+ ``ShiftReduceParser`` maintains a stack, which records the
23
+ structure of a portion of the text. This stack is a list of
24
+ strings and Trees that collectively cover a portion of
25
+ the text. For example, while parsing the sentence "the dog saw
26
+ the man" with a typical grammar, ``ShiftReduceParser`` will produce
27
+ the following stack, which covers "the dog saw"::
28
+
29
+ [(NP: (Det: 'the') (N: 'dog')), (V: 'saw')]
30
+
31
+ ``ShiftReduceParser`` attempts to extend the stack to cover the
32
+ entire text, and to combine the stack elements into a single tree,
33
+ producing a complete parse for the sentence.
34
+
35
+ Initially, the stack is empty. It is extended to cover the text,
36
+ from left to right, by repeatedly applying two operations:
37
+
38
+ - "shift" moves a token from the beginning of the text to the
39
+ end of the stack.
40
+ - "reduce" uses a CFG production to combine the rightmost stack
41
+ elements into a single Tree.
42
+
43
+ Often, more than one operation can be performed on a given stack.
44
+ In this case, ``ShiftReduceParser`` uses the following heuristics
45
+ to decide which operation to perform:
46
+
47
+ - Only shift if no reductions are available.
48
+ - If multiple reductions are available, then apply the reduction
49
+ whose CFG production is listed earliest in the grammar.
50
+
51
+ Note that these heuristics are not guaranteed to choose an
52
+ operation that leads to a parse of the text. Also, if multiple
53
+ parses exist, ``ShiftReduceParser`` will return at most one of
54
+ them.
55
+
56
+ :see: ``nltk.grammar``
57
+ """
58
+
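+ # A minimal usage sketch (the grammar here is illustrative; any CFG
+ # covering the tokens works):
+ #
+ # >>> from nltk.grammar import CFG
+ # >>> grammar = CFG.fromstring("""
+ # ...     S -> NP VP
+ # ...     NP -> Det N
+ # ...     VP -> V NP
+ # ...     Det -> 'the'
+ # ...     N -> 'dog' | 'man'
+ # ...     V -> 'saw'
+ # ... """)
+ # >>> for tree in ShiftReduceParser(grammar).parse("the dog saw the man".split()):
+ # ...     print(tree)
+ # (S (NP (Det the) (N dog)) (VP (V saw) (NP (Det the) (N man))))
+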
59
+ def __init__(self, grammar, trace=0):
60
+ """
61
+ Create a new ``ShiftReduceParser``, that uses ``grammar`` to
62
+ parse texts.
63
+
64
+ :type grammar: Grammar
65
+ :param grammar: The grammar used to parse texts.
66
+ :type trace: int
67
+ :param trace: The level of tracing that should be used when
68
+ parsing a text. ``0`` will generate no tracing output;
69
+ and higher numbers will produce more verbose tracing
70
+ output.
71
+ """
72
+ self._grammar = grammar
73
+ self._trace = trace
74
+ self._check_grammar()
75
+
76
+ def grammar(self):
77
+ return self._grammar
78
+
79
+ def parse(self, tokens):
80
+ tokens = list(tokens)
81
+ self._grammar.check_coverage(tokens)
82
+
83
+ # initialize the stack.
84
+ stack = []
85
+ remaining_text = tokens
86
+
87
+ # Trace output.
88
+ if self._trace:
89
+ print("Parsing %r" % " ".join(tokens))
90
+ self._trace_stack(stack, remaining_text)
91
+
92
+ # iterate through the text, pushing the token onto
93
+ # the stack, then reducing the stack.
94
+ while len(remaining_text) > 0:
95
+ self._shift(stack, remaining_text)
96
+ while self._reduce(stack, remaining_text):
97
+ pass
98
+
99
+ # Did we reduce everything?
100
+ if len(stack) == 1:
101
+ # Did we end up with the right category?
102
+ if stack[0].label() == self._grammar.start().symbol():
103
+ yield stack[0]
104
+
105
+ def _shift(self, stack, remaining_text):
106
+ """
107
+ Move a token from the beginning of ``remaining_text`` to the
108
+ end of ``stack``.
109
+
110
+ :type stack: list(str and Tree)
111
+ :param stack: A list of strings and Trees, encoding
112
+ the structure of the text that has been parsed so far.
113
+ :type remaining_text: list(str)
114
+ :param remaining_text: The portion of the text that is not yet
115
+ covered by ``stack``.
116
+ :rtype: None
117
+ """
118
+ stack.append(remaining_text[0])
119
+ remaining_text.remove(remaining_text[0])
120
+ if self._trace:
121
+ self._trace_shift(stack, remaining_text)
122
+
123
+ def _match_rhs(self, rhs, rightmost_stack):
124
+ """
125
+ :rtype: bool
126
+ :return: true if the right hand side of a CFG production
127
+ matches the rightmost elements of the stack. ``rhs``
128
+ matches ``rightmost_stack`` if they are the same length,
129
+ and each element of ``rhs`` matches the corresponding
130
+ element of ``rightmost_stack``. A nonterminal element of
131
+ ``rhs`` matches any Tree whose node value is equal
132
+ to the nonterminal's symbol. A terminal element of ``rhs``
133
+ matches any string whose type is equal to the terminal.
134
+ :type rhs: list(terminal and Nonterminal)
135
+ :param rhs: The right hand side of a CFG production.
136
+ :type rightmost_stack: list(string and Tree)
137
+ :param rightmost_stack: The rightmost elements of the parser's
138
+ stack.
139
+ """
140
+
141
+ if len(rightmost_stack) != len(rhs):
142
+ return False
143
+ for i in range(len(rightmost_stack)):
144
+ if isinstance(rightmost_stack[i], Tree):
145
+ if not isinstance(rhs[i], Nonterminal):
146
+ return False
147
+ if rightmost_stack[i].label() != rhs[i].symbol():
148
+ return False
149
+ else:
150
+ if isinstance(rhs[i], Nonterminal):
151
+ return False
152
+ if rightmost_stack[i] != rhs[i]:
153
+ return False
154
+ return True
155
+
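+ # A sketch of what _match_rhs accepts: Tree elements match Nonterminals
+ # with the same symbol, and strings match identical terminals.
+ #
+ # >>> from nltk import CFG, Tree
+ # >>> from nltk.grammar import Nonterminal
+ # >>> p = ShiftReduceParser(CFG.fromstring("S -> 'x'"))
+ # >>> p._match_rhs((Nonterminal("NP"),), [Tree("NP", ["the", "dog"])])
+ # True
+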
156
+ def _reduce(self, stack, remaining_text, production=None):
157
+ """
158
+ Find a CFG production whose right hand side matches the
159
+ rightmost stack elements; and combine those stack elements
160
+ into a single Tree, with the node specified by the
161
+ production's left-hand side. If more than one CFG production
162
+ matches the stack, then use the production that is listed
163
+ earliest in the grammar. The new Tree replaces the
164
+ elements in the stack.
165
+
166
+ :rtype: Production or None
167
+ :return: If a reduction is performed, then return the CFG
168
+ production that the reduction is based on; otherwise,
169
+ return None.
170
+ :type stack: list(string and Tree)
171
+ :param stack: A list of strings and Trees, encoding
172
+ the structure of the text that has been parsed so far.
173
+ :type remaining_text: list(str)
174
+ :param remaining_text: The portion of the text that is not yet
175
+ covered by ``stack``.
176
+ """
177
+ if production is None:
178
+ productions = self._grammar.productions()
179
+ else:
180
+ productions = [production]
181
+
182
+ # Try each production, in order.
183
+ for production in productions:
184
+ rhslen = len(production.rhs())
185
+
186
+ # check if the RHS of a production matches the top of the stack
187
+ if self._match_rhs(production.rhs(), stack[-rhslen:]):
188
+
189
+ # combine the tree to reflect the reduction
190
+ tree = Tree(production.lhs().symbol(), stack[-rhslen:])
191
+ stack[-rhslen:] = [tree]
192
+
193
+ # We reduced something
194
+ if self._trace:
195
+ self._trace_reduce(stack, production, remaining_text)
196
+ return production
197
+
198
+ # We didn't reduce anything
199
+ return None
200
+
201
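+    # For example (hypothetical values): with the production NP -> Det N and
+    # a stack ending in [..., Tree('Det', ['a']), Tree('N', ['man'])], the
+    # two top elements are replaced by a single
+    # Tree('NP', [Tree('Det', ['a']), Tree('N', ['man'])]).
+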
+    def trace(self, trace=2):
+        """
+        Set the level of tracing output that should be generated when
+        parsing a text.
+
+        :type trace: int
+        :param trace: The trace level.  A trace level of ``0`` will
+            generate no tracing output; and higher trace levels will
+            produce more verbose tracing output.
+        :rtype: None
+        """
+        # 1: just show shifts.
+        # 2: show shifts & reduces
+        # 3: display which tokens & productions are shifted/reduced
+        self._trace = trace
+
+    def _trace_stack(self, stack, remaining_text, marker=" "):
+        """
+        Print trace output displaying the given stack and text.
+
+        :rtype: None
+        :param marker: A character that is printed to the left of the
+            stack.  This is used with trace level 2 to print 'S'
+            before shifted stacks and 'R' before reduced stacks.
+        """
+        s = " " + marker + " [ "
+        for elt in stack:
+            if isinstance(elt, Tree):
+                s += repr(Nonterminal(elt.label())) + " "
+            else:
+                s += repr(elt) + " "
+        s += "* " + " ".join(remaining_text) + "]"
+        print(s)
+
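+    # With the demo grammar, a traced line looks roughly like:
+    #   S [ 'I' * saw a man in the park]
+    # where everything left of '*' is the stack and everything right of it
+    # is the remaining text.
+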
+    def _trace_shift(self, stack, remaining_text):
+        """
+        Print trace output displaying that a token has been shifted.
+
+        :rtype: None
+        """
+        if self._trace > 2:
+            print("Shift %r:" % stack[-1])
+        if self._trace == 2:
+            self._trace_stack(stack, remaining_text, "S")
+        elif self._trace > 0:
+            self._trace_stack(stack, remaining_text)
+
+    def _trace_reduce(self, stack, production, remaining_text):
+        """
+        Print trace output displaying that ``production`` was used to
+        reduce ``stack``.
+
+        :rtype: None
+        """
+        if self._trace > 2:
+            # The RHS may contain Nonterminals, so stringify before joining.
+            rhs = " ".join(str(elt) for elt in production.rhs())
+            print(f"Reduce {production.lhs()!r} <- {rhs}")
+        if self._trace == 2:
+            self._trace_stack(stack, remaining_text, "R")
+        elif self._trace > 1:
+            self._trace_stack(stack, remaining_text)
+
+    def _check_grammar(self):
+        """
+        Check to make sure that all of the CFG productions are
+        potentially useful.  If any productions can never be used,
+        then print a warning.
+
+        :rtype: None
+        """
+        productions = self._grammar.productions()
+
+        # Any production whose RHS is an extension of another production's RHS
+        # will never be used.
+        for i in range(len(productions)):
+            for j in range(i + 1, len(productions)):
+                rhs1 = productions[i].rhs()
+                rhs2 = productions[j].rhs()
+                if rhs1[: len(rhs2)] == rhs2:
+                    print("Warning: %r will never be used" % productions[i])
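+        # For example (hypothetical grammar): if ``VP -> V NP PP`` is listed
+        # before ``VP -> V NP``, the longer production is flagged: whenever
+        # V and NP sit on top of the stack, the shorter production reduces
+        # them first, so the sequence V NP PP can never appear.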
+
+
+ ##//////////////////////////////////////////////////////
+ ## Stepping Shift/Reduce Parser
+ ##//////////////////////////////////////////////////////
+class SteppingShiftReduceParser(ShiftReduceParser):
+    """
+    A ``ShiftReduceParser`` that allows you to step through the parsing
+    process, performing a single operation at a time.  It also allows
+    you to change the parser's grammar midway through parsing a text.
+
+    The ``initialize`` method is used to start parsing a text.
+    ``shift`` performs a single shift operation, and ``reduce`` performs
+    a single reduce operation.  ``step`` will perform a single reduce
+    operation if possible; otherwise, it will perform a single shift
+    operation.  ``parses`` returns an iterator of the parses that have
+    been found by the parser.
+
+    :ivar _history: A list of ``(stack, remaining_text)`` pairs,
+        containing all of the previous states of the parser.  This
+        history is used to implement the ``undo`` operation.
+    :see: ``nltk.grammar``
+    """
+
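+    # A minimal usage sketch (assuming a CFG bound to ``grammar``):
+    #
+    #     srp = SteppingShiftReduceParser(grammar)
+    #     srp.initialize("I saw a man".split())
+    #     while srp.step():      # reduce if possible, otherwise shift
+    #         pass
+    #     trees = list(srp.parses())
+    #
+    # ``shift()``, ``reduce()`` and ``undo()`` can also be called directly
+    # to explore alternative derivations by hand.
+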
+    def __init__(self, grammar, trace=0):
+        super().__init__(grammar, trace)
+        self._stack = None
+        self._remaining_text = None
+        self._history = []
+
+    def parse(self, tokens):
+        tokens = list(tokens)
+        self.initialize(tokens)
+        while self.step():
+            pass
+        return self.parses()
+
+    def stack(self):
+        """
+        :return: The parser's stack.
+        :rtype: list(str and Tree)
+        """
+        return self._stack
+
+    def remaining_text(self):
+        """
+        :return: The portion of the text that is not yet covered by the
+            stack.
+        :rtype: list(str)
+        """
+        return self._remaining_text
+
+    def initialize(self, tokens):
+        """
+        Start parsing a given text.  This sets the parser's stack to
+        ``[]`` and sets its remaining text to ``tokens``.
+        """
+        self._stack = []
+        self._remaining_text = tokens
+        self._history = []
+
+    def step(self):
+        """
+        Perform a single parsing operation.  If a reduction is
+        possible, then perform that reduction, and return the
+        production that it is based on.  Otherwise, if a shift is
+        possible, then perform it, and return True.  Otherwise,
+        return False.
+
+        :return: False if no operation was performed; True if a shift was
+            performed; and the CFG production used to reduce if a
+            reduction was performed.
+        :rtype: Production or bool
+        """
+        return self.reduce() or self.shift()
+
+    def shift(self):
+        """
+        Move a token from the beginning of the remaining text to the
+        end of the stack.  If there are no more tokens in the
+        remaining text, then do nothing.
+
+        :return: True if the shift operation was successful.
+        :rtype: bool
+        """
+        if len(self._remaining_text) == 0:
+            return False
+        self._history.append((self._stack[:], self._remaining_text[:]))
+        self._shift(self._stack, self._remaining_text)
+        return True
+
+    def reduce(self, production=None):
+        """
+        Use ``production`` to combine the rightmost stack elements into
+        a single Tree.  If ``production`` does not match the
+        rightmost stack elements, then do nothing.
+
+        :return: The production used to reduce the stack, if a
+            reduction was performed.  If no reduction was performed,
+            return None.
+
+        :rtype: Production or None
+        """
+        self._history.append((self._stack[:], self._remaining_text[:]))
+        return_val = self._reduce(self._stack, self._remaining_text, production)
+
+        if not return_val:
+            self._history.pop()
+        return return_val
+
+    def undo(self):
+        """
+        Return the parser to its state before the most recent
+        shift or reduce operation.  Calling ``undo`` repeatedly returns
+        the parser to successively earlier states.  If no shift or
+        reduce operations have been performed, ``undo`` will make no
+        changes.
+
+        :return: true if an operation was successfully undone.
+        :rtype: bool
+        """
+        if len(self._history) == 0:
+            return False
+        (self._stack, self._remaining_text) = self._history.pop()
+        return True
+
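+    # For example (hypothetical session): after ``srp.shift()`` moves 'saw'
+    # onto the stack, ``srp.undo()`` restores both the stack and the
+    # remaining text to their values before that shift, and returns True.
+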
+    def reducible_productions(self):
+        """
+        :return: A list of the productions for which reductions are
+            available for the current parser state.
+        :rtype: list(Production)
+        """
+        productions = []
+        for production in self._grammar.productions():
+            rhslen = len(production.rhs())
+            if self._match_rhs(production.rhs(), self._stack[-rhslen:]):
+                productions.append(production)
+        return productions
+
+    def parses(self):
+        """
+        :return: An iterator of the parses that have been found by this
+            parser so far.
+        :rtype: iter(Tree)
+        """
+        if (
+            len(self._remaining_text) == 0
+            and len(self._stack) == 1
+            and isinstance(self._stack[0], Tree)
+            and self._stack[0].label() == self._grammar.start().symbol()
+        ):
+            yield self._stack[0]
+
+    # copied from nltk.parser
+
+    def set_grammar(self, grammar):
+        """
+        Change the grammar used to parse texts.
+
+        :param grammar: The new grammar.
+        :type grammar: CFG
+        """
+        self._grammar = grammar
+
+
+ ##//////////////////////////////////////////////////////
+ ## Demonstration Code
+ ##//////////////////////////////////////////////////////
+
+
+def demo():
+    """
+    A demonstration of the shift-reduce parser.
+    """
+
+    from nltk import CFG, parse
+
+    grammar = CFG.fromstring(
+        """
+    S -> NP VP
+    NP -> Det N | Det N PP
+    VP -> V NP | V NP PP
+    PP -> P NP
+    NP -> 'I'
+    N -> 'man' | 'park' | 'telescope' | 'dog'
+    Det -> 'the' | 'a'
+    P -> 'in' | 'with'
+    V -> 'saw'
+    """
+    )
+
+    sent = "I saw a man in the park".split()
+
+    parser = parse.ShiftReduceParser(grammar, trace=2)
+    for p in parser.parse(sent):
+        print(p)
+
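+# With trace=2, each shift is echoed as a line marked 'S' and each reduce
+# as a line marked 'R'; since the strategy is greedy, the parser finds at
+# most one tree per sentence.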
+
+if __name__ == "__main__":
+    demo()
env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (453 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/all.cpython-310.pyc ADDED
Binary file (1.09 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/childes_fixt.cpython-310.pyc ADDED
Binary file (602 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (1.02 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/gluesemantics_malt_fixt.cpython-310.pyc ADDED
Binary file (518 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/__pycache__/probability_fixt.cpython-310.pyc ADDED
Binary file (337 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_aline.cpython-310.pyc ADDED
Binary file (1.12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_bllip.cpython-310.pyc ADDED
Binary file (1.69 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_brill.cpython-310.pyc ADDED
Binary file (1.31 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfd_mutation.cpython-310.pyc ADDED
Binary file (1.44 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfg2chomsky.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_chunk.cpython-310.pyc ADDED
Binary file (1.94 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_classify.cpython-310.pyc ADDED
Binary file (1.48 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_collocations.cpython-310.pyc ADDED
Binary file (2.61 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_concordance.cpython-310.pyc ADDED
Binary file (5.11 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corenlp.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpora.cpython-310.pyc ADDED
Binary file (7.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpus_views.cpython-310.pyc ADDED
Binary file (1.66 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_data.cpython-310.pyc ADDED
Binary file (765 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_disagreement.cpython-310.pyc ADDED
Binary file (2.54 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_downloader.cpython-310.pyc ADDED
Binary file (923 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_freqdist.cpython-310.pyc ADDED
Binary file (457 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_hmm.cpython-310.pyc ADDED
Binary file (2.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json2csv_corpus.cpython-310.pyc ADDED
Binary file (4.99 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json_serialization.cpython-310.pyc ADDED
Binary file (3.43 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_metrics.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_naivebayes.cpython-310.pyc ADDED
Binary file (927 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_nombank.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pl196x.cpython-310.pyc ADDED
Binary file (812 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pos_tag.cpython-310.pyc ADDED
Binary file (2.36 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc ADDED
Binary file (2.25 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_rte_classify.cpython-310.pyc ADDED
Binary file (3.17 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_seekable_unicode_stream_reader.cpython-310.pyc ADDED
Binary file (2.04 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_senna.cpython-310.pyc ADDED
Binary file (3.29 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_stem.cpython-310.pyc ADDED
Binary file (5.59 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tag.cpython-310.pyc ADDED
Binary file (739 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tgrep.cpython-310.pyc ADDED
Binary file (20.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tokenize.cpython-310.pyc ADDED
Binary file (18.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_twitter_auth.cpython-310.pyc ADDED
Binary file (2.37 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_util.cpython-310.pyc ADDED
Binary file (1.49 kB). View file