applied-ai-018 committed on
Commit 750c89d · verified · 1 Parent(s): 0479309

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/nltk/parse/api.py +72 -0
  22. env-llmeval/lib/python3.10/site-packages/nltk/parse/corenlp.py +800 -0
  23. env-llmeval/lib/python3.10/site-packages/nltk/parse/malt.py +393 -0
  24. env-llmeval/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py +772 -0
  25. env-llmeval/lib/python3.10/site-packages/nltk/parse/pchart.py +579 -0
  26. env-llmeval/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py +716 -0
  27. env-llmeval/lib/python3.10/site-packages/nltk/parse/recursivedescent.py +684 -0
  28. env-llmeval/lib/python3.10/site-packages/nltk/parse/stanford.py +470 -0
  29. env-llmeval/lib/python3.10/site-packages/nltk/parse/transitionparser.py +794 -0
  30. env-llmeval/lib/python3.10/site-packages/nltk/parse/util.py +234 -0
  31. env-llmeval/lib/python3.10/site-packages/nltk/parse/viterbi.py +453 -0
  32. env-llmeval/lib/python3.10/site-packages/nltk/tag/__init__.py +184 -0
  33. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/nltk/tag/api.py +296 -0
  48. env-llmeval/lib/python3.10/site-packages/nltk/tag/brill.py +449 -0
  49. env-llmeval/lib/python3.10/site-packages/nltk/tag/brill_trainer.py +629 -0
  50. env-llmeval/lib/python3.10/site-packages/nltk/tag/crf.py +207 -0
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.93 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc ADDED
Binary file (2.64 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc ADDED
Binary file (7.15 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc ADDED
Binary file (53.9 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc ADDED
Binary file (26 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc ADDED
Binary file (29.6 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc ADDED
Binary file (15.7 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc ADDED
Binary file (4.04 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc ADDED
Binary file (18.8 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc ADDED
Binary file (2.14 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc ADDED
Binary file (10.1 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc ADDED
Binary file (21.6 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc ADDED
Binary file (17.2 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc ADDED
Binary file (17.9 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc ADDED
Binary file (23.7 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc ADDED
Binary file (15.5 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc ADDED
Binary file (17.4 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc ADDED
Binary file (21.3 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc ADDED
Binary file (7.37 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc ADDED
Binary file (15.7 kB)
env-llmeval/lib/python3.10/site-packages/nltk/parse/api.py ADDED
@@ -0,0 +1,72 @@
+ # Natural Language Toolkit: Parser API
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Steven Bird <[email protected]>
+ #         Edward Loper <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ #
+
+ import itertools
+
+ from nltk.internals import overridden
+
+
+ class ParserI:
+     """
+     A processing class for deriving trees that represent possible
+     structures for a sequence of tokens. These tree structures are
+     known as "parses". Typically, parsers are used to derive syntax
+     trees for sentences. But parsers can also be used to derive other
+     kinds of tree structure, such as morphological trees and discourse
+     structures.
+
+     Subclasses must define:
+       - at least one of: ``parse()``, ``parse_sents()``.
+
+     Subclasses may define:
+       - ``grammar()``
+     """
+
+     def grammar(self):
+         """
+         :return: The grammar used by this parser.
+         """
+         raise NotImplementedError()
+
+     def parse(self, sent, *args, **kwargs):
+         """
+         :return: An iterator that generates parse trees for the sentence.
+             When possible this list is sorted from most likely to least likely.
+
+         :param sent: The sentence to be parsed
+         :type sent: list(str)
+         :rtype: iter(Tree)
+         """
+         if overridden(self.parse_sents):
+             return next(self.parse_sents([sent], *args, **kwargs))
+         elif overridden(self.parse_one):
+             return (
+                 tree
+                 for tree in [self.parse_one(sent, *args, **kwargs)]
+                 if tree is not None
+             )
+         elif overridden(self.parse_all):
+             return iter(self.parse_all(sent, *args, **kwargs))
+         else:
+             raise NotImplementedError()
+
+     def parse_sents(self, sents, *args, **kwargs):
+         """
+         Apply ``self.parse()`` to each element of ``sents``.
+         :rtype: iter(iter(Tree))
+         """
+         return (self.parse(sent, *args, **kwargs) for sent in sents)
+
+     def parse_all(self, sent, *args, **kwargs):
+         """:rtype: list(Tree)"""
+         return list(self.parse(sent, *args, **kwargs))
+
+     def parse_one(self, sent, *args, **kwargs):
+         """:rtype: Tree or None"""
+         return next(self.parse(sent, *args, **kwargs), None)
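
As a quick illustration of the interface this file adds (not part of the commit itself): a minimal sketch of a hypothetical subclass, here called EchoParser, that overrides only parse(); the inherited parse_one() and parse_all() defaults then behave as documented above.

    from nltk.parse.api import ParserI
    from nltk.tree import Tree

    class EchoParser(ParserI):
        # Hypothetical toy parser, for illustration only: it wraps the
        # tokens in a single flat (S ...) tree instead of real parsing.
        def parse(self, sent, *args, **kwargs):
            # A real parser would yield candidate trees, most likely first.
            yield Tree("S", list(sent))

    parser = EchoParser()
    print(parser.parse_one("the dog barks".split()))       # (S the dog barks)
    print(len(parser.parse_all("the dog barks".split())))  # 1
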
env-llmeval/lib/python3.10/site-packages/nltk/parse/corenlp.py ADDED
@@ -0,0 +1,800 @@
1
+ # Natural Language Toolkit: Interface to the CoreNLP REST API.
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Dmitrijs Milajevs <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import json
10
+ import os # required for doctests
11
+ import re
12
+ import socket
13
+ import time
14
+ from typing import List, Tuple
15
+
16
+ from nltk.internals import _java_options, config_java, find_jar_iter, java
17
+ from nltk.parse.api import ParserI
18
+ from nltk.parse.dependencygraph import DependencyGraph
19
+ from nltk.tag.api import TaggerI
20
+ from nltk.tokenize.api import TokenizerI
21
+ from nltk.tree import Tree
22
+
23
+ _stanford_url = "https://stanfordnlp.github.io/CoreNLP/"
24
+
25
+
26
+ class CoreNLPServerError(EnvironmentError):
27
+ """Exceptions associated with the Core NLP server."""
28
+
29
+
30
+ def try_port(port=0):
31
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
32
+ sock.bind(("", port))
33
+
34
+ p = sock.getsockname()[1]
35
+ sock.close()
36
+
37
+ return p
38
+
39
+
40
+ class CoreNLPServer:
41
+
42
+ _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)\.(\d+)\.(\d+)-models\.jar"
43
+ _JAR = r"stanford-corenlp-(\d+)\.(\d+)\.(\d+)\.jar"
44
+
45
+ def __init__(
46
+ self,
47
+ path_to_jar=None,
48
+ path_to_models_jar=None,
49
+ verbose=False,
50
+ java_options=None,
51
+ corenlp_options=None,
52
+ port=None,
53
+ ):
54
+
55
+ if corenlp_options is None:
56
+ corenlp_options = ["-preload", "tokenize,ssplit,pos,lemma,parse,depparse"]
57
+
58
+ jars = list(
59
+ find_jar_iter(
60
+ self._JAR,
61
+ path_to_jar,
62
+ env_vars=("CORENLP",),
63
+ searchpath=(),
64
+ url=_stanford_url,
65
+ verbose=verbose,
66
+ is_regex=True,
67
+ )
68
+ )
69
+
70
+ # find the most recent code and model jar
71
+ stanford_jar = max(jars, key=lambda model_name: re.match(self._JAR, model_name))
72
+
73
+ if port is None:
74
+ try:
75
+ port = try_port(9000)
76
+ except OSError:
77
+ port = try_port()
78
+ corenlp_options.extend(["-port", str(port)])
79
+ else:
80
+ try_port(port)
81
+ corenlp_options.extend(["-port", str(port)])
82
+
83
+ self.url = f"http://localhost:{port}"
84
+
85
+ model_jar = max(
86
+ find_jar_iter(
87
+ self._MODEL_JAR_PATTERN,
88
+ path_to_models_jar,
89
+ env_vars=("CORENLP_MODELS",),
90
+ searchpath=(),
91
+ url=_stanford_url,
92
+ verbose=verbose,
93
+ is_regex=True,
94
+ ),
95
+ key=lambda model_name: re.match(self._MODEL_JAR_PATTERN, model_name),
96
+ )
97
+
98
+ self.verbose = verbose
99
+
100
+ self._classpath = stanford_jar, model_jar
101
+
102
+ self.corenlp_options = corenlp_options
103
+ self.java_options = java_options or ["-mx2g"]
104
+
105
+ def start(self, stdout="devnull", stderr="devnull"):
106
+ """Starts the CoreNLP server
107
+
108
+ :param stdout, stderr: Specifies where CoreNLP output is redirected. Valid values are 'devnull', 'stdout', 'pipe'
109
+ """
110
+ import requests
111
+
112
+ cmd = ["edu.stanford.nlp.pipeline.StanfordCoreNLPServer"]
113
+
114
+ if self.corenlp_options:
115
+ cmd.extend(self.corenlp_options)
116
+
117
+ # Configure java.
118
+ default_options = " ".join(_java_options)
119
+ config_java(options=self.java_options, verbose=self.verbose)
120
+
121
+ try:
122
+ self.popen = java(
123
+ cmd,
124
+ classpath=self._classpath,
125
+ blocking=False,
126
+ stdout=stdout,
127
+ stderr=stderr,
128
+ )
129
+ finally:
130
+ # Return java configurations to their default values.
131
+ config_java(options=default_options, verbose=self.verbose)
132
+
133
+ # Check that the server is still running.
134
+ returncode = self.popen.poll()
135
+ if returncode is not None:
136
+ _, stderrdata = self.popen.communicate()
137
+ raise CoreNLPServerError(
138
+ returncode,
139
+ "Could not start the server. "
140
+ "The error was: {}".format(stderrdata.decode("ascii")),
141
+ )
142
+
143
+ for i in range(30):
144
+ try:
145
+ response = requests.get(requests.compat.urljoin(self.url, "live"))
146
+ except requests.exceptions.ConnectionError:
147
+ time.sleep(1)
148
+ else:
149
+ if response.ok:
150
+ break
151
+ else:
152
+ raise CoreNLPServerError("Could not connect to the server.")
153
+
154
+ for i in range(60):
155
+ try:
156
+ response = requests.get(requests.compat.urljoin(self.url, "ready"))
157
+ except requests.exceptions.ConnectionError:
158
+ time.sleep(1)
159
+ else:
160
+ if response.ok:
161
+ break
162
+ else:
163
+ raise CoreNLPServerError("The server is not ready.")
164
+
165
+ def stop(self):
166
+ self.popen.terminate()
167
+ self.popen.wait()
168
+
169
+ def __enter__(self):
170
+ self.start()
171
+
172
+ return self
173
+
174
+ def __exit__(self, exc_type, exc_val, exc_tb):
175
+ self.stop()
176
+ return False
177
+
178
+
179
+ class GenericCoreNLPParser(ParserI, TokenizerI, TaggerI):
180
+ """Interface to the CoreNLP Parser."""
181
+
182
+ def __init__(
183
+ self,
184
+ url="http://localhost:9000",
185
+ encoding="utf8",
186
+ tagtype=None,
187
+ strict_json=True,
188
+ ):
189
+ import requests
190
+
191
+ self.url = url
192
+ self.encoding = encoding
193
+
194
+ if tagtype not in ["pos", "ner", None]:
195
+ raise ValueError("tagtype must be either 'pos', 'ner' or None")
196
+
197
+ self.tagtype = tagtype
198
+ self.strict_json = strict_json
199
+
200
+ self.session = requests.Session()
201
+
202
+ def parse_sents(self, sentences, *args, **kwargs):
203
+ """Parse multiple sentences.
204
+
205
+ Takes multiple sentences as a list where each sentence is a list of
206
+ words. Each sentence will be automatically tagged with this
207
+ CoreNLPParser instance's tagger.
208
+
209
+ If whitespace exists inside a token, then the token will be treated as
210
+ several tokens.
211
+
212
+ :param sentences: Input sentences to parse
213
+ :type sentences: list(list(str))
214
+ :rtype: iter(iter(Tree))
215
+ """
216
+ # Converting list(list(str)) -> list(str)
217
+ sentences = (" ".join(words) for words in sentences)
218
+ return self.raw_parse_sents(sentences, *args, **kwargs)
219
+
220
+ def raw_parse(self, sentence, properties=None, *args, **kwargs):
221
+ """Parse a sentence.
222
+
223
+ Takes a sentence as a string; before parsing, it will be automatically
224
+ tokenized and tagged by the CoreNLP Parser.
225
+
226
+ :param sentence: Input sentence to parse
227
+ :type sentence: str
228
+ :rtype: iter(Tree)
229
+ """
230
+ default_properties = {"tokenize.whitespace": "false"}
231
+ default_properties.update(properties or {})
232
+
233
+ return next(
234
+ self.raw_parse_sents(
235
+ [sentence], properties=default_properties, *args, **kwargs
236
+ )
237
+ )
238
+
239
+ def api_call(self, data, properties=None, timeout=60):
240
+ default_properties = {
241
+ "outputFormat": "json",
242
+ "annotators": "tokenize,pos,lemma,ssplit,{parser_annotator}".format(
243
+ parser_annotator=self.parser_annotator
244
+ ),
245
+ }
246
+
247
+ default_properties.update(properties or {})
248
+
249
+ response = self.session.post(
250
+ self.url,
251
+ params={"properties": json.dumps(default_properties)},
252
+ data=data.encode(self.encoding),
253
+ headers={"Content-Type": f"text/plain; charset={self.encoding}"},
254
+ timeout=timeout,
255
+ )
256
+
257
+ response.raise_for_status()
258
+
259
+ return response.json(strict=self.strict_json)
260
+
261
+ def raw_parse_sents(
262
+ self, sentences, verbose=False, properties=None, *args, **kwargs
263
+ ):
264
+ """Parse multiple sentences.
265
+
266
+ Takes multiple sentences as a list of strings. Each sentence will be
267
+ automatically tokenized and tagged.
268
+
269
+ :param sentences: Input sentences to parse.
270
+ :type sentences: list(str)
271
+ :rtype: iter(iter(Tree))
272
+
273
+ """
274
+ default_properties = {
275
+ # Only splits on '\n', never inside the sentence.
276
+ "ssplit.eolonly": "true"
277
+ }
278
+
279
+ default_properties.update(properties or {})
280
+
281
+ """
282
+ for sentence in sentences:
283
+ parsed_data = self.api_call(sentence, properties=default_properties)
284
+
285
+ assert len(parsed_data['sentences']) == 1
286
+
287
+ for parse in parsed_data['sentences']:
288
+ tree = self.make_tree(parse)
289
+ yield iter([tree])
290
+ """
291
+ parsed_data = self.api_call("\n".join(sentences), properties=default_properties)
292
+ for parsed_sent in parsed_data["sentences"]:
293
+ tree = self.make_tree(parsed_sent)
294
+ yield iter([tree])
295
+
296
+ def parse_text(self, text, *args, **kwargs):
297
+ """Parse a piece of text.
298
+
299
+ The text might contain several sentences which will be split by CoreNLP.
300
+
301
+ :param str text: text to be split.
302
+ :returns: an iterable of syntactic structures. # TODO: should it be an iterable of iterables?
303
+
304
+ """
305
+ parsed_data = self.api_call(text, *args, **kwargs)
306
+
307
+ for parse in parsed_data["sentences"]:
308
+ yield self.make_tree(parse)
309
+
310
+ def tokenize(self, text, properties=None):
311
+ """Tokenize a string of text.
312
+
313
+ Skip these tests if CoreNLP is likely not ready.
314
+ >>> from nltk.test.setup_fixt import check_jar
315
+ >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True)
316
+
317
+ The CoreNLP server can be started using the following notation, although
318
+ we recommend the `with CoreNLPServer() as server:` context manager notation
319
+ to ensure that the server is always stopped.
320
+ >>> server = CoreNLPServer()
321
+ >>> server.start()
322
+ >>> parser = CoreNLPParser(url=server.url)
323
+
324
+ >>> text = 'Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\nThanks.'
325
+ >>> list(parser.tokenize(text))
326
+ ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
327
+
328
+ >>> s = "The colour of the wall is blue."
329
+ >>> list(
330
+ ... parser.tokenize(
331
+ ... 'The colour of the wall is blue.',
332
+ ... properties={'tokenize.options': 'americanize=true'},
333
+ ... )
334
+ ... )
335
+ ['The', 'colour', 'of', 'the', 'wall', 'is', 'blue', '.']
336
+ >>> server.stop()
337
+
338
+ """
339
+ default_properties = {"annotators": "tokenize,ssplit"}
340
+
341
+ default_properties.update(properties or {})
342
+
343
+ result = self.api_call(text, properties=default_properties)
344
+
345
+ for sentence in result["sentences"]:
346
+ for token in sentence["tokens"]:
347
+ yield token["originalText"] or token["word"]
348
+
349
+ def tag_sents(self, sentences):
350
+ """
351
+ Tag multiple sentences.
352
+
353
+ Takes multiple sentences as a list where each sentence is a list of
354
+ tokens.
355
+
356
+ :param sentences: Input sentences to tag
357
+ :type sentences: list(list(str))
358
+ :rtype: list(list(tuple(str, str))
359
+ """
360
+ # Converting list(list(str)) -> list(str)
361
+ sentences = (" ".join(words) for words in sentences)
362
+ return [sentences[0] for sentences in self.raw_tag_sents(sentences)]
363
+
364
+ def tag(self, sentence: str) -> List[Tuple[str, str]]:
365
+ """
366
+ Tag a list of tokens.
367
+
368
+ :rtype: list(tuple(str, str))
369
+
370
+ Skip these tests if CoreNLP is likely not ready.
371
+ >>> from nltk.test.setup_fixt import check_jar
372
+ >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True)
373
+
374
+ The CoreNLP server can be started using the following notation, although
375
+ we recommend the `with CoreNLPServer() as server:` context manager notation
376
+ to ensure that the server is always stopped.
377
+ >>> server = CoreNLPServer()
378
+ >>> server.start()
379
+ >>> parser = CoreNLPParser(url=server.url, tagtype='ner')
380
+ >>> tokens = 'Rami Eid is studying at Stony Brook University in NY'.split()
381
+ >>> parser.tag(tokens) # doctest: +NORMALIZE_WHITESPACE
382
+ [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'), ('at', 'O'), ('Stony', 'ORGANIZATION'),
383
+ ('Brook', 'ORGANIZATION'), ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'STATE_OR_PROVINCE')]
384
+
385
+ >>> parser = CoreNLPParser(url=server.url, tagtype='pos')
386
+ >>> tokens = "What is the airspeed of an unladen swallow ?".split()
387
+ >>> parser.tag(tokens) # doctest: +NORMALIZE_WHITESPACE
388
+ [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'),
389
+ ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'),
390
+ ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')]
391
+ >>> server.stop()
392
+ """
393
+ return self.tag_sents([sentence])[0]
394
+
395
+ def raw_tag_sents(self, sentences):
396
+ """
397
+ Tag multiple sentences.
398
+
399
+ Takes multiple sentences as a list where each sentence is a string.
400
+
401
+ :param sentences: Input sentences to tag
402
+ :type sentences: list(str)
403
+ :rtype: list(list(list(tuple(str, str)))
404
+ """
405
+ default_properties = {
406
+ "ssplit.isOneSentence": "true",
407
+ "annotators": "tokenize,ssplit,",
408
+ }
409
+
410
+ # Supports only 'pos' or 'ner' tags.
411
+ assert self.tagtype in ["pos", "ner"]
412
+ default_properties["annotators"] += self.tagtype
413
+ for sentence in sentences:
414
+ tagged_data = self.api_call(sentence, properties=default_properties)
415
+ yield [
416
+ [
417
+ (token["word"], token[self.tagtype])
418
+ for token in tagged_sentence["tokens"]
419
+ ]
420
+ for tagged_sentence in tagged_data["sentences"]
421
+ ]
422
+
423
+
424
+ class CoreNLPParser(GenericCoreNLPParser):
425
+ """
426
+ Skip these tests if CoreNLP is likely not ready.
427
+ >>> from nltk.test.setup_fixt import check_jar
428
+ >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True)
429
+
430
+ The recommended usage of `CoreNLPParser` is using the context manager notation:
431
+ >>> with CoreNLPServer() as server:
432
+ ... parser = CoreNLPParser(url=server.url)
433
+ ... next(
434
+ ... parser.raw_parse('The quick brown fox jumps over the lazy dog.')
435
+ ... ).pretty_print() # doctest: +NORMALIZE_WHITESPACE
436
+ ROOT
437
+ |
438
+ S
439
+ _______________|__________________________
440
+ | VP |
441
+ | _________|___ |
442
+ | | PP |
443
+ | | ________|___ |
444
+ NP | | NP |
445
+ ____|__________ | | _______|____ |
446
+ DT JJ JJ NN VBZ IN DT JJ NN .
447
+ | | | | | | | | | |
448
+ The quick brown fox jumps over the lazy dog .
449
+
450
+ Alternatively, the server can be started using the following notation.
451
+ Note that `CoreNLPServer` does not need to be used if the CoreNLP server is started
452
+ outside of Python.
453
+ >>> server = CoreNLPServer()
454
+ >>> server.start()
455
+ >>> parser = CoreNLPParser(url=server.url)
456
+
457
+ >>> (parse_fox, ), (parse_wolf, ) = parser.raw_parse_sents(
458
+ ... [
459
+ ... 'The quick brown fox jumps over the lazy dog.',
460
+ ... 'The quick grey wolf jumps over the lazy fox.',
461
+ ... ]
462
+ ... )
463
+
464
+ >>> parse_fox.pretty_print() # doctest: +NORMALIZE_WHITESPACE
465
+ ROOT
466
+ |
467
+ S
468
+ _______________|__________________________
469
+ | VP |
470
+ | _________|___ |
471
+ | | PP |
472
+ | | ________|___ |
473
+ NP | | NP |
474
+ ____|__________ | | _______|____ |
475
+ DT JJ JJ NN VBZ IN DT JJ NN .
476
+ | | | | | | | | | |
477
+ The quick brown fox jumps over the lazy dog .
478
+
479
+ >>> parse_wolf.pretty_print() # doctest: +NORMALIZE_WHITESPACE
480
+ ROOT
481
+ |
482
+ S
483
+ _______________|__________________________
484
+ | VP |
485
+ | _________|___ |
486
+ | | PP |
487
+ | | ________|___ |
488
+ NP | | NP |
489
+ ____|_________ | | _______|____ |
490
+ DT JJ JJ NN VBZ IN DT JJ NN .
491
+ | | | | | | | | | |
492
+ The quick grey wolf jumps over the lazy fox .
493
+
494
+ >>> (parse_dog, ), (parse_friends, ) = parser.parse_sents(
495
+ ... [
496
+ ... "I 'm a dog".split(),
497
+ ... "This is my friends ' cat ( the tabby )".split(),
498
+ ... ]
499
+ ... )
500
+
501
+ >>> parse_dog.pretty_print() # doctest: +NORMALIZE_WHITESPACE
502
+ ROOT
503
+ |
504
+ S
505
+ _______|____
506
+ | VP
507
+ | ________|___
508
+ NP | NP
509
+ | | ___|___
510
+ PRP VBP DT NN
511
+ | | | |
512
+ I 'm a dog
513
+
514
+ >>> parse_friends.pretty_print() # doctest: +NORMALIZE_WHITESPACE
515
+ ROOT
516
+ |
517
+ S
518
+ ____|___________
519
+ | VP
520
+ | ___________|_____________
521
+ | | NP
522
+ | | _______|________________________
523
+ | | NP | | |
524
+ | | _____|_______ | | |
525
+ NP | NP | | NP |
526
+ | | ______|_________ | | ___|____ |
527
+ DT VBZ PRP$ NNS POS NN -LRB- DT NN -RRB-
528
+ | | | | | | | | | |
529
+ This is my friends ' cat -LRB- the tabby -RRB-
530
+
531
+ >>> parse_john, parse_mary, = parser.parse_text(
532
+ ... 'John loves Mary. Mary walks.'
533
+ ... )
534
+
535
+ >>> parse_john.pretty_print() # doctest: +NORMALIZE_WHITESPACE
536
+ ROOT
537
+ |
538
+ S
539
+ _____|_____________
540
+ | VP |
541
+ | ____|___ |
542
+ NP | NP |
543
+ | | | |
544
+ NNP VBZ NNP .
545
+ | | | |
546
+ John loves Mary .
547
+
548
+ >>> parse_mary.pretty_print() # doctest: +NORMALIZE_WHITESPACE
549
+ ROOT
550
+ |
551
+ S
552
+ _____|____
553
+ NP VP |
554
+ | | |
555
+ NNP VBZ .
556
+ | | |
557
+ Mary walks .
558
+
559
+ Special cases
560
+
561
+ >>> next(
562
+ ... parser.raw_parse(
563
+ ... 'NASIRIYA, Iraq—Iraqi doctors who treated former prisoner of war '
564
+ ... 'Jessica Lynch have angrily dismissed claims made in her biography '
565
+ ... 'that she was raped by her Iraqi captors.'
566
+ ... )
567
+ ... ).height()
568
+ 14
569
+
570
+ >>> next(
571
+ ... parser.raw_parse(
572
+ ... "The broader Standard & Poor's 500 Index <.SPX> was 0.46 points lower, or "
573
+ ... '0.05 percent, at 997.02.'
574
+ ... )
575
+ ... ).height()
576
+ 11
577
+
578
+ >>> server.stop()
579
+ """
580
+
581
+ _OUTPUT_FORMAT = "penn"
582
+ parser_annotator = "parse"
583
+
584
+ def make_tree(self, result):
585
+ return Tree.fromstring(result["parse"])
586
+
587
+
588
+ class CoreNLPDependencyParser(GenericCoreNLPParser):
589
+ """Dependency parser.
590
+
591
+ Skip these tests if CoreNLP is likely not ready.
592
+ >>> from nltk.test.setup_fixt import check_jar
593
+ >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True)
594
+
595
+ The recommended usage of `CoreNLPParser` is using the context manager notation:
596
+ >>> with CoreNLPServer() as server:
597
+ ... dep_parser = CoreNLPDependencyParser(url=server.url)
598
+ ... parse, = dep_parser.raw_parse(
599
+ ... 'The quick brown fox jumps over the lazy dog.'
600
+ ... )
601
+ ... print(parse.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
602
+ The DT 4 det
603
+ quick JJ 4 amod
604
+ brown JJ 4 amod
605
+ fox NN 5 nsubj
606
+ jumps VBZ 0 ROOT
607
+ over IN 9 case
608
+ the DT 9 det
609
+ lazy JJ 9 amod
610
+ dog NN 5 obl
611
+ . . 5 punct
612
+
613
+ Alternatively, the server can be started using the following notation.
614
+ Note that `CoreNLPServer` does not need to be used if the CoreNLP server is started
615
+ outside of Python.
616
+ >>> server = CoreNLPServer()
617
+ >>> server.start()
618
+ >>> dep_parser = CoreNLPDependencyParser(url=server.url)
619
+ >>> parse, = dep_parser.raw_parse('The quick brown fox jumps over the lazy dog.')
620
+ >>> print(parse.tree()) # doctest: +NORMALIZE_WHITESPACE
621
+ (jumps (fox The quick brown) (dog over the lazy) .)
622
+
623
+ >>> for governor, dep, dependent in parse.triples():
624
+ ... print(governor, dep, dependent) # doctest: +NORMALIZE_WHITESPACE
625
+ ('jumps', 'VBZ') nsubj ('fox', 'NN')
626
+ ('fox', 'NN') det ('The', 'DT')
627
+ ('fox', 'NN') amod ('quick', 'JJ')
628
+ ('fox', 'NN') amod ('brown', 'JJ')
629
+ ('jumps', 'VBZ') obl ('dog', 'NN')
630
+ ('dog', 'NN') case ('over', 'IN')
631
+ ('dog', 'NN') det ('the', 'DT')
632
+ ('dog', 'NN') amod ('lazy', 'JJ')
633
+ ('jumps', 'VBZ') punct ('.', '.')
634
+
635
+ >>> (parse_fox, ), (parse_dog, ) = dep_parser.raw_parse_sents(
636
+ ... [
637
+ ... 'The quick brown fox jumps over the lazy dog.',
638
+ ... 'The quick grey wolf jumps over the lazy fox.',
639
+ ... ]
640
+ ... )
641
+ >>> print(parse_fox.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
642
+ The DT 4 det
643
+ quick JJ 4 amod
644
+ brown JJ 4 amod
645
+ fox NN 5 nsubj
646
+ jumps VBZ 0 ROOT
647
+ over IN 9 case
648
+ the DT 9 det
649
+ lazy JJ 9 amod
650
+ dog NN 5 obl
651
+ . . 5 punct
652
+
653
+ >>> print(parse_dog.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
654
+ The DT 4 det
655
+ quick JJ 4 amod
656
+ grey JJ 4 amod
657
+ wolf NN 5 nsubj
658
+ jumps VBZ 0 ROOT
659
+ over IN 9 case
660
+ the DT 9 det
661
+ lazy JJ 9 amod
662
+ fox NN 5 obl
663
+ . . 5 punct
664
+
665
+ >>> (parse_dog, ), (parse_friends, ) = dep_parser.parse_sents(
666
+ ... [
667
+ ... "I 'm a dog".split(),
668
+ ... "This is my friends ' cat ( the tabby )".split(),
669
+ ... ]
670
+ ... )
671
+ >>> print(parse_dog.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
672
+ I PRP 4 nsubj
673
+ 'm VBP 4 cop
674
+ a DT 4 det
675
+ dog NN 0 ROOT
676
+
677
+ >>> print(parse_friends.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
678
+ This DT 6 nsubj
679
+ is VBZ 6 cop
680
+ my PRP$ 4 nmod:poss
681
+ friends NNS 6 nmod:poss
682
+ ' POS 4 case
683
+ cat NN 0 ROOT
684
+ ( -LRB- 9 punct
685
+ the DT 9 det
686
+ tabby NN 6 dep
687
+ ) -RRB- 9 punct
688
+
689
+ >>> parse_john, parse_mary, = dep_parser.parse_text(
690
+ ... 'John loves Mary. Mary walks.'
691
+ ... )
692
+
693
+ >>> print(parse_john.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
694
+ John NNP 2 nsubj
695
+ loves VBZ 0 ROOT
696
+ Mary NNP 2 obj
697
+ . . 2 punct
698
+
699
+ >>> print(parse_mary.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE
700
+ Mary NNP 2 nsubj
701
+ walks VBZ 0 ROOT
702
+ . . 2 punct
703
+
704
+ Special cases
705
+
706
+ Non-breaking space inside of a token.
707
+
708
+ >>> len(
709
+ ... next(
710
+ ... dep_parser.raw_parse(
711
+ ... 'Anhalt said children typically treat a 20-ounce soda bottle as one '
712
+ ... 'serving, while it actually contains 2 1/2 servings.'
713
+ ... )
714
+ ... ).nodes
715
+ ... )
716
+ 23
717
+
718
+ Phone numbers.
719
+
720
+ >>> len(
721
+ ... next(
722
+ ... dep_parser.raw_parse('This is not going to crash: 01 111 555.')
723
+ ... ).nodes
724
+ ... )
725
+ 10
726
+
727
+ >>> print(
728
+ ... next(
729
+ ... dep_parser.raw_parse('The underscore _ should not simply disappear.')
730
+ ... ).to_conll(4)
731
+ ... ) # doctest: +NORMALIZE_WHITESPACE
732
+ The DT 2 det
733
+ underscore NN 7 nsubj
734
+ _ NFP 7 punct
735
+ should MD 7 aux
736
+ not RB 7 advmod
737
+ simply RB 7 advmod
738
+ disappear VB 0 ROOT
739
+ . . 7 punct
740
+
741
+ >>> print(
742
+ ... next(
743
+ ... dep_parser.raw_parse(
744
+ ... 'for all of its insights into the dream world of teen life , and its electronic expression through '
745
+ ... 'cyber culture , the film gives no quarter to anyone seeking to pull a cohesive story out of its 2 '
746
+ ... '1/2-hour running time .'
747
+ ... )
748
+ ... ).to_conll(4)
749
+ ... ) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
750
+ for IN 2 case
751
+ all DT 24 obl
752
+ of IN 5 case
753
+ its PRP$ 5 nmod:poss
754
+ insights NNS 2 nmod
755
+ into IN 9 case
756
+ the DT 9 det
757
+ dream NN 9 compound
758
+ world NN 5 nmod
759
+ of IN 12 case
760
+ teen NN 12 compound
761
+ ...
762
+
763
+ >>> server.stop()
764
+ """
765
+
766
+ _OUTPUT_FORMAT = "conll2007"
767
+ parser_annotator = "depparse"
768
+
769
+ def make_tree(self, result):
770
+
771
+ return DependencyGraph(
772
+ (
773
+ " ".join(n_items[1:]) # NLTK expects an iterable of strings...
774
+ for n_items in sorted(transform(result))
775
+ ),
776
+ cell_separator=" ", # To make sure that a non-breaking space is kept inside of a token.
777
+ )
778
+
779
+
780
+ def transform(sentence):
781
+ for dependency in sentence["basicDependencies"]:
782
+
783
+ dependent_index = dependency["dependent"]
784
+ token = sentence["tokens"][dependent_index - 1]
785
+
786
+ # Return values that we don't know as '_'. Also, consider tag and ctag
787
+ # to be equal.
788
+ yield (
789
+ dependent_index,
790
+ "_",
791
+ token["word"],
792
+ token["lemma"],
793
+ token["pos"],
794
+ token["pos"],
795
+ "_",
796
+ str(dependency["governor"]),
797
+ dependency["dep"],
798
+ "_",
799
+ "_",
800
+ )
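
For quick reference, the usage pattern exercised by the doctests above reduces to the short sketch below. It assumes a CoreNLP server is already reachable at http://localhost:9000 (started either via the CoreNLPServer context manager shown above or outside of Python); the example sentence is arbitrary.

    from nltk.parse.corenlp import CoreNLPParser, CoreNLPDependencyParser

    # Constituency parse: raw_parse returns an iterator of nltk.Tree objects.
    parser = CoreNLPParser(url="http://localhost:9000")
    tree = next(parser.raw_parse("The quick brown fox jumps over the lazy dog."))
    tree.pretty_print()

    # Dependency parse: returns DependencyGraph objects instead of Trees.
    dep_parser = CoreNLPDependencyParser(url="http://localhost:9000")
    graph = next(dep_parser.raw_parse("The quick brown fox jumps over the lazy dog."))
    print(graph.to_conll(4))
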
env-llmeval/lib/python3.10/site-packages/nltk/parse/malt.py ADDED
@@ -0,0 +1,393 @@
1
+ # Natural Language Toolkit: Interface to MaltParser
2
+ #
3
+ # Author: Dan Garrette <[email protected]>
4
+ # Contributor: Liling Tan, Mustufain, osamamukhtar11
5
+ #
6
+ # Copyright (C) 2001-2023 NLTK Project
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ import inspect
11
+ import os
12
+ import subprocess
13
+ import sys
14
+ import tempfile
15
+
16
+ from nltk.data import ZipFilePathPointer
17
+ from nltk.internals import find_dir, find_file, find_jars_within_path
18
+ from nltk.parse.api import ParserI
19
+ from nltk.parse.dependencygraph import DependencyGraph
20
+ from nltk.parse.util import taggedsents_to_conll
21
+
22
+
23
+ def malt_regex_tagger():
24
+ from nltk.tag import RegexpTagger
25
+
26
+ _tagger = RegexpTagger(
27
+ [
28
+ (r"\.$", "."),
29
+ (r"\,$", ","),
30
+ (r"\?$", "?"), # fullstop, comma, Qmark
31
+ (r"\($", "("),
32
+ (r"\)$", ")"), # round brackets
33
+ (r"\[$", "["),
34
+ (r"\]$", "]"), # square brackets
35
+ (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers
36
+ (r"(The|the|A|a|An|an)$", "DT"), # articles
37
+ (r"(He|he|She|she|It|it|I|me|Me|You|you)$", "PRP"), # pronouns
38
+ (r"(His|his|Her|her|Its|its)$", "PRP$"), # possessive
39
+ (r"(my|Your|your|Yours|yours)$", "PRP$"), # possessive
40
+ (r"(on|On|in|In|at|At|since|Since)$", "IN"), # time prepositions
41
+ (r"(for|For|ago|Ago|before|Before)$", "IN"), # time prepositions
42
+ (r"(till|Till|until|Until)$", "IN"), # time prepositions
43
+ (r"(by|By|beside|Beside)$", "IN"), # space prepositions
44
+ (r"(under|Under|below|Below)$", "IN"), # space prepositions
45
+ (r"(over|Over|above|Above)$", "IN"), # space prepositions
46
+ (r"(across|Across|through|Through)$", "IN"), # space prepositions
47
+ (r"(into|Into|towards|Towards)$", "IN"), # space prepositions
48
+ (r"(onto|Onto|from|From)$", "IN"), # space prepositions
49
+ (r".*able$", "JJ"), # adjectives
50
+ (r".*ness$", "NN"), # nouns formed from adjectives
51
+ (r".*ly$", "RB"), # adverbs
52
+ (r".*s$", "NNS"), # plural nouns
53
+ (r".*ing$", "VBG"), # gerunds
54
+ (r".*ed$", "VBD"), # past tense verbs
55
+ (r".*", "NN"), # nouns (default)
56
+ ]
57
+ )
58
+ return _tagger.tag
59
+
60
+
61
+ def find_maltparser(parser_dirname):
62
+ """
63
+ A module to find MaltParser .jar file and its dependencies.
64
+ """
65
+ if os.path.exists(parser_dirname): # If a full path is given.
66
+ _malt_dir = parser_dirname
67
+ else: # Try to find path to maltparser directory in environment variables.
68
+ _malt_dir = find_dir(parser_dirname, env_vars=("MALT_PARSER",))
69
+ # Checks that the found directory contains all the necessary .jar files.
70
+ malt_dependencies = ["", "", ""]
71
+ _malt_jars = set(find_jars_within_path(_malt_dir))
72
+ _jars = {os.path.split(jar)[1] for jar in _malt_jars}
73
+ malt_dependencies = {"log4j.jar", "libsvm.jar", "liblinear-1.8.jar"}
74
+
75
+ assert malt_dependencies.issubset(_jars)
76
+ assert any(
77
+ filter(lambda i: i.startswith("maltparser-") and i.endswith(".jar"), _jars)
78
+ )
79
+ return list(_malt_jars)
80
+
81
+
82
+ def find_malt_model(model_filename):
83
+ """
84
+ A module to find pre-trained MaltParser model.
85
+ """
86
+ if model_filename is None:
87
+ return "malt_temp.mco"
88
+ elif os.path.exists(model_filename): # If a full path is given.
89
+ return model_filename
90
+ else: # Try to find path to malt model in environment variables.
91
+ return find_file(model_filename, env_vars=("MALT_MODEL",), verbose=False)
92
+
93
+
94
+ class MaltParser(ParserI):
95
+ """
96
+ A class for dependency parsing with MaltParser. The input is the paths to:
97
+ - (optionally) a maltparser directory
98
+ - (optionally) the path to a pre-trained MaltParser .mco model file
99
+ - (optionally) the tagger to use for POS tagging before parsing
100
+ - (optionally) additional Java arguments
101
+
102
+ Example:
103
+ >>> from nltk.parse import malt
104
+ >>> # With MALT_PARSER and MALT_MODEL environment set.
105
+ >>> mp = malt.MaltParser(model_filename='engmalt.linear-1.7.mco') # doctest: +SKIP
106
+ >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP
107
+ (shot I (elephant an) (in (pajamas my)) .)
108
+ >>> # Without MALT_PARSER and MALT_MODEL environment.
109
+ >>> mp = malt.MaltParser('/home/user/maltparser-1.9.2/', '/home/user/engmalt.linear-1.7.mco') # doctest: +SKIP
110
+ >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP
111
+ (shot I (elephant an) (in (pajamas my)) .)
112
+ """
113
+
114
+ def __init__(
115
+ self,
116
+ parser_dirname="",
117
+ model_filename=None,
118
+ tagger=None,
119
+ additional_java_args=None,
120
+ ):
121
+ """
122
+ An interface for parsing with the Malt Parser.
123
+
124
+ :param parser_dirname: The path to the maltparser directory that
125
+ contains the maltparser-1.x.jar
126
+ :type parser_dirname: str
127
+ :param model_filename: The name of the pre-trained model with .mco file
128
+ extension. If provided, training will not be required.
129
+ (see http://www.maltparser.org/mco/mco.html and
130
+ see http://www.patful.com/chalk/node/185)
131
+ :type model_filename: str
132
+ :param tagger: The tagger used to POS tag the raw string before
133
+ formatting to CONLL format. It should behave like `nltk.pos_tag`
134
+ :type tagger: function
135
+ :param additional_java_args: This is the additional Java arguments that
136
+ one can use when calling Maltparser, usually this is the heapsize
137
+ limits, e.g. `additional_java_args=['-Xmx1024m']`
138
+ (see https://goo.gl/mpDBvQ)
139
+ :type additional_java_args: list
140
+ """
141
+
142
+ # Find all the necessary jar files for MaltParser.
143
+ self.malt_jars = find_maltparser(parser_dirname)
144
+ # Initialize additional java arguments.
145
+ self.additional_java_args = (
146
+ additional_java_args if additional_java_args is not None else []
147
+ )
148
+ # Initialize model.
149
+ self.model = find_malt_model(model_filename)
150
+ self._trained = self.model != "malt_temp.mco"
151
+ # Set the working_dir parameters i.e. `-w` from MaltParser's option.
152
+ self.working_dir = tempfile.gettempdir()
153
+ # Initialize POS tagger.
154
+ self.tagger = tagger if tagger is not None else malt_regex_tagger()
155
+
156
+ def parse_tagged_sents(self, sentences, verbose=False, top_relation_label="null"):
157
+ """
158
+ Use MaltParser to parse multiple POS tagged sentences. Takes multiple
159
+ sentences where each sentence is a list of (word, tag) tuples.
160
+ The sentences must have already been tokenized and tagged.
161
+
162
+ :param sentences: Input sentences to parse
163
+ :type sentence: list(list(tuple(str, str)))
164
+ :return: iter(iter(``DependencyGraph``)) the dependency graph
165
+ representation of each sentence
166
+ """
167
+ if not self._trained:
168
+ raise Exception("Parser has not been trained. Call train() first.")
169
+
170
+ with tempfile.NamedTemporaryFile(
171
+ prefix="malt_input.conll.", dir=self.working_dir, mode="w", delete=False
172
+ ) as input_file:
173
+ with tempfile.NamedTemporaryFile(
174
+ prefix="malt_output.conll.",
175
+ dir=self.working_dir,
176
+ mode="w",
177
+ delete=False,
178
+ ) as output_file:
179
+ # Convert list of sentences to CONLL format.
180
+ for line in taggedsents_to_conll(sentences):
181
+ input_file.write(str(line))
182
+ input_file.close()
183
+
184
+ # Generate command to run maltparser.
185
+ cmd = self.generate_malt_command(
186
+ input_file.name, output_file.name, mode="parse"
187
+ )
188
+
189
+ # This is a maltparser quirk, it needs to be run
190
+ # where the model file is. otherwise it goes into an awkward
191
+ # missing .jars or strange -w working_dir problem.
192
+ _current_path = os.getcwd() # Remembers the current path.
193
+ try: # Change to modelfile path
194
+ os.chdir(os.path.split(self.model)[0])
195
+ except:
196
+ pass
197
+ ret = self._execute(cmd, verbose) # Run command.
198
+ os.chdir(_current_path) # Change back to current path.
199
+
200
+ if ret != 0:
201
+ raise Exception(
202
+ "MaltParser parsing (%s) failed with exit "
203
+ "code %d" % (" ".join(cmd), ret)
204
+ )
205
+
206
+ # Must return iter(iter(Tree))
207
+ with open(output_file.name) as infile:
208
+ for tree_str in infile.read().split("\n\n"):
209
+ yield (
210
+ iter(
211
+ [
212
+ DependencyGraph(
213
+ tree_str, top_relation_label=top_relation_label
214
+ )
215
+ ]
216
+ )
217
+ )
218
+
219
+ os.remove(input_file.name)
220
+ os.remove(output_file.name)
221
+
222
+ def parse_sents(self, sentences, verbose=False, top_relation_label="null"):
223
+ """
224
+ Use MaltParser to parse multiple sentences.
225
+ Takes a list of sentences, where each sentence is a list of words.
226
+ Each sentence will be automatically tagged with this
227
+ MaltParser instance's tagger.
228
+
229
+ :param sentences: Input sentences to parse
230
+ :type sentence: list(list(str))
231
+ :return: iter(DependencyGraph)
232
+ """
233
+ tagged_sentences = (self.tagger(sentence) for sentence in sentences)
234
+ return self.parse_tagged_sents(
235
+ tagged_sentences, verbose, top_relation_label=top_relation_label
236
+ )
237
+
238
+ def generate_malt_command(self, inputfilename, outputfilename=None, mode=None):
239
+ """
240
+ This function generates the maltparser command to use at the terminal.
241
+
242
+ :param inputfilename: path to the input file
243
+ :type inputfilename: str
244
+ :param outputfilename: path to the output file
245
+ :type outputfilename: str
246
+ """
247
+
248
+ cmd = ["java"]
249
+ cmd += self.additional_java_args # Adds additional java arguments
250
+ # Joins classpaths with ";" if on Windows and on Linux/Mac use ":"
251
+ classpaths_separator = ";" if sys.platform.startswith("win") else ":"
252
+ cmd += [
253
+ "-cp",
254
+ classpaths_separator.join(self.malt_jars),
255
+ ] # Adds classpaths for jars
256
+ cmd += ["org.maltparser.Malt"] # Adds the main function.
257
+
258
+ # Adds the model file.
259
+ if os.path.exists(self.model): # when parsing
260
+ cmd += ["-c", os.path.split(self.model)[-1]]
261
+ else: # when learning
262
+ cmd += ["-c", self.model]
263
+
264
+ cmd += ["-i", inputfilename]
265
+ if mode == "parse":
266
+ cmd += ["-o", outputfilename]
267
+ cmd += ["-m", mode] # mode use to generate parses.
268
+ return cmd
269
+
270
+ @staticmethod
271
+ def _execute(cmd, verbose=False):
272
+ output = None if verbose else subprocess.PIPE
273
+ p = subprocess.Popen(cmd, stdout=output, stderr=output)
274
+ return p.wait()
275
+
276
+ def train(self, depgraphs, verbose=False):
277
+ """
278
+ Train MaltParser from a list of ``DependencyGraph`` objects
279
+
280
+ :param depgraphs: list of ``DependencyGraph`` objects for training input data
281
+ :type depgraphs: DependencyGraph
282
+ """
283
+
284
+ # Write the conll_str to malt_train.conll file in /tmp/
285
+ with tempfile.NamedTemporaryFile(
286
+ prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False
287
+ ) as input_file:
288
+ input_str = "\n".join(dg.to_conll(10) for dg in depgraphs)
289
+ input_file.write(str(input_str))
290
+ # Trains the model with the malt_train.conll
291
+ self.train_from_file(input_file.name, verbose=verbose)
292
+ # Removes the malt_train.conll once training finishes.
293
+ os.remove(input_file.name)
294
+
295
+ def train_from_file(self, conll_file, verbose=False):
296
+ """
297
+ Train MaltParser from a file
298
+ :param conll_file: str for the filename of the training input data
299
+ :type conll_file: str
300
+ """
301
+
302
+ # If conll_file is a ZipFilePathPointer,
303
+ # then we need to do some extra massaging
304
+ if isinstance(conll_file, ZipFilePathPointer):
305
+ with tempfile.NamedTemporaryFile(
306
+ prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False
307
+ ) as input_file:
308
+ with conll_file.open() as conll_input_file:
309
+ conll_str = conll_input_file.read()
310
+ input_file.write(str(conll_str))
311
+ return self.train_from_file(input_file.name, verbose=verbose)
312
+
313
+ # Generate command to run maltparser.
314
+ cmd = self.generate_malt_command(conll_file, mode="learn")
315
+ ret = self._execute(cmd, verbose)
316
+ if ret != 0:
317
+ raise Exception(
318
+ "MaltParser training (%s) failed with exit "
319
+ "code %d" % (" ".join(cmd), ret)
320
+ )
321
+ self._trained = True
322
+
323
+
324
+ if __name__ == "__main__":
325
+ """
326
+ A demonstration function to show how NLTK users can use the malt parser API.
327
+
328
+ >>> from nltk import pos_tag
329
+ >>> assert 'MALT_PARSER' in os.environ, str(
330
+ ... "Please set MALT_PARSER in your global environment, e.g.:\n"
331
+ ... "$ export MALT_PARSER='/home/user/maltparser-1.9.2/'")
332
+ >>>
333
+ >>> assert 'MALT_MODEL' in os.environ, str(
334
+ ... "Please set MALT_MODEL in your global environment, e.g.:\n"
335
+ ... "$ export MALT_MODEL='/home/user/engmalt.linear-1.7.mco'")
336
+ >>>
337
+ >>> _dg1_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n"
338
+ ... "2 sees _ VB _ _ 0 ROOT _ _\n"
339
+ ... "3 a _ DT _ _ 4 SPEC _ _\n"
340
+ ... "4 dog _ NN _ _ 2 OBJ _ _\n"
341
+ ... "5 . _ . _ _ 2 PUNCT _ _\n")
342
+ >>>
343
+ >>>
344
+ >>> _dg2_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n"
345
+ ... "2 walks _ VB _ _ 0 ROOT _ _\n"
346
+ ... "3 . _ . _ _ 2 PUNCT _ _\n")
347
+ >>> dg1 = DependencyGraph(_dg1_str)
348
+ >>> dg2 = DependencyGraph(_dg2_str)
349
+ >>> # Initialize a MaltParser object
350
+ >>> mp = MaltParser()
351
+ >>>
352
+ >>> # Trains a model.
353
+ >>> mp.train([dg1,dg2], verbose=False)
354
+ >>> sent1 = ['John','sees','Mary', '.']
355
+ >>> sent2 = ['John', 'walks', 'a', 'dog', '.']
356
+ >>>
357
+ >>> # Parse a single sentence.
358
+ >>> parsed_sent1 = mp.parse_one(sent1)
359
+ >>> parsed_sent2 = mp.parse_one(sent2)
360
+ >>> print(parsed_sent1.tree())
361
+ (sees John Mary .)
362
+ >>> print(parsed_sent2.tree())
363
+ (walks John (dog a) .)
364
+ >>>
365
+ >>> # Parsing multiple sentences.
366
+ >>> sentences = [sent1,sent2]
367
+ >>> parsed_sents = mp.parse_sents(sentences)
368
+ >>> print(next(next(parsed_sents)).tree())
369
+ (sees John Mary .)
370
+ >>> print(next(next(parsed_sents)).tree())
371
+ (walks John (dog a) .)
372
+ >>>
373
+ >>> # Initialize a MaltParser object with an English pre-trained model.
374
+ >>> parser_dirname = 'maltparser-1.9.2'
375
+ >>> model_name = 'engmalt.linear-1.7.mco'
376
+ >>> mp = MaltParser(parser_dirname=parser_dirname, model_filename=model_name, tagger=pos_tag)
377
+ >>> sent1 = 'I shot an elephant in my pajamas .'.split()
378
+ >>> sent2 = 'Time flies like banana .'.split()
379
+ >>> # Parse a single sentence.
380
+ >>> print(mp.parse_one(sent1).tree())
381
+ (shot I (elephant an) (in (pajamas my)) .)
382
+ # Parsing multiple sentences
383
+ >>> sentences = [sent1,sent2]
384
+ >>> parsed_sents = mp.parse_sents(sentences)
385
+ >>> print(next(next(parsed_sents)).tree())
386
+ (shot I (elephant an) (in (pajamas my)) .)
387
+ >>> print(next(next(parsed_sents)).tree())
388
+ (flies Time (like banana) .)
389
+ """
390
+
391
+ import doctest
392
+
393
+ doctest.testmod()
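
One helper in the file above that the demo docstring does not show on its own is malt_regex_tagger(), the heuristic fallback POS tagger used when no tagger argument is given to MaltParser. A minimal sketch follows; the sentence is arbitrary and the expected output is inferred from the regex patterns listed above.

    from nltk.parse.malt import malt_regex_tagger

    tag = malt_regex_tagger()  # returns the .tag method of a RegexpTagger
    print(tag("The dog barked loudly .".split()))
    # expected: [('The', 'DT'), ('dog', 'NN'), ('barked', 'VBD'), ('loudly', 'RB'), ('.', '.')]
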
env-llmeval/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py ADDED
@@ -0,0 +1,772 @@
1
+ # Natural Language Toolkit: Dependency Grammars
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Jason Narad <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ #
9
+
10
+ import logging
11
+ import math
12
+
13
+ from nltk.parse.dependencygraph import DependencyGraph
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+ #################################################################
18
+ # DependencyScorerI - Interface for Graph-Edge Weight Calculation
19
+ #################################################################
20
+
21
+
22
+ class DependencyScorerI:
23
+ """
24
+ A scorer for calculated the weights on the edges of a weighted
25
+ dependency graph. This is used by a
26
+ ``ProbabilisticNonprojectiveParser`` to initialize the edge
27
+ weights of a ``DependencyGraph``. While typically this would be done
28
+ by training a binary classifier, any class that can return a
29
+ multidimensional list representation of the edge weights can
30
+ implement this interface. As such, it has no necessary
31
+ fields.
32
+ """
33
+
34
+ def __init__(self):
35
+ if self.__class__ == DependencyScorerI:
36
+ raise TypeError("DependencyScorerI is an abstract interface")
37
+
38
+ def train(self, graphs):
39
+ """
40
+ :type graphs: list(DependencyGraph)
41
+ :param graphs: A list of dependency graphs to train the scorer.
42
+ Typically the edges present in the graphs can be used as
43
+ positive training examples, and the edges not present as negative
44
+ examples.
45
+ """
46
+ raise NotImplementedError()
47
+
48
+ def score(self, graph):
49
+ """
50
+ :type graph: DependencyGraph
51
+ :param graph: A dependency graph whose set of edges need to be
52
+ scored.
53
+ :rtype: A three-dimensional list of numbers.
54
+ :return: The score is returned in a multidimensional(3) list, such
55
+ that the outer-dimension refers to the head, and the
56
+ inner-dimension refers to the dependencies. For instance,
57
+ scores[0][1] would reference the list of scores corresponding to
58
+ arcs from node 0 to node 1. The node's 'address' field can be used
59
+ to determine its number identification.
60
+
61
+ For further illustration, a score list corresponding to Fig.2 of
62
+ Keith Hall's 'K-best Spanning Tree Parsing' paper::
63
+
64
+ scores = [[[], [5], [1], [1]],
65
+ [[], [], [11], [4]],
66
+ [[], [10], [], [5]],
67
+ [[], [8], [8], []]]
68
+
69
+ When used in conjunction with a MaxEntClassifier, each score would
70
+ correspond to the confidence of a particular edge being classified
71
+ with the positive training examples.
72
+ """
73
+ raise NotImplementedError()
74
+
75
+
76
+ #################################################################
77
+ # NaiveBayesDependencyScorer
78
+ #################################################################
79
+
80
+
81
+ class NaiveBayesDependencyScorer(DependencyScorerI):
82
+ """
83
+ A dependency scorer built around a MaxEnt classifier. In this
84
+ particular class that classifier is a ``NaiveBayesClassifier``.
85
+ It uses head-word, head-tag, child-word, and child-tag features
86
+ for classification.
87
+
88
+ >>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2
89
+
90
+ >>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry]
91
+ >>> npp = ProbabilisticNonprojectiveParser()
92
+ >>> npp.train(graphs, NaiveBayesDependencyScorer())
93
+ >>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc'])
94
+ >>> len(list(parses))
95
+ 1
96
+
97
+ """
98
+
99
+ def __init__(self):
100
+ pass  # Nothing to initialize; training happens in train()
101
+
102
+ def train(self, graphs):
103
+ """
104
+ Trains a ``NaiveBayesClassifier`` using the edges present in
105
+ the graphs list as positive examples and the edges not present as
106
+ negative examples. Uses a feature vector of head-word,
107
+ head-tag, child-word, and child-tag.
108
+
109
+ :type graphs: list(DependencyGraph)
110
+ :param graphs: A list of dependency graphs to train the scorer.
111
+ """
112
+
113
+ from nltk.classify import NaiveBayesClassifier
114
+
115
+ # Create labeled training examples
116
+ labeled_examples = []
117
+ for graph in graphs:
118
+ for head_node in graph.nodes.values():
119
+ for child_index, child_node in graph.nodes.items():
120
+ if child_index in head_node["deps"]:
121
+ label = "T"
122
+ else:
123
+ label = "F"
124
+ labeled_examples.append(
125
+ (
126
+ dict(
127
+ a=head_node["word"],
128
+ b=head_node["tag"],
129
+ c=child_node["word"],
130
+ d=child_node["tag"],
131
+ ),
132
+ label,
133
+ )
134
+ )
135
+
136
+ self.classifier = NaiveBayesClassifier.train(labeled_examples)
137
+
138
+ def score(self, graph):
139
+ """
140
+ Converts the graph into a feature-based representation of
141
+ each edge, and then assigns a score to each based on the
142
+ confidence of the classifier in assigning it to the
143
+ positive label. Scores are returned in a multidimensional list.
144
+
145
+ :type graph: DependencyGraph
146
+ :param graph: A dependency graph to score.
147
+ :rtype: 3 dimensional list
148
+ :return: Edge scores for the graph parameter.
149
+ """
150
+ # Convert graph to feature representation
151
+ edges = []
152
+ for head_node in graph.nodes.values():
153
+ for child_node in graph.nodes.values():
154
+ edges.append(
155
+ dict(
156
+ a=head_node["word"],
157
+ b=head_node["tag"],
158
+ c=child_node["word"],
159
+ d=child_node["tag"],
160
+ )
161
+ )
162
+
163
+ # Score edges
164
+ edge_scores = []
165
+ row = []
166
+ count = 0
167
+ for pdist in self.classifier.prob_classify_many(edges):
168
+ logger.debug("%.4f %.4f", pdist.prob("T"), pdist.prob("F"))
169
+ # smoothing in case the probability = 0
170
+ row.append([math.log(pdist.prob("T") + 0.00000000001)])
171
+ count += 1
172
+ if count == len(graph.nodes):
173
+ edge_scores.append(row)
174
+ row = []
175
+ count = 0
176
+ return edge_scores
177
+
178
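+ # A small usage sketch (not part of NLTK) for the scorer above: train it on the
+ # bundled CoNLL sample and inspect the score grid, which is n x n for a graph
+ # with n nodes (including the artificial root), each cell holding a one-element
+ # list with a smoothed log-probability. The function name is hypothetical.
+ def _naive_bayes_scorer_sketch():
+     from nltk.parse.dependencygraph import conll_data2
+
+     graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
+     scorer = NaiveBayesDependencyScorer()
+     scorer.train(graphs)
+     grid = scorer.score(graphs[0])
+     print(len(grid), len(grid[0]), len(graphs[0].nodes))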
+
179
+ #################################################################
180
+ # A Scorer for Demo Purposes
181
+ #################################################################
182
+ # A short class needed to show the parsing example from the paper
183
+ class DemoScorer(DependencyScorerI):
184
+ def train(self, graphs):
185
+ print("Training...")
186
+
187
+ def score(self, graph):
188
+ # scores for Keith Hall 'K-best Spanning Tree Parsing' paper
189
+ return [
190
+ [[], [5], [1], [1]],
191
+ [[], [], [11], [4]],
192
+ [[], [10], [], [5]],
193
+ [[], [8], [8], []],
194
+ ]
195
+
196
+
197
+ #################################################################
198
+ # Non-Projective Probabilistic Parsing
199
+ #################################################################
200
+
201
+
202
+ class ProbabilisticNonprojectiveParser:
203
+ """A probabilistic non-projective dependency parser.
204
+
205
+ Nonprojective dependencies allow for "crossing branches" in the parse tree,
206
+ which is necessary for representing particular linguistic phenomena, or even
207
+ typical parses in some languages. This parser follows the MST parsing
208
+ algorithm outlined in McDonald (2005), which likens the search for the best
209
+ non-projective parse to finding the maximum spanning tree in a weighted
210
+ directed graph.
211
+
212
+ >>> class Scorer(DependencyScorerI):
213
+ ... def train(self, graphs):
214
+ ... pass
215
+ ...
216
+ ... def score(self, graph):
217
+ ... return [
218
+ ... [[], [5], [1], [1]],
219
+ ... [[], [], [11], [4]],
220
+ ... [[], [10], [], [5]],
221
+ ... [[], [8], [8], []],
222
+ ... ]
223
+
224
+
225
+ >>> npp = ProbabilisticNonprojectiveParser()
226
+ >>> npp.train([], Scorer())
227
+
228
+ >>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None])
229
+ >>> len(list(parses))
230
+ 1
231
+
232
+ Rule based example
233
+
234
+ >>> from nltk.grammar import DependencyGrammar
235
+
236
+ >>> grammar = DependencyGrammar.fromstring('''
237
+ ... 'taught' -> 'play' | 'man'
238
+ ... 'man' -> 'the' | 'in'
239
+ ... 'in' -> 'corner'
240
+ ... 'corner' -> 'the'
241
+ ... 'play' -> 'golf' | 'dachshund' | 'to'
242
+ ... 'dachshund' -> 'his'
243
+ ... ''')
244
+
245
+ >>> ndp = NonprojectiveDependencyParser(grammar)
246
+ >>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf'])
247
+ >>> len(list(parses))
248
+ 4
249
+
250
+ """
251
+
252
+ def __init__(self):
253
+ """
254
+ Creates a new non-projective parser.
255
+ """
256
+ logger.debug("initializing prob. nonprojective...")
257
+
258
+ def train(self, graphs, dependency_scorer):
259
+ """
260
+ Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects,
261
+ and establishes this as the parser's scorer. This is used to
262
+ initialize the scores on a ``DependencyGraph`` during the parsing
263
+ procedure.
264
+
265
+ :type graphs: list(DependencyGraph)
266
+ :param graphs: A list of dependency graphs to train the scorer.
267
+ :type dependency_scorer: DependencyScorerI
268
+ :param dependency_scorer: A scorer which implements the
269
+ ``DependencyScorerI`` interface.
270
+ """
271
+ self._scorer = dependency_scorer
272
+ self._scorer.train(graphs)
273
+
274
+ def initialize_edge_scores(self, graph):
275
+ """
276
+ Assigns a score to every edge in the ``DependencyGraph`` graph.
277
+ These scores are generated via the parser's scorer which
278
+ was assigned during the training process.
279
+
280
+ :type graph: DependencyGraph
281
+ :param graph: A dependency graph to assign scores to.
282
+ """
283
+ self.scores = self._scorer.score(graph)
284
+
285
+ def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph):
286
+ """
287
+ Takes a list of nodes that have been identified to belong to a cycle,
288
+ and collapses them into one larger node. The arcs of all nodes in
289
+ the graph must be updated to account for this.
290
+
291
+ :type new_node: Node.
292
+ :param new_node: A Node (Dictionary) to collapse the cycle nodes into.
293
+ :type cycle_path: A list of integers.
294
+ :param cycle_path: A list of node addresses, each of which is in the cycle.
295
+ :type g_graph, b_graph, c_graph: DependencyGraph
296
+ :param g_graph, b_graph, c_graph: Graphs which need to be updated.
297
+ """
298
+ logger.debug("Collapsing nodes...")
299
+ # Collapse all cycle nodes into v_n+1 in G_Graph
300
+ for cycle_node_index in cycle_path:
301
+ g_graph.remove_by_address(cycle_node_index)
302
+ g_graph.add_node(new_node)
303
+ g_graph.redirect_arcs(cycle_path, new_node["address"])
304
+
305
+ def update_edge_scores(self, new_node, cycle_path):
306
+ """
307
+ Updates the edge scores to reflect a collapse operation into
308
+ new_node.
309
+
310
+ :type new_node: A Node.
311
+ :param new_node: The node which cycle nodes are collapsed into.
312
+ :type cycle_path: A list of integers.
313
+ :param cycle_path: A list of node addresses that belong to the cycle.
314
+ """
315
+ logger.debug("cycle %s", cycle_path)
316
+
317
+ cycle_path = self.compute_original_indexes(cycle_path)
318
+
319
+ logger.debug("old cycle %s", cycle_path)
320
+ logger.debug("Prior to update: %s", self.scores)
321
+
322
+ for i, row in enumerate(self.scores):
323
+ for j, column in enumerate(self.scores[i]):
324
+ logger.debug(self.scores[i][j])
325
+ if j in cycle_path and i not in cycle_path and self.scores[i][j]:
326
+ subtract_val = self.compute_max_subtract_score(j, cycle_path)
327
+
328
+ logger.debug("%s - %s", self.scores[i][j], subtract_val)
329
+
330
+ new_vals = []
331
+ for cur_val in self.scores[i][j]:
332
+ new_vals.append(cur_val - subtract_val)
333
+
334
+ self.scores[i][j] = new_vals
335
+
336
+ for i, row in enumerate(self.scores):
337
+ for j, cell in enumerate(self.scores[i]):
338
+ if i in cycle_path and j in cycle_path:
339
+ self.scores[i][j] = []
340
+
341
+ logger.debug("After update: %s", self.scores)
342
+
343
+ def compute_original_indexes(self, new_indexes):
344
+ """
345
+ As nodes are collapsed into others, they are replaced
346
+ by the new node in the graph, but it's still necessary
347
+ to keep track of what these original nodes were. This
348
+ takes a list of node addresses and replaces any collapsed
349
+ node addresses with their original addresses.
350
+
351
+ :type new_indexes: A list of integers.
352
+ :param new_indexes: A list of node addresses to check for
353
+ subsumed nodes.
354
+ """
355
+ swapped = True
356
+ while swapped:
357
+ originals = []
358
+ swapped = False
359
+ for new_index in new_indexes:
360
+ if new_index in self.inner_nodes:
361
+ for old_val in self.inner_nodes[new_index]:
362
+ if old_val not in originals:
363
+ originals.append(old_val)
364
+ swapped = True
365
+ else:
366
+ originals.append(new_index)
367
+ new_indexes = originals
368
+ return new_indexes
369
+
370
+ def compute_max_subtract_score(self, column_index, cycle_indexes):
371
+ """
372
+ When updating scores the score of the highest-weighted incoming
373
+ arc is subtracted upon collapse. This returns the correct
374
+ amount to subtract from that edge.
375
+
376
+ :type column_index: integer.
377
+ :param column_index: An index representing the column of incoming arcs
378
+ to a particular node being updated.
379
+ :type cycle_indexes: A list of integers.
380
+ :param cycle_indexes: Only arcs from cycle nodes are considered. This
381
+ is a list of such nodes addresses.
382
+ """
383
+ max_score = -100000
384
+ for row_index in cycle_indexes:
385
+ for subtract_val in self.scores[row_index][column_index]:
386
+ if subtract_val > max_score:
387
+ max_score = subtract_val
388
+ return max_score
389
+
390
+ def best_incoming_arc(self, node_index):
391
+ """
392
+ Returns the source of the best incoming arc to the
393
+ node with address: node_index
394
+
395
+ :type node_index: integer.
396
+ :param node_index: The address of the 'destination' node,
397
+ the node that is arced to.
398
+ """
399
+ originals = self.compute_original_indexes([node_index])
400
+ logger.debug("originals: %s", originals)
401
+
402
+ max_arc = None
403
+ max_score = None
404
+ for row_index in range(len(self.scores)):
405
+ for col_index in range(len(self.scores[row_index])):
406
+ if col_index in originals and (
407
+ max_score is None or self.scores[row_index][col_index] > max_score
408
+ ):
409
+ max_score = self.scores[row_index][col_index]
410
+ max_arc = row_index
411
+ logger.debug("%s, %s", row_index, col_index)
412
+
413
+ logger.debug(max_score)
414
+
415
+ for key in self.inner_nodes:
416
+ replaced_nodes = self.inner_nodes[key]
417
+ if max_arc in replaced_nodes:
418
+ return key
419
+
420
+ return max_arc
421
+
422
+ def original_best_arc(self, node_index):
423
+ originals = self.compute_original_indexes([node_index])
424
+ max_arc = None
425
+ max_score = None
426
+ max_orig = None
427
+ for row_index in range(len(self.scores)):
428
+ for col_index in range(len(self.scores[row_index])):
429
+ if col_index in originals and (
430
+ max_score is None or self.scores[row_index][col_index] > max_score
431
+ ):
432
+ max_score = self.scores[row_index][col_index]
433
+ max_arc = row_index
434
+ max_orig = col_index
435
+ return [max_arc, max_orig]
436
+
437
+ def parse(self, tokens, tags):
438
+ """
439
+ Parses a list of tokens in accordance to the MST parsing algorithm
440
+ for non-projective dependency parses. Assumes that the tokens to
441
+ be parsed have already been tagged and those tags are provided. Various
442
+ scoring methods can be used by implementing the ``DependencyScorerI``
443
+ interface and passing it to the training algorithm.
444
+
445
+ :type tokens: list(str)
446
+ :param tokens: A list of words or punctuation to be parsed.
447
+ :type tags: list(str)
448
+ :param tags: A list of tags corresponding by index to the words in the tokens list.
449
+ :return: An iterator of non-projective parses.
450
+ :rtype: iter(DependencyGraph)
451
+ """
452
+ self.inner_nodes = {}
453
+
454
+ # Initialize g_graph
455
+ g_graph = DependencyGraph()
456
+ for index, token in enumerate(tokens):
457
+ g_graph.nodes[index + 1].update(
458
+ {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1}
459
+ )
460
+
461
+ # Fully connect non-root nodes in g_graph
462
+ g_graph.connect_graph()
463
+ original_graph = DependencyGraph()
464
+ for index, token in enumerate(tokens):
465
+ original_graph.nodes[index + 1].update(
466
+ {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1}
467
+ )
468
+
469
+ b_graph = DependencyGraph()
470
+ c_graph = DependencyGraph()
471
+
472
+ for index, token in enumerate(tokens):
473
+ c_graph.nodes[index + 1].update(
474
+ {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1}
475
+ )
476
+
477
+ # Assign initial scores to g_graph edges
478
+ self.initialize_edge_scores(g_graph)
479
+ logger.debug(self.scores)
480
+ # Initialize a list of unvisited vertices (by node address)
481
+ unvisited_vertices = [vertex["address"] for vertex in c_graph.nodes.values()]
482
+ # Iterate over unvisited vertices
483
+ nr_vertices = len(tokens)
484
+ betas = {}
485
+ while unvisited_vertices:
486
+ # Mark current node as visited
487
+ current_vertex = unvisited_vertices.pop(0)
488
+ logger.debug("current_vertex: %s", current_vertex)
489
+ # Get corresponding node n_i to vertex v_i
490
+ current_node = g_graph.get_by_address(current_vertex)
491
+ logger.debug("current_node: %s", current_node)
492
+ # Get best in-edge node b for current node
493
+ best_in_edge = self.best_incoming_arc(current_vertex)
494
+ betas[current_vertex] = self.original_best_arc(current_vertex)
495
+ logger.debug("best in arc: %s --> %s", best_in_edge, current_vertex)
496
+ # b_graph = Union(b_graph, b)
497
+ for new_vertex in [current_vertex, best_in_edge]:
498
+ b_graph.nodes[new_vertex].update(
499
+ {"word": "TEMP", "rel": "NTOP", "address": new_vertex}
500
+ )
501
+ b_graph.add_arc(best_in_edge, current_vertex)
502
+ # Beta(current node) = b - stored for parse recovery
503
+ # If b_graph contains a cycle, collapse it
504
+ cycle_path = b_graph.contains_cycle()
505
+ if cycle_path:
506
+ # Create a new node v_n+1 with address = len(nodes) + 1
507
+ new_node = {"word": "NONE", "rel": "NTOP", "address": nr_vertices + 1}
508
+ # c_graph = Union(c_graph, v_n+1)
509
+ c_graph.add_node(new_node)
510
+ # Collapse all nodes in cycle C into v_n+1
511
+ self.update_edge_scores(new_node, cycle_path)
512
+ self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph)
513
+ for cycle_index in cycle_path:
514
+ c_graph.add_arc(new_node["address"], cycle_index)
515
+ # self.replaced_by[cycle_index] = new_node['address']
516
+
517
+ self.inner_nodes[new_node["address"]] = cycle_path
518
+
519
+ # Add v_n+1 to list of unvisited vertices
520
+ unvisited_vertices.insert(0, nr_vertices + 1)
521
+
522
+ # increment # of nodes counter
523
+ nr_vertices += 1
524
+
525
+ # Remove cycle nodes from b_graph; B = B - cycle c
526
+ for cycle_node_address in cycle_path:
527
+ b_graph.remove_by_address(cycle_node_address)
528
+
529
+ logger.debug("g_graph: %s", g_graph)
530
+ logger.debug("b_graph: %s", b_graph)
531
+ logger.debug("c_graph: %s", c_graph)
532
+ logger.debug("Betas: %s", betas)
533
+ logger.debug("replaced nodes %s", self.inner_nodes)
534
+
535
+ # Recover parse tree
536
+ logger.debug("Final scores: %s", self.scores)
537
+
538
+ logger.debug("Recovering parse...")
539
+ for i in range(len(tokens) + 1, nr_vertices + 1):
540
+ betas[betas[i][1]] = betas[i]
541
+
542
+ logger.debug("Betas: %s", betas)
543
+ for node in original_graph.nodes.values():
544
+ # TODO: It's dangerous to assume that deps is a dictionary
545
+ # because it's a default dictionary. Ideally, here we should not
546
+ # be concerned how dependencies are stored inside of a dependency
547
+ # graph.
548
+ node["deps"] = {}
549
+ for i in range(1, len(tokens) + 1):
550
+ original_graph.add_arc(betas[i][0], betas[i][1])
551
+
552
+ logger.debug("Done.")
553
+ yield original_graph
554
+
555
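+ # A short sketch (not part of NLTK) showing how to surface the logger.debug
+ # trace used throughout this parser (edge scores, cycle contraction, parse
+ # recovery): the module logs via the standard ``logging`` package, so raising
+ # the log level is enough. It reuses the ``DemoScorer`` defined earlier in
+ # this file; the function name is hypothetical.
+ def _traced_hall_sketch():
+     logging.basicConfig(level=logging.DEBUG)
+     npp = ProbabilisticNonprojectiveParser()
+     npp.train([], DemoScorer())
+     for parse_graph in npp.parse(["v1", "v2", "v3"], [None, None, None]):
+         print(parse_graph)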
+
556
+ #################################################################
557
+ # Rule-based Non-Projective Parser
558
+ #################################################################
559
+
560
+
561
+ class NonprojectiveDependencyParser:
562
+ """
563
+ A non-projective, rule-based dependency parser. This parser
564
+ will return the set of all possible non-projective parses based on
565
+ the word-to-word relations defined in the parser's dependency
566
+ grammar, and will allow the branches of the parse tree to cross
567
+ in order to capture a variety of linguistic phenomena that a
568
+ projective parser will not.
569
+ """
570
+
571
+ def __init__(self, dependency_grammar):
572
+ """
573
+ Creates a new ``NonprojectiveDependencyParser``.
574
+
575
+ :param dependency_grammar: a grammar of word-to-word relations.
576
+ :type dependency_grammar: DependencyGrammar
577
+ """
578
+ self._grammar = dependency_grammar
579
+
580
+ def parse(self, tokens):
581
+ """
582
+ Parses the input tokens with respect to the parser's grammar. Parsing
583
+ is accomplished by representing the search-space of possible parses as
584
+ a fully-connected directed graph. Arcs that would lead to ungrammatical
585
+ parses are removed and a lattice is constructed of length n, where n is
586
+ the number of input tokens, to represent all possible grammatical
587
+ traversals. All possible paths through the lattice are then enumerated
588
+ to produce the set of non-projective parses.
589
+
590
+ :param tokens: A list of tokens to parse.
591
+ :type tokens: list(str)
592
+ :return: An iterator of non-projective parses.
593
+ :rtype: iter(DependencyGraph)
594
+ """
595
+ # Create graph representation of tokens
596
+ self._graph = DependencyGraph()
597
+
598
+ for index, token in enumerate(tokens):
599
+ self._graph.nodes[index] = {
600
+ "word": token,
601
+ "deps": [],
602
+ "rel": "NTOP",
603
+ "address": index,
604
+ }
605
+
606
+ for head_node in self._graph.nodes.values():
607
+ deps = []
608
+ for dep_node in self._graph.nodes.values():
609
+ if (
610
+ self._grammar.contains(head_node["word"], dep_node["word"])
611
+ and head_node["word"] != dep_node["word"]
612
+ ):
613
+ deps.append(dep_node["address"])
614
+ head_node["deps"] = deps
615
+
616
+ # Create lattice of possible heads
617
+ roots = []
618
+ possible_heads = []
619
+ for i, word in enumerate(tokens):
620
+ heads = []
621
+ for j, head in enumerate(tokens):
622
+ if (i != j) and self._grammar.contains(head, word):
623
+ heads.append(j)
624
+ if len(heads) == 0:
625
+ roots.append(i)
626
+ possible_heads.append(heads)
627
+
628
+ # Set roots to attempt
629
+ if len(roots) < 2:
630
+ if len(roots) == 0:
631
+ for i in range(len(tokens)):
632
+ roots.append(i)
633
+
634
+ # Traverse lattice
635
+ analyses = []
636
+ for _ in roots:
637
+ stack = []
638
+ analysis = [[] for i in range(len(possible_heads))]
639
+ i = 0
640
+ forward = True
641
+ while i >= 0:
642
+ if forward:
643
+ if len(possible_heads[i]) == 1:
644
+ analysis[i] = possible_heads[i][0]
645
+ elif len(possible_heads[i]) == 0:
646
+ analysis[i] = -1
647
+ else:
648
+ head = possible_heads[i].pop()
649
+ analysis[i] = head
650
+ stack.append([i, head])
651
+ if not forward:
652
+ index_on_stack = False
653
+ for stack_item in stack:
654
+ if stack_item[0] == i:
655
+ index_on_stack = True
656
+ orig_length = len(possible_heads[i])
657
+
658
+ if index_on_stack and orig_length == 0:
659
+ for j in range(len(stack) - 1, -1, -1):
660
+ stack_item = stack[j]
661
+ if stack_item[0] == i:
662
+ possible_heads[i].append(stack.pop(j)[1])
663
+
664
+ elif index_on_stack and orig_length > 0:
665
+ head = possible_heads[i].pop()
666
+ analysis[i] = head
667
+ stack.append([i, head])
668
+ forward = True
669
+
670
+ if i + 1 == len(possible_heads):
671
+ analyses.append(analysis[:])
672
+ forward = False
673
+ if forward:
674
+ i += 1
675
+ else:
676
+ i -= 1
677
+
678
+ # Filter parses
679
+ # ensure 1 root, everything has 1 head
680
+ for analysis in analyses:
681
+ if analysis.count(-1) > 1:
682
+ # there are several root elements!
683
+ continue
684
+
685
+ graph = DependencyGraph()
686
+ graph.root = graph.nodes[analysis.index(-1) + 1]
687
+
688
+ for address, (token, head_index) in enumerate(
689
+ zip(tokens, analysis), start=1
690
+ ):
691
+ head_address = head_index + 1
692
+
693
+ node = graph.nodes[address]
694
+ node.update({"word": token, "address": address})
695
+
696
+ if head_address == 0:
697
+ rel = "ROOT"
698
+ else:
699
+ rel = ""
700
+ graph.nodes[head_index + 1]["deps"][rel].append(address)
701
+
702
+ # TODO: check for cycles
703
+ yield graph
704
+
705
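+ # For intuition (not part of NLTK): the stack-based traversal in parse() above
+ # enumerates every combination of head choices, one head per token, i.e. the
+ # cross product of the per-token candidate lists; analyses with more than one
+ # root are then discarded. A compact sketch of that enumeration step (the
+ # helper name is hypothetical):
+ def _enumerate_head_assignments(possible_heads):
+     from itertools import product
+
+     # Tokens with no candidate head become roots (-1), as in parse() above.
+     candidates = [heads if heads else [-1] for heads in possible_heads]
+     for analysis in product(*candidates):
+         if analysis.count(-1) <= 1:  # keep at most one root
+             yield list(analysis)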
+
706
+ #################################################################
707
+ # Demos
708
+ #################################################################
709
+
710
+
711
+ def demo():
712
+ # hall_demo()
713
+ nonprojective_conll_parse_demo()
714
+ rule_based_demo()
715
+
716
+
717
+ def hall_demo():
718
+ npp = ProbabilisticNonprojectiveParser()
719
+ npp.train([], DemoScorer())
720
+ for parse_graph in npp.parse(["v1", "v2", "v3"], [None, None, None]):
721
+ print(parse_graph)
722
+
723
+
724
+ def nonprojective_conll_parse_demo():
725
+ from nltk.parse.dependencygraph import conll_data2
726
+
727
+ graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
728
+ npp = ProbabilisticNonprojectiveParser()
729
+ npp.train(graphs, NaiveBayesDependencyScorer())
730
+ for parse_graph in npp.parse(
731
+ ["Cathy", "zag", "hen", "zwaaien", "."], ["N", "V", "Pron", "Adj", "N", "Punc"]
732
+ ):
733
+ print(parse_graph)
734
+
735
+
736
+ def rule_based_demo():
737
+ from nltk.grammar import DependencyGrammar
738
+
739
+ grammar = DependencyGrammar.fromstring(
740
+ """
741
+ 'taught' -> 'play' | 'man'
742
+ 'man' -> 'the' | 'in'
743
+ 'in' -> 'corner'
744
+ 'corner' -> 'the'
745
+ 'play' -> 'golf' | 'dachshund' | 'to'
746
+ 'dachshund' -> 'his'
747
+ """
748
+ )
749
+ print(grammar)
750
+ ndp = NonprojectiveDependencyParser(grammar)
751
+ graphs = ndp.parse(
752
+ [
753
+ "the",
754
+ "man",
755
+ "in",
756
+ "the",
757
+ "corner",
758
+ "taught",
759
+ "his",
760
+ "dachshund",
761
+ "to",
762
+ "play",
763
+ "golf",
764
+ ]
765
+ )
766
+ print("Graphs:")
767
+ for graph in graphs:
768
+ print(graph)
769
+
770
+
771
+ if __name__ == "__main__":
772
+ demo()
env-llmeval/lib/python3.10/site-packages/nltk/parse/pchart.py ADDED
@@ -0,0 +1,579 @@
1
+ # Natural Language Toolkit: Probabilistic Chart Parsers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Classes and interfaces for associating probabilities with tree
11
+ structures that represent the internal organization of a text. The
12
+ probabilistic parser module defines ``BottomUpProbabilisticChartParser``.
13
+
14
+ ``BottomUpProbabilisticChartParser`` is an abstract class that implements
15
+ a bottom-up chart parser for ``PCFG`` grammars. It maintains a queue of edges,
16
+ and adds them to the chart one at a time. The ordering of this queue
17
+ is based on the probabilities associated with the edges, allowing the
18
+ parser to expand more likely edges before less likely ones. Each
19
+ subclass implements a different queue ordering, producing different
20
+ search strategies. Currently the following subclasses are defined:
21
+
22
+ - ``InsideChartParser`` searches edges in decreasing order of
23
+ their trees' inside probabilities.
24
+ - ``RandomChartParser`` searches edges in random order.
25
+ - ``LongestChartParser`` searches edges in decreasing order of their
26
+ location's length.
27
+
28
+ The ``BottomUpProbabilisticChartParser`` constructor has an optional
29
+ argument beam_size. If non-zero, this controls the size of the beam
30
+ (aka the edge queue). This option is most useful with InsideChartParser.
31
+ """
32
+
33
+ ##//////////////////////////////////////////////////////
34
+ ## Bottom-Up PCFG Chart Parser
35
+ ##//////////////////////////////////////////////////////
36
+
37
+ # [XX] This might not be implemented quite right -- it would be better
38
+ # to associate probabilities with child pointer lists.
39
+
40
+ import random
41
+ from functools import reduce
42
+
43
+ from nltk.grammar import PCFG, Nonterminal
44
+ from nltk.parse.api import ParserI
45
+ from nltk.parse.chart import AbstractChartRule, Chart, LeafEdge, TreeEdge
46
+ from nltk.tree import ProbabilisticTree, Tree
47
+
48
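+ # A short usage sketch (not part of NLTK's demo at the bottom of this module):
+ # parse a toy sentence with ``InsideChartParser`` and a small PCFG. With the
+ # default beam_size of 0 the search is exhaustive; pass beam_size=N to bound
+ # the edge queue. The grammar and function name are illustrative only.
+ def _inside_chart_usage_sketch():
+     grammar = PCFG.fromstring(
+         """
+         S -> NP VP [1.0]
+         NP -> 'I' [0.4] | Det N [0.6]
+         Det -> 'the' [1.0]
+         N -> 'dog' [1.0]
+         VP -> V NP [1.0]
+         V -> 'saw' [1.0]
+         """
+     )
+     parser = InsideChartParser(grammar)
+     for tree in parser.parse("I saw the dog".split()):
+         print(tree.prob(), tree)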
+
49
+ # Probabilistic edges
50
+ class ProbabilisticLeafEdge(LeafEdge):
51
+ def prob(self):
52
+ return 1.0
53
+
54
+
55
+ class ProbabilisticTreeEdge(TreeEdge):
56
+ def __init__(self, prob, *args, **kwargs):
57
+ TreeEdge.__init__(self, *args, **kwargs)
58
+ self._prob = prob
59
+ # two edges with different probabilities are not equal.
60
+ self._comparison_key = (self._comparison_key, prob)
61
+
62
+ def prob(self):
63
+ return self._prob
64
+
65
+ @staticmethod
66
+ def from_production(production, index, p):
67
+ return ProbabilisticTreeEdge(
68
+ p, (index, index), production.lhs(), production.rhs(), 0
69
+ )
70
+
71
+
72
+ # Rules using probabilistic edges
73
+ class ProbabilisticBottomUpInitRule(AbstractChartRule):
74
+ NUM_EDGES = 0
75
+
76
+ def apply(self, chart, grammar):
77
+ for index in range(chart.num_leaves()):
78
+ new_edge = ProbabilisticLeafEdge(chart.leaf(index), index)
79
+ if chart.insert(new_edge, ()):
80
+ yield new_edge
81
+
82
+
83
+ class ProbabilisticBottomUpPredictRule(AbstractChartRule):
84
+ NUM_EDGES = 1
85
+
86
+ def apply(self, chart, grammar, edge):
87
+ if edge.is_incomplete():
88
+ return
89
+ for prod in grammar.productions():
90
+ if edge.lhs() == prod.rhs()[0]:
91
+ new_edge = ProbabilisticTreeEdge.from_production(
92
+ prod, edge.start(), prod.prob()
93
+ )
94
+ if chart.insert(new_edge, ()):
95
+ yield new_edge
96
+
97
+
98
+ class ProbabilisticFundamentalRule(AbstractChartRule):
99
+ NUM_EDGES = 2
100
+
101
+ def apply(self, chart, grammar, left_edge, right_edge):
102
+ # Make sure the rule is applicable.
103
+ if not (
104
+ left_edge.end() == right_edge.start()
105
+ and left_edge.nextsym() == right_edge.lhs()
106
+ and left_edge.is_incomplete()
107
+ and right_edge.is_complete()
108
+ ):
109
+ return
110
+
111
+ # Construct the new edge.
112
+ p = left_edge.prob() * right_edge.prob()
113
+ new_edge = ProbabilisticTreeEdge(
114
+ p,
115
+ span=(left_edge.start(), right_edge.end()),
116
+ lhs=left_edge.lhs(),
117
+ rhs=left_edge.rhs(),
118
+ dot=left_edge.dot() + 1,
119
+ )
120
+
121
+ # Add it to the chart, with appropriate child pointers.
122
+ changed_chart = False
123
+ for cpl1 in chart.child_pointer_lists(left_edge):
124
+ if chart.insert(new_edge, cpl1 + (right_edge,)):
125
+ changed_chart = True
126
+
127
+ # If we changed the chart, then generate the edge.
128
+ if changed_chart:
129
+ yield new_edge
130
+
131
+
132
+ class SingleEdgeProbabilisticFundamentalRule(AbstractChartRule):
133
+ NUM_EDGES = 1
134
+
135
+ _fundamental_rule = ProbabilisticFundamentalRule()
136
+
137
+ def apply(self, chart, grammar, edge1):
138
+ fr = self._fundamental_rule
139
+ if edge1.is_incomplete():
140
+ # edge1 = left_edge; edge2 = right_edge
141
+ for edge2 in chart.select(
142
+ start=edge1.end(), is_complete=True, lhs=edge1.nextsym()
143
+ ):
144
+ yield from fr.apply(chart, grammar, edge1, edge2)
145
+ else:
146
+ # edge2 = left_edge; edge1 = right_edge
147
+ for edge2 in chart.select(
148
+ end=edge1.start(), is_complete=False, nextsym=edge1.lhs()
149
+ ):
150
+ yield from fr.apply(chart, grammar, edge2, edge1)
151
+
152
+ def __str__(self):
153
+ return "Fundamental Rule"
154
+
155
+
156
+ class BottomUpProbabilisticChartParser(ParserI):
157
+ """
158
+ An abstract bottom-up parser for ``PCFG`` grammars that uses a ``Chart`` to
159
+ record partial results. ``BottomUpProbabilisticChartParser`` maintains
160
+ a queue of edges that can be added to the chart. This queue is
161
+ initialized with edges for each token in the text that is being
162
+ parsed. ``BottomUpProbabilisticChartParser`` inserts these edges into
163
+ the chart one at a time, starting with the most likely edges, and
164
+ proceeding to less likely edges. For each edge that is added to
165
+ the chart, it may become possible to insert additional edges into
166
+ the chart; these are added to the queue. This process continues
167
+ until enough complete parses have been generated, or until the
168
+ queue is empty.
169
+
170
+ The sorting order for the queue is not specified by
171
+ ``BottomUpProbabilisticChartParser``. Different sorting orders will
172
+ result in different search strategies. The sorting order for the
173
+ queue is defined by the method ``sort_queue``; subclasses are required
174
+ to provide a definition for this method.
175
+
176
+ :type _grammar: PCFG
177
+ :ivar _grammar: The grammar used to parse sentences.
178
+ :type _trace: int
179
+ :ivar _trace: The level of tracing output that should be generated
180
+ when parsing a text.
181
+ """
182
+
183
+ def __init__(self, grammar, beam_size=0, trace=0):
184
+ """
185
+ Create a new ``BottomUpProbabilisticChartParser``, that uses
186
+ ``grammar`` to parse texts.
187
+
188
+ :type grammar: PCFG
189
+ :param grammar: The grammar used to parse texts.
190
+ :type beam_size: int
191
+ :param beam_size: The maximum length for the parser's edge queue.
192
+ :type trace: int
193
+ :param trace: The level of tracing that should be used when
194
+ parsing a text. ``0`` will generate no tracing output;
195
+ and higher numbers will produce more verbose tracing
196
+ output.
197
+ """
198
+ if not isinstance(grammar, PCFG):
199
+ raise ValueError("The grammar must be probabilistic PCFG")
200
+ self._grammar = grammar
201
+ self.beam_size = beam_size
202
+ self._trace = trace
203
+
204
+ def grammar(self):
205
+ return self._grammar
206
+
207
+ def trace(self, trace=2):
208
+ """
209
+ Set the level of tracing output that should be generated when
210
+ parsing a text.
211
+
212
+ :type trace: int
213
+ :param trace: The trace level. A trace level of ``0`` will
214
+ generate no tracing output; and higher trace levels will
215
+ produce more verbose tracing output.
216
+ :rtype: None
217
+ """
218
+ self._trace = trace
219
+
220
+ # TODO: change this to conform more with the standard ChartParser
221
+ def parse(self, tokens):
222
+ self._grammar.check_coverage(tokens)
223
+ chart = Chart(list(tokens))
224
+ grammar = self._grammar
225
+
226
+ # Chart parser rules.
227
+ bu_init = ProbabilisticBottomUpInitRule()
228
+ bu = ProbabilisticBottomUpPredictRule()
229
+ fr = SingleEdgeProbabilisticFundamentalRule()
230
+
231
+ # Our queue
232
+ queue = []
233
+
234
+ # Initialize the chart.
235
+ for edge in bu_init.apply(chart, grammar):
236
+ if self._trace > 1:
237
+ print(
238
+ " %-50s [%s]"
239
+ % (chart.pretty_format_edge(edge, width=2), edge.prob())
240
+ )
241
+ queue.append(edge)
242
+
243
+ while len(queue) > 0:
244
+ # Re-sort the queue.
245
+ self.sort_queue(queue, chart)
246
+
247
+ # Prune the queue to the correct size if a beam was defined
248
+ if self.beam_size:
249
+ self._prune(queue, chart)
250
+
251
+ # Get the best edge.
252
+ edge = queue.pop()
253
+ if self._trace > 0:
254
+ print(
255
+ " %-50s [%s]"
256
+ % (chart.pretty_format_edge(edge, width=2), edge.prob())
257
+ )
258
+
259
+ # Apply BU & FR to it.
260
+ queue.extend(bu.apply(chart, grammar, edge))
261
+ queue.extend(fr.apply(chart, grammar, edge))
262
+
263
+ # Get a list of complete parses.
264
+ parses = list(chart.parses(grammar.start(), ProbabilisticTree))
265
+
266
+ # Assign probabilities to the trees.
267
+ prod_probs = {}
268
+ for prod in grammar.productions():
269
+ prod_probs[prod.lhs(), prod.rhs()] = prod.prob()
270
+ for parse in parses:
271
+ self._setprob(parse, prod_probs)
272
+
273
+ # Sort by probability
274
+ parses.sort(reverse=True, key=lambda tree: tree.prob())
275
+
276
+ return iter(parses)
277
+
278
+ def _setprob(self, tree, prod_probs):
279
+ if tree.prob() is not None:
280
+ return
281
+
282
+ # Get the prob of the CFG production.
283
+ lhs = Nonterminal(tree.label())
284
+ rhs = []
285
+ for child in tree:
286
+ if isinstance(child, Tree):
287
+ rhs.append(Nonterminal(child.label()))
288
+ else:
289
+ rhs.append(child)
290
+ prob = prod_probs[lhs, tuple(rhs)]
291
+
292
+ # Get the probs of children.
293
+ for child in tree:
294
+ if isinstance(child, Tree):
295
+ self._setprob(child, prod_probs)
296
+ prob *= child.prob()
297
+
298
+ tree.set_prob(prob)
299
+
300
+ def sort_queue(self, queue, chart):
301
+ """
302
+ Sort the given queue of ``Edge`` objects, placing the edge that should
303
+ be tried first at the beginning of the queue. This method
304
+ will be called after each ``Edge`` is added to the queue.
305
+
306
+ :param queue: The queue of ``Edge`` objects to sort. Each edge in
307
+ this queue is an edge that could be added to the chart by
308
+ the fundamental rule; but that has not yet been added.
309
+ :type queue: list(Edge)
310
+ :param chart: The chart being used to parse the text. This
311
+ chart can be used to provide extra information for sorting
312
+ the queue.
313
+ :type chart: Chart
314
+ :rtype: None
315
+ """
316
+ raise NotImplementedError()
317
+
318
+ def _prune(self, queue, chart):
319
+ """Discard items in the queue if the queue is longer than the beam."""
320
+ if len(queue) > self.beam_size:
321
+ split = len(queue) - self.beam_size
322
+ if self._trace > 2:
323
+ for edge in queue[:split]:
324
+ print(" %-50s [DISCARDED]" % chart.pretty_format_edge(edge, 2))
325
+ del queue[:split]
326
+
327
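+ # A minimal sketch (not part of NLTK) of the subclassing contract documented in
+ # ``sort_queue`` above: the edge to try next must end up at the *end* of the
+ # queue, because ``parse`` pops from the back. This hypothetical variant
+ # prefers shorter edges, the mirror image of ``LongestChartParser`` below.
+ class ShortestChartParser(BottomUpProbabilisticChartParser):
+     def sort_queue(self, queue, chart):
+         # Descending length, so the shortest edge sits at the end of the list.
+         queue.sort(key=lambda edge: edge.length(), reverse=True)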
+
328
+ class InsideChartParser(BottomUpProbabilisticChartParser):
329
+ """
330
+ A bottom-up parser for ``PCFG`` grammars that tries edges in descending
331
+ order of the inside probabilities of their trees. The "inside
332
+ probability" of a tree is simply the
333
+ probability of the entire tree, ignoring its context. In
334
+ particular, the inside probability of a tree generated by
335
+ production *p* with children *c[1], c[2], ..., c[n]* is
336
+ *P(p)P(c[1])P(c[2])...P(c[n])*; and the inside
337
+ probability of a token is 1 if it is present in the text, and 0 if
338
+ it is absent.
339
+
340
+ This sorting order results in a type of lowest-cost-first search
341
+ strategy.
342
+ """
343
+
344
+ # Inherit constructor.
345
+ def sort_queue(self, queue, chart):
346
+ """
347
+ Sort the given queue of edges, in descending order of the
348
+ inside probabilities of the edges' trees.
349
+
350
+ :param queue: The queue of ``Edge`` objects to sort. Each edge in
351
+ this queue is an edge that could be added to the chart by
352
+ the fundamental rule; but that has not yet been added.
353
+ :type queue: list(Edge)
354
+ :param chart: The chart being used to parse the text. This
355
+ chart can be used to provide extra information for sorting
356
+ the queue.
357
+ :type chart: Chart
358
+ :rtype: None
359
+ """
360
+ queue.sort(key=lambda edge: edge.prob())
361
+
362
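+ # For illustration (not part of NLTK): the inside probability defined in the
+ # docstring above is just the product of the production probabilities at every
+ # node of a finished tree, which is what ``_setprob`` computes incrementally.
+ # A standalone sketch, taking a plain ``Tree`` and a dict mapping (lhs, rhs)
+ # pairs to probabilities (both arguments are assumptions of this sketch):
+ def _inside_probability(tree, prod_probs):
+     rhs = tuple(
+         Nonterminal(child.label()) if isinstance(child, Tree) else child
+         for child in tree
+     )
+     prob = prod_probs[Nonterminal(tree.label()), rhs]
+     for child in tree:
+         if isinstance(child, Tree):
+             prob *= _inside_probability(child, prod_probs)
+     return prob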
+
363
+ # Eventually, this will become some sort of inside-outside parser:
364
+ # class InsideOutsideParser(BottomUpProbabilisticChartParser):
365
+ # def __init__(self, grammar, trace=0):
366
+ # # Inherit docs.
367
+ # BottomUpProbabilisticChartParser.__init__(self, grammar, trace)
368
+ #
369
+ # # Find the best path from S to each nonterminal
370
+ # bestp = {}
371
+ # for production in grammar.productions(): bestp[production.lhs()]=0
372
+ # bestp[grammar.start()] = 1.0
373
+ #
374
+ # for i in range(len(grammar.productions())):
375
+ # for production in grammar.productions():
376
+ # lhs = production.lhs()
377
+ # for elt in production.rhs():
378
+ # bestp[elt] = max(bestp[lhs]*production.prob(),
379
+ # bestp.get(elt,0))
380
+ #
381
+ # self._bestp = bestp
382
+ # for (k,v) in self._bestp.items(): print(k,v)
383
+ #
384
+ # def _sortkey(self, edge):
385
+ # return edge.structure()[PROB] * self._bestp[edge.lhs()]
386
+ #
387
+ # def sort_queue(self, queue, chart):
388
+ # queue.sort(key=self._sortkey)
389
+
390
+
391
+ class RandomChartParser(BottomUpProbabilisticChartParser):
392
+ """
393
+ A bottom-up parser for ``PCFG`` grammars that tries edges in random order.
394
+ This sorting order results in a random search strategy.
395
+ """
396
+
397
+ # Inherit constructor
398
+ def sort_queue(self, queue, chart):
399
+ i = random.randint(0, len(queue) - 1)
400
+ (queue[-1], queue[i]) = (queue[i], queue[-1])
401
+
402
+
403
+ class UnsortedChartParser(BottomUpProbabilisticChartParser):
404
+ """
405
+ A bottom-up parser for ``PCFG`` grammars that tries edges in whatever order they were added to the queue.
406
+ """
407
+
408
+ # Inherit constructor
409
+ def sort_queue(self, queue, chart):
410
+ return
411
+
412
+
413
+ class LongestChartParser(BottomUpProbabilisticChartParser):
414
+ """
415
+ A bottom-up parser for ``PCFG`` grammars that tries longer edges before
416
+ shorter ones. This sorting order results in a type of best-first
417
+ search strategy.
418
+ """
419
+
420
+ # Inherit constructor
421
+ def sort_queue(self, queue, chart):
422
+ queue.sort(key=lambda edge: edge.length())
423
+
424
+
425
+ ##//////////////////////////////////////////////////////
426
+ ## Test Code
427
+ ##//////////////////////////////////////////////////////
428
+
429
+
430
+ def demo(choice=None, draw_parses=None, print_parses=None):
431
+ """
432
+ A demonstration of the probabilistic parsers. The user is
433
+ prompted to select which demo to run, and how many parses should
434
+ be found; and then each parser is run on the same demo, and a
435
+ summary of the results are displayed.
436
+ """
437
+ import sys
438
+ import time
439
+
440
+ from nltk import tokenize
441
+ from nltk.parse import pchart
442
+
443
+ # Define two demos. Each demo has a sentence and a grammar.
444
+ toy_pcfg1 = PCFG.fromstring(
445
+ """
446
+ S -> NP VP [1.0]
447
+ NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
448
+ Det -> 'the' [0.8] | 'my' [0.2]
449
+ N -> 'man' [0.5] | 'telescope' [0.5]
450
+ VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
451
+ V -> 'ate' [0.35] | 'saw' [0.65]
452
+ PP -> P NP [1.0]
453
+ P -> 'with' [0.61] | 'under' [0.39]
454
+ """
455
+ )
456
+
457
+ toy_pcfg2 = PCFG.fromstring(
458
+ """
459
+ S -> NP VP [1.0]
460
+ VP -> V NP [.59]
461
+ VP -> V [.40]
462
+ VP -> VP PP [.01]
463
+ NP -> Det N [.41]
464
+ NP -> Name [.28]
465
+ NP -> NP PP [.31]
466
+ PP -> P NP [1.0]
467
+ V -> 'saw' [.21]
468
+ V -> 'ate' [.51]
469
+ V -> 'ran' [.28]
470
+ N -> 'boy' [.11]
471
+ N -> 'cookie' [.12]
472
+ N -> 'table' [.13]
473
+ N -> 'telescope' [.14]
474
+ N -> 'hill' [.5]
475
+ Name -> 'Jack' [.52]
476
+ Name -> 'Bob' [.48]
477
+ P -> 'with' [.61]
478
+ P -> 'under' [.39]
479
+ Det -> 'the' [.41]
480
+ Det -> 'a' [.31]
481
+ Det -> 'my' [.28]
482
+ """
483
+ )
484
+
485
+ demos = [
486
+ ("I saw John with my telescope", toy_pcfg1),
487
+ ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2),
488
+ ]
489
+
490
+ if choice is None:
491
+ # Ask the user which demo they want to use.
492
+ print()
493
+ for i in range(len(demos)):
494
+ print(f"{i + 1:>3}: {demos[i][0]}")
495
+ print(" %r" % demos[i][1])
496
+ print()
497
+ print("Which demo (%d-%d)? " % (1, len(demos)), end=" ")
498
+ choice = int(sys.stdin.readline().strip()) - 1
499
+ try:
500
+ sent, grammar = demos[choice]
501
+ except:
502
+ print("Bad sentence number")
503
+ return
504
+
505
+ # Tokenize the sentence.
506
+ tokens = sent.split()
507
+
508
+ # Define a list of parsers. We'll use all parsers.
509
+ parsers = [
510
+ pchart.InsideChartParser(grammar),
511
+ pchart.RandomChartParser(grammar),
512
+ pchart.UnsortedChartParser(grammar),
513
+ pchart.LongestChartParser(grammar),
514
+ pchart.InsideChartParser(grammar, beam_size=len(tokens) + 1), # was BeamParser
515
+ ]
516
+
517
+ # Run the parsers on the tokenized sentence.
518
+ times = []
519
+ average_p = []
520
+ num_parses = []
521
+ all_parses = {}
522
+ for parser in parsers:
523
+ print(f"\ns: {sent}\nparser: {parser}\ngrammar: {grammar}")
524
+ parser.trace(3)
525
+ t = time.time()
526
+ parses = list(parser.parse(tokens))
527
+ times.append(time.time() - t)
528
+ p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0
529
+ average_p.append(p)
530
+ num_parses.append(len(parses))
531
+ for p in parses:
532
+ all_parses[p.freeze()] = 1
533
+
534
+ # Print some summary statistics
535
+ print()
536
+ print(" Parser Beam | Time (secs) # Parses Average P(parse)")
537
+ print("------------------------+------------------------------------------")
538
+ for i in range(len(parsers)):
539
+ print(
540
+ "%18s %4d |%11.4f%11d%19.14f"
541
+ % (
542
+ parsers[i].__class__.__name__,
543
+ parsers[i].beam_size,
544
+ times[i],
545
+ num_parses[i],
546
+ average_p[i],
547
+ )
548
+ )
549
+ parses = all_parses.keys()
550
+ if parses:
551
+ p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses)
552
+ else:
553
+ p = 0
554
+ print("------------------------+------------------------------------------")
555
+ print("%18s |%11s%11d%19.14f" % ("(All Parses)", "n/a", len(parses), p))
556
+
557
+ if draw_parses is None:
558
+ # Ask the user if we should draw the parses.
559
+ print()
560
+ print("Draw parses (y/n)? ", end=" ")
561
+ draw_parses = sys.stdin.readline().strip().lower().startswith("y")
562
+ if draw_parses:
563
+ from nltk.draw.tree import draw_trees
564
+
565
+ print(" please wait...")
566
+ draw_trees(*parses)
567
+
568
+ if print_parses is None:
569
+ # Ask the user if we should print the parses.
570
+ print()
571
+ print("Print parses (y/n)? ", end=" ")
572
+ print_parses = sys.stdin.readline().strip().lower().startswith("y")
573
+ if print_parses:
574
+ for parse in parses:
575
+ print(parse)
576
+
577
+
578
+ if __name__ == "__main__":
579
+ demo()
env-llmeval/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py ADDED
@@ -0,0 +1,716 @@
1
+ # Natural Language Toolkit: Dependency Grammars
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Jason Narad <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ #
9
+
10
+ from collections import defaultdict
11
+ from functools import total_ordering
12
+ from itertools import chain
13
+
14
+ from nltk.grammar import (
15
+ DependencyGrammar,
16
+ DependencyProduction,
17
+ ProbabilisticDependencyGrammar,
18
+ )
19
+ from nltk.internals import raise_unorderable_types
20
+ from nltk.parse.dependencygraph import DependencyGraph
21
+
22
+ #################################################################
23
+ # Dependency Span
24
+ #################################################################
25
+
26
+
27
+ @total_ordering
28
+ class DependencySpan:
29
+ """
30
+ A contiguous span over some part of the input string representing
31
+ dependency (head -> modifier) relationships amongst words. An atomic
32
+ span corresponds to only one word so it isn't a 'span' in the conventional
33
+ sense, as its _start_index = _end_index = _head_index for concatenation
34
+ purposes. All other spans are assumed to have arcs between all nodes
35
+ within the start and end indexes of the span, and one head index corresponding
36
+ to the head word for the entire span. This is the same as the root node if
37
+ the dependency structure were depicted as a graph.
38
+ """
39
+
40
+ def __init__(self, start_index, end_index, head_index, arcs, tags):
41
+ self._start_index = start_index
42
+ self._end_index = end_index
43
+ self._head_index = head_index
44
+ self._arcs = arcs
45
+ self._tags = tags
46
+ self._comparison_key = (start_index, end_index, head_index, tuple(arcs))
47
+ self._hash = hash(self._comparison_key)
48
+
49
+ def head_index(self):
50
+ """
51
+ :return: A value indexing the head of the entire ``DependencySpan``.
52
+ :rtype: int
53
+ """
54
+ return self._head_index
55
+
56
+ def __repr__(self):
57
+ """
58
+ :return: A concise string representation of the ``DependencySpan``.
59
+ :rtype: str.
60
+ """
61
+ return "Span %d-%d; Head Index: %d" % (
62
+ self._start_index,
63
+ self._end_index,
64
+ self._head_index,
65
+ )
66
+
67
+ def __str__(self):
68
+ """
69
+ :return: A verbose string representation of the ``DependencySpan``.
70
+ :rtype: str
71
+ """
72
+ str = "Span %d-%d; Head Index: %d" % (
73
+ self._start_index,
74
+ self._end_index,
75
+ self._head_index,
76
+ )
77
+ for i in range(len(self._arcs)):
78
+ str += "\n%d <- %d, %s" % (i, self._arcs[i], self._tags[i])
79
+ return str
80
+
81
+ def __eq__(self, other):
82
+ return (
83
+ type(self) == type(other) and self._comparison_key == other._comparison_key
84
+ )
85
+
86
+ def __ne__(self, other):
87
+ return not self == other
88
+
89
+ def __lt__(self, other):
90
+ if not isinstance(other, DependencySpan):
91
+ raise_unorderable_types("<", self, other)
92
+ return self._comparison_key < other._comparison_key
93
+
94
+ def __hash__(self):
95
+ """
96
+ :return: The hash value of this ``DependencySpan``.
97
+ """
98
+ return self._hash
99
+
100
+
101
+ #################################################################
102
+ # Chart Cell
103
+ #################################################################
104
+
105
+
106
+ class ChartCell:
107
+ """
108
+ A cell from the parse chart formed when performing the CYK algorithm.
109
+ Each cell keeps track of its x and y coordinates (though this will probably
110
+ be discarded), and a list of spans serving as the cell's entries.
111
+ """
112
+
113
+ def __init__(self, x, y):
114
+ """
115
+ :param x: This cell's x coordinate.
116
+ :type x: int.
117
+ :param y: This cell's y coordinate.
118
+ :type y: int.
119
+ """
120
+ self._x = x
121
+ self._y = y
122
+ self._entries = set()
123
+
124
+ def add(self, span):
125
+ """
126
+ Appends the given span to the list of spans
127
+ representing the chart cell's entries.
128
+
129
+ :param span: The span to add.
130
+ :type span: DependencySpan
131
+ """
132
+ self._entries.add(span)
133
+
134
+ def __str__(self):
135
+ """
136
+ :return: A verbose string representation of this ``ChartCell``.
137
+ :rtype: str.
138
+ """
139
+ return "CC[%d,%d]: %s" % (self._x, self._y, self._entries)
140
+
141
+ def __repr__(self):
142
+ """
143
+ :return: A concise string representation of this ``ChartCell``.
144
+ :rtype: str.
145
+ """
146
+ return "%s" % self
147
+
148
+
149
+ #################################################################
150
+ # Parsing with Dependency Grammars
151
+ #################################################################
152
+
153
+
154
+ class ProjectiveDependencyParser:
155
+ """
156
+ A projective, rule-based dependency parser. A ProjectiveDependencyParser
157
+ is created with a DependencyGrammar, a set of productions specifying
158
+ word-to-word dependency relations. The parse() method will then
159
+ return the set of all parses, in tree representation, for a given input
160
+ sequence of tokens. Each parse must meet the requirements of the both
161
+ the grammar and the projectivity constraint which specifies that the
162
+ branches of the dependency tree are not allowed to cross. Alternatively,
163
+ this can be understood as stating that each parent node and its children
164
+ in the parse tree form a continuous substring of the input sequence.
165
+ """
166
+
167
+ def __init__(self, dependency_grammar):
168
+ """
169
+ Create a new ProjectiveDependencyParser, from a word-to-word
170
+ dependency grammar ``DependencyGrammar``.
171
+
172
+ :param dependency_grammar: A word-to-word relation dependencygrammar.
173
+ :type dependency_grammar: DependencyGrammar
174
+ """
175
+ self._grammar = dependency_grammar
176
+
177
+ def parse(self, tokens):
178
+ """
179
+ Performs a projective dependency parse on the list of tokens using
180
+ a chart-based, span-concatenation algorithm similar to Eisner (1996).
181
+
182
+ :param tokens: The list of input tokens.
183
+ :type tokens: list(str)
184
+ :return: An iterator over parse trees.
185
+ :rtype: iter(Tree)
186
+ """
187
+ self._tokens = list(tokens)
188
+ chart = []
189
+ for i in range(0, len(self._tokens) + 1):
190
+ chart.append([])
191
+ for j in range(0, len(self._tokens) + 1):
192
+ chart[i].append(ChartCell(i, j))
193
+ if i == j + 1:
194
+ chart[i][j].add(DependencySpan(i - 1, i, i - 1, [-1], ["null"]))
195
+
196
+ for i in range(1, len(self._tokens) + 1):
197
+ for j in range(i - 2, -1, -1):
198
+ for k in range(i - 1, j, -1):
199
+ for span1 in chart[k][j]._entries:
200
+ for span2 in chart[i][k]._entries:
201
+ for newspan in self.concatenate(span1, span2):
202
+ chart[i][j].add(newspan)
203
+
204
+ for parse in chart[len(self._tokens)][0]._entries:
205
+ conll_format = ""
206
+ # malt_format = ""
207
+ for i in range(len(tokens)):
208
+ # malt_format += '%s\t%s\t%d\t%s\n' % (tokens[i], 'null', parse._arcs[i] + 1, 'null')
209
+ # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], 'null', 'null', 'null', parse._arcs[i] + 1, 'null', '-', '-')
210
+ # Modified to comply with the new DependencyGraph requirement (there must be at least one ROOT element)
211
+ conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % (
212
+ i + 1,
213
+ tokens[i],
214
+ tokens[i],
215
+ "null",
216
+ "null",
217
+ "null",
218
+ parse._arcs[i] + 1,
219
+ "ROOT",
220
+ "-",
221
+ "-",
222
+ )
223
+ dg = DependencyGraph(conll_format)
224
+ # if self.meets_arity(dg):
225
+ yield dg.tree()
226
+
227
+ def concatenate(self, span1, span2):
228
+ """
229
+ Concatenates the two spans in whichever way possible. This
230
+ includes rightward concatenation (from the leftmost word of the
231
+ leftmost span to the rightmost word of the rightmost span) and
232
+ leftward concatenation (vice-versa) between adjacent spans. Unlike
233
+ Eisner's presentation of span concatenation, these spans do not
234
+ share or pivot on a particular word/word-index.
235
+
236
+ :return: A list of new spans formed through concatenation.
237
+ :rtype: list(DependencySpan)
238
+ """
239
+ spans = []
240
+ if span1._start_index == span2._start_index:
241
+ print("Error: Mismatched spans - replace this with thrown error")
242
+ if span1._start_index > span2._start_index:
243
+ temp_span = span1
244
+ span1 = span2
245
+ span2 = temp_span
246
+ # adjacent rightward covered concatenation
247
+ new_arcs = span1._arcs + span2._arcs
248
+ new_tags = span1._tags + span2._tags
249
+ if self._grammar.contains(
250
+ self._tokens[span1._head_index], self._tokens[span2._head_index]
251
+ ):
252
+ # print('Performing rightward cover %d to %d' % (span1._head_index, span2._head_index))
253
+ new_arcs[span2._head_index - span1._start_index] = span1._head_index
254
+ spans.append(
255
+ DependencySpan(
256
+ span1._start_index,
257
+ span2._end_index,
258
+ span1._head_index,
259
+ new_arcs,
260
+ new_tags,
261
+ )
262
+ )
263
+ # adjacent leftward covered concatenation
264
+ new_arcs = span1._arcs + span2._arcs
265
+ if self._grammar.contains(
266
+ self._tokens[span2._head_index], self._tokens[span1._head_index]
267
+ ):
268
+ # print('performing leftward cover %d to %d' % (span2._head_index, span1._head_index))
269
+ new_arcs[span1._head_index - span1._start_index] = span2._head_index
270
+ spans.append(
271
+ DependencySpan(
272
+ span1._start_index,
273
+ span2._end_index,
274
+ span2._head_index,
275
+ new_arcs,
276
+ new_tags,
277
+ )
278
+ )
279
+ return spans
280
+
281
+
282
+ #################################################################
283
+ # Parsing with Probabilistic Dependency Grammars
284
+ #################################################################
285
+
286
+
287
+ class ProbabilisticProjectiveDependencyParser:
288
+ """A probabilistic, projective dependency parser.
289
+
290
+ This parser returns the most probable projective parse derived from the
291
+ probabilistic dependency grammar derived from the train() method. The
292
+ probabilistic model is an implementation of Eisner's (1996) Model C, which
293
+ conditions on head-word, head-tag, child-word, and child-tag. The decoding
294
+ uses a bottom-up chart-based span concatenation algorithm that's identical
295
+ to the one utilized by the rule-based projective parser.
296
+
297
+ Usage example
298
+
299
+ >>> from nltk.parse.dependencygraph import conll_data2
300
+
301
+ >>> graphs = [
302
+ ... DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry
303
+ ... ]
304
+
305
+ >>> ppdp = ProbabilisticProjectiveDependencyParser()
306
+ >>> ppdp.train(graphs)
307
+
308
+ >>> sent = ['Cathy', 'zag', 'hen', 'wild', 'zwaaien', '.']
309
+ >>> list(ppdp.parse(sent))
310
+ [Tree('zag', ['Cathy', 'hen', Tree('zwaaien', ['wild', '.'])])]
311
+
312
+ """
313
+
314
+ def __init__(self):
315
+ """
316
+ Create a new probabilistic dependency parser. No additional
317
+ operations are necessary.
318
+ """
319
+
320
+ def parse(self, tokens):
321
+ """
322
+ Parses the list of tokens subject to the projectivity constraint
323
+ and the productions in the parser's grammar. This uses a method
324
+ similar to the span-concatenation algorithm defined in Eisner (1996).
325
+ It returns the most probable parse derived from the parser's
326
+ probabilistic dependency grammar.
327
+ """
328
+ self._tokens = list(tokens)
329
+ chart = []
330
+ for i in range(0, len(self._tokens) + 1):
331
+ chart.append([])
332
+ for j in range(0, len(self._tokens) + 1):
333
+ chart[i].append(ChartCell(i, j))
334
+ if i == j + 1:
335
+ if tokens[i - 1] in self._grammar._tags:
336
+ for tag in self._grammar._tags[tokens[i - 1]]:
337
+ chart[i][j].add(
338
+ DependencySpan(i - 1, i, i - 1, [-1], [tag])
339
+ )
340
+ else:
341
+ print(
342
+ "No tag found for input token '%s', parse is impossible."
343
+ % tokens[i - 1]
344
+ )
345
+ return []
346
+ for i in range(1, len(self._tokens) + 1):
347
+ for j in range(i - 2, -1, -1):
348
+ for k in range(i - 1, j, -1):
349
+ for span1 in chart[k][j]._entries:
350
+ for span2 in chart[i][k]._entries:
351
+ for newspan in self.concatenate(span1, span2):
352
+ chart[i][j].add(newspan)
353
+ trees = []
354
+ max_parse = None
355
+ max_score = 0
356
+ for parse in chart[len(self._tokens)][0]._entries:
357
+ conll_format = ""
358
+ malt_format = ""
359
+ for i in range(len(tokens)):
360
+ malt_format += "%s\t%s\t%d\t%s\n" % (
361
+ tokens[i],
362
+ "null",
363
+ parse._arcs[i] + 1,
364
+ "null",
365
+ )
366
+ # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], parse._tags[i], parse._tags[i], 'null', parse._arcs[i] + 1, 'null', '-', '-')
367
+ # Modify to comply with recent change in dependency graph such that there must be a ROOT element.
368
+ conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % (
369
+ i + 1,
370
+ tokens[i],
371
+ tokens[i],
372
+ parse._tags[i],
373
+ parse._tags[i],
374
+ "null",
375
+ parse._arcs[i] + 1,
376
+ "ROOT",
377
+ "-",
378
+ "-",
379
+ )
380
+ dg = DependencyGraph(conll_format)
381
+ score = self.compute_prob(dg)
382
+ trees.append((score, dg.tree()))
383
+ trees.sort()
384
+ return (tree for (score, tree) in trees)
385
+
386
+ def concatenate(self, span1, span2):
387
+ """
388
+ Concatenates the two spans in whichever way possible. This
389
+ includes rightward concatenation (from the leftmost word of the
390
+ leftmost span to the rightmost word of the rightmost span) and
391
+ leftward concatenation (vice-versa) between adjacent spans. Unlike
392
+ Eisner's presentation of span concatenation, these spans do not
393
+ share or pivot on a particular word/word-index.
394
+
395
+ :return: A list of new spans formed through concatenation.
396
+ :rtype: list(DependencySpan)
397
+ """
398
+ spans = []
399
+ if span1._start_index == span2._start_index:
400
+ print("Error: Mismatched spans - replace this with thrown error")
401
+ if span1._start_index > span2._start_index:
402
+ temp_span = span1
403
+ span1 = span2
404
+ span2 = temp_span
405
+ # adjacent rightward covered concatenation
406
+ new_arcs = span1._arcs + span2._arcs
407
+ new_tags = span1._tags + span2._tags
408
+ if self._grammar.contains(
409
+ self._tokens[span1._head_index], self._tokens[span2._head_index]
410
+ ):
411
+ new_arcs[span2._head_index - span1._start_index] = span1._head_index
412
+ spans.append(
413
+ DependencySpan(
414
+ span1._start_index,
415
+ span2._end_index,
416
+ span1._head_index,
417
+ new_arcs,
418
+ new_tags,
419
+ )
420
+ )
421
+ # adjacent leftward covered concatenation
422
+ new_arcs = span1._arcs + span2._arcs
423
+ new_tags = span1._tags + span2._tags
424
+ if self._grammar.contains(
425
+ self._tokens[span2._head_index], self._tokens[span1._head_index]
426
+ ):
427
+ new_arcs[span1._head_index - span1._start_index] = span2._head_index
428
+ spans.append(
429
+ DependencySpan(
430
+ span1._start_index,
431
+ span2._end_index,
432
+ span2._head_index,
433
+ new_arcs,
434
+ new_tags,
435
+ )
436
+ )
437
+ return spans
438
+
439
+ def train(self, graphs):
440
+ """
441
+ Trains a ProbabilisticDependencyGrammar based on the list of input
442
+ DependencyGraphs. This model is an implementation of Eisner's (1996)
443
+ Model C, which derives its statistics from head-word, head-tag,
444
+ child-word, and child-tag relationships.
445
+
446
+ :param graphs: A list of dependency graphs to train from.
447
+ :type graphs: list(DependencyGraph)
448
+ """
449
+ productions = []
450
+ events = defaultdict(int)
451
+ tags = {}
452
+ for dg in graphs:
453
+ for node_index in range(1, len(dg.nodes)):
454
+ # children = dg.nodes[node_index]['deps']
455
+ children = list(
456
+ chain.from_iterable(dg.nodes[node_index]["deps"].values())
457
+ )
458
+
459
+ nr_left_children = dg.left_children(node_index)
460
+ nr_right_children = dg.right_children(node_index)
461
+ nr_children = nr_left_children + nr_right_children
462
+ for child_index in range(
463
+ 0 - (nr_left_children + 1), nr_right_children + 2
464
+ ):
465
+ head_word = dg.nodes[node_index]["word"]
466
+ head_tag = dg.nodes[node_index]["tag"]
467
+ if head_word in tags:
468
+ tags[head_word].add(head_tag)
469
+ else:
470
+ tags[head_word] = {head_tag}
471
+ child = "STOP"
472
+ child_tag = "STOP"
473
+ prev_word = "START"
474
+ prev_tag = "START"
475
+ if child_index < 0:
476
+ array_index = child_index + nr_left_children
477
+ if array_index >= 0:
478
+ child = dg.nodes[children[array_index]]["word"]
479
+ child_tag = dg.nodes[children[array_index]]["tag"]
480
+ if child_index != -1:
481
+ prev_word = dg.nodes[children[array_index + 1]]["word"]
482
+ prev_tag = dg.nodes[children[array_index + 1]]["tag"]
483
+ if child != "STOP":
484
+ productions.append(DependencyProduction(head_word, [child]))
485
+ head_event = "(head ({} {}) (mods ({}, {}, {}) left))".format(
486
+ child,
487
+ child_tag,
488
+ prev_tag,
489
+ head_word,
490
+ head_tag,
491
+ )
492
+ mod_event = "(mods ({}, {}, {}) left))".format(
493
+ prev_tag,
494
+ head_word,
495
+ head_tag,
496
+ )
497
+ events[head_event] += 1
498
+ events[mod_event] += 1
499
+ elif child_index > 0:
500
+ array_index = child_index + nr_left_children - 1
501
+ if array_index < nr_children:
502
+ child = dg.nodes[children[array_index]]["word"]
503
+ child_tag = dg.nodes[children[array_index]]["tag"]
504
+ if child_index != 1:
505
+ prev_word = dg.nodes[children[array_index - 1]]["word"]
506
+ prev_tag = dg.nodes[children[array_index - 1]]["tag"]
507
+ if child != "STOP":
508
+ productions.append(DependencyProduction(head_word, [child]))
509
+ head_event = "(head ({} {}) (mods ({}, {}, {}) right))".format(
510
+ child,
511
+ child_tag,
512
+ prev_tag,
513
+ head_word,
514
+ head_tag,
515
+ )
516
+ mod_event = "(mods ({}, {}, {}) right))".format(
517
+ prev_tag,
518
+ head_word,
519
+ head_tag,
520
+ )
521
+ events[head_event] += 1
522
+ events[mod_event] += 1
523
+ self._grammar = ProbabilisticDependencyGrammar(productions, events, tags)
524
+
525
+ def compute_prob(self, dg):
526
+ """
527
+ Computes the probability of a dependency graph based
528
+ on the parser's probability model (defined by the parser's
529
+ statistical dependency grammar).
530
+
531
+ :param dg: A dependency graph to score.
532
+ :type dg: DependencyGraph
533
+ :return: The probability of the dependency graph.
534
+ :rtype: float
535
+ """
536
+ prob = 1.0
537
+ for node_index in range(1, len(dg.nodes)):
538
+ # children = dg.nodes[node_index]['deps']
539
+ children = list(chain.from_iterable(dg.nodes[node_index]["deps"].values()))
540
+
541
+ nr_left_children = dg.left_children(node_index)
542
+ nr_right_children = dg.right_children(node_index)
543
+ nr_children = nr_left_children + nr_right_children
544
+ for child_index in range(0 - (nr_left_children + 1), nr_right_children + 2):
545
+ head_word = dg.nodes[node_index]["word"]
546
+ head_tag = dg.nodes[node_index]["tag"]
547
+ child = "STOP"
548
+ child_tag = "STOP"
549
+ prev_word = "START"
550
+ prev_tag = "START"
551
+ if child_index < 0:
552
+ array_index = child_index + nr_left_children
553
+ if array_index >= 0:
554
+ child = dg.nodes[children[array_index]]["word"]
555
+ child_tag = dg.nodes[children[array_index]]["tag"]
556
+ if child_index != -1:
557
+ prev_word = dg.nodes[children[array_index + 1]]["word"]
558
+ prev_tag = dg.nodes[children[array_index + 1]]["tag"]
559
+ head_event = "(head ({} {}) (mods ({}, {}, {}) left))".format(
560
+ child,
561
+ child_tag,
562
+ prev_tag,
563
+ head_word,
564
+ head_tag,
565
+ )
566
+ mod_event = "(mods ({}, {}, {}) left))".format(
567
+ prev_tag,
568
+ head_word,
569
+ head_tag,
570
+ )
571
+ h_count = self._grammar._events[head_event]
572
+ m_count = self._grammar._events[mod_event]
573
+
574
+ # If the modifier context was never observed in training
575
+ if m_count != 0:
576
+ prob *= h_count / m_count
577
+ else:
578
+ prob = 0.00000001 # Very small number
579
+
580
+ elif child_index > 0:
581
+ array_index = child_index + nr_left_children - 1
582
+ if array_index < nr_children:
583
+ child = dg.nodes[children[array_index]]["word"]
584
+ child_tag = dg.nodes[children[array_index]]["tag"]
585
+ if child_index != 1:
586
+ prev_word = dg.nodes[children[array_index - 1]]["word"]
587
+ prev_tag = dg.nodes[children[array_index - 1]]["tag"]
588
+ head_event = "(head ({} {}) (mods ({}, {}, {}) right))".format(
589
+ child,
590
+ child_tag,
591
+ prev_tag,
592
+ head_word,
593
+ head_tag,
594
+ )
595
+ mod_event = "(mods ({}, {}, {}) right))".format(
596
+ prev_tag,
597
+ head_word,
598
+ head_tag,
599
+ )
600
+ h_count = self._grammar._events[head_event]
601
+ m_count = self._grammar._events[mod_event]
602
+
603
+ if m_count != 0:
604
+ prob *= h_count / m_count
605
+ else:
606
+ prob = 0.00000001 # Very small number
607
+
608
+ return prob
609
+
610
+
611
+ #################################################################
612
+ # Demos
613
+ #################################################################
614
+
615
+
616
+ def demo():
617
+ projective_rule_parse_demo()
618
+ # arity_parse_demo()
619
+ projective_prob_parse_demo()
620
+
621
+
622
+ def projective_rule_parse_demo():
623
+ """
624
+ A demonstration showing the creation and use of a
625
+ ``DependencyGrammar`` to perform a projective dependency
626
+ parse.
627
+ """
628
+ grammar = DependencyGrammar.fromstring(
629
+ """
630
+ 'scratch' -> 'cats' | 'walls'
631
+ 'walls' -> 'the'
632
+ 'cats' -> 'the'
633
+ """
634
+ )
635
+ print(grammar)
636
+ pdp = ProjectiveDependencyParser(grammar)
637
+ trees = pdp.parse(["the", "cats", "scratch", "the", "walls"])
638
+ for tree in trees:
639
+ print(tree)
640
+
641
+
642
+ def arity_parse_demo():
643
+ """
644
+ A demonstration showing the creation of a ``DependencyGrammar``
645
+ in which a specific number of modifiers is listed for a given
646
+ head. This can further constrain the number of possible parses
647
+ created by a ``ProjectiveDependencyParser``.
648
+ """
649
+ print()
650
+ print("A grammar with no arity constraints. Each DependencyProduction")
651
+ print("specifies a relationship between one head word and only one")
652
+ print("modifier word.")
653
+ grammar = DependencyGrammar.fromstring(
654
+ """
655
+ 'fell' -> 'price' | 'stock'
656
+ 'price' -> 'of' | 'the'
657
+ 'of' -> 'stock'
658
+ 'stock' -> 'the'
659
+ """
660
+ )
661
+ print(grammar)
662
+
663
+ print()
664
+ print("For the sentence 'The price of the stock fell', this grammar")
665
+ print("will produce the following three parses:")
666
+ pdp = ProjectiveDependencyParser(grammar)
667
+ trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"])
668
+ for tree in trees:
669
+ print(tree)
670
+
671
+ print()
672
+ print("By contrast, the following grammar contains a ")
673
+ print("DependencyProduction that specifies a relationship")
674
+ print("between a single head word, 'price', and two modifier")
675
+ print("words, 'of' and 'the'.")
676
+ grammar = DependencyGrammar.fromstring(
677
+ """
678
+ 'fell' -> 'price' | 'stock'
679
+ 'price' -> 'of' 'the'
680
+ 'of' -> 'stock'
681
+ 'stock' -> 'the'
682
+ """
683
+ )
684
+ print(grammar)
685
+
686
+ print()
687
+ print(
688
+ "This constrains the number of possible parses to just one:"
689
+ ) # unimplemented, soon to replace
690
+ pdp = ProjectiveDependencyParser(grammar)
691
+ trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"])
692
+ for tree in trees:
693
+ print(tree)
694
+
695
+
696
+ def projective_prob_parse_demo():
697
+ """
698
+ A demo showing the training and use of a projective
699
+ dependency parser.
700
+ """
701
+ from nltk.parse.dependencygraph import conll_data2
702
+
703
+ graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
704
+ ppdp = ProbabilisticProjectiveDependencyParser()
705
+ print("Training Probabilistic Projective Dependency Parser...")
706
+ ppdp.train(graphs)
707
+
708
+ sent = ["Cathy", "zag", "hen", "wild", "zwaaien", "."]
709
+ print("Parsing '", " ".join(sent), "'...")
710
+ print("Parse:")
711
+ for tree in ppdp.parse(sent):
712
+ print(tree)
713
+
714
+
715
+ if __name__ == "__main__":
716
+ demo()
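
The scoring in compute_prob() above boils down to a ratio of head-event counts to modifier-context counts, with a tiny constant as a fallback for unseen contexts. Below is a minimal sketch of that calculation; the event strings and counts are hypothetical and only mirror the string format that train() builds, not values from any real grammar.

from collections import defaultdict

# Hypothetical event counts, in the same string format that train() emits.
events = defaultdict(int)
events["(head (hen NN) (mods (START, zag, V) right))"] = 2
events["(mods (START, zag, V) right))"] = 3

def attachment_prob(head_event, mod_event, events, fallback=0.00000001):
    # Ratio of head-event count to modifier-context count, mirroring
    # compute_prob(); fall back to a tiny constant for unseen contexts.
    m_count = events[mod_event]
    return events[head_event] / m_count if m_count else fallback

print(attachment_prob(
    "(head (hen NN) (mods (START, zag, V) right))",
    "(mods (START, zag, V) right))",
    events,
))  # 0.666... under the hypothetical counts above
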
env-llmeval/lib/python3.10/site-packages/nltk/parse/recursivedescent.py ADDED
@@ -0,0 +1,684 @@
1
+ # Natural Language Toolkit: Recursive Descent Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from nltk.grammar import Nonterminal
10
+ from nltk.parse.api import ParserI
11
+ from nltk.tree import ImmutableTree, Tree
12
+
13
+
14
+ ##//////////////////////////////////////////////////////
15
+ ## Recursive Descent Parser
16
+ ##//////////////////////////////////////////////////////
17
+ class RecursiveDescentParser(ParserI):
18
+ """
19
+ A simple top-down CFG parser that parses texts by recursively
20
+ expanding the fringe of a Tree, and matching it against a
21
+ text.
22
+
23
+ ``RecursiveDescentParser`` uses a list of tree locations called a
24
+ "frontier" to remember which subtrees have not yet been expanded
25
+ and which leaves have not yet been matched against the text. Each
26
+ tree location consists of a list of child indices specifying the
27
+ path from the root of the tree to a subtree or a leaf; see the
28
+ reference documentation for Tree for more information
29
+ about tree locations.
30
+
31
+ When the parser begins parsing a text, it constructs a tree
32
+ containing only the start symbol, and a frontier containing the
33
+ location of the tree's root node. It then extends the tree to
34
+ cover the text, using the following recursive procedure:
35
+
36
+ - If the frontier is empty, and the text is covered by the tree,
37
+ then return the tree as a possible parse.
38
+ - If the frontier is empty, and the text is not covered by the
39
+ tree, then return no parses.
40
+ - If the first element of the frontier is a subtree, then
41
+ use CFG productions to "expand" it. For each applicable
42
+ production, add the expanded subtree's children to the
43
+ frontier, and recursively find all parses that can be
44
+ generated by the new tree and frontier.
45
+ - If the first element of the frontier is a token, then "match"
46
+ it against the next token from the text. Remove the token
47
+ from the frontier, and recursively find all parses that can be
48
+ generated by the new tree and frontier.
49
+
50
+ :see: ``nltk.grammar``
51
+ """
52
+
53
+ def __init__(self, grammar, trace=0):
54
+ """
55
+ Create a new ``RecursiveDescentParser``, that uses ``grammar``
56
+ to parse texts.
57
+
58
+ :type grammar: CFG
59
+ :param grammar: The grammar used to parse texts.
60
+ :type trace: int
61
+ :param trace: The level of tracing that should be used when
62
+ parsing a text. ``0`` will generate no tracing output;
63
+ and higher numbers will produce more verbose tracing
64
+ output.
65
+ """
66
+ self._grammar = grammar
67
+ self._trace = trace
68
+
69
+ def grammar(self):
70
+ return self._grammar
71
+
72
+ def parse(self, tokens):
73
+ # Inherit docs from ParserI
74
+
75
+ tokens = list(tokens)
76
+ self._grammar.check_coverage(tokens)
77
+
78
+ # Start a recursive descent parse, with an initial tree
79
+ # containing just the start symbol.
80
+ start = self._grammar.start().symbol()
81
+ initial_tree = Tree(start, [])
82
+ frontier = [()]
83
+ if self._trace:
84
+ self._trace_start(initial_tree, frontier, tokens)
85
+ return self._parse(tokens, initial_tree, frontier)
86
+
87
+ def _parse(self, remaining_text, tree, frontier):
88
+ """
89
+ Recursively expand and match each element of ``tree``
90
+ specified by ``frontier``, to cover ``remaining_text``. Return
91
+ a list of all parses found.
92
+
93
+ :return: An iterator of all parses that can be generated by
94
+ matching and expanding the elements of ``tree``
95
+ specified by ``frontier``.
96
+ :rtype: iter(Tree)
97
+ :type tree: Tree
98
+ :param tree: A partial structure for the text that is
99
+ currently being parsed. The elements of ``tree``
100
+ that are specified by ``frontier`` have not yet been
101
+ expanded or matched.
102
+ :type remaining_text: list(str)
103
+ :param remaining_text: The portion of the text that is not yet
104
+ covered by ``tree``.
105
+ :type frontier: list(tuple(int))
106
+ :param frontier: A list of the locations within ``tree`` of
107
+ all subtrees that have not yet been expanded, and all
108
+ leaves that have not yet been matched. This list sorted
109
+ in left-to-right order of location within the tree.
110
+ """
111
+
112
+ # If the tree covers the text, and there's nothing left to
113
+ # expand, then we've found a complete parse; return it.
114
+ if len(remaining_text) == 0 and len(frontier) == 0:
115
+ if self._trace:
116
+ self._trace_succeed(tree, frontier)
117
+ yield tree
118
+
119
+ # If there's still text, but nothing left to expand, we failed.
120
+ elif len(frontier) == 0:
121
+ if self._trace:
122
+ self._trace_backtrack(tree, frontier)
123
+
124
+ # If the next element on the frontier is a tree, expand it.
125
+ elif isinstance(tree[frontier[0]], Tree):
126
+ yield from self._expand(remaining_text, tree, frontier)
127
+
128
+ # If the next element on the frontier is a token, match it.
129
+ else:
130
+ yield from self._match(remaining_text, tree, frontier)
131
+
132
+ def _match(self, rtext, tree, frontier):
133
+ """
134
+ :rtype: iter(Tree)
135
+ :return: an iterator of all parses that can be generated by
136
+ matching the first element of ``frontier`` against the
137
+ first token in ``rtext``. In particular, if the first
138
+ element of ``frontier`` has the same type as the first
139
+ token in ``rtext``, then substitute the token into
140
+ ``tree``; and return all parses that can be generated by
141
+ matching and expanding the remaining elements of
142
+ ``frontier``. If the first element of ``frontier`` does not
143
+ have the same type as the first token in ``rtext``, then
144
+ return an empty list.
145
+
146
+ :type tree: Tree
147
+ :param tree: A partial structure for the text that is
148
+ currently being parsed. The elements of ``tree``
149
+ that are specified by ``frontier`` have not yet been
150
+ expanded or matched.
151
+ :type rtext: list(str)
152
+ :param rtext: The portion of the text that is not yet
153
+ covered by ``tree``.
154
+ :type frontier: list of tuple of int
155
+ :param frontier: A list of the locations within ``tree`` of
156
+ all subtrees that have not yet been expanded, and all
157
+ leaves that have not yet been matched.
158
+ """
159
+
160
+ tree_leaf = tree[frontier[0]]
161
+ if len(rtext) > 0 and tree_leaf == rtext[0]:
162
+ # If it's a terminal that matches rtext[0], then substitute
163
+ # in the token, and continue parsing.
164
+ newtree = tree.copy(deep=True)
165
+ newtree[frontier[0]] = rtext[0]
166
+ if self._trace:
167
+ self._trace_match(newtree, frontier[1:], rtext[0])
168
+ yield from self._parse(rtext[1:], newtree, frontier[1:])
169
+ else:
170
+ # If it's a non-matching terminal, fail.
171
+ if self._trace:
172
+ self._trace_backtrack(tree, frontier, rtext[:1])
173
+
174
+ def _expand(self, remaining_text, tree, frontier, production=None):
175
+ """
176
+ :rtype: iter(Tree)
177
+ :return: An iterator of all parses that can be generated by
178
+ expanding the first element of ``frontier`` with
179
+ ``production``. In particular, if the first element of
180
+ ``frontier`` is a subtree whose node type is equal to
181
+ ``production``'s left hand side, then add a child to that
182
+ subtree for each element of ``production``'s right hand
183
+ side; and return all parses that can be generated by
184
+ matching and expanding the remaining elements of
185
+ ``frontier``. If the first element of ``frontier`` is not a
186
+ subtree whose node type is equal to ``production``'s left
187
+ hand side, then return an empty list. If ``production`` is
188
+ not specified, then return a list of all parses that can
189
+ be generated by expanding the first element of ``frontier``
190
+ with *any* CFG production.
191
+
192
+ :type tree: Tree
193
+ :param tree: A partial structure for the text that is
194
+ currently being parsed. The elements of ``tree``
195
+ that are specified by ``frontier`` have not yet been
196
+ expanded or matched.
197
+ :type remaining_text: list(str)
198
+ :param remaining_text: The portion of the text that is not yet
199
+ covered by ``tree``.
200
+ :type frontier: list(tuple(int))
201
+ :param frontier: A list of the locations within ``tree`` of
202
+ all subtrees that have not yet been expanded, and all
203
+ leaves that have not yet been matched.
204
+ """
205
+
206
+ if production is None:
207
+ productions = self._grammar.productions()
208
+ else:
209
+ productions = [production]
210
+
211
+ for production in productions:
212
+ lhs = production.lhs().symbol()
213
+ if lhs == tree[frontier[0]].label():
214
+ subtree = self._production_to_tree(production)
215
+ if frontier[0] == ():
216
+ newtree = subtree
217
+ else:
218
+ newtree = tree.copy(deep=True)
219
+ newtree[frontier[0]] = subtree
220
+ new_frontier = [
221
+ frontier[0] + (i,) for i in range(len(production.rhs()))
222
+ ]
223
+ if self._trace:
224
+ self._trace_expand(newtree, new_frontier, production)
225
+ yield from self._parse(
226
+ remaining_text, newtree, new_frontier + frontier[1:]
227
+ )
228
+
229
+ def _production_to_tree(self, production):
230
+ """
231
+ :rtype: Tree
232
+ :return: The Tree that is licensed by ``production``.
233
+ In particular, given the production ``[lhs -> elt[1] ... elt[n]]``
234
+ return a tree that has a node ``lhs.symbol``, and
235
+ ``n`` children. For each nonterminal element
236
+ ``elt[i]`` in the production, the tree token has a
237
+ childless subtree with node value ``elt[i].symbol``; and
238
+ for each terminal element ``elt[j]``, the tree token has
239
+ a leaf token with type ``elt[j]``.
240
+
241
+ :param production: The CFG production that licenses the tree
242
+ token that should be returned.
243
+ :type production: Production
244
+ """
245
+ children = []
246
+ for elt in production.rhs():
247
+ if isinstance(elt, Nonterminal):
248
+ children.append(Tree(elt.symbol(), []))
249
+ else:
250
+ # This will be matched.
251
+ children.append(elt)
252
+ return Tree(production.lhs().symbol(), children)
253
+
254
+ def trace(self, trace=2):
255
+ """
256
+ Set the level of tracing output that should be generated when
257
+ parsing a text.
258
+
259
+ :type trace: int
260
+ :param trace: The trace level. A trace level of ``0`` will
261
+ generate no tracing output; and higher trace levels will
262
+ produce more verbose tracing output.
263
+ :rtype: None
264
+ """
265
+ self._trace = trace
266
+
267
+ def _trace_fringe(self, tree, treeloc=None):
268
+ """
269
+ Print trace output displaying the fringe of ``tree``. The
270
+ fringe of ``tree`` consists of all of its leaves and all of
271
+ its childless subtrees.
272
+
273
+ :rtype: None
274
+ """
275
+
276
+ if treeloc == ():
277
+ print("*", end=" ")
278
+ if isinstance(tree, Tree):
279
+ if len(tree) == 0:
280
+ print(repr(Nonterminal(tree.label())), end=" ")
281
+ for i in range(len(tree)):
282
+ if treeloc is not None and i == treeloc[0]:
283
+ self._trace_fringe(tree[i], treeloc[1:])
284
+ else:
285
+ self._trace_fringe(tree[i])
286
+ else:
287
+ print(repr(tree), end=" ")
288
+
289
+ def _trace_tree(self, tree, frontier, operation):
290
+ """
291
+ Print trace output displaying the parser's current state.
292
+
293
+ :param operation: A character identifying the operation that
294
+ generated the current state.
295
+ :rtype: None
296
+ """
297
+ if self._trace == 2:
298
+ print(" %c [" % operation, end=" ")
299
+ else:
300
+ print(" [", end=" ")
301
+ if len(frontier) > 0:
302
+ self._trace_fringe(tree, frontier[0])
303
+ else:
304
+ self._trace_fringe(tree)
305
+ print("]")
306
+
307
+ def _trace_start(self, tree, frontier, text):
308
+ print("Parsing %r" % " ".join(text))
309
+ if self._trace > 2:
310
+ print("Start:")
311
+ if self._trace > 1:
312
+ self._trace_tree(tree, frontier, " ")
313
+
314
+ def _trace_expand(self, tree, frontier, production):
315
+ if self._trace > 2:
316
+ print("Expand: %s" % production)
317
+ if self._trace > 1:
318
+ self._trace_tree(tree, frontier, "E")
319
+
320
+ def _trace_match(self, tree, frontier, tok):
321
+ if self._trace > 2:
322
+ print("Match: %r" % tok)
323
+ if self._trace > 1:
324
+ self._trace_tree(tree, frontier, "M")
325
+
326
+ def _trace_succeed(self, tree, frontier):
327
+ if self._trace > 2:
328
+ print("GOOD PARSE:")
329
+ if self._trace == 1:
330
+ print("Found a parse:\n%s" % tree)
331
+ if self._trace > 1:
332
+ self._trace_tree(tree, frontier, "+")
333
+
334
+ def _trace_backtrack(self, tree, frontier, toks=None):
335
+ if self._trace > 2:
336
+ if toks:
337
+ print("Backtrack: %r match failed" % toks[0])
338
+ else:
339
+ print("Backtrack")
340
+
341
+
342
+ ##//////////////////////////////////////////////////////
343
+ ## Stepping Recursive Descent Parser
344
+ ##//////////////////////////////////////////////////////
345
+ class SteppingRecursiveDescentParser(RecursiveDescentParser):
346
+ """
347
+ A ``RecursiveDescentParser`` that allows you to step through the
348
+ parsing process, performing a single operation at a time.
349
+
350
+ The ``initialize`` method is used to start parsing a text.
351
+ ``expand`` expands the first element on the frontier using a single
352
+ CFG production, and ``match`` matches the first element on the
353
+ frontier against the next text token. ``backtrack`` undoes the most
354
+ recent expand or match operation. ``step`` performs a single
355
+ expand, match, or backtrack operation. ``parses`` returns the set
356
+ of parses that have been found by the parser.
357
+
358
+ :ivar _history: A list of ``(rtext, tree, frontier)`` triples,
359
+ containing the previous states of the parser. This history is
360
+ used to implement the ``backtrack`` operation.
361
+ :ivar _tried_e: A record of all productions that have been tried
362
+ for a given tree. This record is used by ``expand`` to perform
363
+ the next untried production.
364
+ :ivar _tried_m: A record of what tokens have been matched for a
365
+ given tree. This record is used by ``step`` to decide whether
366
+ or not to match a token.
367
+ :see: ``nltk.grammar``
368
+ """
369
+
370
+ def __init__(self, grammar, trace=0):
371
+ super().__init__(grammar, trace)
372
+ self._rtext = None
373
+ self._tree = None
374
+ self._frontier = [()]
375
+ self._tried_e = {}
376
+ self._tried_m = {}
377
+ self._history = []
378
+ self._parses = []
379
+
380
+ # [XX] TEMPORARY HACK WARNING! This should be replaced with
381
+ # something nicer when we get the chance.
382
+ def _freeze(self, tree):
383
+ c = tree.copy()
384
+ # for pos in c.treepositions('leaves'):
385
+ # c[pos] = c[pos].freeze()
386
+ return ImmutableTree.convert(c)
387
+
388
+ def parse(self, tokens):
389
+ tokens = list(tokens)
390
+ self.initialize(tokens)
391
+ while self.step() is not None:
392
+ pass
393
+ return self.parses()
394
+
395
+ def initialize(self, tokens):
396
+ """
397
+ Start parsing a given text. This sets the parser's tree to
398
+ the start symbol, its frontier to the root node, and its
399
+ remaining text to the given list of tokens.
400
+ """
401
+
402
+ self._rtext = tokens
403
+ start = self._grammar.start().symbol()
404
+ self._tree = Tree(start, [])
405
+ self._frontier = [()]
406
+ self._tried_e = {}
407
+ self._tried_m = {}
408
+ self._history = []
409
+ self._parses = []
410
+ if self._trace:
411
+ self._trace_start(self._tree, self._frontier, self._rtext)
412
+
413
+ def remaining_text(self):
414
+ """
415
+ :return: The portion of the text that is not yet covered by the
416
+ tree.
417
+ :rtype: list(str)
418
+ """
419
+ return self._rtext
420
+
421
+ def frontier(self):
422
+ """
423
+ :return: A list of the tree locations of all subtrees that
424
+ have not yet been expanded, and all leaves that have not
425
+ yet been matched.
426
+ :rtype: list(tuple(int))
427
+ """
428
+ return self._frontier
429
+
430
+ def tree(self):
431
+ """
432
+ :return: A partial structure for the text that is
433
+ currently being parsed. The elements specified by the
434
+ frontier have not yet been expanded or matched.
435
+ :rtype: Tree
436
+ """
437
+ return self._tree
438
+
439
+ def step(self):
440
+ """
441
+ Perform a single parsing operation. If an untried match is
442
+ possible, then perform the match, and return the matched
443
+ token. If an untried expansion is possible, then perform the
444
+ expansion, and return the production that it is based on. If
445
+ backtracking is possible, then backtrack, and return True.
446
+ Otherwise, return None.
447
+
448
+ :return: None if no operation was performed; a token if a match
449
+ was performed; a production if an expansion was performed;
450
+ and True if a backtrack operation was performed.
451
+ :rtype: Production or String or bool
452
+ """
453
+ # Try matching (if we haven't already)
454
+ if self.untried_match():
455
+ token = self.match()
456
+ if token is not None:
457
+ return token
458
+
459
+ # Try expanding.
460
+ production = self.expand()
461
+ if production is not None:
462
+ return production
463
+
464
+ # Try backtracking
465
+ if self.backtrack():
466
+ self._trace_backtrack(self._tree, self._frontier)
467
+ return True
468
+
469
+ # Nothing left to do.
470
+ return None
471
+
472
+ def expand(self, production=None):
473
+ """
474
+ Expand the first element of the frontier. In particular, if
475
+ the first element of the frontier is a subtree whose node type
476
+ is equal to ``production``'s left hand side, then add a child
477
+ to that subtree for each element of ``production``'s right hand
478
+ side. If ``production`` is not specified, then use the first
479
+ untried expandable production. If all expandable productions
480
+ have been tried, do nothing.
481
+
482
+ :return: The production used to expand the frontier, if an
483
+ expansion was performed. If no expansion was performed,
484
+ return None.
485
+ :rtype: Production or None
486
+ """
487
+
488
+ # Make sure we *can* expand.
489
+ if len(self._frontier) == 0:
490
+ return None
491
+ if not isinstance(self._tree[self._frontier[0]], Tree):
492
+ return None
493
+
494
+ # If they didn't specify a production, check all untried ones.
495
+ if production is None:
496
+ productions = self.untried_expandable_productions()
497
+ else:
498
+ productions = [production]
499
+
500
+ parses = []
501
+ for prod in productions:
502
+ # Record that we've tried this production now.
503
+ self._tried_e.setdefault(self._freeze(self._tree), []).append(prod)
504
+
505
+ # Try expanding.
506
+ for _result in self._expand(self._rtext, self._tree, self._frontier, prod):
507
+ return prod
508
+
509
+ # We didn't expand anything.
510
+ return None
511
+
512
+ def match(self):
513
+ """
514
+ Match the first element of the frontier. In particular, if
515
+ the first element of the frontier has the same type as the
516
+ next text token, then substitute the text token into the tree.
517
+
518
+ :return: The token matched, if a match operation was
519
+ performed. If no match was performed, return None
520
+ :rtype: str or None
521
+ """
522
+
523
+ # Record that we've tried matching this token.
524
+ tok = self._rtext[0]
525
+ self._tried_m.setdefault(self._freeze(self._tree), []).append(tok)
526
+
527
+ # Make sure we *can* match.
528
+ if len(self._frontier) == 0:
529
+ return None
530
+ if isinstance(self._tree[self._frontier[0]], Tree):
531
+ return None
532
+
533
+ for _result in self._match(self._rtext, self._tree, self._frontier):
534
+ # Return the token we just matched.
535
+ return self._history[-1][0][0]
536
+ return None
537
+
538
+ def backtrack(self):
539
+ """
540
+ Return the parser to its state before the most recent
541
+ match or expand operation. Calling ``backtrack`` repeatedly returns
542
+ the parser to successively earlier states. If no match or
543
+ expand operations have been performed, ``backtrack`` will make no
544
+ changes.
545
+
546
+ :return: true if an operation was successfully undone.
547
+ :rtype: bool
548
+ """
549
+ if len(self._history) == 0:
550
+ return False
551
+ (self._rtext, self._tree, self._frontier) = self._history.pop()
552
+ return True
553
+
554
+ def expandable_productions(self):
555
+ """
556
+ :return: A list of all the productions for which expansions
557
+ are available for the current parser state.
558
+ :rtype: list(Production)
559
+ """
560
+ # Make sure we *can* expand.
561
+ if len(self._frontier) == 0:
562
+ return []
563
+ frontier_child = self._tree[self._frontier[0]]
564
+ if len(self._frontier) == 0 or not isinstance(frontier_child, Tree):
565
+ return []
566
+
567
+ return [
568
+ p
569
+ for p in self._grammar.productions()
570
+ if p.lhs().symbol() == frontier_child.label()
571
+ ]
572
+
573
+ def untried_expandable_productions(self):
574
+ """
575
+ :return: A list of all the untried productions for which
576
+ expansions are available for the current parser state.
577
+ :rtype: list(Production)
578
+ """
579
+
580
+ tried_expansions = self._tried_e.get(self._freeze(self._tree), [])
581
+ return [p for p in self.expandable_productions() if p not in tried_expansions]
582
+
583
+ def untried_match(self):
584
+ """
585
+ :return: Whether the first element of the frontier is a token
586
+ that has not yet been matched.
587
+ :rtype: bool
588
+ """
589
+
590
+ if len(self._rtext) == 0:
591
+ return False
592
+ tried_matches = self._tried_m.get(self._freeze(self._tree), [])
593
+ return self._rtext[0] not in tried_matches
594
+
595
+ def currently_complete(self):
596
+ """
597
+ :return: Whether the parser's current state represents a
598
+ complete parse.
599
+ :rtype: bool
600
+ """
601
+ return len(self._frontier) == 0 and len(self._rtext) == 0
602
+
603
+ def _parse(self, remaining_text, tree, frontier):
604
+ """
605
+ A stub version of ``_parse`` that sets the parser's current
606
+ state to the given arguments. In ``RecursiveDescentParser``,
607
+ the ``_parse`` method is used to recursively continue parsing a
608
+ text. ``SteppingRecursiveDescentParser`` overrides it to
609
+ capture these recursive calls. It records the parser's old
610
+ state in the history (to allow for backtracking), and updates
611
+ the parser's new state using the given arguments. Finally, it
612
+ returns ``[1]``, which is used by ``match`` and ``expand`` to
613
+ detect whether their operations were successful.
614
+
615
+ :return: ``[1]``
616
+ :rtype: list of int
617
+ """
618
+ self._history.append((self._rtext, self._tree, self._frontier))
619
+ self._rtext = remaining_text
620
+ self._tree = tree
621
+ self._frontier = frontier
622
+
623
+ # Is it a good parse? If so, record it.
624
+ if len(frontier) == 0 and len(remaining_text) == 0:
625
+ self._parses.append(tree)
626
+ self._trace_succeed(self._tree, self._frontier)
627
+
628
+ return [1]
629
+
630
+ def parses(self):
631
+ """
632
+ :return: An iterator of the parses that have been found by this
633
+ parser so far.
634
+ :rtype: iter(Tree)
635
+ """
636
+ return iter(self._parses)
637
+
638
+ def set_grammar(self, grammar):
639
+ """
640
+ Change the grammar used to parse texts.
641
+
642
+ :param grammar: The new grammar.
643
+ :type grammar: CFG
644
+ """
645
+ self._grammar = grammar
646
+
647
+
648
+ ##//////////////////////////////////////////////////////
649
+ ## Demonstration Code
650
+ ##//////////////////////////////////////////////////////
651
+
652
+
653
+ def demo():
654
+ """
655
+ A demonstration of the recursive descent parser.
656
+ """
657
+
658
+ from nltk import CFG, parse
659
+
660
+ grammar = CFG.fromstring(
661
+ """
662
+ S -> NP VP
663
+ NP -> Det N | Det N PP
664
+ VP -> V NP | V NP PP
665
+ PP -> P NP
666
+ NP -> 'I'
667
+ N -> 'man' | 'park' | 'telescope' | 'dog'
668
+ Det -> 'the' | 'a'
669
+ P -> 'in' | 'with'
670
+ V -> 'saw'
671
+ """
672
+ )
673
+
674
+ for prod in grammar.productions():
675
+ print(prod)
676
+
677
+ sent = "I saw a man in the park".split()
678
+ parser = parse.RecursiveDescentParser(grammar, trace=2)
679
+ for p in parser.parse(sent):
680
+ print(p)
681
+
682
+
683
+ if __name__ == "__main__":
684
+ demo()
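
SteppingRecursiveDescentParser above is designed to be driven one operation at a time; its parse() method simply calls step() until nothing is left to try. A minimal sketch of driving it by hand, using a small illustrative grammar (not part of the module's demo):

from nltk import CFG
from nltk.parse.recursivedescent import SteppingRecursiveDescentParser

grammar = CFG.fromstring("""
    S -> NP VP
    NP -> 'I' | Det N
    VP -> V NP
    Det -> 'a'
    N -> 'dog'
    V -> 'saw'
""")

parser = SteppingRecursiveDescentParser(grammar)
parser.initialize("I saw a dog".split())

# Each step() performs a single expand, match, or backtrack operation and
# returns None once no further operation is possible.
while parser.step() is not None:
    pass

for tree in parser.parses():
    print(tree)  # (S (NP I) (VP (V saw) (NP (Det a) (N dog))))
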
env-llmeval/lib/python3.10/site-packages/nltk/parse/stanford.py ADDED
@@ -0,0 +1,470 @@
1
+ # Natural Language Toolkit: Interface to the Stanford Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Steven Xu <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import os
10
+ import tempfile
11
+ import warnings
12
+ from subprocess import PIPE
13
+
14
+ from nltk.internals import (
15
+ _java_options,
16
+ config_java,
17
+ find_jar_iter,
18
+ find_jars_within_path,
19
+ java,
20
+ )
21
+ from nltk.parse.api import ParserI
22
+ from nltk.parse.dependencygraph import DependencyGraph
23
+ from nltk.tree import Tree
24
+
25
+ _stanford_url = "https://nlp.stanford.edu/software/lex-parser.shtml"
26
+
27
+
28
+ class GenericStanfordParser(ParserI):
29
+ """Interface to the Stanford Parser"""
30
+
31
+ _MODEL_JAR_PATTERN = r"stanford-parser-(\d+)(\.(\d+))+-models\.jar"
32
+ _JAR = r"stanford-parser\.jar"
33
+ _MAIN_CLASS = "edu.stanford.nlp.parser.lexparser.LexicalizedParser"
34
+
35
+ _USE_STDIN = False
36
+ _DOUBLE_SPACED_OUTPUT = False
37
+
38
+ def __init__(
39
+ self,
40
+ path_to_jar=None,
41
+ path_to_models_jar=None,
42
+ model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz",
43
+ encoding="utf8",
44
+ verbose=False,
45
+ java_options="-mx4g",
46
+ corenlp_options="",
47
+ ):
48
+
49
+ # find the most recent code and model jar
50
+ stanford_jar = max(
51
+ find_jar_iter(
52
+ self._JAR,
53
+ path_to_jar,
54
+ env_vars=("STANFORD_PARSER", "STANFORD_CORENLP"),
55
+ searchpath=(),
56
+ url=_stanford_url,
57
+ verbose=verbose,
58
+ is_regex=True,
59
+ ),
60
+ key=lambda model_path: os.path.dirname(model_path),
61
+ )
62
+
63
+ model_jar = max(
64
+ find_jar_iter(
65
+ self._MODEL_JAR_PATTERN,
66
+ path_to_models_jar,
67
+ env_vars=("STANFORD_MODELS", "STANFORD_CORENLP"),
68
+ searchpath=(),
69
+ url=_stanford_url,
70
+ verbose=verbose,
71
+ is_regex=True,
72
+ ),
73
+ key=lambda model_path: os.path.dirname(model_path),
74
+ )
75
+
76
+ # self._classpath = (stanford_jar, model_jar)
77
+
78
+ # Adding logging jar files to classpath
79
+ stanford_dir = os.path.split(stanford_jar)[0]
80
+ self._classpath = tuple([model_jar] + find_jars_within_path(stanford_dir))
81
+
82
+ self.model_path = model_path
83
+ self._encoding = encoding
84
+ self.corenlp_options = corenlp_options
85
+ self.java_options = java_options
86
+
87
+ def _parse_trees_output(self, output_):
88
+ res = []
89
+ cur_lines = []
90
+ cur_trees = []
91
+ blank = False
92
+ for line in output_.splitlines(False):
93
+ if line == "":
94
+ if blank:
95
+ res.append(iter(cur_trees))
96
+ cur_trees = []
97
+ blank = False
98
+ elif self._DOUBLE_SPACED_OUTPUT:
99
+ cur_trees.append(self._make_tree("\n".join(cur_lines)))
100
+ cur_lines = []
101
+ blank = True
102
+ else:
103
+ res.append(iter([self._make_tree("\n".join(cur_lines))]))
104
+ cur_lines = []
105
+ else:
106
+ cur_lines.append(line)
107
+ blank = False
108
+ return iter(res)
109
+
110
+ def parse_sents(self, sentences, verbose=False):
111
+ """
112
+ Use StanfordParser to parse multiple sentences. Takes multiple sentences as a
113
+ list where each sentence is a list of words.
114
+ Each sentence will be automatically tagged with this StanfordParser instance's
115
+ tagger.
116
+ If whitespaces exists inside a token, then the token will be treated as
117
+ separate tokens.
118
+
119
+ :param sentences: Input sentences to parse
120
+ :type sentences: list(list(str))
121
+ :rtype: iter(iter(Tree))
122
+ """
123
+ cmd = [
124
+ self._MAIN_CLASS,
125
+ "-model",
126
+ self.model_path,
127
+ "-sentences",
128
+ "newline",
129
+ "-outputFormat",
130
+ self._OUTPUT_FORMAT,
131
+ "-tokenized",
132
+ "-escaper",
133
+ "edu.stanford.nlp.process.PTBEscapingProcessor",
134
+ ]
135
+ return self._parse_trees_output(
136
+ self._execute(
137
+ cmd, "\n".join(" ".join(sentence) for sentence in sentences), verbose
138
+ )
139
+ )
140
+
141
+ def raw_parse(self, sentence, verbose=False):
142
+ """
143
+ Use StanfordParser to parse a sentence. Takes a sentence as a string;
144
+ before parsing, it will be automatically tokenized and tagged by
145
+ the Stanford Parser.
146
+
147
+ :param sentence: Input sentence to parse
148
+ :type sentence: str
149
+ :rtype: iter(Tree)
150
+ """
151
+ return next(self.raw_parse_sents([sentence], verbose))
152
+
153
+ def raw_parse_sents(self, sentences, verbose=False):
154
+ """
155
+ Use StanfordParser to parse multiple sentences. Takes multiple sentences as a
156
+ list of strings.
157
+ Each sentence will be automatically tokenized and tagged by the Stanford Parser.
158
+
159
+ :param sentences: Input sentences to parse
160
+ :type sentences: list(str)
161
+ :rtype: iter(iter(Tree))
162
+ """
163
+ cmd = [
164
+ self._MAIN_CLASS,
165
+ "-model",
166
+ self.model_path,
167
+ "-sentences",
168
+ "newline",
169
+ "-outputFormat",
170
+ self._OUTPUT_FORMAT,
171
+ ]
172
+ return self._parse_trees_output(
173
+ self._execute(cmd, "\n".join(sentences), verbose)
174
+ )
175
+
176
+ def tagged_parse(self, sentence, verbose=False):
177
+ """
178
+ Use StanfordParser to parse a sentence. Takes a sentence as a list of
179
+ (word, tag) tuples; the sentence must have already been tokenized and
180
+ tagged.
181
+
182
+ :param sentence: Input sentence to parse
183
+ :type sentence: list(tuple(str, str))
184
+ :rtype: iter(Tree)
185
+ """
186
+ return next(self.tagged_parse_sents([sentence], verbose))
187
+
188
+ def tagged_parse_sents(self, sentences, verbose=False):
189
+ """
190
+ Use StanfordParser to parse multiple sentences. Takes multiple sentences
191
+ where each sentence is a list of (word, tag) tuples.
192
+ The sentences must have already been tokenized and tagged.
193
+
194
+ :param sentences: Input sentences to parse
195
+ :type sentences: list(list(tuple(str, str)))
196
+ :rtype: iter(iter(Tree))
197
+ """
198
+ tag_separator = "/"
199
+ cmd = [
200
+ self._MAIN_CLASS,
201
+ "-model",
202
+ self.model_path,
203
+ "-sentences",
204
+ "newline",
205
+ "-outputFormat",
206
+ self._OUTPUT_FORMAT,
207
+ "-tokenized",
208
+ "-tagSeparator",
209
+ tag_separator,
210
+ "-tokenizerFactory",
211
+ "edu.stanford.nlp.process.WhitespaceTokenizer",
212
+ "-tokenizerMethod",
213
+ "newCoreLabelTokenizerFactory",
214
+ ]
215
+ # We don't need to escape slashes as "splitting is done on the last instance of the character in the token"
216
+ return self._parse_trees_output(
217
+ self._execute(
218
+ cmd,
219
+ "\n".join(
220
+ " ".join(tag_separator.join(tagged) for tagged in sentence)
221
+ for sentence in sentences
222
+ ),
223
+ verbose,
224
+ )
225
+ )
226
+
227
+ def _execute(self, cmd, input_, verbose=False):
228
+ encoding = self._encoding
229
+ cmd.extend(["-encoding", encoding])
230
+ if self.corenlp_options:
231
+ cmd.extend(self.corenlp_options.split())
232
+
233
+ default_options = " ".join(_java_options)
234
+
235
+ # Configure java.
236
+ config_java(options=self.java_options, verbose=verbose)
237
+
238
+ # Windows is incompatible with NamedTemporaryFile() without passing in delete=False.
239
+ with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file:
240
+ # Write the actual sentences to the temporary input file
241
+ if isinstance(input_, str) and encoding:
242
+ input_ = input_.encode(encoding)
243
+ input_file.write(input_)
244
+ input_file.flush()
245
+
246
+ # Run the tagger and get the output.
247
+ if self._USE_STDIN:
248
+ input_file.seek(0)
249
+ stdout, stderr = java(
250
+ cmd,
251
+ classpath=self._classpath,
252
+ stdin=input_file,
253
+ stdout=PIPE,
254
+ stderr=PIPE,
255
+ )
256
+ else:
257
+ cmd.append(input_file.name)
258
+ stdout, stderr = java(
259
+ cmd, classpath=self._classpath, stdout=PIPE, stderr=PIPE
260
+ )
261
+
262
+ stdout = stdout.replace(b"\xc2\xa0", b" ")
263
+ stdout = stdout.replace(b"\x00\xa0", b" ")
264
+ stdout = stdout.decode(encoding)
265
+
266
+ os.unlink(input_file.name)
267
+
268
+ # Return java configurations to their default values.
269
+ config_java(options=default_options, verbose=False)
270
+
271
+ return stdout
272
+
273
+
274
+ class StanfordParser(GenericStanfordParser):
275
+ """
276
+ >>> parser=StanfordParser(
277
+ ... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
278
+ ... ) # doctest: +SKIP
279
+
280
+ >>> list(parser.raw_parse("the quick brown fox jumps over the lazy dog")) # doctest: +NORMALIZE_WHITESPACE +SKIP
281
+ [Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
282
+ Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']),
283
+ Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])])]
284
+
285
+ >>> sum([list(dep_graphs) for dep_graphs in parser.raw_parse_sents((
286
+ ... "the quick brown fox jumps over the lazy dog",
287
+ ... "the quick grey wolf jumps over the lazy fox"
288
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
289
+ [Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
290
+ Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']),
291
+ Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])]), Tree('ROOT', [Tree('NP',
292
+ [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['grey']), Tree('NN', ['wolf'])]), Tree('NP',
293
+ [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), Tree('NP', [Tree('DT', ['the']),
294
+ Tree('JJ', ['lazy']), Tree('NN', ['fox'])])])])])])]
295
+
296
+ >>> sum([list(dep_graphs) for dep_graphs in parser.parse_sents((
297
+ ... "I 'm a dog".split(),
298
+ ... "This is my friends ' cat ( the tabby )".split(),
299
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
300
+ [Tree('ROOT', [Tree('S', [Tree('NP', [Tree('PRP', ['I'])]), Tree('VP', [Tree('VBP', ["'m"]),
301
+ Tree('NP', [Tree('DT', ['a']), Tree('NN', ['dog'])])])])]), Tree('ROOT', [Tree('S', [Tree('NP',
302
+ [Tree('DT', ['This'])]), Tree('VP', [Tree('VBZ', ['is']), Tree('NP', [Tree('NP', [Tree('NP', [Tree('PRP$', ['my']),
303
+ Tree('NNS', ['friends']), Tree('POS', ["'"])]), Tree('NN', ['cat'])]), Tree('PRN', [Tree('-LRB-', [Tree('', []),
304
+ Tree('NP', [Tree('DT', ['the']), Tree('NN', ['tabby'])]), Tree('-RRB-', [])])])])])])])]
305
+
306
+ >>> sum([list(dep_graphs) for dep_graphs in parser.tagged_parse_sents((
307
+ ... (
308
+ ... ("The", "DT"),
309
+ ... ("quick", "JJ"),
310
+ ... ("brown", "JJ"),
311
+ ... ("fox", "NN"),
312
+ ... ("jumped", "VBD"),
313
+ ... ("over", "IN"),
314
+ ... ("the", "DT"),
315
+ ... ("lazy", "JJ"),
316
+ ... ("dog", "NN"),
317
+ ... (".", "."),
318
+ ... ),
319
+ ... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP
320
+ [Tree('ROOT', [Tree('S', [Tree('NP', [Tree('DT', ['The']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
321
+ Tree('NN', ['fox'])]), Tree('VP', [Tree('VBD', ['jumped']), Tree('PP', [Tree('IN', ['over']), Tree('NP',
322
+ [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])]), Tree('.', ['.'])])])]
323
+ """
324
+
325
+ _OUTPUT_FORMAT = "penn"
326
+
327
+ def __init__(self, *args, **kwargs):
328
+ warnings.warn(
329
+ "The StanfordParser will be deprecated\n"
330
+ "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.",
331
+ DeprecationWarning,
332
+ stacklevel=2,
333
+ )
334
+
335
+ super().__init__(*args, **kwargs)
336
+
337
+ def _make_tree(self, result):
338
+ return Tree.fromstring(result)
339
+
340
+
341
+ class StanfordDependencyParser(GenericStanfordParser):
342
+
343
+ """
344
+ >>> dep_parser=StanfordDependencyParser(
345
+ ... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
346
+ ... ) # doctest: +SKIP
347
+
348
+ >>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
349
+ [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])])]
350
+
351
+ >>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
352
+ [[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')),
353
+ ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')),
354
+ ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')),
355
+ ((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]]
356
+
357
+ >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents((
358
+ ... "The quick brown fox jumps over the lazy dog.",
359
+ ... "The quick grey wolf jumps over the lazy fox."
360
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
361
+ [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])]),
362
+ Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']), Tree('fox', ['over', 'the', 'lazy'])])]
363
+
364
+ >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents((
365
+ ... "I 'm a dog".split(),
366
+ ... "This is my friends ' cat ( the tabby )".split(),
367
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
368
+ [Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends', ['my', "'"]), Tree('tabby', ['the'])])]
369
+
370
+ >>> sum([[list(parse.triples()) for parse in dep_graphs] for dep_graphs in dep_parser.tagged_parse_sents((
371
+ ... (
372
+ ... ("The", "DT"),
373
+ ... ("quick", "JJ"),
374
+ ... ("brown", "JJ"),
375
+ ... ("fox", "NN"),
376
+ ... ("jumped", "VBD"),
377
+ ... ("over", "IN"),
378
+ ... ("the", "DT"),
379
+ ... ("lazy", "JJ"),
380
+ ... ("dog", "NN"),
381
+ ... (".", "."),
382
+ ... ),
383
+ ... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP
384
+ [[((u'jumped', u'VBD'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')),
385
+ ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')),
386
+ ((u'jumped', u'VBD'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')),
387
+ ((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]]
388
+
389
+ """
390
+
391
+ _OUTPUT_FORMAT = "conll2007"
392
+
393
+ def __init__(self, *args, **kwargs):
394
+ warnings.warn(
395
+ "The StanfordDependencyParser will be deprecated\n"
396
+ "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.",
397
+ DeprecationWarning,
398
+ stacklevel=2,
399
+ )
400
+
401
+ super().__init__(*args, **kwargs)
402
+
403
+ def _make_tree(self, result):
404
+ return DependencyGraph(result, top_relation_label="root")
405
+
406
+
407
+ class StanfordNeuralDependencyParser(GenericStanfordParser):
408
+ """
409
+ >>> from nltk.parse.stanford import StanfordNeuralDependencyParser # doctest: +SKIP
410
+ >>> dep_parser=StanfordNeuralDependencyParser(java_options='-mx4g')# doctest: +SKIP
411
+
412
+ >>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
413
+ [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy']), '.'])]
414
+
415
+ >>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
416
+ [[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det',
417
+ (u'The', u'DT')), ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'),
418
+ u'amod', (u'brown', u'JJ')), ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')),
419
+ ((u'dog', u'NN'), u'case', (u'over', u'IN')), ((u'dog', u'NN'), u'det',
420
+ (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ')), ((u'jumps', u'VBZ'),
421
+ u'punct', (u'.', u'.'))]]
422
+
423
+ >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents((
424
+ ... "The quick brown fox jumps over the lazy dog.",
425
+ ... "The quick grey wolf jumps over the lazy fox."
426
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
427
+ [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over',
428
+ 'the', 'lazy']), '.']), Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']),
429
+ Tree('fox', ['over', 'the', 'lazy']), '.'])]
430
+
431
+ >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents((
432
+ ... "I 'm a dog".split(),
433
+ ... "This is my friends ' cat ( the tabby )".split(),
434
+ ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
435
+ [Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends',
436
+ ['my', "'"]), Tree('tabby', ['-LRB-', 'the', '-RRB-'])])]
437
+ """
438
+
439
+ _OUTPUT_FORMAT = "conll"
440
+ _MAIN_CLASS = "edu.stanford.nlp.pipeline.StanfordCoreNLP"
441
+ _JAR = r"stanford-corenlp-(\d+)(\.(\d+))+\.jar"
442
+ _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)(\.(\d+))+-models\.jar"
443
+ _USE_STDIN = True
444
+ _DOUBLE_SPACED_OUTPUT = True
445
+
446
+ def __init__(self, *args, **kwargs):
447
+ warnings.warn(
448
+ "The StanfordNeuralDependencyParser will be deprecated\n"
449
+ "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.",
450
+ DeprecationWarning,
451
+ stacklevel=2,
452
+ )
453
+
454
+ super().__init__(*args, **kwargs)
455
+ self.corenlp_options += "-annotators tokenize,ssplit,pos,depparse"
456
+
457
+ def tagged_parse_sents(self, sentences, verbose=False):
458
+ """
459
+ Currently unimplemented because the neural dependency parser (and
460
+ the StanfordCoreNLP pipeline class) doesn't support passing in pre-
461
+ tagged tokens.
462
+ """
463
+ raise NotImplementedError(
464
+ "tagged_parse[_sents] is not supported by "
465
+ "StanfordNeuralDependencyParser; use "
466
+ "parse[_sents] or raw_parse[_sents] instead."
467
+ )
468
+
469
+ def _make_tree(self, result):
470
+ return DependencyGraph(result, top_relation_label="ROOT")
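Both deprecated classes above point users at nltk.parse.corenlp.CoreNLPDependencyParser. The snippet below is a minimal migration sketch, not part of this file; it assumes a Stanford CoreNLP server is already running at http://localhost:9000, and the URL and example sentence are illustrative.

from nltk.parse.corenlp import CoreNLPDependencyParser

# Assumes a CoreNLP server (main class edu.stanford.nlp.pipeline.StanfordCoreNLPServer,
# or equivalent) is already serving on port 9000; adjust the URL as needed.
dep_parser = CoreNLPDependencyParser(url="http://localhost:9000")
parse, = dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")
print(parse.tree())            # dependency tree, as in the doctests above
print(list(parse.triples()))   # ((head, tag), relation, (dependent, tag)) triples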
env-llmeval/lib/python3.10/site-packages/nltk/parse/transitionparser.py ADDED
@@ -0,0 +1,794 @@
1
+ # Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers
2
+ #
3
+ # Author: Long Duong <[email protected]>
4
+ #
5
+ # Copyright (C) 2001-2023 NLTK Project
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ import pickle
10
+ import tempfile
11
+ from copy import deepcopy
12
+ from operator import itemgetter
13
+ from os import remove
14
+
15
+ try:
16
+ from numpy import array
17
+ from scipy import sparse
18
+ from sklearn import svm
19
+ from sklearn.datasets import load_svmlight_file
20
+ except ImportError:
21
+ pass
22
+
23
+ from nltk.parse import DependencyEvaluator, DependencyGraph, ParserI
24
+
25
+
26
+ class Configuration:
27
+ """
28
+ Class for holding a configuration, which is the partial analysis of the input sentence.
29
+ The transition-based parser aims at finding a set of operators that transfer the initial
30
+ configuration to the terminal configuration.
31
+
32
+ The configuration includes:
33
+ - Stack: for storing partially processed words
34
+ - Buffer: for storing remaining input words
35
+ - Set of arcs: for storing partially built dependency tree
36
+
37
+ This class also provides a method to represent a configuration as list of features.
38
+ """
39
+
40
+ def __init__(self, dep_graph):
41
+ """
42
+ :param dep_graph: the representation of an input in the form of dependency graph.
43
+ :type dep_graph: DependencyGraph where the dependencies are not specified.
44
+ """
45
+ # dep_graph.nodes contain list of token for a sentence
46
+ self.stack = [0] # The root element
47
+ self.buffer = list(range(1, len(dep_graph.nodes))) # The rest is in the buffer
48
+ self.arcs = [] # empty set of arc
49
+ self._tokens = dep_graph.nodes
50
+ self._max_address = len(self.buffer)
51
+
52
+ def __str__(self):
53
+ return (
54
+ "Stack : "
55
+ + str(self.stack)
56
+ + " Buffer : "
57
+ + str(self.buffer)
58
+ + " Arcs : "
59
+ + str(self.arcs)
60
+ )
61
+
62
+ def _check_informative(self, feat, flag=False):
63
+ """
64
+ Check whether a feature is informative
65
+ The flag control whether "_" is informative or not
66
+ """
67
+ if feat is None:
68
+ return False
69
+ if feat == "":
70
+ return False
71
+ if flag is False:
72
+ if feat == "_":
73
+ return False
74
+ return True
75
+
76
+ def extract_features(self):
77
+ """
78
+ Extract the set of features for the current configuration. Implement standard features as described in
79
+ Table 3.2 (page 31) of the Dependency Parsing book by Sandra Kubler, Ryan McDonald and Joakim Nivre.
80
+ Please note that these features are very basic.
81
+ :return: list(str)
82
+ """
83
+ result = []
84
+ # Todo : can come up with more complicated features set for better
85
+ # performance.
86
+ if len(self.stack) > 0:
87
+ # Stack 0
88
+ stack_idx0 = self.stack[len(self.stack) - 1]
89
+ token = self._tokens[stack_idx0]
90
+ if self._check_informative(token["word"], True):
91
+ result.append("STK_0_FORM_" + token["word"])
92
+ if "lemma" in token and self._check_informative(token["lemma"]):
93
+ result.append("STK_0_LEMMA_" + token["lemma"])
94
+ if self._check_informative(token["tag"]):
95
+ result.append("STK_0_POS_" + token["tag"])
96
+ if "feats" in token and self._check_informative(token["feats"]):
97
+ feats = token["feats"].split("|")
98
+ for feat in feats:
99
+ result.append("STK_0_FEATS_" + feat)
100
+ # Stack 1
101
+ if len(self.stack) > 1:
102
+ stack_idx1 = self.stack[len(self.stack) - 2]
103
+ token = self._tokens[stack_idx1]
104
+ if self._check_informative(token["tag"]):
105
+ result.append("STK_1_POS_" + token["tag"])
106
+
107
+ # Left most, right most dependency of stack[0]
108
+ left_most = 1000000
109
+ right_most = -1
110
+ dep_left_most = ""
111
+ dep_right_most = ""
112
+ for (wi, r, wj) in self.arcs:
113
+ if wi == stack_idx0:
114
+ if (wj > wi) and (wj > right_most):
115
+ right_most = wj
116
+ dep_right_most = r
117
+ if (wj < wi) and (wj < left_most):
118
+ left_most = wj
119
+ dep_left_most = r
120
+ if self._check_informative(dep_left_most):
121
+ result.append("STK_0_LDEP_" + dep_left_most)
122
+ if self._check_informative(dep_right_most):
123
+ result.append("STK_0_RDEP_" + dep_right_most)
124
+
125
+ # Check Buffered 0
126
+ if len(self.buffer) > 0:
127
+ # Buffer 0
128
+ buffer_idx0 = self.buffer[0]
129
+ token = self._tokens[buffer_idx0]
130
+ if self._check_informative(token["word"], True):
131
+ result.append("BUF_0_FORM_" + token["word"])
132
+ if "lemma" in token and self._check_informative(token["lemma"]):
133
+ result.append("BUF_0_LEMMA_" + token["lemma"])
134
+ if self._check_informative(token["tag"]):
135
+ result.append("BUF_0_POS_" + token["tag"])
136
+ if "feats" in token and self._check_informative(token["feats"]):
137
+ feats = token["feats"].split("|")
138
+ for feat in feats:
139
+ result.append("BUF_0_FEATS_" + feat)
140
+ # Buffer 1
141
+ if len(self.buffer) > 1:
142
+ buffer_idx1 = self.buffer[1]
143
+ token = self._tokens[buffer_idx1]
144
+ if self._check_informative(token["word"], True):
145
+ result.append("BUF_1_FORM_" + token["word"])
146
+ if self._check_informative(token["tag"]):
147
+ result.append("BUF_1_POS_" + token["tag"])
148
+ if len(self.buffer) > 2:
149
+ buffer_idx2 = self.buffer[2]
150
+ token = self._tokens[buffer_idx2]
151
+ if self._check_informative(token["tag"]):
152
+ result.append("BUF_2_POS_" + token["tag"])
153
+ if len(self.buffer) > 3:
154
+ buffer_idx3 = self.buffer[3]
155
+ token = self._tokens[buffer_idx3]
156
+ if self._check_informative(token["tag"]):
157
+ result.append("BUF_3_POS_" + token["tag"])
158
+ # Left most, right most dependency of stack[0]
159
+ left_most = 1000000
160
+ right_most = -1
161
+ dep_left_most = ""
162
+ dep_right_most = ""
163
+ for (wi, r, wj) in self.arcs:
164
+ if wi == buffer_idx0:
165
+ if (wj > wi) and (wj > right_most):
166
+ right_most = wj
167
+ dep_right_most = r
168
+ if (wj < wi) and (wj < left_most):
169
+ left_most = wj
170
+ dep_left_most = r
171
+ if self._check_informative(dep_left_most):
172
+ result.append("BUF_0_LDEP_" + dep_left_most)
173
+ if self._check_informative(dep_right_most):
174
+ result.append("BUF_0_RDEP_" + dep_right_most)
175
+
176
+ return result
177
+
178
+
179
+ class Transition:
180
+ """
181
+ This class defines the set of transitions which are applied to a configuration to get another configuration.
182
+ Note that the available transitions differ between parsing algorithms.
183
+ """
184
+
185
+ # Define set of transitions
186
+ LEFT_ARC = "LEFTARC"
187
+ RIGHT_ARC = "RIGHTARC"
188
+ SHIFT = "SHIFT"
189
+ REDUCE = "REDUCE"
190
+
191
+ def __init__(self, alg_option):
192
+ """
193
+ :param alg_option: the algorithm option of this parser. Currently supports the `arc-standard` and `arc-eager` algorithms
194
+ :type alg_option: str
195
+ """
196
+ self._algo = alg_option
197
+ if alg_option not in [
198
+ TransitionParser.ARC_STANDARD,
199
+ TransitionParser.ARC_EAGER,
200
+ ]:
201
+ raise ValueError(
202
+ " Currently we only support %s and %s "
203
+ % (TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER)
204
+ )
205
+
206
+ def left_arc(self, conf, relation):
207
+ """
208
+ Note that the left-arc algorithm is quite similar for arc-standard and arc-eager, except for the precondition
209
+
210
+ :param configuration: is the current configuration
211
+ :return: A new configuration or -1 if the pre-condition is not satisfied
212
+ """
213
+ if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
214
+ return -1
215
+ if conf.buffer[0] == 0:
216
+ # here is the Root element
217
+ return -1
218
+
219
+ idx_wi = conf.stack[len(conf.stack) - 1]
220
+
221
+ flag = True
222
+ if self._algo == TransitionParser.ARC_EAGER:
223
+ for (idx_parent, r, idx_child) in conf.arcs:
224
+ if idx_child == idx_wi:
225
+ flag = False
226
+
227
+ if flag:
228
+ conf.stack.pop()
229
+ idx_wj = conf.buffer[0]
230
+ conf.arcs.append((idx_wj, relation, idx_wi))
231
+ else:
232
+ return -1
233
+
234
+ def right_arc(self, conf, relation):
235
+ """
236
+ Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager
237
+
238
+ :param configuration: is the current configuration
239
+ :return: A new configuration or -1 if the pre-condition is not satisfied
240
+ """
241
+ if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
242
+ return -1
243
+ if self._algo == TransitionParser.ARC_STANDARD:
244
+ idx_wi = conf.stack.pop()
245
+ idx_wj = conf.buffer[0]
246
+ conf.buffer[0] = idx_wi
247
+ conf.arcs.append((idx_wi, relation, idx_wj))
248
+ else: # arc-eager
249
+ idx_wi = conf.stack[len(conf.stack) - 1]
250
+ idx_wj = conf.buffer.pop(0)
251
+ conf.stack.append(idx_wj)
252
+ conf.arcs.append((idx_wi, relation, idx_wj))
253
+
254
+ def reduce(self, conf):
255
+ """
256
+ Note that the algorithm for reduce is only available for arc-eager
257
+
258
+ :param configuration: is the current configuration
259
+ :return: A new configuration or -1 if the pre-condition is not satisfied
260
+ """
261
+
262
+ if self._algo != TransitionParser.ARC_EAGER:
263
+ return -1
264
+ if len(conf.stack) <= 0:
265
+ return -1
266
+
267
+ idx_wi = conf.stack[len(conf.stack) - 1]
268
+ flag = False
269
+ for (idx_parent, r, idx_child) in conf.arcs:
270
+ if idx_child == idx_wi:
271
+ flag = True
272
+ if flag:
273
+ conf.stack.pop() # reduce it
274
+ else:
275
+ return -1
276
+
277
+ def shift(self, conf):
278
+ """
279
+ Note that the algorithm for shift is the SAME for arc-standard and arc-eager
280
+
281
+ :param configuration: is the current configuration
282
+ :return: A new configuration or -1 if the pre-condition is not satisfied
283
+ """
284
+ if len(conf.buffer) <= 0:
285
+ return -1
286
+ idx_wi = conf.buffer.pop(0)
287
+ conf.stack.append(idx_wi)
288
+
289
+
290
+ class TransitionParser(ParserI):
291
+
292
+ """
293
+ Class for transition-based parsers. Implements two algorithms: "arc-standard" and "arc-eager"
294
+ """
295
+
296
+ ARC_STANDARD = "arc-standard"
297
+ ARC_EAGER = "arc-eager"
298
+
299
+ def __init__(self, algorithm):
300
+ """
301
+ :param algorithm: the algorithm option of this parser. Currently supports the `arc-standard` and `arc-eager` algorithms
302
+ :type algorithm: str
303
+ """
304
+ if not (algorithm in [self.ARC_STANDARD, self.ARC_EAGER]):
305
+ raise ValueError(
306
+ " Currently we only support %s and %s "
307
+ % (self.ARC_STANDARD, self.ARC_EAGER)
308
+ )
309
+ self._algorithm = algorithm
310
+
311
+ self._dictionary = {}
312
+ self._transition = {}
313
+ self._match_transition = {}
314
+
315
+ def _get_dep_relation(self, idx_parent, idx_child, depgraph):
316
+ p_node = depgraph.nodes[idx_parent]
317
+ c_node = depgraph.nodes[idx_child]
318
+
319
+ if c_node["word"] is None:
320
+ return None # Root word
321
+
322
+ if c_node["head"] == p_node["address"]:
323
+ return c_node["rel"]
324
+ else:
325
+ return None
326
+
327
+ def _convert_to_binary_features(self, features):
328
+ """
329
+ :param features: list of feature strings to be converted to binary features
330
+ :type features: list(str)
331
+ :return: string of binary features in libsvm format, i.e. space-separated 'featureID:value' pairs
332
+ """
333
+ unsorted_result = []
334
+ for feature in features:
335
+ self._dictionary.setdefault(feature, len(self._dictionary))
336
+ unsorted_result.append(self._dictionary[feature])
337
+
338
+ # Default value of each feature is 1.0
339
+ return " ".join(
340
+ str(featureID) + ":1.0" for featureID in sorted(unsorted_result)
341
+ )
342
+
343
+ def _is_projective(self, depgraph):
344
+ arc_list = []
345
+ for key in depgraph.nodes:
346
+ node = depgraph.nodes[key]
347
+
348
+ if "head" in node:
349
+ childIdx = node["address"]
350
+ parentIdx = node["head"]
351
+ if parentIdx is not None:
352
+ arc_list.append((parentIdx, childIdx))
353
+
354
+ for (parentIdx, childIdx) in arc_list:
355
+ # Ensure that childIdx < parentIdx
356
+ if childIdx > parentIdx:
357
+ temp = childIdx
358
+ childIdx = parentIdx
359
+ parentIdx = temp
360
+ for k in range(childIdx + 1, parentIdx):
361
+ for m in range(len(depgraph.nodes)):
362
+ if (m < childIdx) or (m > parentIdx):
363
+ if (k, m) in arc_list:
364
+ return False
365
+ if (m, k) in arc_list:
366
+ return False
367
+ return True
368
+
369
+ def _write_to_file(self, key, binary_features, input_file):
370
+ """
371
+ write the binary features to input file and update the transition dictionary
372
+ """
373
+ self._transition.setdefault(key, len(self._transition) + 1)
374
+ self._match_transition[self._transition[key]] = key
375
+
376
+ input_str = str(self._transition[key]) + " " + binary_features + "\n"
377
+ input_file.write(input_str.encode("utf-8"))
378
+
379
+ def _create_training_examples_arc_std(self, depgraphs, input_file):
380
+ """
381
+ Create the training example in the libsvm format and write it to the input_file.
382
+ Reference : Page 32, Chapter 3. Dependency Parsing by Sandra Kubler, Ryan McDonald and Joakim Nivre (2009)
383
+ """
384
+ operation = Transition(self.ARC_STANDARD)
385
+ count_proj = 0
386
+ training_seq = []
387
+
388
+ for depgraph in depgraphs:
389
+ if not self._is_projective(depgraph):
390
+ continue
391
+
392
+ count_proj += 1
393
+ conf = Configuration(depgraph)
394
+ while len(conf.buffer) > 0:
395
+ b0 = conf.buffer[0]
396
+ features = conf.extract_features()
397
+ binary_features = self._convert_to_binary_features(features)
398
+
399
+ if len(conf.stack) > 0:
400
+ s0 = conf.stack[len(conf.stack) - 1]
401
+ # Left-arc operation
402
+ rel = self._get_dep_relation(b0, s0, depgraph)
403
+ if rel is not None:
404
+ key = Transition.LEFT_ARC + ":" + rel
405
+ self._write_to_file(key, binary_features, input_file)
406
+ operation.left_arc(conf, rel)
407
+ training_seq.append(key)
408
+ continue
409
+
410
+ # Right-arc operation
411
+ rel = self._get_dep_relation(s0, b0, depgraph)
412
+ if rel is not None:
413
+ precondition = True
414
+ # Get the max-index of buffer
415
+ maxID = conf._max_address
416
+
417
+ for w in range(maxID + 1):
418
+ if w != b0:
419
+ relw = self._get_dep_relation(b0, w, depgraph)
420
+ if relw is not None:
421
+ if (b0, relw, w) not in conf.arcs:
422
+ precondition = False
423
+
424
+ if precondition:
425
+ key = Transition.RIGHT_ARC + ":" + rel
426
+ self._write_to_file(key, binary_features, input_file)
427
+ operation.right_arc(conf, rel)
428
+ training_seq.append(key)
429
+ continue
430
+
431
+ # Shift operation as the default
432
+ key = Transition.SHIFT
433
+ self._write_to_file(key, binary_features, input_file)
434
+ operation.shift(conf)
435
+ training_seq.append(key)
436
+
437
+ print(" Number of training examples : " + str(len(depgraphs)))
438
+ print(" Number of valid (projective) examples : " + str(count_proj))
439
+ return training_seq
440
+
441
+ def _create_training_examples_arc_eager(self, depgraphs, input_file):
442
+ """
443
+ Create the training example in the libsvm format and write it to the input_file.
444
+ Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Yoav Goldberg and Joakim Nivre
445
+ """
446
+ operation = Transition(self.ARC_EAGER)
447
+ countProj = 0
448
+ training_seq = []
449
+
450
+ for depgraph in depgraphs:
451
+ if not self._is_projective(depgraph):
452
+ continue
453
+
454
+ countProj += 1
455
+ conf = Configuration(depgraph)
456
+ while len(conf.buffer) > 0:
457
+ b0 = conf.buffer[0]
458
+ features = conf.extract_features()
459
+ binary_features = self._convert_to_binary_features(features)
460
+
461
+ if len(conf.stack) > 0:
462
+ s0 = conf.stack[len(conf.stack) - 1]
463
+ # Left-arc operation
464
+ rel = self._get_dep_relation(b0, s0, depgraph)
465
+ if rel is not None:
466
+ key = Transition.LEFT_ARC + ":" + rel
467
+ self._write_to_file(key, binary_features, input_file)
468
+ operation.left_arc(conf, rel)
469
+ training_seq.append(key)
470
+ continue
471
+
472
+ # Right-arc operation
473
+ rel = self._get_dep_relation(s0, b0, depgraph)
474
+ if rel is not None:
475
+ key = Transition.RIGHT_ARC + ":" + rel
476
+ self._write_to_file(key, binary_features, input_file)
477
+ operation.right_arc(conf, rel)
478
+ training_seq.append(key)
479
+ continue
480
+
481
+ # reduce operation
482
+ flag = False
483
+ for k in range(s0):
484
+ if self._get_dep_relation(k, b0, depgraph) is not None:
485
+ flag = True
486
+ if self._get_dep_relation(b0, k, depgraph) is not None:
487
+ flag = True
488
+ if flag:
489
+ key = Transition.REDUCE
490
+ self._write_to_file(key, binary_features, input_file)
491
+ operation.reduce(conf)
492
+ training_seq.append(key)
493
+ continue
494
+
495
+ # Shift operation as the default
496
+ key = Transition.SHIFT
497
+ self._write_to_file(key, binary_features, input_file)
498
+ operation.shift(conf)
499
+ training_seq.append(key)
500
+
501
+ print(" Number of training examples : " + str(len(depgraphs)))
502
+ print(" Number of valid (projective) examples : " + str(countProj))
503
+ return training_seq
504
+
505
+ def train(self, depgraphs, modelfile, verbose=True):
506
+ """
507
+ :param depgraphs : list of DependencyGraph as the training data
508
+ :type depgraphs : DependencyGraph
509
+ :param modelfile : file name to save the trained model
510
+ :type modelfile : str
511
+ """
512
+
513
+ try:
514
+ input_file = tempfile.NamedTemporaryFile(
515
+ prefix="transition_parse.train", dir=tempfile.gettempdir(), delete=False
516
+ )
517
+
518
+ if self._algorithm == self.ARC_STANDARD:
519
+ self._create_training_examples_arc_std(depgraphs, input_file)
520
+ else:
521
+ self._create_training_examples_arc_eager(depgraphs, input_file)
522
+
523
+ input_file.close()
524
+ # Using the temporary file to train the libsvm classifier
525
+ x_train, y_train = load_svmlight_file(input_file.name)
526
+ # The parameter is set according to the paper:
527
+ # Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre
528
+ # Todo : because of probability = True => very slow due to
529
+ # cross-validation. Need to improve the speed here
530
+ model = svm.SVC(
531
+ kernel="poly",
532
+ degree=2,
533
+ coef0=0,
534
+ gamma=0.2,
535
+ C=0.5,
536
+ verbose=verbose,
537
+ probability=True,
538
+ )
539
+
540
+ model.fit(x_train, y_train)
541
+ # Save the model to file name (as pickle)
542
+ pickle.dump(model, open(modelfile, "wb"))
543
+ finally:
544
+ remove(input_file.name)
545
+
546
+ def parse(self, depgraphs, modelFile):
547
+ """
548
+ :param depgraphs: the list of test sentences, each represented as a dependency graph where the 'head' information is a dummy value
549
+ :type depgraphs: list(DependencyGraph)
550
+ :param modelfile: the model file
551
+ :type modelfile: str
552
+ :return: list (DependencyGraph) with the 'head' and 'rel' information
553
+ """
554
+ result = []
555
+ # First load the model
556
+ model = pickle.load(open(modelFile, "rb"))
557
+ operation = Transition(self._algorithm)
558
+
559
+ for depgraph in depgraphs:
560
+ conf = Configuration(depgraph)
561
+ while len(conf.buffer) > 0:
562
+ features = conf.extract_features()
563
+ col = []
564
+ row = []
565
+ data = []
566
+ for feature in features:
567
+ if feature in self._dictionary:
568
+ col.append(self._dictionary[feature])
569
+ row.append(0)
570
+ data.append(1.0)
571
+ np_col = array(sorted(col)) # NB : index must be sorted
572
+ np_row = array(row)
573
+ np_data = array(data)
574
+
575
+ x_test = sparse.csr_matrix(
576
+ (np_data, (np_row, np_col)), shape=(1, len(self._dictionary))
577
+ )
578
+
579
+ # It's best to use the decision function as follows, BUT it's not supported yet for sparse SVM
580
+ # Using decision function to build the votes array
581
+ # dec_func = model.decision_function(x_test)[0]
582
+ # votes = {}
583
+ # k = 0
584
+ # for i in range(len(model.classes_)):
585
+ # for j in range(i+1, len(model.classes_)):
586
+ # #if dec_func[k] > 0:
587
+ # votes.setdefault(i,0)
588
+ # votes[i] +=1
589
+ # else:
590
+ # votes.setdefault(j,0)
591
+ # votes[j] +=1
592
+ # k +=1
593
+ # Sort votes according to the values
594
+ # sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True)
595
+
596
+ # We will use predict_proba instead of decision_function
597
+ prob_dict = {}
598
+ pred_prob = model.predict_proba(x_test)[0]
599
+ for i in range(len(pred_prob)):
600
+ prob_dict[i] = pred_prob[i]
601
+ sorted_Prob = sorted(prob_dict.items(), key=itemgetter(1), reverse=True)
602
+
603
+ # Note that SHIFT is always a valid operation
604
+ for (y_pred_idx, confidence) in sorted_Prob:
605
+ # y_pred = model.predict(x_test)[0]
606
+ # From the prediction match to the operation
607
+ y_pred = model.classes_[y_pred_idx]
608
+
609
+ if y_pred in self._match_transition:
610
+ strTransition = self._match_transition[y_pred]
611
+ baseTransition = strTransition.split(":")[0]
612
+
613
+ if baseTransition == Transition.LEFT_ARC:
614
+ if (
615
+ operation.left_arc(conf, strTransition.split(":")[1])
616
+ != -1
617
+ ):
618
+ break
619
+ elif baseTransition == Transition.RIGHT_ARC:
620
+ if (
621
+ operation.right_arc(conf, strTransition.split(":")[1])
622
+ != -1
623
+ ):
624
+ break
625
+ elif baseTransition == Transition.REDUCE:
626
+ if operation.reduce(conf) != -1:
627
+ break
628
+ elif baseTransition == Transition.SHIFT:
629
+ if operation.shift(conf) != -1:
630
+ break
631
+ else:
632
+ raise ValueError(
633
+ "The predicted transition is not recognized, expected errors"
634
+ )
635
+
636
+ # Finished with operations; build the dependency graph from conf.arcs
637
+
638
+ new_depgraph = deepcopy(depgraph)
639
+ for key in new_depgraph.nodes:
640
+ node = new_depgraph.nodes[key]
641
+ node["rel"] = ""
642
+ # By default, every token depends on the root
643
+ node["head"] = 0
644
+ for (head, rel, child) in conf.arcs:
645
+ c_node = new_depgraph.nodes[child]
646
+ c_node["head"] = head
647
+ c_node["rel"] = rel
648
+ result.append(new_depgraph)
649
+
650
+ return result
651
+
652
+
653
+ def demo():
654
+ """
655
+ >>> from nltk.parse import DependencyGraph, DependencyEvaluator
656
+ >>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition
657
+ >>> gold_sent = DependencyGraph(\"""
658
+ ... Economic JJ 2 ATT
659
+ ... news NN 3 SBJ
660
+ ... has VBD 0 ROOT
661
+ ... little JJ 5 ATT
662
+ ... effect NN 3 OBJ
663
+ ... on IN 5 ATT
664
+ ... financial JJ 8 ATT
665
+ ... markets NNS 6 PC
666
+ ... . . 3 PU
667
+ ... \""")
668
+
669
+ >>> conf = Configuration(gold_sent)
670
+
671
+ ###################### Check the Initial Feature ########################
672
+
673
+ >>> print(', '.join(conf.extract_features()))
674
+ STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ
675
+
676
+ ###################### Check The Transition #######################
677
+ Check the Initialized Configuration
678
+ >>> print(conf)
679
+ Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : []
680
+
681
+ A. Do some transition checks for ARC-STANDARD
682
+
683
+ >>> operation = Transition('arc-standard')
684
+ >>> operation.shift(conf)
685
+ >>> operation.left_arc(conf, "ATT")
686
+ >>> operation.shift(conf)
687
+ >>> operation.left_arc(conf,"SBJ")
688
+ >>> operation.shift(conf)
689
+ >>> operation.shift(conf)
690
+ >>> operation.left_arc(conf, "ATT")
691
+ >>> operation.shift(conf)
692
+ >>> operation.shift(conf)
693
+ >>> operation.shift(conf)
694
+ >>> operation.left_arc(conf, "ATT")
695
+
696
+ Middle Configuration and Features Check
697
+ >>> print(conf)
698
+ Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)]
699
+
700
+ >>> print(', '.join(conf.extract_features()))
701
+ STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT
702
+
703
+ >>> operation.right_arc(conf, "PC")
704
+ >>> operation.right_arc(conf, "ATT")
705
+ >>> operation.right_arc(conf, "OBJ")
706
+ >>> operation.shift(conf)
707
+ >>> operation.right_arc(conf, "PU")
708
+ >>> operation.right_arc(conf, "ROOT")
709
+ >>> operation.shift(conf)
710
+
711
+ Terminated Configuration Check
712
+ >>> print(conf)
713
+ Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)]
714
+
715
+
716
+ B. Do some transition checks for ARC-EAGER
717
+
718
+ >>> conf = Configuration(gold_sent)
719
+ >>> operation = Transition('arc-eager')
720
+ >>> operation.shift(conf)
721
+ >>> operation.left_arc(conf,'ATT')
722
+ >>> operation.shift(conf)
723
+ >>> operation.left_arc(conf,'SBJ')
724
+ >>> operation.right_arc(conf,'ROOT')
725
+ >>> operation.shift(conf)
726
+ >>> operation.left_arc(conf,'ATT')
727
+ >>> operation.right_arc(conf,'OBJ')
728
+ >>> operation.right_arc(conf,'ATT')
729
+ >>> operation.shift(conf)
730
+ >>> operation.left_arc(conf,'ATT')
731
+ >>> operation.right_arc(conf,'PC')
732
+ >>> operation.reduce(conf)
733
+ >>> operation.reduce(conf)
734
+ >>> operation.reduce(conf)
735
+ >>> operation.right_arc(conf,'PU')
736
+ >>> print(conf)
737
+ Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)]
738
+
739
+ ###################### Check The Training Function #######################
740
+
741
+ A. Check the ARC-STANDARD training
742
+ >>> import tempfile
743
+ >>> import os
744
+ >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False)
745
+
746
+ >>> parser_std = TransitionParser('arc-standard')
747
+ >>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file)))
748
+ Number of training examples : 1
749
+ Number of valid (projective) examples : 1
750
+ SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT
751
+
752
+ >>> parser_std.train([gold_sent],'temp.arcstd.model', verbose=False)
753
+ Number of training examples : 1
754
+ Number of valid (projective) examples : 1
755
+ >>> input_file.close()
756
+ >>> remove(input_file.name)
757
+
758
+ B. Check the ARC-EAGER training
759
+
760
+ >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False)
761
+ >>> parser_eager = TransitionParser('arc-eager')
762
+ >>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file)))
763
+ Number of training examples : 1
764
+ Number of valid (projective) examples : 1
765
+ SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU
766
+
767
+ >>> parser_eager.train([gold_sent],'temp.arceager.model', verbose=False)
768
+ Number of training examples : 1
769
+ Number of valid (projective) examples : 1
770
+
771
+ >>> input_file.close()
772
+ >>> remove(input_file.name)
773
+
774
+ ###################### Check The Parsing Function ########################
775
+
776
+ A. Check the ARC-STANDARD parser
777
+
778
+ >>> result = parser_std.parse([gold_sent], 'temp.arcstd.model')
779
+ >>> de = DependencyEvaluator(result, [gold_sent])
780
+ >>> de.eval() >= (0, 0)
781
+ True
782
+
783
+ B. Check the ARC-EAGER parser
784
+ >>> result = parser_eager.parse([gold_sent], 'temp.arceager.model')
785
+ >>> de = DependencyEvaluator(result, [gold_sent])
786
+ >>> de.eval() >= (0, 0)
787
+ True
788
+
789
+ Remove test temporary files
790
+ >>> remove('temp.arceager.model')
791
+ >>> remove('temp.arcstd.model')
792
+
793
+ Note that the result is very poor because there is only one training example.
794
+ """
env-llmeval/lib/python3.10/site-packages/nltk/parse/util.py ADDED
@@ -0,0 +1,234 @@
1
+ # Natural Language Toolkit: Parser Utility Functions
2
+ #
3
+ # Author: Ewan Klein <[email protected]>
4
+ # Tom Aarsen <>
5
+ #
6
+ # Copyright (C) 2001-2023 NLTK Project
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+
11
+ """
12
+ Utility functions for parsers.
13
+ """
14
+
15
+ from nltk.data import load
16
+ from nltk.grammar import CFG, PCFG, FeatureGrammar
17
+ from nltk.parse.chart import Chart, ChartParser
18
+ from nltk.parse.featurechart import FeatureChart, FeatureChartParser
19
+ from nltk.parse.pchart import InsideChartParser
20
+
21
+
22
+ def load_parser(
23
+ grammar_url, trace=0, parser=None, chart_class=None, beam_size=0, **load_args
24
+ ):
25
+ """
26
+ Load a grammar from a file, and build a parser based on that grammar.
27
+ The parser depends on the grammar format, and might also depend
28
+ on properties of the grammar itself.
29
+
30
+ The following grammar formats are currently supported:
31
+ - ``'cfg'`` (CFGs: ``CFG``)
32
+ - ``'pcfg'`` (probabilistic CFGs: ``PCFG``)
33
+ - ``'fcfg'`` (feature-based CFGs: ``FeatureGrammar``)
34
+
35
+ :type grammar_url: str
36
+ :param grammar_url: A URL specifying where the grammar is located.
37
+ The default protocol is ``"nltk:"``, which searches for the file
38
+ in the the NLTK data package.
39
+ :type trace: int
40
+ :param trace: The level of tracing that should be used when
41
+ parsing a text. ``0`` will generate no tracing output;
42
+ and higher numbers will produce more verbose tracing output.
43
+ :param parser: The class used for parsing; should be ``ChartParser``
44
+ or a subclass.
45
+ If None, the class depends on the grammar format.
46
+ :param chart_class: The class used for storing the chart;
47
+ should be ``Chart`` or a subclass.
48
+ Only used for CFGs and feature CFGs.
49
+ If None, the chart class depends on the grammar format.
50
+ :type beam_size: int
51
+ :param beam_size: The maximum length for the parser's edge queue.
52
+ Only used for probabilistic CFGs.
53
+ :param load_args: Keyword parameters used when loading the grammar.
54
+ See ``data.load`` for more information.
55
+ """
56
+ grammar = load(grammar_url, **load_args)
57
+ if not isinstance(grammar, CFG):
58
+ raise ValueError("The grammar must be a CFG, " "or a subclass thereof.")
59
+ if isinstance(grammar, PCFG):
60
+ if parser is None:
61
+ parser = InsideChartParser
62
+ return parser(grammar, trace=trace, beam_size=beam_size)
63
+
64
+ elif isinstance(grammar, FeatureGrammar):
65
+ if parser is None:
66
+ parser = FeatureChartParser
67
+ if chart_class is None:
68
+ chart_class = FeatureChart
69
+ return parser(grammar, trace=trace, chart_class=chart_class)
70
+
71
+ else: # Plain CFG.
72
+ if parser is None:
73
+ parser = ChartParser
74
+ if chart_class is None:
75
+ chart_class = Chart
76
+ return parser(grammar, trace=trace, chart_class=chart_class)
77
+
78
+
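A minimal usage sketch for load_parser (not part of this file): the grammar path below assumes the optional NLTK "book_grammars" data package has been downloaded, and the sentence is illustrative. Because the grammar is an .fcfg file, the FeatureChartParser branch above is taken.

from nltk.parse.util import load_parser

# 'grammars/book_grammars/feat0.fcfg' ships with the optional book_grammars
# data package (nltk.download('book_grammars')); adjust the path if needed.
cp = load_parser('grammars/book_grammars/feat0.fcfg', trace=0)
for tree in cp.parse('Kim likes children'.split()):
    print(tree)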
79
+ def taggedsent_to_conll(sentence):
80
+ """
81
+ A module to convert a single POS tagged sentence into CONLL format.
82
+
83
+ >>> from nltk import word_tokenize, pos_tag
84
+ >>> text = "This is a foobar sentence."
85
+ >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE
86
+ ... print(line, end="")
87
+ 1 This _ DT DT _ 0 a _ _
88
+ 2 is _ VBZ VBZ _ 0 a _ _
89
+ 3 a _ DT DT _ 0 a _ _
90
+ 4 foobar _ JJ JJ _ 0 a _ _
91
+ 5 sentence _ NN NN _ 0 a _ _
92
+ 6 . _ . . _ 0 a _ _
93
+
94
+ :param sentence: A single input sentence to parse
95
+ :type sentence: list(tuple(str, str))
96
+ :rtype: iter(str)
97
+ :return: a generator yielding a single sentence in CONLL format.
98
+ """
99
+ for (i, (word, tag)) in enumerate(sentence, start=1):
100
+ input_str = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"]
101
+ input_str = "\t".join(input_str) + "\n"
102
+ yield input_str
103
+
104
+
105
+ def taggedsents_to_conll(sentences):
106
+ """
107
+ A module to convert the a POS tagged document stream
108
+ (i.e. list of list of tuples, a list of sentences) and yield lines
109
+ in CONLL format. This module yields one line per word and two newlines
110
+ for end of sentence.
111
+
112
+ >>> from nltk import word_tokenize, sent_tokenize, pos_tag
113
+ >>> text = "This is a foobar sentence. Is that right?"
114
+ >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)]
115
+ >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE
116
+ ... if line:
117
+ ... print(line, end="")
118
+ 1 This _ DT DT _ 0 a _ _
119
+ 2 is _ VBZ VBZ _ 0 a _ _
120
+ 3 a _ DT DT _ 0 a _ _
121
+ 4 foobar _ JJ JJ _ 0 a _ _
122
+ 5 sentence _ NN NN _ 0 a _ _
123
+ 6 . _ . . _ 0 a _ _
124
+ <BLANKLINE>
125
+ <BLANKLINE>
126
+ 1 Is _ VBZ VBZ _ 0 a _ _
127
+ 2 that _ IN IN _ 0 a _ _
128
+ 3 right _ NN NN _ 0 a _ _
129
+ 4 ? _ . . _ 0 a _ _
130
+ <BLANKLINE>
131
+ <BLANKLINE>
132
+
133
+ :param sentences: Input sentences to parse
134
+ :type sentence: list(list(tuple(str, str)))
135
+ :rtype: iter(str)
136
+ :return: a generator yielding sentences in CONLL format.
137
+ """
138
+ for sentence in sentences:
139
+ yield from taggedsent_to_conll(sentence)
140
+ yield "\n\n"
141
+
142
+
143
+ ######################################################################
144
+ # { Test Suites
145
+ ######################################################################
146
+
147
+
148
+ class TestGrammar:
149
+ """
150
+ Unit tests for CFG.
151
+ """
152
+
153
+ def __init__(self, grammar, suite, accept=None, reject=None):
154
+ self.test_grammar = grammar
155
+
156
+ self.cp = load_parser(grammar, trace=0)
157
+ self.suite = suite
158
+ self._accept = accept
159
+ self._reject = reject
160
+
161
+ def run(self, show_trees=False):
162
+ """
163
+ Sentences in the test suite are divided into two classes:
164
+
165
+ - grammatical (``accept``) and
166
+ - ungrammatical (``reject``).
167
+
168
+ If a sentence should parse according to the grammar, the value of
169
+ ``trees`` will be a non-empty list. If a sentence should be rejected
170
+ according to the grammar, then the value of ``trees`` will be None.
171
+ """
172
+ for test in self.suite:
173
+ print(test["doc"] + ":", end=" ")
174
+ for key in ["accept", "reject"]:
175
+ for sent in test[key]:
176
+ tokens = sent.split()
177
+ trees = list(self.cp.parse(tokens))
178
+ if show_trees and trees:
179
+ print()
180
+ print(sent)
181
+ for tree in trees:
182
+ print(tree)
183
+ if key == "accept":
184
+ if trees == []:
185
+ raise ValueError("Sentence '%s' failed to parse'" % sent)
186
+ else:
187
+ accepted = True
188
+ else:
189
+ if trees:
190
+ raise ValueError("Sentence '%s' received a parse'" % sent)
191
+ else:
192
+ rejected = True
193
+ if accepted and rejected:
194
+ print("All tests passed!")
195
+
196
+
197
+ def extract_test_sentences(string, comment_chars="#%;", encoding=None):
198
+ """
199
+ Parses a string with one test sentence per line.
200
+ Lines can optionally begin with:
201
+
202
+ - a bool, saying if the sentence is grammatical or not, or
203
+ - an int, giving the number of parse trees is should have,
204
+
205
+ The result information is followed by a colon, and then the sentence.
206
+ Empty lines and lines beginning with a comment char are ignored.
207
+
208
+ :return: a list of tuple of sentences and expected results,
209
+ where a sentence is a list of str,
210
+ and a result is None, or bool, or int
211
+
212
+ :param comment_chars: ``str`` of possible comment characters.
213
+ :param encoding: the encoding of the string, if it is binary
214
+ """
215
+ if encoding is not None:
216
+ string = string.decode(encoding)
217
+ sentences = []
218
+ for sentence in string.split("\n"):
219
+ if sentence == "" or sentence[0] in comment_chars:
220
+ continue
221
+ split_info = sentence.split(":", 1)
222
+ result = None
223
+ if len(split_info) == 2:
224
+ if split_info[0] in ["True", "true", "False", "false"]:
225
+ result = split_info[0] in ["True", "true"]
226
+ sentence = split_info[1]
227
+ else:
228
+ result = int(split_info[0])
229
+ sentence = split_info[1]
230
+ tokens = sentence.split()
231
+ if tokens == []:
232
+ continue
233
+ sentences += [(tokens, result)]
234
+ return sentences
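To make the expected input format of extract_test_sentences concrete, here is a small sketch on an inline test-suite string; the sentences are illustrative.

from nltk.parse.util import extract_test_sentences

suite = """
# lines starting with a comment character are ignored
True: the dog barks
2: I saw the man with the telescope
the cat sleeps
"""

# Prints True, 2 and None in turn, each followed by the tokenized sentence.
for tokens, expected in extract_test_sentences(suite):
    print(expected, tokens)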
env-llmeval/lib/python3.10/site-packages/nltk/parse/viterbi.py ADDED
@@ -0,0 +1,453 @@
1
+ # Natural Language Toolkit: Viterbi Probabilistic Parser
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from functools import reduce
10
+
11
+ from nltk.parse.api import ParserI
12
+ from nltk.tree import ProbabilisticTree, Tree
13
+
14
+ ##//////////////////////////////////////////////////////
15
+ ## Viterbi PCFG Parser
16
+ ##//////////////////////////////////////////////////////
17
+
18
+
19
+ class ViterbiParser(ParserI):
20
+ """
21
+ A bottom-up ``PCFG`` parser that uses dynamic programming to find
22
+ the single most likely parse for a text. The ``ViterbiParser`` parser
23
+ parses texts by filling in a "most likely constituent table".
24
+ This table records the most probable tree representation for any
25
+ given span and node value. In particular, it has an entry for
26
+ every start index, end index, and node value, recording the most
27
+ likely subtree that spans from the start index to the end index,
28
+ and has the given node value.
29
+
30
+ The ``ViterbiParser`` parser fills in this table incrementally. It starts
31
+ by filling in all entries for constituents that span one element
32
+ of text (i.e., entries where the end index is one greater than the
33
+ start index). After it has filled in all table entries for
34
+ constituents that span one element of text, it fills in the
35
+ entries for constitutants that span two elements of text. It
36
+ continues filling in the entries for constituents spanning larger
37
+ and larger portions of the text, until the entire table has been
38
+ filled. Finally, it returns the table entry for a constituent
39
+ spanning the entire text, whose node value is the grammar's start
40
+ symbol.
41
+
42
+ In order to find the most likely constituent with a given span and
43
+ node value, the ``ViterbiParser`` parser considers all productions that
44
+ could produce that node value. For each production, it finds all
45
+ children that collectively cover the span and have the node values
46
+ specified by the production's right hand side. If the probability
47
+ of the tree formed by applying the production to the children is
48
+ greater than the probability of the current entry in the table,
49
+ then the table is updated with this new tree.
50
+
51
+ A pseudo-code description of the algorithm used by
52
+ ``ViterbiParser`` is:
53
+
54
+ | Create an empty most likely constituent table, *MLC*.
55
+ | For width in 1...len(text):
56
+ | For start in 1...len(text)-width:
57
+ | For prod in grammar.productions:
58
+ | For each sequence of subtrees [t[1], t[2], ..., t[n]] in MLC,
59
+ | where t[i].label()==prod.rhs[i],
60
+ | and the sequence covers [start:start+width]:
61
+ | old_p = MLC[start, start+width, prod.lhs]
62
+ | new_p = P(t[1])P(t[2])...P(t[n])P(prod)
63
+ | if new_p > old_p:
64
+ | new_tree = Tree(prod.lhs, t[1], t[2], ..., t[n])
65
+ | MLC[start, start+width, prod.lhs] = new_tree
66
+ | Return MLC[0, len(text), start_symbol]
67
+
68
+ :type _grammar: PCFG
69
+ :ivar _grammar: The grammar used to parse sentences.
70
+ :type _trace: int
71
+ :ivar _trace: The level of tracing output that should be generated
72
+ when parsing a text.
73
+ """
74
+
75
+ def __init__(self, grammar, trace=0):
76
+ """
77
+ Create a new ``ViterbiParser`` parser, that uses ``grammar`` to
78
+ parse texts.
79
+
80
+ :type grammar: PCFG
81
+ :param grammar: The grammar used to parse texts.
82
+ :type trace: int
83
+ :param trace: The level of tracing that should be used when
84
+ parsing a text. ``0`` will generate no tracing output;
85
+ and higher numbers will produce more verbose tracing
86
+ output.
87
+ """
88
+ self._grammar = grammar
89
+ self._trace = trace
90
+
91
+ def grammar(self):
92
+ return self._grammar
93
+
94
+ def trace(self, trace=2):
95
+ """
96
+ Set the level of tracing output that should be generated when
97
+ parsing a text.
98
+
99
+ :type trace: int
100
+ :param trace: The trace level. A trace level of ``0`` will
101
+ generate no tracing output; and higher trace levels will
102
+ produce more verbose tracing output.
103
+ :rtype: None
104
+ """
105
+ self._trace = trace
106
+
107
+ def parse(self, tokens):
108
+ # Inherit docs from ParserI
109
+
110
+ tokens = list(tokens)
111
+ self._grammar.check_coverage(tokens)
112
+
113
+ # The most likely constituent table. This table specifies the
114
+ # most likely constituent for a given span and type.
115
+ # Constituents can be either Trees or tokens. For Trees,
116
+ # the "type" is the Nonterminal for the tree's root node
117
+ # value. For Tokens, the "type" is the token's type.
118
+ # The table is stored as a dictionary, since it is sparse.
119
+ constituents = {}
120
+
121
+ # Initialize the constituents dictionary with the words from
122
+ # the text.
123
+ if self._trace:
124
+ print("Inserting tokens into the most likely" + " constituents table...")
125
+ for index in range(len(tokens)):
126
+ token = tokens[index]
127
+ constituents[index, index + 1, token] = token
128
+ if self._trace > 1:
129
+ self._trace_lexical_insertion(token, index, len(tokens))
130
+
131
+ # Consider each span of length 1, 2, ..., n; and add any trees
132
+ # that might cover that span to the constituents dictionary.
133
+ for length in range(1, len(tokens) + 1):
134
+ if self._trace:
135
+ print(
136
+ "Finding the most likely constituents"
137
+ + " spanning %d text elements..." % length
138
+ )
139
+ for start in range(len(tokens) - length + 1):
140
+ span = (start, start + length)
141
+ self._add_constituents_spanning(span, constituents, tokens)
142
+
143
+ # Return the tree that spans the entire text & have the right cat
144
+ tree = constituents.get((0, len(tokens), self._grammar.start()))
145
+ if tree is not None:
146
+ yield tree
147
+
148
+ def _add_constituents_spanning(self, span, constituents, tokens):
149
+ """
150
+ Find any constituents that might cover ``span``, and add them
151
+ to the most likely constituents table.
152
+
153
+ :rtype: None
154
+ :type span: tuple(int, int)
155
+ :param span: The section of the text for which we are
156
+ trying to find possible constituents. The span is
157
+ specified as a pair of integers, where the first integer
158
+ is the index of the first token that should be included in
159
+ the constituent; and the second integer is the index of
160
+ the first token that should not be included in the
161
+ constituent. I.e., the constituent should cover
162
+ ``text[span[0]:span[1]]``, where ``text`` is the text
163
+ that we are parsing.
164
+
165
+ :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree)
166
+ :param constituents: The most likely constituents table. This
167
+ table records the most probable tree representation for
168
+ any given span and node value. In particular,
169
+ ``constituents(s,e,nv)`` is the most likely
170
+ ``ProbabilisticTree`` that covers ``text[s:e]``
171
+ and has a node value ``nv.symbol()``, where ``text``
172
+ is the text that we are parsing. When
173
+ ``_add_constituents_spanning`` is called, ``constituents``
174
+ should contain all possible constituents that are shorter
175
+ than ``span``.
176
+
177
+ :type tokens: list of tokens
178
+ :param tokens: The text we are parsing. This is only used for
179
+ trace output.
180
+ """
181
+ # Since some of the grammar productions may be unary, we need to
182
+ # repeatedly try all of the productions until none of them add any
183
+ # new constituents.
184
+ changed = True
185
+ while changed:
186
+ changed = False
187
+
188
+ # Find all instantiations of the grammar productions that
189
+ # cover the span.
190
+ instantiations = self._find_instantiations(span, constituents)
191
+
192
+ # For each production instantiation, add a new
193
+ # ProbabilisticTree whose probability is the product
194
+ # of the children's probabilities and the production's
195
+ # probability.
196
+ for (production, children) in instantiations:
197
+ subtrees = [c for c in children if isinstance(c, Tree)]
198
+ p = reduce(lambda pr, t: pr * t.prob(), subtrees, production.prob())
199
+ node = production.lhs().symbol()
200
+ tree = ProbabilisticTree(node, children, prob=p)
201
+
202
+ # If it's a new constituent, then add it to the
203
+ # constituents dictionary.
204
+ c = constituents.get((span[0], span[1], production.lhs()))
205
+ if self._trace > 1:
206
+ if c is None or c != tree:
207
+ if c is None or c.prob() < tree.prob():
208
+ print(" Insert:", end=" ")
209
+ else:
210
+ print(" Discard:", end=" ")
211
+ self._trace_production(production, p, span, len(tokens))
212
+ if c is None or c.prob() < tree.prob():
213
+ constituents[span[0], span[1], production.lhs()] = tree
214
+ changed = True
215
+
216
+ def _find_instantiations(self, span, constituents):
217
+ """
218
+ :return: a list of the production instantiations that cover a
219
+ given span of the text. A "production instantiation" is
220
+ a tuple containing a production and a list of children,
221
+ where the production's right hand side matches the list of
222
+ children; and the children cover ``span``. :rtype: list
223
+ of ``pair`` of ``Production``, (list of
224
+ (``ProbabilisticTree`` or token.
225
+
226
+ :type span: tuple(int, int)
227
+ :param span: The section of the text for which we are
228
+ trying to find production instantiations. The span is
229
+ specified as a pair of integers, where the first integer
230
+ is the index of the first token that should be covered by
231
+ the production instantiation; and the second integer is
232
+ the index of the first token that should not be covered by
233
+ the production instantiation.
234
+ :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree)
235
+ :param constituents: The most likely constituents table. This
236
+ table records the most probable tree representation for
237
+ any given span and node value. See the module
238
+ documentation for more information.
239
+ """
240
+ rv = []
241
+ for production in self._grammar.productions():
242
+ childlists = self._match_rhs(production.rhs(), span, constituents)
243
+
244
+ for childlist in childlists:
245
+ rv.append((production, childlist))
246
+ return rv
247
+
248
+ def _match_rhs(self, rhs, span, constituents):
249
+ """
250
+ :return: a set of all the lists of children that cover ``span``
251
+ and that match ``rhs``.
252
+ :rtype: list(list(ProbabilisticTree or token))
253
+
254
+ :type rhs: list(Nonterminal or any)
255
+ :param rhs: The list specifying what kinds of children need to
256
+ cover ``span``. Each nonterminal in ``rhs`` specifies
257
+ that the corresponding child should be a tree whose node
258
+ value is that nonterminal's symbol. Each terminal in ``rhs``
259
+ specifies that the corresponding child should be a token
260
+ whose type is that terminal.
261
+ :type span: tuple(int, int)
262
+ :param span: The section of the text for which we are
263
+ trying to find child lists. The span is specified as a
264
+ pair of integers, where the first integer is the index of
265
+ the first token that should be covered by the child list;
266
+ and the second integer is the index of the first token
267
+ that should not be covered by the child list.
268
+ :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree)
269
+ :param constituents: The most likely constituents table. This
270
+ table records the most probable tree representation for
271
+ any given span and node value. See the module
272
+ documentation for more information.
273
+ """
274
+ (start, end) = span
275
+
276
+ # Base case
277
+ if start >= end and rhs == ():
278
+ return [[]]
279
+ if start >= end or rhs == ():
280
+ return []
281
+
282
+ # Find everything that matches the 1st symbol of the RHS
283
+ childlists = []
284
+ for split in range(start, end + 1):
285
+ l = constituents.get((start, split, rhs[0]))
286
+ if l is not None:
287
+ rights = self._match_rhs(rhs[1:], (split, end), constituents)
288
+ childlists += [[l] + r for r in rights]
289
+
290
+ return childlists
291
+
292
+ def _trace_production(self, production, p, span, width):
293
+ """
294
+ Print trace output indicating that a given production has been
295
+ applied at a given location.
296
+
297
+ :param production: The production that has been applied
298
+ :type production: Production
299
+ :param p: The probability of the tree produced by the production.
300
+ :type p: float
301
+ :param span: The span of the production
302
+ :type span: tuple
303
+ :rtype: None
304
+ """
305
+
306
+ str = "|" + "." * span[0]
307
+ str += "=" * (span[1] - span[0])
308
+ str += "." * (width - span[1]) + "| "
309
+ str += "%s" % production
310
+ if self._trace > 2:
311
+ str = f"{str:<40} {p:12.10f} "
312
+
313
+ print(str)
314
+
315
+ def _trace_lexical_insertion(self, token, index, width):
316
+ str = " Insert: |" + "." * index + "=" + "." * (width - index - 1) + "| "
317
+ str += f"{token}"
318
+ print(str)
319
+
320
+ def __repr__(self):
321
+ return "<ViterbiParser for %r>" % self._grammar
322
+
323
+
324
+ ##//////////////////////////////////////////////////////
325
+ ## Test Code
326
+ ##//////////////////////////////////////////////////////
327
+
328
+
329
+ def demo():
330
+ """
331
+ A demonstration of the probabilistic parsers. The user is
332
+ prompted to select which demo to run, and how many parses should
333
+ be found; and then each parser is run on the same demo, and a
334
+ summary of the results are displayed.
335
+ """
336
+ import sys
337
+ import time
338
+
339
+ from nltk import tokenize
340
+ from nltk.grammar import PCFG
341
+ from nltk.parse import ViterbiParser
342
+
343
+ toy_pcfg1 = PCFG.fromstring(
344
+ """
345
+ S -> NP VP [1.0]
346
+ NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
347
+ Det -> 'the' [0.8] | 'my' [0.2]
348
+ N -> 'man' [0.5] | 'telescope' [0.5]
349
+ VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
350
+ V -> 'ate' [0.35] | 'saw' [0.65]
351
+ PP -> P NP [1.0]
352
+ P -> 'with' [0.61] | 'under' [0.39]
353
+ """
354
+ )
355
+
356
+ toy_pcfg2 = PCFG.fromstring(
357
+ """
358
+ S -> NP VP [1.0]
359
+ VP -> V NP [.59]
360
+ VP -> V [.40]
361
+ VP -> VP PP [.01]
362
+ NP -> Det N [.41]
363
+ NP -> Name [.28]
364
+ NP -> NP PP [.31]
365
+ PP -> P NP [1.0]
366
+ V -> 'saw' [.21]
367
+ V -> 'ate' [.51]
368
+ V -> 'ran' [.28]
369
+ N -> 'boy' [.11]
370
+ N -> 'cookie' [.12]
371
+ N -> 'table' [.13]
372
+ N -> 'telescope' [.14]
373
+ N -> 'hill' [.5]
374
+ Name -> 'Jack' [.52]
375
+ Name -> 'Bob' [.48]
376
+ P -> 'with' [.61]
377
+ P -> 'under' [.39]
378
+ Det -> 'the' [.41]
379
+ Det -> 'a' [.31]
380
+ Det -> 'my' [.28]
381
+ """
382
+ )
383
+
384
+ # Define two demos. Each demo has a sentence and a grammar.
385
+ demos = [
386
+ ("I saw the man with my telescope", toy_pcfg1),
387
+ ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2),
388
+ ]
389
+
390
+ # Ask the user which demo they want to use.
391
+ print()
392
+ for i in range(len(demos)):
393
+ print(f"{i + 1:>3}: {demos[i][0]}")
394
+ print(" %r" % demos[i][1])
395
+ print()
396
+ print("Which demo (%d-%d)? " % (1, len(demos)), end=" ")
397
+ try:
398
+ snum = int(sys.stdin.readline().strip()) - 1
399
+ sent, grammar = demos[snum]
400
+ except:
401
+ print("Bad sentence number")
402
+ return
403
+
404
+ # Tokenize the sentence.
405
+ tokens = sent.split()
406
+
407
+ parser = ViterbiParser(grammar)
408
+ all_parses = {}
409
+
410
+ print(f"\nsent: {sent}\nparser: {parser}\ngrammar: {grammar}")
411
+ parser.trace(3)
412
+ t = time.time()
413
+ parses = parser.parse_all(tokens)
414
+ time = time.time() - t
415
+ average = (
416
+ reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0
417
+ )
418
+ num_parses = len(parses)
419
+ for p in parses:
420
+ all_parses[p.freeze()] = 1
421
+
422
+ # Print some summary statistics
423
+ print()
424
+ print("Time (secs) # Parses Average P(parse)")
425
+ print("-----------------------------------------")
426
+ print("%11.4f%11d%19.14f" % (time, num_parses, average))
427
+ parses = all_parses.keys()
428
+ if parses:
429
+ p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses)
430
+ else:
431
+ p = 0
432
+ print("------------------------------------------")
433
+ print("%11s%11d%19.14f" % ("n/a", len(parses), p))
434
+
435
+ # Ask the user if we should draw the parses.
436
+ print()
437
+ print("Draw parses (y/n)? ", end=" ")
438
+ if sys.stdin.readline().strip().lower().startswith("y"):
439
+ from nltk.draw.tree import draw_trees
440
+
441
+ print(" please wait...")
442
+ draw_trees(*parses)
443
+
444
+ # Ask the user if we should print the parses.
445
+ print()
446
+ print("Print parses (y/n)? ", end=" ")
447
+ if sys.stdin.readline().strip().lower().startswith("y"):
448
+ for parse in parses:
449
+ print(parse)
450
+
451
+
452
+ if __name__ == "__main__":
453
+ demo()
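A self-contained sketch of the parser defined above, using the first toy grammar from the demo; it runs without user interaction or external data, and the sentence is the demo's own example.

from nltk.grammar import PCFG
from nltk.parse import ViterbiParser

grammar = PCFG.fromstring("""
S -> NP VP [1.0]
NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15]
Det -> 'the' [0.8] | 'my' [0.2]
N -> 'man' [0.5] | 'telescope' [0.5]
VP -> VP PP [0.1] | V NP [0.7] | V [0.2]
V -> 'ate' [0.35] | 'saw' [0.65]
PP -> P NP [1.0]
P -> 'with' [0.61] | 'under' [0.39]
""")

parser = ViterbiParser(grammar)
# parse() yields at most one ProbabilisticTree: the single most likely parse.
for tree in parser.parse("I saw the man with my telescope".split()):
    print(tree.prob(), tree)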
env-llmeval/lib/python3.10/site-packages/nltk/tag/__init__.py ADDED
@@ -0,0 +1,184 @@
1
+ # Natural Language Toolkit: Taggers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ """
9
+ NLTK Taggers
10
+
11
+ This package contains classes and interfaces for part-of-speech
12
+ tagging, or simply "tagging".
13
+
14
+ A "tag" is a case-sensitive string that specifies some property of a token,
15
+ such as its part of speech. Tagged tokens are encoded as tuples
16
+ ``(token, tag)``. For example, the following tagged token combines
17
+ the word ``'fly'`` with a noun part of speech tag (``'NN'``):
18
+
19
+ >>> tagged_tok = ('fly', 'NN')
20
+
21
+ An off-the-shelf tagger is available for English. It uses the Penn Treebank tagset:
22
+
23
+ >>> from nltk import pos_tag, word_tokenize
24
+ >>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE
25
+ [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'),
26
+ ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
27
+
28
+ A Russian tagger is also available if you specify lang="rus". It uses
29
+ the Russian National Corpus tagset:
30
+
31
+ >>> pos_tag(word_tokenize("Илья оторопел и дважды перечитал бумажку."), lang='rus') # doctest: +SKIP
32
+ [('Илья', 'S'), ('оторопел', 'V'), ('и', 'CONJ'), ('дважды', 'ADV'), ('перечитал', 'V'),
33
+ ('бумажку', 'S'), ('.', 'NONLEX')]
34
+
35
+ This package defines several taggers, which take a list of tokens,
36
+ assign a tag to each one, and return the resulting list of tagged tokens.
37
+ Most of the taggers are built automatically based on a training corpus.
38
+ For example, the unigram tagger tags each word *w* by checking what
39
+ the most frequent tag for *w* was in a training corpus:
40
+
41
+ >>> from nltk.corpus import brown
42
+ >>> from nltk.tag import UnigramTagger
43
+ >>> tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500])
44
+ >>> sent = ['Mitchell', 'decried', 'the', 'high', 'rate', 'of', 'unemployment']
45
+ >>> for word, tag in tagger.tag(sent):
46
+ ... print(word, '->', tag)
47
+ Mitchell -> NP
48
+ decried -> None
49
+ the -> AT
50
+ high -> JJ
51
+ rate -> NN
52
+ of -> IN
53
+ unemployment -> None
54
+
55
+ Note that words that the tagger has not seen during training receive a tag
56
+ of ``None``.
57
+
58
+ We evaluate a tagger on data that was not seen during training:
59
+
60
+ >>> round(tagger.accuracy(brown.tagged_sents(categories='news')[500:600]), 3)
61
+ 0.735
62
+
63
+ For more information, please consult chapter 5 of the NLTK Book.
64
+
65
+ isort:skip_file
66
+ """
67
+
68
+ from nltk.tag.api import TaggerI
69
+ from nltk.tag.util import str2tuple, tuple2str, untag
70
+ from nltk.tag.sequential import (
71
+ SequentialBackoffTagger,
72
+ ContextTagger,
73
+ DefaultTagger,
74
+ NgramTagger,
75
+ UnigramTagger,
76
+ BigramTagger,
77
+ TrigramTagger,
78
+ AffixTagger,
79
+ RegexpTagger,
80
+ ClassifierBasedTagger,
81
+ ClassifierBasedPOSTagger,
82
+ )
83
+ from nltk.tag.brill import BrillTagger
84
+ from nltk.tag.brill_trainer import BrillTaggerTrainer
85
+ from nltk.tag.tnt import TnT
86
+ from nltk.tag.hunpos import HunposTagger
87
+ from nltk.tag.stanford import StanfordTagger, StanfordPOSTagger, StanfordNERTagger
88
+ from nltk.tag.hmm import HiddenMarkovModelTagger, HiddenMarkovModelTrainer
89
+ from nltk.tag.senna import SennaTagger, SennaChunkTagger, SennaNERTagger
90
+ from nltk.tag.mapping import tagset_mapping, map_tag
91
+ from nltk.tag.crf import CRFTagger
92
+ from nltk.tag.perceptron import PerceptronTagger
93
+
94
+ from nltk.data import load, find
95
+
96
+ RUS_PICKLE = (
97
+ "taggers/averaged_perceptron_tagger_ru/averaged_perceptron_tagger_ru.pickle"
98
+ )
99
+
100
+
101
+ def _get_tagger(lang=None):
102
+ if lang == "rus":
103
+ tagger = PerceptronTagger(False)
104
+ ap_russian_model_loc = "file:" + str(find(RUS_PICKLE))
105
+ tagger.load(ap_russian_model_loc)
106
+ else:
107
+ tagger = PerceptronTagger()
108
+ return tagger
109
+
110
+
111
+ def _pos_tag(tokens, tagset=None, tagger=None, lang=None):
112
+ # Currently only supports English and Russian.
113
+ if lang not in ["eng", "rus"]:
114
+ raise NotImplementedError(
115
+ "Currently, NLTK pos_tag only supports English and Russian "
116
+ "(i.e. lang='eng' or lang='rus')"
117
+ )
118
+ # Raise an error if tokens is a single string instead of a list of strings
119
+ elif isinstance(tokens, str):
120
+ raise TypeError("tokens: expected a list of strings, got a string")
121
+
122
+ else:
123
+ tagged_tokens = tagger.tag(tokens)
124
+ if tagset: # Maps to the specified tagset.
125
+ if lang == "eng":
126
+ tagged_tokens = [
127
+ (token, map_tag("en-ptb", tagset, tag))
128
+ for (token, tag) in tagged_tokens
129
+ ]
130
+ elif lang == "rus":
131
+ # Note that the new Russian pos tags from the model contains suffixes,
132
+ # see https://github.com/nltk/nltk/issues/2151#issuecomment-430709018
133
+ tagged_tokens = [
134
+ (token, map_tag("ru-rnc-new", tagset, tag.partition("=")[0]))
135
+ for (token, tag) in tagged_tokens
136
+ ]
137
+ return tagged_tokens
138
+
139
+
140
+ def pos_tag(tokens, tagset=None, lang="eng"):
141
+ """
142
+ Use NLTK's currently recommended part of speech tagger to
143
+ tag the given list of tokens.
144
+
145
+ >>> from nltk.tag import pos_tag
146
+ >>> from nltk.tokenize import word_tokenize
147
+ >>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE
148
+ [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'),
149
+ ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
150
+ >>> pos_tag(word_tokenize("John's big idea isn't all that bad."), tagset='universal') # doctest: +NORMALIZE_WHITESPACE
151
+ [('John', 'NOUN'), ("'s", 'PRT'), ('big', 'ADJ'), ('idea', 'NOUN'), ('is', 'VERB'),
152
+ ("n't", 'ADV'), ('all', 'DET'), ('that', 'DET'), ('bad', 'ADJ'), ('.', '.')]
153
+
154
+ NB. Use `pos_tag_sents()` for efficient tagging of more than one sentence.
155
+
156
+ :param tokens: Sequence of tokens to be tagged
157
+ :type tokens: list(str)
158
+ :param tagset: the tagset to be used, e.g. universal, wsj, brown
159
+ :type tagset: str
160
+ :param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
161
+ :type lang: str
162
+ :return: The tagged tokens
163
+ :rtype: list(tuple(str, str))
164
+ """
165
+ tagger = _get_tagger(lang)
166
+ return _pos_tag(tokens, tagset, tagger, lang)
167
+
168
+
169
+ def pos_tag_sents(sentences, tagset=None, lang="eng"):
170
+ """
171
+ Use NLTK's currently recommended part of speech tagger to tag the
172
+ given list of sentences, each consisting of a list of tokens.
173
+
174
+ :param sentences: List of sentences to be tagged
175
+ :type sentences: list(list(str))
176
+ :param tagset: the tagset to be used, e.g. universal, wsj, brown
177
+ :type tagset: str
178
+ :param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
179
+ :type lang: str
180
+ :return: The list of tagged sentences
181
+ :rtype: list(list(tuple(str, str)))
182
+ """
183
+ tagger = _get_tagger(lang)
184
+ return [_pos_tag(sent, tagset, tagger, lang) for sent in sentences]
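A short usage sketch for ``pos_tag`` and ``pos_tag_sents``, assuming the ``punkt`` and ``averaged_perceptron_tagger`` resources have already been downloaded (the ``download()`` calls are only needed once per environment):

from nltk import download, pos_tag, pos_tag_sents, word_tokenize

# One-time resource setup (skip if the models are already installed).
download("punkt")
download("averaged_perceptron_tagger")

tokens = word_tokenize("John's big idea isn't all that bad.")
print(pos_tag(tokens))                      # Penn Treebank tags
print(pos_tag(tokens, tagset="universal"))  # mapped via nltk.tag.mapping

# pos_tag_sents() loads the tagger once and reuses it for every sentence.
sents = [word_tokenize(s) for s in ["The boy saw Jack.", "Jack ran."]]
print(pos_tag_sents(sents))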
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (7.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc ADDED
Binary file (14.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc ADDED
Binary file (18.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc ADDED
Binary file (7.57 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc ADDED
Binary file (40.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc ADDED
Binary file (4.59 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc ADDED
Binary file (3.32 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc ADDED
Binary file (5.03 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc ADDED
Binary file (26.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc ADDED
Binary file (7.35 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc ADDED
Binary file (2.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tag/api.py ADDED
@@ -0,0 +1,296 @@
1
+ # Natural Language Toolkit: Tagger Interface
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # Tom Aarsen <>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ Interface for tagging each token in a sentence with supplementary
12
+ information, such as its part of speech.
13
+ """
14
+ from abc import ABCMeta, abstractmethod
15
+ from functools import lru_cache
16
+ from itertools import chain
17
+ from typing import Dict
18
+
19
+ from nltk.internals import deprecated, overridden
20
+ from nltk.metrics import ConfusionMatrix, accuracy
21
+ from nltk.tag.util import untag
22
+
23
+
24
+ class TaggerI(metaclass=ABCMeta):
25
+ """
26
+ A processing interface for assigning a tag to each token in a list.
27
+ Tags are case sensitive strings that identify some property of each
28
+ token, such as its part of speech or its sense.
29
+
30
+ Some taggers require specific types for their tokens. This is
31
+ generally indicated by the use of a sub-interface to ``TaggerI``.
32
+ For example, featureset taggers, which are subclassed from
33
+ ``FeaturesetTaggerI``, require that each token be a ``featureset``.
34
+
35
+ Subclasses must define:
36
+ - either ``tag()`` or ``tag_sents()`` (or both)
37
+ """
38
+
39
+ @abstractmethod
40
+ def tag(self, tokens):
41
+ """
42
+ Determine the most appropriate tag sequence for the given
43
+ token sequence, and return a corresponding list of tagged
44
+ tokens. A tagged token is encoded as a tuple ``(token, tag)``.
45
+
46
+ :rtype: list(tuple(str, str))
47
+ """
48
+ if overridden(self.tag_sents):
49
+ return self.tag_sents([tokens])[0]
50
+
51
+ def tag_sents(self, sentences):
52
+ """
53
+ Apply ``self.tag()`` to each element of *sentences*. I.e.::
54
+
55
+ return [self.tag(sent) for sent in sentences]
56
+ """
57
+ return [self.tag(sent) for sent in sentences]
58
+
59
+ @deprecated("Use accuracy(gold) instead.")
60
+ def evaluate(self, gold):
61
+ return self.accuracy(gold)
62
+
63
+ def accuracy(self, gold):
64
+ """
65
+ Score the accuracy of the tagger against the gold standard.
66
+ Strip the tags from the gold standard text, retag it using
67
+ the tagger, then compute the accuracy score.
68
+
69
+ :param gold: The list of tagged sentences to score the tagger on.
70
+ :type gold: list(list(tuple(str, str)))
71
+ :rtype: float
72
+ """
73
+
74
+ tagged_sents = self.tag_sents(untag(sent) for sent in gold)
75
+ gold_tokens = list(chain.from_iterable(gold))
76
+ test_tokens = list(chain.from_iterable(tagged_sents))
77
+ return accuracy(gold_tokens, test_tokens)
78
+
79
+ @lru_cache(maxsize=1)
80
+ def _confusion_cached(self, gold):
81
+ """
82
+ Inner function used after ``gold`` is converted to a
83
+ ``tuple(tuple(tuple(str, str)))``. That way, we can use caching on
84
+ creating a ConfusionMatrix.
85
+
86
+ :param gold: The list of tagged sentences to run the tagger with,
87
+ also used as the reference values in the generated confusion matrix.
88
+ :type gold: tuple(tuple(tuple(str, str)))
89
+ :rtype: ConfusionMatrix
90
+ """
91
+
92
+ tagged_sents = self.tag_sents(untag(sent) for sent in gold)
93
+ gold_tokens = [token for _word, token in chain.from_iterable(gold)]
94
+ test_tokens = [token for _word, token in chain.from_iterable(tagged_sents)]
95
+ return ConfusionMatrix(gold_tokens, test_tokens)
96
+
97
+ def confusion(self, gold):
98
+ """
99
+ Return a ConfusionMatrix with the tags from ``gold`` as the reference
100
+ values, with the predictions from ``tag_sents`` as the predicted values.
101
+
102
+ >>> from nltk.tag import PerceptronTagger
103
+ >>> from nltk.corpus import treebank
104
+ >>> tagger = PerceptronTagger()
105
+ >>> gold_data = treebank.tagged_sents()[:10]
106
+ >>> print(tagger.confusion(gold_data))
107
+ | - |
108
+ | N |
109
+ | O P |
110
+ | N J J N N P P R R V V V V V W |
111
+ | ' E C C D E I J J J M N N N O R P R B R T V B B B B B D ` |
112
+ | ' , - . C D T X N J R S D N P S S P $ B R P O B D G N P Z T ` |
113
+ -------+----------------------------------------------------------------------------------------------+
114
+ '' | <1> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
115
+ , | .<15> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
116
+ -NONE- | . . <.> . . 2 . . . 2 . . . 5 1 . . . . 2 . . . . . . . . . . . |
117
+ . | . . .<10> . . . . . . . . . . . . . . . . . . . . . . . . . . . |
118
+ CC | . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . . . . |
119
+ CD | . . . . . <5> . . . . . . . . . . . . . . . . . . . . . . . . . |
120
+ DT | . . . . . .<20> . . . . . . . . . . . . . . . . . . . . . . . . |
121
+ EX | . . . . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . |
122
+ IN | . . . . . . . .<22> . . . . . . . . . . 3 . . . . . . . . . . . |
123
+ JJ | . . . . . . . . .<16> . . . . 1 . . . . 1 . . . . . . . . . . . |
124
+ JJR | . . . . . . . . . . <.> . . . . . . . . . . . . . . . . . . . . |
125
+ JJS | . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . . |
126
+ MD | . . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . |
127
+ NN | . . . . . . . . . . . . .<28> 1 1 . . . . . . . . . . . . . . . |
128
+ NNP | . . . . . . . . . . . . . .<25> . . . . . . . . . . . . . . . . |
129
+ NNS | . . . . . . . . . . . . . . .<19> . . . . . . . . . . . . . . . |
130
+ POS | . . . . . . . . . . . . . . . . <1> . . . . . . . . . . . . . . |
131
+ PRP | . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . . . |
132
+ PRP$ | . . . . . . . . . . . . . . . . . . <2> . . . . . . . . . . . . |
133
+ RB | . . . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . |
134
+ RBR | . . . . . . . . . . 1 . . . . . . . . . <1> . . . . . . . . . . |
135
+ RP | . . . . . . . . . . . . . . . . . . . . . <1> . . . . . . . . . |
136
+ TO | . . . . . . . . . . . . . . . . . . . . . . <5> . . . . . . . . |
137
+ VB | . . . . . . . . . . . . . . . . . . . . . . . <3> . . . . . . . |
138
+ VBD | . . . . . . . . . . . . . 1 . . . . . . . . . . <6> . . . . . . |
139
+ VBG | . . . . . . . . . . . . . 1 . . . . . . . . . . . <4> . . . . . |
140
+ VBN | . . . . . . . . . . . . . . . . . . . . . . . . 1 . <4> . . . . |
141
+ VBP | . . . . . . . . . . . . . . . . . . . . . . . . . . . <3> . . . |
142
+ VBZ | . . . . . . . . . . . . . . . . . . . . . . . . . . . . <7> . . |
143
+ WDT | . . . . . . . . 2 . . . . . . . . . . . . . . . . . . . . <.> . |
144
+ `` | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <1>|
145
+ -------+----------------------------------------------------------------------------------------------+
146
+ (row = reference; col = test)
147
+ <BLANKLINE>
148
+
149
+ :param gold: The list of tagged sentences to run the tagger with,
150
+ also used as the reference values in the generated confusion matrix.
151
+ :type gold: list(list(tuple(str, str)))
152
+ :rtype: ConfusionMatrix
153
+ """
154
+
155
+ return self._confusion_cached(tuple(tuple(sent) for sent in gold))
156
+
157
+ def recall(self, gold) -> Dict[str, float]:
158
+ """
159
+ Compute the recall for each tag from ``gold`` or from running ``tag``
160
+ on the tokenized sentences from ``gold``. Then, return the dictionary
161
+ with mappings from tag to recall. The recall is defined as:
162
+
163
+ - *r* = true positive / (true positive + false negative)
164
+
165
+ :param gold: The list of tagged sentences to score the tagger on.
166
+ :type gold: list(list(tuple(str, str)))
167
+ :return: A mapping from tags to recall
168
+ :rtype: Dict[str, float]
169
+ """
170
+
171
+ cm = self.confusion(gold)
172
+ return {tag: cm.recall(tag) for tag in cm._values}
173
+
174
+ def precision(self, gold):
175
+ """
176
+ Compute the precision for each tag from ``gold`` or from running ``tag``
177
+ on the tokenized sentences from ``gold``. Then, return the dictionary
178
+ with mappings from tag to precision. The precision is defined as:
179
+
180
+ - *p* = true positive / (true positive + false positive)
181
+
182
+ :param gold: The list of tagged sentences to score the tagger on.
183
+ :type gold: list(list(tuple(str, str)))
184
+ :return: A mapping from tags to precision
185
+ :rtype: Dict[str, float]
186
+ """
187
+
188
+ cm = self.confusion(gold)
189
+ return {tag: cm.precision(tag) for tag in cm._values}
190
+
191
+ def f_measure(self, gold, alpha=0.5):
192
+ """
193
+ Compute the f-measure for each tag from ``gold`` or from running ``tag``
194
+ on the tokenized sentences from ``gold``. Then, return the dictionary
195
+ with mappings from tag to f-measure. The f-measure is the harmonic mean
196
+ of the ``precision`` and ``recall``, weighted by ``alpha``.
197
+ In particular, given the precision *p* and recall *r* defined by:
198
+
199
+ - *p* = true positive / (true positive + false positive)
200
+ - *r* = true positive / (true positive + false negative)
201
+
202
+ The f-measure is:
203
+
204
+ - *1/(alpha/p + (1-alpha)/r)*
205
+
206
+ With ``alpha = 0.5``, this reduces to:
207
+
208
+ - *2pr / (p + r)*
209
+
210
+ :param gold: The list of tagged sentences to score the tagger on.
211
+ :type gold: list(list(tuple(str, str)))
212
+ :param alpha: Ratio of the cost of false negative compared to false
213
+ positives. Defaults to 0.5, where the costs are equal.
214
+ :type alpha: float
215
+ :return: A mapping from tags to precision
216
+ :rtype: Dict[str, float]
217
+ """
218
+ cm = self.confusion(gold)
219
+ return {tag: cm.f_measure(tag, alpha) for tag in cm._values}
220
+
221
+ def evaluate_per_tag(self, gold, alpha=0.5, truncate=None, sort_by_count=False):
222
+ """Tabulate the **recall**, **precision** and **f-measure**
223
+ for each tag from ``gold`` or from running ``tag`` on the tokenized
224
+ sentences from ``gold``.
225
+
226
+ >>> from nltk.tag import PerceptronTagger
227
+ >>> from nltk.corpus import treebank
228
+ >>> tagger = PerceptronTagger()
229
+ >>> gold_data = treebank.tagged_sents()[:10]
230
+ >>> print(tagger.evaluate_per_tag(gold_data))
231
+ Tag | Prec. | Recall | F-measure
232
+ -------+--------+--------+-----------
233
+ '' | 1.0000 | 1.0000 | 1.0000
234
+ , | 1.0000 | 1.0000 | 1.0000
235
+ -NONE- | 0.0000 | 0.0000 | 0.0000
236
+ . | 1.0000 | 1.0000 | 1.0000
237
+ CC | 1.0000 | 1.0000 | 1.0000
238
+ CD | 0.7143 | 1.0000 | 0.8333
239
+ DT | 1.0000 | 1.0000 | 1.0000
240
+ EX | 1.0000 | 1.0000 | 1.0000
241
+ IN | 0.9167 | 0.8800 | 0.8980
242
+ JJ | 0.8889 | 0.8889 | 0.8889
243
+ JJR | 0.0000 | 0.0000 | 0.0000
244
+ JJS | 1.0000 | 1.0000 | 1.0000
245
+ MD | 1.0000 | 1.0000 | 1.0000
246
+ NN | 0.8000 | 0.9333 | 0.8615
247
+ NNP | 0.8929 | 1.0000 | 0.9434
248
+ NNS | 0.9500 | 1.0000 | 0.9744
249
+ POS | 1.0000 | 1.0000 | 1.0000
250
+ PRP | 1.0000 | 1.0000 | 1.0000
251
+ PRP$ | 1.0000 | 1.0000 | 1.0000
252
+ RB | 0.4000 | 1.0000 | 0.5714
253
+ RBR | 1.0000 | 0.5000 | 0.6667
254
+ RP | 1.0000 | 1.0000 | 1.0000
255
+ TO | 1.0000 | 1.0000 | 1.0000
256
+ VB | 1.0000 | 1.0000 | 1.0000
257
+ VBD | 0.8571 | 0.8571 | 0.8571
258
+ VBG | 1.0000 | 0.8000 | 0.8889
259
+ VBN | 1.0000 | 0.8000 | 0.8889
260
+ VBP | 1.0000 | 1.0000 | 1.0000
261
+ VBZ | 1.0000 | 1.0000 | 1.0000
262
+ WDT | 0.0000 | 0.0000 | 0.0000
263
+ `` | 1.0000 | 1.0000 | 1.0000
264
+ <BLANKLINE>
265
+
266
+ :param gold: The list of tagged sentences to score the tagger on.
267
+ :type gold: list(list(tuple(str, str)))
268
+ :param alpha: Ratio of the cost of false negative compared to false
269
+ positives, as used in the f-measure computation. Defaults to 0.5,
270
+ where the costs are equal.
271
+ :type alpha: float
272
+ :param truncate: If specified, then only show the specified
273
+ number of values. Any sorting (e.g., sort_by_count)
274
+ will be performed before truncation. Defaults to None
275
+ :type truncate: int, optional
276
+ :param sort_by_count: Whether to sort the outputs on number of
277
+ occurrences of that tag in the ``gold`` data, defaults to False
278
+ :type sort_by_count: bool, optional
279
+ :return: A tabulated recall, precision and f-measure string
280
+ :rtype: str
281
+ """
282
+ cm = self.confusion(gold)
283
+ return cm.evaluate(alpha=alpha, truncate=truncate, sort_by_count=sort_by_count)
284
+
285
+ def _check_params(self, train, model):
286
+ if (train and model) or (not train and not model):
287
+ raise ValueError("Must specify either training data or trained model.")
288
+
289
+
290
+ class FeaturesetTaggerI(TaggerI):
291
+ """
292
+ A tagger that requires tokens to be ``featuresets``. A featureset
293
+ is a dictionary that maps from feature names to feature
294
+ values. See ``nltk.classify`` for more information about features
295
+ and featuresets.
296
+ """
env-llmeval/lib/python3.10/site-packages/nltk/tag/brill.py ADDED
@@ -0,0 +1,449 @@
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ from collections import Counter, defaultdict
11
+
12
+ from nltk import jsontags
13
+ from nltk.tag import TaggerI
14
+ from nltk.tbl import Feature, Template
15
+
16
+ ######################################################################
17
+ # Brill Templates
18
+ ######################################################################
19
+
20
+
21
+ @jsontags.register_tag
22
+ class Word(Feature):
23
+ """
24
+ Feature which examines the text (word) of nearby tokens.
25
+ """
26
+
27
+ json_tag = "nltk.tag.brill.Word"
28
+
29
+ @staticmethod
30
+ def extract_property(tokens, index):
31
+ """@return: The given token's text."""
32
+ return tokens[index][0]
33
+
34
+
35
+ @jsontags.register_tag
36
+ class Pos(Feature):
37
+ """
38
+ Feature which examines the tags of nearby tokens.
39
+ """
40
+
41
+ json_tag = "nltk.tag.brill.Pos"
42
+
43
+ @staticmethod
44
+ def extract_property(tokens, index):
45
+ """@return: The given token's tag."""
46
+ return tokens[index][1]
47
+
48
+
49
+ def nltkdemo18():
50
+ """
51
+ Return 18 templates, from the original nltk demo, in multi-feature syntax
52
+ """
53
+ return [
54
+ Template(Pos([-1])),
55
+ Template(Pos([1])),
56
+ Template(Pos([-2])),
57
+ Template(Pos([2])),
58
+ Template(Pos([-2, -1])),
59
+ Template(Pos([1, 2])),
60
+ Template(Pos([-3, -2, -1])),
61
+ Template(Pos([1, 2, 3])),
62
+ Template(Pos([-1]), Pos([1])),
63
+ Template(Word([-1])),
64
+ Template(Word([1])),
65
+ Template(Word([-2])),
66
+ Template(Word([2])),
67
+ Template(Word([-2, -1])),
68
+ Template(Word([1, 2])),
69
+ Template(Word([-3, -2, -1])),
70
+ Template(Word([1, 2, 3])),
71
+ Template(Word([-1]), Word([1])),
72
+ ]
73
+
74
+
75
+ def nltkdemo18plus():
76
+ """
77
+ Return 18 templates, from the original nltk demo, and additionally a few
78
+ multi-feature ones (the motivation is easy comparison with nltkdemo18)
79
+ """
80
+ return nltkdemo18() + [
81
+ Template(Word([-1]), Pos([1])),
82
+ Template(Pos([-1]), Word([1])),
83
+ Template(Word([-1]), Word([0]), Pos([1])),
84
+ Template(Pos([-1]), Word([0]), Word([1])),
85
+ Template(Pos([-1]), Word([0]), Pos([1])),
86
+ ]
87
+
88
+
89
+ def fntbl37():
90
+ """
91
+ Return 37 templates taken from the postagging task of the
92
+ fntbl distribution https://www.cs.jhu.edu/~rflorian/fntbl/
93
+ (37 is after excluding a handful which do not condition on Pos[0];
94
+ fntbl can do that but the current nltk implementation cannot.)
95
+ """
96
+ return [
97
+ Template(Word([0]), Word([1]), Word([2])),
98
+ Template(Word([-1]), Word([0]), Word([1])),
99
+ Template(Word([0]), Word([-1])),
100
+ Template(Word([0]), Word([1])),
101
+ Template(Word([0]), Word([2])),
102
+ Template(Word([0]), Word([-2])),
103
+ Template(Word([1, 2])),
104
+ Template(Word([-2, -1])),
105
+ Template(Word([1, 2, 3])),
106
+ Template(Word([-3, -2, -1])),
107
+ Template(Word([0]), Pos([2])),
108
+ Template(Word([0]), Pos([-2])),
109
+ Template(Word([0]), Pos([1])),
110
+ Template(Word([0]), Pos([-1])),
111
+ Template(Word([0])),
112
+ Template(Word([-2])),
113
+ Template(Word([2])),
114
+ Template(Word([1])),
115
+ Template(Word([-1])),
116
+ Template(Pos([-1]), Pos([1])),
117
+ Template(Pos([1]), Pos([2])),
118
+ Template(Pos([-1]), Pos([-2])),
119
+ Template(Pos([1])),
120
+ Template(Pos([-1])),
121
+ Template(Pos([-2])),
122
+ Template(Pos([2])),
123
+ Template(Pos([1, 2, 3])),
124
+ Template(Pos([1, 2])),
125
+ Template(Pos([-3, -2, -1])),
126
+ Template(Pos([-2, -1])),
127
+ Template(Pos([1]), Word([0]), Word([1])),
128
+ Template(Pos([1]), Word([0]), Word([-1])),
129
+ Template(Pos([-1]), Word([-1]), Word([0])),
130
+ Template(Pos([-1]), Word([0]), Word([1])),
131
+ Template(Pos([-2]), Pos([-1])),
132
+ Template(Pos([1]), Pos([2])),
133
+ Template(Pos([1]), Pos([2]), Word([1])),
134
+ ]
135
+
136
+
137
+ def brill24():
138
+ """
139
+ Return 24 templates of the seminal TBL paper, Brill (1995)
140
+ """
141
+ return [
142
+ Template(Pos([-1])),
143
+ Template(Pos([1])),
144
+ Template(Pos([-2])),
145
+ Template(Pos([2])),
146
+ Template(Pos([-2, -1])),
147
+ Template(Pos([1, 2])),
148
+ Template(Pos([-3, -2, -1])),
149
+ Template(Pos([1, 2, 3])),
150
+ Template(Pos([-1]), Pos([1])),
151
+ Template(Pos([-2]), Pos([-1])),
152
+ Template(Pos([1]), Pos([2])),
153
+ Template(Word([-1])),
154
+ Template(Word([1])),
155
+ Template(Word([-2])),
156
+ Template(Word([2])),
157
+ Template(Word([-2, -1])),
158
+ Template(Word([1, 2])),
159
+ Template(Word([-1, 0])),
160
+ Template(Word([0, 1])),
161
+ Template(Word([0])),
162
+ Template(Word([-1]), Pos([-1])),
163
+ Template(Word([1]), Pos([1])),
164
+ Template(Word([0]), Word([-1]), Pos([-1])),
165
+ Template(Word([0]), Word([1]), Pos([1])),
166
+ ]
167
+
168
+
169
+ def describe_template_sets():
170
+ """
171
+ Print the available template sets in this demo, with a short description
172
+ """
173
+ import inspect
174
+ import sys
175
+
176
+ # a bit of magic to get all functions in this module
177
+ templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
178
+ for (name, obj) in templatesets:
179
+ if name == "describe_template_sets":
180
+ continue
181
+ print(name, obj.__doc__, "\n")
182
+
183
+
184
+ ######################################################################
185
+ # The Brill Tagger
186
+ ######################################################################
187
+
188
+
189
+ @jsontags.register_tag
190
+ class BrillTagger(TaggerI):
191
+ """
192
+ Brill's transformational rule-based tagger. Brill taggers use an
193
+ initial tagger (such as ``tag.DefaultTagger``) to assign an initial
194
+ tag sequence to a text; and then apply an ordered list of
195
+ transformational rules to correct the tags of individual tokens.
196
+ These transformation rules are specified by the ``TagRule``
197
+ interface.
198
+
199
+ Brill taggers can be created directly, from an initial tagger and
200
+ a list of transformational rules; but more often, Brill taggers
201
+ are created by learning rules from a training corpus, using one
202
+ of the TaggerTrainers available.
203
+ """
204
+
205
+ json_tag = "nltk.tag.BrillTagger"
206
+
207
+ def __init__(self, initial_tagger, rules, training_stats=None):
208
+ """
209
+ :param initial_tagger: The initial tagger
210
+ :type initial_tagger: TaggerI
211
+
212
+ :param rules: An ordered list of transformation rules that
213
+ should be used to correct the initial tagging.
214
+ :type rules: list(TagRule)
215
+
216
+ :param training_stats: A dictionary of statistics collected
217
+ during training, for possible later use
218
+ :type training_stats: dict
219
+
220
+ """
221
+ self._initial_tagger = initial_tagger
222
+ self._rules = tuple(rules)
223
+ self._training_stats = training_stats
224
+
225
+ def encode_json_obj(self):
226
+ return self._initial_tagger, self._rules, self._training_stats
227
+
228
+ @classmethod
229
+ def decode_json_obj(cls, obj):
230
+ _initial_tagger, _rules, _training_stats = obj
231
+ return cls(_initial_tagger, _rules, _training_stats)
232
+
233
+ def rules(self):
234
+ """
235
+ Return the ordered list of transformation rules that this tagger has learnt
236
+
237
+ :return: the ordered list of transformation rules that correct the initial tagging
238
+ :rtype: list of Rules
239
+ """
240
+ return self._rules
241
+
242
+ def train_stats(self, statistic=None):
243
+ """
244
+ Return a named statistic collected during training, or a dictionary of all
245
+ available statistics if no name given
246
+
247
+ :param statistic: name of statistic
248
+ :type statistic: str
249
+ :return: some statistic collected during training of this tagger
250
+ :rtype: any (but usually a number)
251
+ """
252
+ if statistic is None:
253
+ return self._training_stats
254
+ else:
255
+ return self._training_stats.get(statistic)
256
+
257
+ def tag(self, tokens):
258
+ # Inherit documentation from TaggerI
259
+
260
+ # Run the initial tagger.
261
+ tagged_tokens = self._initial_tagger.tag(tokens)
262
+
263
+ # Create a dictionary that maps each tag to a list of the
264
+ # indices of tokens that have that tag.
265
+ tag_to_positions = defaultdict(set)
266
+ for i, (token, tag) in enumerate(tagged_tokens):
267
+ tag_to_positions[tag].add(i)
268
+
269
+ # Apply each rule, in order. Only try to apply rules at
270
+ # positions that have the desired original tag.
271
+ for rule in self._rules:
272
+ # Find the positions where it might apply
273
+ positions = tag_to_positions.get(rule.original_tag, [])
274
+ # Apply the rule at those positions.
275
+ changed = rule.apply(tagged_tokens, positions)
276
+ # Update tag_to_positions with the positions of tags that
277
+ # were modified.
278
+ for i in changed:
279
+ tag_to_positions[rule.original_tag].remove(i)
280
+ tag_to_positions[rule.replacement_tag].add(i)
281
+
282
+ return tagged_tokens
283
+
284
+ def print_template_statistics(self, test_stats=None, printunused=True):
285
+ """
286
+ Print a list of all templates, ranked according to efficiency.
287
+
288
+ If test_stats is available, the templates are ranked according to their
289
+ relative contribution (summed for all rules created from a given template,
290
+ weighted by score) to the performance on the test set. If no test_stats, then
291
+ statistics collected during training are used instead. There is also
292
+ an unweighted measure (just counting the rules). This is less informative,
293
+ though, as many low-score rules will appear towards the end of training.
294
+
295
+ :param test_stats: dictionary of statistics collected during testing
296
+ :type test_stats: dict of str -> any (but usually numbers)
297
+ :param printunused: if True, print a list of all unused templates
298
+ :type printunused: bool
299
+ :return: None
300
+ :rtype: None
301
+ """
302
+ tids = [r.templateid for r in self._rules]
303
+ train_stats = self.train_stats()
304
+
305
+ trainscores = train_stats["rulescores"]
306
+ assert len(trainscores) == len(
307
+ tids
308
+ ), "corrupt statistics: " "{} train scores for {} rules".format(
309
+ trainscores, tids
310
+ )
311
+ template_counts = Counter(tids)
312
+ weighted_traincounts = Counter()
313
+ for (tid, score) in zip(tids, trainscores):
314
+ weighted_traincounts[tid] += score
315
+ tottrainscores = sum(trainscores)
316
+
317
+ # det_tplsort() is for deterministic sorting;
318
+ # the otherwise convenient Counter.most_common() unfortunately
319
+ # does not break ties deterministically
320
+ # between python versions and will break cross-version tests
321
+ def det_tplsort(tpl_value):
322
+ return (tpl_value[1], repr(tpl_value[0]))
323
+
324
+ def print_train_stats():
325
+ print(
326
+ "TEMPLATE STATISTICS (TRAIN) {} templates, {} rules)".format(
327
+ len(template_counts), len(tids)
328
+ )
329
+ )
330
+ print(
331
+ "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
332
+ "final: {finalerrors:5d} {finalacc:.4f}".format(**train_stats)
333
+ )
334
+ head = "#ID | Score (train) | #Rules | Template"
335
+ print(head, "\n", "-" * len(head), sep="")
336
+ train_tplscores = sorted(
337
+ weighted_traincounts.items(), key=det_tplsort, reverse=True
338
+ )
339
+ for (tid, trainscore) in train_tplscores:
340
+ s = "{} | {:5d} {:5.3f} |{:4d} {:.3f} | {}".format(
341
+ tid,
342
+ trainscore,
343
+ trainscore / tottrainscores,
344
+ template_counts[tid],
345
+ template_counts[tid] / len(tids),
346
+ Template.ALLTEMPLATES[int(tid)],
347
+ )
348
+ print(s)
349
+
350
+ def print_testtrain_stats():
351
+ testscores = test_stats["rulescores"]
352
+ print(
353
+ "TEMPLATE STATISTICS (TEST AND TRAIN) ({} templates, {} rules)".format(
354
+ len(template_counts), len(tids)
355
+ )
356
+ )
357
+ print(
358
+ "TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
359
+ "final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats)
360
+ )
361
+ print(
362
+ "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
363
+ "final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats)
364
+ )
365
+ weighted_testcounts = Counter()
366
+ for (tid, score) in zip(tids, testscores):
367
+ weighted_testcounts[tid] += score
368
+ tottestscores = sum(testscores)
369
+ head = "#ID | Score (test) | Score (train) | #Rules | Template"
370
+ print(head, "\n", "-" * len(head), sep="")
371
+ test_tplscores = sorted(
372
+ weighted_testcounts.items(), key=det_tplsort, reverse=True
373
+ )
374
+ for (tid, testscore) in test_tplscores:
375
+ s = "{:s} |{:5d} {:6.3f} | {:4d} {:.3f} |{:4d} {:.3f} | {:s}".format(
376
+ tid,
377
+ testscore,
378
+ testscore / tottestscores,
379
+ weighted_traincounts[tid],
380
+ weighted_traincounts[tid] / tottrainscores,
381
+ template_counts[tid],
382
+ template_counts[tid] / len(tids),
383
+ Template.ALLTEMPLATES[int(tid)],
384
+ )
385
+ print(s)
386
+
387
+ def print_unused_templates():
388
+ usedtpls = {int(tid) for tid in tids}
389
+ unused = [
390
+ (tid, tpl)
391
+ for (tid, tpl) in enumerate(Template.ALLTEMPLATES)
392
+ if tid not in usedtpls
393
+ ]
394
+ print(f"UNUSED TEMPLATES ({len(unused)})")
395
+
396
+ for (tid, tpl) in unused:
397
+ print(f"{tid:03d} {str(tpl):s}")
398
+
399
+ if test_stats is None:
400
+ print_train_stats()
401
+ else:
402
+ print_testtrain_stats()
403
+ print()
404
+ if printunused:
405
+ print_unused_templates()
406
+ print()
407
+
408
+ def batch_tag_incremental(self, sequences, gold):
409
+ """
410
+ Tags by applying each rule to the entire corpus (rather than all rules to a
411
+ single sequence). The point is to collect statistics on the test set for
412
+ individual rules.
413
+
414
+ NOTE: This is inefficient (does not build any index, so will traverse the entire
415
+ corpus N times for N rules) -- usually you would not care about statistics for
416
+ individual rules and thus use tag_sents() instead
417
+
418
+ :param sequences: lists of token sequences (sentences, in some applications) to be tagged
419
+ :type sequences: list of list of strings
420
+ :param gold: the gold standard
421
+ :type gold: list of list of strings
422
+ :returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule))
423
+ """
424
+
425
+ def counterrors(xs):
426
+ return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair))
427
+
428
+ testing_stats = {}
429
+ testing_stats["tokencount"] = sum(len(t) for t in sequences)
430
+ testing_stats["sequencecount"] = len(sequences)
431
+ tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences]
432
+ testing_stats["initialerrors"] = counterrors(tagged_tokenses)
433
+ testing_stats["initialacc"] = (
434
+ 1 - testing_stats["initialerrors"] / testing_stats["tokencount"]
435
+ )
436
+ # Apply each rule to the entire corpus, in order
437
+ errors = [testing_stats["initialerrors"]]
438
+ for rule in self._rules:
439
+ for tagged_tokens in tagged_tokenses:
440
+ rule.apply(tagged_tokens)
441
+ errors.append(counterrors(tagged_tokenses))
442
+ testing_stats["rulescores"] = [
443
+ err0 - err1 for (err0, err1) in zip(errors, errors[1:])
444
+ ]
445
+ testing_stats["finalerrors"] = errors[-1]
446
+ testing_stats["finalacc"] = (
447
+ 1 - testing_stats["finalerrors"] / testing_stats["tokencount"]
448
+ )
449
+ return (tagged_tokenses, testing_stats)
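The templates and ``BrillTagger`` above are normally driven by ``BrillTaggerTrainer`` (added in the next file). A minimal training sketch, assuming the ``treebank`` sample is available locally; rule counts and thresholds are illustrative only:

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, DefaultTagger, UnigramTagger
from nltk.tag.brill import brill24
from nltk.tbl import Template

Template._cleartemplates()   # avoid template-id clashes if run repeatedly
train = treebank.tagged_sents()[:300]
test = treebank.tagged_sents()[300:350]

baseline = UnigramTagger(train, backoff=DefaultTagger("NN"))
trainer = BrillTaggerTrainer(baseline, brill24(), trace=1)
tagger = trainer.train(train, max_rules=20, min_score=3)

print(tagger.accuracy(test))
tagger.print_template_statistics(printunused=False)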
env-llmeval/lib/python3.10/site-packages/nltk/tag/brill_trainer.py ADDED
@@ -0,0 +1,629 @@
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2013 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ import bisect
11
+ import textwrap
12
+ from collections import defaultdict
13
+
14
+ from nltk.tag import BrillTagger, untag
15
+
16
+ ######################################################################
17
+ # Brill Tagger Trainer
18
+ ######################################################################
19
+
20
+
21
+ class BrillTaggerTrainer:
22
+ """
23
+ A trainer for tbl taggers.
24
+ """
25
+
26
+ def __init__(
27
+ self, initial_tagger, templates, trace=0, deterministic=None, ruleformat="str"
28
+ ):
29
+ """
30
+ Construct a Brill tagger trainer from a baseline tagger and a
31
+ set of templates
32
+
33
+ :param initial_tagger: the baseline tagger
34
+ :type initial_tagger: Tagger
35
+ :param templates: templates to be used in training
36
+ :type templates: list of Templates
37
+ :param trace: verbosity level
38
+ :type trace: int
39
+ :param deterministic: if True, adjudicate ties deterministically
40
+ :type deterministic: bool
41
+ :param ruleformat: format of reported Rules
42
+ :type ruleformat: str
43
+ :return: An untrained BrillTaggerTrainer
44
+ :rtype: BrillTaggerTrainer
45
+ """
46
+
47
+ if deterministic is None:
48
+ deterministic = trace > 0
49
+ self._initial_tagger = initial_tagger
50
+ self._templates = templates
51
+ self._trace = trace
52
+ self._deterministic = deterministic
53
+ self._ruleformat = ruleformat
54
+
55
+ self._tag_positions = None
56
+ """Mapping from tags to lists of positions that use that tag."""
57
+
58
+ self._rules_by_position = None
59
+ """Mapping from positions to the set of rules that are known
60
+ to occur at that position. Position is (sentnum, wordnum).
61
+ Initially, this will only contain positions where each rule
62
+ applies in a helpful way; but when we examine a rule, we'll
63
+ extend this list to also include positions where each rule
64
+ applies in a harmful or neutral way."""
65
+
66
+ self._positions_by_rule = None
67
+ """Mapping from rule to position to effect, specifying the
68
+ effect that each rule has on the overall score, at each
69
+ position. Position is (sentnum, wordnum); and effect is
70
+ -1, 0, or 1. As with _rules_by_position, this mapping starts
71
+ out only containing rules with positive effects; but when
72
+ we examine a rule, we'll extend this mapping to include
73
+ the positions where the rule is harmful or neutral."""
74
+
75
+ self._rules_by_score = None
76
+ """Mapping from scores to the set of rules whose effect on the
77
+ overall score is upper bounded by that score. Invariant:
78
+ rulesByScore[s] will contain r iff the sum of
79
+ _positions_by_rule[r] is s."""
80
+
81
+ self._rule_scores = None
82
+ """Mapping from rules to upper bounds on their effects on the
83
+ overall score. This is the inverse mapping to _rules_by_score.
84
+ Invariant: ruleScores[r] = sum(_positions_by_rule[r])"""
85
+
86
+ self._first_unknown_position = None
87
+ """Mapping from rules to the first position where we're unsure
88
+ if the rule applies. This records the next position we
89
+ need to check to see if the rule messed anything up."""
90
+
91
+ # Training
92
+
93
+ def train(self, train_sents, max_rules=200, min_score=2, min_acc=None):
94
+ r"""
95
+ Trains the Brill tagger on the corpus *train_sents*,
96
+ producing at most *max_rules* transformations, each of which
97
+ reduces the net number of errors in the corpus by at least
98
+ *min_score*, and each of which has accuracy not lower than
99
+ *min_acc*.
100
+
101
+ >>> # Relevant imports
102
+ >>> from nltk.tbl.template import Template
103
+ >>> from nltk.tag.brill import Pos, Word
104
+ >>> from nltk.tag import untag, RegexpTagger, BrillTaggerTrainer
105
+
106
+ >>> # Load some data
107
+ >>> from nltk.corpus import treebank
108
+ >>> training_data = treebank.tagged_sents()[:100]
109
+ >>> baseline_data = treebank.tagged_sents()[100:200]
110
+ >>> gold_data = treebank.tagged_sents()[200:300]
111
+ >>> testing_data = [untag(s) for s in gold_data]
112
+
113
+ >>> backoff = RegexpTagger([
114
+ ... (r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), # cardinal numbers
115
+ ... (r'(The|the|A|a|An|an)$', 'AT'), # articles
116
+ ... (r'.*able$', 'JJ'), # adjectives
117
+ ... (r'.*ness$', 'NN'), # nouns formed from adjectives
118
+ ... (r'.*ly$', 'RB'), # adverbs
119
+ ... (r'.*s$', 'NNS'), # plural nouns
120
+ ... (r'.*ing$', 'VBG'), # gerunds
121
+ ... (r'.*ed$', 'VBD'), # past tense verbs
122
+ ... (r'.*', 'NN') # nouns (default)
123
+ ... ])
124
+
125
+ >>> baseline = backoff #see NOTE1
126
+ >>> baseline.accuracy(gold_data) #doctest: +ELLIPSIS
127
+ 0.243...
128
+
129
+ >>> # Set up templates
130
+ >>> Template._cleartemplates() #clear any templates created in earlier tests
131
+ >>> templates = [Template(Pos([-1])), Template(Pos([-1]), Word([0]))]
132
+
133
+ >>> # Construct a BrillTaggerTrainer
134
+ >>> tt = BrillTaggerTrainer(baseline, templates, trace=3)
135
+
136
+ >>> tagger1 = tt.train(training_data, max_rules=10)
137
+ TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: None)
138
+ Finding initial useful rules...
139
+ Found 847 useful rules.
140
+ <BLANKLINE>
141
+ B |
142
+ S F r O | Score = Fixed - Broken
143
+ c i o t | R Fixed = num tags changed incorrect -> correct
144
+ o x k h | u Broken = num tags changed correct -> incorrect
145
+ r e e e | l Other = num tags changed incorrect -> incorrect
146
+ e d n r | e
147
+ ------------------+-------------------------------------------------------
148
+ 132 132 0 0 | AT->DT if Pos:NN@[-1]
149
+ 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0]
150
+ 69 69 0 0 | NN->. if Pos:NN@[-1] & Word:.@[0]
151
+ 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0]
152
+ 47 63 16 162 | NN->IN if Pos:NNS@[-1]
153
+ 33 33 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0]
154
+ 26 26 0 0 | IN->. if Pos:NNS@[-1] & Word:.@[0]
155
+ 24 24 0 0 | IN->, if Pos:NNS@[-1] & Word:,@[0]
156
+ 22 27 5 24 | NN->-NONE- if Pos:VBD@[-1]
157
+ 17 17 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0]
158
+
159
+ >>> tagger1.rules()[1:3]
160
+ (Rule('001', 'NN', ',', [(Pos([-1]),'NN'), (Word([0]),',')]), Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')]))
161
+
162
+ >>> train_stats = tagger1.train_stats()
163
+ >>> [train_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']]
164
+ [1776, 1270, [132, 85, 69, 51, 47, 33, 26, 24, 22, 17]]
165
+
166
+ >>> tagger1.print_template_statistics(printunused=False)
167
+ TEMPLATE STATISTICS (TRAIN) (2 templates, 10 rules)
168
+ TRAIN ( 2417 tokens) initial 1776 0.2652 final: 1270 0.4746
169
+ #ID | Score (train) | #Rules | Template
170
+ --------------------------------------------
171
+ 001 | 305 0.603 | 7 0.700 | Template(Pos([-1]),Word([0]))
172
+ 000 | 201 0.397 | 3 0.300 | Template(Pos([-1]))
173
+ <BLANKLINE>
174
+ <BLANKLINE>
175
+
176
+ >>> round(tagger1.accuracy(gold_data),5)
177
+ 0.43834
178
+
179
+ >>> tagged, test_stats = tagger1.batch_tag_incremental(testing_data, gold_data)
180
+
181
+ >>> tagged[33][12:] == [('foreign', 'IN'), ('debt', 'NN'), ('of', 'IN'), ('$', 'NN'), ('64', 'CD'),
182
+ ... ('billion', 'NN'), ('*U*', 'NN'), ('--', 'NN'), ('the', 'DT'), ('third-highest', 'NN'), ('in', 'NN'),
183
+ ... ('the', 'DT'), ('developing', 'VBG'), ('world', 'NN'), ('.', '.')]
184
+ True
185
+
186
+ >>> [test_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']]
187
+ [1859, 1380, [100, 85, 67, 58, 27, 36, 27, 16, 31, 32]]
188
+
189
+ >>> # A high-accuracy tagger
190
+ >>> tagger2 = tt.train(training_data, max_rules=10, min_acc=0.99)
191
+ TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: 0.99)
192
+ Finding initial useful rules...
193
+ Found 847 useful rules.
194
+ <BLANKLINE>
195
+ B |
196
+ S F r O | Score = Fixed - Broken
197
+ c i o t | R Fixed = num tags changed incorrect -> correct
198
+ o x k h | u Broken = num tags changed correct -> incorrect
199
+ r e e e | l Other = num tags changed incorrect -> incorrect
200
+ e d n r | e
201
+ ------------------+-------------------------------------------------------
202
+ 132 132 0 0 | AT->DT if Pos:NN@[-1]
203
+ 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0]
204
+ 69 69 0 0 | NN->. if Pos:NN@[-1] & Word:.@[0]
205
+ 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0]
206
+ 36 36 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0]
207
+ 26 26 0 0 | NN->. if Pos:NNS@[-1] & Word:.@[0]
208
+ 24 24 0 0 | NN->, if Pos:NNS@[-1] & Word:,@[0]
209
+ 19 19 0 6 | NN->VB if Pos:TO@[-1]
210
+ 18 18 0 0 | CD->-NONE- if Pos:NN@[-1] & Word:0@[0]
211
+ 18 18 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0]
212
+
213
+ >>> round(tagger2.accuracy(gold_data), 8)
214
+ 0.43996744
215
+
216
+ >>> tagger2.rules()[2:4]
217
+ (Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')]), Rule('001', 'NN', 'IN', [(Pos([-1]),'NN'), (Word([0]),'of')]))
218
+
219
+ # NOTE1: (!!FIXME) A far better baseline uses nltk.tag.UnigramTagger,
220
+ # with a RegexpTagger only as backoff. For instance,
221
+ # >>> baseline = UnigramTagger(baseline_data, backoff=backoff)
222
+ # However, as of Nov 2013, nltk.tag.UnigramTagger does not yield consistent results
223
+ # between python versions. The simplistic backoff above is a workaround to make doctests
224
+ # get consistent input.
225
+
226
+ :param train_sents: training data
227
+ :type train_sents: list(list(tuple))
228
+ :param max_rules: output at most max_rules rules
229
+ :type max_rules: int
230
+ :param min_score: stop training when no rules better than min_score can be found
231
+ :type min_score: int
232
+ :param min_acc: discard any rule with lower accuracy than min_acc
233
+ :type min_acc: float or None
234
+ :return: the learned tagger
235
+ :rtype: BrillTagger
236
+ """
237
+ # FIXME: several tests are a bit too dependent on tracing format
238
+ # FIXME: tests in trainer.fast and trainer.brillorig are exact duplicates
239
+
240
+ # Basic idea: Keep track of the rules that apply at each position.
241
+ # And keep track of the positions to which each rule applies.
242
+
243
+ # Create a new copy of the training corpus, and run the
244
+ # initial tagger on it. We will progressively update this
245
+ # test corpus to look more like the training corpus.
246
+ test_sents = [
247
+ list(self._initial_tagger.tag(untag(sent))) for sent in train_sents
248
+ ]
249
+
250
+ # Collect some statistics on the training process
251
+ trainstats = {}
252
+ trainstats["min_acc"] = min_acc
253
+ trainstats["min_score"] = min_score
254
+ trainstats["tokencount"] = sum(len(t) for t in test_sents)
255
+ trainstats["sequencecount"] = len(test_sents)
256
+ trainstats["templatecount"] = len(self._templates)
257
+ trainstats["rulescores"] = []
258
+ trainstats["initialerrors"] = sum(
259
+ tag[1] != truth[1]
260
+ for paired in zip(test_sents, train_sents)
261
+ for (tag, truth) in zip(*paired)
262
+ )
263
+ trainstats["initialacc"] = (
264
+ 1 - trainstats["initialerrors"] / trainstats["tokencount"]
265
+ )
266
+ if self._trace > 0:
267
+ print(
268
+ "TBL train (fast) (seqs: {sequencecount}; tokens: {tokencount}; "
269
+ "tpls: {templatecount}; min score: {min_score}; min acc: {min_acc})".format(
270
+ **trainstats
271
+ )
272
+ )
273
+
274
+ # Initialize our mappings. This will find any errors made
275
+ # by the initial tagger, and use those to generate repair
276
+ # rules, which are added to the rule mappings.
277
+ if self._trace:
278
+ print("Finding initial useful rules...")
279
+ self._init_mappings(test_sents, train_sents)
280
+ if self._trace:
281
+ print(f" Found {len(self._rule_scores)} useful rules.")
282
+
283
+ # Let the user know what we're up to.
284
+ if self._trace > 2:
285
+ self._trace_header()
286
+ elif self._trace == 1:
287
+ print("Selecting rules...")
288
+
289
+ # Repeatedly select the best rule, and add it to `rules`.
290
+ rules = []
291
+ try:
292
+ while len(rules) < max_rules:
293
+ # Find the best rule, and add it to our rule list.
294
+ rule = self._best_rule(train_sents, test_sents, min_score, min_acc)
295
+ if rule:
296
+ rules.append(rule)
297
+ score = self._rule_scores[rule]
298
+ trainstats["rulescores"].append(score)
299
+ else:
300
+ break # No more good rules left!
301
+
302
+ # Report the rule that we found.
303
+ if self._trace > 1:
304
+ self._trace_rule(rule)
305
+
306
+ # Apply the new rule at the relevant sites
307
+ self._apply_rule(rule, test_sents)
308
+
309
+ # Update _tag_positions[rule.original_tag] and
310
+ # _tag_positions[rule.replacement_tag] for the affected
311
+ # positions (i.e., self._positions_by_rule[rule]).
312
+ self._update_tag_positions(rule)
313
+
314
+ # Update rules that were affected by the change.
315
+ self._update_rules(rule, train_sents, test_sents)
316
+
317
+ # The user can cancel training manually:
318
+ except KeyboardInterrupt:
319
+ print(f"Training stopped manually -- {len(rules)} rules found")
320
+
321
+ # Discard our tag position mapping & rule mappings.
322
+ self._clean()
323
+ trainstats["finalerrors"] = trainstats["initialerrors"] - sum(
324
+ trainstats["rulescores"]
325
+ )
326
+ trainstats["finalacc"] = (
327
+ 1 - trainstats["finalerrors"] / trainstats["tokencount"]
328
+ )
329
+ # Create and return a tagger from the rules we found.
330
+ return BrillTagger(self._initial_tagger, rules, trainstats)
331
+
332
+ def _init_mappings(self, test_sents, train_sents):
333
+ """
334
+ Initialize the tag position mapping & the rule related
335
+ mappings. For each error in test_sents, find new rules that
336
+ would correct them, and add them to the rule mappings.
337
+ """
338
+ self._tag_positions = defaultdict(list)
339
+ self._rules_by_position = defaultdict(set)
340
+ self._positions_by_rule = defaultdict(dict)
341
+ self._rules_by_score = defaultdict(set)
342
+ self._rule_scores = defaultdict(int)
343
+ self._first_unknown_position = defaultdict(int)
344
+ # Scan through the corpus, initializing the tag_positions
345
+ # mapping and all the rule-related mappings.
346
+ for sentnum, sent in enumerate(test_sents):
347
+ for wordnum, (word, tag) in enumerate(sent):
348
+
349
+ # Initialize tag_positions
350
+ self._tag_positions[tag].append((sentnum, wordnum))
351
+
352
+ # If it's an error token, update the rule-related mappings.
353
+ correct_tag = train_sents[sentnum][wordnum][1]
354
+ if tag != correct_tag:
355
+ for rule in self._find_rules(sent, wordnum, correct_tag):
356
+ self._update_rule_applies(rule, sentnum, wordnum, train_sents)
357
+
358
+ def _clean(self):
359
+ self._tag_positions = None
360
+ self._rules_by_position = None
361
+ self._positions_by_rule = None
362
+ self._rules_by_score = None
363
+ self._rule_scores = None
364
+ self._first_unknown_position = None
365
+
366
+ def _find_rules(self, sent, wordnum, new_tag):
367
+ """
368
+ Use the templates to find rules that apply at index *wordnum*
369
+ in the sentence *sent* and generate the tag *new_tag*.
370
+ """
371
+ for template in self._templates:
372
+ yield from template.applicable_rules(sent, wordnum, new_tag)
373
+
374
+ def _update_rule_applies(self, rule, sentnum, wordnum, train_sents):
375
+ """
376
+ Update the rule data tables to reflect the fact that
377
+ *rule* applies at the position *(sentnum, wordnum)*.
378
+ """
379
+ pos = sentnum, wordnum
380
+
381
+ # If the rule is already known to apply here, ignore.
382
+ # (This only happens if the position's tag hasn't changed.)
383
+ if pos in self._positions_by_rule[rule]:
384
+ return
385
+
386
+ # Update self._positions_by_rule.
387
+ correct_tag = train_sents[sentnum][wordnum][1]
388
+ if rule.replacement_tag == correct_tag:
389
+ self._positions_by_rule[rule][pos] = 1
390
+ elif rule.original_tag == correct_tag:
391
+ self._positions_by_rule[rule][pos] = -1
392
+ else: # was wrong, remains wrong
393
+ self._positions_by_rule[rule][pos] = 0
394
+
395
+ # Update _rules_by_position
396
+ self._rules_by_position[pos].add(rule)
397
+
398
+ # Update _rule_scores.
399
+ old_score = self._rule_scores[rule]
400
+ self._rule_scores[rule] += self._positions_by_rule[rule][pos]
401
+
402
+ # Update _rules_by_score.
403
+ self._rules_by_score[old_score].discard(rule)
404
+ self._rules_by_score[self._rule_scores[rule]].add(rule)
405
+
406
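To make the bookkeeping in _update_rule_applies concrete, here is a self-contained sketch of the score-bucket idea: a rule's score is the sum of its per-position deltas (+1 fixed, -1 broken, 0 neutral), and rules are also indexed by score so the current best candidates can be looked up directly. The names and data below are illustrative, not the trainer's own state.

# Standalone sketch of the score/bucket bookkeeping (illustrative names and data).
from collections import defaultdict

rule_scores = defaultdict(int)          # rule -> current score
rules_by_score = defaultdict(set)       # score -> set of rules with that score
positions_by_rule = defaultdict(dict)   # rule -> {position: +1 / -1 / 0}

def record_application(rule, pos, delta):
    """Register that `rule` applies at `pos` with effect `delta`."""
    if pos in positions_by_rule[rule]:
        return                                   # already known to apply here
    positions_by_rule[rule][pos] = delta
    old = rule_scores[rule]
    rule_scores[rule] = old + delta
    rules_by_score[old].discard(rule)            # move the rule to its new score bucket
    rules_by_score[old + delta].add(rule)

record_application("rule-A", (0, 3), +1)         # fixes one tag
record_application("rule-A", (2, 7), -1)         # breaks another
assert rule_scores["rule-A"] == 0 and "rule-A" in rules_by_score[0]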
+ def _update_rule_not_applies(self, rule, sentnum, wordnum):
407
+ """
408
+ Update the rule data tables to reflect the fact that *rule*
409
+ does not apply at the position *(sentnum, wordnum)*.
410
+ """
411
+ pos = sentnum, wordnum
412
+
413
+ # Update _rule_scores.
414
+ old_score = self._rule_scores[rule]
415
+ self._rule_scores[rule] -= self._positions_by_rule[rule][pos]
416
+
417
+ # Update _rules_by_score.
418
+ self._rules_by_score[old_score].discard(rule)
419
+ self._rules_by_score[self._rule_scores[rule]].add(rule)
420
+
421
+ # Update _positions_by_rule
422
+ del self._positions_by_rule[rule][pos]
423
+ self._rules_by_position[pos].remove(rule)
424
+
425
+ # Optional addition: if the rule now applies nowhere, delete
426
+ # all its dictionary entries.
427
+
428
+ def _best_rule(self, train_sents, test_sents, min_score, min_acc):
429
+ """
430
+ Find the next best rule. This is done by repeatedly taking a
431
+ rule with the highest score and stepping through the corpus to
432
+ see where it applies. When it makes an error (decreasing its
433
+ score) it's bumped down, and we try a new rule with the
434
+ highest score. When we find a rule which has the highest
435
+ score *and* which has been tested against the entire corpus, we
436
+ can conclude that it's the next best rule.
437
+ """
438
+ for max_score in sorted(self._rules_by_score.keys(), reverse=True):
439
+ if len(self._rules_by_score) == 0:
440
+ return None
441
+ if max_score < min_score or max_score <= 0:
442
+ return None
443
+ best_rules = list(self._rules_by_score[max_score])
444
+ if self._deterministic:
445
+ best_rules.sort(key=repr)
446
+ for rule in best_rules:
447
+ positions = self._tag_positions[rule.original_tag]
448
+
449
+ unk = self._first_unknown_position.get(rule, (0, -1))
450
+ start = bisect.bisect_left(positions, unk)
451
+
452
+ for i in range(start, len(positions)):
453
+ sentnum, wordnum = positions[i]
454
+ if rule.applies(test_sents[sentnum], wordnum):
455
+ self._update_rule_applies(rule, sentnum, wordnum, train_sents)
456
+ if self._rule_scores[rule] < max_score:
457
+ self._first_unknown_position[rule] = (sentnum, wordnum + 1)
458
+ break # The update demoted the rule.
459
+
460
+ if self._rule_scores[rule] == max_score:
461
+ self._first_unknown_position[rule] = (len(train_sents) + 1, 0)
462
+ # optimization: if no min_acc threshold given, don't bother computing accuracy
463
+ if min_acc is None:
464
+ return rule
465
+ else:
466
+ changes = self._positions_by_rule[rule].values()
467
+ num_fixed = len([c for c in changes if c == 1])
468
+ num_broken = len([c for c in changes if c == -1])
469
+ # acc here is fixed/(fixed+broken); could also be
470
+ # fixed/(fixed+broken+other) == num_fixed/len(changes)
471
+ acc = num_fixed / (num_fixed + num_broken)
472
+ if acc >= min_acc:
473
+ return rule
474
+ # else: rule too inaccurate, discard and try next
475
+
476
+ # We demoted (or skipped due to < min_acc, if that was given)
477
+ # all the rules with score==max_score.
478
+
479
+ assert min_acc is not None or not self._rules_by_score[max_score]
480
+ if not self._rules_by_score[max_score]:
481
+ del self._rules_by_score[max_score]
482
+
483
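The key trick in _best_rule is that each rule remembers how far through the corpus it has already been tested (_first_unknown_position), and the sorted list of positions lets bisect find where to resume. A tiny illustration with hypothetical data, not the trainer's own state:

# Sketch of the "resume where we left off" lookup (hypothetical positions).
import bisect

positions = [(0, 2), (0, 5), (1, 1), (1, 4), (3, 0)]   # sorted (sentnum, wordnum) pairs
first_unknown = (1, 2)                                  # everything before this was already checked

start = bisect.bisect_left(positions, first_unknown)
print(positions[start:])   # [(1, 4), (3, 0)] -- only these still need to be tested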
+ def _apply_rule(self, rule, test_sents):
484
+ """
485
+ Update *test_sents* by applying *rule* everywhere where its
486
+ conditions are met.
487
+ """
488
+ update_positions = set(self._positions_by_rule[rule])
489
+ new_tag = rule.replacement_tag
490
+
491
+ if self._trace > 3:
492
+ self._trace_apply(len(update_positions))
493
+
494
+ # Update test_sents.
495
+ for (sentnum, wordnum) in update_positions:
496
+ text = test_sents[sentnum][wordnum][0]
497
+ test_sents[sentnum][wordnum] = (text, new_tag)
498
+
499
+ def _update_tag_positions(self, rule):
500
+ """
501
+ Update _tag_positions to reflect the changes to tags that are
502
+ made by *rule*.
503
+ """
504
+ # Update the tag index.
505
+ for pos in self._positions_by_rule[rule]:
506
+ # Delete the old tag.
507
+ old_tag_positions = self._tag_positions[rule.original_tag]
508
+ old_index = bisect.bisect_left(old_tag_positions, pos)
509
+ del old_tag_positions[old_index]
510
+ # Insert the new tag.
511
+ new_tag_positions = self._tag_positions[rule.replacement_tag]
512
+ bisect.insort_left(new_tag_positions, pos)
513
+
514
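A small sketch of what _update_tag_positions does per retagged position: remove the position from the old tag's sorted list with bisect_left, then insert it into the new tag's list with insort_left so both lists stay sorted. The tags and positions here are made up for illustration.

# Keeping per-tag position lists sorted when one position is retagged NN -> VB.
import bisect

tag_positions = {
    "NN": [(0, 1), (0, 4), (2, 3)],
    "VB": [(1, 0)],
}
pos = (0, 4)                                     # position being retagged

old = tag_positions["NN"]
del old[bisect.bisect_left(old, pos)]            # drop it from the old tag's sorted list
bisect.insort_left(tag_positions["VB"], pos)     # insert into the new tag's list, keeping order

print(tag_positions)   # {'NN': [(0, 1), (2, 3)], 'VB': [(0, 4), (1, 0)]}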
+ def _update_rules(self, rule, train_sents, test_sents):
515
+ """
516
+ Check if we should add or remove any rules from consideration,
517
+ given the changes made by *rule*.
518
+ """
519
+ # Collect a list of all positions that might be affected.
520
+ neighbors = set()
521
+ for sentnum, wordnum in self._positions_by_rule[rule]:
522
+ for template in self._templates:
523
+ n = template.get_neighborhood(test_sents[sentnum], wordnum)
524
+ neighbors.update([(sentnum, i) for i in n])
525
+
526
+ # Update the rules at each position.
527
+ num_obsolete = num_new = num_unseen = 0
528
+ for sentnum, wordnum in neighbors:
529
+ test_sent = test_sents[sentnum]
530
+ correct_tag = train_sents[sentnum][wordnum][1]
531
+
532
+ # Check if the change causes any rule at this position to
533
+ # stop matching; if so, then update our rule mappings
534
+ # accordingly.
535
+ old_rules = set(self._rules_by_position[sentnum, wordnum])
536
+ for old_rule in old_rules:
537
+ if not old_rule.applies(test_sent, wordnum):
538
+ num_obsolete += 1
539
+ self._update_rule_not_applies(old_rule, sentnum, wordnum)
540
+
541
+ # Check if the change causes our templates to propose any
542
+ # new rules for this position.
543
+ for template in self._templates:
544
+ for new_rule in template.applicable_rules(
545
+ test_sent, wordnum, correct_tag
546
+ ):
547
+ if new_rule not in old_rules:
548
+ num_new += 1
549
+ if new_rule not in self._rule_scores:
550
+ num_unseen += 1
551
+ old_rules.add(new_rule)
552
+ self._update_rule_applies(
553
+ new_rule, sentnum, wordnum, train_sents
554
+ )
555
+
556
+ # We may have caused other rules to match here, that are
557
+ # not proposed by our templates -- in particular, rules
558
+ # that are harmful or neutral. We therefore need to
559
+ # update any rule whose first_unknown_position is past
560
+ # this position.
561
+ for new_rule, pos in self._first_unknown_position.items():
562
+ if pos > (sentnum, wordnum):
563
+ if new_rule not in old_rules:
564
+ num_new += 1
565
+ if new_rule.applies(test_sent, wordnum):
566
+ self._update_rule_applies(
567
+ new_rule, sentnum, wordnum, train_sents
568
+ )
569
+
570
+ if self._trace > 3:
571
+ self._trace_update_rules(num_obsolete, num_new, num_unseen)
572
+
573
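The neighborhood logic above limits rescanning to positions whose feature windows can "see" a retagged token. The sketch below illustrates that idea with a simple fixed window; it is not NLTK's Template.get_neighborhood, which computes the window from each template's own features.

# Illustrative neighborhood computation (fixed window, hypothetical positions).
def neighborhood(sent_len, wordnum, window=2):
    """Indices whose feature window of size `window` covers `wordnum`."""
    return set(range(max(0, wordnum - window), min(sent_len, wordnum + window + 1)))

changed = [(0, 5), (0, 9)]                 # (sentnum, wordnum) positions retagged by the rule
neighbors = set()
for sentnum, wordnum in changed:
    neighbors.update((sentnum, i) for i in neighborhood(20, wordnum))
print(sorted(neighbors))                   # only these positions get their candidate rules recomputed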
+ # Tracing
574
+
575
+ def _trace_header(self):
576
+ print(
577
+ """
578
+ B |
579
+ S F r O | Score = Fixed - Broken
580
+ c i o t | R Fixed = num tags changed incorrect -> correct
581
+ o x k h | u Broken = num tags changed correct -> incorrect
582
+ r e e e | l Other = num tags changed incorrect -> incorrect
583
+ e d n r | e
584
+ ------------------+-------------------------------------------------------
585
+ """.rstrip()
586
+ )
587
+
588
+ def _trace_rule(self, rule):
589
+ assert self._rule_scores[rule] == sum(self._positions_by_rule[rule].values())
590
+
591
+ changes = self._positions_by_rule[rule].values()
592
+ num_fixed = len([c for c in changes if c == 1])
593
+ num_broken = len([c for c in changes if c == -1])
594
+ num_other = len([c for c in changes if c == 0])
595
+ score = self._rule_scores[rule]
596
+
597
+ rulestr = rule.format(self._ruleformat)
598
+ if self._trace > 2:
599
+ print(
600
+ "{:4d}{:4d}{:4d}{:4d} |".format(
601
+ score, num_fixed, num_broken, num_other
602
+ ),
603
+ end=" ",
604
+ )
605
+ print(
606
+ textwrap.fill(
607
+ rulestr,
608
+ initial_indent=" " * 20,
609
+ width=79,
610
+ subsequent_indent=" " * 18 + "| ",
611
+ ).strip()
612
+ )
613
+ else:
614
+ print(rulestr)
615
+
616
+ def _trace_apply(self, num_updates):
617
+ prefix = " " * 18 + "|"
618
+ print(prefix)
619
+ print(prefix, f"Applying rule to {num_updates} positions.")
620
+
621
+ def _trace_update_rules(self, num_obsolete, num_new, num_unseen):
622
+ prefix = " " * 18 + "|"
623
+ print(prefix, "Updated rule tables:")
624
+ print(prefix, (f" - {num_obsolete} rule applications removed"))
625
+ print(
626
+ prefix,
627
+ (f" - {num_new} rule applications added ({num_unseen} novel)"),
628
+ )
629
+ print(prefix)
env-llmeval/lib/python3.10/site-packages/nltk/tag/crf.py ADDED
@@ -0,0 +1,207 @@
1
+ # Natural Language Toolkit: Interface to the CRFSuite Tagger
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Long Duong <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A module for POS tagging using CRFSuite
10
+ """
11
+
12
+ import re
13
+ import unicodedata
14
+
15
+ from nltk.tag.api import TaggerI
16
+
17
+ try:
18
+ import pycrfsuite
19
+ except ImportError:
20
+ pass
21
+
22
+
23
+ class CRFTagger(TaggerI):
24
+ """
25
+ A module for POS tagging using CRFSuite: https://pypi.python.org/pypi/python-crfsuite
26
+
27
+ >>> from nltk.tag import CRFTagger
28
+ >>> ct = CRFTagger() # doctest: +SKIP
29
+
30
+ >>> train_data = [[('University','Noun'), ('is','Verb'), ('a','Det'), ('good','Adj'), ('place','Noun')],
31
+ ... [('dog','Noun'),('eat','Verb'),('meat','Noun')]]
32
+
33
+ >>> ct.train(train_data,'model.crf.tagger') # doctest: +SKIP
34
+ >>> ct.tag_sents([['dog','is','good'], ['Cat','eat','meat']]) # doctest: +SKIP
35
+ [[('dog', 'Noun'), ('is', 'Verb'), ('good', 'Adj')], [('Cat', 'Noun'), ('eat', 'Verb'), ('meat', 'Noun')]]
36
+
37
+ >>> gold_sentences = [[('dog','Noun'),('is','Verb'),('good','Adj')] , [('Cat','Noun'),('eat','Verb'), ('meat','Noun')]]
38
+ >>> ct.accuracy(gold_sentences) # doctest: +SKIP
39
+ 1.0
40
+
41
+ Setting learned model file
42
+ >>> ct = CRFTagger() # doctest: +SKIP
43
+ >>> ct.set_model_file('model.crf.tagger') # doctest: +SKIP
44
+ >>> ct.accuracy(gold_sentences) # doctest: +SKIP
45
+ 1.0
46
+ """
47
+
48
+ def __init__(self, feature_func=None, verbose=False, training_opt={}):
49
+ """
50
+ Initialize the CRFSuite tagger
51
+
52
+ :param feature_func: The function that extracts features for each token of a sentence. This function should take
53
+ two parameters, tokens and index, and extract features for the token at position index in the tokens list. See the built-in
54
+ _get_features function for more detail.
55
+ :param verbose: output debugging messages during training.
56
+ :type verbose: boolean
57
+ :param training_opt: python-crfsuite training options
58
+ :type training_opt: dictionary
59
+
60
+ Set of possible training options (using LBFGS training algorithm).
61
+ :'feature.minfreq': The minimum frequency of features.
62
+ :'feature.possible_states': Force to generate possible state features.
63
+ :'feature.possible_transitions': Force to generate possible transition features.
64
+ :'c1': Coefficient for L1 regularization.
65
+ :'c2': Coefficient for L2 regularization.
66
+ :'max_iterations': The maximum number of iterations for L-BFGS optimization.
67
+ :'num_memories': The number of limited memories for approximating the inverse Hessian matrix.
68
+ :'epsilon': Epsilon for testing the convergence of the objective.
69
+ :'period': The duration of iterations to test the stopping criterion.
70
+ :'delta': The threshold for the stopping criterion; an L-BFGS iteration stops when the
71
+ improvement of the log likelihood over the last ${period} iterations is no greater than this threshold.
72
+ :'linesearch': The line search algorithm used in L-BFGS updates:
73
+
74
+ - 'MoreThuente': More and Thuente's method,
75
+ - 'Backtracking': Backtracking method with regular Wolfe condition,
76
+ - 'StrongBacktracking': Backtracking method with strong Wolfe condition
77
+ :'max_linesearch': The maximum number of trials for the line search algorithm.
78
+ """
79
+
80
+ self._model_file = ""
81
+ self._tagger = pycrfsuite.Tagger()
82
+
83
+ if feature_func is None:
84
+ self._feature_func = self._get_features
85
+ else:
86
+ self._feature_func = feature_func
87
+
88
+ self._verbose = verbose
89
+ self._training_options = training_opt
90
+ self._pattern = re.compile(r"\d")
91
+
92
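Since the constructor documented above accepts a training_opt dictionary that is passed straight to python-crfsuite, a minimal configuration sketch looks like the following; the option keys come from the list above, while the training data and model file name are illustrative (and python-crfsuite must be installed).

# Minimal sketch: construct a CRFTagger with explicit training options, train, and tag.
from nltk.tag import CRFTagger

train_data = [
    [("University", "Noun"), ("is", "Verb"), ("a", "Det"), ("good", "Adj"), ("place", "Noun")],
    [("dog", "Noun"), ("eat", "Verb"), ("meat", "Noun")],
]
ct = CRFTagger(verbose=True, training_opt={"c1": 0.1, "c2": 0.01, "max_iterations": 50})
ct.train(train_data, "model.crf.tagger")        # illustrative model file name
print(ct.tag(["dog", "is", "good"]))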
+ def set_model_file(self, model_file):
93
+ self._model_file = model_file
94
+ self._tagger.open(self._model_file)
95
+
96
+ def _get_features(self, tokens, idx):
97
+ """
98
+ Extract basic features about this word including
99
+ - Current word
100
+ - is it capitalized?
101
+ - Does it have punctuation?
102
+ - Does it have a number?
103
+ - Suffixes up to length 3
104
+
105
+ Note: we might also include features over the previous word, next word, etc. (a sketch follows this method).
106
+
107
+ :return: a list which contains the features
108
+ :rtype: list(str)
109
+ """
110
+ token = tokens[idx]
111
+
112
+ feature_list = []
113
+
114
+ if not token:
115
+ return feature_list
116
+
117
+ # Capitalization
118
+ if token[0].isupper():
119
+ feature_list.append("CAPITALIZATION")
120
+
121
+ # Number
122
+ if re.search(self._pattern, token) is not None:
123
+ feature_list.append("HAS_NUM")
124
+
125
+ # Punctuation
126
+ punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"}
127
+ if all(unicodedata.category(x) in punc_cat for x in token):
128
+ feature_list.append("PUNCTUATION")
129
+
130
+ # Suffix up to length 3
131
+ if len(token) > 1:
132
+ feature_list.append("SUF_" + token[-1:])
133
+ if len(token) > 2:
134
+ feature_list.append("SUF_" + token[-2:])
135
+ if len(token) > 3:
136
+ feature_list.append("SUF_" + token[-3:])
137
+
138
+ feature_list.append("WORD_" + token)
139
+
140
+ return feature_list
141
+
142
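As the note in the docstring above suggests, features over neighboring words can also be used. Here is a hedged sketch of such a custom feature function; it is passed in via the documented feature_func constructor parameter, and the function name and feature strings are purely illustrative.

# Sketch of a custom feature function that also looks at the previous and next word.
def window_features(tokens, idx):
    features = ["WORD_" + tokens[idx]]
    if idx > 0:
        features.append("PREV_" + tokens[idx - 1])      # previous word
    if idx < len(tokens) - 1:
        features.append("NEXT_" + tokens[idx + 1])      # next word
    if tokens[idx][0].isupper():
        features.append("CAPITALIZATION")
    return features

# ct = CRFTagger(feature_func=window_features)   # then train/tag as usual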
+ def tag_sents(self, sents):
143
+ """
144
+ Tag a list of sentences. NB: before using this function, the user should specify the model file, either by
145
+ 
146
+ - training a new model using the ``train`` function, or
147
+ - using the pre-trained model set via the ``set_model_file`` function
148
+
149
+ :param sents: list of sentences to tag.
150
+ :type sents: list(list(str))
151
+ :return: list of tagged sentences.
152
+ :rtype: list(list(tuple(str,str)))
153
+ """
154
+ if self._model_file == "":
155
+ raise Exception(
156
+ " No model file is found !! Please use train or set_model_file function"
157
+ )
158
+
159
+ # We need a list of sentences rather than a generator, so inputs and outputs can be matched up.
160
+ result = []
161
+ for tokens in sents:
162
+ features = [self._feature_func(tokens, i) for i in range(len(tokens))]
163
+ labels = self._tagger.tag(features)
164
+
165
+ if len(labels) != len(tokens):
166
+ raise Exception(" Predicted Length Not Matched, Expect Errors !")
167
+
168
+ tagged_sent = list(zip(tokens, labels))
169
+ result.append(tagged_sent)
170
+
171
+ return result
172
+
173
+ def train(self, train_data, model_file):
174
+ """
175
+ Train the CRF tagger using CRFSuite
176
+ :param train_data: the list of annotated sentences.
177
+ :type train_data: list(list(tuple(str,str)))
178
+ :param model_file: the model will be saved to this file.
179
+
180
+ """
181
+ trainer = pycrfsuite.Trainer(verbose=self._verbose)
182
+ trainer.set_params(self._training_options)
183
+
184
+ for sent in train_data:
185
+ tokens, labels = zip(*sent)
186
+ features = [self._feature_func(tokens, i) for i in range(len(tokens))]
187
+ trainer.append(features, labels)
188
+
189
+ # Now train the model, the output should be model_file
190
+ trainer.train(model_file)
191
+ # Save the model file
192
+ self.set_model_file(model_file)
193
+
194
+ def tag(self, tokens):
195
+ """
196
+ Tag a sentence using the Python CRFSuite tagger. NB: before using this function, the user should specify the model file, either by
197
+ 
198
+ - training a new model using the ``train`` function, or
199
+ - using the pre-trained model set via the ``set_model_file`` function
200
+
201
+ :param tokens: list of tokens to tag.
202
+ :type tokens: list(str)
203
+ :return: list of tagged tokens.
204
+ :rtype: list(tuple(str,str))
205
+ """
206
+
207
+ return self.tag_sents([tokens])[0]