applied-ai-018 committed
Commit 9abfc01 · verified · 1 Parent(s): 685344b

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/23.input_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step40/zero/23.input_layernorm.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/nltk/ccg/__init__.py +34 -0
  5. venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/nltk/ccg/api.py +358 -0
  12. venv/lib/python3.10/site-packages/nltk/ccg/chart.py +480 -0
  13. venv/lib/python3.10/site-packages/nltk/ccg/combinator.py +339 -0
  14. venv/lib/python3.10/site-packages/nltk/ccg/lexicon.py +338 -0
  15. venv/lib/python3.10/site-packages/nltk/ccg/logic.py +60 -0
  16. venv/lib/python3.10/site-packages/nltk/tag/__init__.py +184 -0
  17. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/nltk/tag/api.py +296 -0
  32. venv/lib/python3.10/site-packages/nltk/tag/brill.py +449 -0
  33. venv/lib/python3.10/site-packages/nltk/tag/brill_trainer.py +629 -0
  34. venv/lib/python3.10/site-packages/nltk/tag/crf.py +207 -0
  35. venv/lib/python3.10/site-packages/nltk/tag/hmm.py +1329 -0
  36. venv/lib/python3.10/site-packages/nltk/tag/hunpos.py +142 -0
  37. venv/lib/python3.10/site-packages/nltk/tag/mapping.py +136 -0
  38. venv/lib/python3.10/site-packages/nltk/tag/perceptron.py +371 -0
  39. venv/lib/python3.10/site-packages/nltk/tag/senna.py +134 -0
  40. venv/lib/python3.10/site-packages/nltk/tag/sequential.py +755 -0
  41. venv/lib/python3.10/site-packages/nltk/tag/stanford.py +236 -0
  42. venv/lib/python3.10/site-packages/nltk/tag/tnt.py +579 -0
  43. venv/lib/python3.10/site-packages/nltk/tag/util.py +72 -0
  44. venv/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/nltk/translate/__pycache__/bleu_score.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gale_church.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gleu_score.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm2.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ec34c6a75f69252093c0b2a4049d5cbb72ce2dfdb246e68b2bfe89177266903
+ size 9387
ckpts/universal/global_step40/zero/23.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b653b78234e0782e985e3c7c00239a8dd9eaaf5f0e83b24d4850d227fc0e585
+ size 9387
ckpts/universal/global_step40/zero/23.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77ad8ab6e7a60fd6e6024c6f6ac80f55a84b923f82bd1a2f34a6b65066b1bf2b
+ size 9293
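The three checkpoint entries above are Git LFS pointer stubs rather than the tensors themselves: each records the spec version, the SHA-256 object id, and the byte size of the real payload. A minimal sketch of reading such a pointer (the parse_lfs_pointer helper is illustrative, not part of the commit):

# Illustrative helper: split each "key value" line of a Git LFS pointer file.
def parse_lfs_pointer(text):
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


pointer_text = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:2ec34c6a75f69252093c0b2a4049d5cbb72ce2dfdb246e68b2bfe89177266903\n"
    "size 9387\n"
)
info = parse_lfs_pointer(pointer_text)
print(info["oid"], int(info["size"]))  # sha256:2ec3... 9387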
venv/lib/python3.10/site-packages/nltk/ccg/__init__.py ADDED
@@ -0,0 +1,34 @@
+ # Natural Language Toolkit: Combinatory Categorial Grammar
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Graeme Gange <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ """
+ Combinatory Categorial Grammar.
+
+ For more information see nltk/doc/contrib/ccg/ccg.pdf
+ """
+
+ from nltk.ccg.chart import CCGChart, CCGChartParser, CCGEdge, CCGLeafEdge
+ from nltk.ccg.combinator import (
+     BackwardApplication,
+     BackwardBx,
+     BackwardCombinator,
+     BackwardComposition,
+     BackwardSx,
+     BackwardT,
+     DirectedBinaryCombinator,
+     ForwardApplication,
+     ForwardCombinator,
+     ForwardComposition,
+     ForwardSubstitution,
+     ForwardT,
+     UndirectedBinaryCombinator,
+     UndirectedComposition,
+     UndirectedFunctionApplication,
+     UndirectedSubstitution,
+     UndirectedTypeRaise,
+ )
+ from nltk.ccg.lexicon import CCGLexicon
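The package __init__ above only re-exports names from the submodules, so client code can import the public entry points from nltk.ccg directly. A small sketch, assuming this venv's nltk is importable:

# Sketch: the package-level names are the same objects defined in the submodules.
from nltk.ccg import CCGChartParser, CCGLexicon, chart, lexicon

assert CCGChartParser is chart.CCGChartParser
assert CCGLexicon is lexicon.CCGLexicon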
venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (970 Bytes)
 
venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc ADDED
Binary file (11.5 kB)
 
venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc ADDED
Binary file (12.7 kB)
 
venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc ADDED
Binary file (9.17 kB)
 
venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc ADDED
Binary file (7.89 kB)
 
venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc ADDED
Binary file (1.52 kB)
 
venv/lib/python3.10/site-packages/nltk/ccg/api.py ADDED
@@ -0,0 +1,358 @@
1
+ # Natural Language Toolkit: CCG Categories
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ from abc import ABCMeta, abstractmethod
9
+ from functools import total_ordering
10
+
11
+ from nltk.internals import raise_unorderable_types
12
+
13
+
14
+ @total_ordering
15
+ class AbstractCCGCategory(metaclass=ABCMeta):
16
+ """
17
+ Interface for categories in combinatory grammars.
18
+ """
19
+
20
+ @abstractmethod
21
+ def is_primitive(self):
22
+ """
23
+ Returns true if the category is primitive.
24
+ """
25
+
26
+ @abstractmethod
27
+ def is_function(self):
28
+ """
29
+ Returns true if the category is a function application.
30
+ """
31
+
32
+ @abstractmethod
33
+ def is_var(self):
34
+ """
35
+ Returns true if the category is a variable.
36
+ """
37
+
38
+ @abstractmethod
39
+ def substitute(self, substitutions):
40
+ """
41
+ Takes a set of (var, category) substitutions, and replaces every
42
+ occurrence of the variable with the corresponding category.
43
+ """
44
+
45
+ @abstractmethod
46
+ def can_unify(self, other):
47
+ """
48
+ Determines whether two categories can be unified.
49
+ - Returns None if they cannot be unified
50
+ - Returns a list of necessary substitutions if they can.
51
+ """
52
+
53
+ # Utility functions: comparison, strings and hashing.
54
+ @abstractmethod
55
+ def __str__(self):
56
+ pass
57
+
58
+ def __eq__(self, other):
59
+ return (
60
+ self.__class__ is other.__class__
61
+ and self._comparison_key == other._comparison_key
62
+ )
63
+
64
+ def __ne__(self, other):
65
+ return not self == other
66
+
67
+ def __lt__(self, other):
68
+ if not isinstance(other, AbstractCCGCategory):
69
+ raise_unorderable_types("<", self, other)
70
+ if self.__class__ is other.__class__:
71
+ return self._comparison_key < other._comparison_key
72
+ else:
73
+ return self.__class__.__name__ < other.__class__.__name__
74
+
75
+ def __hash__(self):
76
+ try:
77
+ return self._hash
78
+ except AttributeError:
79
+ self._hash = hash(self._comparison_key)
80
+ return self._hash
81
+
82
+
83
+ class CCGVar(AbstractCCGCategory):
84
+ """
85
+ Class representing a variable CCG category.
86
+ Used for conjunctions (and possibly type-raising, if implemented as a
87
+ unary rule).
88
+ """
89
+
90
+ _maxID = 0
91
+
92
+ def __init__(self, prim_only=False):
93
+ """Initialize a variable (selects a new identifier)
94
+
95
+ :param prim_only: a boolean that determines whether the variable is
96
+ restricted to primitives
97
+ :type prim_only: bool
98
+ """
99
+ self._id = self.new_id()
100
+ self._prim_only = prim_only
101
+ self._comparison_key = self._id
102
+
103
+ @classmethod
104
+ def new_id(cls):
105
+ """
106
+ A class method allowing generation of unique variable identifiers.
107
+ """
108
+ cls._maxID = cls._maxID + 1
109
+ return cls._maxID - 1
110
+
111
+ @classmethod
112
+ def reset_id(cls):
113
+ cls._maxID = 0
114
+
115
+ def is_primitive(self):
116
+ return False
117
+
118
+ def is_function(self):
119
+ return False
120
+
121
+ def is_var(self):
122
+ return True
123
+
124
+ def substitute(self, substitutions):
125
+ """If there is a substitution corresponding to this variable,
126
+ return the substituted category.
127
+ """
128
+ for (var, cat) in substitutions:
129
+ if var == self:
130
+ return cat
131
+ return self
132
+
133
+ def can_unify(self, other):
134
+ """If the variable can be replaced with other
135
+ a substitution is returned.
136
+ """
137
+ if other.is_primitive() or not self._prim_only:
138
+ return [(self, other)]
139
+ return None
140
+
141
+ def id(self):
142
+ return self._id
143
+
144
+ def __str__(self):
145
+ return "_var" + str(self._id)
146
+
147
+
148
+ @total_ordering
149
+ class Direction:
150
+ """
151
+ Class representing the direction of a function application.
152
+ Also contains maintains information as to which combinators
153
+ may be used with the category.
154
+ """
155
+
156
+ def __init__(self, dir, restrictions):
157
+ self._dir = dir
158
+ self._restrs = restrictions
159
+ self._comparison_key = (dir, tuple(restrictions))
160
+
161
+ # Testing the application direction
162
+ def is_forward(self):
163
+ return self._dir == "/"
164
+
165
+ def is_backward(self):
166
+ return self._dir == "\\"
167
+
168
+ def dir(self):
169
+ return self._dir
170
+
171
+ def restrs(self):
172
+ """A list of restrictions on the combinators.
173
+ '.' denotes that permuting operations are disallowed
174
+ ',' denotes that function composition is disallowed
175
+ '_' denotes that the direction has variable restrictions.
176
+ (This is redundant in the current implementation of type-raising)
177
+ """
178
+ return self._restrs
179
+
180
+ def is_variable(self):
181
+ return self._restrs == "_"
182
+
183
+ # Unification and substitution of variable directions.
184
+ # Used only if type-raising is implemented as a unary rule, as it
185
+ # must inherit restrictions from the argument category.
186
+ def can_unify(self, other):
187
+ if other.is_variable():
188
+ return [("_", self.restrs())]
189
+ elif self.is_variable():
190
+ return [("_", other.restrs())]
191
+ else:
192
+ if self.restrs() == other.restrs():
193
+ return []
194
+ return None
195
+
196
+ def substitute(self, subs):
197
+ if not self.is_variable():
198
+ return self
199
+
200
+ for (var, restrs) in subs:
201
+ if var == "_":
202
+ return Direction(self._dir, restrs)
203
+ return self
204
+
205
+ # Testing permitted combinators
206
+ def can_compose(self):
207
+ return "," not in self._restrs
208
+
209
+ def can_cross(self):
210
+ return "." not in self._restrs
211
+
212
+ def __eq__(self, other):
213
+ return (
214
+ self.__class__ is other.__class__
215
+ and self._comparison_key == other._comparison_key
216
+ )
217
+
218
+ def __ne__(self, other):
219
+ return not self == other
220
+
221
+ def __lt__(self, other):
222
+ if not isinstance(other, Direction):
223
+ raise_unorderable_types("<", self, other)
224
+ if self.__class__ is other.__class__:
225
+ return self._comparison_key < other._comparison_key
226
+ else:
227
+ return self.__class__.__name__ < other.__class__.__name__
228
+
229
+ def __hash__(self):
230
+ try:
231
+ return self._hash
232
+ except AttributeError:
233
+ self._hash = hash(self._comparison_key)
234
+ return self._hash
235
+
236
+ def __str__(self):
237
+ r_str = ""
238
+ for r in self._restrs:
239
+ r_str = r_str + "%s" % r
240
+ return f"{self._dir}{r_str}"
241
+
242
+ # The negation operator reverses the direction of the application
243
+ def __neg__(self):
244
+ if self._dir == "/":
245
+ return Direction("\\", self._restrs)
246
+ else:
247
+ return Direction("/", self._restrs)
248
+
249
+
250
+ class PrimitiveCategory(AbstractCCGCategory):
251
+ """
252
+ Class representing primitive categories.
253
+ Takes a string representation of the category, and a
254
+ list of strings specifying the morphological subcategories.
255
+ """
256
+
257
+ def __init__(self, categ, restrictions=[]):
258
+ self._categ = categ
259
+ self._restrs = restrictions
260
+ self._comparison_key = (categ, tuple(restrictions))
261
+
262
+ def is_primitive(self):
263
+ return True
264
+
265
+ def is_function(self):
266
+ return False
267
+
268
+ def is_var(self):
269
+ return False
270
+
271
+ def restrs(self):
272
+ return self._restrs
273
+
274
+ def categ(self):
275
+ return self._categ
276
+
277
+ # Substitution does nothing to a primitive category
278
+ def substitute(self, subs):
279
+ return self
280
+
281
+ # A primitive can be unified with a class of the same
282
+ # base category, given that the other category shares all
283
+ # of its subclasses, or with a variable.
284
+ def can_unify(self, other):
285
+ if not other.is_primitive():
286
+ return None
287
+ if other.is_var():
288
+ return [(other, self)]
289
+ if other.categ() == self.categ():
290
+ for restr in self._restrs:
291
+ if restr not in other.restrs():
292
+ return None
293
+ return []
294
+ return None
295
+
296
+ def __str__(self):
297
+ if self._restrs == []:
298
+ return "%s" % self._categ
299
+ restrictions = "[%s]" % ",".join(repr(r) for r in self._restrs)
300
+ return f"{self._categ}{restrictions}"
301
+
302
+
303
+ class FunctionalCategory(AbstractCCGCategory):
304
+ """
305
+ Class that represents a function application category.
306
+ Consists of argument and result categories, together with
307
+ an application direction.
308
+ """
309
+
310
+ def __init__(self, res, arg, dir):
311
+ self._res = res
312
+ self._arg = arg
313
+ self._dir = dir
314
+ self._comparison_key = (arg, dir, res)
315
+
316
+ def is_primitive(self):
317
+ return False
318
+
319
+ def is_function(self):
320
+ return True
321
+
322
+ def is_var(self):
323
+ return False
324
+
325
+ # Substitution returns the category consisting of the
326
+ # substitution applied to each of its constituents.
327
+ def substitute(self, subs):
328
+ sub_res = self._res.substitute(subs)
329
+ sub_dir = self._dir.substitute(subs)
330
+ sub_arg = self._arg.substitute(subs)
331
+ return FunctionalCategory(sub_res, sub_arg, self._dir)
332
+
333
+ # A function can unify with another function, so long as its
334
+ # constituents can unify, or with an unrestricted variable.
335
+ def can_unify(self, other):
336
+ if other.is_var():
337
+ return [(other, self)]
338
+ if other.is_function():
339
+ sa = self._res.can_unify(other.res())
340
+ sd = self._dir.can_unify(other.dir())
341
+ if sa is not None and sd is not None:
342
+ sb = self._arg.substitute(sa).can_unify(other.arg().substitute(sa))
343
+ if sb is not None:
344
+ return sa + sb
345
+ return None
346
+
347
+ # Constituent accessors
348
+ def arg(self):
349
+ return self._arg
350
+
351
+ def res(self):
352
+ return self._res
353
+
354
+ def dir(self):
355
+ return self._dir
356
+
357
+ def __str__(self):
358
+ return f"({self._res}{self._dir}{self._arg})"
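A short sketch of the category API defined in api.py above, assuming nltk is importable (the category instances are illustrative):

from nltk.ccg.api import Direction, FunctionalCategory, PrimitiveCategory

np = PrimitiveCategory("NP")
s = PrimitiveCategory("S")
vp = FunctionalCategory(s, np, Direction("\\", []))  # S\NP: wants an NP on its left

print(vp)                      # (S\NP)
print(vp.arg().can_unify(np))  # []   -> unifiable with no substitutions needed
print(vp.can_unify(np))        # None -> a functional category cannot unify with a primitive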
venv/lib/python3.10/site-packages/nltk/ccg/chart.py ADDED
@@ -0,0 +1,480 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ The lexicon is constructed by calling
10
+ ``lexicon.fromstring(<lexicon string>)``.
11
+
12
+ In order to construct a parser, you also need a rule set.
13
+ The standard English rules are provided in chart as
14
+ ``chart.DefaultRuleSet``.
15
+
16
+ The parser can then be constructed by calling, for example:
17
+ ``parser = chart.CCGChartParser(<lexicon>, <ruleset>)``
18
+
19
+ Parsing is then performed by running
20
+ ``parser.parse(<sentence>.split())``.
21
+
22
+ While this returns a list of trees, the default representation
23
+ of the produced trees is not very enlightening, particularly
24
+ given that it uses the same tree class as the CFG parsers.
25
+ It is probably better to call:
26
+ ``chart.printCCGDerivation(<parse tree extracted from list>)``
27
+ which should print a nice representation of the derivation.
28
+
29
+ This entire process is shown far more clearly in the demonstration:
30
+ python chart.py
31
+ """
32
+
33
+ import itertools
34
+
35
+ from nltk.ccg.combinator import *
36
+ from nltk.ccg.combinator import (
37
+ BackwardApplication,
38
+ BackwardBx,
39
+ BackwardComposition,
40
+ BackwardSx,
41
+ BackwardT,
42
+ ForwardApplication,
43
+ ForwardComposition,
44
+ ForwardSubstitution,
45
+ ForwardT,
46
+ )
47
+ from nltk.ccg.lexicon import Token, fromstring
48
+ from nltk.ccg.logic import *
49
+ from nltk.parse import ParserI
50
+ from nltk.parse.chart import AbstractChartRule, Chart, EdgeI
51
+ from nltk.sem.logic import *
52
+ from nltk.tree import Tree
53
+
54
+
55
+ # Based on the EdgeI class from NLTK.
56
+ # A number of the properties of the EdgeI interface don't
57
+ # transfer well to CCGs, however.
58
+ class CCGEdge(EdgeI):
59
+ def __init__(self, span, categ, rule):
60
+ self._span = span
61
+ self._categ = categ
62
+ self._rule = rule
63
+ self._comparison_key = (span, categ, rule)
64
+
65
+ # Accessors
66
+ def lhs(self):
67
+ return self._categ
68
+
69
+ def span(self):
70
+ return self._span
71
+
72
+ def start(self):
73
+ return self._span[0]
74
+
75
+ def end(self):
76
+ return self._span[1]
77
+
78
+ def length(self):
79
+ return self._span[1] - self._span[0]
80
+
81
+ def rhs(self):
82
+ return ()
83
+
84
+ def dot(self):
85
+ return 0
86
+
87
+ def is_complete(self):
88
+ return True
89
+
90
+ def is_incomplete(self):
91
+ return False
92
+
93
+ def nextsym(self):
94
+ return None
95
+
96
+ def categ(self):
97
+ return self._categ
98
+
99
+ def rule(self):
100
+ return self._rule
101
+
102
+
103
+ class CCGLeafEdge(EdgeI):
104
+ """
105
+ Class representing leaf edges in a CCG derivation.
106
+ """
107
+
108
+ def __init__(self, pos, token, leaf):
109
+ self._pos = pos
110
+ self._token = token
111
+ self._leaf = leaf
112
+ self._comparison_key = (pos, token.categ(), leaf)
113
+
114
+ # Accessors
115
+ def lhs(self):
116
+ return self._token.categ()
117
+
118
+ def span(self):
119
+ return (self._pos, self._pos + 1)
120
+
121
+ def start(self):
122
+ return self._pos
123
+
124
+ def end(self):
125
+ return self._pos + 1
126
+
127
+ def length(self):
128
+ return 1
129
+
130
+ def rhs(self):
131
+ return self._leaf
132
+
133
+ def dot(self):
134
+ return 0
135
+
136
+ def is_complete(self):
137
+ return True
138
+
139
+ def is_incomplete(self):
140
+ return False
141
+
142
+ def nextsym(self):
143
+ return None
144
+
145
+ def token(self):
146
+ return self._token
147
+
148
+ def categ(self):
149
+ return self._token.categ()
150
+
151
+ def leaf(self):
152
+ return self._leaf
153
+
154
+
155
+ class BinaryCombinatorRule(AbstractChartRule):
156
+ """
157
+ Class implementing application of a binary combinator to a chart.
158
+ Takes the directed combinator to apply.
159
+ """
160
+
161
+ NUMEDGES = 2
162
+
163
+ def __init__(self, combinator):
164
+ self._combinator = combinator
165
+
166
+ # Apply a combinator
167
+ def apply(self, chart, grammar, left_edge, right_edge):
168
+ # The left & right edges must be touching.
169
+ if not (left_edge.end() == right_edge.start()):
170
+ return
171
+
172
+ # Check if the two edges are permitted to combine.
173
+ # If so, generate the corresponding edge.
174
+ if self._combinator.can_combine(left_edge.categ(), right_edge.categ()):
175
+ for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
176
+ new_edge = CCGEdge(
177
+ span=(left_edge.start(), right_edge.end()),
178
+ categ=res,
179
+ rule=self._combinator,
180
+ )
181
+ if chart.insert(new_edge, (left_edge, right_edge)):
182
+ yield new_edge
183
+
184
+ # The representation of the combinator (for printing derivations)
185
+ def __str__(self):
186
+ return "%s" % self._combinator
187
+
188
+
189
+ # Type-raising must be handled slightly differently to the other rules, as the
190
+ # resulting rules only span a single edge, rather than both edges.
191
+
192
+
193
+ class ForwardTypeRaiseRule(AbstractChartRule):
194
+ """
195
+ Class for applying forward type raising
196
+ """
197
+
198
+ NUMEDGES = 2
199
+
200
+ def __init__(self):
201
+ self._combinator = ForwardT
202
+
203
+ def apply(self, chart, grammar, left_edge, right_edge):
204
+ if not (left_edge.end() == right_edge.start()):
205
+ return
206
+
207
+ for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
208
+ new_edge = CCGEdge(span=left_edge.span(), categ=res, rule=self._combinator)
209
+ if chart.insert(new_edge, (left_edge,)):
210
+ yield new_edge
211
+
212
+ def __str__(self):
213
+ return "%s" % self._combinator
214
+
215
+
216
+ class BackwardTypeRaiseRule(AbstractChartRule):
217
+ """
218
+ Class for applying backward type raising.
219
+ """
220
+
221
+ NUMEDGES = 2
222
+
223
+ def __init__(self):
224
+ self._combinator = BackwardT
225
+
226
+ def apply(self, chart, grammar, left_edge, right_edge):
227
+ if not (left_edge.end() == right_edge.start()):
228
+ return
229
+
230
+ for res in self._combinator.combine(left_edge.categ(), right_edge.categ()):
231
+ new_edge = CCGEdge(span=right_edge.span(), categ=res, rule=self._combinator)
232
+ if chart.insert(new_edge, (right_edge,)):
233
+ yield new_edge
234
+
235
+ def __str__(self):
236
+ return "%s" % self._combinator
237
+
238
+
239
+ # Common sets of combinators used for English derivations.
240
+ ApplicationRuleSet = [
241
+ BinaryCombinatorRule(ForwardApplication),
242
+ BinaryCombinatorRule(BackwardApplication),
243
+ ]
244
+ CompositionRuleSet = [
245
+ BinaryCombinatorRule(ForwardComposition),
246
+ BinaryCombinatorRule(BackwardComposition),
247
+ BinaryCombinatorRule(BackwardBx),
248
+ ]
249
+ SubstitutionRuleSet = [
250
+ BinaryCombinatorRule(ForwardSubstitution),
251
+ BinaryCombinatorRule(BackwardSx),
252
+ ]
253
+ TypeRaiseRuleSet = [ForwardTypeRaiseRule(), BackwardTypeRaiseRule()]
254
+
255
+ # The standard English rule set.
256
+ DefaultRuleSet = (
257
+ ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet + TypeRaiseRuleSet
258
+ )
259
+
260
+
261
+ class CCGChartParser(ParserI):
262
+ """
263
+ Chart parser for CCGs.
264
+ Based largely on the ChartParser class from NLTK.
265
+ """
266
+
267
+ def __init__(self, lexicon, rules, trace=0):
268
+ self._lexicon = lexicon
269
+ self._rules = rules
270
+ self._trace = trace
271
+
272
+ def lexicon(self):
273
+ return self._lexicon
274
+
275
+ # Implements the CYK algorithm
276
+ def parse(self, tokens):
277
+ tokens = list(tokens)
278
+ chart = CCGChart(list(tokens))
279
+ lex = self._lexicon
280
+
281
+ # Initialize leaf edges.
282
+ for index in range(chart.num_leaves()):
283
+ for token in lex.categories(chart.leaf(index)):
284
+ new_edge = CCGLeafEdge(index, token, chart.leaf(index))
285
+ chart.insert(new_edge, ())
286
+
287
+ # Select a span for the new edges
288
+ for span in range(2, chart.num_leaves() + 1):
289
+ for start in range(0, chart.num_leaves() - span + 1):
290
+ # Try all possible pairs of edges that could generate
291
+ # an edge for that span
292
+ for part in range(1, span):
293
+ lstart = start
294
+ mid = start + part
295
+ rend = start + span
296
+
297
+ for left in chart.select(span=(lstart, mid)):
298
+ for right in chart.select(span=(mid, rend)):
299
+ # Generate all possible combinations of the two edges
300
+ for rule in self._rules:
301
+ edges_added_by_rule = 0
302
+ for newedge in rule.apply(chart, lex, left, right):
303
+ edges_added_by_rule += 1
304
+
305
+ # Output the resulting parses
306
+ return chart.parses(lex.start())
307
+
308
+
309
+ class CCGChart(Chart):
310
+ def __init__(self, tokens):
311
+ Chart.__init__(self, tokens)
312
+
313
+ # Constructs the trees for a given parse. Unfortunately, the parse trees need to be
314
+ # constructed slightly differently to those in the default Chart class, so it has to
315
+ # be reimplemented
316
+ def _trees(self, edge, complete, memo, tree_class):
317
+ assert complete, "CCGChart cannot build incomplete trees"
318
+
319
+ if edge in memo:
320
+ return memo[edge]
321
+
322
+ if isinstance(edge, CCGLeafEdge):
323
+ word = tree_class(edge.token(), [self._tokens[edge.start()]])
324
+ leaf = tree_class((edge.token(), "Leaf"), [word])
325
+ memo[edge] = [leaf]
326
+ return [leaf]
327
+
328
+ memo[edge] = []
329
+ trees = []
330
+
331
+ for cpl in self.child_pointer_lists(edge):
332
+ child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl]
333
+ for children in itertools.product(*child_choices):
334
+ lhs = (
335
+ Token(
336
+ self._tokens[edge.start() : edge.end()],
337
+ edge.lhs(),
338
+ compute_semantics(children, edge),
339
+ ),
340
+ str(edge.rule()),
341
+ )
342
+ trees.append(tree_class(lhs, children))
343
+
344
+ memo[edge] = trees
345
+ return trees
346
+
347
+
348
+ def compute_semantics(children, edge):
349
+ if children[0].label()[0].semantics() is None:
350
+ return None
351
+
352
+ if len(children) == 2:
353
+ if isinstance(edge.rule(), BackwardCombinator):
354
+ children = [children[1], children[0]]
355
+
356
+ combinator = edge.rule()._combinator
357
+ function = children[0].label()[0].semantics()
358
+ argument = children[1].label()[0].semantics()
359
+
360
+ if isinstance(combinator, UndirectedFunctionApplication):
361
+ return compute_function_semantics(function, argument)
362
+ elif isinstance(combinator, UndirectedComposition):
363
+ return compute_composition_semantics(function, argument)
364
+ elif isinstance(combinator, UndirectedSubstitution):
365
+ return compute_substitution_semantics(function, argument)
366
+ else:
367
+ raise AssertionError("Unsupported combinator '" + combinator + "'")
368
+ else:
369
+ return compute_type_raised_semantics(children[0].label()[0].semantics())
370
+
371
+
372
+ # --------
373
+ # Displaying derivations
374
+ # --------
375
+ def printCCGDerivation(tree):
376
+ # Get the leaves and initial categories
377
+ leafcats = tree.pos()
378
+ leafstr = ""
379
+ catstr = ""
380
+
381
+ # Construct a string with both the leaf word and corresponding
382
+ # category aligned.
383
+ for (leaf, cat) in leafcats:
384
+ str_cat = "%s" % cat
385
+ nextlen = 2 + max(len(leaf), len(str_cat))
386
+ lcatlen = (nextlen - len(str_cat)) // 2
387
+ rcatlen = lcatlen + (nextlen - len(str_cat)) % 2
388
+ catstr += " " * lcatlen + str_cat + " " * rcatlen
389
+ lleaflen = (nextlen - len(leaf)) // 2
390
+ rleaflen = lleaflen + (nextlen - len(leaf)) % 2
391
+ leafstr += " " * lleaflen + leaf + " " * rleaflen
392
+ print(leafstr.rstrip())
393
+ print(catstr.rstrip())
394
+
395
+ # Display the derivation steps
396
+ printCCGTree(0, tree)
397
+
398
+
399
+ # Prints the sequence of derivation steps.
400
+ def printCCGTree(lwidth, tree):
401
+ rwidth = lwidth
402
+
403
+ # Is a leaf (word).
404
+ # Increment the span by the space occupied by the leaf.
405
+ if not isinstance(tree, Tree):
406
+ return 2 + lwidth + len(tree)
407
+
408
+ # Find the width of the current derivation step
409
+ for child in tree:
410
+ rwidth = max(rwidth, printCCGTree(rwidth, child))
411
+
412
+ # Is a leaf node.
413
+ # Don't print anything, but account for the space occupied.
414
+ if not isinstance(tree.label(), tuple):
415
+ return max(
416
+ rwidth, 2 + lwidth + len("%s" % tree.label()), 2 + lwidth + len(tree[0])
417
+ )
418
+
419
+ (token, op) = tree.label()
420
+
421
+ if op == "Leaf":
422
+ return rwidth
423
+
424
+ # Pad to the left with spaces, followed by a sequence of '-'
425
+ # and the derivation rule.
426
+ print(lwidth * " " + (rwidth - lwidth) * "-" + "%s" % op)
427
+ # Print the resulting category on a new line.
428
+ str_res = "%s" % (token.categ())
429
+ if token.semantics() is not None:
430
+ str_res += " {" + str(token.semantics()) + "}"
431
+ respadlen = (rwidth - lwidth - len(str_res)) // 2 + lwidth
432
+ print(respadlen * " " + str_res)
433
+ return rwidth
434
+
435
+
436
+ ### Demonstration code
437
+
438
+ # Construct the lexicon
439
+ lex = fromstring(
440
+ """
441
+ :- S, NP, N, VP # Primitive categories, S is the target primitive
442
+
443
+ Det :: NP/N # Family of words
444
+ Pro :: NP
445
+ TV :: VP/NP
446
+ Modal :: (S\\NP)/VP # Backslashes need to be escaped
447
+
448
+ I => Pro # Word -> Category mapping
449
+ you => Pro
450
+
451
+ the => Det
452
+
453
+ # Variables have the special keyword 'var'
454
+ # '.' prevents permutation
455
+ # ',' prevents composition
456
+ and => var\\.,var/.,var
457
+
458
+ which => (N\\N)/(S/NP)
459
+
460
+ will => Modal # Categories can be either explicit, or families.
461
+ might => Modal
462
+
463
+ cook => TV
464
+ eat => TV
465
+
466
+ mushrooms => N
467
+ parsnips => N
468
+ bacon => N
469
+ """
470
+ )
471
+
472
+
473
+ def demo():
474
+ parser = CCGChartParser(lex, DefaultRuleSet)
475
+ for parse in parser.parse("I might cook and eat the bacon".split()):
476
+ printCCGDerivation(parse)
477
+
478
+
479
+ if __name__ == "__main__":
480
+ demo()
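The module docstring above describes the intended workflow: build a lexicon, pick a rule set, parse, then pretty-print the derivation. A compact sketch of that workflow with a toy grammar (assumes nltk is importable; the lexicon is illustrative):

from nltk.ccg import chart, lexicon

lex = lexicon.fromstring(
    """
    :- S, NP           # S is the target category
    I => NP
    you => NP
    like => (S\\NP)/NP
    """
)
parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
parses = list(parser.parse("I like you".split()))
print(len(parses))                   # number of derivations found
chart.printCCGDerivation(parses[0])  # pretty-print one derivation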
venv/lib/python3.10/site-packages/nltk/ccg/combinator.py ADDED
@@ -0,0 +1,339 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ """
8
+ CCG Combinators
9
+ """
10
+
11
+ from abc import ABCMeta, abstractmethod
12
+
13
+ from nltk.ccg.api import FunctionalCategory
14
+
15
+
16
+ class UndirectedBinaryCombinator(metaclass=ABCMeta):
17
+ """
18
+ Abstract class for representing a binary combinator.
19
+ Merely defines functions for checking if the function and argument
20
+ are able to be combined, and what the resulting category is.
21
+
22
+ Note that as no assumptions are made as to direction, the unrestricted
23
+ combinators can perform all backward, forward and crossed variations
24
+ of the combinators; these restrictions must be added in the rule
25
+ class.
26
+ """
27
+
28
+ @abstractmethod
29
+ def can_combine(self, function, argument):
30
+ pass
31
+
32
+ @abstractmethod
33
+ def combine(self, function, argument):
34
+ pass
35
+
36
+
37
+ class DirectedBinaryCombinator(metaclass=ABCMeta):
38
+ """
39
+ Wrapper for the undirected binary combinator.
40
+ It takes left and right categories, and decides which is to be
41
+ the function, and which the argument.
42
+ It then decides whether or not they can be combined.
43
+ """
44
+
45
+ @abstractmethod
46
+ def can_combine(self, left, right):
47
+ pass
48
+
49
+ @abstractmethod
50
+ def combine(self, left, right):
51
+ pass
52
+
53
+
54
+ class ForwardCombinator(DirectedBinaryCombinator):
55
+ """
56
+ Class representing combinators where the primary functor is on the left.
57
+
58
+ Takes an undirected combinator, and a predicate which adds constraints
59
+ restricting the cases in which it may apply.
60
+ """
61
+
62
+ def __init__(self, combinator, predicate, suffix=""):
63
+ self._combinator = combinator
64
+ self._predicate = predicate
65
+ self._suffix = suffix
66
+
67
+ def can_combine(self, left, right):
68
+ return self._combinator.can_combine(left, right) and self._predicate(
69
+ left, right
70
+ )
71
+
72
+ def combine(self, left, right):
73
+ yield from self._combinator.combine(left, right)
74
+
75
+ def __str__(self):
76
+ return f">{self._combinator}{self._suffix}"
77
+
78
+
79
+ class BackwardCombinator(DirectedBinaryCombinator):
80
+ """
81
+ The backward equivalent of the ForwardCombinator class.
82
+ """
83
+
84
+ def __init__(self, combinator, predicate, suffix=""):
85
+ self._combinator = combinator
86
+ self._predicate = predicate
87
+ self._suffix = suffix
88
+
89
+ def can_combine(self, left, right):
90
+ return self._combinator.can_combine(right, left) and self._predicate(
91
+ left, right
92
+ )
93
+
94
+ def combine(self, left, right):
95
+ yield from self._combinator.combine(right, left)
96
+
97
+ def __str__(self):
98
+ return f"<{self._combinator}{self._suffix}"
99
+
100
+
101
+ class UndirectedFunctionApplication(UndirectedBinaryCombinator):
102
+ """
103
+ Class representing function application.
104
+ Implements rules of the form:
105
+ X/Y Y -> X (>)
106
+ And the corresponding backwards application rule
107
+ """
108
+
109
+ def can_combine(self, function, argument):
110
+ if not function.is_function():
111
+ return False
112
+
113
+ return not function.arg().can_unify(argument) is None
114
+
115
+ def combine(self, function, argument):
116
+ if not function.is_function():
117
+ return
118
+
119
+ subs = function.arg().can_unify(argument)
120
+ if subs is None:
121
+ return
122
+
123
+ yield function.res().substitute(subs)
124
+
125
+ def __str__(self):
126
+ return ""
127
+
128
+
129
+ # Predicates for function application.
130
+
131
+ # Ensures the left functor takes an argument on the right
132
+ def forwardOnly(left, right):
133
+ return left.dir().is_forward()
134
+
135
+
136
+ # Ensures the right functor takes an argument on the left
137
+ def backwardOnly(left, right):
138
+ return right.dir().is_backward()
139
+
140
+
141
+ # Application combinator instances
142
+ ForwardApplication = ForwardCombinator(UndirectedFunctionApplication(), forwardOnly)
143
+ BackwardApplication = BackwardCombinator(UndirectedFunctionApplication(), backwardOnly)
144
+
145
+
146
+ class UndirectedComposition(UndirectedBinaryCombinator):
147
+ """
148
+ Functional composition (harmonic) combinator.
149
+ Implements rules of the form
150
+ X/Y Y/Z -> X/Z (B>)
151
+ And the corresponding backwards and crossed variations.
152
+ """
153
+
154
+ def can_combine(self, function, argument):
155
+ # Can only combine two functions, and both functions must
156
+ # allow composition.
157
+ if not (function.is_function() and argument.is_function()):
158
+ return False
159
+ if function.dir().can_compose() and argument.dir().can_compose():
160
+ return not function.arg().can_unify(argument.res()) is None
161
+ return False
162
+
163
+ def combine(self, function, argument):
164
+ if not (function.is_function() and argument.is_function()):
165
+ return
166
+ if function.dir().can_compose() and argument.dir().can_compose():
167
+ subs = function.arg().can_unify(argument.res())
168
+ if subs is not None:
169
+ yield FunctionalCategory(
170
+ function.res().substitute(subs),
171
+ argument.arg().substitute(subs),
172
+ argument.dir(),
173
+ )
174
+
175
+ def __str__(self):
176
+ return "B"
177
+
178
+
179
+ # Predicates for restricting application of straight composition.
180
+ def bothForward(left, right):
181
+ return left.dir().is_forward() and right.dir().is_forward()
182
+
183
+
184
+ def bothBackward(left, right):
185
+ return left.dir().is_backward() and right.dir().is_backward()
186
+
187
+
188
+ # Predicates for crossed composition
189
+ def crossedDirs(left, right):
190
+ return left.dir().is_forward() and right.dir().is_backward()
191
+
192
+
193
+ def backwardBxConstraint(left, right):
194
+ # The functors must be crossed inwards
195
+ if not crossedDirs(left, right):
196
+ return False
197
+ # Permuting combinators must be allowed
198
+ if not (left.dir().can_cross() and right.dir().can_cross()):
199
+ return False
200
+ # The resulting argument category is restricted to be primitive
201
+ return left.arg().is_primitive()
202
+
203
+
204
+ # Straight composition combinators
205
+ ForwardComposition = ForwardCombinator(UndirectedComposition(), forwardOnly)
206
+ BackwardComposition = BackwardCombinator(UndirectedComposition(), backwardOnly)
207
+
208
+ # Backward crossed composition
209
+ BackwardBx = BackwardCombinator(
210
+ UndirectedComposition(), backwardBxConstraint, suffix="x"
211
+ )
212
+
213
+
214
+ class UndirectedSubstitution(UndirectedBinaryCombinator):
215
+ r"""
216
+ Substitution (permutation) combinator.
217
+ Implements rules of the form
218
+ Y/Z (X\Y)/Z -> X/Z (<Sx)
219
+ And other variations.
220
+ """
221
+
222
+ def can_combine(self, function, argument):
223
+ if function.is_primitive() or argument.is_primitive():
224
+ return False
225
+
226
+ # These could potentially be moved to the predicates, as the
227
+ # constraints may not be general to all languages.
228
+ if function.res().is_primitive():
229
+ return False
230
+ if not function.arg().is_primitive():
231
+ return False
232
+
233
+ if not (function.dir().can_compose() and argument.dir().can_compose()):
234
+ return False
235
+ return (function.res().arg() == argument.res()) and (
236
+ function.arg() == argument.arg()
237
+ )
238
+
239
+ def combine(self, function, argument):
240
+ if self.can_combine(function, argument):
241
+ yield FunctionalCategory(
242
+ function.res().res(), argument.arg(), argument.dir()
243
+ )
244
+
245
+ def __str__(self):
246
+ return "S"
247
+
248
+
249
+ # Predicate for forward substitution
250
+ def forwardSConstraint(left, right):
251
+ if not bothForward(left, right):
252
+ return False
253
+ return left.res().dir().is_forward() and left.arg().is_primitive()
254
+
255
+
256
+ # Predicate for backward crossed substitution
257
+ def backwardSxConstraint(left, right):
258
+ if not (left.dir().can_cross() and right.dir().can_cross()):
259
+ return False
260
+ if not bothForward(left, right):
261
+ return False
262
+ return right.res().dir().is_backward() and right.arg().is_primitive()
263
+
264
+
265
+ # Instances of substitution combinators
266
+ ForwardSubstitution = ForwardCombinator(UndirectedSubstitution(), forwardSConstraint)
267
+ BackwardSx = BackwardCombinator(UndirectedSubstitution(), backwardSxConstraint, "x")
268
+
269
+
270
+ # Retrieves the left-most functional category.
271
+ # ie, (N\N)/(S/NP) => N\N
272
+ def innermostFunction(categ):
273
+ while categ.res().is_function():
274
+ categ = categ.res()
275
+ return categ
276
+
277
+
278
+ class UndirectedTypeRaise(UndirectedBinaryCombinator):
279
+ """
280
+ Undirected combinator for type raising.
281
+ """
282
+
283
+ def can_combine(self, function, arg):
284
+ # The argument must be a function.
285
+ # The restriction that arg.res() must be a function
286
+ # merely reduces redundant type-raising; if arg.res() is
287
+ # primitive, we have:
288
+ # X Y\X =>(<T) Y/(Y\X) Y\X =>(>) Y
289
+ # which is equivalent to
290
+ # X Y\X =>(<) Y
291
+ if not (arg.is_function() and arg.res().is_function()):
292
+ return False
293
+
294
+ arg = innermostFunction(arg)
295
+
296
+ subs = function.can_unify(arg.arg())
298
+ if subs is not None:
299
+ return True
300
+ return False
301
+
302
+ def combine(self, function, arg):
303
+ if not (
304
+ function.is_primitive() and arg.is_function() and arg.res().is_function()
305
+ ):
306
+ return
307
+
308
+ # Type-raising matches only the innermost application.
309
+ arg = innermostFunction(arg)
310
+
311
+ subs = function.can_unify(arg.arg())
312
+ if subs is not None:
313
+ xcat = arg.res().substitute(subs)
314
+ yield FunctionalCategory(
315
+ xcat, FunctionalCategory(xcat, function, arg.dir()), -(arg.dir())
316
+ )
317
+
318
+ def __str__(self):
319
+ return "T"
320
+
321
+
322
+ # Predicates for type-raising
323
+ # The direction of the innermost category must be towards
324
+ # the primary functor.
325
+ # The restriction that the variable must be primitive is not
326
+ # common to all versions of CCGs; some authors have other restrictions.
327
+ def forwardTConstraint(left, right):
328
+ arg = innermostFunction(right)
329
+ return arg.dir().is_backward() and arg.res().is_primitive()
330
+
331
+
332
+ def backwardTConstraint(left, right):
333
+ arg = innermostFunction(left)
334
+ return arg.dir().is_forward() and arg.res().is_primitive()
335
+
336
+
337
+ # Instances of type-raising combinators
338
+ ForwardT = ForwardCombinator(UndirectedTypeRaise(), forwardTConstraint)
339
+ BackwardT = BackwardCombinator(UndirectedTypeRaise(), backwardTConstraint)
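The combinator instances defined at module level can also be applied to categories directly, which is how the chart rules use them. A minimal sketch of forward application, X/Y Y -> X, assuming nltk is importable:

from nltk.ccg.api import Direction, FunctionalCategory, PrimitiveCategory
from nltk.ccg.combinator import ForwardApplication

s, np = PrimitiveCategory("S"), PrimitiveCategory("NP")
s_over_np = FunctionalCategory(s, np, Direction("/", []))  # S/NP

if ForwardApplication.can_combine(s_over_np, np):
    for result in ForwardApplication.combine(s_over_np, np):
        print(result)  # S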
venv/lib/python3.10/site-packages/nltk/ccg/lexicon.py ADDED
@@ -0,0 +1,338 @@
1
+ # Natural Language Toolkit: Combinatory Categorial Grammar
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Graeme Gange <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+ """
8
+ CCG Lexicons
9
+ """
10
+
11
+ import re
12
+ from collections import defaultdict
13
+
14
+ from nltk.ccg.api import CCGVar, Direction, FunctionalCategory, PrimitiveCategory
15
+ from nltk.internals import deprecated
16
+ from nltk.sem.logic import Expression
17
+
18
+ # ------------
19
+ # Regular expressions used for parsing components of the lexicon
20
+ # ------------
21
+
22
+ # Parses a primitive category and subscripts
23
+ PRIM_RE = re.compile(r"""([A-Za-z]+)(\[[A-Za-z,]+\])?""")
24
+
25
+ # Separates the next primitive category from the remainder of the
26
+ # string
27
+ NEXTPRIM_RE = re.compile(r"""([A-Za-z]+(?:\[[A-Za-z,]+\])?)(.*)""")
28
+
29
+ # Separates the next application operator from the remainder
30
+ APP_RE = re.compile(r"""([\\/])([.,]?)([.,]?)(.*)""")
31
+
32
+ # Parses the definition of the right-hand side (rhs) of either a word or a family
33
+ LEX_RE = re.compile(r"""([\S_]+)\s*(::|[-=]+>)\s*(.+)""", re.UNICODE)
34
+
35
+ # Parses the right hand side that contains category and maybe semantic predicate
36
+ RHS_RE = re.compile(r"""([^{}]*[^ {}])\s*(\{[^}]+\})?""", re.UNICODE)
37
+
38
+ # Parses the semantic predicate
39
+ SEMANTICS_RE = re.compile(r"""\{([^}]+)\}""", re.UNICODE)
40
+
41
+ # Strips comments from a line
42
+ COMMENTS_RE = re.compile("""([^#]*)(?:#.*)?""")
43
+
44
+
45
+ class Token:
46
+ """
47
+ Class representing a token.
48
+
49
+ token => category {semantics}
50
+ e.g. eat => S\\var[pl]/var {\\x y.eat(x,y)}
51
+
52
+ * `token` (string)
53
+ * `categ` (string)
54
+ * `semantics` (Expression)
55
+ """
56
+
57
+ def __init__(self, token, categ, semantics=None):
58
+ self._token = token
59
+ self._categ = categ
60
+ self._semantics = semantics
61
+
62
+ def categ(self):
63
+ return self._categ
64
+
65
+ def semantics(self):
66
+ return self._semantics
67
+
68
+ def __str__(self):
69
+ semantics_str = ""
70
+ if self._semantics is not None:
71
+ semantics_str = " {" + str(self._semantics) + "}"
72
+ return "" + str(self._categ) + semantics_str
73
+
74
+ def __cmp__(self, other):
75
+ if not isinstance(other, Token):
76
+ return -1
77
+ return cmp((self._categ, self._semantics), (other.categ(), other.semantics()))
78
+
79
+
80
+ class CCGLexicon:
81
+ """
82
+ Class representing a lexicon for CCG grammars.
83
+
84
+ * `primitives`: The list of primitive categories for the lexicon
85
+ * `families`: Families of categories
86
+ * `entries`: A mapping of words to possible categories
87
+ """
88
+
89
+ def __init__(self, start, primitives, families, entries):
90
+ self._start = PrimitiveCategory(start)
91
+ self._primitives = primitives
92
+ self._families = families
93
+ self._entries = entries
94
+
95
+ def categories(self, word):
96
+ """
97
+ Returns all the possible categories for a word
98
+ """
99
+ return self._entries[word]
100
+
101
+ def start(self):
102
+ """
103
+ Return the target category for the parser
104
+ """
105
+ return self._start
106
+
107
+ def __str__(self):
108
+ """
109
+ String representation of the lexicon. Used for debugging.
110
+ """
111
+ string = ""
112
+ first = True
113
+ for ident in sorted(self._entries):
114
+ if not first:
115
+ string = string + "\n"
116
+ string = string + ident + " => "
117
+
118
+ first = True
119
+ for cat in self._entries[ident]:
120
+ if not first:
121
+ string = string + " | "
122
+ else:
123
+ first = False
124
+ string = string + "%s" % cat
125
+ return string
126
+
127
+
128
+ # -----------
129
+ # Parsing lexicons
130
+ # -----------
131
+
132
+
133
+ def matchBrackets(string):
134
+ """
135
+ Separate the contents matching the first set of brackets from the rest of
136
+ the input.
137
+ """
138
+ rest = string[1:]
139
+ inside = "("
140
+
141
+ while rest != "" and not rest.startswith(")"):
142
+ if rest.startswith("("):
143
+ (part, rest) = matchBrackets(rest)
144
+ inside = inside + part
145
+ else:
146
+ inside = inside + rest[0]
147
+ rest = rest[1:]
148
+ if rest.startswith(")"):
149
+ return (inside + ")", rest[1:])
150
+ raise AssertionError("Unmatched bracket in string '" + string + "'")
151
+
152
+
153
+ def nextCategory(string):
154
+ """
155
+ Separate the string for the next portion of the category from the rest
156
+ of the string
157
+ """
158
+ if string.startswith("("):
159
+ return matchBrackets(string)
160
+ return NEXTPRIM_RE.match(string).groups()
161
+
162
+
163
+ def parseApplication(app):
164
+ """
165
+ Parse an application operator
166
+ """
167
+ return Direction(app[0], app[1:])
168
+
169
+
170
+ def parseSubscripts(subscr):
171
+ """
172
+ Parse the subscripts for a primitive category
173
+ """
174
+ if subscr:
175
+ return subscr[1:-1].split(",")
176
+ return []
177
+
178
+
179
+ def parsePrimitiveCategory(chunks, primitives, families, var):
180
+ """
181
+ Parse a primitive category
182
+
183
+ If the primitive is the special category 'var', replace it with the
184
+ correct `CCGVar`.
185
+ """
186
+ if chunks[0] == "var":
187
+ if chunks[1] is None:
188
+ if var is None:
189
+ var = CCGVar()
190
+ return (var, var)
191
+
192
+ catstr = chunks[0]
193
+ if catstr in families:
194
+ (cat, cvar) = families[catstr]
195
+ if var is None:
196
+ var = cvar
197
+ else:
198
+ cat = cat.substitute([(cvar, var)])
199
+ return (cat, var)
200
+
201
+ if catstr in primitives:
202
+ subscrs = parseSubscripts(chunks[1])
203
+ return (PrimitiveCategory(catstr, subscrs), var)
204
+ raise AssertionError(
205
+ "String '" + catstr + "' is neither a family nor primitive category."
206
+ )
207
+
208
+
209
+ def augParseCategory(line, primitives, families, var=None):
210
+ """
211
+ Parse a string representing a category, and returns a tuple with
212
+ (possibly) the CCG variable for the category
213
+ """
214
+ (cat_string, rest) = nextCategory(line)
215
+
216
+ if cat_string.startswith("("):
217
+ (res, var) = augParseCategory(cat_string[1:-1], primitives, families, var)
218
+
219
+ else:
220
+ (res, var) = parsePrimitiveCategory(
221
+ PRIM_RE.match(cat_string).groups(), primitives, families, var
222
+ )
223
+
224
+ while rest != "":
225
+ app = APP_RE.match(rest).groups()
226
+ direction = parseApplication(app[0:3])
227
+ rest = app[3]
228
+
229
+ (cat_string, rest) = nextCategory(rest)
230
+ if cat_string.startswith("("):
231
+ (arg, var) = augParseCategory(cat_string[1:-1], primitives, families, var)
232
+ else:
233
+ (arg, var) = parsePrimitiveCategory(
234
+ PRIM_RE.match(cat_string).groups(), primitives, families, var
235
+ )
236
+ res = FunctionalCategory(res, arg, direction)
237
+
238
+ return (res, var)
239
+
240
+
241
+ def fromstring(lex_str, include_semantics=False):
242
+ """
243
+ Convert string representation into a lexicon for CCGs.
244
+ """
245
+ CCGVar.reset_id()
246
+ primitives = []
247
+ families = {}
248
+ entries = defaultdict(list)
249
+ for line in lex_str.splitlines():
250
+ # Strip comments and leading/trailing whitespace.
251
+ line = COMMENTS_RE.match(line).groups()[0].strip()
252
+ if line == "":
253
+ continue
254
+
255
+ if line.startswith(":-"):
256
+ # A line of primitive categories.
257
+ # The first one is the target category
258
+ # ie, :- S, N, NP, VP
259
+ primitives = primitives + [
260
+ prim.strip() for prim in line[2:].strip().split(",")
261
+ ]
262
+ else:
263
+ # Either a family definition, or a word definition
264
+ (ident, sep, rhs) = LEX_RE.match(line).groups()
265
+ (catstr, semantics_str) = RHS_RE.match(rhs).groups()
266
+ (cat, var) = augParseCategory(catstr, primitives, families)
267
+
268
+ if sep == "::":
269
+ # Family definition
270
+ # ie, Det :: NP/N
271
+ families[ident] = (cat, var)
272
+ else:
273
+ semantics = None
274
+ if include_semantics is True:
275
+ if semantics_str is None:
276
+ raise AssertionError(
277
+ line
278
+ + " must contain semantics because include_semantics is set to True"
279
+ )
280
+ else:
281
+ semantics = Expression.fromstring(
282
+ SEMANTICS_RE.match(semantics_str).groups()[0]
283
+ )
284
+ # Word definition
285
+ # ie, which => (N\N)/(S/NP)
286
+ entries[ident].append(Token(ident, cat, semantics))
287
+ return CCGLexicon(primitives[0], primitives, families, entries)
288
+
289
+
290
+ @deprecated("Use fromstring() instead.")
291
+ def parseLexicon(lex_str):
292
+ return fromstring(lex_str)
293
+
294
+
295
+ openccg_tinytiny = fromstring(
296
+ """
297
+ # Rather minimal lexicon based on the openccg `tinytiny' grammar.
298
+ # Only incorporates a subset of the morphological subcategories, however.
299
+ :- S,NP,N # Primitive categories
300
+ Det :: NP/N # Determiners
301
+ Pro :: NP
302
+ IntransVsg :: S\\NP[sg] # Tensed intransitive verbs (singular)
303
+ IntransVpl :: S\\NP[pl] # Plural
304
+ TransVsg :: S\\NP[sg]/NP # Tensed transitive verbs (singular)
305
+ TransVpl :: S\\NP[pl]/NP # Plural
306
+
307
+ the => NP[sg]/N[sg]
308
+ the => NP[pl]/N[pl]
309
+
310
+ I => Pro
311
+ me => Pro
312
+ we => Pro
313
+ us => Pro
314
+
315
+ book => N[sg]
316
+ books => N[pl]
317
+
318
+ peach => N[sg]
319
+ peaches => N[pl]
320
+
321
+ policeman => N[sg]
322
+ policemen => N[pl]
323
+
324
+ boy => N[sg]
325
+ boys => N[pl]
326
+
327
+ sleep => IntransVsg
328
+ sleep => IntransVpl
329
+
330
+ eat => IntransVpl
331
+ eat => TransVpl
332
+ eats => IntransVsg
333
+ eats => TransVsg
334
+
335
+ see => TransVpl
336
+ sees => TransVsg
337
+ """
338
+ )
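The openccg_tinytiny grammar above shows the plain lexicon format; fromstring can also attach semantics when include_semantics=True. A small sketch, assuming nltk is importable (the two-word grammar is illustrative):

from nltk.ccg import lexicon

lex = lexicon.fromstring(
    """
    :- S, NP
    Kim => NP {kim}
    sleeps => S\\NP {\\x.sleep(x)}
    """,
    include_semantics=True,
)
for token in lex.categories("sleeps"):
    print(token.categ(), token.semantics())  # (S\NP) \x.sleep(x)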
venv/lib/python3.10/site-packages/nltk/ccg/logic.py ADDED
@@ -0,0 +1,60 @@
+ # Natural Language Toolkit: Combinatory Categorial Grammar
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Tanin Na Nakorn (@tanin)
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ """
+ Helper functions for CCG semantics computation
+ """
+
+ from nltk.sem.logic import *
+
+
+ def compute_type_raised_semantics(semantics):
+     core = semantics
+     parent = None
+     while isinstance(core, LambdaExpression):
+         parent = core
+         core = core.term
+
+     var = Variable("F")
+     while var in core.free():
+         var = unique_variable(pattern=var)
+     core = ApplicationExpression(FunctionVariableExpression(var), core)
+
+     if parent is not None:
+         parent.term = core
+     else:
+         semantics = core
+
+     return LambdaExpression(var, semantics)
+
+
+ def compute_function_semantics(function, argument):
+     return ApplicationExpression(function, argument).simplify()
+
+
+ def compute_composition_semantics(function, argument):
+     assert isinstance(argument, LambdaExpression), (
+         "`" + str(argument) + "` must be a lambda expression"
+     )
+     return LambdaExpression(
+         argument.variable, ApplicationExpression(function, argument.term).simplify()
+     )
+
+
+ def compute_substitution_semantics(function, argument):
+     assert isinstance(function, LambdaExpression) and isinstance(
+         function.term, LambdaExpression
+     ), ("`" + str(function) + "` must be a lambda expression with 2 arguments")
+     assert isinstance(argument, LambdaExpression), (
+         "`" + str(argument) + "` must be a lambda expression"
+     )
+
+     new_argument = ApplicationExpression(
+         argument, VariableExpression(function.variable)
+     ).simplify()
+     new_term = ApplicationExpression(function.term, new_argument).simplify()
+
+     return LambdaExpression(function.variable, new_term)
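The helpers above operate on nltk.sem.logic expressions: application semantics is just beta-reduction, and composition wraps the reduced body back in the argument's lambda. A short sketch, assuming nltk is importable (the sleep/friend predicates are illustrative):

from nltk.ccg.logic import compute_composition_semantics, compute_function_semantics
from nltk.sem.logic import Expression

f = Expression.fromstring(r"\x.sleep(x)")
g = Expression.fromstring(r"\y.friend(y)")
kim = Expression.fromstring("kim")

print(compute_function_semantics(f, kim))   # sleep(kim)
print(compute_composition_semantics(f, g))  # \y.sleep(friend(y))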
venv/lib/python3.10/site-packages/nltk/tag/__init__.py ADDED
@@ -0,0 +1,184 @@
1
+ # Natural Language Toolkit: Taggers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ """
9
+ NLTK Taggers
10
+
11
+ This package contains classes and interfaces for part-of-speech
12
+ tagging, or simply "tagging".
13
+
14
+ A "tag" is a case-sensitive string that specifies some property of a token,
15
+ such as its part of speech. Tagged tokens are encoded as tuples
16
+ ``(token, tag)``. For example, the following tagged token combines
17
+ the word ``'fly'`` with a noun part of speech tag (``'NN'``):
18
+
19
+ >>> tagged_tok = ('fly', 'NN')
20
+
21
+ An off-the-shelf tagger is available for English. It uses the Penn Treebank tagset:
22
+
23
+ >>> from nltk import pos_tag, word_tokenize
24
+ >>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE
25
+ [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'),
26
+ ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
27
+
28
+ A Russian tagger is also available if you specify lang="rus". It uses
29
+ the Russian National Corpus tagset:
30
+
31
+ >>> pos_tag(word_tokenize("Илья оторопел и дважды перечитал бумажку."), lang='rus') # doctest: +SKIP
32
+ [('Илья', 'S'), ('оторопел', 'V'), ('и', 'CONJ'), ('дважды', 'ADV'), ('перечитал', 'V'),
33
+ ('бумажку', 'S'), ('.', 'NONLEX')]
34
+
35
+ This package defines several taggers, which take a list of tokens,
36
+ assign a tag to each one, and return the resulting list of tagged tokens.
37
+ Most of the taggers are built automatically based on a training corpus.
38
+ For example, the unigram tagger tags each word *w* by checking what
39
+ the most frequent tag for *w* was in a training corpus:
40
+
41
+ >>> from nltk.corpus import brown
42
+ >>> from nltk.tag import UnigramTagger
43
+ >>> tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500])
44
+ >>> sent = ['Mitchell', 'decried', 'the', 'high', 'rate', 'of', 'unemployment']
45
+ >>> for word, tag in tagger.tag(sent):
46
+ ... print(word, '->', tag)
47
+ Mitchell -> NP
48
+ decried -> None
49
+ the -> AT
50
+ high -> JJ
51
+ rate -> NN
52
+ of -> IN
53
+ unemployment -> None
54
+
55
+ Note that words that the tagger has not seen during training receive a tag
56
+ of ``None``.
57
+
58
+ We evaluate a tagger on data that was not seen during training:
59
+
60
+ >>> round(tagger.accuracy(brown.tagged_sents(categories='news')[500:600]), 3)
61
+ 0.735
62
+
63
+ For more information, please consult chapter 5 of the NLTK Book.
64
+
65
+ isort:skip_file
66
+ """
67
+
68
+ from nltk.tag.api import TaggerI
69
+ from nltk.tag.util import str2tuple, tuple2str, untag
70
+ from nltk.tag.sequential import (
71
+ SequentialBackoffTagger,
72
+ ContextTagger,
73
+ DefaultTagger,
74
+ NgramTagger,
75
+ UnigramTagger,
76
+ BigramTagger,
77
+ TrigramTagger,
78
+ AffixTagger,
79
+ RegexpTagger,
80
+ ClassifierBasedTagger,
81
+ ClassifierBasedPOSTagger,
82
+ )
83
+ from nltk.tag.brill import BrillTagger
84
+ from nltk.tag.brill_trainer import BrillTaggerTrainer
85
+ from nltk.tag.tnt import TnT
86
+ from nltk.tag.hunpos import HunposTagger
87
+ from nltk.tag.stanford import StanfordTagger, StanfordPOSTagger, StanfordNERTagger
88
+ from nltk.tag.hmm import HiddenMarkovModelTagger, HiddenMarkovModelTrainer
89
+ from nltk.tag.senna import SennaTagger, SennaChunkTagger, SennaNERTagger
90
+ from nltk.tag.mapping import tagset_mapping, map_tag
91
+ from nltk.tag.crf import CRFTagger
92
+ from nltk.tag.perceptron import PerceptronTagger
93
+
94
+ from nltk.data import load, find
95
+
96
+ RUS_PICKLE = (
97
+ "taggers/averaged_perceptron_tagger_ru/averaged_perceptron_tagger_ru.pickle"
98
+ )
99
+
100
+
101
+ def _get_tagger(lang=None):
102
+ if lang == "rus":
103
+ tagger = PerceptronTagger(False)
104
+ ap_russian_model_loc = "file:" + str(find(RUS_PICKLE))
105
+ tagger.load(ap_russian_model_loc)
106
+ else:
107
+ tagger = PerceptronTagger()
108
+ return tagger
109
+
110
+
111
+ def _pos_tag(tokens, tagset=None, tagger=None, lang=None):
112
+ # Currently only supports English and Russian.
113
+ if lang not in ["eng", "rus"]:
114
+ raise NotImplementedError(
115
+ "Currently, NLTK pos_tag only supports English and Russian "
116
+ "(i.e. lang='eng' or lang='rus')"
117
+ )
118
+ # Throws Error if tokens is of string type
119
+ elif isinstance(tokens, str):
120
+ raise TypeError("tokens: expected a list of strings, got a string")
121
+
122
+ else:
123
+ tagged_tokens = tagger.tag(tokens)
124
+ if tagset: # Maps to the specified tagset.
125
+ if lang == "eng":
126
+ tagged_tokens = [
127
+ (token, map_tag("en-ptb", tagset, tag))
128
+ for (token, tag) in tagged_tokens
129
+ ]
130
+ elif lang == "rus":
131
+ # Note that the new Russian pos tags from the model contain suffixes,
132
+ # see https://github.com/nltk/nltk/issues/2151#issuecomment-430709018
133
+ tagged_tokens = [
134
+ (token, map_tag("ru-rnc-new", tagset, tag.partition("=")[0]))
135
+ for (token, tag) in tagged_tokens
136
+ ]
137
+ return tagged_tokens
138
+
139
+
140
+ def pos_tag(tokens, tagset=None, lang="eng"):
141
+ """
142
+ Use NLTK's currently recommended part of speech tagger to
143
+ tag the given list of tokens.
144
+
145
+ >>> from nltk.tag import pos_tag
146
+ >>> from nltk.tokenize import word_tokenize
147
+ >>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE
148
+ [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'),
149
+ ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
150
+ >>> pos_tag(word_tokenize("John's big idea isn't all that bad."), tagset='universal') # doctest: +NORMALIZE_WHITESPACE
151
+ [('John', 'NOUN'), ("'s", 'PRT'), ('big', 'ADJ'), ('idea', 'NOUN'), ('is', 'VERB'),
152
+ ("n't", 'ADV'), ('all', 'DET'), ('that', 'DET'), ('bad', 'ADJ'), ('.', '.')]
153
+
154
+ NB. Use `pos_tag_sents()` for efficient tagging of more than one sentence.
155
+
156
+ :param tokens: Sequence of tokens to be tagged
157
+ :type tokens: list(str)
158
+ :param tagset: the tagset to be used, e.g. universal, wsj, brown
159
+ :type tagset: str
160
+ :param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
161
+ :type lang: str
162
+ :return: The tagged tokens
163
+ :rtype: list(tuple(str, str))
164
+ """
165
+ tagger = _get_tagger(lang)
166
+ return _pos_tag(tokens, tagset, tagger, lang)
167
+
168
+
169
+ def pos_tag_sents(sentences, tagset=None, lang="eng"):
170
+ """
171
+ Use NLTK's currently recommended part of speech tagger to tag the
172
+ given list of sentences, each consisting of a list of tokens.
173
+
174
+ :param sentences: List of sentences to be tagged
175
+ :type sentences: list(list(str))
176
+ :param tagset: the tagset to be used, e.g. universal, wsj, brown
177
+ :type tagset: str
178
+ :param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
179
+ :type lang: str
180
+ :return: The list of tagged sentences
181
+ :rtype: list(list(tuple(str, str)))
182
+ """
183
+ tagger = _get_tagger(lang)
184
+ return [_pos_tag(sent, tagset, tagger, lang) for sent in sentences]
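Unlike `pos_tag`, `pos_tag_sents` above carries no doctest, so a short usage sketch may help; it assumes the `punkt` and `averaged_perceptron_tagger` resources are available locally (the `nltk.download` calls are left commented out):

    import nltk
    from nltk.tag import pos_tag_sents
    from nltk.tokenize import word_tokenize

    # One-time resource setup, if not already present:
    # nltk.download("punkt")
    # nltk.download("averaged_perceptron_tagger")

    sents = ["The cat sat on the mat.", "Dogs bark loudly."]
    tagged = pos_tag_sents([word_tokenize(s) for s in sents])
    for sent in tagged:
        print(sent)

Tagging a batch of sentences this way loads the perceptron model once, which is why the `pos_tag` docstring recommends `pos_tag_sents()` for more than one sentence.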
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (7.11 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc ADDED
Binary file (14.4 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc ADDED
Binary file (18.8 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc ADDED
Binary file (7.57 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc ADDED
Binary file (40.9 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc ADDED
Binary file (4.59 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc ADDED
Binary file (3.33 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc ADDED
Binary file (5.03 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc ADDED
Binary file (26.7 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc ADDED
Binary file (7.35 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc ADDED
Binary file (2.42 kB). View file
 
venv/lib/python3.10/site-packages/nltk/tag/api.py ADDED
@@ -0,0 +1,296 @@
1
+ # Natural Language Toolkit: Tagger Interface
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # Tom Aarsen <>
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ Interface for tagging each token in a sentence with supplementary
12
+ information, such as its part of speech.
13
+ """
14
+ from abc import ABCMeta, abstractmethod
15
+ from functools import lru_cache
16
+ from itertools import chain
17
+ from typing import Dict
18
+
19
+ from nltk.internals import deprecated, overridden
20
+ from nltk.metrics import ConfusionMatrix, accuracy
21
+ from nltk.tag.util import untag
22
+
23
+
24
+ class TaggerI(metaclass=ABCMeta):
25
+ """
26
+ A processing interface for assigning a tag to each token in a list.
27
+ Tags are case sensitive strings that identify some property of each
28
+ token, such as its part of speech or its sense.
29
+
30
+ Some taggers require specific types for their tokens. This is
31
+ generally indicated by the use of a sub-interface to ``TaggerI``.
32
+ For example, featureset taggers, which are subclassed from
33
+ ``FeaturesetTagger``, require that each token be a ``featureset``.
34
+
35
+ Subclasses must define:
36
+ - either ``tag()`` or ``tag_sents()`` (or both)
37
+ """
38
+
39
+ @abstractmethod
40
+ def tag(self, tokens):
41
+ """
42
+ Determine the most appropriate tag sequence for the given
43
+ token sequence, and return a corresponding list of tagged
44
+ tokens. A tagged token is encoded as a tuple ``(token, tag)``.
45
+
46
+ :rtype: list(tuple(str, str))
47
+ """
48
+ if overridden(self.tag_sents):
49
+ return self.tag_sents([tokens])[0]
50
+
51
+ def tag_sents(self, sentences):
52
+ """
53
+ Apply ``self.tag()`` to each element of *sentences*. I.e.::
54
+
55
+ return [self.tag(sent) for sent in sentences]
56
+ """
57
+ return [self.tag(sent) for sent in sentences]
58
+
59
+ @deprecated("Use accuracy(gold) instead.")
60
+ def evaluate(self, gold):
61
+ return self.accuracy(gold)
62
+
63
+ def accuracy(self, gold):
64
+ """
65
+ Score the accuracy of the tagger against the gold standard.
66
+ Strip the tags from the gold standard text, retag it using
67
+ the tagger, then compute the accuracy score.
68
+
69
+ :param gold: The list of tagged sentences to score the tagger on.
70
+ :type gold: list(list(tuple(str, str)))
71
+ :rtype: float
72
+ """
73
+
74
+ tagged_sents = self.tag_sents(untag(sent) for sent in gold)
75
+ gold_tokens = list(chain.from_iterable(gold))
76
+ test_tokens = list(chain.from_iterable(tagged_sents))
77
+ return accuracy(gold_tokens, test_tokens)
78
+
79
+ @lru_cache(maxsize=1)
80
+ def _confusion_cached(self, gold):
81
+ """
82
+ Inner function used after ``gold`` is converted to a
83
+ ``tuple(tuple(tuple(str, str)))``, so that the resulting
84
+ ConfusionMatrix can be cached.
85
+
86
+ :param gold: The list of tagged sentences to run the tagger with,
87
+ also used as the reference values in the generated confusion matrix.
88
+ :type gold: tuple(tuple(tuple(str, str)))
89
+ :rtype: ConfusionMatrix
90
+ """
91
+
92
+ tagged_sents = self.tag_sents(untag(sent) for sent in gold)
93
+ gold_tokens = [token for _word, token in chain.from_iterable(gold)]
94
+ test_tokens = [token for _word, token in chain.from_iterable(tagged_sents)]
95
+ return ConfusionMatrix(gold_tokens, test_tokens)
96
+
97
+ def confusion(self, gold):
98
+ """
99
+ Return a ConfusionMatrix with the tags from ``gold`` as the reference
100
+ values, with the predictions from ``tag_sents`` as the predicted values.
101
+
102
+ >>> from nltk.tag import PerceptronTagger
103
+ >>> from nltk.corpus import treebank
104
+ >>> tagger = PerceptronTagger()
105
+ >>> gold_data = treebank.tagged_sents()[:10]
106
+ >>> print(tagger.confusion(gold_data))
107
+ | - |
108
+ | N |
109
+ | O P |
110
+ | N J J N N P P R R V V V V V W |
111
+ | ' E C C D E I J J J M N N N O R P R B R T V B B B B B D ` |
112
+ | ' , - . C D T X N J R S D N P S S P $ B R P O B D G N P Z T ` |
113
+ -------+----------------------------------------------------------------------------------------------+
114
+ '' | <1> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
115
+ , | .<15> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . |
116
+ -NONE- | . . <.> . . 2 . . . 2 . . . 5 1 . . . . 2 . . . . . . . . . . . |
117
+ . | . . .<10> . . . . . . . . . . . . . . . . . . . . . . . . . . . |
118
+ CC | . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . . . . |
119
+ CD | . . . . . <5> . . . . . . . . . . . . . . . . . . . . . . . . . |
120
+ DT | . . . . . .<20> . . . . . . . . . . . . . . . . . . . . . . . . |
121
+ EX | . . . . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . |
122
+ IN | . . . . . . . .<22> . . . . . . . . . . 3 . . . . . . . . . . . |
123
+ JJ | . . . . . . . . .<16> . . . . 1 . . . . 1 . . . . . . . . . . . |
124
+ JJR | . . . . . . . . . . <.> . . . . . . . . . . . . . . . . . . . . |
125
+ JJS | . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . . |
126
+ MD | . . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . |
127
+ NN | . . . . . . . . . . . . .<28> 1 1 . . . . . . . . . . . . . . . |
128
+ NNP | . . . . . . . . . . . . . .<25> . . . . . . . . . . . . . . . . |
129
+ NNS | . . . . . . . . . . . . . . .<19> . . . . . . . . . . . . . . . |
130
+ POS | . . . . . . . . . . . . . . . . <1> . . . . . . . . . . . . . . |
131
+ PRP | . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . . . |
132
+ PRP$ | . . . . . . . . . . . . . . . . . . <2> . . . . . . . . . . . . |
133
+ RB | . . . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . |
134
+ RBR | . . . . . . . . . . 1 . . . . . . . . . <1> . . . . . . . . . . |
135
+ RP | . . . . . . . . . . . . . . . . . . . . . <1> . . . . . . . . . |
136
+ TO | . . . . . . . . . . . . . . . . . . . . . . <5> . . . . . . . . |
137
+ VB | . . . . . . . . . . . . . . . . . . . . . . . <3> . . . . . . . |
138
+ VBD | . . . . . . . . . . . . . 1 . . . . . . . . . . <6> . . . . . . |
139
+ VBG | . . . . . . . . . . . . . 1 . . . . . . . . . . . <4> . . . . . |
140
+ VBN | . . . . . . . . . . . . . . . . . . . . . . . . 1 . <4> . . . . |
141
+ VBP | . . . . . . . . . . . . . . . . . . . . . . . . . . . <3> . . . |
142
+ VBZ | . . . . . . . . . . . . . . . . . . . . . . . . . . . . <7> . . |
143
+ WDT | . . . . . . . . 2 . . . . . . . . . . . . . . . . . . . . <.> . |
144
+ `` | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <1>|
145
+ -------+----------------------------------------------------------------------------------------------+
146
+ (row = reference; col = test)
147
+ <BLANKLINE>
148
+
149
+ :param gold: The list of tagged sentences to run the tagger with,
150
+ also used as the reference values in the generated confusion matrix.
151
+ :type gold: list(list(tuple(str, str)))
152
+ :rtype: ConfusionMatrix
153
+ """
154
+
155
+ return self._confusion_cached(tuple(tuple(sent) for sent in gold))
156
+
157
+ def recall(self, gold) -> Dict[str, float]:
158
+ """
159
+ Compute the recall for each tag from ``gold`` or from running ``tag``
160
+ on the tokenized sentences from ``gold``. Then, return the dictionary
161
+ with mappings from tag to recall. The recall is defined as:
162
+
163
+ - *r* = true positive / (true positive + false negative)
164
+
165
+ :param gold: The list of tagged sentences to score the tagger on.
166
+ :type gold: list(list(tuple(str, str)))
167
+ :return: A mapping from tags to recall
168
+ :rtype: Dict[str, float]
169
+ """
170
+
171
+ cm = self.confusion(gold)
172
+ return {tag: cm.recall(tag) for tag in cm._values}
173
+
174
+ def precision(self, gold):
175
+ """
176
+ Compute the precision for each tag from ``gold`` or from running ``tag``
177
+ on the tokenized sentences from ``gold``. Then, return the dictionary
178
+ with mappings from tag to precision. The precision is defined as:
179
+
180
+ - *p* = true positive / (true positive + false positive)
181
+
182
+ :param gold: The list of tagged sentences to score the tagger on.
183
+ :type gold: list(list(tuple(str, str)))
184
+ :return: A mapping from tags to precision
185
+ :rtype: Dict[str, float]
186
+ """
187
+
188
+ cm = self.confusion(gold)
189
+ return {tag: cm.precision(tag) for tag in cm._values}
190
+
191
+ def f_measure(self, gold, alpha=0.5):
192
+ """
193
+ Compute the f-measure for each tag from ``gold`` or from running ``tag``
194
+ on the tokenized sentences from ``gold``. Then, return the dictionary
195
+ with mappings from tag to f-measure. The f-measure is the harmonic mean
196
+ of the ``precision`` and ``recall``, weighted by ``alpha``.
197
+ In particular, given the precision *p* and recall *r* defined by:
198
+
199
+ - *p* = true positive / (true positive + false positive)
200
+ - *r* = true positive / (true positive + false negative)
201
+
202
+ The f-measure is:
203
+
204
+ - *1/(alpha/p + (1-alpha)/r)*
205
+
206
+ With ``alpha = 0.5``, this reduces to:
207
+
208
+ - *2pr / (p + r)*
209
+
210
+ :param gold: The list of tagged sentences to score the tagger on.
211
+ :type gold: list(list(tuple(str, str)))
212
+ :param alpha: Ratio of the cost of false negatives compared to false
213
+ positives. Defaults to 0.5, where the costs are equal.
214
+ :type alpha: float
215
+ :return: A mapping from tags to f-measure
216
+ :rtype: Dict[str, float]
217
+ """
218
+ cm = self.confusion(gold)
219
+ return {tag: cm.f_measure(tag, alpha) for tag in cm._values}
220
+
221
+ def evaluate_per_tag(self, gold, alpha=0.5, truncate=None, sort_by_count=False):
222
+ """Tabulate the **recall**, **precision** and **f-measure**
223
+ for each tag from ``gold`` or from running ``tag`` on the tokenized
224
+ sentences from ``gold``.
225
+
226
+ >>> from nltk.tag import PerceptronTagger
227
+ >>> from nltk.corpus import treebank
228
+ >>> tagger = PerceptronTagger()
229
+ >>> gold_data = treebank.tagged_sents()[:10]
230
+ >>> print(tagger.evaluate_per_tag(gold_data))
231
+ Tag | Prec. | Recall | F-measure
232
+ -------+--------+--------+-----------
233
+ '' | 1.0000 | 1.0000 | 1.0000
234
+ , | 1.0000 | 1.0000 | 1.0000
235
+ -NONE- | 0.0000 | 0.0000 | 0.0000
236
+ . | 1.0000 | 1.0000 | 1.0000
237
+ CC | 1.0000 | 1.0000 | 1.0000
238
+ CD | 0.7143 | 1.0000 | 0.8333
239
+ DT | 1.0000 | 1.0000 | 1.0000
240
+ EX | 1.0000 | 1.0000 | 1.0000
241
+ IN | 0.9167 | 0.8800 | 0.8980
242
+ JJ | 0.8889 | 0.8889 | 0.8889
243
+ JJR | 0.0000 | 0.0000 | 0.0000
244
+ JJS | 1.0000 | 1.0000 | 1.0000
245
+ MD | 1.0000 | 1.0000 | 1.0000
246
+ NN | 0.8000 | 0.9333 | 0.8615
247
+ NNP | 0.8929 | 1.0000 | 0.9434
248
+ NNS | 0.9500 | 1.0000 | 0.9744
249
+ POS | 1.0000 | 1.0000 | 1.0000
250
+ PRP | 1.0000 | 1.0000 | 1.0000
251
+ PRP$ | 1.0000 | 1.0000 | 1.0000
252
+ RB | 0.4000 | 1.0000 | 0.5714
253
+ RBR | 1.0000 | 0.5000 | 0.6667
254
+ RP | 1.0000 | 1.0000 | 1.0000
255
+ TO | 1.0000 | 1.0000 | 1.0000
256
+ VB | 1.0000 | 1.0000 | 1.0000
257
+ VBD | 0.8571 | 0.8571 | 0.8571
258
+ VBG | 1.0000 | 0.8000 | 0.8889
259
+ VBN | 1.0000 | 0.8000 | 0.8889
260
+ VBP | 1.0000 | 1.0000 | 1.0000
261
+ VBZ | 1.0000 | 1.0000 | 1.0000
262
+ WDT | 0.0000 | 0.0000 | 0.0000
263
+ `` | 1.0000 | 1.0000 | 1.0000
264
+ <BLANKLINE>
265
+
266
+ :param gold: The list of tagged sentences to score the tagger on.
267
+ :type gold: list(list(tuple(str, str)))
268
+ :param alpha: Ratio of the cost of false negatives compared to false
269
+ positives, as used in the f-measure computation. Defaults to 0.5,
270
+ where the costs are equal.
271
+ :type alpha: float
272
+ :param truncate: If specified, then only show the specified
273
+ number of values. Any sorting (e.g., sort_by_count)
274
+ will be performed before truncation. Defaults to None
275
+ :type truncate: int, optional
276
+ :param sort_by_count: Whether to sort the outputs on number of
277
+ occurrences of that tag in the ``gold`` data, defaults to False
278
+ :type sort_by_count: bool, optional
279
+ :return: A tabulated recall, precision and f-measure string
280
+ :rtype: str
281
+ """
282
+ cm = self.confusion(gold)
283
+ return cm.evaluate(alpha=alpha, truncate=truncate, sort_by_count=sort_by_count)
284
+
285
+ def _check_params(self, train, model):
286
+ if (train and model) or (not train and not model):
287
+ raise ValueError("Must specify either training data or trained model.")
288
+
289
+
290
+ class FeaturesetTaggerI(TaggerI):
291
+ """
292
+ A tagger that requires tokens to be ``featuresets``. A featureset
293
+ is a dictionary that maps from feature names to feature
294
+ values. See ``nltk.classify`` for more information about features
295
+ and featuresets.
296
+ """
venv/lib/python3.10/site-packages/nltk/tag/brill.py ADDED
@@ -0,0 +1,449 @@
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ from collections import Counter, defaultdict
11
+
12
+ from nltk import jsontags
13
+ from nltk.tag import TaggerI
14
+ from nltk.tbl import Feature, Template
15
+
16
+ ######################################################################
17
+ # Brill Templates
18
+ ######################################################################
19
+
20
+
21
+ @jsontags.register_tag
22
+ class Word(Feature):
23
+ """
24
+ Feature which examines the text (word) of nearby tokens.
25
+ """
26
+
27
+ json_tag = "nltk.tag.brill.Word"
28
+
29
+ @staticmethod
30
+ def extract_property(tokens, index):
31
+ """@return: The given token's text."""
32
+ return tokens[index][0]
33
+
34
+
35
+ @jsontags.register_tag
36
+ class Pos(Feature):
37
+ """
38
+ Feature which examines the tags of nearby tokens.
39
+ """
40
+
41
+ json_tag = "nltk.tag.brill.Pos"
42
+
43
+ @staticmethod
44
+ def extract_property(tokens, index):
45
+ """@return: The given token's tag."""
46
+ return tokens[index][1]
47
+
48
+
49
+ def nltkdemo18():
50
+ """
51
+ Return 18 templates, from the original nltk demo, in multi-feature syntax
52
+ """
53
+ return [
54
+ Template(Pos([-1])),
55
+ Template(Pos([1])),
56
+ Template(Pos([-2])),
57
+ Template(Pos([2])),
58
+ Template(Pos([-2, -1])),
59
+ Template(Pos([1, 2])),
60
+ Template(Pos([-3, -2, -1])),
61
+ Template(Pos([1, 2, 3])),
62
+ Template(Pos([-1]), Pos([1])),
63
+ Template(Word([-1])),
64
+ Template(Word([1])),
65
+ Template(Word([-2])),
66
+ Template(Word([2])),
67
+ Template(Word([-2, -1])),
68
+ Template(Word([1, 2])),
69
+ Template(Word([-3, -2, -1])),
70
+ Template(Word([1, 2, 3])),
71
+ Template(Word([-1]), Word([1])),
72
+ ]
73
+
74
+
75
+ def nltkdemo18plus():
76
+ """
77
+ Return 18 templates, from the original nltk demo, and additionally a few
78
+ multi-feature ones (the motivation is easy comparison with nltkdemo18)
79
+ """
80
+ return nltkdemo18() + [
81
+ Template(Word([-1]), Pos([1])),
82
+ Template(Pos([-1]), Word([1])),
83
+ Template(Word([-1]), Word([0]), Pos([1])),
84
+ Template(Pos([-1]), Word([0]), Word([1])),
85
+ Template(Pos([-1]), Word([0]), Pos([1])),
86
+ ]
87
+
88
+
89
+ def fntbl37():
90
+ """
91
+ Return 37 templates taken from the postagging task of the
92
+ fntbl distribution https://www.cs.jhu.edu/~rflorian/fntbl/
93
+ (37 is after excluding a handful which do not condition on Pos[0];
94
+ fntbl can do that but the current nltk implementation cannot.)
95
+ """
96
+ return [
97
+ Template(Word([0]), Word([1]), Word([2])),
98
+ Template(Word([-1]), Word([0]), Word([1])),
99
+ Template(Word([0]), Word([-1])),
100
+ Template(Word([0]), Word([1])),
101
+ Template(Word([0]), Word([2])),
102
+ Template(Word([0]), Word([-2])),
103
+ Template(Word([1, 2])),
104
+ Template(Word([-2, -1])),
105
+ Template(Word([1, 2, 3])),
106
+ Template(Word([-3, -2, -1])),
107
+ Template(Word([0]), Pos([2])),
108
+ Template(Word([0]), Pos([-2])),
109
+ Template(Word([0]), Pos([1])),
110
+ Template(Word([0]), Pos([-1])),
111
+ Template(Word([0])),
112
+ Template(Word([-2])),
113
+ Template(Word([2])),
114
+ Template(Word([1])),
115
+ Template(Word([-1])),
116
+ Template(Pos([-1]), Pos([1])),
117
+ Template(Pos([1]), Pos([2])),
118
+ Template(Pos([-1]), Pos([-2])),
119
+ Template(Pos([1])),
120
+ Template(Pos([-1])),
121
+ Template(Pos([-2])),
122
+ Template(Pos([2])),
123
+ Template(Pos([1, 2, 3])),
124
+ Template(Pos([1, 2])),
125
+ Template(Pos([-3, -2, -1])),
126
+ Template(Pos([-2, -1])),
127
+ Template(Pos([1]), Word([0]), Word([1])),
128
+ Template(Pos([1]), Word([0]), Word([-1])),
129
+ Template(Pos([-1]), Word([-1]), Word([0])),
130
+ Template(Pos([-1]), Word([0]), Word([1])),
131
+ Template(Pos([-2]), Pos([-1])),
132
+ Template(Pos([1]), Pos([2])),
133
+ Template(Pos([1]), Pos([2]), Word([1])),
134
+ ]
135
+
136
+
137
+ def brill24():
138
+ """
139
+ Return 24 templates of the seminal TBL paper, Brill (1995)
140
+ """
141
+ return [
142
+ Template(Pos([-1])),
143
+ Template(Pos([1])),
144
+ Template(Pos([-2])),
145
+ Template(Pos([2])),
146
+ Template(Pos([-2, -1])),
147
+ Template(Pos([1, 2])),
148
+ Template(Pos([-3, -2, -1])),
149
+ Template(Pos([1, 2, 3])),
150
+ Template(Pos([-1]), Pos([1])),
151
+ Template(Pos([-2]), Pos([-1])),
152
+ Template(Pos([1]), Pos([2])),
153
+ Template(Word([-1])),
154
+ Template(Word([1])),
155
+ Template(Word([-2])),
156
+ Template(Word([2])),
157
+ Template(Word([-2, -1])),
158
+ Template(Word([1, 2])),
159
+ Template(Word([-1, 0])),
160
+ Template(Word([0, 1])),
161
+ Template(Word([0])),
162
+ Template(Word([-1]), Pos([-1])),
163
+ Template(Word([1]), Pos([1])),
164
+ Template(Word([0]), Word([-1]), Pos([-1])),
165
+ Template(Word([0]), Word([1]), Pos([1])),
166
+ ]
167
+
168
+
169
+ def describe_template_sets():
170
+ """
171
+ Print the available template sets in this demo, with a short description
172
+ """
173
+ import inspect
174
+ import sys
175
+
176
+ # a bit of magic to get all functions in this module
177
+ templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
178
+ for (name, obj) in templatesets:
179
+ if name == "describe_template_sets":
180
+ continue
181
+ print(name, obj.__doc__, "\n")
182
+
183
+
184
+ ######################################################################
185
+ # The Brill Tagger
186
+ ######################################################################
187
+
188
+
189
+ @jsontags.register_tag
190
+ class BrillTagger(TaggerI):
191
+ """
192
+ Brill's transformational rule-based tagger. Brill taggers use an
193
+ initial tagger (such as ``tag.DefaultTagger``) to assign an initial
194
+ tag sequence to a text; and then apply an ordered list of
195
+ transformational rules to correct the tags of individual tokens.
196
+ These transformation rules are specified by the ``TagRule``
197
+ interface.
198
+
199
+ Brill taggers can be created directly, from an initial tagger and
200
+ a list of transformational rules; but more often, Brill taggers
201
+ are created by learning rules from a training corpus, using one
202
+ of the TaggerTrainers available.
203
+ """
204
+
205
+ json_tag = "nltk.tag.BrillTagger"
206
+
207
+ def __init__(self, initial_tagger, rules, training_stats=None):
208
+ """
209
+ :param initial_tagger: The initial tagger
210
+ :type initial_tagger: TaggerI
211
+
212
+ :param rules: An ordered list of transformation rules that
213
+ should be used to correct the initial tagging.
214
+ :type rules: list(TagRule)
215
+
216
+ :param training_stats: A dictionary of statistics collected
217
+ during training, for possible later use
218
+ :type training_stats: dict
219
+
220
+ """
221
+ self._initial_tagger = initial_tagger
222
+ self._rules = tuple(rules)
223
+ self._training_stats = training_stats
224
+
225
+ def encode_json_obj(self):
226
+ return self._initial_tagger, self._rules, self._training_stats
227
+
228
+ @classmethod
229
+ def decode_json_obj(cls, obj):
230
+ _initial_tagger, _rules, _training_stats = obj
231
+ return cls(_initial_tagger, _rules, _training_stats)
232
+
233
+ def rules(self):
234
+ """
235
+ Return the ordered list of transformation rules that this tagger has learnt
236
+
237
+ :return: the ordered list of transformation rules that correct the initial tagging
238
+ :rtype: list of Rules
239
+ """
240
+ return self._rules
241
+
242
+ def train_stats(self, statistic=None):
243
+ """
244
+ Return a named statistic collected during training, or a dictionary of all
245
+ available statistics if no name given
246
+
247
+ :param statistic: name of statistic
248
+ :type statistic: str
249
+ :return: some statistic collected during training of this tagger
250
+ :rtype: any (but usually a number)
251
+ """
252
+ if statistic is None:
253
+ return self._training_stats
254
+ else:
255
+ return self._training_stats.get(statistic)
256
+
257
+ def tag(self, tokens):
258
+ # Inherit documentation from TaggerI
259
+
260
+ # Run the initial tagger.
261
+ tagged_tokens = self._initial_tagger.tag(tokens)
262
+
263
+ # Create a dictionary that maps each tag to a list of the
264
+ # indices of tokens that have that tag.
265
+ tag_to_positions = defaultdict(set)
266
+ for i, (token, tag) in enumerate(tagged_tokens):
267
+ tag_to_positions[tag].add(i)
268
+
269
+ # Apply each rule, in order. Only try to apply rules at
270
+ # positions that have the desired original tag.
271
+ for rule in self._rules:
272
+ # Find the positions where it might apply
273
+ positions = tag_to_positions.get(rule.original_tag, [])
274
+ # Apply the rule at those positions.
275
+ changed = rule.apply(tagged_tokens, positions)
276
+ # Update tag_to_positions with the positions of tags that
277
+ # were modified.
278
+ for i in changed:
279
+ tag_to_positions[rule.original_tag].remove(i)
280
+ tag_to_positions[rule.replacement_tag].add(i)
281
+
282
+ return tagged_tokens
283
+
284
+ def print_template_statistics(self, test_stats=None, printunused=True):
285
+ """
286
+ Print a list of all templates, ranked according to efficiency.
287
+
288
+ If test_stats is available, the templates are ranked according to their
289
+ relative contribution (summed for all rules created from a given template,
290
+ weighted by score) to the performance on the test set. If no test_stats, then
291
+ statistics collected during training are used instead. There is also
292
+ an unweighted measure (just counting the rules). This is less informative,
293
+ though, as many low-score rules will appear towards the end of training.
294
+
295
+ :param test_stats: dictionary of statistics collected during testing
296
+ :type test_stats: dict of str -> any (but usually numbers)
297
+ :param printunused: if True, print a list of all unused templates
298
+ :type printunused: bool
299
+ :return: None
300
+ :rtype: None
301
+ """
302
+ tids = [r.templateid for r in self._rules]
303
+ train_stats = self.train_stats()
304
+
305
+ trainscores = train_stats["rulescores"]
306
+ assert len(trainscores) == len(
307
+ tids
308
+ ), "corrupt statistics: " "{} train scores for {} rules".format(
309
+ trainscores, tids
310
+ )
311
+ template_counts = Counter(tids)
312
+ weighted_traincounts = Counter()
313
+ for (tid, score) in zip(tids, trainscores):
314
+ weighted_traincounts[tid] += score
315
+ tottrainscores = sum(trainscores)
316
+
317
+ # det_tplsort() is for deterministic sorting;
318
+ # the otherwise convenient Counter.most_common() unfortunately
319
+ # does not break ties deterministically
320
+ # between python versions and will break cross-version tests
321
+ def det_tplsort(tpl_value):
322
+ return (tpl_value[1], repr(tpl_value[0]))
323
+
324
+ def print_train_stats():
325
+ print(
326
+ "TEMPLATE STATISTICS (TRAIN) {} templates, {} rules)".format(
327
+ len(template_counts), len(tids)
328
+ )
329
+ )
330
+ print(
331
+ "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
332
+ "final: {finalerrors:5d} {finalacc:.4f}".format(**train_stats)
333
+ )
334
+ head = "#ID | Score (train) | #Rules | Template"
335
+ print(head, "\n", "-" * len(head), sep="")
336
+ train_tplscores = sorted(
337
+ weighted_traincounts.items(), key=det_tplsort, reverse=True
338
+ )
339
+ for (tid, trainscore) in train_tplscores:
340
+ s = "{} | {:5d} {:5.3f} |{:4d} {:.3f} | {}".format(
341
+ tid,
342
+ trainscore,
343
+ trainscore / tottrainscores,
344
+ template_counts[tid],
345
+ template_counts[tid] / len(tids),
346
+ Template.ALLTEMPLATES[int(tid)],
347
+ )
348
+ print(s)
349
+
350
+ def print_testtrain_stats():
351
+ testscores = test_stats["rulescores"]
352
+ print(
353
+ "TEMPLATE STATISTICS (TEST AND TRAIN) ({} templates, {} rules)".format(
354
+ len(template_counts), len(tids)
355
+ )
356
+ )
357
+ print(
358
+ "TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
359
+ "final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats)
360
+ )
361
+ print(
362
+ "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
363
+ "final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats)
364
+ )
365
+ weighted_testcounts = Counter()
366
+ for (tid, score) in zip(tids, testscores):
367
+ weighted_testcounts[tid] += score
368
+ tottestscores = sum(testscores)
369
+ head = "#ID | Score (test) | Score (train) | #Rules | Template"
370
+ print(head, "\n", "-" * len(head), sep="")
371
+ test_tplscores = sorted(
372
+ weighted_testcounts.items(), key=det_tplsort, reverse=True
373
+ )
374
+ for (tid, testscore) in test_tplscores:
375
+ s = "{:s} |{:5d} {:6.3f} | {:4d} {:.3f} |{:4d} {:.3f} | {:s}".format(
376
+ tid,
377
+ testscore,
378
+ testscore / tottestscores,
379
+ weighted_traincounts[tid],
380
+ weighted_traincounts[tid] / tottrainscores,
381
+ template_counts[tid],
382
+ template_counts[tid] / len(tids),
383
+ Template.ALLTEMPLATES[int(tid)],
384
+ )
385
+ print(s)
386
+
387
+ def print_unused_templates():
388
+ usedtpls = {int(tid) for tid in tids}
389
+ unused = [
390
+ (tid, tpl)
391
+ for (tid, tpl) in enumerate(Template.ALLTEMPLATES)
392
+ if tid not in usedtpls
393
+ ]
394
+ print(f"UNUSED TEMPLATES ({len(unused)})")
395
+
396
+ for (tid, tpl) in unused:
397
+ print(f"{tid:03d} {str(tpl):s}")
398
+
399
+ if test_stats is None:
400
+ print_train_stats()
401
+ else:
402
+ print_testtrain_stats()
403
+ print()
404
+ if printunused:
405
+ print_unused_templates()
406
+ print()
407
+
408
+ def batch_tag_incremental(self, sequences, gold):
409
+ """
410
+ Tags by applying each rule to the entire corpus (rather than all rules to a
411
+ single sequence). The point is to collect statistics on the test set for
412
+ individual rules.
413
+
414
+ NOTE: This is inefficient (does not build any index, so will traverse the entire
415
+ corpus N times for N rules) -- usually you would not care about statistics for
416
+ individual rules and thus use batch_tag() instead
417
+
418
+ :param sequences: lists of token sequences (sentences, in some applications) to be tagged
419
+ :type sequences: list of list of strings
420
+ :param gold: the gold standard
421
+ :type gold: list of list of strings
422
+ :returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule))
423
+ """
424
+
425
+ def counterrors(xs):
426
+ return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair))
427
+
428
+ testing_stats = {}
429
+ testing_stats["tokencount"] = sum(len(t) for t in sequences)
430
+ testing_stats["sequencecount"] = len(sequences)
431
+ tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences]
432
+ testing_stats["initialerrors"] = counterrors(tagged_tokenses)
433
+ testing_stats["initialacc"] = (
434
+ 1 - testing_stats["initialerrors"] / testing_stats["tokencount"]
435
+ )
436
+ # Apply each rule to the entire corpus, in order
437
+ errors = [testing_stats["initialerrors"]]
438
+ for rule in self._rules:
439
+ for tagged_tokens in tagged_tokenses:
440
+ rule.apply(tagged_tokens)
441
+ errors.append(counterrors(tagged_tokenses))
442
+ testing_stats["rulescores"] = [
443
+ err0 - err1 for (err0, err1) in zip(errors, errors[1:])
444
+ ]
445
+ testing_stats["finalerrors"] = errors[-1]
446
+ testing_stats["finalacc"] = (
447
+ 1 - testing_stats["finalerrors"] / testing_stats["tokencount"]
448
+ )
449
+ return (tagged_tokenses, testing_stats)
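The template-set factories near the top of this file (``nltkdemo18``, ``nltkdemo18plus``, ``fntbl37``, ``brill24``) simply return lists of ``nltk.tbl.Template`` objects, and ``describe_template_sets`` prints each factory's name and docstring. A quick sketch:

    from nltk.tag.brill import brill24, fntbl37, describe_template_sets

    print(len(brill24()))   # 24 templates, as in Brill (1995)
    print(len(fntbl37()))   # 37 templates from the fntbl distribution
    describe_template_sets()

Training with any of these sets follows the `BrillTaggerTrainer` doctest in the next file: pass the template list, together with a baseline tagger, to the trainer.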
venv/lib/python3.10/site-packages/nltk/tag/brill_trainer.py ADDED
@@ -0,0 +1,629 @@
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2013 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ import bisect
11
+ import textwrap
12
+ from collections import defaultdict
13
+
14
+ from nltk.tag import BrillTagger, untag
15
+
16
+ ######################################################################
17
+ # Brill Tagger Trainer
18
+ ######################################################################
19
+
20
+
21
+ class BrillTaggerTrainer:
22
+ """
23
+ A trainer for tbl taggers.
24
+ """
25
+
26
+ def __init__(
27
+ self, initial_tagger, templates, trace=0, deterministic=None, ruleformat="str"
28
+ ):
29
+ """
30
+ Construct a Brill tagger from a baseline tagger and a
31
+ set of templates
32
+
33
+ :param initial_tagger: the baseline tagger
34
+ :type initial_tagger: Tagger
35
+ :param templates: templates to be used in training
36
+ :type templates: list of Templates
37
+ :param trace: verbosity level
38
+ :type trace: int
39
+ :param deterministic: if True, adjudicate ties deterministically
40
+ :type deterministic: bool
41
+ :param ruleformat: format of reported Rules
42
+ :type ruleformat: str
43
+ :return: An untrained BrillTagger
44
+ :rtype: BrillTagger
45
+ """
46
+
47
+ if deterministic is None:
48
+ deterministic = trace > 0
49
+ self._initial_tagger = initial_tagger
50
+ self._templates = templates
51
+ self._trace = trace
52
+ self._deterministic = deterministic
53
+ self._ruleformat = ruleformat
54
+
55
+ self._tag_positions = None
56
+ """Mapping from tags to lists of positions that use that tag."""
57
+
58
+ self._rules_by_position = None
59
+ """Mapping from positions to the set of rules that are known
60
+ to occur at that position. Position is (sentnum, wordnum).
61
+ Initially, this will only contain positions where each rule
62
+ applies in a helpful way; but when we examine a rule, we'll
63
+ extend this list to also include positions where each rule
64
+ applies in a harmful or neutral way."""
65
+
66
+ self._positions_by_rule = None
67
+ """Mapping from rule to position to effect, specifying the
68
+ effect that each rule has on the overall score, at each
69
+ position. Position is (sentnum, wordnum); and effect is
70
+ -1, 0, or 1. As with _rules_by_position, this mapping starts
71
+ out only containing rules with positive effects; but when
72
+ we examine a rule, we'll extend this mapping to include
73
+ the positions where the rule is harmful or neutral."""
74
+
75
+ self._rules_by_score = None
76
+ """Mapping from scores to the set of rules whose effect on the
77
+ overall score is upper bounded by that score. Invariant:
78
+ rulesByScore[s] will contain r iff the sum of
79
+ _positions_by_rule[r] is s."""
80
+
81
+ self._rule_scores = None
82
+ """Mapping from rules to upper bounds on their effects on the
83
+ overall score. This is the inverse mapping to _rules_by_score.
84
+ Invariant: ruleScores[r] = sum(_positions_by_rule[r])"""
85
+
86
+ self._first_unknown_position = None
87
+ """Mapping from rules to the first position where we're unsure
88
+ if the rule applies. This records the next position we
89
+ need to check to see if the rule messed anything up."""
90
+
91
+ # Training
92
+
93
+ def train(self, train_sents, max_rules=200, min_score=2, min_acc=None):
94
+ r"""
95
+ Trains the Brill tagger on the corpus *train_sents*,
96
+ producing at most *max_rules* transformations, each of which
97
+ reduces the net number of errors in the corpus by at least
98
+ *min_score*, and each of which has accuracy not lower than
99
+ *min_acc*.
100
+
101
+ >>> # Relevant imports
102
+ >>> from nltk.tbl.template import Template
103
+ >>> from nltk.tag.brill import Pos, Word
104
+ >>> from nltk.tag import untag, RegexpTagger, BrillTaggerTrainer
105
+
106
+ >>> # Load some data
107
+ >>> from nltk.corpus import treebank
108
+ >>> training_data = treebank.tagged_sents()[:100]
109
+ >>> baseline_data = treebank.tagged_sents()[100:200]
110
+ >>> gold_data = treebank.tagged_sents()[200:300]
111
+ >>> testing_data = [untag(s) for s in gold_data]
112
+
113
+ >>> backoff = RegexpTagger([
114
+ ... (r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), # cardinal numbers
115
+ ... (r'(The|the|A|a|An|an)$', 'AT'), # articles
116
+ ... (r'.*able$', 'JJ'), # adjectives
117
+ ... (r'.*ness$', 'NN'), # nouns formed from adjectives
118
+ ... (r'.*ly$', 'RB'), # adverbs
119
+ ... (r'.*s$', 'NNS'), # plural nouns
120
+ ... (r'.*ing$', 'VBG'), # gerunds
121
+ ... (r'.*ed$', 'VBD'), # past tense verbs
122
+ ... (r'.*', 'NN') # nouns (default)
123
+ ... ])
124
+
125
+ >>> baseline = backoff #see NOTE1
126
+ >>> baseline.accuracy(gold_data) #doctest: +ELLIPSIS
127
+ 0.243...
128
+
129
+ >>> # Set up templates
130
+ >>> Template._cleartemplates() #clear any templates created in earlier tests
131
+ >>> templates = [Template(Pos([-1])), Template(Pos([-1]), Word([0]))]
132
+
133
+ >>> # Construct a BrillTaggerTrainer
134
+ >>> tt = BrillTaggerTrainer(baseline, templates, trace=3)
135
+
136
+ >>> tagger1 = tt.train(training_data, max_rules=10)
137
+ TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: None)
138
+ Finding initial useful rules...
139
+ Found 847 useful rules.
140
+ <BLANKLINE>
141
+ B |
142
+ S F r O | Score = Fixed - Broken
143
+ c i o t | R Fixed = num tags changed incorrect -> correct
144
+ o x k h | u Broken = num tags changed correct -> incorrect
145
+ r e e e | l Other = num tags changed incorrect -> incorrect
146
+ e d n r | e
147
+ ------------------+-------------------------------------------------------
148
+ 132 132 0 0 | AT->DT if Pos:NN@[-1]
149
+ 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0]
150
+ 69 69 0 0 | NN->. if Pos:NN@[-1] & Word:.@[0]
151
+ 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0]
152
+ 47 63 16 162 | NN->IN if Pos:NNS@[-1]
153
+ 33 33 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0]
154
+ 26 26 0 0 | IN->. if Pos:NNS@[-1] & Word:.@[0]
155
+ 24 24 0 0 | IN->, if Pos:NNS@[-1] & Word:,@[0]
156
+ 22 27 5 24 | NN->-NONE- if Pos:VBD@[-1]
157
+ 17 17 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0]
158
+
159
+ >>> tagger1.rules()[1:3]
160
+ (Rule('001', 'NN', ',', [(Pos([-1]),'NN'), (Word([0]),',')]), Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')]))
161
+
162
+ >>> train_stats = tagger1.train_stats()
163
+ >>> [train_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']]
164
+ [1776, 1270, [132, 85, 69, 51, 47, 33, 26, 24, 22, 17]]
165
+
166
+ >>> tagger1.print_template_statistics(printunused=False)
167
+ TEMPLATE STATISTICS (TRAIN) 2 templates, 10 rules)
168
+ TRAIN ( 2417 tokens) initial 1776 0.2652 final: 1270 0.4746
169
+ #ID | Score (train) | #Rules | Template
170
+ --------------------------------------------
171
+ 001 | 305 0.603 | 7 0.700 | Template(Pos([-1]),Word([0]))
172
+ 000 | 201 0.397 | 3 0.300 | Template(Pos([-1]))
173
+ <BLANKLINE>
174
+ <BLANKLINE>
175
+
176
+ >>> round(tagger1.accuracy(gold_data),5)
177
+ 0.43834
178
+
179
+ >>> tagged, test_stats = tagger1.batch_tag_incremental(testing_data, gold_data)
180
+
181
+ >>> tagged[33][12:] == [('foreign', 'IN'), ('debt', 'NN'), ('of', 'IN'), ('$', 'NN'), ('64', 'CD'),
182
+ ... ('billion', 'NN'), ('*U*', 'NN'), ('--', 'NN'), ('the', 'DT'), ('third-highest', 'NN'), ('in', 'NN'),
183
+ ... ('the', 'DT'), ('developing', 'VBG'), ('world', 'NN'), ('.', '.')]
184
+ True
185
+
186
+ >>> [test_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']]
187
+ [1859, 1380, [100, 85, 67, 58, 27, 36, 27, 16, 31, 32]]
188
+
189
+ >>> # A high-accuracy tagger
190
+ >>> tagger2 = tt.train(training_data, max_rules=10, min_acc=0.99)
191
+ TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: 0.99)
192
+ Finding initial useful rules...
193
+ Found 847 useful rules.
194
+ <BLANKLINE>
195
+ B |
196
+ S F r O | Score = Fixed - Broken
197
+ c i o t | R Fixed = num tags changed incorrect -> correct
198
+ o x k h | u Broken = num tags changed correct -> incorrect
199
+ r e e e | l Other = num tags changed incorrect -> incorrect
200
+ e d n r | e
201
+ ------------------+-------------------------------------------------------
202
+ 132 132 0 0 | AT->DT if Pos:NN@[-1]
203
+ 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0]
204
+ 69 69 0 0 | NN->. if Pos:NN@[-1] & Word:.@[0]
205
+ 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0]
206
+ 36 36 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0]
207
+ 26 26 0 0 | NN->. if Pos:NNS@[-1] & Word:.@[0]
208
+ 24 24 0 0 | NN->, if Pos:NNS@[-1] & Word:,@[0]
209
+ 19 19 0 6 | NN->VB if Pos:TO@[-1]
210
+ 18 18 0 0 | CD->-NONE- if Pos:NN@[-1] & Word:0@[0]
211
+ 18 18 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0]
212
+
213
+ >>> round(tagger2.accuracy(gold_data), 8)
214
+ 0.43996744
215
+
216
+ >>> tagger2.rules()[2:4]
217
+ (Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')]), Rule('001', 'NN', 'IN', [(Pos([-1]),'NN'), (Word([0]),'of')]))
218
+
219
+ # NOTE1: (!!FIXME) A far better baseline uses nltk.tag.UnigramTagger,
220
+ # with a RegexpTagger only as backoff. For instance,
221
+ # >>> baseline = UnigramTagger(baseline_data, backoff=backoff)
222
+ # However, as of Nov 2013, nltk.tag.UnigramTagger does not yield consistent results
223
+ # between python versions. The simplistic backoff above is a workaround to make doctests
224
+ # get consistent input.
225
+
226
+ :param train_sents: training data
227
+ :type train_sents: list(list(tuple))
228
+ :param max_rules: output at most max_rules rules
229
+ :type max_rules: int
230
+ :param min_score: stop training when no rules better than min_score can be found
231
+ :type min_score: int
232
+ :param min_acc: discard any rule with lower accuracy than min_acc
233
+ :type min_acc: float or None
234
+ :return: the learned tagger
235
+ :rtype: BrillTagger
236
+ """
237
+ # FIXME: several tests are a bit too dependent on tracing format
238
+ # FIXME: tests in trainer.fast and trainer.brillorig are exact duplicates
239
+
240
+ # Basic idea: Keep track of the rules that apply at each position.
241
+ # And keep track of the positions to which each rule applies.
242
+
243
+ # Create a new copy of the training corpus, and run the
244
+ # initial tagger on it. We will progressively update this
245
+ # test corpus to look more like the training corpus.
246
+ test_sents = [
247
+ list(self._initial_tagger.tag(untag(sent))) for sent in train_sents
248
+ ]
249
+
250
+ # Collect some statistics on the training process
251
+ trainstats = {}
252
+ trainstats["min_acc"] = min_acc
253
+ trainstats["min_score"] = min_score
254
+ trainstats["tokencount"] = sum(len(t) for t in test_sents)
255
+ trainstats["sequencecount"] = len(test_sents)
256
+ trainstats["templatecount"] = len(self._templates)
257
+ trainstats["rulescores"] = []
258
+ trainstats["initialerrors"] = sum(
259
+ tag[1] != truth[1]
260
+ for paired in zip(test_sents, train_sents)
261
+ for (tag, truth) in zip(*paired)
262
+ )
263
+ trainstats["initialacc"] = (
264
+ 1 - trainstats["initialerrors"] / trainstats["tokencount"]
265
+ )
266
+ if self._trace > 0:
267
+ print(
268
+ "TBL train (fast) (seqs: {sequencecount}; tokens: {tokencount}; "
269
+ "tpls: {templatecount}; min score: {min_score}; min acc: {min_acc})".format(
270
+ **trainstats
271
+ )
272
+ )
273
+
274
+ # Initialize our mappings. This will find any errors made
275
+ # by the initial tagger, and use those to generate repair
276
+ # rules, which are added to the rule mappings.
277
+ if self._trace:
278
+ print("Finding initial useful rules...")
279
+ self._init_mappings(test_sents, train_sents)
280
+ if self._trace:
281
+ print(f" Found {len(self._rule_scores)} useful rules.")
282
+
283
+ # Let the user know what we're up to.
284
+ if self._trace > 2:
285
+ self._trace_header()
286
+ elif self._trace == 1:
287
+ print("Selecting rules...")
288
+
289
+ # Repeatedly select the best rule, and add it to `rules`.
290
+ rules = []
291
+ try:
292
+ while len(rules) < max_rules:
293
+ # Find the best rule, and add it to our rule list.
294
+ rule = self._best_rule(train_sents, test_sents, min_score, min_acc)
295
+ if rule:
296
+ rules.append(rule)
297
+ score = self._rule_scores[rule]
298
+ trainstats["rulescores"].append(score)
299
+ else:
300
+ break # No more good rules left!
301
+
302
+ # Report the rule that we found.
303
+ if self._trace > 1:
304
+ self._trace_rule(rule)
305
+
306
+ # Apply the new rule at the relevant sites
307
+ self._apply_rule(rule, test_sents)
308
+
309
+ # Update _tag_positions[rule.original_tag] and
310
+ # _tag_positions[rule.replacement_tag] for the affected
311
+ # positions (i.e., self._positions_by_rule[rule]).
312
+ self._update_tag_positions(rule)
313
+
314
+ # Update rules that were affected by the change.
315
+ self._update_rules(rule, train_sents, test_sents)
316
+
317
+ # The user can cancel training manually:
318
+ except KeyboardInterrupt:
319
+ print(f"Training stopped manually -- {len(rules)} rules found")
320
+
321
+ # Discard our tag position mapping & rule mappings.
322
+ self._clean()
323
+ trainstats["finalerrors"] = trainstats["initialerrors"] - sum(
324
+ trainstats["rulescores"]
325
+ )
326
+ trainstats["finalacc"] = (
327
+ 1 - trainstats["finalerrors"] / trainstats["tokencount"]
328
+ )
329
+ # Create and return a tagger from the rules we found.
330
+ return BrillTagger(self._initial_tagger, rules, trainstats)
331
+
332
+ def _init_mappings(self, test_sents, train_sents):
333
+ """
334
+ Initialize the tag position mapping & the rule related
335
+ mappings. For each error in test_sents, find new rules that
336
+ would correct them, and add them to the rule mappings.
337
+ """
338
+ self._tag_positions = defaultdict(list)
339
+ self._rules_by_position = defaultdict(set)
340
+ self._positions_by_rule = defaultdict(dict)
341
+ self._rules_by_score = defaultdict(set)
342
+ self._rule_scores = defaultdict(int)
343
+ self._first_unknown_position = defaultdict(int)
344
+ # Scan through the corpus, initializing the tag_positions
345
+ # mapping and all the rule-related mappings.
346
+ for sentnum, sent in enumerate(test_sents):
347
+ for wordnum, (word, tag) in enumerate(sent):
348
+
349
+ # Initialize tag_positions
350
+ self._tag_positions[tag].append((sentnum, wordnum))
351
+
352
+ # If it's an error token, update the rule-related mappings.
353
+ correct_tag = train_sents[sentnum][wordnum][1]
354
+ if tag != correct_tag:
355
+ for rule in self._find_rules(sent, wordnum, correct_tag):
356
+ self._update_rule_applies(rule, sentnum, wordnum, train_sents)
357
+
358
+ def _clean(self):
359
+ self._tag_positions = None
360
+ self._rules_by_position = None
361
+ self._positions_by_rule = None
362
+ self._rules_by_score = None
363
+ self._rule_scores = None
364
+ self._first_unknown_position = None
365
+
366
+ def _find_rules(self, sent, wordnum, new_tag):
367
+ """
368
+ Use the templates to find rules that apply at index *wordnum*
369
+ in the sentence *sent* and generate the tag *new_tag*.
370
+ """
371
+ for template in self._templates:
372
+ yield from template.applicable_rules(sent, wordnum, new_tag)
373
+
374
+ def _update_rule_applies(self, rule, sentnum, wordnum, train_sents):
375
+ """
376
+ Update the rule data tables to reflect the fact that
377
+ *rule* applies at the position *(sentnum, wordnum)*.
378
+ """
379
+ pos = sentnum, wordnum
380
+
381
+ # If the rule is already known to apply here, ignore.
382
+ # (This only happens if the position's tag hasn't changed.)
383
+ if pos in self._positions_by_rule[rule]:
384
+ return
385
+
386
+ # Update self._positions_by_rule.
387
+ correct_tag = train_sents[sentnum][wordnum][1]
388
+ if rule.replacement_tag == correct_tag:
389
+ self._positions_by_rule[rule][pos] = 1
390
+ elif rule.original_tag == correct_tag:
391
+ self._positions_by_rule[rule][pos] = -1
392
+ else: # was wrong, remains wrong
393
+ self._positions_by_rule[rule][pos] = 0
394
+
395
+ # Update _rules_by_position
396
+ self._rules_by_position[pos].add(rule)
397
+
398
+ # Update _rule_scores.
399
+ old_score = self._rule_scores[rule]
400
+ self._rule_scores[rule] += self._positions_by_rule[rule][pos]
401
+
402
+ # Update _rules_by_score.
403
+ self._rules_by_score[old_score].discard(rule)
404
+ self._rules_by_score[self._rule_scores[rule]].add(rule)
405
+
406
+ def _update_rule_not_applies(self, rule, sentnum, wordnum):
407
+ """
408
+ Update the rule data tables to reflect the fact that *rule*
409
+ does not apply at the position *(sentnum, wordnum)*.
410
+ """
411
+ pos = sentnum, wordnum
412
+
413
+ # Update _rule_scores.
414
+ old_score = self._rule_scores[rule]
415
+ self._rule_scores[rule] -= self._positions_by_rule[rule][pos]
416
+
417
+ # Update _rules_by_score.
418
+ self._rules_by_score[old_score].discard(rule)
419
+ self._rules_by_score[self._rule_scores[rule]].add(rule)
420
+
421
+ # Update _positions_by_rule
422
+ del self._positions_by_rule[rule][pos]
423
+ self._rules_by_position[pos].remove(rule)
424
+
425
+ # Optional addition: if the rule now applies nowhere, delete
426
+ # all its dictionary entries.
427
+
428
+ def _best_rule(self, train_sents, test_sents, min_score, min_acc):
429
+ """
430
+ Find the next best rule. This is done by repeatedly taking a
431
+ rule with the highest score and stepping through the corpus to
432
+ see where it applies. When it makes an error (decreasing its
433
+ score) it's bumped down, and we try a new rule with the
434
+ highest score. When we find a rule which has the highest
435
+ score *and* which has been tested against the entire corpus, we
436
+ can conclude that it's the next best rule.
437
+ """
438
+ for max_score in sorted(self._rules_by_score.keys(), reverse=True):
439
+ if len(self._rules_by_score) == 0:
440
+ return None
441
+ if max_score < min_score or max_score <= 0:
442
+ return None
443
+ best_rules = list(self._rules_by_score[max_score])
444
+ if self._deterministic:
445
+ best_rules.sort(key=repr)
446
+ for rule in best_rules:
447
+ positions = self._tag_positions[rule.original_tag]
448
+
449
+ unk = self._first_unknown_position.get(rule, (0, -1))
450
+ start = bisect.bisect_left(positions, unk)
451
+
452
+ for i in range(start, len(positions)):
453
+ sentnum, wordnum = positions[i]
454
+ if rule.applies(test_sents[sentnum], wordnum):
455
+ self._update_rule_applies(rule, sentnum, wordnum, train_sents)
456
+ if self._rule_scores[rule] < max_score:
457
+ self._first_unknown_position[rule] = (sentnum, wordnum + 1)
458
+ break # The update demoted the rule.
459
+
460
+ if self._rule_scores[rule] == max_score:
461
+ self._first_unknown_position[rule] = (len(train_sents) + 1, 0)
462
+ # optimization: if no min_acc threshold given, don't bother computing accuracy
463
+ if min_acc is None:
464
+ return rule
465
+ else:
466
+ changes = self._positions_by_rule[rule].values()
467
+ num_fixed = len([c for c in changes if c == 1])
468
+ num_broken = len([c for c in changes if c == -1])
469
+ # acc here is fixed/(fixed+broken); could also be
470
+ # fixed/(fixed+broken+other) == num_fixed/len(changes)
471
+ acc = num_fixed / (num_fixed + num_broken)
472
+ if acc >= min_acc:
473
+ return rule
474
+ # else: rule too inaccurate, discard and try next
475
+
476
+ # We demoted (or skipped due to < min_acc, if that was given)
477
+ # all the rules with score==max_score.
478
+
479
+ assert min_acc is not None or not self._rules_by_score[max_score]
480
+ if not self._rules_by_score[max_score]:
481
+ del self._rules_by_score[max_score]
482
+
483
+ def _apply_rule(self, rule, test_sents):
484
+ """
485
+ Update *test_sents* by applying *rule* everywhere where its
486
+ conditions are met.
487
+ """
488
+ update_positions = set(self._positions_by_rule[rule])
489
+ new_tag = rule.replacement_tag
490
+
491
+ if self._trace > 3:
492
+ self._trace_apply(len(update_positions))
493
+
494
+ # Update test_sents.
495
+ for (sentnum, wordnum) in update_positions:
496
+ text = test_sents[sentnum][wordnum][0]
497
+ test_sents[sentnum][wordnum] = (text, new_tag)
498
+
499
+ def _update_tag_positions(self, rule):
500
+ """
501
+ Update _tag_positions to reflect the changes to tags that are
502
+ made by *rule*.
503
+ """
504
+ # Update the tag index.
505
+ for pos in self._positions_by_rule[rule]:
506
+ # Delete the old tag.
507
+ old_tag_positions = self._tag_positions[rule.original_tag]
508
+ old_index = bisect.bisect_left(old_tag_positions, pos)
509
+ del old_tag_positions[old_index]
510
+ # Insert the new tag.
511
+ new_tag_positions = self._tag_positions[rule.replacement_tag]
512
+ bisect.insort_left(new_tag_positions, pos)
513
+
514
+ def _update_rules(self, rule, train_sents, test_sents):
515
+ """
516
+ Check if we should add or remove any rules from consideration,
517
+ given the changes made by *rule*.
518
+ """
519
+ # Collect a list of all positions that might be affected.
520
+ neighbors = set()
521
+ for sentnum, wordnum in self._positions_by_rule[rule]:
522
+ for template in self._templates:
523
+ n = template.get_neighborhood(test_sents[sentnum], wordnum)
524
+ neighbors.update([(sentnum, i) for i in n])
525
+
526
+ # Update the rules at each position.
527
+ num_obsolete = num_new = num_unseen = 0
528
+ for sentnum, wordnum in neighbors:
529
+ test_sent = test_sents[sentnum]
530
+ correct_tag = train_sents[sentnum][wordnum][1]
531
+
532
+ # Check if the change causes any rule at this position to
533
+ # stop matching; if so, then update our rule mappings
534
+ # accordingly.
535
+ old_rules = set(self._rules_by_position[sentnum, wordnum])
536
+ for old_rule in old_rules:
537
+ if not old_rule.applies(test_sent, wordnum):
538
+ num_obsolete += 1
539
+ self._update_rule_not_applies(old_rule, sentnum, wordnum)
540
+
541
+ # Check if the change causes our templates to propose any
542
+ # new rules for this position.
543
+ for template in self._templates:
544
+ for new_rule in template.applicable_rules(
545
+ test_sent, wordnum, correct_tag
546
+ ):
547
+ if new_rule not in old_rules:
548
+ num_new += 1
549
+ if new_rule not in self._rule_scores:
550
+ num_unseen += 1
551
+ old_rules.add(new_rule)
552
+ self._update_rule_applies(
553
+ new_rule, sentnum, wordnum, train_sents
554
+ )
555
+
556
+ # We may have caused other rules to match here, that are
557
+ # not proposed by our templates -- in particular, rules
558
+ # that are harmful or neutral. We therefore need to
559
+ # update any rule whose first_unknown_position is past
560
+ # this position.
561
+ for new_rule, pos in self._first_unknown_position.items():
562
+ if pos > (sentnum, wordnum):
563
+ if new_rule not in old_rules:
564
+ num_new += 1
565
+ if new_rule.applies(test_sent, wordnum):
566
+ self._update_rule_applies(
567
+ new_rule, sentnum, wordnum, train_sents
568
+ )
569
+
570
+ if self._trace > 3:
571
+ self._trace_update_rules(num_obsolete, num_new, num_unseen)
572
+
573
+ # Tracing
574
+
575
+ def _trace_header(self):
576
+ print(
577
+ """
578
+ B |
579
+ S F r O | Score = Fixed - Broken
580
+ c i o t | R Fixed = num tags changed incorrect -> correct
581
+ o x k h | u Broken = num tags changed correct -> incorrect
582
+ r e e e | l Other = num tags changed incorrect -> incorrect
583
+ e d n r | e
584
+ ------------------+-------------------------------------------------------
585
+ """.rstrip()
586
+ )
587
+
588
+ def _trace_rule(self, rule):
589
+ assert self._rule_scores[rule] == sum(self._positions_by_rule[rule].values())
590
+
591
+ changes = self._positions_by_rule[rule].values()
592
+ num_fixed = len([c for c in changes if c == 1])
593
+ num_broken = len([c for c in changes if c == -1])
594
+ num_other = len([c for c in changes if c == 0])
595
+ score = self._rule_scores[rule]
596
+
597
+ rulestr = rule.format(self._ruleformat)
598
+ if self._trace > 2:
599
+ print(
600
+ "{:4d}{:4d}{:4d}{:4d} |".format(
601
+ score, num_fixed, num_broken, num_other
602
+ ),
603
+ end=" ",
604
+ )
605
+ print(
606
+ textwrap.fill(
607
+ rulestr,
608
+ initial_indent=" " * 20,
609
+ width=79,
610
+ subsequent_indent=" " * 18 + "| ",
611
+ ).strip()
612
+ )
613
+ else:
614
+ print(rulestr)
615
+
616
+ def _trace_apply(self, num_updates):
617
+ prefix = " " * 18 + "|"
618
+ print(prefix)
619
+ print(prefix, f"Applying rule to {num_updates} positions.")
620
+
621
+ def _trace_update_rules(self, num_obsolete, num_new, num_unseen):
622
+ prefix = " " * 18 + "|"
623
+ print(prefix, "Updated rule tables:")
624
+ print(prefix, (f" - {num_obsolete} rule applications removed"))
625
+ print(
626
+ prefix,
627
+ (f" - {num_new} rule applications added ({num_unseen} novel)"),
628
+ )
629
+ print(prefix)
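For orientation, here is a minimal usage sketch for the trainer defined above (an editorial example, not part of the uploaded file; it assumes a standard NLTK installation with the treebank sample downloaded): build a unigram baseline, hand it to BrillTaggerTrainer together with the fntbl37 template set from nltk.tag.brill, and train a rule list.

    from nltk.corpus import treebank
    from nltk.tag.brill import fntbl37
    from nltk.tag.brill_trainer import BrillTaggerTrainer
    from nltk.tag.sequential import UnigramTagger

    train_sents = treebank.tagged_sents()[:3000]
    baseline = UnigramTagger(train_sents)  # initial tagger whose errors the rules correct
    trainer = BrillTaggerTrainer(baseline, fntbl37(), trace=3)
    brill_tagger = trainer.train(train_sents, max_rules=100, min_score=3)
    print(brill_tagger.tag("The board approved the merger .".split()))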
venv/lib/python3.10/site-packages/nltk/tag/crf.py ADDED
@@ -0,0 +1,207 @@
1
+ # Natural Language Toolkit: Interface to the CRFSuite Tagger
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Long Duong <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ A module for POS tagging using CRFSuite
10
+ """
11
+
12
+ import re
13
+ import unicodedata
14
+
15
+ from nltk.tag.api import TaggerI
16
+
17
+ try:
18
+ import pycrfsuite
19
+ except ImportError:
20
+ pass
21
+
22
+
23
+ class CRFTagger(TaggerI):
24
+ """
25
+ A module for POS tagging using CRFSuite https://pypi.python.org/pypi/python-crfsuite
26
+
27
+ >>> from nltk.tag import CRFTagger
28
+ >>> ct = CRFTagger() # doctest: +SKIP
29
+
30
+ >>> train_data = [[('University','Noun'), ('is','Verb'), ('a','Det'), ('good','Adj'), ('place','Noun')],
31
+ ... [('dog','Noun'),('eat','Verb'),('meat','Noun')]]
32
+
33
+ >>> ct.train(train_data,'model.crf.tagger') # doctest: +SKIP
34
+ >>> ct.tag_sents([['dog','is','good'], ['Cat','eat','meat']]) # doctest: +SKIP
35
+ [[('dog', 'Noun'), ('is', 'Verb'), ('good', 'Adj')], [('Cat', 'Noun'), ('eat', 'Verb'), ('meat', 'Noun')]]
36
+
37
+ >>> gold_sentences = [[('dog','Noun'),('is','Verb'),('good','Adj')] , [('Cat','Noun'),('eat','Verb'), ('meat','Noun')]]
38
+ >>> ct.accuracy(gold_sentences) # doctest: +SKIP
39
+ 1.0
40
+
41
+ Setting learned model file
42
+ >>> ct = CRFTagger() # doctest: +SKIP
43
+ >>> ct.set_model_file('model.crf.tagger') # doctest: +SKIP
44
+ >>> ct.accuracy(gold_sentences) # doctest: +SKIP
45
+ 1.0
46
+ """
47
+
48
+ def __init__(self, feature_func=None, verbose=False, training_opt={}):
49
+ """
50
+ Initialize the CRFSuite tagger
51
+
52
+ :param feature_func: The function that extracts features for each token of a sentence. This function should take
53
+ two parameters, tokens and index, and return the features at the index position of the tokens list. See the built-in
54
+ _get_features function for more detail.
55
+ :param verbose: output the debugging messages during training.
56
+ :type verbose: boolean
57
+ :param training_opt: python-crfsuite training options
58
+ :type training_opt: dictionary
59
+
60
+ Set of possible training options (using LBFGS training algorithm).
61
+ :'feature.minfreq': The minimum frequency of features.
62
+ :'feature.possible_states': Force to generate possible state features.
63
+ :'feature.possible_transitions': Force to generate possible transition features.
64
+ :'c1': Coefficient for L1 regularization.
65
+ :'c2': Coefficient for L2 regularization.
66
+ :'max_iterations': The maximum number of iterations for L-BFGS optimization.
67
+ :'num_memories': The number of limited memories for approximating the inverse hessian matrix.
68
+ :'epsilon': Epsilon for testing the convergence of the objective.
69
+ :'period': The duration of iterations to test the stopping criterion.
70
+ :'delta': The threshold for the stopping criterion; an L-BFGS iteration stops when the
71
+ improvement of the log likelihood over the last ${period} iterations is no greater than this threshold.
72
+ :'linesearch': The line search algorithm used in L-BFGS updates:
73
+
74
+ - 'MoreThuente': More and Thuente's method,
75
+ - 'Backtracking': Backtracking method with regular Wolfe condition,
76
+ - 'StrongBacktracking': Backtracking method with strong Wolfe condition
77
+ :'max_linesearch': The maximum number of trials for the line search algorithm.
78
+ """
79
+
80
+ self._model_file = ""
81
+ self._tagger = pycrfsuite.Tagger()
82
+
83
+ if feature_func is None:
84
+ self._feature_func = self._get_features
85
+ else:
86
+ self._feature_func = feature_func
87
+
88
+ self._verbose = verbose
89
+ self._training_options = training_opt
90
+ self._pattern = re.compile(r"\d")
91
+
92
+ def set_model_file(self, model_file):
93
+ self._model_file = model_file
94
+ self._tagger.open(self._model_file)
95
+
96
+ def _get_features(self, tokens, idx):
97
+ """
98
+ Extract basic features about this word including
99
+ - Current word
100
+ - is it capitalized?
101
+ - Does it have punctuation?
102
+ - Does it have a number?
103
+ - Suffixes up to length 3
104
+
105
+ Note that we might also include features over the previous word, the next word, etc.
106
+
107
+ :return: a list which contains the features
108
+ :rtype: list(str)
109
+ """
110
+ token = tokens[idx]
111
+
112
+ feature_list = []
113
+
114
+ if not token:
115
+ return feature_list
116
+
117
+ # Capitalization
118
+ if token[0].isupper():
119
+ feature_list.append("CAPITALIZATION")
120
+
121
+ # Number
122
+ if re.search(self._pattern, token) is not None:
123
+ feature_list.append("HAS_NUM")
124
+
125
+ # Punctuation
126
+ punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"}
127
+ if all(unicodedata.category(x) in punc_cat for x in token):
128
+ feature_list.append("PUNCTUATION")
129
+
130
+ # Suffix up to length 3
131
+ if len(token) > 1:
132
+ feature_list.append("SUF_" + token[-1:])
133
+ if len(token) > 2:
134
+ feature_list.append("SUF_" + token[-2:])
135
+ if len(token) > 3:
136
+ feature_list.append("SUF_" + token[-3:])
137
+
138
+ feature_list.append("WORD_" + token)
139
+
140
+ return feature_list
141
+
142
+ def tag_sents(self, sents):
143
+ """
144
+ Tag a list of sentences. NB: before using this function, the user should specify the model file either by
145
+
146
+ - Train a new model using ``train`` function
147
+ - Use the pre-trained model which is set via ``set_model_file`` function
148
+
149
+ :param sents: list of sentences to be tagged.
151
+ :type sents: list(list(str))
151
+ :return: list of tagged sentences.
152
+ :rtype: list(list(tuple(str,str)))
153
+ """
154
+ if self._model_file == "":
155
+ raise Exception(
156
+ " No model file is found !! Please use train or set_model_file function"
157
+ )
158
+
159
+ # We need the list of sentences instead of the list generator for matching the input and output
160
+ result = []
161
+ for tokens in sents:
162
+ features = [self._feature_func(tokens, i) for i in range(len(tokens))]
163
+ labels = self._tagger.tag(features)
164
+
165
+ if len(labels) != len(tokens):
166
+ raise Exception(" Predicted Length Not Matched, Expect Errors !")
167
+
168
+ tagged_sent = list(zip(tokens, labels))
169
+ result.append(tagged_sent)
170
+
171
+ return result
172
+
173
+ def train(self, train_data, model_file):
174
+ """
175
+ Train the CRF tagger using CRFSuite
176
+ :param train_data: the list of annotated sentences.
177
+ :type train_data: list(list(tuple(str,str)))
178
+ :param model_file: the model will be saved to this file.
179
+
180
+ """
181
+ trainer = pycrfsuite.Trainer(verbose=self._verbose)
182
+ trainer.set_params(self._training_options)
183
+
184
+ for sent in train_data:
185
+ tokens, labels = zip(*sent)
186
+ features = [self._feature_func(tokens, i) for i in range(len(tokens))]
187
+ trainer.append(features, labels)
188
+
189
+ # Now train the model, the output should be model_file
190
+ trainer.train(model_file)
191
+ # Save the model file
192
+ self.set_model_file(model_file)
193
+
194
+ def tag(self, tokens):
195
+ """
196
+ Tag a sentence using Python CRFSuite Tagger. NB: before using this function, the user should specify the model file either by
197
+
198
+ - Train a new model using ``train`` function
199
+ - Use the pre-trained model which is set via ``set_model_file`` function
200
+
201
+ :param tokens: list of tokens to be tagged.
202
+ :type tokens: list(str)
203
+ :return: list of tagged tokens.
204
+ :rtype: list(tuple(str,str))
205
+ """
206
+
207
+ return self.tag_sents([tokens])[0]
venv/lib/python3.10/site-packages/nltk/tag/hmm.py ADDED
@@ -0,0 +1,1329 @@
1
+ # Natural Language Toolkit: Hidden Markov Model
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # Philip Blunsom <[email protected]>
6
+ # Tiago Tresoldi <[email protected]> (fixes)
7
+ # Steven Bird <[email protected]> (fixes)
8
+ # Joseph Frazee <[email protected]> (fixes)
9
+ # Steven Xu <[email protected]> (fixes)
10
+ # URL: <https://www.nltk.org/>
11
+ # For license information, see LICENSE.TXT
12
+
13
+ """
14
+ Hidden Markov Models (HMMs) are largely used to assign the correct label sequence
15
+ to sequential data or assess the probability of a given label and data
16
+ sequence. These models are finite state machines characterised by a number of
17
+ states, transitions between these states, and output symbols emitted while in
18
+ each state. The HMM is an extension to the Markov chain, where each state
19
+ corresponds deterministically to a given event. In the HMM the observation is
20
+ a probabilistic function of the state. HMMs share the Markov chain's
21
+ assumption, being that the probability of transition from one state to another
22
+ only depends on the current state - i.e. the series of states that led to the
23
+ current state are not used. They are also time invariant.
24
+
25
+ The HMM is a directed graph, with probability weighted edges (representing the
26
+ probability of a transition between the source and sink states) where each
27
+ vertex emits an output symbol when entered. The symbol (or observation) is
28
+ non-deterministically generated. For this reason, knowing that a sequence of
29
+ output observations was generated by a given HMM does not mean that the
30
+ corresponding sequence of states (and what the current state is) is known.
31
+ This is the 'hidden' in the hidden Markov model.
32
+
33
+ Formally, a HMM can be characterised by:
34
+
35
+ - the output observation alphabet. This is the set of symbols which may be
36
+ observed as output of the system.
37
+ - the set of states.
38
+ - the transition probabilities *a_{ij} = P(s_t = j | s_{t-1} = i)*. These
39
+ represent the probability of transition to each state from a given state.
40
+ - the output probability matrix *b_i(k) = P(X_t = o_k | s_t = i)*. These
41
+ represent the probability of observing each symbol in a given state.
42
+ - the initial state distribution. This gives the probability of starting
43
+ in each state.
44
+
45
+ To ground this discussion, take a common NLP application, part-of-speech (POS)
46
+ tagging. An HMM is desirable for this task as the highest probability tag
47
+ sequence can be calculated for a given sequence of word forms. This differs
48
+ from other tagging techniques which often tag each word individually, seeking
49
+ to optimise each individual tagging greedily without regard to the optimal
50
+ combination of tags for a larger unit, such as a sentence. The HMM does this
51
+ with the Viterbi algorithm, which efficiently computes the optimal path
52
+ through the graph given the sequence of word forms.
53
+
54
+ In POS tagging the states usually have a 1:1 correspondence with the tag
55
+ alphabet - i.e. each state represents a single tag. The output observation
56
+ alphabet is the set of word forms (the lexicon), and the remaining three
57
+ parameters are derived by a training regime. With this information the
58
+ probability of a given sentence can be easily derived, by simply summing the
59
+ probability of each distinct path through the model. Similarly, the highest
60
+ probability tagging sequence can be derived with the Viterbi algorithm,
61
+ yielding a state sequence which can be mapped into a tag sequence.
62
+
63
+ This discussion assumes that the HMM has been trained. This is probably the
64
+ most difficult task with the model, and requires either MLE estimates of the
65
+ parameters or unsupervised learning using the Baum-Welch algorithm, a variant
66
+ of EM.
67
+
68
+ For more information, please consult the source code for this module,
69
+ which includes extensive demonstration code.
70
+ """
71
+
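+ # A minimal usage sketch for the tagger defined below (editorial comment, not
+ # part of the upstream module; it assumes the treebank corpus is available):
+ #
+ #     from nltk.corpus import treebank
+ #     from nltk.tag.hmm import HiddenMarkovModelTagger
+ #
+ #     tagger = HiddenMarkovModelTagger.train(treebank.tagged_sents()[:3000])
+ #     print(tagger.tag("Pierre Vinken will join the board".split()))
+ #     tagger.test(treebank.tagged_sents()[3000:3100])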
72
+ import itertools
73
+ import re
74
+
75
+ try:
76
+ import numpy as np
77
+ except ImportError:
78
+ pass
79
+
80
+ from nltk.metrics import accuracy
81
+ from nltk.probability import (
82
+ ConditionalFreqDist,
83
+ ConditionalProbDist,
84
+ DictionaryConditionalProbDist,
85
+ DictionaryProbDist,
86
+ FreqDist,
87
+ LidstoneProbDist,
88
+ MLEProbDist,
89
+ MutableProbDist,
90
+ RandomProbDist,
91
+ )
92
+ from nltk.tag.api import TaggerI
93
+ from nltk.util import LazyMap, unique_list
94
+
95
+ _TEXT = 0 # index of text in a tuple
96
+ _TAG = 1 # index of tag in a tuple
97
+
98
+
99
+ def _identity(labeled_symbols):
100
+ return labeled_symbols
101
+
102
+
103
+ class HiddenMarkovModelTagger(TaggerI):
104
+ """
105
+ Hidden Markov model class, a generative model for labelling sequence data.
106
+ These models define the joint probability of a sequence of symbols and
107
+ their labels (state transitions) as the product of the starting state
108
+ probability, the probability of each state transition, and the probability
109
+ of each observation being generated from each state. This is described in
110
+ more detail in the module documentation.
111
+
112
+ This implementation is based on the HMM description in Chapter 8, Huang,
113
+ Acero and Hon, Spoken Language Processing and includes an extension for
114
+ training shallow HMM parsers or specialized HMMs as in Molina et.
115
+ al, 2002. A specialized HMM modifies training data by applying a
116
+ specialization function to create a new training set that is more
117
+ appropriate for sequential tagging with an HMM. A typical use case is
118
+ chunking.
119
+
120
+ :param symbols: the set of output symbols (alphabet)
121
+ :type symbols: seq of any
122
+ :param states: a set of states representing state space
123
+ :type states: seq of any
124
+ :param transitions: transition probabilities; Pr(s_i | s_j) is the
125
+ probability of transition from state i given the model is in
126
+ state_j
127
+ :type transitions: ConditionalProbDistI
128
+ :param outputs: output probabilities; Pr(o_k | s_i) is the probability
129
+ of emitting symbol k when entering state i
130
+ :type outputs: ConditionalProbDistI
131
+ :param priors: initial state distribution; Pr(s_i) is the probability
132
+ of starting in state i
133
+ :type priors: ProbDistI
134
+ :param transform: an optional function for transforming training
135
+ instances, defaults to the identity function.
136
+ :type transform: callable
137
+ """
138
+
139
+ def __init__(
140
+ self, symbols, states, transitions, outputs, priors, transform=_identity
141
+ ):
142
+ self._symbols = unique_list(symbols)
143
+ self._states = unique_list(states)
144
+ self._transitions = transitions
145
+ self._outputs = outputs
146
+ self._priors = priors
147
+ self._cache = None
148
+ self._transform = transform
149
+
150
+ @classmethod
151
+ def _train(
152
+ cls,
153
+ labeled_sequence,
154
+ test_sequence=None,
155
+ unlabeled_sequence=None,
156
+ transform=_identity,
157
+ estimator=None,
158
+ **kwargs,
159
+ ):
160
+
161
+ if estimator is None:
162
+
163
+ def estimator(fd, bins):
164
+ return LidstoneProbDist(fd, 0.1, bins)
165
+
166
+ labeled_sequence = LazyMap(transform, labeled_sequence)
167
+ symbols = unique_list(word for sent in labeled_sequence for word, tag in sent)
168
+ tag_set = unique_list(tag for sent in labeled_sequence for word, tag in sent)
169
+
170
+ trainer = HiddenMarkovModelTrainer(tag_set, symbols)
171
+ hmm = trainer.train_supervised(labeled_sequence, estimator=estimator)
172
+ hmm = cls(
173
+ hmm._symbols,
174
+ hmm._states,
175
+ hmm._transitions,
176
+ hmm._outputs,
177
+ hmm._priors,
178
+ transform=transform,
179
+ )
180
+
181
+ if test_sequence:
182
+ hmm.test(test_sequence, verbose=kwargs.get("verbose", False))
183
+
184
+ if unlabeled_sequence:
185
+ max_iterations = kwargs.get("max_iterations", 5)
186
+ hmm = trainer.train_unsupervised(
187
+ unlabeled_sequence, model=hmm, max_iterations=max_iterations
188
+ )
189
+ if test_sequence:
190
+ hmm.test(test_sequence, verbose=kwargs.get("verbose", False))
191
+
192
+ return hmm
193
+
194
+ @classmethod
195
+ def train(
196
+ cls, labeled_sequence, test_sequence=None, unlabeled_sequence=None, **kwargs
197
+ ):
198
+ """
199
+ Train a new HiddenMarkovModelTagger using the given labeled and
200
+ unlabeled training instances. Testing will be performed if test
201
+ instances are provided.
202
+
203
+ :return: a hidden markov model tagger
204
+ :rtype: HiddenMarkovModelTagger
205
+ :param labeled_sequence: a sequence of labeled training instances,
206
+ i.e. a list of sentences represented as tuples
207
+ :type labeled_sequence: list(list)
208
+ :param test_sequence: a sequence of labeled test instances
209
+ :type test_sequence: list(list)
210
+ :param unlabeled_sequence: a sequence of unlabeled training instances,
211
+ i.e. a list of sentences represented as words
212
+ :type unlabeled_sequence: list(list)
213
+ :param transform: an optional function for transforming training
214
+ instances, defaults to the identity function, see ``transform()``
215
+ :type transform: function
216
+ :param estimator: an optional function or class that maps a
217
+ condition's frequency distribution to its probability
218
+ distribution, defaults to a Lidstone distribution with gamma = 0.1
219
+ :type estimator: class or function
220
+ :param verbose: boolean flag indicating whether training should be
221
+ verbose or include printed output
222
+ :type verbose: bool
223
+ :param max_iterations: number of Baum-Welch iterations to perform
224
+ :type max_iterations: int
225
+ """
226
+ return cls._train(labeled_sequence, test_sequence, unlabeled_sequence, **kwargs)
227
+
228
+ def probability(self, sequence):
229
+ """
230
+ Returns the probability of the given symbol sequence. If the sequence
231
+ is labelled, then returns the joint probability of the symbol, state
232
+ sequence. Otherwise, uses the forward algorithm to find the
233
+ probability over all label sequences.
234
+
235
+ :return: the probability of the sequence
236
+ :rtype: float
237
+ :param sequence: the sequence of symbols which must contain the TEXT
238
+ property, and optionally the TAG property
239
+ :type sequence: Token
240
+ """
241
+ return 2 ** (self.log_probability(self._transform(sequence)))
242
+
243
+ def log_probability(self, sequence):
244
+ """
245
+ Returns the log-probability of the given symbol sequence. If the
246
+ sequence is labelled, then returns the joint log-probability of the
247
+ symbol, state sequence. Otherwise, uses the forward algorithm to find
248
+ the log-probability over all label sequences.
249
+
250
+ :return: the log-probability of the sequence
251
+ :rtype: float
252
+ :param sequence: the sequence of symbols which must contain the TEXT
253
+ property, and optionally the TAG property
254
+ :type sequence: Token
255
+ """
256
+ sequence = self._transform(sequence)
257
+
258
+ T = len(sequence)
259
+
260
+ if T > 0 and sequence[0][_TAG]:
261
+ last_state = sequence[0][_TAG]
262
+ p = self._priors.logprob(last_state) + self._output_logprob(
263
+ last_state, sequence[0][_TEXT]
264
+ )
265
+ for t in range(1, T):
266
+ state = sequence[t][_TAG]
267
+ p += self._transitions[last_state].logprob(
268
+ state
269
+ ) + self._output_logprob(state, sequence[t][_TEXT])
270
+ last_state = state
271
+ return p
272
+ else:
273
+ alpha = self._forward_probability(sequence)
274
+ p = logsumexp2(alpha[T - 1])
275
+ return p
276
+
277
+ def tag(self, unlabeled_sequence):
278
+ """
279
+ Tags the sequence with the highest probability state sequence. This
280
+ uses the best_path method to find the Viterbi path.
281
+
282
+ :return: a labelled sequence of symbols
283
+ :rtype: list
284
+ :param unlabeled_sequence: the sequence of unlabeled symbols
285
+ :type unlabeled_sequence: list
286
+ """
287
+ unlabeled_sequence = self._transform(unlabeled_sequence)
288
+ return self._tag(unlabeled_sequence)
289
+
290
+ def _tag(self, unlabeled_sequence):
291
+ path = self._best_path(unlabeled_sequence)
292
+ return list(zip(unlabeled_sequence, path))
293
+
294
+ def _output_logprob(self, state, symbol):
295
+ """
296
+ :return: the log probability of the symbol being observed in the given
297
+ state
298
+ :rtype: float
299
+ """
300
+ return self._outputs[state].logprob(symbol)
301
+
302
+ def _create_cache(self):
303
+ """
304
+ The cache is a tuple (P, O, X, S) where:
305
+
306
+ - S maps symbols to integers. I.e., it is the inverse
307
+ mapping from self._symbols; for each symbol s in
308
+ self._symbols, the following is true::
309
+
310
+ self._symbols[S[s]] == s
311
+
312
+ - O is the log output probabilities::
313
+
314
+ O[i,k] = log( P(token[t]=sym[k]|tag[t]=state[i]) )
315
+
316
+ - X is the log transition probabilities::
317
+
318
+ X[i,j] = log( P(tag[t]=state[j]|tag[t-1]=state[i]) )
319
+
320
+ - P is the log prior probabilities::
321
+
322
+ P[i] = log( P(tag[0]=state[i]) )
323
+ """
324
+ if not self._cache:
325
+ N = len(self._states)
326
+ M = len(self._symbols)
327
+ P = np.zeros(N, np.float32)
328
+ X = np.zeros((N, N), np.float32)
329
+ O = np.zeros((N, M), np.float32)
330
+ for i in range(N):
331
+ si = self._states[i]
332
+ P[i] = self._priors.logprob(si)
333
+ for j in range(N):
334
+ X[i, j] = self._transitions[si].logprob(self._states[j])
335
+ for k in range(M):
336
+ O[i, k] = self._output_logprob(si, self._symbols[k])
337
+ S = {}
338
+ for k in range(M):
339
+ S[self._symbols[k]] = k
340
+ self._cache = (P, O, X, S)
341
+
342
+ def _update_cache(self, symbols):
343
+ # add new symbols to the symbol table and repopulate the output
344
+ # probabilities and symbol table mapping
345
+ if symbols:
346
+ self._create_cache()
347
+ P, O, X, S = self._cache
348
+ for symbol in symbols:
349
+ if symbol not in self._symbols:
350
+ self._cache = None
351
+ self._symbols.append(symbol)
352
+ # don't bother with the work if there aren't any new symbols
353
+ if not self._cache:
354
+ N = len(self._states)
355
+ M = len(self._symbols)
356
+ Q = O.shape[1]
357
+ # add new columns to the output probability table without
358
+ # destroying the old probabilities
359
+ O = np.hstack([O, np.zeros((N, M - Q), np.float32)])
360
+ for i in range(N):
361
+ si = self._states[i]
362
+ # only calculate probabilities for new symbols
363
+ for k in range(Q, M):
364
+ O[i, k] = self._output_logprob(si, self._symbols[k])
365
+ # only create symbol mappings for new symbols
366
+ for k in range(Q, M):
367
+ S[self._symbols[k]] = k
368
+ self._cache = (P, O, X, S)
369
+
370
+ def reset_cache(self):
371
+ self._cache = None
372
+
373
+ def best_path(self, unlabeled_sequence):
374
+ """
375
+ Returns the state sequence of the optimal (most probable) path through
376
+ the HMM. Uses the Viterbi algorithm to calculate this part by dynamic
377
+ programming.
378
+
379
+ :return: the state sequence
380
+ :rtype: sequence of any
381
+ :param unlabeled_sequence: the sequence of unlabeled symbols
382
+ :type unlabeled_sequence: list
383
+ """
384
+ unlabeled_sequence = self._transform(unlabeled_sequence)
385
+ return self._best_path(unlabeled_sequence)
386
+
387
+ def _best_path(self, unlabeled_sequence):
388
+ T = len(unlabeled_sequence)
389
+ N = len(self._states)
390
+ self._create_cache()
391
+ self._update_cache(unlabeled_sequence)
392
+ P, O, X, S = self._cache
393
+
394
+ V = np.zeros((T, N), np.float32)
395
+ B = -np.ones((T, N), int)
396
+
397
+ V[0] = P + O[:, S[unlabeled_sequence[0]]]
398
+ for t in range(1, T):
399
+ for j in range(N):
400
+ vs = V[t - 1, :] + X[:, j]
401
+ best = np.argmax(vs)
402
+ V[t, j] = vs[best] + O[j, S[unlabeled_sequence[t]]]
403
+ B[t, j] = best
404
+
405
+ current = np.argmax(V[T - 1, :])
406
+ sequence = [current]
407
+ for t in range(T - 1, 0, -1):
408
+ last = B[t, current]
409
+ sequence.append(last)
410
+ current = last
411
+
412
+ sequence.reverse()
413
+ return list(map(self._states.__getitem__, sequence))
414
+
415
+ def best_path_simple(self, unlabeled_sequence):
416
+ """
417
+ Returns the state sequence of the optimal (most probable) path through
418
+ the HMM. Uses the Viterbi algorithm to calculate this part by dynamic
419
+ programming. This uses a simple, direct method, and is included for
420
+ teaching purposes.
421
+
422
+ :return: the state sequence
423
+ :rtype: sequence of any
424
+ :param unlabeled_sequence: the sequence of unlabeled symbols
425
+ :type unlabeled_sequence: list
426
+ """
427
+ unlabeled_sequence = self._transform(unlabeled_sequence)
428
+ return self._best_path_simple(unlabeled_sequence)
429
+
430
+ def _best_path_simple(self, unlabeled_sequence):
431
+ T = len(unlabeled_sequence)
432
+ N = len(self._states)
433
+ V = np.zeros((T, N), np.float64)
434
+ B = {}
435
+
436
+ # find the starting log probabilities for each state
437
+ symbol = unlabeled_sequence[0]
438
+ for i, state in enumerate(self._states):
439
+ V[0, i] = self._priors.logprob(state) + self._output_logprob(state, symbol)
440
+ B[0, state] = None
441
+
442
+ # find the maximum log probabilities for reaching each state at time t
443
+ for t in range(1, T):
444
+ symbol = unlabeled_sequence[t]
445
+ for j in range(N):
446
+ sj = self._states[j]
447
+ best = None
448
+ for i in range(N):
449
+ si = self._states[i]
450
+ va = V[t - 1, i] + self._transitions[si].logprob(sj)
451
+ if not best or va > best[0]:
452
+ best = (va, si)
453
+ V[t, j] = best[0] + self._output_logprob(sj, symbol)
454
+ B[t, sj] = best[1]
455
+
456
+ # find the highest probability final state
457
+ best = None
458
+ for i in range(N):
459
+ val = V[T - 1, i]
460
+ if not best or val > best[0]:
461
+ best = (val, self._states[i])
462
+
463
+ # traverse the back-pointers B to find the state sequence
464
+ current = best[1]
465
+ sequence = [current]
466
+ for t in range(T - 1, 0, -1):
467
+ last = B[t, current]
468
+ sequence.append(last)
469
+ current = last
470
+
471
+ sequence.reverse()
472
+ return sequence
473
+
474
+ def random_sample(self, rng, length):
475
+ """
476
+ Randomly sample the HMM to generate a sentence of a given length. This
477
+ samples the prior distribution then the observation distribution and
478
+ transition distribution for each subsequent observation and state.
479
+ This will mostly generate unintelligible garbage, but can provide some
480
+ amusement.
481
+
482
+ :return: the randomly created state/observation sequence,
483
+ generated according to the HMM's probability
484
+ distributions. The SUBTOKENS have TEXT and TAG
485
+ properties containing the observation and state
486
+ respectively.
487
+ :rtype: list
488
+ :param rng: random number generator
489
+ :type rng: Random (or any object with a random() method)
490
+ :param length: desired output length
491
+ :type length: int
492
+ """
493
+
494
+ # sample the starting state and symbol prob dists
495
+ tokens = []
496
+ state = self._sample_probdist(self._priors, rng.random(), self._states)
497
+ symbol = self._sample_probdist(
498
+ self._outputs[state], rng.random(), self._symbols
499
+ )
500
+ tokens.append((symbol, state))
501
+
502
+ for i in range(1, length):
503
+ # sample the state transition and symbol prob dists
504
+ state = self._sample_probdist(
505
+ self._transitions[state], rng.random(), self._states
506
+ )
507
+ symbol = self._sample_probdist(
508
+ self._outputs[state], rng.random(), self._symbols
509
+ )
510
+ tokens.append((symbol, state))
511
+
512
+ return tokens
513
+
514
+ def _sample_probdist(self, probdist, p, samples):
515
+ cum_p = 0
516
+ for sample in samples:
517
+ add_p = probdist.prob(sample)
518
+ if cum_p <= p <= cum_p + add_p:
519
+ return sample
520
+ cum_p += add_p
521
+ raise Exception("Invalid probability distribution - " "does not sum to one")
522
+
523
+ def entropy(self, unlabeled_sequence):
524
+ """
525
+ Returns the entropy over labellings of the given sequence. This is
526
+ given by::
527
+
528
+ H(O) = - sum_S Pr(S | O) log Pr(S | O)
529
+
530
+ where the summation ranges over all state sequences, S. Let
531
+ *Z = Pr(O) = sum_S Pr(S, O)* where the summation ranges over all state
532
+ sequences and O is the observation sequence. As such the entropy can
533
+ be re-expressed as::
534
+
535
+ H = - sum_S Pr(S | O) log [ Pr(S, O) / Z ]
536
+ = log Z - sum_S Pr(S | O) log Pr(S, O)
538
+ = log Z - sum_S Pr(S | O) [ log Pr(S_0) + sum_t log Pr(S_t | S_{t-1}) + sum_t log Pr(O_t | S_t) ]
538
+
539
+ The order of summation for the log terms can be flipped, allowing
540
+ dynamic programming to be used to calculate the entropy. Specifically,
541
+ we use the forward and backward probabilities (alpha, beta) giving::
542
+
543
+ H = log Z - sum_s0 alpha_0(s0) beta_0(s0) / Z * log Pr(s0)
544
+ + sum_t,si,sj alpha_t(si) Pr(sj | si) Pr(O_t+1 | sj) beta_t(sj) / Z * log Pr(sj | si)
545
+ + sum_t,st alpha_t(st) beta_t(st) / Z * log Pr(O_t | st)
546
+
547
+ This simply uses alpha and beta to find the probabilities of partial
548
+ sequences, constrained to include the given state(s) at some point in
549
+ time.
550
+ """
551
+ unlabeled_sequence = self._transform(unlabeled_sequence)
552
+
553
+ T = len(unlabeled_sequence)
554
+ N = len(self._states)
555
+
556
+ alpha = self._forward_probability(unlabeled_sequence)
557
+ beta = self._backward_probability(unlabeled_sequence)
558
+ normalisation = logsumexp2(alpha[T - 1])
559
+
560
+ entropy = normalisation
561
+
562
+ # starting state, t = 0
563
+ for i, state in enumerate(self._states):
564
+ p = 2 ** (alpha[0, i] + beta[0, i] - normalisation)
565
+ entropy -= p * self._priors.logprob(state)
566
+ # print('p(s_0 = %s) =' % state, p)
567
+
568
+ # state transitions
569
+ for t0 in range(T - 1):
570
+ t1 = t0 + 1
571
+ for i0, s0 in enumerate(self._states):
572
+ for i1, s1 in enumerate(self._states):
573
+ p = 2 ** (
574
+ alpha[t0, i0]
575
+ + self._transitions[s0].logprob(s1)
576
+ + self._outputs[s1].logprob(unlabeled_sequence[t1][_TEXT])
577
+ + beta[t1, i1]
578
+ - normalisation
579
+ )
580
+ entropy -= p * self._transitions[s0].logprob(s1)
581
+ # print('p(s_%d = %s, s_%d = %s) =' % (t0, s0, t1, s1), p)
582
+
583
+ # symbol emissions
584
+ for t in range(T):
585
+ for i, state in enumerate(self._states):
586
+ p = 2 ** (alpha[t, i] + beta[t, i] - normalisation)
587
+ entropy -= p * self._outputs[state].logprob(
588
+ unlabeled_sequence[t][_TEXT]
589
+ )
590
+ # print('p(s_%d = %s) =' % (t, state), p)
591
+
592
+ return entropy
593
+
594
+ def point_entropy(self, unlabeled_sequence):
595
+ """
596
+ Returns the pointwise entropy over the possible states at each
597
+ position in the chain, given the observation sequence.
598
+ """
599
+ unlabeled_sequence = self._transform(unlabeled_sequence)
600
+
601
+ T = len(unlabeled_sequence)
602
+ N = len(self._states)
603
+
604
+ alpha = self._forward_probability(unlabeled_sequence)
605
+ beta = self._backward_probability(unlabeled_sequence)
606
+ normalisation = logsumexp2(alpha[T - 1])
607
+
608
+ entropies = np.zeros(T, np.float64)
609
+ probs = np.zeros(N, np.float64)
610
+ for t in range(T):
611
+ for s in range(N):
612
+ probs[s] = alpha[t, s] + beta[t, s] - normalisation
613
+
614
+ for s in range(N):
615
+ entropies[t] -= 2 ** (probs[s]) * probs[s]
616
+
617
+ return entropies
618
+
619
+ def _exhaustive_entropy(self, unlabeled_sequence):
620
+ unlabeled_sequence = self._transform(unlabeled_sequence)
621
+
622
+ T = len(unlabeled_sequence)
623
+ N = len(self._states)
624
+
625
+ labellings = [[state] for state in self._states]
626
+ for t in range(T - 1):
627
+ current = labellings
628
+ labellings = []
629
+ for labelling in current:
630
+ for state in self._states:
631
+ labellings.append(labelling + [state])
632
+
633
+ log_probs = []
634
+ for labelling in labellings:
635
+ labeled_sequence = unlabeled_sequence[:]
636
+ for t, label in enumerate(labelling):
637
+ labeled_sequence[t] = (labeled_sequence[t][_TEXT], label)
638
+ lp = self.log_probability(labeled_sequence)
639
+ log_probs.append(lp)
640
+ normalisation = _log_add(*log_probs)
641
+
642
+ entropy = 0
643
+ for lp in log_probs:
644
+ lp -= normalisation
645
+ entropy -= 2 ** (lp) * lp
646
+
647
+ return entropy
648
+
649
+ def _exhaustive_point_entropy(self, unlabeled_sequence):
650
+ unlabeled_sequence = self._transform(unlabeled_sequence)
651
+
652
+ T = len(unlabeled_sequence)
653
+ N = len(self._states)
654
+
655
+ labellings = [[state] for state in self._states]
656
+ for t in range(T - 1):
657
+ current = labellings
658
+ labellings = []
659
+ for labelling in current:
660
+ for state in self._states:
661
+ labellings.append(labelling + [state])
662
+
663
+ log_probs = []
664
+ for labelling in labellings:
665
+ labelled_sequence = unlabeled_sequence[:]
666
+ for t, label in enumerate(labelling):
667
+ labelled_sequence[t] = (labelled_sequence[t][_TEXT], label)
668
+ lp = self.log_probability(labelled_sequence)
669
+ log_probs.append(lp)
670
+
671
+ normalisation = _log_add(*log_probs)
672
+
673
+ probabilities = _ninf_array((T, N))
674
+
675
+ for labelling, lp in zip(labellings, log_probs):
676
+ lp -= normalisation
677
+ for t, label in enumerate(labelling):
678
+ index = self._states.index(label)
679
+ probabilities[t, index] = _log_add(probabilities[t, index], lp)
680
+
681
+ entropies = np.zeros(T, np.float64)
682
+ for t in range(T):
683
+ for s in range(N):
684
+ entropies[t] -= 2 ** (probabilities[t, s]) * probabilities[t, s]
685
+
686
+ return entropies
687
+
688
+ def _transitions_matrix(self):
689
+ """Return a matrix of transition log probabilities."""
690
+ trans_iter = (
691
+ self._transitions[sj].logprob(si)
692
+ for sj in self._states
693
+ for si in self._states
694
+ )
695
+
696
+ transitions_logprob = np.fromiter(trans_iter, dtype=np.float64)
697
+ N = len(self._states)
698
+ return transitions_logprob.reshape((N, N)).T
699
+
700
+ def _outputs_vector(self, symbol):
701
+ """
702
+ Return a vector with log probabilities of emitting a symbol
703
+ when entering states.
704
+ """
705
+ out_iter = (self._output_logprob(sj, symbol) for sj in self._states)
706
+ return np.fromiter(out_iter, dtype=np.float64)
707
+
708
+ def _forward_probability(self, unlabeled_sequence):
709
+ """
710
+ Return the forward probability matrix, a T by N array of
711
+ log-probabilities, where T is the length of the sequence and N is the
712
+ number of states. Each entry (t, s) gives the probability of being in
713
+ state s at time t after observing the partial symbol sequence up to
714
+ and including t.
715
+
716
+ :param unlabeled_sequence: the sequence of unlabeled symbols
717
+ :type unlabeled_sequence: list
718
+ :return: the forward log probability matrix
719
+ :rtype: array
720
+ """
721
+ T = len(unlabeled_sequence)
722
+ N = len(self._states)
723
+ alpha = _ninf_array((T, N))
724
+
725
+ transitions_logprob = self._transitions_matrix()
726
+
727
+ # Initialization
728
+ symbol = unlabeled_sequence[0][_TEXT]
729
+ for i, state in enumerate(self._states):
730
+ alpha[0, i] = self._priors.logprob(state) + self._output_logprob(
731
+ state, symbol
732
+ )
733
+
734
+ # Induction
735
+ for t in range(1, T):
736
+ symbol = unlabeled_sequence[t][_TEXT]
737
+ output_logprob = self._outputs_vector(symbol)
738
+
739
+ for i in range(N):
740
+ summand = alpha[t - 1] + transitions_logprob[i]
741
+ alpha[t, i] = logsumexp2(summand) + output_logprob[i]
742
+
743
+ return alpha
744
+
745
+ def _backward_probability(self, unlabeled_sequence):
746
+ """
747
+ Return the backward probability matrix, a T by N array of
748
+ log-probabilities, where T is the length of the sequence and N is the
749
+ number of states. Each entry (t, s) gives the probability of being in
750
+ state s at time t after observing the partial symbol sequence from t
751
+ .. T.
752
+
753
+ :return: the backward log probability matrix
754
+ :rtype: array
755
+ :param unlabeled_sequence: the sequence of unlabeled symbols
756
+ :type unlabeled_sequence: list
757
+ """
758
+ T = len(unlabeled_sequence)
759
+ N = len(self._states)
760
+ beta = _ninf_array((T, N))
761
+
762
+ transitions_logprob = self._transitions_matrix().T
763
+
764
+ # initialise the backward values;
765
+ # "1" is an arbitrarily chosen value from Rabiner tutorial
766
+ beta[T - 1, :] = np.log2(1)
767
+
768
+ # inductively calculate remaining backward values
769
+ for t in range(T - 2, -1, -1):
770
+ symbol = unlabeled_sequence[t + 1][_TEXT]
771
+ outputs = self._outputs_vector(symbol)
772
+
773
+ for i in range(N):
774
+ summand = transitions_logprob[i] + beta[t + 1] + outputs
775
+ beta[t, i] = logsumexp2(summand)
776
+
777
+ return beta
778
+
779
+ def test(self, test_sequence, verbose=False, **kwargs):
780
+ """
781
+ Tests the HiddenMarkovModelTagger instance.
782
+
783
+ :param test_sequence: a sequence of labeled test instances
784
+ :type test_sequence: list(list)
785
+ :param verbose: boolean flag indicating whether training should be
786
+ verbose or include printed output
787
+ :type verbose: bool
788
+ """
789
+
790
+ def words(sent):
791
+ return [word for (word, tag) in sent]
792
+
793
+ def tags(sent):
794
+ return [tag for (word, tag) in sent]
795
+
796
+ def flatten(seq):
797
+ return list(itertools.chain(*seq))
798
+
799
+ test_sequence = self._transform(test_sequence)
800
+ predicted_sequence = list(map(self._tag, map(words, test_sequence)))
801
+
802
+ if verbose:
803
+ for test_sent, predicted_sent in zip(test_sequence, predicted_sequence):
804
+ print(
805
+ "Test:",
806
+ " ".join(f"{token}/{tag}" for (token, tag) in test_sent),
807
+ )
808
+ print()
809
+ print("Untagged:", " ".join("%s" % token for (token, tag) in test_sent))
810
+ print()
811
+ print(
812
+ "HMM-tagged:",
813
+ " ".join(f"{token}/{tag}" for (token, tag) in predicted_sent),
814
+ )
815
+ print()
816
+ print(
817
+ "Entropy:",
818
+ self.entropy([(token, None) for (token, tag) in predicted_sent]),
819
+ )
820
+ print()
821
+ print("-" * 60)
822
+
823
+ test_tags = flatten(map(tags, test_sequence))
824
+ predicted_tags = flatten(map(tags, predicted_sequence))
825
+
826
+ acc = accuracy(test_tags, predicted_tags)
827
+ count = sum(len(sent) for sent in test_sequence)
828
+ print("accuracy over %d tokens: %.2f" % (count, acc * 100))
829
+
830
+ def __repr__(self):
831
+ return "<HiddenMarkovModelTagger %d states and %d output symbols>" % (
832
+ len(self._states),
833
+ len(self._symbols),
834
+ )
835
+
836
+
837
+ class HiddenMarkovModelTrainer:
838
+ """
839
+ Algorithms for learning HMM parameters from training data. These include
840
+ both supervised learning (MLE) and unsupervised learning (Baum-Welch).
841
+
842
+ Creates an HMM trainer to induce an HMM with the given states and
843
+ output symbol alphabet. A supervised and unsupervised training
844
+ method may be used. If either of the states or symbols are not given,
845
+ these may be derived from supervised training.
846
+
847
+ :param states: the set of state labels
848
+ :type states: sequence of any
849
+ :param symbols: the set of observation symbols
850
+ :type symbols: sequence of any
851
+ """
852
+
853
+ def __init__(self, states=None, symbols=None):
854
+ self._states = states if states else []
855
+ self._symbols = symbols if symbols else []
856
+
857
+ def train(self, labeled_sequences=None, unlabeled_sequences=None, **kwargs):
858
+ """
859
+ Trains the HMM using both (or either of) supervised and unsupervised
860
+ techniques.
861
+
862
+ :return: the trained model
863
+ :rtype: HiddenMarkovModelTagger
864
+ :param labeled_sequences: the supervised training data, a set of
866
+ labeled sequences of observations,
867
+ e.g. [ (word_1, tag_1), ..., (word_n, tag_n) ]
868
+ :type labeled_sequences: list
868
+ :param unlabeled_sequences: the unsupervised training data, a set of
869
+ sequences of observations
870
+ ex: [ word_1, ..., word_n ]
871
+ :type unlabeled_sequences: list
872
+ :param kwargs: additional arguments to pass to the training methods
873
+ """
874
+ assert labeled_sequences or unlabeled_sequences
875
+ model = None
876
+ if labeled_sequences:
877
+ model = self.train_supervised(labeled_sequences, **kwargs)
878
+ if unlabeled_sequences:
879
+ if model:
880
+ kwargs["model"] = model
881
+ model = self.train_unsupervised(unlabeled_sequences, **kwargs)
882
+ return model
883
+
884
+ def _baum_welch_step(self, sequence, model, symbol_to_number):
885
+
886
+ N = len(model._states)
887
+ M = len(model._symbols)
888
+ T = len(sequence)
889
+
890
+ # compute forward and backward probabilities
891
+ alpha = model._forward_probability(sequence)
892
+ beta = model._backward_probability(sequence)
893
+
894
+ # find the log probability of the sequence
895
+ lpk = logsumexp2(alpha[T - 1])
896
+
897
+ A_numer = _ninf_array((N, N))
898
+ B_numer = _ninf_array((N, M))
899
+ A_denom = _ninf_array(N)
900
+ B_denom = _ninf_array(N)
901
+
902
+ transitions_logprob = model._transitions_matrix().T
903
+
904
+ for t in range(T):
905
+ symbol = sequence[t][_TEXT] # not found? FIXME
906
+ next_symbol = None
907
+ if t < T - 1:
908
+ next_symbol = sequence[t + 1][_TEXT] # not found? FIXME
909
+ xi = symbol_to_number[symbol]
910
+
911
+ next_outputs_logprob = model._outputs_vector(next_symbol)
912
+ alpha_plus_beta = alpha[t] + beta[t]
913
+
914
+ if t < T - 1:
915
+ numer_add = (
916
+ transitions_logprob
917
+ + next_outputs_logprob
918
+ + beta[t + 1]
919
+ + alpha[t].reshape(N, 1)
920
+ )
921
+ A_numer = np.logaddexp2(A_numer, numer_add)
922
+ A_denom = np.logaddexp2(A_denom, alpha_plus_beta)
923
+ else:
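+ # At the final time step, A_denom already holds the sum of alpha + beta over
+ # t < T-1, so folding in the last alpha_plus_beta makes B_denom the
+ # accumulation over every t (the output distribution's denominator).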
924
+ B_denom = np.logaddexp2(A_denom, alpha_plus_beta)
925
+
926
+ B_numer[:, xi] = np.logaddexp2(B_numer[:, xi], alpha_plus_beta)
927
+
928
+ return lpk, A_numer, A_denom, B_numer, B_denom
929
+
930
+ def train_unsupervised(self, unlabeled_sequences, update_outputs=True, **kwargs):
931
+ """
932
+ Trains the HMM using the Baum-Welch algorithm to maximise the
933
+ probability of the data sequence. This is a variant of the EM
934
+ algorithm, and is unsupervised in that it doesn't need the state
935
+ sequences for the symbols. The code is based on 'A Tutorial on Hidden
936
+ Markov Models and Selected Applications in Speech Recognition',
937
+ Lawrence Rabiner, IEEE, 1989.
938
+
939
+ :return: the trained model
940
+ :rtype: HiddenMarkovModelTagger
941
+ :param unlabeled_sequences: the training data, a set of
942
+ sequences of observations
943
+ :type unlabeled_sequences: list
944
+
945
+ kwargs may include following parameters:
946
+
947
+ :param model: a HiddenMarkovModelTagger instance used to begin
948
+ the Baum-Welch algorithm
949
+ :param max_iterations: the maximum number of EM iterations
950
+ :param convergence_logprob: the maximum change in log probability to
951
+ allow convergence
952
+ """
953
+
954
+ # create a uniform HMM, which will be iteratively refined, unless
955
+ # given an existing model
956
+ model = kwargs.get("model")
957
+ if not model:
958
+ priors = RandomProbDist(self._states)
959
+ transitions = DictionaryConditionalProbDist(
960
+ {state: RandomProbDist(self._states) for state in self._states}
961
+ )
962
+ outputs = DictionaryConditionalProbDist(
963
+ {state: RandomProbDist(self._symbols) for state in self._states}
964
+ )
965
+ model = HiddenMarkovModelTagger(
966
+ self._symbols, self._states, transitions, outputs, priors
967
+ )
968
+
969
+ self._states = model._states
970
+ self._symbols = model._symbols
971
+
972
+ N = len(self._states)
973
+ M = len(self._symbols)
974
+ symbol_numbers = {sym: i for i, sym in enumerate(self._symbols)}
975
+
976
+ # update model prob dists so that they can be modified
977
+ # model._priors = MutableProbDist(model._priors, self._states)
978
+
979
+ model._transitions = DictionaryConditionalProbDist(
980
+ {
981
+ s: MutableProbDist(model._transitions[s], self._states)
982
+ for s in self._states
983
+ }
984
+ )
985
+
986
+ if update_outputs:
987
+ model._outputs = DictionaryConditionalProbDist(
988
+ {
989
+ s: MutableProbDist(model._outputs[s], self._symbols)
990
+ for s in self._states
991
+ }
992
+ )
993
+
994
+ model.reset_cache()
995
+
996
+ # iterate until convergence
997
+ converged = False
998
+ last_logprob = None
999
+ iteration = 0
1000
+ max_iterations = kwargs.get("max_iterations", 1000)
1001
+ epsilon = kwargs.get("convergence_logprob", 1e-6)
1002
+
1003
+ while not converged and iteration < max_iterations:
1004
+ A_numer = _ninf_array((N, N))
1005
+ B_numer = _ninf_array((N, M))
1006
+ A_denom = _ninf_array(N)
1007
+ B_denom = _ninf_array(N)
1008
+
1009
+ logprob = 0
1010
+ for sequence in unlabeled_sequences:
1011
+ sequence = list(sequence)
1012
+ if not sequence:
1013
+ continue
1014
+
1015
+ (
1016
+ lpk,
1017
+ seq_A_numer,
1018
+ seq_A_denom,
1019
+ seq_B_numer,
1020
+ seq_B_denom,
1021
+ ) = self._baum_welch_step(sequence, model, symbol_numbers)
1022
+
1023
+ # add these sums to the global A and B values
1024
+ for i in range(N):
1025
+ A_numer[i] = np.logaddexp2(A_numer[i], seq_A_numer[i] - lpk)
1026
+ B_numer[i] = np.logaddexp2(B_numer[i], seq_B_numer[i] - lpk)
1027
+
1028
+ A_denom = np.logaddexp2(A_denom, seq_A_denom - lpk)
1029
+ B_denom = np.logaddexp2(B_denom, seq_B_denom - lpk)
1030
+
1031
+ logprob += lpk
1032
+
1033
+ # use the calculated values to update the transition and output
1034
+ # probability values
1035
+ for i in range(N):
1036
+ logprob_Ai = A_numer[i] - A_denom[i]
1037
+ logprob_Bi = B_numer[i] - B_denom[i]
1038
+
1039
+ # We should normalize all probabilities (see p.391 Huang et al)
1040
+ # Let sum(P) be K.
1041
+ # We can divide each Pi by K to make sum(P) == 1.
1042
+ # Pi' = Pi/K
1043
+ # log2(Pi') = log2(Pi) - log2(K)
1044
+ logprob_Ai -= logsumexp2(logprob_Ai)
1045
+ logprob_Bi -= logsumexp2(logprob_Bi)
1046
+
1047
+ # update output and transition probabilities
1048
+ si = self._states[i]
1049
+
1050
+ for j in range(N):
1051
+ sj = self._states[j]
1052
+ model._transitions[si].update(sj, logprob_Ai[j])
1053
+
1054
+ if update_outputs:
1055
+ for k in range(M):
1056
+ ok = self._symbols[k]
1057
+ model._outputs[si].update(ok, logprob_Bi[k])
1058
+
1059
+ # Rabiner says the priors don't need to be updated. I don't
1060
+ # believe him. FIXME
1061
+
1062
+ # test for convergence
1063
+ if iteration > 0 and abs(logprob - last_logprob) < epsilon:
1064
+ converged = True
1065
+
1066
+ print("iteration", iteration, "logprob", logprob)
1067
+ iteration += 1
1068
+ last_logprob = logprob
1069
+
1070
+ return model
1071
+
1072
+ def train_supervised(self, labelled_sequences, estimator=None):
1073
+ """
1074
+ Supervised training maximising the joint probability of the symbol and
1075
+ state sequences. This is done by collecting frequencies of
1076
+ transitions between states, of symbol observations within each
1077
+ state, and of which states start a sentence. These frequency distributions
1078
+ are then normalised into probability estimates, which can be
1079
+ smoothed if desired.
1080
+
1081
+ :return: the trained model
1082
+ :rtype: HiddenMarkovModelTagger
1083
+ :param labelled_sequences: the training data, a set of
1084
+ labelled sequences of observations
1085
+ :type labelled_sequences: list
1086
+ :param estimator: a function taking
1087
+ a FreqDist and a number of bins and returning a ProbDistI;
1088
+ otherwise an MLE estimate is used
1089
+ """
1090
+
1091
+ # default to the MLE estimate
1092
+ if estimator is None:
1093
+ estimator = lambda fdist, bins: MLEProbDist(fdist)
1094
+
1095
+ # count occurrences of starting states, transitions out of each state
1096
+ # and output symbols observed in each state
1097
+ known_symbols = set(self._symbols)
1098
+ known_states = set(self._states)
1099
+
1100
+ starting = FreqDist()
1101
+ transitions = ConditionalFreqDist()
1102
+ outputs = ConditionalFreqDist()
1103
+ for sequence in labelled_sequences:
1104
+ lasts = None
1105
+ for token in sequence:
1106
+ state = token[_TAG]
1107
+ symbol = token[_TEXT]
1108
+ if lasts is None:
1109
+ starting[state] += 1
1110
+ else:
1111
+ transitions[lasts][state] += 1
1112
+ outputs[state][symbol] += 1
1113
+ lasts = state
1114
+
1115
+ # update the state and symbol lists
1116
+ if state not in known_states:
1117
+ self._states.append(state)
1118
+ known_states.add(state)
1119
+
1120
+ if symbol not in known_symbols:
1121
+ self._symbols.append(symbol)
1122
+ known_symbols.add(symbol)
1123
+
1124
+ # create probability distributions (with smoothing)
1125
+ N = len(self._states)
1126
+ pi = estimator(starting, N)
1127
+ A = ConditionalProbDist(transitions, estimator, N)
1128
+ B = ConditionalProbDist(outputs, estimator, len(self._symbols))
1129
+
1130
+ return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi)
1131
+
1132
+
1133
+ def _ninf_array(shape):
1134
+ res = np.empty(shape, np.float64)
1135
+ res.fill(-np.inf)
1136
+ return res
1137
+
1138
+
1139
+ def logsumexp2(arr):
1140
+ max_ = arr.max()
1141
+ return np.log2(np.sum(2 ** (arr - max_))) + max_
1142
+
1143
+
1144
+ def _log_add(*values):
1145
+ """
1146
+ Adds the logged values, returning the logarithm of the addition.
1147
+ """
1148
+ x = max(values)
1149
+ if x > -np.inf:
1150
+ sum_diffs = 0
1151
+ for value in values:
1152
+ sum_diffs += 2 ** (value - x)
1153
+ return x + np.log2(sum_diffs)
1154
+ else:
1155
+ return x
1156
+
1157
+
1158
+ def _create_hmm_tagger(states, symbols, A, B, pi):
1159
+ def pd(values, samples):
1160
+ d = dict(zip(samples, values))
1161
+ return DictionaryProbDist(d)
1162
+
1163
+ def cpd(array, conditions, samples):
1164
+ d = {}
1165
+ for values, condition in zip(array, conditions):
1166
+ d[condition] = pd(values, samples)
1167
+ return DictionaryConditionalProbDist(d)
1168
+
1169
+ A = cpd(A, states, states)
1170
+ B = cpd(B, states, symbols)
1171
+ pi = pd(pi, states)
1172
+ return HiddenMarkovModelTagger(
1173
+ symbols=symbols, states=states, transitions=A, outputs=B, priors=pi
1174
+ )
1175
+
1176
+
1177
+ def _market_hmm_example():
1178
+ """
1179
+ Return an example HMM (described at page 381, Huang et al)
1180
+ """
1181
+ states = ["bull", "bear", "static"]
1182
+ symbols = ["up", "down", "unchanged"]
1183
+ A = np.array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], np.float64)
1184
+ B = np.array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], np.float64)
1185
+ pi = np.array([0.5, 0.2, 0.3], np.float64)
1186
+
1187
+ model = _create_hmm_tagger(states, symbols, A, B, pi)
1188
+ return model, states, symbols
1189
+
1190
+
1191
+ def demo():
1192
+ # demonstrates HMM probability calculation
1193
+
1194
+ print()
1195
+ print("HMM probability calculation demo")
1196
+ print()
1197
+
1198
+ model, states, symbols = _market_hmm_example()
1199
+
1200
+ print("Testing", model)
1201
+
1202
+ for test in [
1203
+ ["up", "up"],
1204
+ ["up", "down", "up"],
1205
+ ["down"] * 5,
1206
+ ["unchanged"] * 5 + ["up"],
1207
+ ]:
1208
+
1209
+ sequence = [(t, None) for t in test]
1210
+
1211
+ print("Testing with observation sequence", test)
1212
+ print("probability =", model.probability(sequence))
1213
+ print("tagging = ", model.tag([word for (word, tag) in sequence]))
1214
+ print("p(tagged) = ", model.probability(sequence))
1215
+ print("H = ", model.entropy(sequence))
1216
+ print("H_exh = ", model._exhaustive_entropy(sequence))
1217
+ print("H(point) = ", model.point_entropy(sequence))
1218
+ print("H_exh(point)=", model._exhaustive_point_entropy(sequence))
1219
+ print()
1220
+
1221
+
1222
+ def load_pos(num_sents):
1223
+ from nltk.corpus import brown
1224
+
1225
+ sentences = brown.tagged_sents(categories="news")[:num_sents]
1226
+
1227
+ tag_re = re.compile(r"[*]|--|[^+*-]+")
1228
+ tag_set = set()
1229
+ symbols = set()
1230
+
1231
+ cleaned_sentences = []
1232
+ for sentence in sentences:
1233
+ for i in range(len(sentence)):
1234
+ word, tag = sentence[i]
1235
+ word = word.lower() # normalize
1236
+ symbols.add(word) # log this word
1237
+ # Clean up the tag.
1238
+ tag = tag_re.match(tag).group()
1239
+ tag_set.add(tag)
1240
+ sentence[i] = (word, tag) # store cleaned-up tagged token
1241
+ cleaned_sentences += [sentence]
1242
+
1243
+ return cleaned_sentences, list(tag_set), list(symbols)
1244
+
1245
+
1246
+ def demo_pos():
1247
+ # demonstrates POS tagging using supervised training
1248
+
1249
+ print()
1250
+ print("HMM POS tagging demo")
1251
+ print()
1252
+
1253
+ print("Training HMM...")
1254
+ labelled_sequences, tag_set, symbols = load_pos(20000)
1255
+ trainer = HiddenMarkovModelTrainer(tag_set, symbols)
1256
+ hmm = trainer.train_supervised(
1257
+ labelled_sequences[10:],
1258
+ estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins),
1259
+ )
1260
+
1261
+ print("Testing...")
1262
+ hmm.test(labelled_sequences[:10], verbose=True)
1263
+
1264
+
1265
+ def _untag(sentences):
1266
+ unlabeled = []
1267
+ for sentence in sentences:
1268
+ unlabeled.append([(token[_TEXT], None) for token in sentence])
1269
+ return unlabeled
1270
+
1271
+
1272
+ def demo_pos_bw(
1273
+ test=10, supervised=20, unsupervised=10, verbose=True, max_iterations=5
1274
+ ):
1275
+ # demonstrates the Baum-Welch algorithm in POS tagging
1276
+
1277
+ print()
1278
+ print("Baum-Welch demo for POS tagging")
1279
+ print()
1280
+
1281
+ print("Training HMM (supervised, %d sentences)..." % supervised)
1282
+
1283
+ sentences, tag_set, symbols = load_pos(test + supervised + unsupervised)
1284
+
1285
+ symbols = set()
1286
+ for sentence in sentences:
1287
+ for token in sentence:
1288
+ symbols.add(token[_TEXT])
1289
+
1290
+ trainer = HiddenMarkovModelTrainer(tag_set, list(symbols))
1291
+ hmm = trainer.train_supervised(
1292
+ sentences[test : test + supervised],
1293
+ estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins),
1294
+ )
1295
+
1296
+ hmm.test(sentences[:test], verbose=verbose)
1297
+
1298
+ print("Training (unsupervised, %d sentences)..." % unsupervised)
1299
+ # it's rather slow - so only use 10 samples by default
1300
+ unlabeled = _untag(sentences[test + supervised :])
1301
+ hmm = trainer.train_unsupervised(
1302
+ unlabeled, model=hmm, max_iterations=max_iterations
1303
+ )
1304
+ hmm.test(sentences[:test], verbose=verbose)
1305
+
1306
+
1307
+ def demo_bw():
1308
+ # demo Baum Welch by generating some sequences and then performing
1309
+ # unsupervised training on them
1310
+
1311
+ print()
1312
+ print("Baum-Welch demo for market example")
1313
+ print()
1314
+
1315
+ model, states, symbols = _market_hmm_example()
1316
+
1317
+ # generate some random sequences
1318
+ training = []
1319
+ import random
1320
+
1321
+ rng = random.Random()
1322
+ rng.seed(0)
1323
+ for i in range(10):
1324
+ item = model.random_sample(rng, 5)
1325
+ training.append([(i[0], None) for i in item])
1326
+
1327
+ # train on those examples, starting with the model that generated them
1328
+ trainer = HiddenMarkovModelTrainer(states, symbols)
1329
+ hmm = trainer.train_unsupervised(training, model=model, max_iterations=1000)
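
The trainer API exercised by demo_pos_bw() and demo_bw() above composes naturally: a supervised pass builds an initial model, and train_unsupervised() refines it with Baum-Welch on untagged data. A minimal sketch of that combination on the toy market states and symbols, with invented training sentences:

    from nltk.probability import LidstoneProbDist
    from nltk.tag.hmm import HiddenMarkovModelTrainer

    states = ["bull", "bear", "static"]
    symbols = ["up", "down", "unchanged"]

    # Invented labelled data: lists of (symbol, state) pairs.
    tagged = [
        [("up", "bull"), ("up", "bull"), ("down", "bear")],
        [("unchanged", "static"), ("down", "bear"), ("down", "bear")],
    ]
    # Untagged data uses (symbol, None) pairs, as in _untag() above.
    untagged = [[("up", None), ("unchanged", None), ("down", None)]]

    trainer = HiddenMarkovModelTrainer(states, symbols)
    hmm = trainer.train_supervised(
        tagged, estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins)
    )
    # A few EM iterations starting from the supervised model.
    hmm = trainer.train_unsupervised(untagged, model=hmm, max_iterations=5)
    print(hmm.tag(["up", "down", "unchanged"]))
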
venv/lib/python3.10/site-packages/nltk/tag/hunpos.py ADDED
@@ -0,0 +1,142 @@
1
+ # Natural Language Toolkit: Interface to the HunPos POS-tagger
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Peter Ljunglöf <[email protected]>
5
+ # Dávid Márk Nemeskey <[email protected]> (modifications)
6
+ # Attila Zséder <[email protected]> (modifications)
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ A module for interfacing with the HunPos open-source POS-tagger.
12
+ """
13
+
14
+ import os
15
+ from subprocess import PIPE, Popen
16
+
17
+ from nltk.internals import find_binary, find_file
18
+ from nltk.tag.api import TaggerI
19
+
20
+ _hunpos_url = "https://code.google.com/p/hunpos/"
21
+
22
+ _hunpos_charset = "ISO-8859-1"
23
+ """The default encoding used by hunpos: ISO-8859-1."""
24
+
25
+
26
+ class HunposTagger(TaggerI):
27
+ """
28
+ A class for pos tagging with HunPos. The input is the paths to:
29
+ - a model trained on training data
30
+ - (optionally) the path to the hunpos-tag binary
31
+ - (optionally) the encoding of the training data (default: ISO-8859-1)
32
+
33
+ Check whether the required "hunpos-tag" binary is available:
34
+
35
+ >>> from nltk.test.setup_fixt import check_binary
36
+ >>> check_binary('hunpos-tag')
37
+
38
+ Example:
39
+ >>> from nltk.tag import HunposTagger
40
+ >>> ht = HunposTagger('en_wsj.model')
41
+ >>> ht.tag('What is the airspeed of an unladen swallow ?'.split())
42
+ [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'VB'), ('?', '.')]
43
+ >>> ht.close()
44
+
45
+ This class communicates with the hunpos-tag binary via pipes. When the
46
+ tagger object is no longer needed, the close() method should be called to
47
+ free system resources. The class supports the context manager interface; if
48
+ used in a with statement, the close() method is invoked automatically:
49
+
50
+ >>> with HunposTagger('en_wsj.model') as ht:
51
+ ... ht.tag('What is the airspeed of an unladen swallow ?'.split())
52
+ ...
53
+ [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'VB'), ('?', '.')]
54
+ """
55
+
56
+ def __init__(
57
+ self, path_to_model, path_to_bin=None, encoding=_hunpos_charset, verbose=False
58
+ ):
59
+ """
60
+ Starts the hunpos-tag executable and establishes a connection with it.
61
+
62
+ :param path_to_model: The model file.
63
+ :param path_to_bin: The hunpos-tag binary.
64
+ :param encoding: The encoding used by the model. Unicode tokens
65
+ passed to the tag() and tag_sents() methods are converted to
66
+ this charset when they are sent to hunpos-tag.
67
+ The default is ISO-8859-1 (Latin-1).
68
+
69
+ This parameter is ignored for str tokens, which are sent as-is.
70
+ The caller must ensure that tokens are encoded in the right charset.
71
+ """
72
+ self._closed = True
73
+ hunpos_paths = [
74
+ ".",
75
+ "/usr/bin",
76
+ "/usr/local/bin",
77
+ "/opt/local/bin",
78
+ "/Applications/bin",
79
+ "~/bin",
80
+ "~/Applications/bin",
81
+ ]
82
+ hunpos_paths = list(map(os.path.expanduser, hunpos_paths))
83
+
84
+ self._hunpos_bin = find_binary(
85
+ "hunpos-tag",
86
+ path_to_bin,
87
+ env_vars=("HUNPOS_TAGGER",),
88
+ searchpath=hunpos_paths,
89
+ url=_hunpos_url,
90
+ verbose=verbose,
91
+ )
92
+
93
+ self._hunpos_model = find_file(
94
+ path_to_model, env_vars=("HUNPOS_TAGGER",), verbose=verbose
95
+ )
96
+ self._encoding = encoding
97
+ self._hunpos = Popen(
98
+ [self._hunpos_bin, self._hunpos_model],
99
+ shell=False,
100
+ stdin=PIPE,
101
+ stdout=PIPE,
102
+ stderr=PIPE,
103
+ )
104
+ self._closed = False
105
+
106
+ def __del__(self):
107
+ self.close()
108
+
109
+ def close(self):
110
+ """Closes the pipe to the hunpos executable."""
111
+ if not self._closed:
112
+ self._hunpos.communicate()
113
+ self._closed = True
114
+
115
+ def __enter__(self):
116
+ return self
117
+
118
+ def __exit__(self, exc_type, exc_value, traceback):
119
+ self.close()
120
+
121
+ def tag(self, tokens):
122
+ """Tags a single sentence: a list of words.
123
+ The tokens should not contain any newline characters.
124
+ """
125
+ for token in tokens:
126
+ assert "\n" not in token, "Tokens should not contain newlines"
127
+ if isinstance(token, str):
128
+ token = token.encode(self._encoding)
129
+ self._hunpos.stdin.write(token + b"\n")
130
+ # We write a final empty line to tell hunpos that the sentence is finished:
131
+ self._hunpos.stdin.write(b"\n")
132
+ self._hunpos.stdin.flush()
133
+
134
+ tagged_tokens = []
135
+ for token in tokens:
136
+ tagged = self._hunpos.stdout.readline().strip().split(b"\t")
137
+ tag = tagged[1] if len(tagged) > 1 else None
138
+ tagged_tokens.append((token, tag))
139
+ # We have to read (and dismiss) the final empty line:
140
+ self._hunpos.stdout.readline()
141
+
142
+ return tagged_tokens
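
The tag() method above speaks a simple line protocol with the hunpos-tag process: one encoded token per line, a blank line to terminate the sentence, and one token<TAB>tag line back per token followed by a trailing blank line. A minimal sketch of how such a reply is paired with the input tokens, using an invented raw reply in place of the real subprocess:

    # Invented raw reply from hunpos-tag for "What is this ?" (illustrative only).
    raw_reply = [b"What\tWP", b"is\tVBZ", b"this\tDT", b"?\t."]

    def pair_with_tags(tokens, reply_lines):
        """Mirror HunposTagger.tag(): take the second tab-separated field as the tag."""
        tagged = []
        for token, line in zip(tokens, reply_lines):
            fields = line.strip().split(b"\t")
            tagged.append((token, fields[1] if len(fields) > 1 else None))
        return tagged

    print(pair_with_tags("What is this ?".split(), raw_reply))
    # [('What', b'WP'), ('is', b'VBZ'), ('this', b'DT'), ('?', b'.')]
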
venv/lib/python3.10/site-packages/nltk/tag/mapping.py ADDED
@@ -0,0 +1,136 @@
1
+ # Natural Language Toolkit: Tagset Mapping
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Nathan Schneider <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Interface for converting POS tags from various treebanks
11
+ to the universal tagset of Petrov, Das, & McDonald.
12
+
13
+ The tagset consists of the following 12 coarse tags:
14
+
15
+ VERB - verbs (all tenses and modes)
16
+ NOUN - nouns (common and proper)
17
+ PRON - pronouns
18
+ ADJ - adjectives
19
+ ADV - adverbs
20
+ ADP - adpositions (prepositions and postpositions)
21
+ CONJ - conjunctions
22
+ DET - determiners
23
+ NUM - cardinal numbers
24
+ PRT - particles or other function words
25
+ X - other: foreign words, typos, abbreviations
26
+ . - punctuation
27
+
28
+ @see: https://arxiv.org/abs/1104.2086 and https://code.google.com/p/universal-pos-tags/
29
+
30
+ """
31
+
32
+ from collections import defaultdict
33
+ from os.path import join
34
+
35
+ from nltk.data import load
36
+
37
+ _UNIVERSAL_DATA = "taggers/universal_tagset"
38
+ _UNIVERSAL_TAGS = (
39
+ "VERB",
40
+ "NOUN",
41
+ "PRON",
42
+ "ADJ",
43
+ "ADV",
44
+ "ADP",
45
+ "CONJ",
46
+ "DET",
47
+ "NUM",
48
+ "PRT",
49
+ "X",
50
+ ".",
51
+ )
52
+
53
+ # _MAPPINGS = defaultdict(lambda: defaultdict(dict))
54
+ # the mapping between tagset T1 and T2 returns UNK if applied to an unrecognized tag
55
+ _MAPPINGS = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: "UNK")))
56
+
57
+
58
+ def _load_universal_map(fileid):
59
+ contents = load(join(_UNIVERSAL_DATA, fileid + ".map"), format="text")
60
+
61
+ # When mapping to the Universal Tagset,
62
+ # map unknown inputs to 'X' not 'UNK'
63
+ _MAPPINGS[fileid]["universal"].default_factory = lambda: "X"
64
+
65
+ for line in contents.splitlines():
66
+ line = line.strip()
67
+ if line == "":
68
+ continue
69
+ fine, coarse = line.split("\t")
70
+
71
+ assert coarse in _UNIVERSAL_TAGS, f"Unexpected coarse tag: {coarse}"
72
+ assert (
73
+ fine not in _MAPPINGS[fileid]["universal"]
74
+ ), f"Multiple entries for original tag: {fine}"
75
+
76
+ _MAPPINGS[fileid]["universal"][fine] = coarse
77
+
78
+
79
+ def tagset_mapping(source, target):
80
+ """
81
+ Retrieve the mapping dictionary between tagsets.
82
+
83
+ >>> tagset_mapping('ru-rnc', 'universal') == {'!': '.', 'A': 'ADJ', 'C': 'CONJ', 'AD': 'ADV',\
84
+ 'NN': 'NOUN', 'VG': 'VERB', 'COMP': 'CONJ', 'NC': 'NUM', 'VP': 'VERB', 'P': 'ADP',\
85
+ 'IJ': 'X', 'V': 'VERB', 'Z': 'X', 'VI': 'VERB', 'YES_NO_SENT': 'X', 'PTCL': 'PRT'}
86
+ True
87
+ """
88
+
89
+ if source not in _MAPPINGS or target not in _MAPPINGS[source]:
90
+ if target == "universal":
91
+ _load_universal_map(source)
92
+ # Added the new Russian National Corpus mappings because the
93
+ # Russian model for nltk.pos_tag() uses it.
94
+ _MAPPINGS["ru-rnc-new"]["universal"] = {
95
+ "A": "ADJ",
96
+ "A-PRO": "PRON",
97
+ "ADV": "ADV",
98
+ "ADV-PRO": "PRON",
99
+ "ANUM": "ADJ",
100
+ "CONJ": "CONJ",
101
+ "INTJ": "X",
102
+ "NONLEX": ".",
103
+ "NUM": "NUM",
104
+ "PARENTH": "PRT",
105
+ "PART": "PRT",
106
+ "PR": "ADP",
107
+ "PRAEDIC": "PRT",
108
+ "PRAEDIC-PRO": "PRON",
109
+ "S": "NOUN",
110
+ "S-PRO": "PRON",
111
+ "V": "VERB",
112
+ }
113
+
114
+ return _MAPPINGS[source][target]
115
+
116
+
117
+ def map_tag(source, target, source_tag):
118
+ """
119
+ Maps the tag from the source tagset to the target tagset.
120
+
121
+ >>> map_tag('en-ptb', 'universal', 'VBZ')
122
+ 'VERB'
123
+ >>> map_tag('en-ptb', 'universal', 'VBP')
124
+ 'VERB'
125
+ >>> map_tag('en-ptb', 'universal', '``')
126
+ '.'
127
+ """
128
+
129
+ # we need a systematic approach to naming
130
+ if target == "universal":
131
+ if source == "wsj":
132
+ source = "en-ptb"
133
+ if source == "brown":
134
+ source = "en-brown"
135
+
136
+ return tagset_mapping(source, target)[source_tag]
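
The nested defaultdict behind _MAPPINGS gives every tagset mapping an "UNK" fallback, and _load_universal_map() swaps that fallback for "X" when the target is the universal tagset. A small standalone sketch of the same pattern, using an invented "toy" source name:

    from collections import defaultdict

    # source -> target -> fine tag -> coarse tag, defaulting to "UNK".
    mappings = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: "UNK")))

    # For a universal target, unknown fine tags fall back to "X" instead.
    mappings["toy"]["universal"].default_factory = lambda: "X"
    mappings["toy"]["universal"]["NN"] = "NOUN"

    print(mappings["toy"]["universal"]["NN"])   # NOUN
    print(mappings["toy"]["universal"]["???"])  # X   (unknown fine tag)
    print(mappings["toy"]["other"]["NN"])       # UNK (no mapping loaded)
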
venv/lib/python3.10/site-packages/nltk/tag/perceptron.py ADDED
@@ -0,0 +1,371 @@
1
+ # This module is a port of the Textblob Averaged Perceptron Tagger
2
+ # Author: Matthew Honnibal <[email protected]>,
3
+ # Long Duong <[email protected]> (NLTK port)
4
+ # URL: <https://github.com/sloria/textblob-aptagger>
5
+ # <https://www.nltk.org/>
6
+ # Copyright 2013 Matthew Honnibal
7
+ # NLTK modifications Copyright 2015 The NLTK Project
8
+ #
9
+ # This module is provided under the terms of the MIT License.
10
+
11
+ import logging
12
+ import pickle
13
+ import random
14
+ from collections import defaultdict
15
+
16
+ from nltk import jsontags
17
+ from nltk.data import find, load
18
+ from nltk.tag.api import TaggerI
19
+
20
+ try:
21
+ import numpy as np
22
+ except ImportError:
23
+ pass
24
+
25
+ PICKLE = "averaged_perceptron_tagger.pickle"
26
+
27
+
28
+ @jsontags.register_tag
29
+ class AveragedPerceptron:
30
+
31
+ """An averaged perceptron, as implemented by Matthew Honnibal.
32
+
33
+ See more implementation details here:
34
+ https://explosion.ai/blog/part-of-speech-pos-tagger-in-python
35
+ """
36
+
37
+ json_tag = "nltk.tag.perceptron.AveragedPerceptron"
38
+
39
+ def __init__(self, weights=None):
40
+ # Each feature gets its own weight vector, so weights is a dict-of-dicts
41
+ self.weights = weights if weights else {}
42
+ self.classes = set()
43
+ # The accumulated values, for the averaging. These will be keyed by
44
+ # feature/clas tuples
45
+ self._totals = defaultdict(int)
46
+ # The last time the feature was changed, for the averaging. Also
47
+ # keyed by feature/clas tuples
48
+ # (tstamps is short for timestamps)
49
+ self._tstamps = defaultdict(int)
50
+ # Number of instances seen
51
+ self.i = 0
52
+
53
+ def _softmax(self, scores):
54
+ s = np.fromiter(scores.values(), dtype=float)
55
+ exps = np.exp(s)
56
+ return exps / np.sum(exps)
57
+
58
+ def predict(self, features, return_conf=False):
59
+ """Dot-product the features and current weights and return the best label."""
60
+ scores = defaultdict(float)
61
+ for feat, value in features.items():
62
+ if feat not in self.weights or value == 0:
63
+ continue
64
+ weights = self.weights[feat]
65
+ for label, weight in weights.items():
66
+ scores[label] += value * weight
67
+
68
+ # Do a secondary alphabetic sort, for stability
69
+ best_label = max(self.classes, key=lambda label: (scores[label], label))
70
+ # compute the confidence
71
+ conf = max(self._softmax(scores)) if return_conf == True else None
72
+
73
+ return best_label, conf
74
+
75
+ def update(self, truth, guess, features):
76
+ """Update the feature weights."""
77
+
78
+ def upd_feat(c, f, w, v):
79
+ param = (f, c)
80
+ self._totals[param] += (self.i - self._tstamps[param]) * w
81
+ self._tstamps[param] = self.i
82
+ self.weights[f][c] = w + v
83
+
84
+ self.i += 1
85
+ if truth == guess:
86
+ return None
87
+ for f in features:
88
+ weights = self.weights.setdefault(f, {})
89
+ upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
90
+ upd_feat(guess, f, weights.get(guess, 0.0), -1.0)
91
+
92
+ def average_weights(self):
93
+ """Average weights from all iterations."""
94
+ for feat, weights in self.weights.items():
95
+ new_feat_weights = {}
96
+ for clas, weight in weights.items():
97
+ param = (feat, clas)
98
+ total = self._totals[param]
99
+ total += (self.i - self._tstamps[param]) * weight
100
+ averaged = round(total / self.i, 3)
101
+ if averaged:
102
+ new_feat_weights[clas] = averaged
103
+ self.weights[feat] = new_feat_weights
104
+
105
+ def save(self, path):
106
+ """Save the pickled model weights."""
107
+ with open(path, "wb") as fout:
108
+ return pickle.dump(dict(self.weights), fout)
109
+
110
+ def load(self, path):
111
+ """Load the pickled model weights."""
112
+ self.weights = load(path)
113
+
114
+ def encode_json_obj(self):
115
+ return self.weights
116
+
117
+ @classmethod
118
+ def decode_json_obj(cls, obj):
119
+ return cls(obj)
120
+
121
+
122
+ @jsontags.register_tag
123
+ class PerceptronTagger(TaggerI):
124
+
125
+ """
126
+ Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal.
127
+ See more implementation details here:
128
+ https://explosion.ai/blog/part-of-speech-pos-tagger-in-python
129
+
130
+ >>> from nltk.tag.perceptron import PerceptronTagger
131
+
132
+ Train the model
133
+
134
+ >>> tagger = PerceptronTagger(load=False)
135
+
136
+ >>> tagger.train([[('today','NN'),('is','VBZ'),('good','JJ'),('day','NN')],
137
+ ... [('yes','NNS'),('it','PRP'),('beautiful','JJ')]])
138
+
139
+ >>> tagger.tag(['today','is','a','beautiful','day'])
140
+ [('today', 'NN'), ('is', 'PRP'), ('a', 'PRP'), ('beautiful', 'JJ'), ('day', 'NN')]
141
+
142
+ Use the pretrained model (the default constructor)
143
+
144
+ >>> pretrain = PerceptronTagger()
145
+
146
+ >>> pretrain.tag('The quick brown fox jumps over the lazy dog'.split())
147
+ [('The', 'DT'), ('quick', 'JJ'), ('brown', 'NN'), ('fox', 'NN'), ('jumps', 'VBZ'), ('over', 'IN'), ('the', 'DT'), ('lazy', 'JJ'), ('dog', 'NN')]
148
+
149
+ >>> pretrain.tag("The red cat".split())
150
+ [('The', 'DT'), ('red', 'JJ'), ('cat', 'NN')]
151
+ """
152
+
153
+ json_tag = "nltk.tag.sequential.PerceptronTagger"
154
+
155
+ START = ["-START-", "-START2-"]
156
+ END = ["-END-", "-END2-"]
157
+
158
+ def __init__(self, load=True):
159
+ """
160
+ :param load: Load the pickled model upon instantiation.
161
+ """
162
+ self.model = AveragedPerceptron()
163
+ self.tagdict = {}
164
+ self.classes = set()
165
+ if load:
166
+ AP_MODEL_LOC = "file:" + str(
167
+ find("taggers/averaged_perceptron_tagger/" + PICKLE)
168
+ )
169
+ self.load(AP_MODEL_LOC)
170
+
171
+ def tag(self, tokens, return_conf=False, use_tagdict=True):
172
+ """
173
+ Tag tokenized sentences.
174
+ :param tokens: list of words
175
+ :type tokens: list(str)
176
+ """
177
+ prev, prev2 = self.START
178
+ output = []
179
+
180
+ context = self.START + [self.normalize(w) for w in tokens] + self.END
181
+ for i, word in enumerate(tokens):
182
+ tag, conf = (
183
+ (self.tagdict.get(word), 1.0) if use_tagdict == True else (None, None)
184
+ )
185
+ if not tag:
186
+ features = self._get_features(i, word, context, prev, prev2)
187
+ tag, conf = self.model.predict(features, return_conf)
188
+ output.append((word, tag, conf) if return_conf == True else (word, tag))
189
+
190
+ prev2 = prev
191
+ prev = tag
192
+
193
+ return output
194
+
195
+ def train(self, sentences, save_loc=None, nr_iter=5):
196
+ """Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
197
+ controls the number of Perceptron training iterations.
198
+
199
+ :param sentences: A list or iterator of sentences, where each sentence
200
+ is a list of (words, tags) tuples.
201
+ :param save_loc: If not ``None``, saves a pickled model in this location.
202
+ :param nr_iter: Number of training iterations.
203
+ """
204
+ # We'd like to allow ``sentences`` to be either a list or an iterator,
205
+ # the latter being especially important for a large training dataset.
206
+ # Because ``self._make_tagdict(sentences)`` runs regardless, we make
207
+ # it populate ``self._sentences`` (a list) with all the sentences.
208
+ # This saves the overhead of just iterating through ``sentences`` to
209
+ # get the list by ``sentences = list(sentences)``.
210
+
211
+ self._sentences = list() # to be populated by self._make_tagdict...
212
+ self._make_tagdict(sentences)
213
+ self.model.classes = self.classes
214
+ for iter_ in range(nr_iter):
215
+ c = 0
216
+ n = 0
217
+ for sentence in self._sentences:
218
+ words, tags = zip(*sentence)
219
+
220
+ prev, prev2 = self.START
221
+ context = self.START + [self.normalize(w) for w in words] + self.END
222
+ for i, word in enumerate(words):
223
+ guess = self.tagdict.get(word)
224
+ if not guess:
225
+ feats = self._get_features(i, word, context, prev, prev2)
226
+ guess, _ = self.model.predict(feats)
227
+ self.model.update(tags[i], guess, feats)
228
+ prev2 = prev
229
+ prev = guess
230
+ c += guess == tags[i]
231
+ n += 1
232
+ random.shuffle(self._sentences)
233
+ logging.info(f"Iter {iter_}: {c}/{n}={_pc(c, n)}")
234
+
235
+ # We don't need the training sentences anymore, and we don't want to
236
+ # waste space on them when we pickle the trained tagger.
237
+ self._sentences = None
238
+
239
+ self.model.average_weights()
240
+ # Pickle as a binary file
241
+ if save_loc is not None:
242
+ with open(save_loc, "wb") as fout:
243
+ # changed protocol from -1 to 2 to make pickling Python 2 compatible
244
+ pickle.dump((self.model.weights, self.tagdict, self.classes), fout, 2)
245
+
246
+ def load(self, loc):
247
+ """
248
+ :param loc: Load a pickled model at location.
249
+ :type loc: str
250
+ """
251
+
252
+ self.model.weights, self.tagdict, self.classes = load(loc)
253
+ self.model.classes = self.classes
254
+
255
+ def encode_json_obj(self):
256
+ return self.model.weights, self.tagdict, list(self.classes)
257
+
258
+ @classmethod
259
+ def decode_json_obj(cls, obj):
260
+ tagger = cls(load=False)
261
+ tagger.model.weights, tagger.tagdict, tagger.classes = obj
262
+ tagger.classes = set(tagger.classes)
263
+ tagger.model.classes = tagger.classes
264
+ return tagger
265
+
266
+ def normalize(self, word):
267
+ """
268
+ Normalization used in pre-processing.
269
+ - All words are lower cased
270
+ - Groups of digits of length 4 are represented as !YEAR;
271
+ - Other digits are represented as !DIGITS
272
+
273
+ :rtype: str
274
+ """
275
+ if "-" in word and word[0] != "-":
276
+ return "!HYPHEN"
277
+ if word.isdigit() and len(word) == 4:
278
+ return "!YEAR"
279
+ if word and word[0].isdigit():
280
+ return "!DIGITS"
281
+ return word.lower()
282
+
283
+ def _get_features(self, i, word, context, prev, prev2):
284
+ """Map tokens into a feature representation, implemented as a
285
+ {hashable: int} dict. If the features change, a new model must be
286
+ trained.
287
+ """
288
+
289
+ def add(name, *args):
290
+ features[" ".join((name,) + tuple(args))] += 1
291
+
292
+ i += len(self.START)
293
+ features = defaultdict(int)
294
+ # It's useful to have a constant feature, which acts sort of like a prior
295
+ add("bias")
296
+ add("i suffix", word[-3:])
297
+ add("i pref1", word[0] if word else "")
298
+ add("i-1 tag", prev)
299
+ add("i-2 tag", prev2)
300
+ add("i tag+i-2 tag", prev, prev2)
301
+ add("i word", context[i])
302
+ add("i-1 tag+i word", prev, context[i])
303
+ add("i-1 word", context[i - 1])
304
+ add("i-1 suffix", context[i - 1][-3:])
305
+ add("i-2 word", context[i - 2])
306
+ add("i+1 word", context[i + 1])
307
+ add("i+1 suffix", context[i + 1][-3:])
308
+ add("i+2 word", context[i + 2])
309
+ return features
310
+
311
+ def _make_tagdict(self, sentences):
312
+ """
313
+ Make a tag dictionary for single-tag words.
314
+ :param sentences: A list of list of (word, tag) tuples.
315
+ """
316
+ counts = defaultdict(lambda: defaultdict(int))
317
+ for sentence in sentences:
318
+ self._sentences.append(sentence)
319
+ for word, tag in sentence:
320
+ counts[word][tag] += 1
321
+ self.classes.add(tag)
322
+ freq_thresh = 20
323
+ ambiguity_thresh = 0.97
324
+ for word, tag_freqs in counts.items():
325
+ tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
326
+ n = sum(tag_freqs.values())
327
+ # Don't add rare words to the tag dictionary
328
+ # Only add quite unambiguous words
329
+ if n >= freq_thresh and (mode / n) >= ambiguity_thresh:
330
+ self.tagdict[word] = tag
331
+
332
+
333
+ def _pc(n, d):
334
+ return (n / d) * 100
335
+
336
+
337
+ def _load_data_conll_format(filename):
338
+ print("Read from file: ", filename)
339
+ with open(filename, "rb") as fin:
340
+ sentences = []
341
+ sentence = []
342
+ for line in fin.readlines():
343
+ line = line.strip()
344
+ # print line
345
+ if len(line) == 0:
346
+ sentences.append(sentence)
347
+ sentence = []
348
+ continue
349
+ tokens = line.split("\t")
350
+ word = tokens[1]
351
+ tag = tokens[4]
352
+ sentence.append((word, tag))
353
+ return sentences
354
+
355
+
356
+ def _get_pretrain_model():
357
+ # Train and test on English part of ConLL data (WSJ part of Penn Treebank)
358
+ # Train: section 2-11
359
+ # Test : section 23
360
+ tagger = PerceptronTagger()
361
+ training = _load_data_conll_format("english_ptb_train.conll")
362
+ testing = _load_data_conll_format("english_ptb_test.conll")
363
+ print("Size of training and testing (sentence)", len(training), len(testing))
364
+ # Train and save the model
365
+ tagger.train(training, PICKLE)
366
+ print("Accuracy : ", tagger.accuracy(testing))
367
+
368
+
369
+ if __name__ == "__main__":
370
+ # _get_pretrain_model()
371
+ pass
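
average_weights() above relies on lazy bookkeeping: for each (feature, class) pair, _totals accumulates the weight multiplied by the number of updates it stayed unchanged, and _tstamps records when it last changed, so the final average only has to back-fill one interval per pair. A worked sketch with invented numbers for a single pair:

    i = 10        # updates seen so far (self.i)
    weight = 2.0  # current weight of the (feature, class) pair
    total = 6.0   # accumulated weight * duration up to the last change
    tstamp = 7    # update index at which the weight last changed

    # Back-fill the interval [tstamp, i) during which the weight was constant,
    # exactly as in average_weights().
    total += (i - tstamp) * weight   # 6.0 + 3 * 2.0 = 12.0
    averaged = round(total / i, 3)
    print(averaged)                  # 1.2
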
venv/lib/python3.10/site-packages/nltk/tag/senna.py ADDED
@@ -0,0 +1,134 @@
1
+ # Natural Language Toolkit: Senna POS Tagger
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Rami Al-Rfou' <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ """
9
+ Senna POS tagger, NER Tagger, Chunk Tagger
10
+
11
+ The input is:
12
+
13
+ - path to the directory that contains SENNA executables. If the path is incorrect,
14
+ SennaTagger will automatically search for executable file specified in SENNA environment variable
15
+ - (optionally) the encoding of the input data (default:utf-8)
16
+
17
+ Note: Unit tests for this module can be found in test/unit/test_senna.py
18
+
19
+ >>> from nltk.tag import SennaTagger
20
+ >>> tagger = SennaTagger('/usr/share/senna-v3.0') # doctest: +SKIP
21
+ >>> tagger.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP
22
+ [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'),
23
+ ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'NN'), ('?', '.')]
24
+
25
+ >>> from nltk.tag import SennaChunkTagger
26
+ >>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0') # doctest: +SKIP
27
+ >>> chktagger.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP
28
+ [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'),
29
+ ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'),
30
+ ('?', 'O')]
31
+
32
+ >>> from nltk.tag import SennaNERTagger
33
+ >>> nertagger = SennaNERTagger('/usr/share/senna-v3.0') # doctest: +SKIP
34
+ >>> nertagger.tag('Shakespeare theatre was in London .'.split()) # doctest: +SKIP
35
+ [('Shakespeare', 'B-PER'), ('theatre', 'O'), ('was', 'O'), ('in', 'O'),
36
+ ('London', 'B-LOC'), ('.', 'O')]
37
+ >>> nertagger.tag('UN headquarters are in NY , USA .'.split()) # doctest: +SKIP
38
+ [('UN', 'B-ORG'), ('headquarters', 'O'), ('are', 'O'), ('in', 'O'),
39
+ ('NY', 'B-LOC'), (',', 'O'), ('USA', 'B-LOC'), ('.', 'O')]
40
+ """
41
+
42
+ from nltk.classify import Senna
43
+
44
+
45
+ class SennaTagger(Senna):
46
+ def __init__(self, path, encoding="utf-8"):
47
+ super().__init__(path, ["pos"], encoding)
48
+
49
+ def tag_sents(self, sentences):
50
+ """
51
+ Applies the tag method over a list of sentences. This method will return
52
+ for each sentence a list of tuples of (word, tag).
53
+ """
54
+ tagged_sents = super().tag_sents(sentences)
55
+ for i in range(len(tagged_sents)):
56
+ for j in range(len(tagged_sents[i])):
57
+ annotations = tagged_sents[i][j]
58
+ tagged_sents[i][j] = (annotations["word"], annotations["pos"])
59
+ return tagged_sents
60
+
61
+
62
+ class SennaChunkTagger(Senna):
63
+ def __init__(self, path, encoding="utf-8"):
64
+ super().__init__(path, ["chk"], encoding)
65
+
66
+ def tag_sents(self, sentences):
67
+ """
68
+ Applies the tag method over a list of sentences. This method will return
69
+ for each sentence a list of tuples of (word, tag).
70
+ """
71
+ tagged_sents = super().tag_sents(sentences)
72
+ for i in range(len(tagged_sents)):
73
+ for j in range(len(tagged_sents[i])):
74
+ annotations = tagged_sents[i][j]
75
+ tagged_sents[i][j] = (annotations["word"], annotations["chk"])
76
+ return tagged_sents
77
+
78
+ def bio_to_chunks(self, tagged_sent, chunk_type):
79
+ """
80
+ Extracts the chunks in a BIO chunk-tagged sentence.
81
+
82
+ >>> from nltk.tag import SennaChunkTagger
83
+ >>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0') # doctest: +SKIP
84
+ >>> sent = 'What is the airspeed of an unladen swallow ?'.split()
85
+ >>> tagged_sent = chktagger.tag(sent) # doctest: +SKIP
86
+ >>> tagged_sent # doctest: +SKIP
87
+ [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'),
88
+ ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'),
89
+ ('?', 'O')]
90
+ >>> list(chktagger.bio_to_chunks(tagged_sent, chunk_type='NP')) # doctest: +SKIP
91
+ [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow', '5-6-7')]
92
+
93
+ :param tagged_sent: A list of tuples of word and BIO chunk tag.
94
+ :type tagged_sent: list(tuple)
95
+ :param tagged_sent: The chunk tag that users want to extract, e.g. 'NP' or 'VP'
96
+ :type tagged_sent: str
97
+
98
+ :return: An iterable of tuples of chunks that users want to extract
99
+ and their corresponding indices.
100
+ :rtype: iter(tuple(str))
101
+ """
102
+ current_chunk = []
103
+ current_chunk_position = []
104
+ for idx, word_pos in enumerate(tagged_sent):
105
+ word, pos = word_pos
106
+ if "-" + chunk_type in pos: # Append the word to the current_chunk.
107
+ current_chunk.append(word)
108
+ current_chunk_position.append(idx)
109
+ else:
110
+ if current_chunk: # Flush the full chunk when out of an NP.
111
+ _chunk_str = " ".join(current_chunk)
112
+ _chunk_pos_str = "-".join(map(str, current_chunk_position))
113
+ yield _chunk_str, _chunk_pos_str
114
+ current_chunk = []
115
+ current_chunk_position = []
116
+ if current_chunk: # Flush the last chunk.
117
+ yield " ".join(current_chunk), "-".join(map(str, current_chunk_position))
118
+
119
+
120
+ class SennaNERTagger(Senna):
121
+ def __init__(self, path, encoding="utf-8"):
122
+ super().__init__(path, ["ner"], encoding)
123
+
124
+ def tag_sents(self, sentences):
125
+ """
126
+ Applies the tag method over a list of sentences. This method will return
127
+ for each sentence a list of tuples of (word, tag).
128
+ """
129
+ tagged_sents = super().tag_sents(sentences)
130
+ for i in range(len(tagged_sents)):
131
+ for j in range(len(tagged_sents[i])):
132
+ annotations = tagged_sents[i][j]
133
+ tagged_sents[i][j] = (annotations["word"], annotations["ner"])
134
+ return tagged_sents
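
The grouping logic in SennaChunkTagger.bio_to_chunks() depends only on the (word, BIO tag) pairs, so it can be exercised without a SENNA installation. A standalone sketch of the same loop, reproducing the doctest output shown above:

    def bio_to_chunks(tagged_sent, chunk_type):
        """Yield (chunk text, '-'-joined token indices) for each chunk_type chunk."""
        chunk, positions = [], []
        for idx, (word, tag) in enumerate(tagged_sent):
            if "-" + chunk_type in tag:
                chunk.append(word)
                positions.append(idx)
            elif chunk:
                yield " ".join(chunk), "-".join(map(str, positions))
                chunk, positions = [], []
        if chunk:
            yield " ".join(chunk), "-".join(map(str, positions))

    sent = [("What", "B-NP"), ("is", "B-VP"), ("the", "B-NP"), ("airspeed", "I-NP"),
            ("of", "B-PP"), ("an", "B-NP"), ("unladen", "I-NP"), ("swallow", "I-NP"),
            ("?", "O")]
    print(list(bio_to_chunks(sent, "NP")))
    # [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow', '5-6-7')]
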
venv/lib/python3.10/site-packages/nltk/tag/sequential.py ADDED
@@ -0,0 +1,755 @@
1
+ # Natural Language Toolkit: Sequential Backoff Taggers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # Tiago Tresoldi <[email protected]> (original affix tagger)
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ Classes for tagging sentences sequentially, left to right. The
12
+ abstract base class SequentialBackoffTagger serves as the base
13
+ class for all the taggers in this module. Tagging of individual words
14
+ is performed by the method ``choose_tag()``, which is defined by
15
+ subclasses of SequentialBackoffTagger. If a tagger is unable to
16
+ determine a tag for the specified token, then its backoff tagger is
17
+ consulted instead. Any SequentialBackoffTagger may serve as a
18
+ backoff tagger for any other SequentialBackoffTagger.
19
+ """
20
+ import ast
21
+ import re
22
+ from abc import abstractmethod
23
+ from typing import List, Optional, Tuple
24
+
25
+ from nltk import jsontags
26
+ from nltk.classify import NaiveBayesClassifier
27
+ from nltk.probability import ConditionalFreqDist
28
+ from nltk.tag.api import FeaturesetTaggerI, TaggerI
29
+
30
+
31
+ ######################################################################
32
+ # Abstract Base Classes
33
+ ######################################################################
34
+ class SequentialBackoffTagger(TaggerI):
35
+ """
36
+ An abstract base class for taggers that tags words sequentially,
37
+ left to right. Tagging of individual words is performed by the
38
+ ``choose_tag()`` method, which should be defined by subclasses. If
39
+ a tagger is unable to determine a tag for the specified token,
40
+ then its backoff tagger is consulted.
41
+
42
+ :ivar _taggers: A list of all the taggers that should be tried to
43
+ tag a token (i.e., self and its backoff taggers).
44
+ """
45
+
46
+ def __init__(self, backoff=None):
47
+ if backoff is None:
48
+ self._taggers = [self]
49
+ else:
50
+ self._taggers = [self] + backoff._taggers
51
+
52
+ @property
53
+ def backoff(self):
54
+ """The backoff tagger for this tagger."""
55
+ return self._taggers[1] if len(self._taggers) > 1 else None
56
+
57
+ def tag(self, tokens):
58
+ # docs inherited from TaggerI
59
+ tags = []
60
+ for i in range(len(tokens)):
61
+ tags.append(self.tag_one(tokens, i, tags))
62
+ return list(zip(tokens, tags))
63
+
64
+ def tag_one(self, tokens, index, history):
65
+ """
66
+ Determine an appropriate tag for the specified token, and
67
+ return that tag. If this tagger is unable to determine a tag
68
+ for the specified token, then its backoff tagger is consulted.
69
+
70
+ :rtype: str
71
+ :type tokens: list
72
+ :param tokens: The list of words that are being tagged.
73
+ :type index: int
74
+ :param index: The index of the word whose tag should be
75
+ returned.
76
+ :type history: list(str)
77
+ :param history: A list of the tags for all words before *index*.
78
+ """
79
+ tag = None
80
+ for tagger in self._taggers:
81
+ tag = tagger.choose_tag(tokens, index, history)
82
+ if tag is not None:
83
+ break
84
+ return tag
85
+
86
+ @abstractmethod
87
+ def choose_tag(self, tokens, index, history):
88
+ """
89
+ Decide which tag should be used for the specified token, and
90
+ return that tag. If this tagger is unable to determine a tag
91
+ for the specified token, return None -- do not consult
92
+ the backoff tagger. This method should be overridden by
93
+ subclasses of SequentialBackoffTagger.
94
+
95
+ :rtype: str
96
+ :type tokens: list
97
+ :param tokens: The list of words that are being tagged.
98
+ :type index: int
99
+ :param index: The index of the word whose tag should be
100
+ returned.
101
+ :type history: list(str)
102
+ :param history: A list of the tags for all words before *index*.
103
+ """
104
+
105
+
106
+ class ContextTagger(SequentialBackoffTagger):
107
+ """
108
+ An abstract base class for sequential backoff taggers that choose
109
+ a tag for a token based on the value of its "context". Different
110
+ subclasses are used to define different contexts.
111
+
112
+ A ContextTagger chooses the tag for a token by calculating the
113
+ token's context, and looking up the corresponding tag in a table.
114
+ This table can be constructed manually; or it can be automatically
115
+ constructed based on a training corpus, using the ``_train()``
116
+ factory method.
117
+
118
+ :ivar _context_to_tag: Dictionary mapping contexts to tags.
119
+ """
120
+
121
+ def __init__(self, context_to_tag, backoff=None):
122
+ """
123
+ :param context_to_tag: A dictionary mapping contexts to tags.
124
+ :param backoff: The backoff tagger that should be used for this tagger.
125
+ """
126
+ super().__init__(backoff)
127
+ self._context_to_tag = context_to_tag if context_to_tag else {}
128
+
129
+ @abstractmethod
130
+ def context(self, tokens, index, history):
131
+ """
132
+ :return: the context that should be used to look up the tag
133
+ for the specified token; or None if the specified token
134
+ should not be handled by this tagger.
135
+ :rtype: (hashable)
136
+ """
137
+
138
+ def choose_tag(self, tokens, index, history):
139
+ context = self.context(tokens, index, history)
140
+ return self._context_to_tag.get(context)
141
+
142
+ def size(self):
143
+ """
144
+ :return: The number of entries in the table used by this
145
+ tagger to map from contexts to tags.
146
+ """
147
+ return len(self._context_to_tag)
148
+
149
+ def __repr__(self):
150
+ return f"<{self.__class__.__name__}: size={self.size()}>"
151
+
152
+ def _train(self, tagged_corpus, cutoff=0, verbose=False):
153
+ """
154
+ Initialize this ContextTagger's ``_context_to_tag`` table
155
+ based on the given training data. In particular, for each
156
+ context ``c`` in the training data, set
157
+ ``_context_to_tag[c]`` to the most frequent tag for that
158
+ context. However, exclude any contexts that are already
159
+ tagged perfectly by the backoff tagger(s).
160
+
161
+ The old value of ``self._context_to_tag`` (if any) is discarded.
162
+
163
+ :param tagged_corpus: A tagged corpus. Each item should be
164
+ a list of (word, tag tuples.
165
+ :param cutoff: If the most likely tag for a context occurs
166
+ fewer than cutoff times, then exclude it from the
167
+ context-to-tag table for the new tagger.
168
+ """
169
+
170
+ token_count = hit_count = 0
171
+
172
+ # A context is considered 'useful' if it's not already tagged
173
+ # perfectly by the backoff tagger.
174
+ useful_contexts = set()
175
+
176
+ # Count how many times each tag occurs in each context.
177
+ fd = ConditionalFreqDist()
178
+ for sentence in tagged_corpus:
179
+ tokens, tags = zip(*sentence)
180
+ for index, (token, tag) in enumerate(sentence):
181
+ # Record the event.
182
+ token_count += 1
183
+ context = self.context(tokens, index, tags[:index])
184
+ if context is None:
185
+ continue
186
+ fd[context][tag] += 1
187
+ # If the backoff got it wrong, this context is useful:
188
+ if self.backoff is None or tag != self.backoff.tag_one(
189
+ tokens, index, tags[:index]
190
+ ):
191
+ useful_contexts.add(context)
192
+
193
+ # Build the context_to_tag table -- for each context, figure
194
+ # out what the most likely tag is. Only include contexts that
195
+ # we've seen at least `cutoff` times.
196
+ for context in useful_contexts:
197
+ best_tag = fd[context].max()
198
+ hits = fd[context][best_tag]
199
+ if hits > cutoff:
200
+ self._context_to_tag[context] = best_tag
201
+ hit_count += hits
202
+
203
+ # Display some stats, if requested.
204
+ if verbose:
205
+ size = len(self._context_to_tag)
206
+ backoff = 100 - (hit_count * 100.0) / token_count
207
+ pruning = 100 - (size * 100.0) / len(fd.conditions())
208
+ print("[Trained Unigram tagger:", end=" ")
209
+ print(
210
+ "size={}, backoff={:.2f}%, pruning={:.2f}%]".format(
211
+ size, backoff, pruning
212
+ )
213
+ )
214
+
215
+
216
+ ######################################################################
217
+ # Tagger Classes
218
+ ######################################################################
219
+
220
+
221
+ @jsontags.register_tag
222
+ class DefaultTagger(SequentialBackoffTagger):
223
+ """
224
+ A tagger that assigns the same tag to every token.
225
+
226
+ >>> from nltk.tag import DefaultTagger
227
+ >>> default_tagger = DefaultTagger('NN')
228
+ >>> list(default_tagger.tag('This is a test'.split()))
229
+ [('This', 'NN'), ('is', 'NN'), ('a', 'NN'), ('test', 'NN')]
230
+
231
+ This tagger is recommended as a backoff tagger, in cases where
232
+ a more powerful tagger is unable to assign a tag to the word
233
+ (e.g. because the word was not seen during training).
234
+
235
+ :param tag: The tag to assign to each token
236
+ :type tag: str
237
+ """
238
+
239
+ json_tag = "nltk.tag.sequential.DefaultTagger"
240
+
241
+ def __init__(self, tag):
242
+ self._tag = tag
243
+ super().__init__(None)
244
+
245
+ def encode_json_obj(self):
246
+ return self._tag
247
+
248
+ @classmethod
249
+ def decode_json_obj(cls, obj):
250
+ tag = obj
251
+ return cls(tag)
252
+
253
+ def choose_tag(self, tokens, index, history):
254
+ return self._tag # ignore token and history
255
+
256
+ def __repr__(self):
257
+ return f"<DefaultTagger: tag={self._tag}>"
258
+
259
+
260
+ @jsontags.register_tag
261
+ class NgramTagger(ContextTagger):
262
+ """
263
+ A tagger that chooses a token's tag based on its word string and
264
+ on the preceding n word's tags. In particular, a tuple
265
+ (tags[i-n:i-1], words[i]) is looked up in a table, and the
266
+ corresponding tag is returned. N-gram taggers are typically
267
+ trained on a tagged corpus.
268
+
269
+ Train a new NgramTagger using the given training data or
270
+ the supplied model. In particular, construct a new tagger
271
+ whose table maps from each context (tag[i-n:i-1], word[i])
272
+ to the most frequent tag for that context. But exclude any
273
+ contexts that are already tagged perfectly by the backoff
274
+ tagger.
275
+
276
+ :param train: A tagged corpus consisting of a list of tagged
277
+ sentences, where each sentence is a list of (word, tag) tuples.
278
+ :param backoff: A backoff tagger, to be used by the new
279
+ tagger if it encounters an unknown context.
280
+ :param cutoff: If the most likely tag for a context occurs
281
+ fewer than *cutoff* times, then exclude it from the
282
+ context-to-tag table for the new tagger.
283
+ """
284
+
285
+ json_tag = "nltk.tag.sequential.NgramTagger"
286
+
287
+ def __init__(
288
+ self, n, train=None, model=None, backoff=None, cutoff=0, verbose=False
289
+ ):
290
+ self._n = n
291
+ self._check_params(train, model)
292
+
293
+ super().__init__(model, backoff)
294
+
295
+ if train:
296
+ self._train(train, cutoff, verbose)
297
+
298
+ def encode_json_obj(self):
299
+ _context_to_tag = {repr(k): v for k, v in self._context_to_tag.items()}
300
+ if "NgramTagger" in self.__class__.__name__:
301
+ return self._n, _context_to_tag, self.backoff
302
+ else:
303
+ return _context_to_tag, self.backoff
304
+
305
+ @classmethod
306
+ def decode_json_obj(cls, obj):
307
+ try:
308
+ _n, _context_to_tag, backoff = obj
309
+ except ValueError:
310
+ _context_to_tag, backoff = obj
311
+
312
+ if not _context_to_tag:
313
+ return backoff
314
+
315
+ _context_to_tag = {ast.literal_eval(k): v for k, v in _context_to_tag.items()}
316
+
317
+ if "NgramTagger" in cls.__name__:
318
+ return cls(_n, model=_context_to_tag, backoff=backoff)
319
+ else:
320
+ return cls(model=_context_to_tag, backoff=backoff)
321
+
322
+ def context(self, tokens, index, history):
323
+ tag_context = tuple(history[max(0, index - self._n + 1) : index])
324
+ return tag_context, tokens[index]
325
+
326
+
327
+ @jsontags.register_tag
328
+ class UnigramTagger(NgramTagger):
329
+ """
330
+ Unigram Tagger
331
+
332
+ The UnigramTagger finds the most likely tag for each word in a training
333
+ corpus, and then uses that information to assign tags to new tokens.
334
+
335
+ >>> from nltk.corpus import brown
336
+ >>> from nltk.tag import UnigramTagger
337
+ >>> test_sent = brown.sents(categories='news')[0]
338
+ >>> unigram_tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500])
339
+ >>> for tok, tag in unigram_tagger.tag(test_sent):
340
+ ... print("({}, {}), ".format(tok, tag)) # doctest: +NORMALIZE_WHITESPACE
341
+ (The, AT), (Fulton, NP-TL), (County, NN-TL), (Grand, JJ-TL),
342
+ (Jury, NN-TL), (said, VBD), (Friday, NR), (an, AT),
343
+ (investigation, NN), (of, IN), (Atlanta's, NP$), (recent, JJ),
344
+ (primary, NN), (election, NN), (produced, VBD), (``, ``),
345
+ (no, AT), (evidence, NN), ('', ''), (that, CS), (any, DTI),
346
+ (irregularities, NNS), (took, VBD), (place, NN), (., .),
347
+
348
+ :param train: The corpus of training data, a list of tagged sentences
349
+ :type train: list(list(tuple(str, str)))
350
+ :param model: The tagger model
351
+ :type model: dict
352
+ :param backoff: Another tagger which this tagger will consult when it is
353
+ unable to tag a word
354
+ :type backoff: TaggerI
355
+ :param cutoff: The number of instances of training data the tagger must see
356
+ in order not to use the backoff tagger
357
+ :type cutoff: int
358
+ """
359
+
360
+ json_tag = "nltk.tag.sequential.UnigramTagger"
361
+
362
+ def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False):
363
+ super().__init__(1, train, model, backoff, cutoff, verbose)
364
+
365
+ def context(self, tokens, index, history):
366
+ return tokens[index]
367
+
368
+
369
+ @jsontags.register_tag
370
+ class BigramTagger(NgramTagger):
371
+ """
372
+ A tagger that chooses a token's tag based its word string and on
373
+ the preceding words' tag. In particular, a tuple consisting
374
+ of the previous tag and the word is looked up in a table, and
375
+ the corresponding tag is returned.
376
+
377
+ :param train: The corpus of training data, a list of tagged sentences
378
+ :type train: list(list(tuple(str, str)))
379
+ :param model: The tagger model
380
+ :type model: dict
381
+ :param backoff: Another tagger which this tagger will consult when it is
382
+ unable to tag a word
383
+ :type backoff: TaggerI
384
+ :param cutoff: The number of instances of training data the tagger must see
385
+ in order not to use the backoff tagger
386
+ :type cutoff: int
387
+ """
388
+
389
+ json_tag = "nltk.tag.sequential.BigramTagger"
390
+
391
+ def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False):
392
+ super().__init__(2, train, model, backoff, cutoff, verbose)
393
+
394
+
395
+ @jsontags.register_tag
396
+ class TrigramTagger(NgramTagger):
397
+ """
398
+ A tagger that chooses a token's tag based its word string and on
399
+ the preceding two words' tags. In particular, a tuple consisting
400
+ of the previous two tags and the word is looked up in a table, and
401
+ the corresponding tag is returned.
402
+
403
+ :param train: The corpus of training data, a list of tagged sentences
404
+ :type train: list(list(tuple(str, str)))
405
+ :param model: The tagger model
406
+ :type model: dict
407
+ :param backoff: Another tagger which this tagger will consult when it is
408
+ unable to tag a word
409
+ :type backoff: TaggerI
410
+ :param cutoff: The number of instances of training data the tagger must see
411
+ in order not to use the backoff tagger
412
+ :type cutoff: int
413
+ """
414
+
415
+ json_tag = "nltk.tag.sequential.TrigramTagger"
416
+
417
+ def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False):
418
+ super().__init__(3, train, model, backoff, cutoff, verbose)
419
+
420
+
421
+ @jsontags.register_tag
422
+ class AffixTagger(ContextTagger):
423
+ """
424
+ A tagger that chooses a token's tag based on a leading or trailing
425
+ substring of its word string. (It is important to note that these
426
+ substrings are not necessarily "true" morphological affixes). In
427
+ particular, a fixed-length substring of the word is looked up in a
428
+ table, and the corresponding tag is returned. Affix taggers are
429
+ typically constructed by training them on a tagged corpus.
430
+
431
+ Construct a new affix tagger.
432
+
433
+ :param affix_length: The length of the affixes that should be
434
+ considered during training and tagging. Use negative
435
+ numbers for suffixes.
436
+ :param min_stem_length: Any words whose length is less than
437
+ min_stem_length+abs(affix_length) will be assigned a
438
+ tag of None by this tagger.
439
+ """
440
+
441
+ json_tag = "nltk.tag.sequential.AffixTagger"
442
+
443
+ def __init__(
444
+ self,
445
+ train=None,
446
+ model=None,
447
+ affix_length=-3,
448
+ min_stem_length=2,
449
+ backoff=None,
450
+ cutoff=0,
451
+ verbose=False,
452
+ ):
453
+
454
+ self._check_params(train, model)
455
+
456
+ super().__init__(model, backoff)
457
+
458
+ self._affix_length = affix_length
459
+ self._min_word_length = min_stem_length + abs(affix_length)
460
+
461
+ if train:
462
+ self._train(train, cutoff, verbose)
463
+
464
+ def encode_json_obj(self):
465
+ return (
466
+ self._affix_length,
467
+ self._min_word_length,
468
+ self._context_to_tag,
469
+ self.backoff,
470
+ )
471
+
472
+ @classmethod
473
+ def decode_json_obj(cls, obj):
474
+ _affix_length, _min_word_length, _context_to_tag, backoff = obj
475
+ return cls(
476
+ affix_length=_affix_length,
477
+ min_stem_length=_min_word_length - abs(_affix_length),
478
+ model=_context_to_tag,
479
+ backoff=backoff,
480
+ )
481
+
482
+ def context(self, tokens, index, history):
483
+ token = tokens[index]
484
+ if len(token) < self._min_word_length:
485
+ return None
486
+ elif self._affix_length > 0:
487
+ return token[: self._affix_length]
488
+ else:
489
+ return token[self._affix_length :]
490
+
491
+
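As a rough illustration of the affix parameters, the sketch below (Brown corpus assumed) trains a suffix tagger on the last three characters of each word; any word shorter than min_stem_length + abs(affix_length) = 5 characters is handed to the backoff tagger:

from nltk.corpus import brown
from nltk.tag import AffixTagger, DefaultTagger

train_sents = brown.tagged_sents(categories='news')[:500]
# affix_length=-3 keys the context table on three-character suffixes
suffix_tagger = AffixTagger(train_sents, affix_length=-3, min_stem_length=2,
                            backoff=DefaultTagger('NN'))
print(suffix_tagger.tag(['unhappiness', 'is', 'reversible']))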
492
+ @jsontags.register_tag
493
+ class RegexpTagger(SequentialBackoffTagger):
494
+ r"""
495
+ Regular Expression Tagger
496
+
497
+ The RegexpTagger assigns tags to tokens by comparing their
498
+ word strings to a series of regular expressions. The following tagger
499
+ uses word suffixes to make guesses about the correct Brown Corpus part
500
+ of speech tag:
501
+
502
+ >>> from nltk.corpus import brown
503
+ >>> from nltk.tag import RegexpTagger
504
+ >>> test_sent = brown.sents(categories='news')[0]
505
+ >>> regexp_tagger = RegexpTagger(
506
+ ... [(r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), # cardinal numbers
507
+ ... (r'(The|the|A|a|An|an)$', 'AT'), # articles
508
+ ... (r'.*able$', 'JJ'), # adjectives
509
+ ... (r'.*ness$', 'NN'), # nouns formed from adjectives
510
+ ... (r'.*ly$', 'RB'), # adverbs
511
+ ... (r'.*s$', 'NNS'), # plural nouns
512
+ ... (r'.*ing$', 'VBG'), # gerunds
513
+ ... (r'.*ed$', 'VBD'), # past tense verbs
514
+ ... (r'.*', 'NN') # nouns (default)
515
+ ... ])
516
+ >>> regexp_tagger
517
+ <Regexp Tagger: size=9>
518
+ >>> regexp_tagger.tag(test_sent) # doctest: +NORMALIZE_WHITESPACE
519
+ [('The', 'AT'), ('Fulton', 'NN'), ('County', 'NN'), ('Grand', 'NN'), ('Jury', 'NN'),
520
+ ('said', 'NN'), ('Friday', 'NN'), ('an', 'AT'), ('investigation', 'NN'), ('of', 'NN'),
521
+ ("Atlanta's", 'NNS'), ('recent', 'NN'), ('primary', 'NN'), ('election', 'NN'),
522
+ ('produced', 'VBD'), ('``', 'NN'), ('no', 'NN'), ('evidence', 'NN'), ("''", 'NN'),
523
+ ('that', 'NN'), ('any', 'NN'), ('irregularities', 'NNS'), ('took', 'NN'),
524
+ ('place', 'NN'), ('.', 'NN')]
525
+
526
+ :type regexps: list(tuple(str, str))
527
+ :param regexps: A list of ``(regexp, tag)`` pairs, each of
528
+ which indicates that a word matching ``regexp`` should
529
+ be tagged with ``tag``. The pairs will be evaluated in
530
+ order. If none of the regexps match a word, then the
531
+ optional backoff tagger is invoked; if no backoff tagger
532
+ has been given, the word is assigned the tag None.
533
+ """
534
+
535
+ json_tag = "nltk.tag.sequential.RegexpTagger"
536
+
537
+ def __init__(
538
+ self, regexps: List[Tuple[str, str]], backoff: Optional[TaggerI] = None
539
+ ):
540
+ super().__init__(backoff)
541
+ self._regexps = []
542
+ for regexp, tag in regexps:
543
+ try:
544
+ self._regexps.append((re.compile(regexp), tag))
545
+ except Exception as e:
546
+ raise Exception(
547
+ f"Invalid RegexpTagger regexp: {e}\n- regexp: {regexp!r}\n- tag: {tag!r}"
548
+ ) from e
549
+
550
+ def encode_json_obj(self):
551
+ return [(regexp.pattern, tag) for regexp, tag in self._regexps], self.backoff
552
+
553
+ @classmethod
554
+ def decode_json_obj(cls, obj):
555
+ regexps, backoff = obj
556
+ return cls(regexps, backoff)
557
+
558
+ def choose_tag(self, tokens, index, history):
559
+ for regexp, tag in self._regexps:
560
+ if re.match(regexp, tokens[index]):
561
+ return tag
562
+ return None
563
+
564
+ def __repr__(self):
565
+ return f"<Regexp Tagger: size={len(self._regexps)}>"
566
+
567
+
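In practice a RegexpTagger is often used as the fallback at the bottom of a backoff chain rather than on its own; a minimal sketch (the patterns here are illustrative only, Brown corpus assumed):

from nltk.corpus import brown
from nltk.tag import RegexpTagger, UnigramTagger

fallback = RegexpTagger([
    (r'^-?[0-9]+(\.[0-9]+)?$', 'CD'),   # cardinal numbers
    (r'.*ing$', 'VBG'),                 # gerunds
    (r'.*ed$', 'VBD'),                  # past tense verbs
    (r'.*', 'NN'),                      # default
])
train_sents = brown.tagged_sents(categories='news')[:500]
tagger = UnigramTagger(train_sents, backoff=fallback)
print(tagger.tag('He was overspending in 1961 .'.split()))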
568
+ class ClassifierBasedTagger(SequentialBackoffTagger, FeaturesetTaggerI):
569
+ """
570
+ A sequential tagger that uses a classifier to choose the tag for
571
+ each token in a sentence. The featureset input for the classifier
572
+ is generated by a feature detector function::
573
+
574
+ feature_detector(tokens, index, history) -> featureset
575
+
576
+ Where tokens is the list of unlabeled tokens in the sentence;
577
+ index is the index of the token for which feature detection
578
+ should be performed; and history is a list of the tags for all
579
+ tokens before index.
580
+
581
+ Construct a new classifier-based sequential tagger.
582
+
583
+ :param feature_detector: A function used to generate the
584
+ featureset input for the classifier::
585
+ feature_detector(tokens, index, history) -> featureset
586
+
587
+ :param train: A tagged corpus consisting of a list of tagged
588
+ sentences, where each sentence is a list of (word, tag) tuples.
589
+
590
+ :param backoff: A backoff tagger, to be used by the new tagger
591
+ if it encounters an unknown context.
592
+
593
+ :param classifier_builder: A function used to train a new
594
+ classifier based on the data in *train*. It should take
595
+ one argument, a list of labeled featuresets (i.e.,
596
+ (featureset, label) tuples).
597
+
598
+ :param classifier: The classifier that should be used by the
599
+ tagger. This is only useful if you want to manually
600
+ construct the classifier; normally, you would use *train*
601
+ instead.
602
+
603
+ :param backoff: A backoff tagger, used if this tagger is
604
+ unable to determine a tag for a given token.
605
+
606
+ :param cutoff_prob: If specified, then this tagger will fall
607
+ back on its backoff tagger if the probability of the most
608
+ likely tag is less than *cutoff_prob*.
609
+ """
610
+
611
+ def __init__(
612
+ self,
613
+ feature_detector=None,
614
+ train=None,
615
+ classifier_builder=NaiveBayesClassifier.train,
616
+ classifier=None,
617
+ backoff=None,
618
+ cutoff_prob=None,
619
+ verbose=False,
620
+ ):
621
+ self._check_params(train, classifier)
622
+
623
+ super().__init__(backoff)
624
+
625
+ if (train and classifier) or (not train and not classifier):
626
+ raise ValueError(
627
+ "Must specify either training data or " "trained classifier."
628
+ )
629
+
630
+ if feature_detector is not None:
631
+ self._feature_detector = feature_detector
632
+ # The feature detector function, used to generate a featureset
633
+ # for each token: feature_detector(tokens, index, history) -> featureset
634
+
635
+ self._cutoff_prob = cutoff_prob
636
+ """Cutoff probability for tagging -- if the probability of the
637
+ most likely tag is less than this, then use backoff."""
638
+
639
+ self._classifier = classifier
640
+ """The classifier used to choose a tag for each token."""
641
+
642
+ if train:
643
+ self._train(train, classifier_builder, verbose)
644
+
645
+ def choose_tag(self, tokens, index, history):
646
+ # Use our feature detector to get the featureset.
647
+ featureset = self.feature_detector(tokens, index, history)
648
+
649
+ # Use the classifier to pick a tag. If a cutoff probability
650
+ # was specified, then check that the tag's probability is
651
+ # higher than that cutoff first; otherwise, return None.
652
+ if self._cutoff_prob is None:
653
+ return self._classifier.classify(featureset)
654
+
655
+ pdist = self._classifier.prob_classify(featureset)
656
+ tag = pdist.max()
657
+ return tag if pdist.prob(tag) >= self._cutoff_prob else None
658
+
659
+ def _train(self, tagged_corpus, classifier_builder, verbose):
660
+ """
661
+ Build a new classifier, based on the given training data
662
+ *tagged_corpus*.
663
+ """
664
+
665
+ classifier_corpus = []
666
+ if verbose:
667
+ print("Constructing training corpus for classifier.")
668
+
669
+ for sentence in tagged_corpus:
670
+ history = []
671
+ untagged_sentence, tags = zip(*sentence)
672
+ for index in range(len(sentence)):
673
+ featureset = self.feature_detector(untagged_sentence, index, history)
674
+ classifier_corpus.append((featureset, tags[index]))
675
+ history.append(tags[index])
676
+
677
+ if verbose:
678
+ print(f"Training classifier ({len(classifier_corpus)} instances)")
679
+ self._classifier = classifier_builder(classifier_corpus)
680
+
681
+ def __repr__(self):
682
+ return f"<ClassifierBasedTagger: {self._classifier}>"
683
+
684
+ def feature_detector(self, tokens, index, history):
685
+ """
686
+ Apply this tagger's feature detector to the token at *index* and
687
+ return the resulting featureset. The feature detector is a
688
+ function with the signature::
689
+
690
+ feature_detector(tokens, index, history) -> featureset
691
+
692
+ See ``classifier()``
693
+ """
694
+ return self._feature_detector(tokens, index, history)
695
+
696
+ def classifier(self):
697
+ """
698
+ Return the classifier that this tagger uses to choose a tag
699
+ for each word in a sentence. The input for this classifier is
700
+ generated using this tagger's feature detector.
701
+ See ``feature_detector()``
702
+ """
703
+ return self._classifier
704
+
705
+
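A minimal sketch of plugging in a custom feature detector (the detector below is deliberately tiny and purely illustrative); it assumes the Brown corpus and trains the default NaiveBayes classifier:

from nltk.corpus import brown
from nltk.tag.sequential import ClassifierBasedTagger

def tiny_features(tokens, index, history):
    # featureset built from the current word and the previous tag only
    word = tokens[index].lower()
    return {'word': word,
            'suffix2': word[-2:],
            'prevtag': history[index - 1] if index > 0 else '<START>'}

train_sents = brown.tagged_sents(categories='news')[:200]
tagger = ClassifierBasedTagger(feature_detector=tiny_features, train=train_sents)
print(tagger.tag('The dog barked loudly .'.split()))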
706
+ class ClassifierBasedPOSTagger(ClassifierBasedTagger):
707
+ """
708
+ A classifier based part of speech tagger.
709
+ """
710
+
711
+ def feature_detector(self, tokens, index, history):
712
+ word = tokens[index]
713
+ if index == 0:
714
+ prevword = prevprevword = None
715
+ prevtag = prevprevtag = None
716
+ elif index == 1:
717
+ prevword = tokens[index - 1].lower()
718
+ prevprevword = None
719
+ prevtag = history[index - 1]
720
+ prevprevtag = None
721
+ else:
722
+ prevword = tokens[index - 1].lower()
723
+ prevprevword = tokens[index - 2].lower()
724
+ prevtag = history[index - 1]
725
+ prevprevtag = history[index - 2]
726
+
727
+ if re.match(r"[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$", word):
728
+ shape = "number"
729
+ elif re.match(r"\W+$", word):
730
+ shape = "punct"
731
+ elif re.match("[A-Z][a-z]+$", word):
732
+ shape = "upcase"
733
+ elif re.match("[a-z]+$", word):
734
+ shape = "downcase"
735
+ elif re.match(r"\w+$", word):
736
+ shape = "mixedcase"
737
+ else:
738
+ shape = "other"
739
+
740
+ features = {
741
+ "prevtag": prevtag,
742
+ "prevprevtag": prevprevtag,
743
+ "word": word,
744
+ "word.lower": word.lower(),
745
+ "suffix3": word.lower()[-3:],
746
+ "suffix2": word.lower()[-2:],
747
+ "suffix1": word.lower()[-1:],
748
+ "prevprevword": prevprevword,
749
+ "prevword": prevword,
750
+ "prevtag+word": f"{prevtag}+{word.lower()}",
751
+ "prevprevtag+word": f"{prevprevtag}+{word.lower()}",
752
+ "prevword+word": f"{prevword}+{word.lower()}",
753
+ "shape": shape,
754
+ }
755
+ return features
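ClassifierBasedPOSTagger uses the built-in feature detector defined above; a short usage sketch, again assuming the Brown corpus and a small training slice:

from nltk.corpus import brown
from nltk.tag.sequential import ClassifierBasedPOSTagger

train_sents = brown.tagged_sents(categories='news')[:200]
cpos = ClassifierBasedPOSTagger(train=train_sents)
print(cpos.tag('The grand jury commented on a number of topics .'.split()))
print(cpos.accuracy(brown.tagged_sents(categories='news')[200:210]))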
venv/lib/python3.10/site-packages/nltk/tag/stanford.py ADDED
@@ -0,0 +1,236 @@
1
+ # Natural Language Toolkit: Interface to the Stanford Part-of-speech and Named-Entity Taggers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Nitin Madnani <[email protected]>
5
+ # Rami Al-Rfou' <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ A module for interfacing with the Stanford taggers.
11
+
12
+ Tagger models need to be downloaded from https://nlp.stanford.edu/software
13
+ and the STANFORD_MODELS environment variable set (a colon-separated
14
+ list of paths).
15
+
16
+ For more details see the documentation for StanfordPOSTagger and StanfordNERTagger.
17
+ """
18
+
19
+ import os
20
+ import tempfile
21
+ import warnings
22
+ from abc import abstractmethod
23
+ from subprocess import PIPE
24
+
25
+ from nltk.internals import _java_options, config_java, find_file, find_jar, java
26
+ from nltk.tag.api import TaggerI
27
+
28
+ _stanford_url = "https://nlp.stanford.edu/software"
29
+
30
+
31
+ class StanfordTagger(TaggerI):
32
+ """
33
+ An interface to Stanford taggers. Subclasses must define:
34
+
35
+ - ``_cmd`` property: A property that returns the command that will be
36
+ executed.
37
+ - ``_SEPARATOR``: Class constant that represents the character that
38
+ is used to separate the tokens from their tags.
39
+ - ``_JAR`` file: Class constant that represents the jar file name.
40
+ """
41
+
42
+ _SEPARATOR = ""
43
+ _JAR = ""
44
+
45
+ def __init__(
46
+ self,
47
+ model_filename,
48
+ path_to_jar=None,
49
+ encoding="utf8",
50
+ verbose=False,
51
+ java_options="-mx1000m",
52
+ ):
53
+ # Raise deprecation warning.
54
+ warnings.warn(
55
+ str(
56
+ "\nThe StanfordTokenizer will "
57
+ "be deprecated in version 3.2.6.\n"
58
+ "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead."
59
+ ),
60
+ DeprecationWarning,
61
+ stacklevel=2,
62
+ )
63
+
64
+ if not self._JAR:
65
+ warnings.warn(
66
+ "The StanfordTagger class is not meant to be "
67
+ "instantiated directly. Did you mean "
68
+ "StanfordPOSTagger or StanfordNERTagger?"
69
+ )
70
+ self._stanford_jar = find_jar(
71
+ self._JAR, path_to_jar, searchpath=(), url=_stanford_url, verbose=verbose
72
+ )
73
+
74
+ self._stanford_model = find_file(
75
+ model_filename, env_vars=("STANFORD_MODELS",), verbose=verbose
76
+ )
77
+
78
+ self._encoding = encoding
79
+ self.java_options = java_options
80
+
81
+ @property
82
+ @abstractmethod
83
+ def _cmd(self):
84
+ """
85
+ A property that returns the command that will be executed.
86
+ """
87
+
88
+ def tag(self, tokens):
89
+ # This function should return a list of tuples rather than a list of lists
90
+ return sum(self.tag_sents([tokens]), [])
91
+
92
+ def tag_sents(self, sentences):
93
+ encoding = self._encoding
94
+ default_options = " ".join(_java_options)
95
+ config_java(options=self.java_options, verbose=False)
96
+
97
+ # Create a temporary input file
98
+ _input_fh, self._input_file_path = tempfile.mkstemp(text=True)
99
+
100
+ cmd = list(self._cmd)
101
+ cmd.extend(["-encoding", encoding])
102
+
103
+ # Write the actual sentences to the temporary input file
104
+ _input_fh = os.fdopen(_input_fh, "wb")
105
+ _input = "\n".join(" ".join(x) for x in sentences)
106
+ if isinstance(_input, str) and encoding:
107
+ _input = _input.encode(encoding)
108
+ _input_fh.write(_input)
109
+ _input_fh.close()
110
+
111
+ # Run the tagger and get the output
112
+ stanpos_output, _stderr = java(
113
+ cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE
114
+ )
115
+ stanpos_output = stanpos_output.decode(encoding)
116
+
117
+ # Delete the temporary file
118
+ os.unlink(self._input_file_path)
119
+
120
+ # Return java configurations to their default values
121
+ config_java(options=default_options, verbose=False)
122
+
123
+ return self.parse_output(stanpos_output, sentences)
124
+
125
+ def parse_output(self, text, sentences=None):
126
+ # Output the tagged sentences
127
+ tagged_sentences = []
128
+ for tagged_sentence in text.strip().split("\n"):
129
+ sentence = []
130
+ for tagged_word in tagged_sentence.strip().split():
131
+ word_tags = tagged_word.strip().split(self._SEPARATOR)
132
+ sentence.append(
133
+ ("".join(word_tags[:-1]), word_tags[-1].replace("0", "").upper())
134
+ )
135
+ tagged_sentences.append(sentence)
136
+ return tagged_sentences
137
+
138
+
139
+ class StanfordPOSTagger(StanfordTagger):
140
+ """
141
+ A class for POS tagging with the Stanford Tagger. The input is the paths to:
142
+ - a model trained on training data
143
+ - (optionally) the path to the stanford tagger jar file. If not specified here,
144
+ then this jar file must be specified in the CLASSPATH environment variable.
145
+ - (optionally) the encoding of the training data (default: UTF-8)
146
+
147
+ Example:
148
+
149
+ >>> from nltk.tag import StanfordPOSTagger
150
+ >>> st = StanfordPOSTagger('english-bidirectional-distsim.tagger') # doctest: +SKIP
151
+ >>> st.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP
152
+ [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')]
153
+ """
154
+
155
+ _SEPARATOR = "_"
156
+ _JAR = "stanford-postagger.jar"
157
+
158
+ def __init__(self, *args, **kwargs):
159
+ super().__init__(*args, **kwargs)
160
+
161
+ @property
162
+ def _cmd(self):
163
+ return [
164
+ "edu.stanford.nlp.tagger.maxent.MaxentTagger",
165
+ "-model",
166
+ self._stanford_model,
167
+ "-textFile",
168
+ self._input_file_path,
169
+ "-tokenize",
170
+ "false",
171
+ "-outputFormatOptions",
172
+ "keepEmptySentences",
173
+ ]
174
+
175
+
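Because the jar and model files are located via find_jar and find_file, a typical setup points the CLASSPATH and STANFORD_MODELS environment variables at a local installation before constructing the tagger; the paths below are hypothetical placeholders, not part of this module:

import os
from nltk.tag import StanfordPOSTagger

# hypothetical install locations -- adjust to wherever the Stanford tagger was unpacked
os.environ['CLASSPATH'] = '/opt/stanford-postagger/stanford-postagger.jar'
os.environ['STANFORD_MODELS'] = '/opt/stanford-postagger/models'

st = StanfordPOSTagger('english-bidirectional-distsim.tagger', java_options='-mx2g')
print(st.tag('What is the airspeed of an unladen swallow ?'.split()))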
176
+ class StanfordNERTagger(StanfordTagger):
177
+ """
178
+ A class for Named-Entity Tagging with the Stanford Tagger. The input is the paths to:
179
+
180
+ - a model trained on training data
181
+ - (optionally) the path to the stanford tagger jar file. If not specified here,
182
+ then this jar file must be specified in the CLASSPATH environment variable.
183
+ - (optionally) the encoding of the training data (default: UTF-8)
184
+
185
+ Example:
186
+
187
+ >>> from nltk.tag import StanfordNERTagger
188
+ >>> st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz') # doctest: +SKIP
189
+ >>> st.tag('Rami Eid is studying at Stony Brook University in NY'.split()) # doctest: +SKIP
190
+ [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'),
191
+ ('at', 'O'), ('Stony', 'ORGANIZATION'), ('Brook', 'ORGANIZATION'),
192
+ ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'LOCATION')]
193
+ """
194
+
195
+ _SEPARATOR = "/"
196
+ _JAR = "stanford-ner.jar"
197
+ _FORMAT = "slashTags"
198
+
199
+ def __init__(self, *args, **kwargs):
200
+ super().__init__(*args, **kwargs)
201
+
202
+ @property
203
+ def _cmd(self):
204
+ # Add -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerOptions tokenizeNLs=false so the Stanford tokenizer is not used
205
+ return [
206
+ "edu.stanford.nlp.ie.crf.CRFClassifier",
207
+ "-loadClassifier",
208
+ self._stanford_model,
209
+ "-textFile",
210
+ self._input_file_path,
211
+ "-outputFormat",
212
+ self._FORMAT,
213
+ "-tokenizerFactory",
214
+ "edu.stanford.nlp.process.WhitespaceTokenizer",
215
+ "-tokenizerOptions",
216
+ '"tokenizeNLs=false"',
217
+ ]
218
+
219
+ def parse_output(self, text, sentences):
220
+ if self._FORMAT == "slashTags":
221
+ # Join the tagged tokens together into one flat list
222
+ tagged_sentences = []
223
+ for tagged_sentence in text.strip().split("\n"):
224
+ for tagged_word in tagged_sentence.strip().split():
225
+ word_tags = tagged_word.strip().split(self._SEPARATOR)
226
+ tagged_sentences.append(("".join(word_tags[:-1]), word_tags[-1]))
227
+
228
+ # Separate it according to the input
229
+ result = []
230
+ start = 0
231
+ for sent in sentences:
232
+ result.append(tagged_sentences[start : start + len(sent)])
233
+ start += len(sent)
234
+ return result
235
+
236
+ raise NotImplementedError
venv/lib/python3.10/site-packages/nltk/tag/tnt.py ADDED
@@ -0,0 +1,579 @@
1
+ # Natural Language Toolkit: TnT Tagger
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Sam Huston <[email protected]>
5
+ #
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ """
10
+ Implementation of 'TnT - A Statistical Part of Speech Tagger'
11
+ by Thorsten Brants
12
+
13
+ https://aclanthology.org/A00-1031.pdf
14
+ """
15
+
16
+ from math import log
17
+ from operator import itemgetter
18
+
19
+ from nltk.probability import ConditionalFreqDist, FreqDist
20
+ from nltk.tag.api import TaggerI
21
+
22
+
23
+ class TnT(TaggerI):
24
+ """
25
+ TnT - Statistical POS tagger
26
+
27
+ IMPORTANT NOTES:
28
+
29
+ * DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS
30
+
31
+ - It is possible to provide an untrained POS tagger to
32
+ create tags for unknown words, see __init__ function
33
+
34
+ * SHOULD BE USED WITH SENTENCE-DELIMITED INPUT
35
+
36
+ - Due to the nature of this tagger, it works best when
37
+ trained over sentence delimited input.
38
+ - However it still produces good results if the training
39
+ data and testing data are separated on all punctuation eg: [,.?!]
40
+ - Input for training is expected to be a list of sentences
41
+ where each sentence is a list of (word, tag) tuples
42
+ - Input for tag function is a single sentence
43
+ Input for tagdata function is a list of sentences
44
+ Output is of a similar form
45
+
46
+ * Function provided to process text that is unsegmented
47
+
48
+ - Please see basic_sent_chop()
49
+
50
+
51
+ TnT uses a second order Markov model to produce tags for
52
+ a sequence of input, specifically:
53
+
54
+ argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T)
55
+
56
+ IE: the maximum projection of a set of probabilities
57
+
58
+ The set of possible tags for a given word is derived
59
+ from the training data. It is the set of all tags
60
+ to which that exact word has been assigned.
61
+
62
+ To speed up and get more precision, we can use log addition
63
+ instead of multiplication, specifically:
64
+
65
+ argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] +
66
+ log(P(t_T+1|t_T))
67
+
68
+ The probability of a tag for a given word is the linear
69
+ interpolation of 3 markov models; a zero-order, first-order,
70
+ and a second order model.
71
+
72
+ P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) +
73
+ l3*P(t_i| t_i-1, t_i-2)
74
+
75
+ A beam search is used to limit the memory usage of the algorithm.
76
+ The degree of the beam can be changed using N in the initialization.
77
+ N represents the maximum number of possible solutions to maintain
78
+ while tagging.
79
+
80
+ It is possible to differentiate the tags which are assigned to
81
+ capitalized words. However this does not result in a significant
82
+ gain in the accuracy of the results.
83
+ """
84
+
85
+ def __init__(self, unk=None, Trained=False, N=1000, C=False):
86
+ """
87
+ Construct a TnT statistical tagger. Tagger must be trained
88
+ before being used to tag input.
89
+
90
+ :param unk: instance of a POS tagger, conforms to TaggerI
91
+ :type unk: TaggerI
92
+ :param Trained: Whether the supplied unknown-word tagger has already been trained
93
+ :type Trained: bool
94
+ :param N: Beam search degree (see above)
95
+ :type N: int
96
+ :param C: Capitalization flag
97
+ :type C: bool
98
+
99
+ Initializer, creates frequency distributions to be used
100
+ for tagging
101
+
102
+ _lx values represent the portion of the tri/bi/uni taggers
103
+ to be used to calculate the probability
104
+
105
+ N value is the number of possible solutions to maintain
106
+ while tagging. A good value for this is 1000
107
+
108
+ C is a boolean value which specifies to use or
109
+ not use the Capitalization of the word as additional
110
+ information for tagging.
111
+ NOTE: using capitalization may not increase the accuracy
112
+ of the tagger
113
+ """
114
+
115
+ self._uni = FreqDist()
116
+ self._bi = ConditionalFreqDist()
117
+ self._tri = ConditionalFreqDist()
118
+ self._wd = ConditionalFreqDist()
119
+ self._eos = ConditionalFreqDist()
120
+ self._l1 = 0.0
121
+ self._l2 = 0.0
122
+ self._l3 = 0.0
123
+ self._N = N
124
+ self._C = C
125
+ self._T = Trained
126
+
127
+ self._unk = unk
128
+
129
+ # statistical tools (ignore or delete me)
130
+ self.unknown = 0
131
+ self.known = 0
132
+
133
+ def train(self, data):
134
+ """
135
+ Uses a set of tagged data to train the tagger.
136
+ If an unknown word tagger is specified,
137
+ it is trained on the same data.
138
+
139
+ :param data: List of lists of (word, tag) tuples
140
+ :type data: list(list(tuple(str, str)))
141
+ """
142
+
143
+ # Ensure that local C flag is initialized before use
144
+ C = False
145
+
146
+ if self._unk is not None and self._T == False:
147
+ self._unk.train(data)
148
+
149
+ for sent in data:
150
+ history = [("BOS", False), ("BOS", False)]
151
+ for w, t in sent:
152
+
153
+ # if capitalization is requested,
154
+ # and the word begins with a capital
155
+ # set local flag C to True
156
+ if self._C and w[0].isupper():
157
+ C = True
158
+
159
+ self._wd[w][t] += 1
160
+ self._uni[(t, C)] += 1
161
+ self._bi[history[1]][(t, C)] += 1
162
+ self._tri[tuple(history)][(t, C)] += 1
163
+
164
+ history.append((t, C))
165
+ history.pop(0)
166
+
167
+ # set local flag C to false for the next word
168
+ C = False
169
+
170
+ self._eos[t]["EOS"] += 1
171
+
172
+ # compute lambda values from the trained frequency distributions
173
+ self._compute_lambda()
174
+
175
+ def _compute_lambda(self):
176
+ """
177
+ creates lambda values based upon training data
178
+
179
+ NOTE: no need to explicitly reference C,
180
+ it is contained within the tag variable :: tag == (tag,C)
181
+
182
+ for each tag trigram (t1, t2, t3)
183
+ depending on the maximum value of
184
+ - f(t1,t2,t3)-1 / f(t1,t2)-1
185
+ - f(t2,t3)-1 / f(t2)-1
186
+ - f(t3)-1 / N-1
187
+
188
+ increment l3,l2, or l1 by f(t1,t2,t3)
189
+
190
+ ISSUES -- Resolutions:
191
+ if 2 values are equal, increment both lambda values
192
+ by (f(t1,t2,t3) / 2)
193
+ """
194
+
195
+ # temporary lambda variables
196
+ tl1 = 0.0
197
+ tl2 = 0.0
198
+ tl3 = 0.0
199
+
200
+ # for each t1,t2 in system
201
+ for history in self._tri.conditions():
202
+ (h1, h2) = history
203
+
204
+ # for each t3 given t1,t2 in system
205
+ # (NOTE: tag actually represents (tag,C))
206
+ # However no effect within this function
207
+ for tag in self._tri[history].keys():
208
+
209
+ # if there has only been 1 occurrence of this tag in the data
210
+ # then ignore this trigram.
211
+ if self._uni[tag] == 1:
212
+ continue
213
+
214
+ # safe_div provides a safe floating point division
215
+ # it returns -1 if the denominator is 0
216
+ c3 = self._safe_div(
217
+ (self._tri[history][tag] - 1), (self._tri[history].N() - 1)
218
+ )
219
+ c2 = self._safe_div((self._bi[h2][tag] - 1), (self._bi[h2].N() - 1))
220
+ c1 = self._safe_div((self._uni[tag] - 1), (self._uni.N() - 1))
221
+
222
+ # if c1 is the maximum value:
223
+ if (c1 > c3) and (c1 > c2):
224
+ tl1 += self._tri[history][tag]
225
+
226
+ # if c2 is the maximum value
227
+ elif (c2 > c3) and (c2 > c1):
228
+ tl2 += self._tri[history][tag]
229
+
230
+ # if c3 is the maximum value
231
+ elif (c3 > c2) and (c3 > c1):
232
+ tl3 += self._tri[history][tag]
233
+
234
+ # if c3, and c2 are equal and larger than c1
235
+ elif (c3 == c2) and (c3 > c1):
236
+ tl2 += self._tri[history][tag] / 2.0
237
+ tl3 += self._tri[history][tag] / 2.0
238
+
239
+ # if c1, and c2 are equal and larger than c3
240
+ # this might be a dumb thing to do....(not sure yet)
241
+ elif (c2 == c1) and (c1 > c3):
242
+ tl1 += self._tri[history][tag] / 2.0
243
+ tl2 += self._tri[history][tag] / 2.0
244
+
245
+ # otherwise there might be a problem
246
+ # eg: all values = 0
247
+ else:
248
+ pass
249
+
250
+ # Lambda normalisation:
251
+ # ensures that l1+l2+l3 = 1
252
+ self._l1 = tl1 / (tl1 + tl2 + tl3)
253
+ self._l2 = tl2 / (tl1 + tl2 + tl3)
254
+ self._l3 = tl3 / (tl1 + tl2 + tl3)
255
+
256
+ def _safe_div(self, v1, v2):
257
+ """
258
+ Safe floating point division function, does not allow division by 0
259
+ returns -1 if the denominator is 0
260
+ """
261
+ if v2 == 0:
262
+ return -1
263
+ else:
264
+ return v1 / v2
265
+
266
+ def tagdata(self, data):
267
+ """
268
+ Tags each sentence in a list of sentences
269
+
270
+ :param data: list of lists of words
271
+ :type data: [[string,],]
272
+ :return: list of list of (word, tag) tuples
273
+
274
+ Invokes tag(sent) function for each sentence
275
+ compiles the results into a list of tagged sentences
276
+ each tagged sentence is a list of (word, tag) tuples
277
+ """
278
+ res = []
279
+ for sent in data:
280
+ res1 = self.tag(sent)
281
+ res.append(res1)
282
+ return res
283
+
284
+ def tag(self, data):
285
+ """
286
+ Tags a single sentence
287
+
288
+ :param data: list of words
289
+ :type data: [string,]
290
+
291
+ :return: [(word, tag),]
292
+
293
+ Calls recursive function '_tagword'
294
+ to produce a list of tags
295
+
296
+ Associates the sequence of returned tags
297
+ with the correct words in the input sequence
298
+
299
+ returns a list of (word, tag) tuples
300
+ """
301
+
302
+ current_state = [(["BOS", "BOS"], 0.0)]
303
+
304
+ sent = list(data)
305
+
306
+ tags = self._tagword(sent, current_state)
307
+
308
+ res = []
309
+ for i in range(len(sent)):
310
+ # unpack and discard the C flags
311
+ (t, C) = tags[i + 2]
312
+ res.append((sent[i], t))
313
+
314
+ return res
315
+
316
+ def _tagword(self, sent, current_states):
317
+ """
318
+ :param sent : List of words remaining in the sentence
319
+ :type sent : [word,]
320
+ :param current_states : List of possible tag combinations for
321
+ the sentence so far, and the log probability
322
+ associated with each tag combination
323
+ :type current_states : [([tag, ], logprob), ]
324
+
325
+ Tags the first word in the sentence and
326
+ recursively tags the remainder of the sentence
327
+
328
+ Uses formula specified above to calculate the probability
329
+ of a particular tag
330
+ """
331
+
332
+ # if this word marks the end of the sentence,
333
+ # return the most probable tag
334
+ if sent == []:
335
+ (h, logp) = current_states[0]
336
+ return h
337
+
338
+ # otherwise there are more words to be tagged
339
+ word = sent[0]
340
+ sent = sent[1:]
341
+ new_states = []
342
+
343
+ # if the Capitalisation is requested,
344
+ # initialise the flag for this word
345
+ C = False
346
+ if self._C and word[0].isupper():
347
+ C = True
348
+
349
+ # if word is known
350
+ # compute the set of possible tags
351
+ # and their associated log probabilities
352
+ if word in self._wd:
353
+ self.known += 1
354
+
355
+ for (history, curr_sent_logprob) in current_states:
356
+ logprobs = []
357
+
358
+ for t in self._wd[word].keys():
359
+ tC = (t, C)
360
+ p_uni = self._uni.freq(tC)
361
+ p_bi = self._bi[history[-1]].freq(tC)
362
+ p_tri = self._tri[tuple(history[-2:])].freq(tC)
363
+ p_wd = self._wd[word][t] / self._uni[tC]
364
+ p = self._l1 * p_uni + self._l2 * p_bi + self._l3 * p_tri
365
+ p2 = log(p, 2) + log(p_wd, 2)
366
+
367
+ # compute the result of appending each tag to this history
368
+ new_states.append((history + [tC], curr_sent_logprob + p2))
369
+
370
+ # otherwise a new word, set of possible tags is unknown
371
+ else:
372
+ self.unknown += 1
373
+
374
+ # since a set of possible tags,
375
+ # and the probability of each specific tag
376
+ # can not be returned from most classifiers:
377
+ # specify that any unknown words are tagged with certainty
378
+ p = 1
379
+
380
+ # if no unknown word tagger has been specified
381
+ # then use the tag 'Unk'
382
+ if self._unk is None:
383
+ tag = ("Unk", C)
384
+
385
+ # otherwise apply the unknown word tagger
386
+ else:
387
+ [(_w, t)] = list(self._unk.tag([word]))
388
+ tag = (t, C)
389
+
390
+ for (history, logprob) in current_states:
391
+ history.append(tag)
392
+
393
+ new_states = current_states
394
+
395
+ # now have computed a set of possible new_states
396
+
397
+ # sort states by log prob
398
+ # set is now ordered greatest to least log probability
399
+ new_states.sort(reverse=True, key=itemgetter(1))
400
+
401
+ # del everything after N (threshold)
402
+ # this is the beam search cut
403
+ if len(new_states) > self._N:
404
+ new_states = new_states[: self._N]
405
+
406
+ # compute the tags for the rest of the sentence
407
+ # return the best list of tags for the sentence
408
+ return self._tagword(sent, new_states)
409
+
410
+
411
+ ########################################
412
+ # helper function -- basic sentence tokenizer
413
+ ########################################
414
+
415
+
416
+ def basic_sent_chop(data, raw=True):
417
+ """
418
+ Basic method for tokenizing input into sentences
419
+ for this tagger:
420
+
421
+ :param data: list of tokens (words or (word, tag) tuples)
422
+ :type data: str or tuple(str, str)
423
+ :param raw: boolean flag marking the input data
424
+ as a list of words or a list of tagged words
425
+ :type raw: bool
426
+ :return: list of sentences
427
+ sentences are a list of tokens
428
+ tokens are the same as the input
429
+
430
+ Function takes a list of tokens and separates the tokens into lists
431
+ where each list represents a sentence fragment
432
+ This function can separate both tagged and raw sequences into
433
+ basic sentences.
434
+
435
+ Sentence markers are the set of [,.!?]
436
+
437
+ This is a simple method which enhances the performance of the TnT
438
+ tagger. Better sentence tokenization will further enhance the results.
439
+ """
440
+
441
+ new_data = []
442
+ curr_sent = []
443
+ sent_mark = [",", ".", "?", "!"]
444
+
445
+ if raw:
446
+ for word in data:
447
+ if word in sent_mark:
448
+ curr_sent.append(word)
449
+ new_data.append(curr_sent)
450
+ curr_sent = []
451
+ else:
452
+ curr_sent.append(word)
453
+
454
+ else:
455
+ for (word, tag) in data:
456
+ if word in sent_mark:
457
+ curr_sent.append((word, tag))
458
+ new_data.append(curr_sent)
459
+ curr_sent = []
460
+ else:
461
+ curr_sent.append((word, tag))
462
+ return new_data
463
+
464
+
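A short usage sketch for the TnT class and basic_sent_chop() above, assuming the Penn Treebank sample corpus is installed; note that words unseen in training are tagged 'Unk' unless an unknown-word tagger was supplied:

from nltk.corpus import treebank
from nltk.tag import tnt

train_sents = list(treebank.tagged_sents())[:1500]
tagger = tnt.TnT(N=1000)
tagger.train(train_sents)
print(tagger.tag('Pierre Vinken will join the board .'.split()))

# chopping an unsegmented token stream on punctuation before tagging
stream = 'the cat sat on the mat . the dog barked .'.split()
print(tnt.basic_sent_chop(stream))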
465
+ def demo():
466
+ from nltk.corpus import brown
467
+
468
+ sents = list(brown.tagged_sents())
469
+ test = list(brown.sents())
470
+
471
+ tagger = TnT()
472
+ tagger.train(sents[200:1000])
473
+
474
+ tagged_data = tagger.tagdata(test[100:120])
475
+
476
+ for j in range(len(tagged_data)):
477
+ s = tagged_data[j]
478
+ t = sents[j + 100]
479
+ for i in range(len(s)):
480
+ print(s[i], "--", t[i])
481
+ print()
482
+
483
+
484
+ def demo2():
485
+ from nltk.corpus import treebank
486
+
487
+ d = list(treebank.tagged_sents())
488
+
489
+ t = TnT(N=1000, C=False)
490
+ s = TnT(N=1000, C=True)
491
+ t.train(d[(11) * 100 :])
492
+ s.train(d[(11) * 100 :])
493
+
494
+ for i in range(10):
495
+ tacc = t.accuracy(d[i * 100 : ((i + 1) * 100)])
496
+ tp_un = t.unknown / (t.known + t.unknown)
497
+ tp_kn = t.known / (t.known + t.unknown)
498
+ t.unknown = 0
499
+ t.known = 0
500
+
501
+ print("Capitalization off:")
502
+ print("Accuracy:", tacc)
503
+ print("Percentage known:", tp_kn)
504
+ print("Percentage unknown:", tp_un)
505
+ print("Accuracy over known words:", (tacc / tp_kn))
506
+
507
+ sacc = s.accuracy(d[i * 100 : ((i + 1) * 100)])
508
+ sp_un = s.unknown / (s.known + s.unknown)
509
+ sp_kn = s.known / (s.known + s.unknown)
510
+ s.unknown = 0
511
+ s.known = 0
512
+
513
+ print("Capitalization on:")
514
+ print("Accuracy:", sacc)
515
+ print("Percentage known:", sp_kn)
516
+ print("Percentage unknown:", sp_un)
517
+ print("Accuracy over known words:", (sacc / sp_kn))
518
+
519
+
520
+ def demo3():
521
+ from nltk.corpus import brown, treebank
522
+
523
+ d = list(treebank.tagged_sents())
524
+ e = list(brown.tagged_sents())
525
+
526
+ d = d[:1000]
527
+ e = e[:1000]
528
+
529
+ d10 = int(len(d) * 0.1)
530
+ e10 = int(len(e) * 0.1)
531
+
532
+ tknacc = 0
533
+ sknacc = 0
534
+ tallacc = 0
535
+ sallacc = 0
536
+ tknown = 0
537
+ sknown = 0
538
+
539
+ for i in range(10):
540
+
541
+ t = TnT(N=1000, C=False)
542
+ s = TnT(N=1000, C=False)
543
+
544
+ dtest = d[(i * d10) : ((i + 1) * d10)]
545
+ etest = e[(i * e10) : ((i + 1) * e10)]
546
+
547
+ dtrain = d[: (i * d10)] + d[((i + 1) * d10) :]
548
+ etrain = e[: (i * e10)] + e[((i + 1) * e10) :]
549
+
550
+ t.train(dtrain)
551
+ s.train(etrain)
552
+
553
+ tacc = t.accuracy(dtest)
554
+ tp_un = t.unknown / (t.known + t.unknown)
555
+ tp_kn = t.known / (t.known + t.unknown)
556
+ tknown += tp_kn
557
+ t.unknown = 0
558
+ t.known = 0
559
+
560
+ sacc = s.accuracy(etest)
561
+ sp_un = s.unknown / (s.known + s.unknown)
562
+ sp_kn = s.known / (s.known + s.unknown)
563
+ sknown += sp_kn
564
+ s.unknown = 0
565
+ s.known = 0
566
+
567
+ tknacc += tacc / tp_kn
568
+ sknacc += sacc / sp_kn
569
+ tallacc += tacc
570
+ sallacc += sacc
571
+
572
+ # print(i+1, (tacc / tp_kn), i+1, (sacc / tp_kn), i+1, tacc, i+1, sacc)
573
+
574
+ print("brown: acc over words known:", 10 * tknacc)
575
+ print(" : overall accuracy:", 10 * tallacc)
576
+ print(" : words known:", 10 * tknown)
577
+ print("treebank: acc over words known:", 10 * sknacc)
578
+ print(" : overall accuracy:", 10 * sallacc)
579
+ print(" : words known:", 10 * sknown)
venv/lib/python3.10/site-packages/nltk/tag/util.py ADDED
@@ -0,0 +1,72 @@
1
+ # Natural Language Toolkit: Tagger Utilities
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+
10
+ def str2tuple(s, sep="/"):
11
+ """
12
+ Given the string representation of a tagged token, return the
13
+ corresponding tuple representation. The rightmost occurrence of
14
+ *sep* in *s* will be used to divide *s* into a word string and
15
+ a tag string. If *sep* does not occur in *s*, return (s, None).
16
+
17
+ >>> from nltk.tag.util import str2tuple
18
+ >>> str2tuple('fly/NN')
19
+ ('fly', 'NN')
20
+
21
+ :type s: str
22
+ :param s: The string representation of a tagged token.
23
+ :type sep: str
24
+ :param sep: The separator string used to separate word strings
25
+ from tags.
26
+ """
27
+ loc = s.rfind(sep)
28
+ if loc >= 0:
29
+ return (s[:loc], s[loc + len(sep) :].upper())
30
+ else:
31
+ return (s, None)
32
+
33
+
34
+ def tuple2str(tagged_token, sep="/"):
35
+ """
36
+ Given the tuple representation of a tagged token, return the
37
+ corresponding string representation. This representation is
38
+ formed by concatenating the token's word string, followed by the
39
+ separator, followed by the token's tag. (If the tag is None,
40
+ then just return the bare word string.)
41
+
42
+ >>> from nltk.tag.util import tuple2str
43
+ >>> tagged_token = ('fly', 'NN')
44
+ >>> tuple2str(tagged_token)
45
+ 'fly/NN'
46
+
47
+ :type tagged_token: tuple(str, str)
48
+ :param tagged_token: The tuple representation of a tagged token.
49
+ :type sep: str
50
+ :param sep: The separator string used to separate word strings
51
+ from tags.
52
+ """
53
+ word, tag = tagged_token
54
+ if tag is None:
55
+ return word
56
+ else:
57
+ assert sep not in tag, "tag may not contain sep!"
58
+ return f"{word}{sep}{tag}"
59
+
60
+
61
+ def untag(tagged_sentence):
62
+ """
63
+ Given a tagged sentence, return an untagged version of that
64
+ sentence. I.e., return a list containing the first element
65
+ of each tuple in *tagged_sentence*.
66
+
67
+ >>> from nltk.tag.util import untag
68
+ >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')])
69
+ ['John', 'saw', 'Mary']
70
+
71
+ """
72
+ return [w for (w, t) in tagged_sentence]
venv/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.45 kB). View file
 
venv/lib/python3.10/site-packages/nltk/translate/__pycache__/bleu_score.cpython-310.pyc ADDED
Binary file (26.7 kB). View file
 
venv/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc ADDED
Binary file (7.89 kB). View file
 
venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gale_church.cpython-310.pyc ADDED
Binary file (7.95 kB). View file
 
venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gleu_score.cpython-310.pyc ADDED
Binary file (7.64 kB). View file
 
venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm2.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc ADDED
Binary file (12.8 kB). View file