applied-ai-018 committed
Commit 84b0ee5 · verified
1 Parent(s): 011083c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/nltk/chunk/__init__.py +197 -0
  2. env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/api.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/named_entity.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/regexp.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/util.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/nltk/chunk/regexp.py +1475 -0
  8. env-llmeval/lib/python3.10/site-packages/nltk/cluster/api.py +74 -0
  9. env-llmeval/lib/python3.10/site-packages/nltk/cluster/em.py +219 -0
  10. env-llmeval/lib/python3.10/site-packages/nltk/cluster/gaac.py +170 -0
  11. env-llmeval/lib/python3.10/site-packages/nltk/cluster/kmeans.py +231 -0
  12. env-llmeval/lib/python3.10/site-packages/nltk/cluster/util.py +300 -0
  13. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/nltk/tbl/__init__.py +31 -0
  24. env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/nltk/tbl/api.py +0 -0
  32. env-llmeval/lib/python3.10/site-packages/nltk/tbl/demo.py +418 -0
  33. env-llmeval/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py +38 -0
  34. env-llmeval/lib/python3.10/site-packages/nltk/tbl/rule.py +322 -0
  35. env-llmeval/lib/python3.10/site-packages/nltk/tbl/template.py +325 -0
  36. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__init__.py +132 -0
  37. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/nltk/chunk/__init__.py ADDED
@@ -0,0 +1,197 @@
+ # Natural Language Toolkit: Chunkers
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Steven Bird <[email protected]>
+ #         Edward Loper <[email protected]>
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+ #
+
+ """
+ Classes and interfaces for identifying non-overlapping linguistic
+ groups (such as base noun phrases) in unrestricted text. This task is
+ called "chunk parsing" or "chunking", and the identified groups are
+ called "chunks". The chunked text is represented using a shallow
+ tree called a "chunk structure." A chunk structure is a tree
+ containing tokens and chunks, where each chunk is a subtree containing
+ only tokens. For example, the chunk structure for base noun phrase
+ chunks in the sentence "I saw the big dog on the hill" is::
+
+     (SENTENCE:
+       (NP: <I>)
+       <saw>
+       (NP: <the> <big> <dog>)
+       <on>
+       (NP: <the> <hill>))
+
+ To convert a chunk structure back to a list of tokens, simply use the
+ chunk structure's ``leaves()`` method.
+
+ This module defines ``ChunkParserI``, a standard interface for
+ chunking texts; and ``RegexpChunkParser``, a regular-expression based
+ implementation of that interface. It also defines ``ChunkScore``, a
+ utility class for scoring chunk parsers.
+
+ RegexpChunkParser
+ =================
+
+ ``RegexpChunkParser`` is an implementation of the chunk parser interface
+ that uses regular-expressions over tags to chunk a text. Its
+ ``parse()`` method first constructs a ``ChunkString``, which encodes a
+ particular chunking of the input text. Initially, nothing is
+ chunked. ``parse.RegexpChunkParser`` then applies a sequence of
+ ``RegexpChunkRule`` rules to the ``ChunkString``, each of which modifies
+ the chunking that it encodes. Finally, the ``ChunkString`` is
+ transformed back into a chunk structure, which is returned.
+
+ ``RegexpChunkParser`` can only be used to chunk a single kind of phrase.
+ For example, you can use an ``RegexpChunkParser`` to chunk the noun
+ phrases in a text, or the verb phrases in a text; but you can not
+ use it to simultaneously chunk both noun phrases and verb phrases in
+ the same text. (This is a limitation of ``RegexpChunkParser``, not of
+ chunk parsers in general.)
+
+ RegexpChunkRules
+ ----------------
+
+ A ``RegexpChunkRule`` is a transformational rule that updates the
+ chunking of a text by modifying its ``ChunkString``. Each
+ ``RegexpChunkRule`` defines the ``apply()`` method, which modifies
+ the chunking encoded by a ``ChunkString``. The
+ ``RegexpChunkRule`` class itself can be used to implement any
+ transformational rule based on regular expressions. There are
+ also a number of subclasses, which can be used to implement
+ simpler types of rules:
+
+   - ``ChunkRule`` chunks anything that matches a given regular
+     expression.
+   - ``StripRule`` strips anything that matches a given regular
+     expression.
+   - ``UnChunkRule`` will un-chunk any chunk that matches a given
+     regular expression.
+   - ``MergeRule`` can be used to merge two contiguous chunks.
+   - ``SplitRule`` can be used to split a single chunk into two
+     smaller chunks.
+   - ``ExpandLeftRule`` will expand a chunk to incorporate new
+     unchunked material on the left.
+   - ``ExpandRightRule`` will expand a chunk to incorporate new
+     unchunked material on the right.
+
+ Tag Patterns
+ ~~~~~~~~~~~~
+
+ A ``RegexpChunkRule`` uses a modified version of regular
+ expression patterns, called "tag patterns". Tag patterns are
+ used to match sequences of tags. Examples of tag patterns are::
+
+     r'(<DT>|<JJ>|<NN>)+'
+     r'<NN>+'
+     r'<NN.*>'
+
+ The differences between regular expression patterns and tag
+ patterns are:
+
+   - In tag patterns, ``'<'`` and ``'>'`` act as parentheses; so
+     ``'<NN>+'`` matches one or more repetitions of ``'<NN>'``, not
+     ``'<NN'`` followed by one or more repetitions of ``'>'``.
+   - Whitespace in tag patterns is ignored. So
+     ``'<DT> | <NN>'`` is equivalent to ``'<DT>|<NN>'``
+   - In tag patterns, ``'.'`` is equivalent to ``'[^{}<>]'``; so
+     ``'<NN.*>'`` matches any single tag starting with ``'NN'``.
+
+ The function ``tag_pattern2re_pattern`` can be used to transform
+ a tag pattern to an equivalent regular expression pattern.
+
+ Efficiency
+ ----------
+
+ Preliminary tests indicate that ``RegexpChunkParser`` can chunk at a
+ rate of about 300 tokens/second, with a moderately complex rule set.
+
+ There may be problems if ``RegexpChunkParser`` is used with more than
+ 5,000 tokens at a time. In particular, evaluation of some regular
+ expressions may cause the Python regular expression engine to
+ exceed its maximum recursion depth. We have attempted to minimize
+ these problems, but it is impossible to avoid them completely. We
+ therefore recommend that you apply the chunk parser to a single
+ sentence at a time.
+
+ Emacs Tip
+ ---------
+
+ If you evaluate the following elisp expression in emacs, it will
+ colorize a ``ChunkString`` when you use an interactive python shell
+ with emacs or xemacs ("C-c !")::
+
+     (let ()
+       (defconst comint-mode-font-lock-keywords
+         '(("<[^>]+>" 0 'font-lock-reference-face)
+           ("[{}]" 0 'font-lock-function-name-face)))
+       (add-hook 'comint-mode-hook (lambda () (turn-on-font-lock))))
+
+ You can evaluate this code by copying it to a temporary buffer,
+ placing the cursor after the last close parenthesis, and typing
+ "``C-x C-e``". You should evaluate it before running the interactive
+ session. The change will last until you close emacs.
+
+ Unresolved Issues
+ -----------------
+
+ If we use the ``re`` module for regular expressions, Python's
+ regular expression engine generates "maximum recursion depth
+ exceeded" errors when processing very large texts, even for
+ regular expressions that should not require any recursion. We
+ therefore use the ``pre`` module instead. But note that ``pre``
+ does not include Unicode support, so this module will not work
+ with unicode strings. Note also that ``pre`` regular expressions
+ are not quite as advanced as ``re`` ones (e.g., no leftward
+ zero-length assertions).
+
+ :type CHUNK_TAG_PATTERN: regexp
+ :var CHUNK_TAG_PATTERN: A regular expression to test whether a tag
+     pattern is valid.
+ """
+
+ from nltk.chunk.api import ChunkParserI
+ from nltk.chunk.regexp import RegexpChunkParser, RegexpParser
+ from nltk.chunk.util import (
+     ChunkScore,
+     accuracy,
+     conllstr2tree,
+     conlltags2tree,
+     ieerstr2tree,
+     tagstr2tree,
+     tree2conllstr,
+     tree2conlltags,
+ )
+ from nltk.data import load
+
+ # Standard treebank POS tagger
+ _BINARY_NE_CHUNKER = "chunkers/maxent_ne_chunker/english_ace_binary.pickle"
+ _MULTICLASS_NE_CHUNKER = "chunkers/maxent_ne_chunker/english_ace_multiclass.pickle"
+
+
+ def ne_chunk(tagged_tokens, binary=False):
+     """
+     Use NLTK's currently recommended named entity chunker to
+     chunk the given list of tagged tokens.
+     """
+     if binary:
+         chunker_pickle = _BINARY_NE_CHUNKER
+     else:
+         chunker_pickle = _MULTICLASS_NE_CHUNKER
+     chunker = load(chunker_pickle)
+     return chunker.parse(tagged_tokens)
+
+
+ def ne_chunk_sents(tagged_sentences, binary=False):
+     """
+     Use NLTK's currently recommended named entity chunker to chunk the
+     given list of tagged sentences, each consisting of a list of tagged tokens.
+     """
+     if binary:
+         chunker_pickle = _BINARY_NE_CHUNKER
+     else:
+         chunker_pickle = _MULTICLASS_NE_CHUNKER
+     chunker = load(chunker_pickle)
+     return chunker.parse_sents(tagged_sentences)
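Reviewer note: the module above is NLTK's standard chunking entry point, vendored into this evaluation environment. As a quick sanity check of the ne_chunk helper it defines, a minimal usage sketch follows. It assumes NLTK is installed and that the punkt, averaged_perceptron_tagger, maxent_ne_chunker, and words data packages have been downloaded; it is illustrative only and not part of the committed files.

    import nltk

    # Tokenize, POS-tag, then run the recommended named-entity chunker.
    sentence = "Mark works for Acme Corp. in London."
    tokens = nltk.word_tokenize(sentence)        # list of word strings
    tagged = nltk.pos_tag(tokens)                # list of (word, POS tag) pairs
    tree = nltk.ne_chunk(tagged, binary=False)   # Tree with PERSON/ORGANIZATION/GPE subtrees
    print(tree)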
env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (7.26 kB).
env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/api.cpython-310.pyc ADDED
Binary file (1.97 kB).
env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/named_entity.cpython-310.pyc ADDED
Binary file (9.47 kB).
env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/regexp.cpython-310.pyc ADDED
Binary file (47.3 kB).
env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/util.cpython-310.pyc ADDED
Binary file (17.7 kB).
env-llmeval/lib/python3.10/site-packages/nltk/chunk/regexp.py ADDED
@@ -0,0 +1,1475 @@
+ # Natural Language Toolkit: Regular Expression Chunkers
+ #
+ # Copyright (C) 2001-2023 NLTK Project
+ # Author: Edward Loper <[email protected]>
+ #         Steven Bird <[email protected]> (minor additions)
+ # URL: <https://www.nltk.org/>
+ # For license information, see LICENSE.TXT
+
+ import re
+
+ import regex
+
+ from nltk.chunk.api import ChunkParserI
+ from nltk.tree import Tree
+
+ # //////////////////////////////////////////////////////
+ # ChunkString
+ # //////////////////////////////////////////////////////
+
+
+ class ChunkString:
+     """
+     A string-based encoding of a particular chunking of a text.
+     Internally, the ``ChunkString`` class uses a single string to
+     encode the chunking of the input text. This string contains a
+     sequence of angle-bracket delimited tags, with chunking indicated
+     by braces. An example of this encoding is::
+
+         {<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<VBD><.>
+
+     ``ChunkString`` are created from tagged texts (i.e., lists of
+     ``tokens`` whose type is ``TaggedType``). Initially, nothing is
+     chunked.
+
+     The chunking of a ``ChunkString`` can be modified with the ``xform()``
+     method, which uses a regular expression to transform the string
+     representation. These transformations should only add and remove
+     braces; they should *not* modify the sequence of angle-bracket
+     delimited tags.
+
+     :type _str: str
+     :ivar _str: The internal string representation of the text's
+         encoding. This string representation contains a sequence of
+         angle-bracket delimited tags, with chunking indicated by
+         braces. An example of this encoding is::
+
+             {<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<VBD><.>
+
+     :type _pieces: list(tagged tokens and chunks)
+     :ivar _pieces: The tagged tokens and chunks encoded by this ``ChunkString``.
+     :ivar _debug: The debug level. See the constructor docs.
+
+     :cvar IN_CHUNK_PATTERN: A zero-width regexp pattern string that
+         will only match positions that are in chunks.
+     :cvar IN_STRIP_PATTERN: A zero-width regexp pattern string that
+         will only match positions that are in strips.
+     """
+
+     CHUNK_TAG_CHAR = r"[^\{\}<>]"
+     CHUNK_TAG = r"(<%s+?>)" % CHUNK_TAG_CHAR
+
+     IN_CHUNK_PATTERN = r"(?=[^\{]*\})"
+     IN_STRIP_PATTERN = r"(?=[^\}]*(\{|$))"
+
+     # These are used by _verify
+     _CHUNK = r"(\{%s+?\})+?" % CHUNK_TAG
+     _STRIP = r"(%s+?)+?" % CHUNK_TAG
+     _VALID = re.compile(r"^(\{?%s\}?)*?$" % CHUNK_TAG)
+     _BRACKETS = re.compile(r"[^\{\}]+")
+     _BALANCED_BRACKETS = re.compile(r"(\{\})*$")
+
+     def __init__(self, chunk_struct, debug_level=1):
+         """
+         Construct a new ``ChunkString`` that encodes the chunking of
+         the text ``tagged_tokens``.
+
+         :type chunk_struct: Tree
+         :param chunk_struct: The chunk structure to be further chunked.
+         :type debug_level: int
+         :param debug_level: The level of debugging which should be
+             applied to transformations on the ``ChunkString``. The
+             valid levels are:
+
+             - 0: no checks
+             - 1: full check on to_chunkstruct
+             - 2: full check on to_chunkstruct and cursory check after
+               each transformation.
+             - 3: full check on to_chunkstruct and full check after
+               each transformation.
+
+             We recommend you use at least level 1. You should
+             probably use level 3 if you use any non-standard
+             subclasses of ``RegexpChunkRule``.
+         """
+         self._root_label = chunk_struct.label()
+         self._pieces = chunk_struct[:]
+         tags = [self._tag(tok) for tok in self._pieces]
+         self._str = "<" + "><".join(tags) + ">"
+         self._debug = debug_level
+
+     def _tag(self, tok):
+         if isinstance(tok, tuple):
+             return tok[1]
+         elif isinstance(tok, Tree):
+             return tok.label()
+         else:
+             raise ValueError("chunk structures must contain tagged " "tokens or trees")
+
+     def _verify(self, s, verify_tags):
+         """
+         Check to make sure that ``s`` still corresponds to some chunked
+         version of ``_pieces``.
+
+         :type verify_tags: bool
+         :param verify_tags: Whether the individual tags should be
+             checked. If this is false, ``_verify`` will check to make
+             sure that ``_str`` encodes a chunked version of *some*
+             list of tokens. If this is true, then ``_verify`` will
+             check to make sure that the tags in ``_str`` match those in
+             ``_pieces``.
+
+         :raise ValueError: if the internal string representation of
+             this ``ChunkString`` is invalid or not consistent with _pieces.
+         """
+         # Check overall form
+         if not ChunkString._VALID.match(s):
+             raise ValueError(
+                 "Transformation generated invalid " "chunkstring:\n  %s" % s
+             )
+
+         # Check that parens are balanced. If the string is long, we
+         # have to do this in pieces, to avoid a maximum recursion
+         # depth limit for regular expressions.
+         brackets = ChunkString._BRACKETS.sub("", s)
+         for i in range(1 + len(brackets) // 5000):
+             substr = brackets[i * 5000 : i * 5000 + 5000]
+             if not ChunkString._BALANCED_BRACKETS.match(substr):
+                 raise ValueError(
+                     "Transformation generated invalid " "chunkstring:\n  %s" % s
+                 )
+
+         if verify_tags <= 0:
+             return
+
+         tags1 = (re.split(r"[\{\}<>]+", s))[1:-1]
+         tags2 = [self._tag(piece) for piece in self._pieces]
+         if tags1 != tags2:
+             raise ValueError(
+                 "Transformation generated invalid " "chunkstring: tag changed"
+             )
+
+     def to_chunkstruct(self, chunk_label="CHUNK"):
+         """
+         Return the chunk structure encoded by this ``ChunkString``.
+
+         :rtype: Tree
+         :raise ValueError: If a transformation has generated an
+             invalid chunkstring.
+         """
+         if self._debug > 0:
+             self._verify(self._str, 1)
+
+         # Use this alternating list to create the chunkstruct.
+         pieces = []
+         index = 0
+         piece_in_chunk = 0
+         for piece in re.split("[{}]", self._str):
+
+             # Find the list of tokens contained in this piece.
+             length = piece.count("<")
+             subsequence = self._pieces[index : index + length]
+
+             # Add this list of tokens to our pieces.
+             if piece_in_chunk:
+                 pieces.append(Tree(chunk_label, subsequence))
+             else:
+                 pieces += subsequence
+
+             # Update index, piece_in_chunk
+             index += length
+             piece_in_chunk = not piece_in_chunk
+
+         return Tree(self._root_label, pieces)
+
+     def xform(self, regexp, repl):
+         """
+         Apply the given transformation to the string encoding of this
+         ``ChunkString``. In particular, find all occurrences that match
+         ``regexp``, and replace them using ``repl`` (as done by
+         ``re.sub``).
+
+         This transformation should only add and remove braces; it
+         should *not* modify the sequence of angle-bracket delimited
+         tags. Furthermore, this transformation may not result in
+         improper bracketing. Note, in particular, that bracketing may
+         not be nested.
+
+         :type regexp: str or regexp
+         :param regexp: A regular expression matching the substring
+             that should be replaced. This will typically include a
+             named group, which can be used by ``repl``.
+         :type repl: str
+         :param repl: An expression specifying what should replace the
+             matched substring. Typically, this will include a named
+             replacement group, specified by ``regexp``.
+         :rtype: None
+         :raise ValueError: If this transformation generated an
+             invalid chunkstring.
+         """
+         # Do the actual substitution
+         s = re.sub(regexp, repl, self._str)
+
+         # The substitution might have generated "empty chunks"
+         # (substrings of the form "{}"). Remove them, so they don't
+         # interfere with other transformations.
+         s = re.sub(r"\{\}", "", s)
+
+         # Make sure that the transformation was legal.
+         if self._debug > 1:
+             self._verify(s, self._debug - 2)
+
+         # Commit the transformation.
+         self._str = s
+
+     def __repr__(self):
+         """
+         Return a string representation of this ``ChunkString``.
+         It has the form::
+
+             <ChunkString: '{<DT><JJ><NN>}<VBN><IN>{<DT><NN>}'>
+
+         :rtype: str
+         """
+         return "<ChunkString: %s>" % repr(self._str)
+
+     def __str__(self):
+         """
+         Return a formatted representation of this ``ChunkString``.
+         This representation will include extra spaces to ensure that
+         tags will line up with the representation of other
+         ``ChunkStrings`` for the same text, regardless of the chunking.
+
+         :rtype: str
+         """
+         # Add spaces to make everything line up.
+         str = re.sub(r">(?!\})", r"> ", self._str)
+         str = re.sub(r"([^\{])<", r"\1 <", str)
+         if str[0] == "<":
+             str = " " + str
+         return str
+
+
+ # //////////////////////////////////////////////////////
+ # Chunking Rules
+ # //////////////////////////////////////////////////////
+
+
+ class RegexpChunkRule:
+     """
+     A rule specifying how to modify the chunking in a ``ChunkString``,
+     using a transformational regular expression. The
+     ``RegexpChunkRule`` class itself can be used to implement any
+     transformational rule based on regular expressions. There are
+     also a number of subclasses, which can be used to implement
+     simpler types of rules, based on matching regular expressions.
+
+     Each ``RegexpChunkRule`` has a regular expression and a
+     replacement expression. When a ``RegexpChunkRule`` is "applied"
+     to a ``ChunkString``, it searches the ``ChunkString`` for any
+     substring that matches the regular expression, and replaces it
+     using the replacement expression. This search/replace operation
+     has the same semantics as ``re.sub``.
+
+     Each ``RegexpChunkRule`` also has a description string, which
+     gives a short (typically less than 75 characters) description of
+     the purpose of the rule.
+
+     This transformation defined by this ``RegexpChunkRule`` should
+     only add and remove braces; it should *not* modify the sequence
+     of angle-bracket delimited tags. Furthermore, this transformation
+     may not result in nested or mismatched bracketing.
+     """
+
+     def __init__(self, regexp, repl, descr):
+         """
+         Construct a new RegexpChunkRule.
+
+         :type regexp: regexp or str
+         :param regexp: The regular expression for this ``RegexpChunkRule``.
+             When this rule is applied to a ``ChunkString``, any
+             substring that matches ``regexp`` will be replaced using
+             the replacement string ``repl``. Note that this must be a
+             normal regular expression, not a tag pattern.
+         :type repl: str
+         :param repl: The replacement expression for this ``RegexpChunkRule``.
+             When this rule is applied to a ``ChunkString``, any substring
+             that matches ``regexp`` will be replaced using ``repl``.
+         :type descr: str
+         :param descr: A short description of the purpose and/or effect
+             of this rule.
+         """
+         if isinstance(regexp, str):
+             regexp = re.compile(regexp)
+         self._repl = repl
+         self._descr = descr
+         self._regexp = regexp
+
+     def apply(self, chunkstr):
+         # Keep docstring generic so we can inherit it.
+         """
+         Apply this rule to the given ``ChunkString``. See the
+         class reference documentation for a description of what it
+         means to apply a rule.
+
+         :type chunkstr: ChunkString
+         :param chunkstr: The chunkstring to which this rule is applied.
+         :rtype: None
+         :raise ValueError: If this transformation generated an
+             invalid chunkstring.
+         """
+         chunkstr.xform(self._regexp, self._repl)
+
+     def descr(self):
+         """
+         Return a short description of the purpose and/or effect of
+         this rule.
+
+         :rtype: str
+         """
+         return self._descr
+
+     def __repr__(self):
+         """
+         Return a string representation of this rule. It has the form::
+
+             <RegexpChunkRule: '{<IN|VB.*>}'->'<IN>'>
+
+         Note that this representation does not include the
+         description string; that string can be accessed
+         separately with the ``descr()`` method.
+
+         :rtype: str
+         """
+         return (
+             "<RegexpChunkRule: "
+             + repr(self._regexp.pattern)
+             + "->"
+             + repr(self._repl)
+             + ">"
+         )
+
+     @staticmethod
+     def fromstring(s):
+         """
+         Create a RegexpChunkRule from a string description.
+         Currently, the following formats are supported::
+
+             {regexp}         # chunk rule
+             }regexp{         # strip rule
+             regexp}{regexp   # split rule
+             regexp{}regexp   # merge rule
+
+         Where ``regexp`` is a regular expression for the rule. Any
+         text following the comment marker (``#``) will be used as
+         the rule's description:
+
+         >>> from nltk.chunk.regexp import RegexpChunkRule
+         >>> RegexpChunkRule.fromstring('{<DT>?<NN.*>+}')
+         <ChunkRule: '<DT>?<NN.*>+'>
+         """
+         # Split off the comment (but don't split on '\#')
+         m = re.match(r"(?P<rule>(\\.|[^#])*)(?P<comment>#.*)?", s)
+         rule = m.group("rule").strip()
+         comment = (m.group("comment") or "")[1:].strip()
+
+         # Pattern bodies: chunk, strip, split, merge
+         try:
+             if not rule:
+                 raise ValueError("Empty chunk pattern")
+             if rule[0] == "{" and rule[-1] == "}":
+                 return ChunkRule(rule[1:-1], comment)
+             elif rule[0] == "}" and rule[-1] == "{":
+                 return StripRule(rule[1:-1], comment)
+             elif "}{" in rule:
+                 left, right = rule.split("}{")
+                 return SplitRule(left, right, comment)
+             elif "{}" in rule:
+                 left, right = rule.split("{}")
+                 return MergeRule(left, right, comment)
+             elif re.match("[^{}]*{[^{}]*}[^{}]*", rule):
+                 left, chunk, right = re.split("[{}]", rule)
+                 return ChunkRuleWithContext(left, chunk, right, comment)
+             else:
+                 raise ValueError("Illegal chunk pattern: %s" % rule)
+         except (ValueError, re.error) as e:
+             raise ValueError("Illegal chunk pattern: %s" % rule) from e
+
+
+ class ChunkRule(RegexpChunkRule):
+     """
+     A rule specifying how to add chunks to a ``ChunkString``, using a
+     matching tag pattern. When applied to a ``ChunkString``, it will
+     find any substring that matches this tag pattern and that is not
+     already part of a chunk, and create a new chunk containing that
+     substring.
+     """
+
+     def __init__(self, tag_pattern, descr):
+         """
+         Construct a new ``ChunkRule``.
+
+         :type tag_pattern: str
+         :param tag_pattern: This rule's tag pattern. When
+             applied to a ``ChunkString``, this rule will
+             chunk any substring that matches this tag pattern and that
+             is not already part of a chunk.
+         :type descr: str
+         :param descr: A short description of the purpose and/or effect
+             of this rule.
+         """
+         self._pattern = tag_pattern
+         regexp = re.compile(
+             "(?P<chunk>%s)%s"
+             % (tag_pattern2re_pattern(tag_pattern), ChunkString.IN_STRIP_PATTERN)
+         )
+         RegexpChunkRule.__init__(self, regexp, r"{\g<chunk>}", descr)
+
+     def __repr__(self):
+         """
+         Return a string representation of this rule. It has the form::
+
+             <ChunkRule: '<IN|VB.*>'>
+
+         Note that this representation does not include the
+         description string; that string can be accessed
+         separately with the ``descr()`` method.
+
+         :rtype: str
+         """
+         return "<ChunkRule: " + repr(self._pattern) + ">"
+
+
+ class StripRule(RegexpChunkRule):
+     """
+     A rule specifying how to remove strips to a ``ChunkString``,
+     using a matching tag pattern. When applied to a
+     ``ChunkString``, it will find any substring that matches this
+     tag pattern and that is contained in a chunk, and remove it
+     from that chunk, thus creating two new chunks.
+     """
+
+     def __init__(self, tag_pattern, descr):
+         """
+         Construct a new ``StripRule``.
+
+         :type tag_pattern: str
+         :param tag_pattern: This rule's tag pattern. When
+             applied to a ``ChunkString``, this rule will
+             find any substring that matches this tag pattern and that
+             is contained in a chunk, and remove it from that chunk,
+             thus creating two new chunks.
+         :type descr: str
+         :param descr: A short description of the purpose and/or effect
+             of this rule.
+         """
+         self._pattern = tag_pattern
+         regexp = re.compile(
+             "(?P<strip>%s)%s"
+             % (tag_pattern2re_pattern(tag_pattern), ChunkString.IN_CHUNK_PATTERN)
+         )
+         RegexpChunkRule.__init__(self, regexp, r"}\g<strip>{", descr)
+
+     def __repr__(self):
+         """
+         Return a string representation of this rule. It has the form::
+
+             <StripRule: '<IN|VB.*>'>
+
+         Note that this representation does not include the
+         description string; that string can be accessed
+         separately with the ``descr()`` method.
+
+         :rtype: str
+         """
+         return "<StripRule: " + repr(self._pattern) + ">"
+
+
+ class UnChunkRule(RegexpChunkRule):
+     """
+     A rule specifying how to remove chunks to a ``ChunkString``,
+     using a matching tag pattern. When applied to a
+     ``ChunkString``, it will find any complete chunk that matches this
+     tag pattern, and un-chunk it.
+     """
+
+     def __init__(self, tag_pattern, descr):
+         """
+         Construct a new ``UnChunkRule``.
+
+         :type tag_pattern: str
+         :param tag_pattern: This rule's tag pattern. When
+             applied to a ``ChunkString``, this rule will
+             find any complete chunk that matches this tag pattern,
+             and un-chunk it.
+         :type descr: str
+         :param descr: A short description of the purpose and/or effect
+             of this rule.
+         """
+         self._pattern = tag_pattern
+         regexp = re.compile(r"\{(?P<chunk>%s)\}" % tag_pattern2re_pattern(tag_pattern))
+         RegexpChunkRule.__init__(self, regexp, r"\g<chunk>", descr)
+
+     def __repr__(self):
+         """
+         Return a string representation of this rule. It has the form::
+
+             <UnChunkRule: '<IN|VB.*>'>
+
+         Note that this representation does not include the
+         description string; that string can be accessed
+         separately with the ``descr()`` method.
+
+         :rtype: str
+         """
+         return "<UnChunkRule: " + repr(self._pattern) + ">"
+
+
+ class MergeRule(RegexpChunkRule):
+     """
+     A rule specifying how to merge chunks in a ``ChunkString``, using
+     two matching tag patterns: a left pattern, and a right pattern.
+     When applied to a ``ChunkString``, it will find any chunk whose end
+     matches left pattern, and immediately followed by a chunk whose
+     beginning matches right pattern. It will then merge those two
+     chunks into a single chunk.
+     """
+
+     def __init__(self, left_tag_pattern, right_tag_pattern, descr):
+         """
+         Construct a new ``MergeRule``.
+
+         :type right_tag_pattern: str
+         :param right_tag_pattern: This rule's right tag
+             pattern. When applied to a ``ChunkString``, this
+             rule will find any chunk whose end matches
+             ``left_tag_pattern``, and immediately followed by a chunk
+             whose beginning matches this pattern. It will
+             then merge those two chunks into a single chunk.
+         :type left_tag_pattern: str
+         :param left_tag_pattern: This rule's left tag
+             pattern. When applied to a ``ChunkString``, this
+             rule will find any chunk whose end matches
+             this pattern, and immediately followed by a chunk
+             whose beginning matches ``right_tag_pattern``. It will
+             then merge those two chunks into a single chunk.
+
+         :type descr: str
+         :param descr: A short description of the purpose and/or effect
+             of this rule.
+         """
+         # Ensure that the individual patterns are coherent. E.g., if
+         # left='(' and right=')', then this will raise an exception:
+         re.compile(tag_pattern2re_pattern(left_tag_pattern))
+         re.compile(tag_pattern2re_pattern(right_tag_pattern))
+
+         self._left_tag_pattern = left_tag_pattern
+         self._right_tag_pattern = right_tag_pattern
+         regexp = re.compile(
+             "(?P<left>%s)}{(?=%s)"
+             % (
+                 tag_pattern2re_pattern(left_tag_pattern),
+                 tag_pattern2re_pattern(right_tag_pattern),
+             )
+         )
+         RegexpChunkRule.__init__(self, regexp, r"\g<left>", descr)
+
+     def __repr__(self):
+         """
+         Return a string representation of this rule. It has the form::
+
+             <MergeRule: '<NN|DT|JJ>', '<NN|JJ>'>
+
+         Note that this representation does not include the
+         description string; that string can be accessed
+         separately with the ``descr()`` method.
+
+         :rtype: str
+         """
+         return (
+             "<MergeRule: "
+             + repr(self._left_tag_pattern)
+             + ", "
+             + repr(self._right_tag_pattern)
+             + ">"
+         )
+
+
+ class SplitRule(RegexpChunkRule):
+     """
+     A rule specifying how to split chunks in a ``ChunkString``, using
+     two matching tag patterns: a left pattern, and a right pattern.
+     When applied to a ``ChunkString``, it will find any chunk that
+     matches the left pattern followed by the right pattern. It will
+     then split the chunk into two new chunks, at the point between the
+     two pattern matches.
+     """
+
+     def __init__(self, left_tag_pattern, right_tag_pattern, descr):
+         """
+         Construct a new ``SplitRule``.
+
+         :type right_tag_pattern: str
+         :param right_tag_pattern: This rule's right tag
+             pattern. When applied to a ``ChunkString``, this rule will
+             find any chunk containing a substring that matches
+             ``left_tag_pattern`` followed by this pattern. It will
+             then split the chunk into two new chunks at the point
+             between these two matching patterns.
+         :type left_tag_pattern: str
+         :param left_tag_pattern: This rule's left tag
+             pattern. When applied to a ``ChunkString``, this rule will
+             find any chunk containing a substring that matches this
+             pattern followed by ``right_tag_pattern``. It will then
+             split the chunk into two new chunks at the point between
+             these two matching patterns.
+         :type descr: str
+         :param descr: A short description of the purpose and/or effect
+             of this rule.
+         """
+         # Ensure that the individual patterns are coherent. E.g., if
+         # left='(' and right=')', then this will raise an exception:
+         re.compile(tag_pattern2re_pattern(left_tag_pattern))
+         re.compile(tag_pattern2re_pattern(right_tag_pattern))
+
+         self._left_tag_pattern = left_tag_pattern
+         self._right_tag_pattern = right_tag_pattern
+         regexp = re.compile(
+             "(?P<left>%s)(?=%s)"
+             % (
+                 tag_pattern2re_pattern(left_tag_pattern),
+                 tag_pattern2re_pattern(right_tag_pattern),
+             )
+         )
+         RegexpChunkRule.__init__(self, regexp, r"\g<left>}{", descr)
+
+     def __repr__(self):
+         """
+         Return a string representation of this rule. It has the form::
+
+             <SplitRule: '<NN>', '<DT>'>
+
+         Note that this representation does not include the
+         description string; that string can be accessed
+         separately with the ``descr()`` method.
+
+         :rtype: str
+         """
+         return (
+             "<SplitRule: "
+             + repr(self._left_tag_pattern)
+             + ", "
+             + repr(self._right_tag_pattern)
+             + ">"
+         )
+
+
+ class ExpandLeftRule(RegexpChunkRule):
+     """
+     A rule specifying how to expand chunks in a ``ChunkString`` to the left,
+     using two matching tag patterns: a left pattern, and a right pattern.
+     When applied to a ``ChunkString``, it will find any chunk whose beginning
+     matches right pattern, and immediately preceded by a strip whose
+     end matches left pattern. It will then expand the chunk to incorporate
+     the new material on the left.
+     """
+
+     def __init__(self, left_tag_pattern, right_tag_pattern, descr):
+         """
+         Construct a new ``ExpandRightRule``.
+
+         :type right_tag_pattern: str
+         :param right_tag_pattern: This rule's right tag
+             pattern. When applied to a ``ChunkString``, this
+             rule will find any chunk whose beginning matches
+             ``right_tag_pattern``, and immediately preceded by a strip
+             whose end matches this pattern. It will
+             then merge those two chunks into a single chunk.
+         :type left_tag_pattern: str
+         :param left_tag_pattern: This rule's left tag
+             pattern. When applied to a ``ChunkString``, this
+             rule will find any chunk whose beginning matches
+             this pattern, and immediately preceded by a strip
+             whose end matches ``left_tag_pattern``. It will
+             then expand the chunk to incorporate the new material on the left.
+
+         :type descr: str
+         :param descr: A short description of the purpose and/or effect
+             of this rule.
+         """
+         # Ensure that the individual patterns are coherent. E.g., if
+         # left='(' and right=')', then this will raise an exception:
+         re.compile(tag_pattern2re_pattern(left_tag_pattern))
+         re.compile(tag_pattern2re_pattern(right_tag_pattern))
+
+         self._left_tag_pattern = left_tag_pattern
+         self._right_tag_pattern = right_tag_pattern
+         regexp = re.compile(
+             r"(?P<left>%s)\{(?P<right>%s)"
+             % (
+                 tag_pattern2re_pattern(left_tag_pattern),
+                 tag_pattern2re_pattern(right_tag_pattern),
+             )
+         )
+         RegexpChunkRule.__init__(self, regexp, r"{\g<left>\g<right>", descr)
+
+     def __repr__(self):
+         """
+         Return a string representation of this rule. It has the form::
+
+             <ExpandLeftRule: '<NN|DT|JJ>', '<NN|JJ>'>
+
+         Note that this representation does not include the
+         description string; that string can be accessed
+         separately with the ``descr()`` method.
+
+         :rtype: str
+         """
+         return (
+             "<ExpandLeftRule: "
+             + repr(self._left_tag_pattern)
+             + ", "
+             + repr(self._right_tag_pattern)
+             + ">"
+         )
+
+
+ class ExpandRightRule(RegexpChunkRule):
+     """
+     A rule specifying how to expand chunks in a ``ChunkString`` to the
+     right, using two matching tag patterns: a left pattern, and a
+     right pattern. When applied to a ``ChunkString``, it will find any
+     chunk whose end matches left pattern, and immediately followed by
+     a strip whose beginning matches right pattern. It will then
+     expand the chunk to incorporate the new material on the right.
+     """
+
+     def __init__(self, left_tag_pattern, right_tag_pattern, descr):
+         """
+         Construct a new ``ExpandRightRule``.
+
+         :type right_tag_pattern: str
+         :param right_tag_pattern: This rule's right tag
+             pattern. When applied to a ``ChunkString``, this
+             rule will find any chunk whose end matches
+             ``left_tag_pattern``, and immediately followed by a strip
+             whose beginning matches this pattern. It will
+             then merge those two chunks into a single chunk.
+         :type left_tag_pattern: str
+         :param left_tag_pattern: This rule's left tag
+             pattern. When applied to a ``ChunkString``, this
+             rule will find any chunk whose end matches
+             this pattern, and immediately followed by a strip
+             whose beginning matches ``right_tag_pattern``. It will
+             then expand the chunk to incorporate the new material on the right.
+
+         :type descr: str
+         :param descr: A short description of the purpose and/or effect
+             of this rule.
+         """
+         # Ensure that the individual patterns are coherent. E.g., if
+         # left='(' and right=')', then this will raise an exception:
+         re.compile(tag_pattern2re_pattern(left_tag_pattern))
+         re.compile(tag_pattern2re_pattern(right_tag_pattern))
+
+         self._left_tag_pattern = left_tag_pattern
+         self._right_tag_pattern = right_tag_pattern
+         regexp = re.compile(
+             r"(?P<left>%s)\}(?P<right>%s)"
+             % (
+                 tag_pattern2re_pattern(left_tag_pattern),
+                 tag_pattern2re_pattern(right_tag_pattern),
+             )
+         )
+         RegexpChunkRule.__init__(self, regexp, r"\g<left>\g<right>}", descr)
+
+     def __repr__(self):
+         """
+         Return a string representation of this rule. It has the form::
+
+             <ExpandRightRule: '<NN|DT|JJ>', '<NN|JJ>'>
+
+         Note that this representation does not include the
+         description string; that string can be accessed
+         separately with the ``descr()`` method.
+
+         :rtype: str
+         """
+         return (
+             "<ExpandRightRule: "
+             + repr(self._left_tag_pattern)
+             + ", "
+             + repr(self._right_tag_pattern)
+             + ">"
+         )
+
+
+ class ChunkRuleWithContext(RegexpChunkRule):
+     """
+     A rule specifying how to add chunks to a ``ChunkString``, using
+     three matching tag patterns: one for the left context, one for the
+     chunk, and one for the right context. When applied to a
+     ``ChunkString``, it will find any substring that matches the chunk
+     tag pattern, is surrounded by substrings that match the two
+     context patterns, and is not already part of a chunk; and create a
+     new chunk containing the substring that matched the chunk tag
+     pattern.
+
+     Caveat: Both the left and right context are consumed when this
+     rule matches; therefore, if you need to find overlapping matches,
+     you will need to apply your rule more than once.
+     """
+
+     def __init__(
+         self,
+         left_context_tag_pattern,
+         chunk_tag_pattern,
+         right_context_tag_pattern,
+         descr,
+     ):
+         """
+         Construct a new ``ChunkRuleWithContext``.
+
+         :type left_context_tag_pattern: str
+         :param left_context_tag_pattern: A tag pattern that must match
+             the left context of ``chunk_tag_pattern`` for this rule to
+             apply.
+         :type chunk_tag_pattern: str
+         :param chunk_tag_pattern: A tag pattern that must match for this
+             rule to apply. If the rule does apply, then this pattern
+             also identifies the substring that will be made into a chunk.
+         :type right_context_tag_pattern: str
+         :param right_context_tag_pattern: A tag pattern that must match
+             the right context of ``chunk_tag_pattern`` for this rule to
+             apply.
+         :type descr: str
+         :param descr: A short description of the purpose and/or effect
+             of this rule.
+         """
+         # Ensure that the individual patterns are coherent. E.g., if
+         # left='(' and right=')', then this will raise an exception:
+         re.compile(tag_pattern2re_pattern(left_context_tag_pattern))
+         re.compile(tag_pattern2re_pattern(chunk_tag_pattern))
+         re.compile(tag_pattern2re_pattern(right_context_tag_pattern))
+
+         self._left_context_tag_pattern = left_context_tag_pattern
+         self._chunk_tag_pattern = chunk_tag_pattern
+         self._right_context_tag_pattern = right_context_tag_pattern
+         regexp = re.compile(
+             "(?P<left>%s)(?P<chunk>%s)(?P<right>%s)%s"
+             % (
+                 tag_pattern2re_pattern(left_context_tag_pattern),
+                 tag_pattern2re_pattern(chunk_tag_pattern),
+                 tag_pattern2re_pattern(right_context_tag_pattern),
+                 ChunkString.IN_STRIP_PATTERN,
+             )
+         )
+         replacement = r"\g<left>{\g<chunk>}\g<right>"
+         RegexpChunkRule.__init__(self, regexp, replacement, descr)
+
+     def __repr__(self):
+         """
+         Return a string representation of this rule. It has the form::
+
+             <ChunkRuleWithContext: '<IN>', '<NN>', '<DT>'>
+
+         Note that this representation does not include the
+         description string; that string can be accessed
+         separately with the ``descr()`` method.
+
+         :rtype: str
+         """
+         return "<ChunkRuleWithContext: {!r}, {!r}, {!r}>".format(
+             self._left_context_tag_pattern,
+             self._chunk_tag_pattern,
+             self._right_context_tag_pattern,
+         )
+
+
+ # //////////////////////////////////////////////////////
+ # Tag Pattern Format Conversion
+ # //////////////////////////////////////////////////////
+
+ # this should probably be made more strict than it is -- e.g., it
+ # currently accepts 'foo'.
+ CHUNK_TAG_PATTERN = re.compile(
+     r"^(({}|<{}>)*)$".format(r"([^\{\}<>]|\{\d+,?\}|\{\d*,\d+\})+", r"[^\{\}<>]+")
+ )
+
+
+ def tag_pattern2re_pattern(tag_pattern):
+     """
+     Convert a tag pattern to a regular expression pattern. A "tag
+     pattern" is a modified version of a regular expression, designed
+     for matching sequences of tags. The differences between regular
+     expression patterns and tag patterns are:
+
+     - In tag patterns, ``'<'`` and ``'>'`` act as parentheses; so
+       ``'<NN>+'`` matches one or more repetitions of ``'<NN>'``, not
+       ``'<NN'`` followed by one or more repetitions of ``'>'``.
+     - Whitespace in tag patterns is ignored. So
+       ``'<DT> | <NN>'`` is equivalent to ``'<DT>|<NN>'``
+     - In tag patterns, ``'.'`` is equivalent to ``'[^{}<>]'``; so
+       ``'<NN.*>'`` matches any single tag starting with ``'NN'``.
+
+     In particular, ``tag_pattern2re_pattern`` performs the following
+     transformations on the given pattern:
+
+     - Replace '.' with '[^<>{}]'
+     - Remove any whitespace
+     - Add extra parens around '<' and '>', to make '<' and '>' act
+       like parentheses. E.g., so that in '<NN>+', the '+' has scope
+       over the entire '<NN>'; and so that in '<NN|IN>', the '|' has
+       scope over 'NN' and 'IN', but not '<' or '>'.
+     - Check to make sure the resulting pattern is valid.
+
+     :type tag_pattern: str
+     :param tag_pattern: The tag pattern to convert to a regular
+         expression pattern.
+     :raise ValueError: If ``tag_pattern`` is not a valid tag pattern.
+         In particular, ``tag_pattern`` should not include braces; and it
+         should not contain nested or mismatched angle-brackets.
+     :rtype: str
+     :return: A regular expression pattern corresponding to
+         ``tag_pattern``.
+     """
+     # Clean up the regular expression
+     tag_pattern = re.sub(r"\s", "", tag_pattern)
+     tag_pattern = re.sub(r"<", "(<(", tag_pattern)
+     tag_pattern = re.sub(r">", ")>)", tag_pattern)
+
+     # Check the regular expression
+     if not CHUNK_TAG_PATTERN.match(tag_pattern):
+         raise ValueError("Bad tag pattern: %r" % tag_pattern)
+
+     # Replace "." with CHUNK_TAG_CHAR.
+     # We have to do this after, since it adds {}[]<>s, which would
+     # confuse CHUNK_TAG_PATTERN.
+     # PRE doesn't have lookback assertions, so reverse twice, and do
+     # the pattern backwards (with lookahead assertions). This can be
+     # made much cleaner once we can switch back to SRE.
+     def reverse_str(str):
+         lst = list(str)
+         lst.reverse()
+         return "".join(lst)
+
+     tc_rev = reverse_str(ChunkString.CHUNK_TAG_CHAR)
+     reversed = reverse_str(tag_pattern)
+     reversed = re.sub(r"\.(?!\\(\\\\)*($|[^\\]))", tc_rev, reversed)
+     tag_pattern = reverse_str(reversed)
+
+     return tag_pattern
+
+
+ # //////////////////////////////////////////////////////
+ # RegexpChunkParser
+ # //////////////////////////////////////////////////////
+
+
+ class RegexpChunkParser(ChunkParserI):
+     """
+     A regular expression based chunk parser. ``RegexpChunkParser`` uses a
+     sequence of "rules" to find chunks of a single type within a
+     text. The chunking of the text is encoded using a ``ChunkString``,
+     and each rule acts by modifying the chunking in the
+     ``ChunkString``. The rules are all implemented using regular
+     expression matching and substitution.
+
+     The ``RegexpChunkRule`` class and its subclasses (``ChunkRule``,
+     ``StripRule``, ``UnChunkRule``, ``MergeRule``, and ``SplitRule``)
+     define the rules that are used by ``RegexpChunkParser``. Each rule
+     defines an ``apply()`` method, which modifies the chunking encoded
+     by a given ``ChunkString``.
+
+     :type _rules: list(RegexpChunkRule)
+     :ivar _rules: The list of rules that should be applied to a text.
+     :type _trace: int
+     :ivar _trace: The default level of tracing.
+
+     """
+
+     def __init__(self, rules, chunk_label="NP", root_label="S", trace=0):
+         """
+         Construct a new ``RegexpChunkParser``.
+
+         :type rules: list(RegexpChunkRule)
+         :param rules: The sequence of rules that should be used to
+             generate the chunking for a tagged text.
+         :type chunk_label: str
+         :param chunk_label: The node value that should be used for
+             chunk subtrees. This is typically a short string
+             describing the type of information contained by the chunk,
+             such as ``"NP"`` for base noun phrases.
+         :type root_label: str
+         :param root_label: The node value that should be used for the
+             top node of the chunk structure.
+         :type trace: int
+         :param trace: The level of tracing that should be used when
+             parsing a text. ``0`` will generate no tracing output;
+             ``1`` will generate normal tracing output; and ``2`` or
+             higher will generate verbose tracing output.
+         """
+         self._rules = rules
+         self._trace = trace
+         self._chunk_label = chunk_label
+         self._root_label = root_label
+
+     def _trace_apply(self, chunkstr, verbose):
+         """
+         Apply each rule of this ``RegexpChunkParser`` to ``chunkstr``, in
+         turn. Generate trace output between each rule. If ``verbose``
+         is true, then generate verbose output.
+
+         :type chunkstr: ChunkString
+         :param chunkstr: The chunk string to which each rule should be
+             applied.
+         :type verbose: bool
+         :param verbose: Whether output should be verbose.
+         :rtype: None
+         """
+         print("# Input:")
+         print(chunkstr)
+         for rule in self._rules:
+             rule.apply(chunkstr)
+             if verbose:
+                 print("#", rule.descr() + " (" + repr(rule) + "):")
+             else:
+                 print("#", rule.descr() + ":")
+             print(chunkstr)
+
+     def _notrace_apply(self, chunkstr):
+         """
+         Apply each rule of this ``RegexpChunkParser`` to ``chunkstr``, in
+         turn.
+
+         :param chunkstr: The chunk string to which each rule should be
+             applied.
+         :type chunkstr: ChunkString
+         :rtype: None
+         """
+
+         for rule in self._rules:
+             rule.apply(chunkstr)
+
+     def parse(self, chunk_struct, trace=None):
+         """
+         :type chunk_struct: Tree
+         :param chunk_struct: the chunk structure to be (further) chunked
+         :type trace: int
+         :param trace: The level of tracing that should be used when
+             parsing a text. ``0`` will generate no tracing output;
+             ``1`` will generate normal tracing output; and ``2`` or
+             higher will generate verbose tracing output. This value
+             overrides the trace level value that was given to the
+             constructor.
+         :rtype: Tree
+         :return: a chunk structure that encodes the chunks in a given
+             tagged sentence. A chunk is a non-overlapping linguistic
+             group, such as a noun phrase. The set of chunks
+             identified in the chunk structure depends on the rules
+             used to define this ``RegexpChunkParser``.
+         """
+         if len(chunk_struct) == 0:
+             print("Warning: parsing empty text")
+             return Tree(self._root_label, [])
+
+         try:
+             chunk_struct.label()
+         except AttributeError:
+             chunk_struct = Tree(self._root_label, chunk_struct)
+
+         # Use the default trace value?
+         if trace is None:
+             trace = self._trace
+
+         chunkstr = ChunkString(chunk_struct)
+
+         # Apply the sequence of rules to the chunkstring.
+         if trace:
+             verbose = trace > 1
+             self._trace_apply(chunkstr, verbose)
+         else:
+             self._notrace_apply(chunkstr)
+
+         # Use the chunkstring to create a chunk structure.
+         return chunkstr.to_chunkstruct(self._chunk_label)
+
+     def rules(self):
+         """
+         :return: the sequence of rules used by ``RegexpChunkParser``.
+         :rtype: list(RegexpChunkRule)
+         """
+         return self._rules
+
+     def __repr__(self):
+         """
+         :return: a concise string representation of this
+             ``RegexpChunkParser``.
+         :rtype: str
+         """
+         return "<RegexpChunkParser with %d rules>" % len(self._rules)
+
+     def __str__(self):
+         """
+         :return: a verbose string representation of this ``RegexpChunkParser``.
+         :rtype: str
+         """
+         s = "RegexpChunkParser with %d rules:\n" % len(self._rules)
+         margin = 0
+         for rule in self._rules:
+             margin = max(margin, len(rule.descr()))
+         if margin < 35:
+             format = "    %" + repr(-(margin + 3)) + "s%s\n"
+         else:
+             format = "    %s\n      %s\n"
+         for rule in self._rules:
+             s += format % (rule.descr(), repr(rule))
+         return s[:-1]
+
+
+ # //////////////////////////////////////////////////////
+ # Chunk Grammar
+ # //////////////////////////////////////////////////////
+
+
+ class RegexpParser(ChunkParserI):
+     r"""
+     A grammar based chunk parser. ``chunk.RegexpParser`` uses a set of
+     regular expression patterns to specify the behavior of the parser.
+     The chunking of the text is encoded using a ``ChunkString``, and
+     each rule acts by modifying the chunking in the ``ChunkString``.
+     The rules are all implemented using regular expression matching
+     and substitution.
+
+     A grammar contains one or more clauses in the following form::
+
+         NP:
+             {<DT|JJ>}          # chunk determiners and adjectives
+             }<[\.VI].*>+{      # strip any tag beginning with V, I, or .
+             <.*>}{<DT>         # split a chunk at a determiner
+             <DT|JJ>{}<NN.*>    # merge chunk ending with det/adj
+                                # with one starting with a noun
+
+     The patterns of a clause are executed in order. An earlier
+     pattern may introduce a chunk boundary that prevents a later
+     pattern from executing. Sometimes an individual pattern will
+     match on multiple, overlapping extents of the input. As with
+     regular expression substitution more generally, the chunker will
+     identify the first match possible, then continue looking for matches
+     after this one has ended.
+
+     The clauses of a grammar are also executed in order. A cascaded
+     chunk parser is one having more than one clause. The maximum depth
+     of a parse tree created by this chunk parser is the same as the
+     number of clauses in the grammar.
+
+     When tracing is turned on, the comment portion of a line is displayed
+     each time the corresponding pattern is applied.
1168
+
1169
+ :type _start: str
1170
+ :ivar _start: The start symbol of the grammar (the root node of
1171
+ resulting trees)
1172
+ :type _stages: list(RegexpChunkParser)
1173
+ :ivar _stages: The list of parsing stages corresponding to the grammar
1174
+
1175
+ """
1176
+
1177
+ def __init__(self, grammar, root_label="S", loop=1, trace=0):
1178
+ """
1179
+ Create a new chunk parser, from the given start state
1180
+ and set of chunk patterns.
1181
+
1182
+ :param grammar: The grammar, or a list of RegexpChunkParser objects
1183
+ :type grammar: str or list(RegexpChunkParser)
1184
+ :param root_label: The top node of the tree being created
1185
+ :type root_label: str or Nonterminal
1186
+ :param loop: The number of times to run through the patterns
1187
+ :type loop: int
1188
+ :type trace: int
1189
+ :param trace: The level of tracing that should be used when
1190
+ parsing a text. ``0`` will generate no tracing output;
1191
+ ``1`` will generate normal tracing output; and ``2`` or
1192
+ higher will generate verbose tracing output.
1193
+ """
1194
+ self._trace = trace
1195
+ self._stages = []
1196
+ self._grammar = grammar
1197
+ self._loop = loop
1198
+
1199
+ if isinstance(grammar, str):
1200
+ self._read_grammar(grammar, root_label, trace)
1201
+ else:
1202
+ # Make sure the grammar looks like it has the right type:
1203
+ type_err = (
1204
+ "Expected string or list of RegexpChunkParsers " "for the grammar."
1205
+ )
1206
+ try:
1207
+ grammar = list(grammar)
1208
+ except BaseException as e:
1209
+ raise TypeError(type_err) from e
1210
+ for elt in grammar:
1211
+ if not isinstance(elt, RegexpChunkParser):
1212
+ raise TypeError(type_err)
1213
+ self._stages = grammar
1214
+
1215
+ def _read_grammar(self, grammar, root_label, trace):
1216
+ """
1217
+ Helper function for __init__: read the grammar if it is a
1218
+ string.
1219
+ """
1220
+ rules = []
1221
+ lhs = None
1222
+ pattern = regex.compile("(?P<nonterminal>(\\.|[^:])*)(:(?P<rule>.*))")
1223
+ for line in grammar.split("\n"):
1224
+ line = line.strip()
1225
+
1226
+ # New stage begins if there's an unescaped ':'
1227
+ m = pattern.match(line)
1228
+ if m:
1229
+ # Record the stage that we just completed.
1230
+ self._add_stage(rules, lhs, root_label, trace)
1231
+ # Start a new stage.
1232
+ lhs = m.group("nonterminal").strip()
1233
+ rules = []
1234
+ line = m.group("rule").strip()
1235
+
1236
+ # Skip blank & comment-only lines
1237
+ if line == "" or line.startswith("#"):
1238
+ continue
1239
+
1240
+ # Add the rule
1241
+ rules.append(RegexpChunkRule.fromstring(line))
1242
+
1243
+ # Record the final stage
1244
+ self._add_stage(rules, lhs, root_label, trace)
1245
+
1246
+ def _add_stage(self, rules, lhs, root_label, trace):
1247
+ """
1248
+ Helper function for __init__: add a new stage to the parser.
1249
+ """
1250
+ if rules != []:
1251
+ if not lhs:
1252
+ raise ValueError("Expected stage marker (eg NP:)")
1253
+ parser = RegexpChunkParser(
1254
+ rules, chunk_label=lhs, root_label=root_label, trace=trace
1255
+ )
1256
+ self._stages.append(parser)
1257
+
1258
+ def parse(self, chunk_struct, trace=None):
1259
+ """
1260
+ Apply the chunk parser to this input.
1261
+
1262
+ :type chunk_struct: Tree
1263
+ :param chunk_struct: the chunk structure to be (further) chunked
1264
+ (this tree is modified, and is also returned)
1265
+ :type trace: int
1266
+ :param trace: The level of tracing that should be used when
1267
+ parsing a text. ``0`` will generate no tracing output;
1268
+ ``1`` will generate normal tracing output; and ``2`` or
1269
+ higher will generate verbose tracing output. This value
1270
+ overrides the trace level value that was given to the
1271
+ constructor.
1272
+ :return: the chunked output.
1273
+ :rtype: Tree
1274
+ """
1275
+ if trace is None:
1276
+ trace = self._trace
1277
+ for i in range(self._loop):
1278
+ for parser in self._stages:
1279
+ chunk_struct = parser.parse(chunk_struct, trace=trace)
1280
+ return chunk_struct
1281
+
1282
+ def __repr__(self):
1283
+ """
1284
+ :return: a concise string representation of this ``chunk.RegexpParser``.
1285
+ :rtype: str
1286
+ """
1287
+ return "<chunk.RegexpParser with %d stages>" % len(self._stages)
1288
+
1289
+ def __str__(self):
1290
+ """
1291
+ :return: a verbose string representation of this
1292
+ ``RegexpParser``.
1293
+ :rtype: str
1294
+ """
1295
+ s = "chunk.RegexpParser with %d stages:\n" % len(self._stages)
1296
+ margin = 0
1297
+ for parser in self._stages:
1298
+ s += "%s\n" % parser
1299
+ return s[:-1]
1300
+
1301
+
1302
+ # //////////////////////////////////////////////////////
1303
+ # Demonstration code
1304
+ # //////////////////////////////////////////////////////
1305
+
1306
+
1307
+ def demo_eval(chunkparser, text):
1308
+ """
1309
+ Demonstration code for evaluating a chunk parser, using a
1310
+ ``ChunkScore``. This function assumes that ``text`` contains one
1311
+ sentence per line, and that each sentence has the form expected by
1312
+ ``chunk.tagstr2tree``. It runs the given chunk parser on each sentence in
1313
+ the text, and scores the result. It prints the final score
1314
+ (precision, recall, and f-measure); and reports the set of chunks
1315
+ that were missed and the set of chunks that were incorrect. (At
1316
+ most 10 missing chunks and 10 incorrect chunks are reported).
1317
+
1318
+ :param chunkparser: The chunkparser to be tested
1319
+ :type chunkparser: ChunkParserI
1320
+ :param text: The chunked tagged text that should be used for
1321
+ evaluation.
1322
+ :type text: str
1323
+ """
1324
+ from nltk import chunk
1325
+ from nltk.tree import Tree
1326
+
1327
+ # Evaluate our chunk parser.
1328
+ chunkscore = chunk.ChunkScore()
1329
+
1330
+ for sentence in text.split("\n"):
1331
+ print(sentence)
1332
+ sentence = sentence.strip()
1333
+ if not sentence:
1334
+ continue
1335
+ gold = chunk.tagstr2tree(sentence)
1336
+ tokens = gold.leaves()
1337
+ test = chunkparser.parse(Tree("S", tokens), trace=1)
1338
+ chunkscore.score(gold, test)
1339
+ print()
1340
+
1341
+ print("/" + ("=" * 75) + "\\")
1342
+ print("Scoring", chunkparser)
1343
+ print("-" * 77)
1344
+ print("Precision: %5.1f%%" % (chunkscore.precision() * 100), " " * 4, end=" ")
1345
+ print("Recall: %5.1f%%" % (chunkscore.recall() * 100), " " * 6, end=" ")
1346
+ print("F-Measure: %5.1f%%" % (chunkscore.f_measure() * 100))
1347
+
1348
+ # Missed chunks.
1349
+ if chunkscore.missed():
1350
+ print("Missed:")
1351
+ missed = chunkscore.missed()
1352
+ for chunk in missed[:10]:
1353
+ print(" ", " ".join(map(str, chunk)))
1354
+ if len(chunkscore.missed()) > 10:
1355
+ print(" ...")
1356
+
1357
+ # Incorrect chunks.
1358
+ if chunkscore.incorrect():
1359
+ print("Incorrect:")
1360
+ incorrect = chunkscore.incorrect()
1361
+ for chunk in incorrect[:10]:
1362
+ print(" ", " ".join(map(str, chunk)))
1363
+ if len(chunkscore.incorrect()) > 10:
1364
+ print(" ...")
1365
+
1366
+ print("\\" + ("=" * 75) + "/")
1367
+ print()
1368
+
1369
+
1370
+ def demo():
1371
+ """
1372
+ A demonstration for the ``RegexpChunkParser`` class. A single text is
1373
+ parsed with four different chunk parsers, using a variety of rules
1374
+ and strategies.
1375
+ """
1376
+
1377
+ from nltk import Tree, chunk
1378
+
1379
+ text = """\
1380
+ [ the/DT little/JJ cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] ./.
1381
+ [ John/NNP ] saw/VBD [the/DT cats/NNS] [the/DT dog/NN] chased/VBD ./.
1382
+ [ John/NNP ] thinks/VBZ [ Mary/NN ] saw/VBD [ the/DT cat/NN ] sit/VB on/IN [ the/DT mat/NN ]./.
1383
+ """
1384
+
1385
+ print("*" * 75)
1386
+ print("Evaluation text:")
1387
+ print(text)
1388
+ print("*" * 75)
1389
+ print()
1390
+
1391
+ grammar = r"""
1392
+ NP: # NP stage
1393
+ {<DT>?<JJ>*<NN>} # chunk determiners, adjectives and nouns
1394
+ {<NNP>+} # chunk proper nouns
1395
+ """
1396
+ cp = chunk.RegexpParser(grammar)
1397
+ demo_eval(cp, text)
1398
+
1399
+ grammar = r"""
1400
+ NP:
1401
+ {<.*>} # start by chunking each tag
1402
+ }<[\.VI].*>+{ # unchunk any verbs, prepositions or periods
1403
+ <DT|JJ>{}<NN.*> # merge det/adj with nouns
1404
+ """
1405
+ cp = chunk.RegexpParser(grammar)
1406
+ demo_eval(cp, text)
1407
+
1408
+ grammar = r"""
1409
+ NP: {<DT>?<JJ>*<NN>} # chunk determiners, adjectives and nouns
1410
+ VP: {<TO>?<VB.*>} # VP = verb words
1411
+ """
1412
+ cp = chunk.RegexpParser(grammar)
1413
+ demo_eval(cp, text)
1414
+
1415
+ grammar = r"""
1416
+ NP: {<.*>*} # start by chunking everything
1417
+ }<[\.VI].*>+{ # strip any verbs, prepositions or periods
1418
+ <.*>}{<DT> # separate on determiners
1419
+ PP: {<IN><NP>} # PP = preposition + noun phrase
1420
+ VP: {<VB.*><NP|PP>*} # VP = verb words + NPs and PPs
1421
+ """
1422
+ cp = chunk.RegexpParser(grammar)
1423
+ demo_eval(cp, text)
1424
+
1425
+ # Evaluation
1426
+
1427
+ from nltk.corpus import conll2000
1428
+
1429
+ print()
1430
+ print("Demonstration of empty grammar:")
1431
+
1432
+ cp = chunk.RegexpParser("")
1433
+ print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt", chunk_types=("NP",))))
1434
+
1435
+ print()
1436
+ print("Demonstration of accuracy evaluation using CoNLL tags:")
1437
+
1438
+ grammar = r"""
1439
+ NP:
1440
+ {<.*>} # start by chunking each tag
1441
+ }<[\.VI].*>+{ # unchunk any verbs, prepositions or periods
1442
+ <DT|JJ>{}<NN.*> # merge det/adj with nouns
1443
+ """
1444
+ cp = chunk.RegexpParser(grammar)
1445
+ print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt")[:5]))
1446
+
1447
+ print()
1448
+ print("Demonstration of tagged token input")
1449
+
1450
+ grammar = r"""
1451
+ NP: {<.*>*} # start by chunking everything
1452
+ }<[\.VI].*>+{ # strip any verbs, prepositions or periods
1453
+ <.*>}{<DT> # separate on determiners
1454
+ PP: {<IN><NP>} # PP = preposition + noun phrase
1455
+ VP: {<VB.*><NP|PP>*} # VP = verb words + NPs and PPs
1456
+ """
1457
+ cp = chunk.RegexpParser(grammar)
1458
+ print(
1459
+ cp.parse(
1460
+ [
1461
+ ("the", "DT"),
1462
+ ("little", "JJ"),
1463
+ ("cat", "NN"),
1464
+ ("sat", "VBD"),
1465
+ ("on", "IN"),
1466
+ ("the", "DT"),
1467
+ ("mat", "NN"),
1468
+ (".", "."),
1469
+ ]
1470
+ )
1471
+ )
1472
+
1473
+
1474
+ if __name__ == "__main__":
1475
+ demo()
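A minimal usage sketch of the grammar format documented in the ``RegexpParser`` docstring above (an editorial example, not part of the uploaded file; it assumes nltk is installed and mirrors the tagged-token input used in ``demo()``):

# Editorial sketch: chunk a hand-tagged sentence with a two-stage grammar.
from nltk.chunk import RegexpParser

grammar = r"""
  NP: {<DT>?<JJ>*<NN.*>+}   # determiner/adjectives/nouns
  PP: {<IN><NP>}            # preposition followed by an NP chunk
"""
parser = RegexpParser(grammar)
tagged = [("the", "DT"), ("little", "JJ"), ("cat", "NN"),
          ("sat", "VBD"), ("on", "IN"), ("the", "DT"), ("mat", "NN")]
print(parser.parse(tagged))
# -> (S (NP the/DT little/JJ cat/NN) sat/VBD (PP on/IN (NP the/DT mat/NN)))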
env-llmeval/lib/python3.10/site-packages/nltk/cluster/api.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Clusterer Interfaces
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # Porting: Steven Bird <[email protected]>
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+
9
+ from abc import ABCMeta, abstractmethod
10
+
11
+ from nltk.probability import DictionaryProbDist
12
+
13
+
14
+ class ClusterI(metaclass=ABCMeta):
15
+ """
16
+ Interface covering basic clustering functionality.
17
+ """
18
+
19
+ @abstractmethod
20
+ def cluster(self, vectors, assign_clusters=False):
21
+ """
22
+ Assigns the vectors to clusters, learning the clustering parameters
23
+ from the data. Returns a cluster identifier for each vector.
24
+ """
25
+
26
+ @abstractmethod
27
+ def classify(self, token):
28
+ """
29
+ Classifies the token into a cluster, setting the token's CLUSTER
30
+ parameter to that cluster identifier.
31
+ """
32
+
33
+ def likelihood(self, vector, label):
34
+ """
35
+ Returns the likelihood (a float) of the token having the
36
+ corresponding cluster.
37
+ """
38
+ if self.classify(vector) == label:
39
+ return 1.0
40
+ else:
41
+ return 0.0
42
+
43
+ def classification_probdist(self, vector):
44
+ """
45
+ Classifies the token into a cluster, returning
46
+ a probability distribution over the cluster identifiers.
47
+ """
48
+ likelihoods = {}
49
+ sum = 0.0
50
+ for cluster in self.cluster_names():
51
+ likelihoods[cluster] = self.likelihood(vector, cluster)
52
+ sum += likelihoods[cluster]
53
+ for cluster in self.cluster_names():
54
+ likelihoods[cluster] /= sum
55
+ return DictionaryProbDist(likelihoods)
56
+
57
+ @abstractmethod
58
+ def num_clusters(self):
59
+ """
60
+ Returns the number of clusters.
61
+ """
62
+
63
+ def cluster_names(self):
64
+ """
65
+ Returns the names of the clusters.
66
+ :rtype: list
67
+ """
68
+ return list(range(self.num_clusters()))
69
+
70
+ def cluster_name(self, index):
71
+ """
72
+ Returns the name of the cluster at index.
73
+ """
74
+ return index
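A hypothetical toy subclass (editorial sketch, not in the file) showing how the default ``likelihood()`` and ``classification_probdist()`` helpers behave once the three abstract methods are supplied; the thresholding rule is invented purely for illustration:

from nltk.cluster.api import ClusterI

class ThresholdClusterer(ClusterI):
    # Puts a scalar into cluster 0 or 1 by comparing it with a fixed threshold.
    def __init__(self, threshold=0.5):
        self._threshold = threshold

    def cluster(self, vectors, assign_clusters=False):
        if assign_clusters:
            return [self.classify(v) for v in vectors]

    def classify(self, token):
        return 0 if token < self._threshold else 1

    def num_clusters(self):
        return 2

c = ThresholdClusterer()
print(c.classify(0.7))                    # -> 1
pdist = c.classification_probdist(0.7)    # built from the 0/1 default likelihood()
print([(s, pdist.prob(s)) for s in pdist.samples()])   # all mass on cluster 1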
env-llmeval/lib/python3.10/site-packages/nltk/cluster/em.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Expectation Maximization Clusterer
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ try:
9
+ import numpy
10
+ except ImportError:
11
+ pass
12
+
13
+ from nltk.cluster.util import VectorSpaceClusterer
14
+
15
+
16
+ class EMClusterer(VectorSpaceClusterer):
17
+ """
18
+ The Gaussian EM clusterer models the vectors as being produced by
19
+ a mixture of k Gaussian sources. The parameters of these sources
20
+ (prior probability, mean and covariance matrix) are then found to
21
+ maximise the likelihood of the given data. This is done with the
22
+ expectation maximisation algorithm. It starts with k arbitrarily
23
+ chosen means, priors and covariance matrices. It then calculates
24
+ the membership probabilities for each vector in each of the
25
+ clusters; this is the 'E' step. The cluster parameters are then
26
+ updated in the 'M' step using the maximum likelihood estimate from
27
+ the cluster membership probabilities. This process continues until
28
+ the likelihood of the data does not significantly increase.
29
+ """
30
+
31
+ def __init__(
32
+ self,
33
+ initial_means,
34
+ priors=None,
35
+ covariance_matrices=None,
36
+ conv_threshold=1e-6,
37
+ bias=0.1,
38
+ normalise=False,
39
+ svd_dimensions=None,
40
+ ):
41
+ """
42
+ Creates an EM clusterer with the given starting parameters,
43
+ convergence threshold and vector mangling parameters.
44
+
45
+ :param initial_means: the means of the gaussian cluster centers
46
+ :type initial_means: [seq of] numpy array or seq of SparseArray
47
+ :param priors: the prior probability for each cluster
48
+ :type priors: numpy array or seq of float
49
+ :param covariance_matrices: the covariance matrix for each cluster
50
+ :type covariance_matrices: [seq of] numpy array
51
+ :param conv_threshold: maximum change in likelihood before deemed
52
+ convergent
53
+ :type conv_threshold: int or float
54
+ :param bias: variance bias used to ensure non-singular covariance
55
+ matrices
56
+ :type bias: float
57
+ :param normalise: should vectors be normalised to length 1
58
+ :type normalise: boolean
59
+ :param svd_dimensions: number of dimensions to use in reducing vector
60
+ dimensionality with SVD
61
+ :type svd_dimensions: int
62
+ """
63
+ VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
64
+ self._means = numpy.array(initial_means, numpy.float64)
65
+ self._num_clusters = len(initial_means)
66
+ self._conv_threshold = conv_threshold
67
+ self._covariance_matrices = covariance_matrices
68
+ self._priors = priors
69
+ self._bias = bias
70
+
71
+ def num_clusters(self):
72
+ return self._num_clusters
73
+
74
+ def cluster_vectorspace(self, vectors, trace=False):
75
+ assert len(vectors) > 0
76
+
77
+ # set the parameters to initial values
78
+ dimensions = len(vectors[0])
79
+ means = self._means
80
+ priors = self._priors
81
+ if not priors:
82
+ priors = self._priors = (
83
+ numpy.ones(self._num_clusters, numpy.float64) / self._num_clusters
84
+ )
85
+ covariances = self._covariance_matrices
86
+ if not covariances:
87
+ covariances = self._covariance_matrices = [
88
+ numpy.identity(dimensions, numpy.float64)
89
+ for i in range(self._num_clusters)
90
+ ]
91
+
92
+ # do the E and M steps until the likelihood plateaus
93
+ lastl = self._loglikelihood(vectors, priors, means, covariances)
94
+ converged = False
95
+
96
+ while not converged:
97
+ if trace:
98
+ print("iteration; loglikelihood", lastl)
99
+ # E-step, calculate hidden variables, h[i,j]
100
+ h = numpy.zeros((len(vectors), self._num_clusters), numpy.float64)
101
+ for i in range(len(vectors)):
102
+ for j in range(self._num_clusters):
103
+ h[i, j] = priors[j] * self._gaussian(
104
+ means[j], covariances[j], vectors[i]
105
+ )
106
+ h[i, :] /= sum(h[i, :])
107
+
108
+ # M-step, update parameters - cvm, p, mean
109
+ for j in range(self._num_clusters):
110
+ covariance_before = covariances[j]
111
+ new_covariance = numpy.zeros((dimensions, dimensions), numpy.float64)
112
+ new_mean = numpy.zeros(dimensions, numpy.float64)
113
+ sum_hj = 0.0
114
+ for i in range(len(vectors)):
115
+ delta = vectors[i] - means[j]
116
+ new_covariance += h[i, j] * numpy.multiply.outer(delta, delta)
117
+ sum_hj += h[i, j]
118
+ new_mean += h[i, j] * vectors[i]
119
+ covariances[j] = new_covariance / sum_hj
120
+ means[j] = new_mean / sum_hj
121
+ priors[j] = sum_hj / len(vectors)
122
+
123
+ # bias term to stop covariance matrix being singular
124
+ covariances[j] += self._bias * numpy.identity(dimensions, numpy.float64)
125
+
126
+ # calculate likelihood - FIXME: may be broken
127
+ l = self._loglikelihood(vectors, priors, means, covariances)
128
+
129
+ # check for convergence
130
+ if abs(lastl - l) < self._conv_threshold:
131
+ converged = True
132
+ lastl = l
133
+
134
+ def classify_vectorspace(self, vector):
135
+ best = None
136
+ for j in range(self._num_clusters):
137
+ p = self._priors[j] * self._gaussian(
138
+ self._means[j], self._covariance_matrices[j], vector
139
+ )
140
+ if not best or p > best[0]:
141
+ best = (p, j)
142
+ return best[1]
143
+
144
+ def likelihood_vectorspace(self, vector, cluster):
145
+ cid = self.cluster_names().index(cluster)
146
+ return self._priors[cluster] * self._gaussian(
147
+ self._means[cluster], self._covariance_matrices[cluster], vector
148
+ )
149
+
150
+ def _gaussian(self, mean, cvm, x):
151
+ m = len(mean)
152
+ assert cvm.shape == (m, m), "bad sized covariance matrix, %s" % str(cvm.shape)
153
+ try:
154
+ det = numpy.linalg.det(cvm)
155
+ inv = numpy.linalg.inv(cvm)
156
+ a = det**-0.5 * (2 * numpy.pi) ** (-m / 2.0)
157
+ dx = x - mean
158
159
+ b = -0.5 * numpy.dot(numpy.dot(dx, inv), dx)
160
+ return a * numpy.exp(b)
161
+ except OverflowError:
162
+ # happens when the exponent is negative infinity - i.e. b = 0
163
+ # i.e. the inverse of cvm is huge (cvm is almost zero)
164
+ return 0
165
+
166
+ def _loglikelihood(self, vectors, priors, means, covariances):
167
+ llh = 0.0
168
+ for vector in vectors:
169
+ p = 0
170
+ for j in range(len(priors)):
171
+ p += priors[j] * self._gaussian(means[j], covariances[j], vector)
172
+ llh += numpy.log(p)
173
+ return llh
174
+
175
+ def __repr__(self):
176
+ return "<EMClusterer means=%s>" % list(self._means)
177
+
178
+
179
+ def demo():
180
+ """
181
+ Non-interactive demonstration of the clusterers with simple 2-D data.
182
+ """
183
+
184
+ from nltk import cluster
185
+
186
+ # example from figure 14.10, page 519, Manning and Schutze
187
+
188
+ vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]
189
+ means = [[4, 2], [4, 2.01]]
190
+
191
+ clusterer = cluster.EMClusterer(means, bias=0.1)
192
+ clusters = clusterer.cluster(vectors, True, trace=True)
193
+
194
+ print("Clustered:", vectors)
195
+ print("As: ", clusters)
196
+ print()
197
+
198
+ for c in range(2):
199
+ print("Cluster:", c)
200
+ print("Prior: ", clusterer._priors[c])
201
+ print("Mean: ", clusterer._means[c])
202
+ print("Covar: ", clusterer._covariance_matrices[c])
203
+ print()
204
+
205
+ # classify a new vector
206
+ vector = numpy.array([2, 2])
207
+ print("classify(%s):" % vector, end=" ")
208
+ print(clusterer.classify(vector))
209
+
210
+ # show the classification probabilities
211
+ vector = numpy.array([2, 2])
212
+ print("classification_probdist(%s):" % vector)
213
+ pdist = clusterer.classification_probdist(vector)
214
+ for sample in pdist.samples():
215
+ print(f"{sample} => {pdist.prob(sample) * 100:.0f}%")
216
+
217
+
218
+ if __name__ == "__main__":
219
+ demo()
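For reference (an editorial note, not in the file), the weight computed by ``EMClusterer._gaussian`` above is the multivariate normal density N(x; mu, Sigma) = (2*pi)**(-m/2) * det(Sigma)**(-1/2) * exp(-(x - mu)^T Sigma^{-1} (x - mu) / 2). A standalone re-computation with plain numpy:

import numpy

def gaussian_density(mean, cov, x):
    # Density of a multivariate normal; matches what EMClusterer._gaussian evaluates.
    m = len(mean)
    dx = x - mean
    norm = numpy.linalg.det(cov) ** -0.5 * (2 * numpy.pi) ** (-m / 2.0)
    return norm * numpy.exp(-0.5 * dx @ numpy.linalg.inv(cov) @ dx)

print(gaussian_density(numpy.zeros(2), numpy.identity(2), numpy.array([1.0, 0.0])))
# -> ~0.0965, the standard bivariate normal evaluated at (1, 0)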
env-llmeval/lib/python3.10/site-packages/nltk/cluster/gaac.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Group Average Agglomerative Clusterer
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ try:
9
+ import numpy
10
+ except ImportError:
11
+ pass
12
+
13
+ from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance
14
+
15
+
16
+ class GAAClusterer(VectorSpaceClusterer):
17
+ """
18
+ The Group Average Agglomerative Clusterer starts with each of the N vectors as singleton
19
+ clusters. It then iteratively merges pairs of clusters which have the
20
+ closest centroids. This continues until there is only one cluster. The
21
+ order of merges gives rise to a dendrogram: a tree with the earlier merges
22
+ lower than later merges. The membership of a given number of clusters c, 1
23
+ <= c <= N, can be found by cutting the dendrogram at depth c.
24
+
25
+ This clusterer uses the cosine similarity metric only, which allows for
26
+ efficient speed-up in the clustering process.
27
+ """
28
+
29
+ def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
30
+ VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
31
+ self._num_clusters = num_clusters
32
+ self._dendrogram = None
33
+ self._groups_values = None
34
+
35
+ def cluster(self, vectors, assign_clusters=False, trace=False):
36
+ # stores the merge order
37
+ self._dendrogram = Dendrogram(
38
+ [numpy.array(vector, numpy.float64) for vector in vectors]
39
+ )
40
+ return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)
41
+
42
+ def cluster_vectorspace(self, vectors, trace=False):
43
+ # variables describing the initial situation
44
+ N = len(vectors)
45
+ cluster_len = [1] * N
46
+ cluster_count = N
47
+ index_map = numpy.arange(N)
48
+
49
+ # construct the similarity matrix
50
+ dims = (N, N)
51
+ dist = numpy.ones(dims, dtype=float) * numpy.inf
52
+ for i in range(N):
53
+ for j in range(i + 1, N):
54
+ dist[i, j] = cosine_distance(vectors[i], vectors[j])
55
+
56
+ while cluster_count > max(self._num_clusters, 1):
57
+ i, j = numpy.unravel_index(dist.argmin(), dims)
58
+ if trace:
59
+ print("merging %d and %d" % (i, j))
60
+
61
+ # update similarities for merging i and j
62
+ self._merge_similarities(dist, cluster_len, i, j)
63
+
64
+ # remove j
65
+ dist[:, j] = numpy.inf
66
+ dist[j, :] = numpy.inf
67
+
68
+ # merge the clusters
69
+ cluster_len[i] = cluster_len[i] + cluster_len[j]
70
+ self._dendrogram.merge(index_map[i], index_map[j])
71
+ cluster_count -= 1
72
+
73
+ # update the index map to reflect the indexes if we
74
+ # had removed j
75
+ index_map[j + 1 :] -= 1
76
+ index_map[j] = N
77
+
78
+ self.update_clusters(self._num_clusters)
79
+
80
+ def _merge_similarities(self, dist, cluster_len, i, j):
81
+ # the new cluster i merged from i and j adopts the average of
82
+ # i and j's similarity to each other cluster, weighted by the
83
+ # number of points in the clusters i and j
84
+ i_weight = cluster_len[i]
85
+ j_weight = cluster_len[j]
86
+ weight_sum = i_weight + j_weight
87
+
88
+ # update for x<i
89
+ dist[:i, i] = dist[:i, i] * i_weight + dist[:i, j] * j_weight
90
+ dist[:i, i] /= weight_sum
91
+ # update for i<x<j
92
+ dist[i, i + 1 : j] = (
93
+ dist[i, i + 1 : j] * i_weight + dist[i + 1 : j, j] * j_weight
94
+ )
95
+ # update for i<j<x
96
+ dist[i, j + 1 :] = dist[i, j + 1 :] * i_weight + dist[j, j + 1 :] * j_weight
97
+ dist[i, i + 1 :] /= weight_sum
98
+
99
+ def update_clusters(self, num_clusters):
100
+ clusters = self._dendrogram.groups(num_clusters)
101
+ self._centroids = []
102
+ for cluster in clusters:
103
+ assert len(cluster) > 0
104
+ if self._should_normalise:
105
+ centroid = self._normalise(cluster[0])
106
+ else:
107
+ centroid = numpy.array(cluster[0])
108
+ for vector in cluster[1:]:
109
+ if self._should_normalise:
110
+ centroid += self._normalise(vector)
111
+ else:
112
+ centroid += vector
113
+ centroid /= len(cluster)
114
+ self._centroids.append(centroid)
115
+ self._num_clusters = len(self._centroids)
116
+
117
+ def classify_vectorspace(self, vector):
118
+ best = None
119
+ for i in range(self._num_clusters):
120
+ centroid = self._centroids[i]
121
+ dist = cosine_distance(vector, centroid)
122
+ if not best or dist < best[0]:
123
+ best = (dist, i)
124
+ return best[1]
125
+
126
+ def dendrogram(self):
127
+ """
128
+ :return: The dendrogram representing the current clustering
129
+ :rtype: Dendrogram
130
+ """
131
+ return self._dendrogram
132
+
133
+ def num_clusters(self):
134
+ return self._num_clusters
135
+
136
+ def __repr__(self):
137
+ return "<GroupAverageAgglomerative Clusterer n=%d>" % self._num_clusters
138
+
139
+
140
+ def demo():
141
+ """
142
+ Non-interactive demonstration of the clusterers with simple 2-D data.
143
+ """
144
+
145
+ from nltk.cluster import GAAClusterer
146
+
147
+ # use a set of tokens with 2D indices
148
+ vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
149
+
150
+ # test the GAAC clusterer with 4 clusters
151
+ clusterer = GAAClusterer(4)
152
+ clusters = clusterer.cluster(vectors, True)
153
+
154
+ print("Clusterer:", clusterer)
155
+ print("Clustered:", vectors)
156
+ print("As:", clusters)
157
+ print()
158
+
159
+ # show the dendrogram
160
+ clusterer.dendrogram().show()
161
+
162
+ # classify a new vector
163
+ vector = numpy.array([3, 3])
164
+ print("classify(%s):" % vector, end=" ")
165
+ print(clusterer.classify(vector))
166
+ print()
167
+
168
+
169
+ if __name__ == "__main__":
170
+ demo()
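An editorial sketch (not in the file): build the full dendrogram by merging down to a single cluster, then use ``update_clusters()`` to cut it at a different number of clusters without re-running the merges. The toy vectors are made up:

import numpy
from nltk.cluster import GAAClusterer

vectors = [numpy.array(v, numpy.float64)
           for v in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

clusterer = GAAClusterer(1)        # merge all the way down to one cluster
clusterer.cluster(vectors)
clusterer.dendrogram().show()      # ASCII picture of the merge order

clusterer.update_clusters(3)       # cut the stored dendrogram into 3 groups
print([clusterer.classify(v) for v in vectors])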
env-llmeval/lib/python3.10/site-packages/nltk/cluster/kmeans.py ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: K-Means Clusterer
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # URL: <https://www.nltk.org/>
6
+ # For license information, see LICENSE.TXT
7
+
8
+ import copy
9
+ import random
10
+ import sys
11
+
12
+ try:
13
+ import numpy
14
+ except ImportError:
15
+ pass
16
+
17
+
18
+ from nltk.cluster.util import VectorSpaceClusterer
19
+
20
+
21
+ class KMeansClusterer(VectorSpaceClusterer):
22
+ """
23
+ The K-means clusterer starts with k arbitrarily chosen means then allocates
24
+ each vector to the cluster with the closest mean. It then recalculates the
25
+ means of each cluster as the centroid of the vectors in the cluster. This
26
+ process repeats until the cluster memberships stabilise. This is a
27
+ hill-climbing algorithm which may converge to a local maximum. Hence the
28
+ clustering is often repeated with random initial means and the most
29
+ commonly occurring output means are chosen.
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ num_means,
35
+ distance,
36
+ repeats=1,
37
+ conv_test=1e-6,
38
+ initial_means=None,
39
+ normalise=False,
40
+ svd_dimensions=None,
41
+ rng=None,
42
+ avoid_empty_clusters=False,
43
+ ):
44
+
45
+ """
46
+ :param num_means: the number of means to use (may use fewer)
47
+ :type num_means: int
48
+ :param distance: measure of distance between two vectors
49
+ :type distance: function taking two vectors and returning a float
50
+ :param repeats: number of randomised clustering trials to use
51
+ :type repeats: int
52
+ :param conv_test: maximum variation in mean differences before
53
+ deemed convergent
54
+ :type conv_test: number
55
+ :param initial_means: set of k initial means
56
+ :type initial_means: sequence of vectors
57
+ :param normalise: should vectors be normalised to length 1
58
+ :type normalise: boolean
59
+ :param svd_dimensions: number of dimensions to use in reducing vector
60
+ dimensionality with SVD
61
+ :type svd_dimensions: int
62
+ :param rng: random number generator (or None)
63
+ :type rng: Random
64
+ :param avoid_empty_clusters: include current centroid in computation
65
+ of next one; avoids undefined behavior
66
+ when clusters become empty
67
+ :type avoid_empty_clusters: boolean
68
+ """
69
+ VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
70
+ self._num_means = num_means
71
+ self._distance = distance
72
+ self._max_difference = conv_test
73
+ assert not initial_means or len(initial_means) == num_means
74
+ self._means = initial_means
75
+ assert repeats >= 1
76
+ assert not (initial_means and repeats > 1)
77
+ self._repeats = repeats
78
+ self._rng = rng if rng else random.Random()
79
+ self._avoid_empty_clusters = avoid_empty_clusters
80
+
81
+ def cluster_vectorspace(self, vectors, trace=False):
82
+ if self._means and self._repeats > 1:
83
+ print("Warning: means will be discarded for subsequent trials")
84
+
85
+ meanss = []
86
+ for trial in range(self._repeats):
87
+ if trace:
88
+ print("k-means trial", trial)
89
+ if not self._means or trial > 1:
90
+ self._means = self._rng.sample(list(vectors), self._num_means)
91
+ self._cluster_vectorspace(vectors, trace)
92
+ meanss.append(self._means)
93
+
94
+ if len(meanss) > 1:
95
+ # sort the means first (so that different cluster numbering won't
96
+ # effect the distance comparison)
97
+ for means in meanss:
98
+ means.sort(key=sum)
99
+
100
+ # find the set of means that's minimally different from the others
101
+ min_difference = min_means = None
102
+ for i in range(len(meanss)):
103
+ d = 0
104
+ for j in range(len(meanss)):
105
+ if i != j:
106
+ d += self._sum_distances(meanss[i], meanss[j])
107
+ if min_difference is None or d < min_difference:
108
+ min_difference, min_means = d, meanss[i]
109
+
110
+ # use the best means
111
+ self._means = min_means
112
+
113
+ def _cluster_vectorspace(self, vectors, trace=False):
114
+ if self._num_means < len(vectors):
115
+ # perform k-means clustering
116
+ converged = False
117
+ while not converged:
118
+ # assign the tokens to clusters based on minimum distance to
119
+ # the cluster means
120
+ clusters = [[] for m in range(self._num_means)]
121
+ for vector in vectors:
122
+ index = self.classify_vectorspace(vector)
123
+ clusters[index].append(vector)
124
+
125
+ if trace:
126
+ print("iteration")
127
+ # for i in range(self._num_means):
128
+ # print ' mean', i, 'allocated', len(clusters[i]), 'vectors'
129
+
130
+ # recalculate cluster means by computing the centroid of each cluster
131
+ new_means = list(map(self._centroid, clusters, self._means))
132
+
133
+ # measure the degree of change from the previous step for convergence
134
+ difference = self._sum_distances(self._means, new_means)
135
+ if difference < self._max_difference:
136
+ converged = True
137
+
138
+ # remember the new means
139
+ self._means = new_means
140
+
141
+ def classify_vectorspace(self, vector):
142
+ # finds the closest cluster centroid
143
+ # returns that cluster's index
144
+ best_distance = best_index = None
145
+ for index in range(len(self._means)):
146
+ mean = self._means[index]
147
+ dist = self._distance(vector, mean)
148
+ if best_distance is None or dist < best_distance:
149
+ best_index, best_distance = index, dist
150
+ return best_index
151
+
152
+ def num_clusters(self):
153
+ if self._means:
154
+ return len(self._means)
155
+ else:
156
+ return self._num_means
157
+
158
+ def means(self):
159
+ """
160
+ The means used for clustering.
161
+ """
162
+ return self._means
163
+
164
+ def _sum_distances(self, vectors1, vectors2):
165
+ difference = 0.0
166
+ for u, v in zip(vectors1, vectors2):
167
+ difference += self._distance(u, v)
168
+ return difference
169
+
170
+ def _centroid(self, cluster, mean):
171
+ if self._avoid_empty_clusters:
172
+ centroid = copy.copy(mean)
173
+ for vector in cluster:
174
+ centroid += vector
175
+ return centroid / (1 + len(cluster))
176
+ else:
177
+ if not len(cluster):
178
+ sys.stderr.write("Error: no centroid defined for empty cluster.\n")
179
+ sys.stderr.write(
180
+ "Try setting argument 'avoid_empty_clusters' to True\n"
181
+ )
182
+ assert False
183
+ centroid = copy.copy(cluster[0])
184
+ for vector in cluster[1:]:
185
+ centroid += vector
186
+ return centroid / len(cluster)
187
+
188
+ def __repr__(self):
189
+ return "<KMeansClusterer means=%s repeats=%d>" % (self._means, self._repeats)
190
+
191
+
192
+ #################################################################################
193
+
194
+
195
+ def demo():
196
+ # example from figure 14.9, page 517, Manning and Schutze
197
+
198
+ from nltk.cluster import KMeansClusterer, euclidean_distance
199
+
200
+ vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]]
201
+ means = [[4, 3], [5, 5]]
202
+
203
+ clusterer = KMeansClusterer(2, euclidean_distance, initial_means=means)
204
+ clusters = clusterer.cluster(vectors, True, trace=True)
205
+
206
+ print("Clustered:", vectors)
207
+ print("As:", clusters)
208
+ print("Means:", clusterer.means())
209
+ print()
210
+
211
+ vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
212
+
213
+ # test k-means using the euclidean distance metric, 2 means and repeat
214
+ # clustering 10 times with random seeds
215
+
216
+ clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
217
+ clusters = clusterer.cluster(vectors, True)
218
+ print("Clustered:", vectors)
219
+ print("As:", clusters)
220
+ print("Means:", clusterer.means())
221
+ print()
222
+
223
+ # classify a new vector
224
+ vector = numpy.array([3, 3])
225
+ print("classify(%s):" % vector, end=" ")
226
+ print(clusterer.classify(vector))
227
+ print()
228
+
229
+
230
+ if __name__ == "__main__":
231
+ demo()
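Because the ``distance`` argument is just a callable on two vectors, the ``cosine_distance`` helper from ``nltk.cluster.util`` can be dropped in for euclidean distance. An editorial sketch with made-up data (``avoid_empty_clusters`` guards the random restarts):

import numpy
from nltk.cluster import KMeansClusterer
from nltk.cluster.util import cosine_distance

vectors = [numpy.array(v, numpy.float64)
           for v in [[4, 2], [2, 4], [5, 1], [1, 5], [3, 3]]]

clusterer = KMeansClusterer(
    2, cosine_distance, repeats=5, normalise=True, avoid_empty_clusters=True
)
print(clusterer.cluster(vectors, assign_clusters=True))   # cluster index per vector
print(clusterer.means())                                  # the chosen means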
env-llmeval/lib/python3.10/site-packages/nltk/cluster/util.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Clusterer Utilities
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Trevor Cohn <[email protected]>
5
+ # Contributor: J Richard Snape
6
+ # URL: <https://www.nltk.org/>
7
+ # For license information, see LICENSE.TXT
8
+ import copy
9
+ from abc import abstractmethod
10
+ from math import sqrt
11
+ from sys import stdout
12
+
13
+ try:
14
+ import numpy
15
+ except ImportError:
16
+ pass
17
+
18
+ from nltk.cluster.api import ClusterI
19
+
20
+
21
+ class VectorSpaceClusterer(ClusterI):
22
+ """
23
+ Abstract clusterer which takes tokens and maps them into a vector space.
24
+ Optionally performs singular value decomposition to reduce the
25
+ dimensionality.
26
+ """
27
+
28
+ def __init__(self, normalise=False, svd_dimensions=None):
29
+ """
30
+ :param normalise: should vectors be normalised to length 1
31
+ :type normalise: boolean
32
+ :param svd_dimensions: number of dimensions to use in reducing vector
33
+ dimensionality with SVD
34
+ :type svd_dimensions: int
35
+ """
36
+ self._Tt = None
37
+ self._should_normalise = normalise
38
+ self._svd_dimensions = svd_dimensions
39
+
40
+ def cluster(self, vectors, assign_clusters=False, trace=False):
41
+ assert len(vectors) > 0
42
+
43
+ # normalise the vectors
44
+ if self._should_normalise:
45
+ vectors = list(map(self._normalise, vectors))
46
+
47
+ # use SVD to reduce the dimensionality
48
+ if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
49
+ [u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors)))
50
+ S = d[: self._svd_dimensions] * numpy.identity(
51
+ self._svd_dimensions, numpy.float64
52
+ )
53
+ T = u[:, : self._svd_dimensions]
54
+ Dt = vt[: self._svd_dimensions, :]
55
+ vectors = numpy.transpose(numpy.dot(S, Dt))
56
+ self._Tt = numpy.transpose(T)
57
+
58
+ # call abstract method to cluster the vectors
59
+ self.cluster_vectorspace(vectors, trace)
60
+
61
+ # assign the vectors to clusters
62
+ if assign_clusters:
63
+ return [self.classify(vector) for vector in vectors]
64
+
65
+ @abstractmethod
66
+ def cluster_vectorspace(self, vectors, trace):
67
+ """
68
+ Finds the clusters using the given set of vectors.
69
+ """
70
+
71
+ def classify(self, vector):
72
+ if self._should_normalise:
73
+ vector = self._normalise(vector)
74
+ if self._Tt is not None:
75
+ vector = numpy.dot(self._Tt, vector)
76
+ cluster = self.classify_vectorspace(vector)
77
+ return self.cluster_name(cluster)
78
+
79
+ @abstractmethod
80
+ def classify_vectorspace(self, vector):
81
+ """
82
+ Returns the index of the appropriate cluster for the vector.
83
+ """
84
+
85
+ def likelihood(self, vector, label):
86
+ if self._should_normalise:
87
+ vector = self._normalise(vector)
88
+ if self._Tt is not None:
89
+ vector = numpy.dot(self._Tt, vector)
90
+ return self.likelihood_vectorspace(vector, label)
91
+
92
+ def likelihood_vectorspace(self, vector, cluster):
93
+ """
94
+ Returns the likelihood of the vector belonging to the cluster.
95
+ """
96
+ predicted = self.classify_vectorspace(vector)
97
+ return 1.0 if cluster == predicted else 0.0
98
+
99
+ def vector(self, vector):
100
+ """
101
+ Returns the vector after normalisation and dimensionality reduction
102
+ """
103
+ if self._should_normalise:
104
+ vector = self._normalise(vector)
105
+ if self._Tt is not None:
106
+ vector = numpy.dot(self._Tt, vector)
107
+ return vector
108
+
109
+ def _normalise(self, vector):
110
+ """
111
+ Normalises the vector to unit length.
112
+ """
113
+ return vector / sqrt(numpy.dot(vector, vector))
114
+
115
+
116
+ def euclidean_distance(u, v):
117
+ """
118
+ Returns the euclidean distance between vectors u and v. This is equivalent
119
+ to the length of the vector (u - v).
120
+ """
121
+ diff = u - v
122
+ return sqrt(numpy.dot(diff, diff))
123
+
124
+
125
+ def cosine_distance(u, v):
126
+ """
127
+ Returns 1 minus the cosine of the angle between vectors v and u. This is
128
+ equal to ``1 - (u.v / |u||v|)``.
129
+ """
130
+ return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v))))
131
+
132
+
133
+ class _DendrogramNode:
134
+ """Tree node of a dendrogram."""
135
+
136
+ def __init__(self, value, *children):
137
+ self._value = value
138
+ self._children = children
139
+
140
+ def leaves(self, values=True):
141
+ if self._children:
142
+ leaves = []
143
+ for child in self._children:
144
+ leaves.extend(child.leaves(values))
145
+ return leaves
146
+ elif values:
147
+ return [self._value]
148
+ else:
149
+ return [self]
150
+
151
+ def groups(self, n):
152
+ queue = [(self._value, self)]
153
+
154
+ while len(queue) < n:
155
+ priority, node = queue.pop()
156
+ if not node._children:
157
+ queue.push((priority, node))
158
+ break
159
+ for child in node._children:
160
+ if child._children:
161
+ queue.append((child._value, child))
162
+ else:
163
+ queue.append((0, child))
164
+ # makes the earliest merges at the start, latest at the end
165
+ queue.sort()
166
+
167
+ groups = []
168
+ for priority, node in queue:
169
+ groups.append(node.leaves())
170
+ return groups
171
+
172
+ def __lt__(self, comparator):
173
+ return cosine_distance(self._value, comparator._value) < 0
174
+
175
+
176
+ class Dendrogram:
177
+ """
178
+ Represents a dendrogram, a tree with a specified branching order. This
179
+ must be initialised with the leaf items, then iteratively call merge for
180
+ each branch. This class constructs a tree representing the order of calls
181
+ to the merge function.
182
+ """
183
+
184
+ def __init__(self, items=[]):
185
+ """
186
+ :param items: the items at the leaves of the dendrogram
187
+ :type items: sequence of (any)
188
+ """
189
+ self._items = [_DendrogramNode(item) for item in items]
190
+ self._original_items = copy.copy(self._items)
191
+ self._merge = 1
192
+
193
+ def merge(self, *indices):
194
+ """
195
+ Merges nodes at given indices in the dendrogram. The nodes will be
196
+ combined which then replaces the first node specified. All other nodes
197
+ involved in the merge will be removed.
198
+
199
+ :param indices: indices of the items to merge (at least two)
200
+ :type indices: seq of int
201
+ """
202
+ assert len(indices) >= 2
203
+ node = _DendrogramNode(self._merge, *(self._items[i] for i in indices))
204
+ self._merge += 1
205
+ self._items[indices[0]] = node
206
+ for i in indices[1:]:
207
+ del self._items[i]
208
+
209
+ def groups(self, n):
210
+ """
211
+ Finds the n-groups of items (leaves) reachable from a cut at depth n.
212
+ :param n: number of groups
213
+ :type n: int
214
+ """
215
+ if len(self._items) > 1:
216
+ root = _DendrogramNode(self._merge, *self._items)
217
+ else:
218
+ root = self._items[0]
219
+ return root.groups(n)
220
+
221
+ def show(self, leaf_labels=[]):
222
+ """
223
+ Print the dendrogram in ASCII art to standard out.
224
+
225
+ :param leaf_labels: an optional list of strings to use for labeling the
226
+ leaves
227
+ :type leaf_labels: list
228
+ """
229
+
230
+ # ASCII rendering characters
231
+ JOIN, HLINK, VLINK = "+", "-", "|"
232
+
233
+ # find the root (or create one)
234
+ if len(self._items) > 1:
235
+ root = _DendrogramNode(self._merge, *self._items)
236
+ else:
237
+ root = self._items[0]
238
+ leaves = self._original_items
239
+
240
+ if leaf_labels:
241
+ last_row = leaf_labels
242
+ else:
243
+ last_row = ["%s" % leaf._value for leaf in leaves]
244
+
245
+ # find the bottom row and the best cell width
246
+ width = max(map(len, last_row)) + 1
247
+ lhalf = width // 2
248
+ rhalf = int(width - lhalf - 1)
249
+
250
+ # display functions
251
+ def format(centre, left=" ", right=" "):
252
+ return f"{lhalf * left}{centre}{right * rhalf}"
253
+
254
+ def display(str):
255
+ stdout.write(str)
256
+
257
+ # for each merge, top down
258
+ queue = [(root._value, root)]
259
+ verticals = [format(" ") for leaf in leaves]
260
+ while queue:
261
+ priority, node = queue.pop()
262
+ child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children))
263
+ indices = list(map(leaves.index, child_left_leaf))
264
+ if child_left_leaf:
265
+ min_idx = min(indices)
266
+ max_idx = max(indices)
267
+ for i in range(len(leaves)):
268
+ if leaves[i] in child_left_leaf:
269
+ if i == min_idx:
270
+ display(format(JOIN, " ", HLINK))
271
+ elif i == max_idx:
272
+ display(format(JOIN, HLINK, " "))
273
+ else:
274
+ display(format(JOIN, HLINK, HLINK))
275
+ verticals[i] = format(VLINK)
276
+ elif min_idx <= i <= max_idx:
277
+ display(format(HLINK, HLINK, HLINK))
278
+ else:
279
+ display(verticals[i])
280
+ display("\n")
281
+ for child in node._children:
282
+ if child._children:
283
+ queue.append((child._value, child))
284
+ queue.sort()
285
+
286
+ for vertical in verticals:
287
+ display(vertical)
288
+ display("\n")
289
+
290
+ # finally, display the last line
291
+ display("".join(item.center(width) for item in last_row))
292
+ display("\n")
293
+
294
+ def __repr__(self):
295
+ if len(self._items) > 1:
296
+ root = _DendrogramNode(self._merge, *self._items)
297
+ else:
298
+ root = self._items[0]
299
+ leaves = root.leaves(False)
300
+ return "<Dendrogram with %d leaves>" % len(leaves)
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc ADDED
Binary file (9.82 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc ADDED
Binary file (6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc ADDED
Binary file (5.88 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc ADDED
Binary file (5.92 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc ADDED
Binary file (7.99 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc ADDED
Binary file (8.66 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc ADDED
Binary file (3.25 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc ADDED
Binary file (4.91 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc ADDED
Binary file (2.32 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tbl/__init__.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ """
11
+ Transformation Based Learning
12
+
13
+ A general purpose package for Transformation Based Learning,
14
+ currently used by nltk.tag.BrillTagger.
15
+
16
+ isort:skip_file
17
+ """
18
+
19
+ from nltk.tbl.template import Template
20
+
21
+ # API: Template(...), Template.expand(...)
22
+
23
+ from nltk.tbl.feature import Feature
24
+
25
+ # API: Feature(...), Feature.expand(...)
26
+
27
+ from nltk.tbl.rule import Rule
28
+
29
+ # API: Rule.format(...), Rule.templateid
30
+
31
+ from nltk.tbl.erroranalysis import error_list
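A brief editorial sketch of the re-exported API, mirroring its use in ``nltk.tbl.demo`` further below (``Pos`` and ``Word`` are feature classes from ``nltk.tag.brill``):

from nltk.tag.brill import Pos, Word
from nltk.tbl import Template

# a single template: condition on the tag one step left and the current word
t = Template(Pos([-1]), Word([0]))

# Template.expand / Feature.expand generate whole families of templates
wordtpls = Word.expand([-1, 0, 1], [1, 2], excludezero=False)
tagtpls = Pos.expand([-2, -1, 0, 1], [1, 2], excludezero=True)
templates = list(Template.expand([wordtpls, tagtpls], combinations=(1, 3)))
print(len(templates))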
env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (537 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc ADDED
Binary file (168 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc ADDED
Binary file (12.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc ADDED
Binary file (1.36 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc ADDED
Binary file (9.85 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc ADDED
Binary file (12.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tbl/api.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/nltk/tbl/demo.py ADDED
@@ -0,0 +1,418 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ import os
11
+ import pickle
12
+ import random
13
+ import time
14
+
15
+ from nltk.corpus import treebank
16
+ from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
17
+ from nltk.tag.brill import Pos, Word
18
+ from nltk.tbl import Template, error_list
19
+
20
+
21
+ def demo():
22
+ """
23
+ Run a demo with defaults. See source comments for details,
24
+ or docstrings of any of the more specific demo_* functions.
25
+ """
26
+ postag()
27
+
28
+
29
+ def demo_repr_rule_format():
30
+ """
31
+ Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose"))
32
+ """
33
+ postag(ruleformat="repr")
34
+
35
+
36
+ def demo_str_rule_format():
37
+ """
38
+ Exemplify str(Rule) (see also repr(Rule) and Rule.format("verbose"))
39
+ """
40
+ postag(ruleformat="str")
41
+
42
+
43
+ def demo_verbose_rule_format():
44
+ """
45
+ Exemplify Rule.format("verbose")
46
+ """
47
+ postag(ruleformat="verbose")
48
+
49
+
50
+ def demo_multiposition_feature():
51
+ """
52
+ The feature/s of a template takes a list of positions
53
+ relative to the current word where the feature should be
54
+ looked for, conceptually joined by logical OR. For instance,
55
+ Pos([-1, 1]), given a value V, will hold whenever V is found
56
+ one step to the left and/or one step to the right.
57
+
58
+ For contiguous ranges, a 2-arg form giving inclusive end
59
+ points can also be used: Pos(-3, -1) is the same as the arg
60
+ below.
61
+ """
62
+ postag(templates=[Template(Pos([-3, -2, -1]))])
63
+
64
+
65
+ def demo_multifeature_template():
66
+ """
67
+ Templates can have more than a single feature.
68
+ """
69
+ postag(templates=[Template(Word([0]), Pos([-2, -1]))])
70
+
71
+
72
+ def demo_template_statistics():
73
+ """
74
+ Show aggregate statistics per template. Little used templates are
75
+ candidates for deletion, much used templates may possibly be refined.
76
+
77
+ Deleting unused templates is mostly about saving time and/or space:
78
+ training is basically O(T) in the number of templates T
79
+ (also in terms of memory usage, which often will be the limiting factor).
80
+ """
81
+ postag(incremental_stats=True, template_stats=True)
82
+
83
+
84
+ def demo_generated_templates():
85
+ """
86
+ Template.expand and Feature.expand are class methods facilitating
87
+ generating large amounts of templates. See their documentation for
88
+ details.
89
+
90
+ Note: training with 500 templates can easily fill all available memory,
91
+ even on relatively small corpora
92
+ """
93
+ wordtpls = Word.expand([-1, 0, 1], [1, 2], excludezero=False)
94
+ tagtpls = Pos.expand([-2, -1, 0, 1], [1, 2], excludezero=True)
95
+ templates = list(Template.expand([wordtpls, tagtpls], combinations=(1, 3)))
96
+ print(
97
+ "Generated {} templates for transformation-based learning".format(
98
+ len(templates)
99
+ )
100
+ )
101
+ postag(templates=templates, incremental_stats=True, template_stats=True)
102
+
103
+
104
+ def demo_learning_curve():
105
+ """
106
+ Plot a learning curve -- the contribution on tagging accuracy of
107
+ the individual rules.
108
+ Note: requires matplotlib
109
+ """
110
+ postag(
111
+ incremental_stats=True,
112
+ separate_baseline_data=True,
113
+ learning_curve_output="learningcurve.png",
114
+ )
115
+
116
+
117
+ def demo_error_analysis():
118
+ """
119
+ Writes a file with context for each erroneous word after tagging testing data
120
+ """
121
+ postag(error_output="errors.txt")
122
+
123
+
124
+ def demo_serialize_tagger():
125
+ """
126
+ Serializes the learned tagger to a file in pickle format; reloads it
127
+ and validates the process.
128
+ """
129
+ postag(serialize_output="tagger.pcl")
130
+
131
+
132
+ def demo_high_accuracy_rules():
133
+ """
134
+ Discard rules with low accuracy. This may hurt performance a bit,
135
+ but will often produce rules which are more interesting for a human to read.
136
+ """
137
+ postag(num_sents=3000, min_acc=0.96, min_score=10)
138
+
139
+
140
+ def postag(
141
+ templates=None,
142
+ tagged_data=None,
143
+ num_sents=1000,
144
+ max_rules=300,
145
+ min_score=3,
146
+ min_acc=None,
147
+ train=0.8,
148
+ trace=3,
149
+ randomize=False,
150
+ ruleformat="str",
151
+ incremental_stats=False,
152
+ template_stats=False,
153
+ error_output=None,
154
+ serialize_output=None,
155
+ learning_curve_output=None,
156
+ learning_curve_take=300,
157
+ baseline_backoff_tagger=None,
158
+ separate_baseline_data=False,
159
+ cache_baseline_tagger=None,
160
+ ):
161
+ """
162
+ Brill Tagger Demonstration
163
+ :param templates: the Templates to use in rule generation (defaults to nltk.tag.brill.brill24())
164
+ :type templates: list of Template
165
+
166
+ :param tagged_data: the pre-tagged training/testing corpus (defaults to treebank.tagged_sents())
167
+ :type tagged_data: list of list of (str, str) tuples
168
+
169
+ :param num_sents: how many sentences of training and testing data to use
170
+ :type num_sents: C{int}
171
+
172
+ :param max_rules: maximum number of rule instances to create
173
+ :type max_rules: C{int}
174
+
175
+ :param min_score: the minimum score for a rule in order for it to be considered
176
+ :type min_score: C{int}
177
+
178
+ :param min_acc: the minimum accuracy for a rule in order for it to be considered (None: ignore accuracy)
179
+ :type min_acc: C{float}
180
+
181
+ :param train: the fraction of the corpus to be used for training (1=all)
182
+ :type train: C{float}
183
+
184
+ :param trace: the level of diagnostic tracing output to produce (0-4)
185
+ :type trace: C{int}
186
+
187
+ :param randomize: whether the training data should be a random subset of the corpus
188
+ :type randomize: C{bool}
189
+
190
+ :param ruleformat: rule output format, one of "str", "repr", "verbose"
191
+ :type ruleformat: C{str}
192
+
193
+ :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
194
+ :type incremental_stats: C{bool}
195
+
196
+ :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
197
+ :type template_stats: C{bool}
198
+
199
+ :param error_output: the file where errors will be saved
200
+ :type error_output: C{string}
201
+
202
+ :param serialize_output: the file where the learned tbl tagger will be saved
203
+ :type serialize_output: C{string}
204
+
205
+ :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
206
+ :type learning_curve_output: C{string}
207
+
208
+ :param learning_curve_take: how many rules to include in the plot
209
+ :type learning_curve_take: C{int}
210
+
211
+ :param baseline_backoff_tagger: the backoff tagger used by the unigram baseline tagger (defaults to REGEXP_TAGGER)
212
+ :type baseline_backoff_tagger: tagger
213
+
214
+ :param separate_baseline_data: use a fraction of the training data exclusively for training baseline
215
+ :type separate_baseline_data: C{bool}
216
+
217
+ :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround to get
218
+ deterministic output from the baseline unigram tagger between python versions)
219
+ :type cache_baseline_tagger: C{string}
220
+
221
+
222
+ Note on separate_baseline_data: if False, the training data is reused both for the baseline and for the rule learner. This
+ 223
+ is fast and fine for a demo, but is likely to generalize worse on unseen data.
+ 224
+ It also cannot then be sensibly used for learning curves on training data (the baseline will be artificially high).
225
+ """
226
+
227
+ # defaults
228
+ baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
229
+ if templates is None:
230
+ from nltk.tag.brill import brill24, describe_template_sets
231
+
232
+ # some pre-built template sets taken from typical systems or publications are
233
+ # available. Print a list with describe_template_sets()
234
+ # for instance:
235
+ templates = brill24()
236
+ (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data(
237
+ tagged_data, train, num_sents, randomize, separate_baseline_data
238
+ )
239
+
240
+ # creating (or reloading from cache) a baseline tagger (unigram tagger)
241
+ # this is just a mechanism for getting deterministic output from the baseline between
242
+ # python versions
243
+ if cache_baseline_tagger:
244
+ if not os.path.exists(cache_baseline_tagger):
245
+ baseline_tagger = UnigramTagger(
246
+ baseline_data, backoff=baseline_backoff_tagger
247
+ )
248
+ with open(cache_baseline_tagger, "w") as print_rules:
249
+ pickle.dump(baseline_tagger, print_rules)
250
+ print(
251
+ "Trained baseline tagger, pickled it to {}".format(
252
+ cache_baseline_tagger
253
+ )
254
+ )
255
+ with open(cache_baseline_tagger, "rb") as print_rules:
256
+ baseline_tagger = pickle.load(print_rules)
257
+ print(f"Reloaded pickled tagger from {cache_baseline_tagger}")
258
+ else:
259
+ baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
260
+ print("Trained baseline tagger")
261
+ if gold_data:
262
+ print(
263
+ " Accuracy on test set: {:0.4f}".format(
264
+ baseline_tagger.accuracy(gold_data)
265
+ )
266
+ )
267
+
268
+ # creating a Brill tagger
269
+ tbrill = time.time()
270
+ trainer = BrillTaggerTrainer(
271
+ baseline_tagger, templates, trace, ruleformat=ruleformat
272
+ )
273
+ print("Training tbl tagger...")
274
+ brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
275
+ print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds")
276
+ if gold_data:
277
+ print(" Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data))
278
+
279
+ # printing the learned rules, if learned silently
280
+ if trace == 1:
281
+ print("\nLearned rules: ")
282
+ for (ruleno, rule) in enumerate(brill_tagger.rules(), 1):
283
+ print(f"{ruleno:4d} {rule.format(ruleformat):s}")
284
+
285
+ # printing template statistics (optionally including comparison with the training data)
286
+ # note: if not separate_baseline_data, then baseline accuracy will be artificially high
287
+ if incremental_stats:
288
+ print(
289
+ "Incrementally tagging the test data, collecting individual rule statistics"
290
+ )
291
+ (taggedtest, teststats) = brill_tagger.batch_tag_incremental(
292
+ testing_data, gold_data
293
+ )
294
+ print(" Rule statistics collected")
295
+ if not separate_baseline_data:
296
+ print(
297
+ "WARNING: train_stats asked for separate_baseline_data=True; the baseline "
298
+ "will be artificially high"
299
+ )
300
+ trainstats = brill_tagger.train_stats()
301
+ if template_stats:
302
+ brill_tagger.print_template_statistics(teststats)
303
+ if learning_curve_output:
304
+ _demo_plot(
305
+ learning_curve_output, teststats, trainstats, take=learning_curve_take
306
+ )
307
+ print(f"Wrote plot of learning curve to {learning_curve_output}")
308
+ else:
309
+ print("Tagging the test data")
310
+ taggedtest = brill_tagger.tag_sents(testing_data)
311
+ if template_stats:
312
+ brill_tagger.print_template_statistics()
313
+
314
+ # writing error analysis to file
315
+ if error_output is not None:
316
+ with open(error_output, "w") as f:
317
+ f.write("Errors for Brill Tagger %r\n\n" % serialize_output)
318
+ f.write("\n".join(error_list(gold_data, taggedtest)).encode("utf-8") + "\n")
319
+ print(f"Wrote tagger errors including context to {error_output}")
320
+
321
+ # serializing the tagger to a pickle file and reloading (just to see it works)
322
+ if serialize_output is not None:
323
+ taggedtest = brill_tagger.tag_sents(testing_data)
324
+ with open(serialize_output, "w") as print_rules:
325
+ pickle.dump(brill_tagger, print_rules)
326
+ print(f"Wrote pickled tagger to {serialize_output}")
327
+ with open(serialize_output, "rb") as print_rules:
328
+ brill_tagger_reloaded = pickle.load(print_rules)
329
+ print(f"Reloaded pickled tagger from {serialize_output}")
330
+ taggedtest_reloaded = brill_tagger_reloaded.tag_sents(testing_data)
331
+ if taggedtest == taggedtest_reloaded:
332
+ print("Reloaded tagger tried on test set, results identical")
333
+ else:
334
+ print("PROBLEM: Reloaded tagger gave different results on test set")
335
+
336
+
337
+ def _demo_prepare_data(
338
+ tagged_data, train, num_sents, randomize, separate_baseline_data
339
+ ):
340
+ # train is the proportion of data used in training; the rest is reserved
341
+ # for testing.
342
+ if tagged_data is None:
343
+ print("Loading tagged data from treebank... ")
344
+ tagged_data = treebank.tagged_sents()
345
+ if num_sents is None or len(tagged_data) <= num_sents:
346
+ num_sents = len(tagged_data)
347
+ if randomize:
348
+ random.seed(len(tagged_data))
349
+ random.shuffle(tagged_data)
350
+ cutoff = int(num_sents * train)
351
+ training_data = tagged_data[:cutoff]
352
+ gold_data = tagged_data[cutoff:num_sents]
353
+ testing_data = [[t[0] for t in sent] for sent in gold_data]
354
+ if not separate_baseline_data:
355
+ baseline_data = training_data
356
+ else:
357
+ bl_cutoff = len(training_data) // 3
358
+ (baseline_data, training_data) = (
359
+ training_data[:bl_cutoff],
360
+ training_data[bl_cutoff:],
361
+ )
362
+ (trainseqs, traintokens) = corpus_size(training_data)
363
+ (testseqs, testtokens) = corpus_size(testing_data)
364
+ (bltrainseqs, bltraintokens) = corpus_size(baseline_data)
365
+ print(f"Read testing data ({testseqs:d} sents/{testtokens:d} wds)")
366
+ print(f"Read training data ({trainseqs:d} sents/{traintokens:d} wds)")
367
+ print(
368
+ "Read baseline data ({:d} sents/{:d} wds) {:s}".format(
369
+ bltrainseqs,
370
+ bltraintokens,
371
+ "" if separate_baseline_data else "[reused the training set]",
372
+ )
373
+ )
374
+ return (training_data, baseline_data, gold_data, testing_data)
375
+
376
+
377
+ def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None):
378
+ testcurve = [teststats["initialerrors"]]
379
+ for rulescore in teststats["rulescores"]:
380
+ testcurve.append(testcurve[-1] - rulescore)
381
+ testcurve = [1 - x / teststats["tokencount"] for x in testcurve[:take]]
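+ # each entry is now an accuracy (1 minus remaining errors over token count) after
+ # applying the first k rules; the training curve below is converted the same way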
382
+
383
+ traincurve = [trainstats["initialerrors"]]
384
+ for rulescore in trainstats["rulescores"]:
385
+ traincurve.append(traincurve[-1] - rulescore)
386
+ traincurve = [1 - x / trainstats["tokencount"] for x in traincurve[:take]]
387
+
388
+ import matplotlib.pyplot as plt
389
+
390
+ r = list(range(len(testcurve)))
391
+ plt.plot(r, testcurve, r, traincurve)
392
+ plt.axis([None, None, None, 1.0])
393
+ plt.savefig(learning_curve_output)
394
+
395
+
396
+ NN_CD_TAGGER = RegexpTagger([(r"^-?[0-9]+(\.[0-9]+)?$", "CD"), (r".*", "NN")])
397
+
398
+ REGEXP_TAGGER = RegexpTagger(
399
+ [
400
+ (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers
401
+ (r"(The|the|A|a|An|an)$", "AT"), # articles
402
+ (r".*able$", "JJ"), # adjectives
403
+ (r".*ness$", "NN"), # nouns formed from adjectives
404
+ (r".*ly$", "RB"), # adverbs
405
+ (r".*s$", "NNS"), # plural nouns
406
+ (r".*ing$", "VBG"), # gerunds
407
+ (r".*ed$", "VBD"), # past tense verbs
408
+ (r".*", "NN"), # nouns (default)
409
+ ]
410
+ )
411
+
412
+
413
+ def corpus_size(seqs):
414
+ return (len(seqs), sum(len(x) for x in seqs))
415
+
416
+
417
+ if __name__ == "__main__":
418
+ demo_learning_curve()
env-llmeval/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py ADDED
@@ -0,0 +1,38 @@
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ # returns a list of errors in string format
11
+
12
+
13
+ def error_list(train_sents, test_sents):
14
+ """
15
+ Returns a list of human-readable strings indicating the errors in the
16
+ given tagging of the corpus.
17
+
18
+ :param train_sents: The correct tagging of the corpus
19
+ :type train_sents: list(tuple)
20
+ :param test_sents: The tagged corpus
21
+ :type test_sents: list(tuple)
22
+ """
23
+ hdr = ("%25s | %s | %s\n" + "-" * 26 + "+" + "-" * 24 + "+" + "-" * 26) % (
24
+ "left context",
25
+ "word/test->gold".center(22),
26
+ "right context",
27
+ )
28
+ errors = [hdr]
29
+ for (train_sent, test_sent) in zip(train_sents, test_sents):
30
+ for wordnum, (word, train_pos) in enumerate(train_sent):
31
+ test_pos = test_sent[wordnum][1]
32
+ if train_pos != test_pos:
33
+ left = " ".join("%s/%s" % w for w in train_sent[:wordnum])
34
+ right = " ".join("%s/%s" % w for w in train_sent[wordnum + 1 :])
35
+ mid = f"{word}/{test_pos}->{train_pos}"
36
+ errors.append(f"{left[-25:]:>25} | {mid.center(22)} | {right[:25]}")
37
+
38
+ return errors
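+
+
+ # Editor's usage sketch (not part of the original module): error_list() applied to one
+ # gold sentence and the same sentence as re-tagged by some tagger. The sentences below
+ # are invented purely for illustration; the guard keeps normal imports unaffected.
+ if __name__ == "__main__":
+     gold = [[("the", "DT"), ("dog", "NN"), ("barks", "VBZ")]]
+     tagged = [[("the", "DT"), ("dog", "VB"), ("barks", "VBZ")]]
+     for line in error_list(gold, tagged):
+         print(line)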
env-llmeval/lib/python3.10/site-packages/nltk/tbl/rule.py ADDED
@@ -0,0 +1,322 @@
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ from abc import ABCMeta, abstractmethod
11
+
12
+ from nltk import jsontags
13
+
14
+
15
+ ######################################################################
16
+ # Tag Rules
17
+ ######################################################################
18
+ class TagRule(metaclass=ABCMeta):
19
+ """
20
+ An interface for tag transformations on a tagged corpus, as
21
+ performed by tbl taggers. Each transformation finds all tokens
22
+ in the corpus that are tagged with a specific original tag and
23
+ satisfy a specific condition, and replaces their tags with a
24
+ replacement tag. For any given transformation, the original
25
+ tag, replacement tag, and condition are fixed. Conditions may
26
+ depend on the token under consideration, as well as any other
27
+ tokens in the corpus.
28
+
29
+ Tag rules must be comparable and hashable.
30
+ """
31
+
32
+ def __init__(self, original_tag, replacement_tag):
33
+
34
+ self.original_tag = original_tag
35
+ """The tag which this TagRule may cause to be replaced."""
36
+
37
+ self.replacement_tag = replacement_tag
38
+ """The tag with which this TagRule may replace another tag."""
39
+
40
+ def apply(self, tokens, positions=None):
41
+ """
42
+ Apply this rule at every position in positions where it
43
+ applies to the given sentence. I.e., for each position p
44
+ in *positions*, if *tokens[p]* is tagged with this rule's
45
+ original tag, and satisfies this rule's condition, then set
46
+ its tag to be this rule's replacement tag.
47
+
48
+ :param tokens: The tagged sentence
49
+ :type tokens: list(tuple(str, str))
50
+ :type positions: list(int)
51
+ :param positions: The positions where the transformation is to
52
+ be tried. If not specified, try it at all positions.
53
+ :return: The indices of tokens whose tags were changed by this
54
+ rule.
55
+ :rtype: list(int)
56
+ """
57
+ if positions is None:
58
+ positions = list(range(len(tokens)))
59
+
60
+ # Determine the indices at which this rule applies.
61
+ change = [i for i in positions if self.applies(tokens, i)]
62
+
63
+ # Make the changes. Note: this must be done in a separate
64
+ # step from finding applicable locations, since we don't want
65
+ # the rule to interact with itself.
66
+ for i in change:
67
+ tokens[i] = (tokens[i][0], self.replacement_tag)
68
+
69
+ return change
70
+
71
+ @abstractmethod
72
+ def applies(self, tokens, index):
73
+ """
74
+ :return: True if the rule would change the tag of
75
+ ``tokens[index]``, False otherwise
76
+ :rtype: bool
77
+ :param tokens: A tagged sentence
78
+ :type tokens: list(str)
79
+ :param index: The index to check
80
+ :type index: int
81
+ """
82
+
83
+ # Rules must be comparable and hashable for the algorithm to work
84
+ def __eq__(self, other):
85
+ raise TypeError("Rules must implement __eq__()")
86
+
87
+ def __ne__(self, other):
88
+ raise TypeError("Rules must implement __ne__()")
89
+
90
+ def __hash__(self):
91
+ raise TypeError("Rules must implement __hash__()")
92
+
93
+
94
+ @jsontags.register_tag
95
+ class Rule(TagRule):
96
+ """
97
+ A Rule checks the current corpus position for a certain set of conditions;
98
+ if they are all fulfilled, the Rule is triggered, meaning that it
99
+ will change tag A to tag B. For other tags than A, nothing happens.
100
+
101
+ The conditions are parameters to the Rule instance. Each condition is a feature-value pair,
102
+ with a set of positions to check for the value of the corresponding feature.
103
+ Conceptually, the positions are joined by logical OR, and the feature set by logical AND.
104
+
105
+ More formally, the Rule is then applicable to the M{n}th token iff:
106
+
107
+ - The M{n}th token is tagged with the Rule's original tag; and
108
+ - For each (Feature(positions), M{value}) tuple:
109
+
110
+ - The value of Feature of at least one token in {n+p for p in positions}
111
+ is M{value}.
112
+ """
113
+
114
+ json_tag = "nltk.tbl.Rule"
115
+
116
+ def __init__(self, templateid, original_tag, replacement_tag, conditions):
117
+ """
118
+ Construct a new Rule that changes a token's tag from
119
+ C{original_tag} to C{replacement_tag} if all of the properties
120
+ specified in C{conditions} hold.
121
+
122
+ :param templateid: the template id (a zero-padded string, '001' etc,
123
+ so it will sort nicely)
124
+ :type templateid: string
125
+
126
+ :param conditions: A list of Feature(positions),
127
+ each of which specifies that the property (computed by
128
+ Feature.extract_property()) of at least one
129
+ token in M{n} + p in positions is C{value}.
130
+ :type conditions: C{iterable} of C{Feature}
131
+
132
+ """
133
+ TagRule.__init__(self, original_tag, replacement_tag)
134
+ self._conditions = conditions
135
+ self.templateid = templateid
136
+
137
+ def encode_json_obj(self):
138
+ return {
139
+ "templateid": self.templateid,
140
+ "original": self.original_tag,
141
+ "replacement": self.replacement_tag,
142
+ "conditions": self._conditions,
143
+ }
144
+
145
+ @classmethod
146
+ def decode_json_obj(cls, obj):
147
+ return cls(
148
+ obj["templateid"],
149
+ obj["original"],
150
+ obj["replacement"],
151
+ tuple(tuple(feat) for feat in obj["conditions"]),
152
+ )
153
+
154
+ def applies(self, tokens, index):
155
+ # Inherit docs from TagRule
156
+
157
+ # Does the given token have this Rule's "original tag"?
158
+ if tokens[index][1] != self.original_tag:
159
+ return False
160
+
161
+ # Check to make sure that every condition holds.
162
+ for (feature, val) in self._conditions:
163
+
164
+ # Look for *any* token that satisfies the condition.
165
+ for pos in feature.positions:
166
+ if not (0 <= index + pos < len(tokens)):
167
+ continue
168
+ if feature.extract_property(tokens, index + pos) == val:
169
+ break
170
+ else:
171
+ # No token satisfied the condition; return false.
172
+ return False
173
+
174
+ # Every condition checked out, so the Rule is applicable.
175
+ return True
176
+
177
+ def __eq__(self, other):
178
+ return self is other or (
179
+ other is not None
180
+ and other.__class__ == self.__class__
181
+ and self.original_tag == other.original_tag
182
+ and self.replacement_tag == other.replacement_tag
183
+ and self._conditions == other._conditions
184
+ )
185
+
186
+ def __ne__(self, other):
187
+ return not (self == other)
188
+
189
+ def __hash__(self):
190
+
191
+ # Cache our hash value (justified by profiling.)
192
+ try:
193
+ return self.__hash
194
+ except AttributeError:
195
+ self.__hash = hash(repr(self))
196
+ return self.__hash
197
+
198
+ def __repr__(self):
199
+ # Cache the repr (justified by profiling -- this is used as
200
+ # a sort key when deterministic=True.)
201
+ try:
202
+ return self.__repr
203
+ except AttributeError:
204
+ self.__repr = "{}('{}', {}, {}, [{}])".format(
205
+ self.__class__.__name__,
206
+ self.templateid,
207
+ repr(self.original_tag),
208
+ repr(self.replacement_tag),
209
+ # list(self._conditions) would be simpler but will not generate
210
+ # the same Rule.__repr__ in python 2 and 3 and thus break some tests
211
+ ", ".join(f"({f},{repr(v)})" for (f, v) in self._conditions),
212
+ )
213
+
214
+ return self.__repr
215
+
216
+ def __str__(self):
217
+ def _condition_to_logic(feature, value):
218
+ """
219
+ Return a compact, predicate-logic styled string representation
220
+ of the given condition.
221
+ """
222
+ return "{}:{}@[{}]".format(
223
+ feature.PROPERTY_NAME,
224
+ value,
225
+ ",".join(str(w) for w in feature.positions),
226
+ )
227
+
228
+ conditions = " & ".join(
229
+ [_condition_to_logic(f, v) for (f, v) in self._conditions]
230
+ )
231
+ s = f"{self.original_tag}->{self.replacement_tag} if {conditions}"
232
+
233
+ return s
234
+
235
+ def format(self, fmt):
236
+ """
237
+ Return a string representation of this rule.
238
+
239
+ >>> from nltk.tbl.rule import Rule
240
+ >>> from nltk.tag.brill import Pos
241
+
242
+ >>> r = Rule("23", "VB", "NN", [(Pos([-2,-1]), 'DT')])
243
+
244
+ r.format("str") == str(r)
245
+ True
246
+ >>> r.format("str")
247
+ 'VB->NN if Pos:DT@[-2,-1]'
248
+
249
+ r.format("repr") == repr(r)
250
+ True
251
+ >>> r.format("repr")
252
+ "Rule('23', 'VB', 'NN', [(Pos([-2, -1]),'DT')])"
253
+
254
+ >>> r.format("verbose")
255
+ 'VB -> NN if the Pos of words i-2...i-1 is "DT"'
256
+
257
+ >>> r.format("not_found")
258
+ Traceback (most recent call last):
259
+ File "<stdin>", line 1, in <module>
260
+ File "nltk/tbl/rule.py", line 256, in format
261
+ raise ValueError("unknown rule format spec: {0}".format(fmt))
262
+ ValueError: unknown rule format spec: not_found
263
+ >>>
264
+
265
+ :param fmt: format specification
266
+ :type fmt: str
267
+ :return: string representation
268
+ :rtype: str
269
+ """
270
+ if fmt == "str":
271
+ return self.__str__()
272
+ elif fmt == "repr":
273
+ return self.__repr__()
274
+ elif fmt == "verbose":
275
+ return self._verbose_format()
276
+ else:
277
+ raise ValueError(f"unknown rule format spec: {fmt}")
278
+
279
+ def _verbose_format(self):
280
+ """
281
+ Return a wordy, human-readable string representation
282
+ of the given rule.
283
+
284
+ Not sure how useful this is.
285
+ """
286
+
287
+ def condition_to_str(feature, value):
288
+ return 'the {} of {} is "{}"'.format(
289
+ feature.PROPERTY_NAME,
290
+ range_to_str(feature.positions),
291
+ value,
292
+ )
293
+
294
+ def range_to_str(positions):
295
+ if len(positions) == 1:
296
+ p = positions[0]
297
+ if p == 0:
298
+ return "this word"
299
+ if p == -1:
300
+ return "the preceding word"
301
+ elif p == 1:
302
+ return "the following word"
303
+ elif p < 0:
304
+ return "word i-%d" % -p
305
+ elif p > 0:
306
+ return "word i+%d" % p
307
+ else:
308
+ # for complete compatibility with the wordy format of nltk2
309
+ mx = max(positions)
310
+ mn = min(positions)
311
+ if mx - mn == len(positions) - 1:
312
+ return "words i%+d...i%+d" % (mn, mx)
313
+ else:
314
+ return "words {{{}}}".format(
315
+ ",".join("i%+d" % d for d in positions)
316
+ )
317
+
318
+ replacement = f"{self.original_tag} -> {self.replacement_tag}"
319
+ conditions = (" if " if self._conditions else "") + ", and ".join(
320
+ condition_to_str(f, v) for (f, v) in self._conditions
321
+ )
322
+ return replacement + conditions
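+
+
+ # Editor's usage sketch (not part of the original module): applying one Rule by hand.
+ # Pos comes from nltk.tag.brill; the sentence and tags are invented for illustration.
+ if __name__ == "__main__":
+     from nltk.tag.brill import Pos
+     sent = [("the", "DT"), ("cat", "VB"), ("sat", "VBD")]
+     r = Rule("001", "VB", "NN", [(Pos([-1]), "DT")])
+     changed = r.apply(sent)   # -> [1]; sent[1] is now ("cat", "NN")
+     print(changed, sent)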
env-llmeval/lib/python3.10/site-packages/nltk/tbl/template.py ADDED
@@ -0,0 +1,325 @@
1
+ # Natural Language Toolkit: Transformation-based learning
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Marcus Uneson <[email protected]>
5
+ # based on previous (nltk2) version by
6
+ # Christopher Maloof, Edward Loper, Steven Bird
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ import itertools as it
11
+ from abc import ABCMeta, abstractmethod
12
+
13
+ from nltk.tbl.feature import Feature
14
+ from nltk.tbl.rule import Rule
15
+
16
+
17
+ class BrillTemplateI(metaclass=ABCMeta):
18
+ """
19
+ An interface for generating lists of transformational rules that
20
+ apply at given sentence positions. ``BrillTemplateI`` is used by
21
+ ``Brill`` training algorithms to generate candidate rules.
22
+ """
23
+
24
+ @abstractmethod
25
+ def applicable_rules(self, tokens, i, correctTag):
26
+ """
27
+ Return a list of the transformational rules that would correct
28
+ the ``i``-th subtoken's tag in the given token. In particular,
29
+ return a list of zero or more rules that would change
30
+ ``tokens[i][1]`` to ``correctTag``, if applied to ``token[i]``.
31
+
32
+ If the ``i``-th token already has the correct tag (i.e., if
33
+ ``tagged_tokens[i][1] == correctTag``), then
34
+ ``applicable_rules()`` should return the empty list.
35
+
36
+ :param tokens: The tagged tokens being tagged.
37
+ :type tokens: list(tuple)
38
+ :param i: The index of the token whose tag should be corrected.
39
+ :type i: int
40
+ :param correctTag: The correct tag for the ``i``-th token.
41
+ :type correctTag: any
42
+ :rtype: list(BrillRule)
43
+ """
44
+
45
+ @abstractmethod
46
+ def get_neighborhood(self, token, index):
47
+ """
48
+ Returns the set of indices *i* such that
49
+ ``applicable_rules(token, i, ...)`` depends on the value of
50
+ the *index*th token of *token*.
51
+
52
+ This method is used by the "fast" Brill tagger trainer.
53
+
54
+ :param token: The tokens being tagged.
55
+ :type token: list(tuple)
56
+ :param index: The index whose neighborhood should be returned.
57
+ :type index: int
58
+ :rtype: set
59
+ """
60
+
61
+
62
+ class Template(BrillTemplateI):
63
+ """
64
+ A tbl Template that generates a list of L{Rule}s that apply at a given sentence
65
+ position. In particular, each C{Template} is parameterized by a list of
66
+ independent features (a combination of a specific
67
+ property to extract and a list C{L} of relative positions at which to extract
68
+ it) and generates all Rules that:
69
+
70
+ - use the given features, each at its own independent position; and
71
+ - are applicable to the given token.
72
+ """
73
+
74
+ ALLTEMPLATES = []
75
+ # record a unique id of form "001", for each template created
76
+ # _ids = it.count(0)
77
+
78
+ def __init__(self, *features):
79
+
80
+ """
81
+ Construct a Template for generating Rules.
82
+
83
+ Takes a list of Features. A C{Feature} is a combination
84
+ of a specific property and its relative positions and should be
85
+ a subclass of L{nltk.tbl.feature.Feature}.
86
+
87
+ An alternative calling convention (kept for backwards compatibility,
88
+ but less expressive as it only permits one feature type) is
89
+ Template(Feature, (start1, end1), (start2, end2), ...)
90
+ In new code, that would be better written
91
+ Template(Feature(start1, end1), Feature(start2, end2), ...)
92
+
93
+ For instance, importing some features
94
+
95
+ >>> from nltk.tbl.template import Template
96
+ >>> from nltk.tag.brill import Word, Pos
97
+
98
+ Create some features
99
+
100
+ >>> wfeat1, wfeat2, pfeat = (Word([-1]), Word([1,2]), Pos([-2,-1]))
101
+
102
+ Create a single-feature template
103
+
104
+ >>> Template(wfeat1)
105
+ Template(Word([-1]))
106
+
107
+ Or a two-feature one
108
+
109
+ >>> Template(wfeat1, wfeat2)
110
+ Template(Word([-1]),Word([1, 2]))
111
+
112
+ Or a three-feature one with two different feature types
113
+
114
+ >>> Template(wfeat1, wfeat2, pfeat)
115
+ Template(Word([-1]),Word([1, 2]),Pos([-2, -1]))
116
+
117
+ deprecated api: Feature subclass, followed by list of (start,end) pairs
118
+ (permits only a single Feature)
119
+
120
+ >>> Template(Word, (-2,-1), (0,0))
121
+ Template(Word([-2, -1]),Word([0]))
122
+
123
+ Incorrect specification raises TypeError
124
+
125
+ >>> Template(Word, (-2,-1), Pos, (0,0))
126
+ Traceback (most recent call last):
127
+ File "<stdin>", line 1, in <module>
128
+ File "nltk/tag/tbl/template.py", line 143, in __init__
129
+ raise TypeError(
130
+ TypeError: expected either Feature1(args), Feature2(args), ... or Feature, (start1, end1), (start2, end2), ...
131
+
132
+ :type features: list of Features
133
+ :param features: the features to build this Template on
134
+ """
135
+ # determine the calling form: either
136
+ # Template(Feature, args1, [args2, ...)]
137
+ # Template(Feature1(args), Feature2(args), ...)
138
+ if all(isinstance(f, Feature) for f in features):
139
+ self._features = features
140
+ elif issubclass(features[0], Feature) and all(
141
+ isinstance(a, tuple) for a in features[1:]
142
+ ):
143
+ self._features = [features[0](*tp) for tp in features[1:]]
144
+ else:
145
+ raise TypeError(
146
+ "expected either Feature1(args), Feature2(args), ... or Feature, (start1, end1), (start2, end2), ..."
147
+ )
148
+ self.id = f"{len(self.ALLTEMPLATES):03d}"
149
+ self.ALLTEMPLATES.append(self)
150
+
151
+ def __repr__(self):
152
+ return "{}({})".format(
153
+ self.__class__.__name__,
154
+ ",".join([str(f) for f in self._features]),
155
+ )
156
+
157
+ def applicable_rules(self, tokens, index, correct_tag):
158
+ if tokens[index][1] == correct_tag:
159
+ return []
160
+
161
+ # For each of this Template's features, find the conditions
162
+ # that are applicable for the given token.
163
+ # Then, generate one Rule for each combination of features
164
+ # (the crossproduct of the conditions).
165
+
166
+ applicable_conditions = self._applicable_conditions(tokens, index)
167
+ xs = list(it.product(*applicable_conditions))
168
+ return [Rule(self.id, tokens[index][1], correct_tag, tuple(x)) for x in xs]
169
+
170
+ def _applicable_conditions(self, tokens, index):
171
+ """
172
+ :returns: A set of all conditions for rules
173
+ that are applicable to C{tokens[index]}.
174
+ """
175
+ conditions = []
176
+
177
+ for feature in self._features:
178
+ conditions.append([])
179
+ for pos in feature.positions:
180
+ if not (0 <= index + pos < len(tokens)):
181
+ continue
182
+ value = feature.extract_property(tokens, index + pos)
183
+ conditions[-1].append((feature, value))
184
+ return conditions
185
+
186
+ def get_neighborhood(self, tokens, index):
187
+ # inherit docs from BrillTemplateI
188
+
189
+ # applicable_rules(tokens, index, ...) depends on index.
190
+ neighborhood = {index} # set literal for python 2.7+
191
+
192
+ # applicable_rules(tokens, i, ...) depends on index if
193
+ # i+start < index <= i+end.
194
+
195
+ allpositions = [0] + [p for feat in self._features for p in feat.positions]
196
+ start, end = min(allpositions), max(allpositions)
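+ # a rule evaluated at position i inspects tokens i+start .. i+end, so changing token
+ # `index` can affect any i with index-end <= i <= index-start (clamped to the sentence below)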
197
+ s = max(0, index + (-end))
198
+ e = min(index + (-start) + 1, len(tokens))
199
+ for i in range(s, e):
200
+ neighborhood.add(i)
201
+ return neighborhood
202
+
203
+ @classmethod
204
+ def expand(cls, featurelists, combinations=None, skipintersecting=True):
205
+
206
+ """
207
+ Factory method to mass generate Templates from a list L of lists of Features.
208
+
209
+ #With combinations=(k1, k2), the function will in all possible ways choose k1 ... k2
210
+ #of the sublists in L; it will output all Templates formed by the Cartesian product
211
+ #of this selection, with duplicates and other semantically equivalent
212
+ #forms removed. Default for combinations is (1, len(L)).
213
+
214
+ The feature lists may have been specified
215
+ manually, or generated from Feature.expand(). For instance,
216
+
217
+ >>> from nltk.tbl.template import Template
218
+ >>> from nltk.tag.brill import Word, Pos
219
+
220
+ #creating some features
221
+ >>> (wd_0, wd_01) = (Word([0]), Word([0,1]))
222
+
223
+ >>> (pos_m2, pos_m33) = (Pos([-2]), Pos([-3,-2,-1,0,1,2,3]))
224
+
225
+ >>> list(Template.expand([[wd_0], [pos_m2]]))
226
+ [Template(Word([0])), Template(Pos([-2])), Template(Pos([-2]),Word([0]))]
227
+
228
+ >>> list(Template.expand([[wd_0, wd_01], [pos_m2]]))
229
+ [Template(Word([0])), Template(Word([0, 1])), Template(Pos([-2])), Template(Pos([-2]),Word([0])), Template(Pos([-2]),Word([0, 1]))]
230
+
231
+ #note: with Feature.expand(), it is very easy to generate more templates
232
+ #than your system can handle -- for instance,
233
+ >>> wordtpls = Word.expand([-2,-1,0,1], [1,2], excludezero=False)
234
+ >>> len(wordtpls)
235
+ 7
236
+
237
+ >>> postpls = Pos.expand([-3,-2,-1,0,1,2], [1,2,3], excludezero=True)
238
+ >>> len(postpls)
239
+ 9
240
+
241
+ #and now the Cartesian product of all non-empty combinations of two wordtpls and
242
+ #two postpls, with semantic equivalents removed
243
+ >>> templates = list(Template.expand([wordtpls, wordtpls, postpls, postpls]))
244
+ >>> len(templates)
245
+ 713
246
+
247
+
248
+ #For instance, Template.expand([[wd_0, wd_01], [Pos([-2]), Pos([-1])]])
+ #will return a list of eight templates:
249
+ Template(Word([0])),
250
+ Template(Word([0, 1])),
251
+ Template(Pos([-2])),
252
+ Template(Pos([-1])),
253
+ Template(Pos([-2]),Word([0])),
254
+ Template(Pos([-1]),Word([0])),
255
+ Template(Pos([-2]),Word([0, 1])),
256
+ Template(Pos([-1]),Word([0, 1]))]
257
+
258
+
259
+ #Templates where one feature is a subset of another, such as
260
+ #Template(Word([0,1]), Word([1]), will not appear in the output.
261
+ #By default, this non-subset constraint is tightened to disjointness:
262
+ #Templates of type Template(Word([0,1]), Word([1,2]) will also be filtered out.
263
+ #With skipintersecting=False, then such Templates are allowed
264
+
265
+ WARNING: this method makes it very easy to fill all your memory when training
266
+ generated templates on any real-world corpus
267
+
268
+ :param featurelists: lists of Features, whose Cartesian product will return a set of Templates
269
+ :type featurelists: list of (list of Features)
270
+ :param combinations: given n featurelists: if combinations=k, all generated Templates will have
271
+ k features; if combinations=(k1,k2) they will have k1..k2 features; if None, defaults to 1..n
272
+ :type combinations: None, int, or (int, int)
273
+ :param skipintersecting: if True, do not output intersecting Templates (non-disjoint positions for some feature)
274
+ :type skipintersecting: bool
275
+ :returns: generator of Templates
276
+
277
+ """
278
+
279
+ def nonempty_powerset(xs): # xs is a list
280
+ # cf. itertools docs: nonempty_powerset([1,2,3]) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
281
+
282
+ # find the correct tuple given combinations, one of {None, k, (k1,k2)}
283
+ k = combinations # for brevity
284
+ combrange = (
285
+ (1, len(xs) + 1)
286
+ if k is None
287
+ else (k, k + 1) # n over 1 .. n over n (all non-empty combinations)
288
+ if isinstance(k, int)
289
+ else (k[0], k[1] + 1) # n over k (only
290
+ ) # n over k1, n over k1+1... n over k2
291
+ return it.chain.from_iterable(
292
+ it.combinations(xs, r) for r in range(*combrange)
293
+ )
294
+
295
+ seentemplates = set()
296
+ for picks in nonempty_powerset(featurelists):
297
+ for pick in it.product(*picks):
298
+ if any(
299
+ i != j and x.issuperset(y)
300
+ for (i, x) in enumerate(pick)
301
+ for (j, y) in enumerate(pick)
302
+ ):
303
+ continue
304
+ if skipintersecting and any(
305
+ i != j and x.intersects(y)
306
+ for (i, x) in enumerate(pick)
307
+ for (j, y) in enumerate(pick)
308
+ ):
309
+ continue
310
+ thistemplate = cls(*sorted(pick))
311
+ strpick = str(thistemplate)
312
+ #!!FIXME --this is hackish
313
+ if strpick in seentemplates: # already added
314
+ cls._poptemplate()
315
+ continue
316
+ seentemplates.add(strpick)
317
+ yield thistemplate
318
+
319
+ @classmethod
320
+ def _cleartemplates(cls):
321
+ cls.ALLTEMPLATES = []
322
+
323
+ @classmethod
324
+ def _poptemplate(cls):
325
+ return cls.ALLTEMPLATES.pop() if cls.ALLTEMPLATES else None
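+
+
+ # Editor's usage sketch (not part of the original module): asking one Template for the
+ # candidate Rules that would correct a single mis-tagged token. Word and Pos come from
+ # nltk.tag.brill; the sentence is invented for illustration.
+ if __name__ == "__main__":
+     from nltk.tag.brill import Pos, Word
+     tpl = Template(Pos([-1]), Word([0]))
+     sent = [("the", "DT"), ("cat", "VB"), ("sat", "VBD")]
+     # token 1 is tagged VB but should be NN; each proposed Rule encodes one way to fix it
+     for rule in tpl.applicable_rules(sent, 1, "NN"):
+         print(rule)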
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__init__.py ADDED
@@ -0,0 +1,132 @@
1
+ # Natural Language Toolkit: Tokenizers
2
+ #
3
+ # Copyright (C) 2001-2023 NLTK Project
4
+ # Author: Edward Loper <[email protected]>
5
+ # Steven Bird <[email protected]> (minor additions)
6
+ # Contributors: matthewmc, clouds56
7
+ # URL: <https://www.nltk.org/>
8
+ # For license information, see LICENSE.TXT
9
+
10
+ r"""
11
+ NLTK Tokenizer Package
12
+
13
+ Tokenizers divide strings into lists of substrings. For example,
14
+ tokenizers can be used to find the words and punctuation in a string:
15
+
16
+ >>> from nltk.tokenize import word_tokenize
17
+ >>> s = '''Good muffins cost $3.88\nin New York. Please buy me
18
+ ... two of them.\n\nThanks.'''
19
+ >>> word_tokenize(s) # doctest: +NORMALIZE_WHITESPACE
20
+ ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.',
21
+ 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
22
+
23
+ This particular tokenizer requires the Punkt sentence tokenization
24
+ models to be installed. NLTK also provides a simpler,
25
+ regular-expression based tokenizer, which splits text on whitespace
26
+ and punctuation:
27
+
28
+ >>> from nltk.tokenize import wordpunct_tokenize
29
+ >>> wordpunct_tokenize(s) # doctest: +NORMALIZE_WHITESPACE
30
+ ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.',
31
+ 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
32
+
33
+ We can also operate at the level of sentences, using the sentence
34
+ tokenizer directly as follows:
35
+
36
+ >>> from nltk.tokenize import sent_tokenize, word_tokenize
37
+ >>> sent_tokenize(s)
38
+ ['Good muffins cost $3.88\nin New York.', 'Please buy me\ntwo of them.', 'Thanks.']
39
+ >>> [word_tokenize(t) for t in sent_tokenize(s)] # doctest: +NORMALIZE_WHITESPACE
40
+ [['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.'],
41
+ ['Please', 'buy', 'me', 'two', 'of', 'them', '.'], ['Thanks', '.']]
42
+
43
+ Caution: when tokenizing a Unicode string, make sure you are not
44
+ using an encoded version of the string (it may be necessary to
45
+ decode it first, e.g. with ``s.decode("utf8")``.
46
+
47
+ NLTK tokenizers can produce token-spans, represented as tuples of integers
48
+ having the same semantics as string slices, to support efficient comparison
49
+ of tokenizers. (These methods are implemented as generators.)
50
+
51
+ >>> from nltk.tokenize import WhitespaceTokenizer
52
+ >>> list(WhitespaceTokenizer().span_tokenize(s)) # doctest: +NORMALIZE_WHITESPACE
53
+ [(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), (38, 44),
54
+ (45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)]
55
+
56
+ There are numerous ways to tokenize text. If you need more control over
57
+ tokenization, see the other methods provided in this package.
58
+
59
+ For further information, please see Chapter 3 of the NLTK book.
60
+ """
61
+
62
+ import re
63
+
64
+ from nltk.data import load
65
+ from nltk.tokenize.casual import TweetTokenizer, casual_tokenize
66
+ from nltk.tokenize.destructive import NLTKWordTokenizer
67
+ from nltk.tokenize.legality_principle import LegalitySyllableTokenizer
68
+ from nltk.tokenize.mwe import MWETokenizer
69
+ from nltk.tokenize.punkt import PunktSentenceTokenizer
70
+ from nltk.tokenize.regexp import (
71
+ BlanklineTokenizer,
72
+ RegexpTokenizer,
73
+ WhitespaceTokenizer,
74
+ WordPunctTokenizer,
75
+ blankline_tokenize,
76
+ regexp_tokenize,
77
+ wordpunct_tokenize,
78
+ )
79
+ from nltk.tokenize.repp import ReppTokenizer
80
+ from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize
81
+ from nltk.tokenize.simple import (
82
+ LineTokenizer,
83
+ SpaceTokenizer,
84
+ TabTokenizer,
85
+ line_tokenize,
86
+ )
87
+ from nltk.tokenize.sonority_sequencing import SyllableTokenizer
88
+ from nltk.tokenize.stanford_segmenter import StanfordSegmenter
89
+ from nltk.tokenize.texttiling import TextTilingTokenizer
90
+ from nltk.tokenize.toktok import ToktokTokenizer
91
+ from nltk.tokenize.treebank import TreebankWordDetokenizer, TreebankWordTokenizer
92
+ from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize
93
+
94
+
95
+ # Standard sentence tokenizer.
96
+ def sent_tokenize(text, language="english"):
97
+ """
98
+ Return a sentence-tokenized copy of *text*,
99
+ using NLTK's recommended sentence tokenizer
100
+ (currently :class:`.PunktSentenceTokenizer`
101
+ for the specified language).
102
+
103
+ :param text: text to split into sentences
104
+ :param language: the model name in the Punkt corpus
105
+ """
106
+ tokenizer = load(f"tokenizers/punkt/{language}.pickle")
107
+ return tokenizer.tokenize(text)
108
+
109
+
110
+ # Standard word tokenizer.
111
+ _treebank_word_tokenizer = NLTKWordTokenizer()
112
+
113
+
114
+ def word_tokenize(text, language="english", preserve_line=False):
115
+ """
116
+ Return a tokenized copy of *text*,
117
+ using NLTK's recommended word tokenizer
118
+ (currently an improved :class:`.TreebankWordTokenizer`
119
+ along with :class:`.PunktSentenceTokenizer`
120
+ for the specified language).
121
+
122
+ :param text: text to split into words
123
+ :type text: str
124
+ :param language: the model name in the Punkt corpus
125
+ :type language: str
126
+ :param preserve_line: A flag to decide whether to sentence tokenize the text or not.
127
+ :type preserve_line: bool
128
+ """
129
+ sentences = [text] if preserve_line else sent_tokenize(text, language)
130
+ return [
131
+ token for sent in sentences for token in _treebank_word_tokenizer.tokenize(sent)
132
+ ]
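+
+
+ # Editor's usage sketch (not part of the original module): the effect of preserve_line,
+ # assuming the Punkt models are installed as described in the module docstring above.
+ if __name__ == "__main__":
+     text = "Good muffins cost $3.88. Please buy me two."
+     print(word_tokenize(text))                      # sentence-split first, then word-tokenized
+     print(word_tokenize(text, preserve_line=True))  # the text is treated as a single line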
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.35 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc ADDED
Binary file (3.08 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc ADDED
Binary file (6.77 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc ADDED
Binary file (6.13 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc ADDED
Binary file (3.75 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc ADDED
Binary file (4.92 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc ADDED
Binary file (43.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc ADDED
Binary file (8.21 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc ADDED
Binary file (7.98 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc ADDED
Binary file (4.96 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc ADDED
Binary file (5.64 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc ADDED
Binary file (5.76 kB). View file
 
env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc ADDED
Binary file (3.42 kB). View file