Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/nltk/ccg/lexicon.py +338 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/__init__.py +92 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/api.py +74 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/em.py +219 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/gaac.py +170 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/kmeans.py +231 -0
- llmeval-env/lib/python3.10/site-packages/nltk/cluster/util.py +300 -0
- llmeval-env/lib/python3.10/site-packages/nltk/parse/__init__.py +102 -0
- llmeval-env/lib/python3.10/site-packages/nltk/parse/api.py +72 -0
- llmeval-env/lib/python3.10/site-packages/nltk/parse/dependencygraph.py +799 -0
- llmeval-env/lib/python3.10/site-packages/nltk/parse/generate.py +85 -0
- llmeval-env/lib/python3.10/site-packages/nltk/parse/malt.py +393 -0
- llmeval-env/lib/python3.10/site-packages/nltk/parse/stanford.py +470 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__init__.py +34 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/util.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/wordnet.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/arlstem.py +361 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/arlstem2.py +457 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/cistem.py +209 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/isri.py +395 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/lancaster.py +343 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/porter.py +715 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/regexp.py +56 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/rslp.py +137 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/snowball.py +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/stem/wordnet.py +49 -0
- llmeval-env/lib/python3.10/site-packages/nltk/translate/__init__.py +32 -0
- llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/bleu_score.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gale_church.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gdfa.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/nltk/ccg/lexicon.py
ADDED
@@ -0,0 +1,338 @@
# Natural Language Toolkit: Combinatory Categorial Grammar
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Graeme Gange <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
CCG Lexicons
"""

import re
from collections import defaultdict

from nltk.ccg.api import CCGVar, Direction, FunctionalCategory, PrimitiveCategory
from nltk.internals import deprecated
from nltk.sem.logic import Expression

# ------------
# Regular expressions used for parsing components of the lexicon
# ------------

# Parses a primitive category and subscripts
PRIM_RE = re.compile(r"""([A-Za-z]+)(\[[A-Za-z,]+\])?""")

# Separates the next primitive category from the remainder of the
# string
NEXTPRIM_RE = re.compile(r"""([A-Za-z]+(?:\[[A-Za-z,]+\])?)(.*)""")

# Separates the next application operator from the remainder
APP_RE = re.compile(r"""([\\/])([.,]?)([.,]?)(.*)""")

# Parses the definition of the right-hand side (rhs) of either a word or a family
LEX_RE = re.compile(r"""([\S_]+)\s*(::|[-=]+>)\s*(.+)""", re.UNICODE)

# Parses the right hand side that contains category and maybe semantic predicate
RHS_RE = re.compile(r"""([^{}]*[^ {}])\s*(\{[^}]+\})?""", re.UNICODE)

# Parses the semantic predicate
SEMANTICS_RE = re.compile(r"""\{([^}]+)\}""", re.UNICODE)

# Strips comments from a line
COMMENTS_RE = re.compile("""([^#]*)(?:#.*)?""")


class Token:
    """
    Class representing a token.

    token => category {semantics}
    e.g. eat => S\\var[pl]/var {\\x y.eat(x,y)}

    * `token` (string)
    * `categ` (string)
    * `semantics` (Expression)
    """

    def __init__(self, token, categ, semantics=None):
        self._token = token
        self._categ = categ
        self._semantics = semantics

    def categ(self):
        return self._categ

    def semantics(self):
        return self._semantics

    def __str__(self):
        semantics_str = ""
        if self._semantics is not None:
            semantics_str = " {" + str(self._semantics) + "}"
        return "" + str(self._categ) + semantics_str

    def __cmp__(self, other):
        if not isinstance(other, Token):
            return -1
        return cmp((self._categ, self._semantics), other.categ(), other.semantics())


class CCGLexicon:
    """
    Class representing a lexicon for CCG grammars.

    * `primitives`: The list of primitive categories for the lexicon
    * `families`: Families of categories
    * `entries`: A mapping of words to possible categories
    """

    def __init__(self, start, primitives, families, entries):
        self._start = PrimitiveCategory(start)
        self._primitives = primitives
        self._families = families
        self._entries = entries

    def categories(self, word):
        """
        Returns all the possible categories for a word
        """
        return self._entries[word]

    def start(self):
        """
        Return the target category for the parser
        """
        return self._start

    def __str__(self):
        """
        String representation of the lexicon. Used for debugging.
        """
        string = ""
        first = True
        for ident in sorted(self._entries):
            if not first:
                string = string + "\n"
            string = string + ident + " => "

            first = True
            for cat in self._entries[ident]:
                if not first:
                    string = string + " | "
                else:
                    first = False
                string = string + "%s" % cat
        return string


# -----------
# Parsing lexicons
# -----------


def matchBrackets(string):
    """
    Separate the contents matching the first set of brackets from the rest of
    the input.
    """
    rest = string[1:]
    inside = "("

    while rest != "" and not rest.startswith(")"):
        if rest.startswith("("):
            (part, rest) = matchBrackets(rest)
            inside = inside + part
        else:
            inside = inside + rest[0]
            rest = rest[1:]
    if rest.startswith(")"):
        return (inside + ")", rest[1:])
    raise AssertionError("Unmatched bracket in string '" + string + "'")


def nextCategory(string):
    """
    Separate the string for the next portion of the category from the rest
    of the string
    """
    if string.startswith("("):
        return matchBrackets(string)
    return NEXTPRIM_RE.match(string).groups()


def parseApplication(app):
    """
    Parse an application operator
    """
    return Direction(app[0], app[1:])


def parseSubscripts(subscr):
    """
    Parse the subscripts for a primitive category
    """
    if subscr:
        return subscr[1:-1].split(",")
    return []


def parsePrimitiveCategory(chunks, primitives, families, var):
    """
    Parse a primitive category

    If the primitive is the special category 'var', replace it with the
    correct `CCGVar`.
    """
    if chunks[0] == "var":
        if chunks[1] is None:
            if var is None:
                var = CCGVar()
            return (var, var)

    catstr = chunks[0]
    if catstr in families:
        (cat, cvar) = families[catstr]
        if var is None:
            var = cvar
        else:
            cat = cat.substitute([(cvar, var)])
        return (cat, var)

    if catstr in primitives:
        subscrs = parseSubscripts(chunks[1])
        return (PrimitiveCategory(catstr, subscrs), var)
    raise AssertionError(
        "String '" + catstr + "' is neither a family nor primitive category."
    )


def augParseCategory(line, primitives, families, var=None):
    """
    Parse a string representing a category, and returns a tuple with
    (possibly) the CCG variable for the category
    """
    (cat_string, rest) = nextCategory(line)

    if cat_string.startswith("("):
        (res, var) = augParseCategory(cat_string[1:-1], primitives, families, var)

    else:
        (res, var) = parsePrimitiveCategory(
            PRIM_RE.match(cat_string).groups(), primitives, families, var
        )

    while rest != "":
        app = APP_RE.match(rest).groups()
        direction = parseApplication(app[0:3])
        rest = app[3]

        (cat_string, rest) = nextCategory(rest)
        if cat_string.startswith("("):
            (arg, var) = augParseCategory(cat_string[1:-1], primitives, families, var)
        else:
            (arg, var) = parsePrimitiveCategory(
                PRIM_RE.match(cat_string).groups(), primitives, families, var
            )
        res = FunctionalCategory(res, arg, direction)

    return (res, var)


def fromstring(lex_str, include_semantics=False):
    """
    Convert string representation into a lexicon for CCGs.
    """
    CCGVar.reset_id()
    primitives = []
    families = {}
    entries = defaultdict(list)
    for line in lex_str.splitlines():
        # Strip comments and leading/trailing whitespace.
        line = COMMENTS_RE.match(line).groups()[0].strip()
        if line == "":
            continue

        if line.startswith(":-"):
            # A line of primitive categories.
            # The first one is the target category
            # ie, :- S, N, NP, VP
            primitives = primitives + [
                prim.strip() for prim in line[2:].strip().split(",")
            ]
        else:
            # Either a family definition, or a word definition
            (ident, sep, rhs) = LEX_RE.match(line).groups()
            (catstr, semantics_str) = RHS_RE.match(rhs).groups()
            (cat, var) = augParseCategory(catstr, primitives, families)

            if sep == "::":
                # Family definition
                # ie, Det :: NP/N
                families[ident] = (cat, var)
            else:
                semantics = None
                if include_semantics is True:
                    if semantics_str is None:
                        raise AssertionError(
                            line
                            + " must contain semantics because include_semantics is set to True"
                        )
                    else:
                        semantics = Expression.fromstring(
                            SEMANTICS_RE.match(semantics_str).groups()[0]
                        )
                # Word definition
                # ie, which => (N\N)/(S/NP)
                entries[ident].append(Token(ident, cat, semantics))
    return CCGLexicon(primitives[0], primitives, families, entries)


@deprecated("Use fromstring() instead.")
def parseLexicon(lex_str):
    return fromstring(lex_str)


openccg_tinytiny = fromstring(
    """
    # Rather minimal lexicon based on the openccg `tinytiny' grammar.
    # Only incorporates a subset of the morphological subcategories, however.
    :- S,NP,N                    # Primitive categories
    Det :: NP/N                  # Determiners
    Pro :: NP
    IntransVsg :: S\\NP[sg]      # Tensed intransitive verbs (singular)
    IntransVpl :: S\\NP[pl]      # Plural
    TransVsg :: S\\NP[sg]/NP     # Tensed transitive verbs (singular)
    TransVpl :: S\\NP[pl]/NP     # Plural

    the => NP[sg]/N[sg]
    the => NP[pl]/N[pl]

    I => Pro
    me => Pro
    we => Pro
    us => Pro

    book => N[sg]
    books => N[pl]

    peach => N[sg]
    peaches => N[pl]

    policeman => N[sg]
    policemen => N[pl]

    boy => N[sg]
    boys => N[pl]

    sleep => IntransVsg
    sleep => IntransVpl

    eat => IntransVpl
    eat => TransVpl
    eats => IntransVsg
    eats => TransVsg

    see => TransVpl
    sees => TransVsg
    """
)
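A minimal usage sketch of the lexicon parser above (the two-entry grammar is a made-up example, and the snippet assumes an environment where this nltk package is importable):

from nltk.ccg import lexicon

# Parse a tiny, hypothetical grammar in the format accepted by fromstring() above.
lex = lexicon.fromstring(
    """
    :- S,NP,N
    the => NP/N
    book => N
    """
)

print(lex.start())                  # the target category, here S
print(lex.categories("book")[0])    # the Token for "book", printed as its category N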
llmeval-env/lib/python3.10/site-packages/nltk/cluster/__init__.py
ADDED
@@ -0,0 +1,92 @@
# Natural Language Toolkit: Clusterers
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Trevor Cohn <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
This module contains a number of basic clustering algorithms. Clustering
describes the task of discovering groups of similar items within a large
collection. It is also described as unsupervised machine learning, as the data
from which it learns is unannotated with class information, as is the case for
supervised learning. Annotated data is difficult and expensive to obtain in
the quantities required for the majority of supervised learning algorithms.
This problem, the knowledge acquisition bottleneck, is common to most natural
language processing tasks, thus fueling the need for quality unsupervised
approaches.

This module contains a k-means clusterer, E-M clusterer and a group average
agglomerative clusterer (GAAC). All these clusterers involve finding good
cluster groupings for a set of vectors in multi-dimensional space.

The K-means clusterer starts with k arbitrary chosen means then allocates each
vector to the cluster with the closest mean. It then recalculates the means of
each cluster as the centroid of the vectors in the cluster. This process
repeats until the cluster memberships stabilise. This is a hill-climbing
algorithm which may converge to a local maximum. Hence the clustering is
often repeated with random initial means and the most commonly occurring
output means are chosen.

The GAAC clusterer starts with each of the *N* vectors as singleton clusters.
It then iteratively merges pairs of clusters which have the closest centroids.
This continues until there is only one cluster. The order of merges gives rise
to a dendrogram - a tree with the earlier merges lower than later merges. The
membership of a given number of clusters *c*, *1 <= c <= N*, can be found by
cutting the dendrogram at depth *c*.

The Gaussian EM clusterer models the vectors as being produced by a mixture
of k Gaussian sources. The parameters of these sources (prior probability,
mean and covariance matrix) are then found to maximise the likelihood of the
given data. This is done with the expectation maximisation algorithm. It
starts with k arbitrarily chosen means, priors and covariance matrices. It
then calculates the membership probabilities for each vector in each of the
clusters - this is the 'E' step. The cluster parameters are then updated in
the 'M' step using the maximum likelihood estimate from the cluster membership
probabilities. This process continues until the likelihood of the data does
not significantly increase.

They all extend the ClusterI interface which defines common operations
available with each clusterer. These operations include:

- cluster: clusters a sequence of vectors
- classify: assign a vector to a cluster
- classification_probdist: give the probability distribution over cluster memberships

The current existing classifiers also extend cluster.VectorSpace, an
abstract class which allows for singular value decomposition (SVD) and vector
normalisation. SVD is used to reduce the dimensionality of the vector space in
such a manner as to preserve as much of the variation as possible, by
reparameterising the axes in order of variability and discarding all bar the
first d dimensions. Normalisation ensures that vectors fall in the unit
hypersphere.

Usage example (see also demo())::

    from nltk import cluster
    from nltk.cluster import euclidean_distance
    from numpy import array

    vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]

    # initialise the clusterer (will also assign the vectors to clusters)
    clusterer = cluster.KMeansClusterer(2, euclidean_distance)
    clusterer.cluster(vectors, True)

    # classify a new vector
    print(clusterer.classify(array([3, 3])))

Note that the vectors must use numpy array-like
objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for
efficiency when required.
"""

from nltk.cluster.em import EMClusterer
from nltk.cluster.gaac import GAAClusterer
from nltk.cluster.kmeans import KMeansClusterer
from nltk.cluster.util import (
    Dendrogram,
    VectorSpaceClusterer,
    cosine_distance,
    euclidean_distance,
)
llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.32 kB)
llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc
ADDED
Binary file (2.43 kB)
llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc
ADDED
Binary file (6.85 kB)
llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc
ADDED
Binary file (4.99 kB)
llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc
ADDED
Binary file (6.67 kB)
llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc
ADDED
Binary file (9.85 kB)
llmeval-env/lib/python3.10/site-packages/nltk/cluster/api.py
ADDED
@@ -0,0 +1,74 @@
# Natural Language Toolkit: Clusterer Interfaces
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Trevor Cohn <[email protected]>
# Porting: Steven Bird <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

from abc import ABCMeta, abstractmethod

from nltk.probability import DictionaryProbDist


class ClusterI(metaclass=ABCMeta):
    """
    Interface covering basic clustering functionality.
    """

    @abstractmethod
    def cluster(self, vectors, assign_clusters=False):
        """
        Assigns the vectors to clusters, learning the clustering parameters
        from the data. Returns a cluster identifier for each vector.
        """

    @abstractmethod
    def classify(self, token):
        """
        Classifies the token into a cluster, setting the token's CLUSTER
        parameter to that cluster identifier.
        """

    def likelihood(self, vector, label):
        """
        Returns the likelihood (a float) of the token having the
        corresponding cluster.
        """
        if self.classify(vector) == label:
            return 1.0
        else:
            return 0.0

    def classification_probdist(self, vector):
        """
        Classifies the token into a cluster, returning
        a probability distribution over the cluster identifiers.
        """
        likelihoods = {}
        sum = 0.0
        for cluster in self.cluster_names():
            likelihoods[cluster] = self.likelihood(vector, cluster)
            sum += likelihoods[cluster]
        for cluster in self.cluster_names():
            likelihoods[cluster] /= sum
        return DictionaryProbDist(likelihoods)

    @abstractmethod
    def num_clusters(self):
        """
        Returns the number of clusters.
        """

    def cluster_names(self):
        """
        Returns the names of the clusters.
        :rtype: list
        """
        return list(range(self.num_clusters()))

    def cluster_name(self, index):
        """
        Returns the names of the cluster at index.
        """
        return index
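As an illustration of the ClusterI interface above, a toy (entirely hypothetical) subclass that buckets one-dimensional values by sign; the inherited likelihood() and classification_probdist() then work unchanged:

from nltk.cluster.api import ClusterI

class SignClusterer(ClusterI):
    """Toy clusterer: cluster 0 for negative values, cluster 1 otherwise."""

    def cluster(self, vectors, assign_clusters=False):
        return [self.classify(v) for v in vectors]

    def classify(self, token):
        return 0 if token < 0 else 1

    def num_clusters(self):
        return 2

c = SignClusterer()
print(c.cluster([-3, 0.5, 2]))               # [0, 1, 1]
print(c.classification_probdist(2).prob(1))  # 1.0, a degenerate distribution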
llmeval-env/lib/python3.10/site-packages/nltk/cluster/em.py
ADDED
@@ -0,0 +1,219 @@
# Natural Language Toolkit: Expectation Maximization Clusterer
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Trevor Cohn <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

try:
    import numpy
except ImportError:
    pass

from nltk.cluster.util import VectorSpaceClusterer


class EMClusterer(VectorSpaceClusterer):
    """
    The Gaussian EM clusterer models the vectors as being produced by
    a mixture of k Gaussian sources. The parameters of these sources
    (prior probability, mean and covariance matrix) are then found to
    maximise the likelihood of the given data. This is done with the
    expectation maximisation algorithm. It starts with k arbitrarily
    chosen means, priors and covariance matrices. It then calculates
    the membership probabilities for each vector in each of the
    clusters; this is the 'E' step. The cluster parameters are then
    updated in the 'M' step using the maximum likelihood estimate from
    the cluster membership probabilities. This process continues until
    the likelihood of the data does not significantly increase.
    """

    def __init__(
        self,
        initial_means,
        priors=None,
        covariance_matrices=None,
        conv_threshold=1e-6,
        bias=0.1,
        normalise=False,
        svd_dimensions=None,
    ):
        """
        Creates an EM clusterer with the given starting parameters,
        convergence threshold and vector mangling parameters.

        :param initial_means: the means of the gaussian cluster centers
        :type initial_means: [seq of] numpy array or seq of SparseArray
        :param priors: the prior probability for each cluster
        :type priors: numpy array or seq of float
        :param covariance_matrices: the covariance matrix for each cluster
        :type covariance_matrices: [seq of] numpy array
        :param conv_threshold: maximum change in likelihood before deemed
            convergent
        :type conv_threshold: int or float
        :param bias: variance bias used to ensure non-singular covariance
            matrices
        :type bias: float
        :param normalise: should vectors be normalised to length 1
        :type normalise: boolean
        :param svd_dimensions: number of dimensions to use in reducing vector
            dimensionality with SVD
        :type svd_dimensions: int
        """
        VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
        self._means = numpy.array(initial_means, numpy.float64)
        self._num_clusters = len(initial_means)
        self._conv_threshold = conv_threshold
        self._covariance_matrices = covariance_matrices
        self._priors = priors
        self._bias = bias

    def num_clusters(self):
        return self._num_clusters

    def cluster_vectorspace(self, vectors, trace=False):
        assert len(vectors) > 0

        # set the parameters to initial values
        dimensions = len(vectors[0])
        means = self._means
        priors = self._priors
        if not priors:
            priors = self._priors = (
                numpy.ones(self._num_clusters, numpy.float64) / self._num_clusters
            )
        covariances = self._covariance_matrices
        if not covariances:
            covariances = self._covariance_matrices = [
                numpy.identity(dimensions, numpy.float64)
                for i in range(self._num_clusters)
            ]

        # do the E and M steps until the likelihood plateaus
        lastl = self._loglikelihood(vectors, priors, means, covariances)
        converged = False

        while not converged:
            if trace:
                print("iteration; loglikelihood", lastl)
            # E-step, calculate hidden variables, h[i,j]
            h = numpy.zeros((len(vectors), self._num_clusters), numpy.float64)
            for i in range(len(vectors)):
                for j in range(self._num_clusters):
                    h[i, j] = priors[j] * self._gaussian(
                        means[j], covariances[j], vectors[i]
                    )
                h[i, :] /= sum(h[i, :])

            # M-step, update parameters - cvm, p, mean
            for j in range(self._num_clusters):
                covariance_before = covariances[j]
                new_covariance = numpy.zeros((dimensions, dimensions), numpy.float64)
                new_mean = numpy.zeros(dimensions, numpy.float64)
                sum_hj = 0.0
                for i in range(len(vectors)):
                    delta = vectors[i] - means[j]
                    new_covariance += h[i, j] * numpy.multiply.outer(delta, delta)
                    sum_hj += h[i, j]
                    new_mean += h[i, j] * vectors[i]
                covariances[j] = new_covariance / sum_hj
                means[j] = new_mean / sum_hj
                priors[j] = sum_hj / len(vectors)

                # bias term to stop covariance matrix being singular
                covariances[j] += self._bias * numpy.identity(dimensions, numpy.float64)

            # calculate likelihood - FIXME: may be broken
            l = self._loglikelihood(vectors, priors, means, covariances)

            # check for convergence
            if abs(lastl - l) < self._conv_threshold:
                converged = True
            lastl = l

    def classify_vectorspace(self, vector):
        best = None
        for j in range(self._num_clusters):
            p = self._priors[j] * self._gaussian(
                self._means[j], self._covariance_matrices[j], vector
            )
            if not best or p > best[0]:
                best = (p, j)
        return best[1]

    def likelihood_vectorspace(self, vector, cluster):
        cid = self.cluster_names().index(cluster)
        return self._priors[cluster] * self._gaussian(
            self._means[cluster], self._covariance_matrices[cluster], vector
        )

    def _gaussian(self, mean, cvm, x):
        m = len(mean)
        assert cvm.shape == (m, m), "bad sized covariance matrix, %s" % str(cvm.shape)
        try:
            det = numpy.linalg.det(cvm)
            inv = numpy.linalg.inv(cvm)
            a = det**-0.5 * (2 * numpy.pi) ** (-m / 2.0)
            dx = x - mean
            print(dx, inv)
            b = -0.5 * numpy.dot(numpy.dot(dx, inv), dx)
            return a * numpy.exp(b)
        except OverflowError:
            # happens when the exponent is negative infinity - i.e. b = 0
            # i.e. the inverse of cvm is huge (cvm is almost zero)
            return 0

    def _loglikelihood(self, vectors, priors, means, covariances):
        llh = 0.0
        for vector in vectors:
            p = 0
            for j in range(len(priors)):
                p += priors[j] * self._gaussian(means[j], covariances[j], vector)
            llh += numpy.log(p)
        return llh

    def __repr__(self):
        return "<EMClusterer means=%s>" % list(self._means)


def demo():
    """
    Non-interactive demonstration of the clusterers with simple 2-D data.
    """

    from nltk import cluster

    # example from figure 14.10, page 519, Manning and Schutze

    vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]
    means = [[4, 2], [4, 2.01]]

    clusterer = cluster.EMClusterer(means, bias=0.1)
    clusters = clusterer.cluster(vectors, True, trace=True)

    print("Clustered:", vectors)
    print("As:        ", clusters)
    print()

    for c in range(2):
        print("Cluster:", c)
        print("Prior:  ", clusterer._priors[c])
        print("Mean:   ", clusterer._means[c])
        print("Covar:  ", clusterer._covariance_matrices[c])
        print()

    # classify a new vector
    vector = numpy.array([2, 2])
    print("classify(%s):" % vector, end=" ")
    print(clusterer.classify(vector))

    # show the classification probabilities
    vector = numpy.array([2, 2])
    print("classification_probdist(%s):" % vector)
    pdist = clusterer.classification_probdist(vector)
    for sample in pdist.samples():
        print(f"{sample} => {pdist.prob(sample) * 100:.0f}%")


if __name__ == "__main__":
    demo()
llmeval-env/lib/python3.10/site-packages/nltk/cluster/gaac.py
ADDED
@@ -0,0 +1,170 @@
# Natural Language Toolkit: Group Average Agglomerative Clusterer
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Trevor Cohn <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

try:
    import numpy
except ImportError:
    pass

from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance


class GAAClusterer(VectorSpaceClusterer):
    """
    The Group Average Agglomerative clusterer starts with each of the N vectors
    as singleton clusters. It then iteratively merges pairs of clusters which
    have the closest centroids. This continues until there is only one cluster.
    The order of merges gives rise to a dendrogram: a tree with the earlier
    merges lower than later merges. The membership of a given number of
    clusters c, 1 <= c <= N, can be found by cutting the dendrogram at depth c.

    This clusterer uses the cosine similarity metric only, which allows for
    efficient speed-up in the clustering process.
    """

    def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None):
        VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
        self._num_clusters = num_clusters
        self._dendrogram = None
        self._groups_values = None

    def cluster(self, vectors, assign_clusters=False, trace=False):
        # stores the merge order
        self._dendrogram = Dendrogram(
            [numpy.array(vector, numpy.float64) for vector in vectors]
        )
        return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace)

    def cluster_vectorspace(self, vectors, trace=False):
        # variables describing the initial situation
        N = len(vectors)
        cluster_len = [1] * N
        cluster_count = N
        index_map = numpy.arange(N)

        # construct the similarity matrix
        dims = (N, N)
        dist = numpy.ones(dims, dtype=float) * numpy.inf
        for i in range(N):
            for j in range(i + 1, N):
                dist[i, j] = cosine_distance(vectors[i], vectors[j])

        while cluster_count > max(self._num_clusters, 1):
            i, j = numpy.unravel_index(dist.argmin(), dims)
            if trace:
                print("merging %d and %d" % (i, j))

            # update similarities for merging i and j
            self._merge_similarities(dist, cluster_len, i, j)

            # remove j
            dist[:, j] = numpy.inf
            dist[j, :] = numpy.inf

            # merge the clusters
            cluster_len[i] = cluster_len[i] + cluster_len[j]
            self._dendrogram.merge(index_map[i], index_map[j])
            cluster_count -= 1

            # update the index map to reflect the indexes if we
            # had removed j
            index_map[j + 1 :] -= 1
            index_map[j] = N

        self.update_clusters(self._num_clusters)

    def _merge_similarities(self, dist, cluster_len, i, j):
        # the new cluster i merged from i and j adopts the average of
        # i and j's similarity to each other cluster, weighted by the
        # number of points in the clusters i and j
        i_weight = cluster_len[i]
        j_weight = cluster_len[j]
        weight_sum = i_weight + j_weight

        # update for x<i
        dist[:i, i] = dist[:i, i] * i_weight + dist[:i, j] * j_weight
        dist[:i, i] /= weight_sum
        # update for i<x<j
        dist[i, i + 1 : j] = (
            dist[i, i + 1 : j] * i_weight + dist[i + 1 : j, j] * j_weight
        )
        # update for i<j<x
        dist[i, j + 1 :] = dist[i, j + 1 :] * i_weight + dist[j, j + 1 :] * j_weight
        dist[i, i + 1 :] /= weight_sum

    def update_clusters(self, num_clusters):
        clusters = self._dendrogram.groups(num_clusters)
        self._centroids = []
        for cluster in clusters:
            assert len(cluster) > 0
            if self._should_normalise:
                centroid = self._normalise(cluster[0])
            else:
                centroid = numpy.array(cluster[0])
            for vector in cluster[1:]:
                if self._should_normalise:
                    centroid += self._normalise(vector)
                else:
                    centroid += vector
            centroid /= len(cluster)
            self._centroids.append(centroid)
        self._num_clusters = len(self._centroids)

    def classify_vectorspace(self, vector):
        best = None
        for i in range(self._num_clusters):
            centroid = self._centroids[i]
            dist = cosine_distance(vector, centroid)
            if not best or dist < best[0]:
                best = (dist, i)
        return best[1]

    def dendrogram(self):
        """
        :return: The dendrogram representing the current clustering
        :rtype: Dendrogram
        """
        return self._dendrogram

    def num_clusters(self):
        return self._num_clusters

    def __repr__(self):
        return "<GroupAverageAgglomerative Clusterer n=%d>" % self._num_clusters


def demo():
    """
    Non-interactive demonstration of the clusterers with simple 2-D data.
    """

    from nltk.cluster import GAAClusterer

    # use a set of tokens with 2D indices
    vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    # test the GAAC clusterer with 4 clusters
    clusterer = GAAClusterer(4)
    clusters = clusterer.cluster(vectors, True)

    print("Clusterer:", clusterer)
    print("Clustered:", vectors)
    print("As:", clusters)
    print()

    # show the dendrogram
    clusterer.dendrogram().show()

    # classify a new vector
    vector = numpy.array([3, 3])
    print("classify(%s):" % vector, end=" ")
    print(clusterer.classify(vector))
    print()


if __name__ == "__main__":
    demo()
llmeval-env/lib/python3.10/site-packages/nltk/cluster/kmeans.py
ADDED
@@ -0,0 +1,231 @@
# Natural Language Toolkit: K-Means Clusterer
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Trevor Cohn <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

import copy
import random
import sys

try:
    import numpy
except ImportError:
    pass


from nltk.cluster.util import VectorSpaceClusterer


class KMeansClusterer(VectorSpaceClusterer):
    """
    The K-means clusterer starts with k arbitrary chosen means then allocates
    each vector to the cluster with the closest mean. It then recalculates the
    means of each cluster as the centroid of the vectors in the cluster. This
    process repeats until the cluster memberships stabilise. This is a
    hill-climbing algorithm which may converge to a local maximum. Hence the
    clustering is often repeated with random initial means and the most
    commonly occurring output means are chosen.
    """

    def __init__(
        self,
        num_means,
        distance,
        repeats=1,
        conv_test=1e-6,
        initial_means=None,
        normalise=False,
        svd_dimensions=None,
        rng=None,
        avoid_empty_clusters=False,
    ):

        """
        :param num_means: the number of means to use (may use fewer)
        :type num_means: int
        :param distance: measure of distance between two vectors
        :type distance: function taking two vectors and returning a float
        :param repeats: number of randomised clustering trials to use
        :type repeats: int
        :param conv_test: maximum variation in mean differences before
            deemed convergent
        :type conv_test: number
        :param initial_means: set of k initial means
        :type initial_means: sequence of vectors
        :param normalise: should vectors be normalised to length 1
        :type normalise: boolean
        :param svd_dimensions: number of dimensions to use in reducing vector
            dimensionality with SVD
        :type svd_dimensions: int
        :param rng: random number generator (or None)
        :type rng: Random
        :param avoid_empty_clusters: include current centroid in computation
            of next one; avoids undefined behavior
            when clusters become empty
        :type avoid_empty_clusters: boolean
        """
        VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
        self._num_means = num_means
        self._distance = distance
        self._max_difference = conv_test
        assert not initial_means or len(initial_means) == num_means
        self._means = initial_means
        assert repeats >= 1
        assert not (initial_means and repeats > 1)
        self._repeats = repeats
        self._rng = rng if rng else random.Random()
        self._avoid_empty_clusters = avoid_empty_clusters

    def cluster_vectorspace(self, vectors, trace=False):
        if self._means and self._repeats > 1:
            print("Warning: means will be discarded for subsequent trials")

        meanss = []
        for trial in range(self._repeats):
            if trace:
                print("k-means trial", trial)
            if not self._means or trial > 1:
                self._means = self._rng.sample(list(vectors), self._num_means)
            self._cluster_vectorspace(vectors, trace)
            meanss.append(self._means)

        if len(meanss) > 1:
            # sort the means first (so that different cluster numbering won't
            # affect the distance comparison)
            for means in meanss:
                means.sort(key=sum)

            # find the set of means that's minimally different from the others
            min_difference = min_means = None
            for i in range(len(meanss)):
                d = 0
                for j in range(len(meanss)):
                    if i != j:
                        d += self._sum_distances(meanss[i], meanss[j])
                if min_difference is None or d < min_difference:
                    min_difference, min_means = d, meanss[i]

            # use the best means
            self._means = min_means

    def _cluster_vectorspace(self, vectors, trace=False):
        if self._num_means < len(vectors):
            # perform k-means clustering
            converged = False
            while not converged:
                # assign the tokens to clusters based on minimum distance to
                # the cluster means
                clusters = [[] for m in range(self._num_means)]
                for vector in vectors:
                    index = self.classify_vectorspace(vector)
                    clusters[index].append(vector)

                if trace:
                    print("iteration")
                # for i in range(self._num_means):
                #     print ' mean', i, 'allocated', len(clusters[i]), 'vectors'

                # recalculate cluster means by computing the centroid of each cluster
                new_means = list(map(self._centroid, clusters, self._means))

                # measure the degree of change from the previous step for convergence
                difference = self._sum_distances(self._means, new_means)
                if difference < self._max_difference:
                    converged = True

                # remember the new means
                self._means = new_means

    def classify_vectorspace(self, vector):
        # finds the closest cluster centroid
        # returns that cluster's index
        best_distance = best_index = None
        for index in range(len(self._means)):
            mean = self._means[index]
            dist = self._distance(vector, mean)
            if best_distance is None or dist < best_distance:
                best_index, best_distance = index, dist
        return best_index

    def num_clusters(self):
        if self._means:
            return len(self._means)
        else:
            return self._num_means

    def means(self):
        """
        The means used for clustering.
        """
        return self._means

    def _sum_distances(self, vectors1, vectors2):
        difference = 0.0
        for u, v in zip(vectors1, vectors2):
            difference += self._distance(u, v)
        return difference

    def _centroid(self, cluster, mean):
        if self._avoid_empty_clusters:
            centroid = copy.copy(mean)
            for vector in cluster:
                centroid += vector
            return centroid / (1 + len(cluster))
        else:
            if not len(cluster):
                sys.stderr.write("Error: no centroid defined for empty cluster.\n")
                sys.stderr.write(
                    "Try setting argument 'avoid_empty_clusters' to True\n"
                )
                assert False
            centroid = copy.copy(cluster[0])
            for vector in cluster[1:]:
                centroid += vector
            return centroid / len(cluster)

    def __repr__(self):
        return "<KMeansClusterer means=%s repeats=%d>" % (self._means, self._repeats)


#################################################################################


def demo():
    # example from figure 14.9, page 517, Manning and Schutze

    from nltk.cluster import KMeansClusterer, euclidean_distance

    vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]]
    means = [[4, 3], [5, 5]]

    clusterer = KMeansClusterer(2, euclidean_distance, initial_means=means)
    clusters = clusterer.cluster(vectors, True, trace=True)

    print("Clustered:", vectors)
    print("As:", clusters)
    print("Means:", clusterer.means())
    print()

    vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

    # test k-means using the euclidean distance metric, 2 means and repeat
    # clustering 10 times with random seeds

    clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
    clusters = clusterer.cluster(vectors, True)
    print("Clustered:", vectors)
    print("As:", clusters)
    print("Means:", clusterer.means())
    print()

    # classify a new vector
    vector = numpy.array([3, 3])
    print("classify(%s):" % vector, end=" ")
    print(clusterer.classify(vector))
    print()


if __name__ == "__main__":
    demo()
llmeval-env/lib/python3.10/site-packages/nltk/cluster/util.py
ADDED
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Natural Language Toolkit: Clusterer Utilities
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Trevor Cohn <[email protected]>
|
5 |
+
# Contributor: J Richard Snape
|
6 |
+
# URL: <https://www.nltk.org/>
|
7 |
+
# For license information, see LICENSE.TXT
|
8 |
+
import copy
|
9 |
+
from abc import abstractmethod
|
10 |
+
from math import sqrt
|
11 |
+
from sys import stdout
|
12 |
+
|
13 |
+
try:
|
14 |
+
import numpy
|
15 |
+
except ImportError:
|
16 |
+
pass
|
17 |
+
|
18 |
+
from nltk.cluster.api import ClusterI
|
19 |
+
|
20 |
+
|
21 |
+
class VectorSpaceClusterer(ClusterI):
|
22 |
+
"""
|
23 |
+
Abstract clusterer which takes tokens and maps them into a vector space.
|
24 |
+
Optionally performs singular value decomposition to reduce the
|
25 |
+
dimensionality.
|
26 |
+
"""
|
27 |
+
|
28 |
+
def __init__(self, normalise=False, svd_dimensions=None):
|
29 |
+
"""
|
30 |
+
:param normalise: should vectors be normalised to length 1
|
31 |
+
:type normalise: boolean
|
32 |
+
:param svd_dimensions: number of dimensions to use in reducing vector
|
33 |
+
dimensionsionality with SVD
|
34 |
+
:type svd_dimensions: int
|
35 |
+
"""
|
36 |
+
self._Tt = None
|
37 |
+
self._should_normalise = normalise
|
38 |
+
self._svd_dimensions = svd_dimensions
|
39 |
+
|
40 |
+
def cluster(self, vectors, assign_clusters=False, trace=False):
|
41 |
+
assert len(vectors) > 0
|
42 |
+
|
43 |
+
# normalise the vectors
|
44 |
+
if self._should_normalise:
|
45 |
+
vectors = list(map(self._normalise, vectors))
|
46 |
+
|
47 |
+
# use SVD to reduce the dimensionality
|
48 |
+
if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
|
49 |
+
[u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors)))
|
50 |
+
S = d[: self._svd_dimensions] * numpy.identity(
|
51 |
+
self._svd_dimensions, numpy.float64
|
52 |
+
)
|
53 |
+
T = u[:, : self._svd_dimensions]
|
54 |
+
Dt = vt[: self._svd_dimensions, :]
|
55 |
+
vectors = numpy.transpose(numpy.dot(S, Dt))
|
56 |
+
self._Tt = numpy.transpose(T)
|
57 |
+
|
58 |
+
# call abstract method to cluster the vectors
|
59 |
+
self.cluster_vectorspace(vectors, trace)
|
60 |
+
|
61 |
+
# assign the vectors to clusters
|
62 |
+
if assign_clusters:
|
63 |
+
return [self.classify(vector) for vector in vectors]
|
64 |
+
|
65 |
+
@abstractmethod
|
66 |
+
def cluster_vectorspace(self, vectors, trace):
|
67 |
+
"""
|
68 |
+
Finds the clusters using the given set of vectors.
|
69 |
+
"""
|
70 |
+
|
71 |
+
def classify(self, vector):
|
72 |
+
if self._should_normalise:
|
73 |
+
vector = self._normalise(vector)
|
74 |
+
if self._Tt is not None:
|
75 |
+
vector = numpy.dot(self._Tt, vector)
|
76 |
+
cluster = self.classify_vectorspace(vector)
|
77 |
+
return self.cluster_name(cluster)
|
78 |
+
|
79 |
+
@abstractmethod
|
80 |
+
def classify_vectorspace(self, vector):
|
81 |
+
"""
|
82 |
+
Returns the index of the appropriate cluster for the vector.
|
83 |
+
"""
|
84 |
+
|
85 |
+
def likelihood(self, vector, label):
|
86 |
+
if self._should_normalise:
|
87 |
+
vector = self._normalise(vector)
|
88 |
+
if self._Tt is not None:
|
89 |
+
vector = numpy.dot(self._Tt, vector)
|
90 |
+
return self.likelihood_vectorspace(vector, label)
|
91 |
+
|
92 |
+
def likelihood_vectorspace(self, vector, cluster):
|
93 |
+
"""
|
94 |
+
Returns the likelihood of the vector belonging to the cluster.
|
95 |
+
"""
|
96 |
+
predicted = self.classify_vectorspace(vector)
|
97 |
+
return 1.0 if cluster == predicted else 0.0
|
98 |
+
|
99 |
+
def vector(self, vector):
|
100 |
+
"""
|
101 |
+
Returns the vector after normalisation and dimensionality reduction
|
102 |
+
"""
|
103 |
+
if self._should_normalise:
|
104 |
+
vector = self._normalise(vector)
|
105 |
+
if self._Tt is not None:
|
106 |
+
vector = numpy.dot(self._Tt, vector)
|
107 |
+
return vector
|
108 |
+
|
109 |
+
def _normalise(self, vector):
|
110 |
+
"""
|
111 |
+
Normalises the vector to unit length.
|
112 |
+
"""
|
113 |
+
return vector / sqrt(numpy.dot(vector, vector))
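A minimal usage sketch (not part of the original module), assuming the concrete KMeansClusterer subclass from nltk.cluster.kmeans, which supplies cluster_vectorspace() and classify_vectorspace():

import numpy
from nltk.cluster import KMeansClusterer, euclidean_distance

# four 2-d points, clustered into two groups
vectors = [numpy.array(f) for f in ([3, 3], [1, 2], [4, 2], [4, 0])]
clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
assignments = clusterer.cluster(vectors, assign_clusters=True)  # e.g. [0, 1, 0, 0]
print(assignments)
print(clusterer.classify(numpy.array([3, 3])))  # index of the nearest mean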
|
114 |
+
|
115 |
+
|
116 |
+
def euclidean_distance(u, v):
|
117 |
+
"""
|
118 |
+
Returns the euclidean distance between vectors u and v. This is equivalent
|
119 |
+
to the length of the vector (u - v).
|
120 |
+
"""
|
121 |
+
diff = u - v
|
122 |
+
return sqrt(numpy.dot(diff, diff))
|
123 |
+
|
124 |
+
|
125 |
+
def cosine_distance(u, v):
|
126 |
+
"""
|
127 |
+
Returns 1 minus the cosine of the angle between vectors v and u. This is
|
128 |
+
equal to ``1 - (u.v / |u||v|)``.
|
129 |
+
"""
|
130 |
+
return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v))))
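A quick sanity check of the two distance helpers defined above:

import numpy

u, v = numpy.array([1.0, 0.0]), numpy.array([0.0, 1.0])
print(euclidean_distance(u, v))  # sqrt(2), about 1.414
print(cosine_distance(u, v))     # 1.0 for orthogonal vectors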
|
131 |
+
|
132 |
+
|
133 |
+
class _DendrogramNode:
|
134 |
+
"""Tree node of a dendrogram."""
|
135 |
+
|
136 |
+
def __init__(self, value, *children):
|
137 |
+
self._value = value
|
138 |
+
self._children = children
|
139 |
+
|
140 |
+
def leaves(self, values=True):
|
141 |
+
if self._children:
|
142 |
+
leaves = []
|
143 |
+
for child in self._children:
|
144 |
+
leaves.extend(child.leaves(values))
|
145 |
+
return leaves
|
146 |
+
elif values:
|
147 |
+
return [self._value]
|
148 |
+
else:
|
149 |
+
return [self]
|
150 |
+
|
151 |
+
def groups(self, n):
|
152 |
+
queue = [(self._value, self)]
|
153 |
+
|
154 |
+
while len(queue) < n:
|
155 |
+
priority, node = queue.pop()
|
156 |
+
if not node._children:
|
157 |
+
queue.append((priority, node))  # list has no push(); append re-queues the leaf node
|
158 |
+
break
|
159 |
+
for child in node._children:
|
160 |
+
if child._children:
|
161 |
+
queue.append((child._value, child))
|
162 |
+
else:
|
163 |
+
queue.append((0, child))
|
164 |
+
# makes the earliest merges at the start, latest at the end
|
165 |
+
queue.sort()
|
166 |
+
|
167 |
+
groups = []
|
168 |
+
for priority, node in queue:
|
169 |
+
groups.append(node.leaves())
|
170 |
+
return groups
|
171 |
+
|
172 |
+
def __lt__(self, comparator):
|
173 |
+
return cosine_distance(self._value, comparator._value) < 0
|
174 |
+
|
175 |
+
|
176 |
+
class Dendrogram:
|
177 |
+
"""
|
178 |
+
Represents a dendrogram, a tree with a specified branching order. This
|
179 |
+
must be initialised with the leaf items, then iteratively call merge for
|
180 |
+
each branch. This class constructs a tree representing the order of calls
|
181 |
+
to the merge function.
|
182 |
+
"""
|
183 |
+
|
184 |
+
def __init__(self, items=[]):
|
185 |
+
"""
|
186 |
+
:param items: the items at the leaves of the dendrogram
|
187 |
+
:type items: sequence of (any)
|
188 |
+
"""
|
189 |
+
self._items = [_DendrogramNode(item) for item in items]
|
190 |
+
self._original_items = copy.copy(self._items)
|
191 |
+
self._merge = 1
|
192 |
+
|
193 |
+
def merge(self, *indices):
|
194 |
+
"""
|
195 |
+
Merges nodes at given indices in the dendrogram. The nodes will be
|
196 |
+
combined which then replaces the first node specified. All other nodes
|
197 |
+
involved in the merge will be removed.
|
198 |
+
|
199 |
+
:param indices: indices of the items to merge (at least two)
|
200 |
+
:type indices: seq of int
|
201 |
+
"""
|
202 |
+
assert len(indices) >= 2
|
203 |
+
node = _DendrogramNode(self._merge, *(self._items[i] for i in indices))
|
204 |
+
self._merge += 1
|
205 |
+
self._items[indices[0]] = node
|
206 |
+
for i in indices[1:]:
|
207 |
+
del self._items[i]
|
208 |
+
|
209 |
+
def groups(self, n):
|
210 |
+
"""
|
211 |
+
Finds the n-groups of items (leaves) reachable from a cut at depth n.
|
212 |
+
:param n: number of groups
|
213 |
+
:type n: int
|
214 |
+
"""
|
215 |
+
if len(self._items) > 1:
|
216 |
+
root = _DendrogramNode(self._merge, *self._items)
|
217 |
+
else:
|
218 |
+
root = self._items[0]
|
219 |
+
return root.groups(n)
|
220 |
+
|
221 |
+
def show(self, leaf_labels=[]):
|
222 |
+
"""
|
223 |
+
Print the dendrogram in ASCII art to standard out.
|
224 |
+
|
225 |
+
:param leaf_labels: an optional list of strings to use for labeling the
|
226 |
+
leaves
|
227 |
+
:type leaf_labels: list
|
228 |
+
"""
|
229 |
+
|
230 |
+
# ASCII rendering characters
|
231 |
+
JOIN, HLINK, VLINK = "+", "-", "|"
|
232 |
+
|
233 |
+
# find the root (or create one)
|
234 |
+
if len(self._items) > 1:
|
235 |
+
root = _DendrogramNode(self._merge, *self._items)
|
236 |
+
else:
|
237 |
+
root = self._items[0]
|
238 |
+
leaves = self._original_items
|
239 |
+
|
240 |
+
if leaf_labels:
|
241 |
+
last_row = leaf_labels
|
242 |
+
else:
|
243 |
+
last_row = ["%s" % leaf._value for leaf in leaves]
|
244 |
+
|
245 |
+
# find the bottom row and the best cell width
|
246 |
+
width = max(map(len, last_row)) + 1
|
247 |
+
lhalf = width // 2
|
248 |
+
rhalf = int(width - lhalf - 1)
|
249 |
+
|
250 |
+
# display functions
|
251 |
+
def format(centre, left=" ", right=" "):
|
252 |
+
return f"{lhalf * left}{centre}{right * rhalf}"
|
253 |
+
|
254 |
+
def display(str):
|
255 |
+
stdout.write(str)
|
256 |
+
|
257 |
+
# for each merge, top down
|
258 |
+
queue = [(root._value, root)]
|
259 |
+
verticals = [format(" ") for leaf in leaves]
|
260 |
+
while queue:
|
261 |
+
priority, node = queue.pop()
|
262 |
+
child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children))
|
263 |
+
indices = list(map(leaves.index, child_left_leaf))
|
264 |
+
if child_left_leaf:
|
265 |
+
min_idx = min(indices)
|
266 |
+
max_idx = max(indices)
|
267 |
+
for i in range(len(leaves)):
|
268 |
+
if leaves[i] in child_left_leaf:
|
269 |
+
if i == min_idx:
|
270 |
+
display(format(JOIN, " ", HLINK))
|
271 |
+
elif i == max_idx:
|
272 |
+
display(format(JOIN, HLINK, " "))
|
273 |
+
else:
|
274 |
+
display(format(JOIN, HLINK, HLINK))
|
275 |
+
verticals[i] = format(VLINK)
|
276 |
+
elif min_idx <= i <= max_idx:
|
277 |
+
display(format(HLINK, HLINK, HLINK))
|
278 |
+
else:
|
279 |
+
display(verticals[i])
|
280 |
+
display("\n")
|
281 |
+
for child in node._children:
|
282 |
+
if child._children:
|
283 |
+
queue.append((child._value, child))
|
284 |
+
queue.sort()
|
285 |
+
|
286 |
+
for vertical in verticals:
|
287 |
+
display(vertical)
|
288 |
+
display("\n")
|
289 |
+
|
290 |
+
# finally, display the last line
|
291 |
+
display("".join(item.center(width) for item in last_row))
|
292 |
+
display("\n")
|
293 |
+
|
294 |
+
def __repr__(self):
|
295 |
+
if len(self._items) > 1:
|
296 |
+
root = _DendrogramNode(self._merge, *self._items)
|
297 |
+
else:
|
298 |
+
root = self._items[0]
|
299 |
+
leaves = root.leaves(False)
|
300 |
+
return "<Dendrogram with %d leaves>" % len(leaves)
|
llmeval-env/lib/python3.10/site-packages/nltk/parse/__init__.py
ADDED
@@ -0,0 +1,102 @@
1 |
+
# Natural Language Toolkit: Parsers
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Steven Bird <[email protected]>
|
5 |
+
# Edward Loper <[email protected]>
|
6 |
+
# URL: <https://www.nltk.org/>
|
7 |
+
# For license information, see LICENSE.TXT
|
8 |
+
#
|
9 |
+
|
10 |
+
"""
|
11 |
+
NLTK Parsers
|
12 |
+
|
13 |
+
Classes and interfaces for producing tree structures that represent
|
14 |
+
the internal organization of a text. This task is known as "parsing"
|
15 |
+
the text, and the resulting tree structures are called the text's
|
16 |
+
"parses". Typically, the text is a single sentence, and the tree
|
17 |
+
structure represents the syntactic structure of the sentence.
|
18 |
+
However, parsers can also be used in other domains. For example,
|
19 |
+
parsers can be used to derive the morphological structure of the
|
20 |
+
morphemes that make up a word, or to derive the discourse structure
|
21 |
+
for a set of utterances.
|
22 |
+
|
23 |
+
Sometimes, a single piece of text can be represented by more than one
|
24 |
+
tree structure. Texts represented by more than one tree structure are
|
25 |
+
called "ambiguous" texts. Note that there are actually two ways in
|
26 |
+
which a text can be ambiguous:
|
27 |
+
|
28 |
+
- The text has multiple correct parses.
|
29 |
+
- There is not enough information to decide which of several
|
30 |
+
candidate parses is correct.
|
31 |
+
|
32 |
+
However, the parser module does *not* distinguish these two types of
|
33 |
+
ambiguity.
|
34 |
+
|
35 |
+
The parser module defines ``ParserI``, a standard interface for parsing
|
36 |
+
texts; and two simple implementations of that interface,
|
37 |
+
``ShiftReduceParser`` and ``RecursiveDescentParser``. It also contains
|
38 |
+
three sub-modules for specialized kinds of parsing:
|
39 |
+
|
40 |
+
- ``nltk.parser.chart`` defines chart parsing, which uses dynamic
|
41 |
+
programming to efficiently parse texts.
|
42 |
+
- ``nltk.parser.probabilistic`` defines probabilistic parsing, which
|
43 |
+
associates a probability with each parse.
|
44 |
+
"""
|
45 |
+
|
46 |
+
from nltk.parse.api import ParserI
|
47 |
+
from nltk.parse.bllip import BllipParser
|
48 |
+
from nltk.parse.chart import (
|
49 |
+
BottomUpChartParser,
|
50 |
+
BottomUpLeftCornerChartParser,
|
51 |
+
ChartParser,
|
52 |
+
LeftCornerChartParser,
|
53 |
+
SteppingChartParser,
|
54 |
+
TopDownChartParser,
|
55 |
+
)
|
56 |
+
from nltk.parse.corenlp import CoreNLPDependencyParser, CoreNLPParser
|
57 |
+
from nltk.parse.dependencygraph import DependencyGraph
|
58 |
+
from nltk.parse.earleychart import (
|
59 |
+
EarleyChartParser,
|
60 |
+
FeatureEarleyChartParser,
|
61 |
+
FeatureIncrementalBottomUpChartParser,
|
62 |
+
FeatureIncrementalBottomUpLeftCornerChartParser,
|
63 |
+
FeatureIncrementalChartParser,
|
64 |
+
FeatureIncrementalTopDownChartParser,
|
65 |
+
IncrementalBottomUpChartParser,
|
66 |
+
IncrementalBottomUpLeftCornerChartParser,
|
67 |
+
IncrementalChartParser,
|
68 |
+
IncrementalLeftCornerChartParser,
|
69 |
+
IncrementalTopDownChartParser,
|
70 |
+
)
|
71 |
+
from nltk.parse.evaluate import DependencyEvaluator
|
72 |
+
from nltk.parse.featurechart import (
|
73 |
+
FeatureBottomUpChartParser,
|
74 |
+
FeatureBottomUpLeftCornerChartParser,
|
75 |
+
FeatureChartParser,
|
76 |
+
FeatureTopDownChartParser,
|
77 |
+
)
|
78 |
+
from nltk.parse.malt import MaltParser
|
79 |
+
from nltk.parse.nonprojectivedependencyparser import (
|
80 |
+
NaiveBayesDependencyScorer,
|
81 |
+
NonprojectiveDependencyParser,
|
82 |
+
ProbabilisticNonprojectiveParser,
|
83 |
+
)
|
84 |
+
from nltk.parse.pchart import (
|
85 |
+
BottomUpProbabilisticChartParser,
|
86 |
+
InsideChartParser,
|
87 |
+
LongestChartParser,
|
88 |
+
RandomChartParser,
|
89 |
+
UnsortedChartParser,
|
90 |
+
)
|
91 |
+
from nltk.parse.projectivedependencyparser import (
|
92 |
+
ProbabilisticProjectiveDependencyParser,
|
93 |
+
ProjectiveDependencyParser,
|
94 |
+
)
|
95 |
+
from nltk.parse.recursivedescent import (
|
96 |
+
RecursiveDescentParser,
|
97 |
+
SteppingRecursiveDescentParser,
|
98 |
+
)
|
99 |
+
from nltk.parse.shiftreduce import ShiftReduceParser, SteppingShiftReduceParser
|
100 |
+
from nltk.parse.transitionparser import TransitionParser
|
101 |
+
from nltk.parse.util import TestGrammar, extract_test_sentences, load_parser
|
102 |
+
from nltk.parse.viterbi import ViterbiParser
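An illustrative sketch (not part of this package __init__): one of the chart parsers re-exported above, applied to a toy CFG.

from nltk import CFG
from nltk.parse import ChartParser

grammar = CFG.fromstring("""
    S -> NP VP
    NP -> 'I' | Det N
    VP -> V NP
    Det -> 'an'
    N -> 'elephant'
    V -> 'shot'
""")
parser = ChartParser(grammar)
for tree in parser.parse("I shot an elephant".split()):
    print(tree)  # (S (NP I) (VP (V shot) (NP (Det an) (N elephant))))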
|
llmeval-env/lib/python3.10/site-packages/nltk/parse/api.py
ADDED
@@ -0,0 +1,72 @@
1 |
+
# Natural Language Toolkit: Parser API
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Steven Bird <[email protected]>
|
5 |
+
# Edward Loper <[email protected]>
|
6 |
+
# URL: <https://www.nltk.org/>
|
7 |
+
# For license information, see LICENSE.TXT
|
8 |
+
#
|
9 |
+
|
10 |
+
import itertools
|
11 |
+
|
12 |
+
from nltk.internals import overridden
|
13 |
+
|
14 |
+
|
15 |
+
class ParserI:
|
16 |
+
"""
|
17 |
+
A processing class for deriving trees that represent possible
|
18 |
+
structures for a sequence of tokens. These tree structures are
|
19 |
+
known as "parses". Typically, parsers are used to derive syntax
|
20 |
+
trees for sentences. But parsers can also be used to derive other
|
21 |
+
kinds of tree structure, such as morphological trees and discourse
|
22 |
+
structures.
|
23 |
+
|
24 |
+
Subclasses must define:
|
25 |
+
- at least one of: ``parse()``, ``parse_sents()``.
|
26 |
+
|
27 |
+
Subclasses may define:
|
28 |
+
- ``grammar()``
|
29 |
+
"""
|
30 |
+
|
31 |
+
def grammar(self):
|
32 |
+
"""
|
33 |
+
:return: The grammar used by this parser.
|
34 |
+
"""
|
35 |
+
raise NotImplementedError()
|
36 |
+
|
37 |
+
def parse(self, sent, *args, **kwargs):
|
38 |
+
"""
|
39 |
+
:return: An iterator that generates parse trees for the sentence.
|
40 |
+
When possible this list is sorted from most likely to least likely.
|
41 |
+
|
42 |
+
:param sent: The sentence to be parsed
|
43 |
+
:type sent: list(str)
|
44 |
+
:rtype: iter(Tree)
|
45 |
+
"""
|
46 |
+
if overridden(self.parse_sents):
|
47 |
+
return next(self.parse_sents([sent], *args, **kwargs))
|
48 |
+
elif overridden(self.parse_one):
|
49 |
+
return (
|
50 |
+
tree
|
51 |
+
for tree in [self.parse_one(sent, *args, **kwargs)]
|
52 |
+
if tree is not None
|
53 |
+
)
|
54 |
+
elif overridden(self.parse_all):
|
55 |
+
return iter(self.parse_all(sent, *args, **kwargs))
|
56 |
+
else:
|
57 |
+
raise NotImplementedError()
|
58 |
+
|
59 |
+
def parse_sents(self, sents, *args, **kwargs):
|
60 |
+
"""
|
61 |
+
Apply ``self.parse()`` to each element of ``sents``.
|
62 |
+
:rtype: iter(iter(Tree))
|
63 |
+
"""
|
64 |
+
return (self.parse(sent, *args, **kwargs) for sent in sents)
|
65 |
+
|
66 |
+
def parse_all(self, sent, *args, **kwargs):
|
67 |
+
""":rtype: list(Tree)"""
|
68 |
+
return list(self.parse(sent, *args, **kwargs))
|
69 |
+
|
70 |
+
def parse_one(self, sent, *args, **kwargs):
|
71 |
+
""":rtype: Tree or None"""
|
72 |
+
return next(self.parse(sent, *args, **kwargs), None)
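A minimal sketch of the contract above: a subclass that defines only parse() inherits working parse_sents(), parse_all() and parse_one().

from nltk import Tree
from nltk.parse.api import ParserI

class FlatParser(ParserI):
    """Toy parser that wraps every sentence in a flat S tree."""
    def parse(self, sent, *args, **kwargs):
        yield Tree("S", list(sent))

p = FlatParser()
print(p.parse_one("the dog barks".split()))  # (S the dog barks)
print(p.parse_all("the dog barks".split()))  # [Tree('S', ['the', 'dog', 'barks'])]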
|
llmeval-env/lib/python3.10/site-packages/nltk/parse/dependencygraph.py
ADDED
@@ -0,0 +1,799 @@
1 |
+
# Natural Language Toolkit: Dependency Grammars
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Jason Narad <[email protected]>
|
5 |
+
# Steven Bird <[email protected]> (modifications)
|
6 |
+
#
|
7 |
+
# URL: <https://www.nltk.org/>
|
8 |
+
# For license information, see LICENSE.TXT
|
9 |
+
#
|
10 |
+
|
11 |
+
"""
|
12 |
+
Tools for reading and writing dependency trees.
|
13 |
+
The input is assumed to be in Malt-TAB format
|
14 |
+
(https://stp.lingfil.uu.se/~nivre/research/MaltXML.html).
|
15 |
+
"""
|
16 |
+
|
17 |
+
import subprocess
|
18 |
+
import warnings
|
19 |
+
from collections import defaultdict
|
20 |
+
from itertools import chain
|
21 |
+
from pprint import pformat
|
22 |
+
|
23 |
+
from nltk.internals import find_binary
|
24 |
+
from nltk.tree import Tree
|
25 |
+
|
26 |
+
#################################################################
|
27 |
+
# DependencyGraph Class
|
28 |
+
#################################################################
|
29 |
+
|
30 |
+
|
31 |
+
class DependencyGraph:
|
32 |
+
"""
|
33 |
+
A container for the nodes and labelled edges of a dependency structure.
|
34 |
+
"""
|
35 |
+
|
36 |
+
def __init__(
|
37 |
+
self,
|
38 |
+
tree_str=None,
|
39 |
+
cell_extractor=None,
|
40 |
+
zero_based=False,
|
41 |
+
cell_separator=None,
|
42 |
+
top_relation_label="ROOT",
|
43 |
+
):
|
44 |
+
"""Dependency graph.
|
45 |
+
|
46 |
+
We place a dummy `TOP` node with the index 0, since the root node is
|
47 |
+
often assigned 0 as its head. This also means that the indexing of the
|
48 |
+
nodes corresponds directly to the Malt-TAB format, which starts at 1.
|
49 |
+
|
50 |
+
If zero-based is True, then Malt-TAB-like input with node numbers
|
51 |
+
starting at 0 and the root node assigned -1 (as produced by, e.g.,
|
52 |
+
zpar).
|
53 |
+
|
54 |
+
:param str cell_separator: the cell separator. If not provided, cells
|
55 |
+
are split by whitespace.
|
56 |
+
|
57 |
+
:param str top_relation_label: the label by which the top relation is
|
58 |
+
identified, for example, `ROOT`, `null` or `TOP`.
|
59 |
+
"""
|
60 |
+
self.nodes = defaultdict(
|
61 |
+
lambda: {
|
62 |
+
"address": None,
|
63 |
+
"word": None,
|
64 |
+
"lemma": None,
|
65 |
+
"ctag": None,
|
66 |
+
"tag": None,
|
67 |
+
"feats": None,
|
68 |
+
"head": None,
|
69 |
+
"deps": defaultdict(list),
|
70 |
+
"rel": None,
|
71 |
+
}
|
72 |
+
)
|
73 |
+
|
74 |
+
self.nodes[0].update({"ctag": "TOP", "tag": "TOP", "address": 0})
|
75 |
+
|
76 |
+
self.root = None
|
77 |
+
|
78 |
+
if tree_str:
|
79 |
+
self._parse(
|
80 |
+
tree_str,
|
81 |
+
cell_extractor=cell_extractor,
|
82 |
+
zero_based=zero_based,
|
83 |
+
cell_separator=cell_separator,
|
84 |
+
top_relation_label=top_relation_label,
|
85 |
+
)
|
86 |
+
|
87 |
+
def remove_by_address(self, address):
|
88 |
+
"""
|
89 |
+
Removes the node with the given address. References
|
90 |
+
to this node in others will still exist.
|
91 |
+
"""
|
92 |
+
del self.nodes[address]
|
93 |
+
|
94 |
+
def redirect_arcs(self, originals, redirect):
|
95 |
+
"""
|
96 |
+
Redirects arcs to any of the nodes in the originals list
|
97 |
+
to the redirect node address.
|
98 |
+
"""
|
99 |
+
for node in self.nodes.values():
|
100 |
+
new_deps = []
|
101 |
+
for dep in node["deps"]:
|
102 |
+
if dep in originals:
|
103 |
+
new_deps.append(redirect)
|
104 |
+
else:
|
105 |
+
new_deps.append(dep)
|
106 |
+
node["deps"] = new_deps
|
107 |
+
|
108 |
+
def add_arc(self, head_address, mod_address):
|
109 |
+
"""
|
110 |
+
Adds an arc from the node specified by head_address to the
|
111 |
+
node specified by the mod address.
|
112 |
+
"""
|
113 |
+
relation = self.nodes[mod_address]["rel"]
|
114 |
+
self.nodes[head_address]["deps"].setdefault(relation, [])
|
115 |
+
self.nodes[head_address]["deps"][relation].append(mod_address)
|
116 |
+
# self.nodes[head_address]['deps'].append(mod_address)
|
117 |
+
|
118 |
+
def connect_graph(self):
|
119 |
+
"""
|
120 |
+
Fully connects all non-root nodes. All nodes are set to be dependents
|
121 |
+
of the root node.
|
122 |
+
"""
|
123 |
+
for node1 in self.nodes.values():
|
124 |
+
for node2 in self.nodes.values():
|
125 |
+
if node1["address"] != node2["address"] and node2["rel"] != "TOP":
|
126 |
+
relation = node2["rel"]
|
127 |
+
node1["deps"].setdefault(relation, [])
|
128 |
+
node1["deps"][relation].append(node2["address"])
|
129 |
+
# node1['deps'].append(node2['address'])
|
130 |
+
|
131 |
+
def get_by_address(self, node_address):
|
132 |
+
"""Return the node with the given address."""
|
133 |
+
return self.nodes[node_address]
|
134 |
+
|
135 |
+
def contains_address(self, node_address):
|
136 |
+
"""
|
137 |
+
Returns true if the graph contains a node with the given node
|
138 |
+
address, false otherwise.
|
139 |
+
"""
|
140 |
+
return node_address in self.nodes
|
141 |
+
|
142 |
+
def to_dot(self):
|
143 |
+
"""Return a dot representation suitable for using with Graphviz.
|
144 |
+
|
145 |
+
>>> dg = DependencyGraph(
|
146 |
+
... 'John N 2\\n'
|
147 |
+
... 'loves V 0\\n'
|
148 |
+
... 'Mary N 2'
|
149 |
+
... )
|
150 |
+
>>> print(dg.to_dot())
|
151 |
+
digraph G{
|
152 |
+
edge [dir=forward]
|
153 |
+
node [shape=plaintext]
|
154 |
+
<BLANKLINE>
|
155 |
+
0 [label="0 (None)"]
|
156 |
+
0 -> 2 [label="ROOT"]
|
157 |
+
1 [label="1 (John)"]
|
158 |
+
2 [label="2 (loves)"]
|
159 |
+
2 -> 1 [label=""]
|
160 |
+
2 -> 3 [label=""]
|
161 |
+
3 [label="3 (Mary)"]
|
162 |
+
}
|
163 |
+
|
164 |
+
"""
|
165 |
+
# Start the digraph specification
|
166 |
+
s = "digraph G{\n"
|
167 |
+
s += "edge [dir=forward]\n"
|
168 |
+
s += "node [shape=plaintext]\n"
|
169 |
+
|
170 |
+
# Draw the remaining nodes
|
171 |
+
for node in sorted(self.nodes.values(), key=lambda v: v["address"]):
|
172 |
+
s += '\n{} [label="{} ({})"]'.format(
|
173 |
+
node["address"],
|
174 |
+
node["address"],
|
175 |
+
node["word"],
|
176 |
+
)
|
177 |
+
for rel, deps in node["deps"].items():
|
178 |
+
for dep in deps:
|
179 |
+
if rel is not None:
|
180 |
+
s += '\n{} -> {} [label="{}"]'.format(node["address"], dep, rel)
|
181 |
+
else:
|
182 |
+
s += "\n{} -> {} ".format(node["address"], dep)
|
183 |
+
s += "\n}"
|
184 |
+
|
185 |
+
return s
|
186 |
+
|
187 |
+
def _repr_svg_(self):
|
188 |
+
"""Show SVG representation of the transducer (IPython magic).
|
189 |
+
>>> from nltk.test.setup_fixt import check_binary
|
190 |
+
>>> check_binary('dot')
|
191 |
+
>>> dg = DependencyGraph(
|
192 |
+
... 'John N 2\\n'
|
193 |
+
... 'loves V 0\\n'
|
194 |
+
... 'Mary N 2'
|
195 |
+
... )
|
196 |
+
>>> dg._repr_svg_().split('\\n')[0]
|
197 |
+
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
|
198 |
+
|
199 |
+
"""
|
200 |
+
dot_string = self.to_dot()
|
201 |
+
return dot2img(dot_string)
|
202 |
+
|
203 |
+
def __str__(self):
|
204 |
+
return pformat(self.nodes)
|
205 |
+
|
206 |
+
def __repr__(self):
|
207 |
+
return f"<DependencyGraph with {len(self.nodes)} nodes>"
|
208 |
+
|
209 |
+
@staticmethod
|
210 |
+
def load(
|
211 |
+
filename, zero_based=False, cell_separator=None, top_relation_label="ROOT"
|
212 |
+
):
|
213 |
+
"""
|
214 |
+
:param filename: a name of a file in Malt-TAB format
|
215 |
+
:param zero_based: nodes in the input file are numbered starting from 0
|
216 |
+
rather than 1 (as produced by, e.g., zpar)
|
217 |
+
:param str cell_separator: the cell separator. If not provided, cells
|
218 |
+
are split by whitespace.
|
219 |
+
:param str top_relation_label: the label by which the top relation is
|
220 |
+
identified, for example, `ROOT`, `null` or `TOP`.
|
221 |
+
|
222 |
+
:return: a list of DependencyGraphs
|
223 |
+
|
224 |
+
"""
|
225 |
+
with open(filename) as infile:
|
226 |
+
return [
|
227 |
+
DependencyGraph(
|
228 |
+
tree_str,
|
229 |
+
zero_based=zero_based,
|
230 |
+
cell_separator=cell_separator,
|
231 |
+
top_relation_label=top_relation_label,
|
232 |
+
)
|
233 |
+
for tree_str in infile.read().split("\n\n")
|
234 |
+
]
|
235 |
+
|
236 |
+
def left_children(self, node_index):
|
237 |
+
"""
|
238 |
+
Returns the number of left children under the node specified
|
239 |
+
by the given address.
|
240 |
+
"""
|
241 |
+
children = chain.from_iterable(self.nodes[node_index]["deps"].values())
|
242 |
+
index = self.nodes[node_index]["address"]
|
243 |
+
return sum(1 for c in children if c < index)
|
244 |
+
|
245 |
+
def right_children(self, node_index):
|
246 |
+
"""
|
247 |
+
Returns the number of right children under the node specified
|
248 |
+
by the given address.
|
249 |
+
"""
|
250 |
+
children = chain.from_iterable(self.nodes[node_index]["deps"].values())
|
251 |
+
index = self.nodes[node_index]["address"]
|
252 |
+
return sum(1 for c in children if c > index)
|
253 |
+
|
254 |
+
def add_node(self, node):
|
255 |
+
if not self.contains_address(node["address"]):
|
256 |
+
self.nodes[node["address"]].update(node)
|
257 |
+
|
258 |
+
def _parse(
|
259 |
+
self,
|
260 |
+
input_,
|
261 |
+
cell_extractor=None,
|
262 |
+
zero_based=False,
|
263 |
+
cell_separator=None,
|
264 |
+
top_relation_label="ROOT",
|
265 |
+
):
|
266 |
+
"""Parse a sentence.
|
267 |
+
|
268 |
+
:param extractor: a function that given a tuple of cells returns a
|
269 |
+
7-tuple, where the values are ``word, lemma, ctag, tag, feats, head,
|
270 |
+
rel``.
|
271 |
+
|
272 |
+
:param str cell_separator: the cell separator. If not provided, cells
|
273 |
+
are split by whitespace.
|
274 |
+
|
275 |
+
:param str top_relation_label: the label by which the top relation is
|
276 |
+
identified, for example, `ROOT`, `null` or `TOP`.
|
277 |
+
|
278 |
+
"""
|
279 |
+
|
280 |
+
def extract_3_cells(cells, index):
|
281 |
+
word, tag, head = cells
|
282 |
+
return index, word, word, tag, tag, "", head, ""
|
283 |
+
|
284 |
+
def extract_4_cells(cells, index):
|
285 |
+
word, tag, head, rel = cells
|
286 |
+
return index, word, word, tag, tag, "", head, rel
|
287 |
+
|
288 |
+
def extract_7_cells(cells, index):
|
289 |
+
line_index, word, lemma, tag, _, head, rel = cells
|
290 |
+
try:
|
291 |
+
index = int(line_index)
|
292 |
+
except ValueError:
|
293 |
+
# index can't be parsed as an integer, use default
|
294 |
+
pass
|
295 |
+
return index, word, lemma, tag, tag, "", head, rel
|
296 |
+
|
297 |
+
def extract_10_cells(cells, index):
|
298 |
+
line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
|
299 |
+
try:
|
300 |
+
index = int(line_index)
|
301 |
+
except ValueError:
|
302 |
+
# index can't be parsed as an integer, use default
|
303 |
+
pass
|
304 |
+
return index, word, lemma, ctag, tag, feats, head, rel
|
305 |
+
|
306 |
+
extractors = {
|
307 |
+
3: extract_3_cells,
|
308 |
+
4: extract_4_cells,
|
309 |
+
7: extract_7_cells,
|
310 |
+
10: extract_10_cells,
|
311 |
+
}
|
312 |
+
|
313 |
+
if isinstance(input_, str):
|
314 |
+
input_ = (line for line in input_.split("\n"))
|
315 |
+
|
316 |
+
lines = (l.rstrip() for l in input_)
|
317 |
+
lines = (l for l in lines if l)
|
318 |
+
|
319 |
+
cell_number = None
|
320 |
+
for index, line in enumerate(lines, start=1):
|
321 |
+
cells = line.split(cell_separator)
|
322 |
+
if cell_number is None:
|
323 |
+
cell_number = len(cells)
|
324 |
+
else:
|
325 |
+
assert cell_number == len(cells)
|
326 |
+
|
327 |
+
if cell_extractor is None:
|
328 |
+
try:
|
329 |
+
cell_extractor = extractors[cell_number]
|
330 |
+
except KeyError as e:
|
331 |
+
raise ValueError(
|
332 |
+
"Number of tab-delimited fields ({}) not supported by "
|
333 |
+
"CoNLL(10) or Malt-Tab(4) format".format(cell_number)
|
334 |
+
) from e
|
335 |
+
|
336 |
+
try:
|
337 |
+
index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(
|
338 |
+
cells, index
|
339 |
+
)
|
340 |
+
except (TypeError, ValueError):
|
341 |
+
# cell_extractor doesn't take 2 arguments or doesn't return 8
|
342 |
+
# values; assume the cell_extractor is an older external
|
343 |
+
# extractor and doesn't accept or return an index.
|
344 |
+
word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)
|
345 |
+
|
346 |
+
if head == "_":
|
347 |
+
continue
|
348 |
+
|
349 |
+
head = int(head)
|
350 |
+
if zero_based:
|
351 |
+
head += 1
|
352 |
+
|
353 |
+
self.nodes[index].update(
|
354 |
+
{
|
355 |
+
"address": index,
|
356 |
+
"word": word,
|
357 |
+
"lemma": lemma,
|
358 |
+
"ctag": ctag,
|
359 |
+
"tag": tag,
|
360 |
+
"feats": feats,
|
361 |
+
"head": head,
|
362 |
+
"rel": rel,
|
363 |
+
}
|
364 |
+
)
|
365 |
+
|
366 |
+
# Make sure that the fake root node has labeled dependencies.
|
367 |
+
if (cell_number == 3) and (head == 0):
|
368 |
+
rel = top_relation_label
|
369 |
+
self.nodes[head]["deps"][rel].append(index)
|
370 |
+
|
371 |
+
if self.nodes[0]["deps"][top_relation_label]:
|
372 |
+
root_address = self.nodes[0]["deps"][top_relation_label][0]
|
373 |
+
self.root = self.nodes[root_address]
|
374 |
+
self.top_relation_label = top_relation_label
|
375 |
+
else:
|
376 |
+
warnings.warn(
|
377 |
+
"The graph doesn't contain a node " "that depends on the root element."
|
378 |
+
)
|
379 |
+
|
380 |
+
def _word(self, node, filter=True):
|
381 |
+
w = node["word"]
|
382 |
+
if filter:
|
383 |
+
if w != ",":
|
384 |
+
return w
|
385 |
+
return w
|
386 |
+
|
387 |
+
def _tree(self, i):
|
388 |
+
"""Turn dependency graphs into NLTK trees.
|
389 |
+
|
390 |
+
:param int i: index of a node
|
391 |
+
:return: either a word (if the indexed node is a leaf) or a ``Tree``.
|
392 |
+
"""
|
393 |
+
node = self.get_by_address(i)
|
394 |
+
word = node["word"]
|
395 |
+
deps = sorted(chain.from_iterable(node["deps"].values()))
|
396 |
+
|
397 |
+
if deps:
|
398 |
+
return Tree(word, [self._tree(dep) for dep in deps])
|
399 |
+
else:
|
400 |
+
return word
|
401 |
+
|
402 |
+
def tree(self):
|
403 |
+
"""
|
404 |
+
Starting with the ``root`` node, build a dependency tree using the NLTK
|
405 |
+
``Tree`` constructor. Dependency labels are omitted.
|
406 |
+
"""
|
407 |
+
node = self.root
|
408 |
+
|
409 |
+
word = node["word"]
|
410 |
+
deps = sorted(chain.from_iterable(node["deps"].values()))
|
411 |
+
return Tree(word, [self._tree(dep) for dep in deps])
|
412 |
+
|
413 |
+
def triples(self, node=None):
|
414 |
+
"""
|
415 |
+
Extract dependency triples of the form:
|
416 |
+
((head word, head tag), rel, (dep word, dep tag))
|
417 |
+
"""
|
418 |
+
|
419 |
+
if not node:
|
420 |
+
node = self.root
|
421 |
+
|
422 |
+
head = (node["word"], node["ctag"])
|
423 |
+
for i in sorted(chain.from_iterable(node["deps"].values())):
|
424 |
+
dep = self.get_by_address(i)
|
425 |
+
yield (head, dep["rel"], (dep["word"], dep["ctag"]))
|
426 |
+
yield from self.triples(node=dep)
|
427 |
+
|
428 |
+
def _hd(self, i):
|
429 |
+
try:
|
430 |
+
return self.nodes[i]["head"]
|
431 |
+
except IndexError:
|
432 |
+
return None
|
433 |
+
|
434 |
+
def _rel(self, i):
|
435 |
+
try:
|
436 |
+
return self.nodes[i]["rel"]
|
437 |
+
except IndexError:
|
438 |
+
return None
|
439 |
+
|
440 |
+
# what's the return type? Boolean or list?
|
441 |
+
def contains_cycle(self):
|
442 |
+
"""Check whether there are cycles.
|
443 |
+
|
444 |
+
>>> dg = DependencyGraph(treebank_data)
|
445 |
+
>>> dg.contains_cycle()
|
446 |
+
False
|
447 |
+
|
448 |
+
>>> cyclic_dg = DependencyGraph()
|
449 |
+
>>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}
|
450 |
+
>>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}
|
451 |
+
>>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}
|
452 |
+
>>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}
|
453 |
+
>>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}
|
454 |
+
>>> cyclic_dg.nodes = {
|
455 |
+
... 0: top,
|
456 |
+
... 1: child1,
|
457 |
+
... 2: child2,
|
458 |
+
... 3: child3,
|
459 |
+
... 4: child4,
|
460 |
+
... }
|
461 |
+
>>> cyclic_dg.root = top
|
462 |
+
|
463 |
+
>>> cyclic_dg.contains_cycle()
|
464 |
+
[1, 2, 4, 3]
|
465 |
+
|
466 |
+
"""
|
467 |
+
distances = {}
|
468 |
+
|
469 |
+
for node in self.nodes.values():
|
470 |
+
for dep in node["deps"]:
|
471 |
+
key = tuple([node["address"], dep])
|
472 |
+
distances[key] = 1
|
473 |
+
|
474 |
+
for _ in self.nodes:
|
475 |
+
new_entries = {}
|
476 |
+
|
477 |
+
for pair1 in distances:
|
478 |
+
for pair2 in distances:
|
479 |
+
if pair1[1] == pair2[0]:
|
480 |
+
key = tuple([pair1[0], pair2[1]])
|
481 |
+
new_entries[key] = distances[pair1] + distances[pair2]
|
482 |
+
|
483 |
+
for pair in new_entries:
|
484 |
+
distances[pair] = new_entries[pair]
|
485 |
+
if pair[0] == pair[1]:
|
486 |
+
path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
|
487 |
+
return path
|
488 |
+
|
489 |
+
return False # return []?
|
490 |
+
|
491 |
+
def get_cycle_path(self, curr_node, goal_node_index):
|
492 |
+
for dep in curr_node["deps"]:
|
493 |
+
if dep == goal_node_index:
|
494 |
+
return [curr_node["address"]]
|
495 |
+
for dep in curr_node["deps"]:
|
496 |
+
path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
|
497 |
+
if len(path) > 0:
|
498 |
+
path.insert(0, curr_node["address"])
|
499 |
+
return path
|
500 |
+
return []
|
501 |
+
|
502 |
+
def to_conll(self, style):
|
503 |
+
"""
|
504 |
+
The dependency graph in CoNLL format.
|
505 |
+
|
506 |
+
:param style: the style to use for the format (3, 4, 10 columns)
|
507 |
+
:type style: int
|
508 |
+
:rtype: str
|
509 |
+
"""
|
510 |
+
|
511 |
+
if style == 3:
|
512 |
+
template = "{word}\t{tag}\t{head}\n"
|
513 |
+
elif style == 4:
|
514 |
+
template = "{word}\t{tag}\t{head}\t{rel}\n"
|
515 |
+
elif style == 10:
|
516 |
+
template = (
|
517 |
+
"{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n"
|
518 |
+
)
|
519 |
+
else:
|
520 |
+
raise ValueError(
|
521 |
+
"Number of tab-delimited fields ({}) not supported by "
|
522 |
+
"CoNLL(10) or Malt-Tab(4) format".format(style)
|
523 |
+
)
|
524 |
+
|
525 |
+
return "".join(
|
526 |
+
template.format(i=i, **node)
|
527 |
+
for i, node in sorted(self.nodes.items())
|
528 |
+
if node["tag"] != "TOP"
|
529 |
+
)
|
530 |
+
|
531 |
+
def nx_graph(self):
|
532 |
+
"""Convert the data in a ``nodelist`` into a networkx labeled directed graph."""
|
533 |
+
import networkx
|
534 |
+
|
535 |
+
nx_nodelist = list(range(1, len(self.nodes)))
|
536 |
+
nx_edgelist = [
|
537 |
+
(n, self._hd(n), self._rel(n)) for n in nx_nodelist if self._hd(n)
|
538 |
+
]
|
539 |
+
self.nx_labels = {}
|
540 |
+
for n in nx_nodelist:
|
541 |
+
self.nx_labels[n] = self.nodes[n]["word"]
|
542 |
+
|
543 |
+
g = networkx.MultiDiGraph()
|
544 |
+
g.add_nodes_from(nx_nodelist)
|
545 |
+
g.add_edges_from(nx_edgelist)
|
546 |
+
|
547 |
+
return g
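A usage sketch (not in the original source) tying the conversion methods together, reusing the toy Malt-TAB fragment from the to_dot() doctest:

dg = DependencyGraph('John N 2\nloves V 0\nMary N 2')
print(dg.tree())        # (loves John Mary)
for triple in dg.triples():
    print(triple)       # e.g. (('loves', 'V'), '', ('John', 'N'))
print(dg.to_conll(3))   # three-column word/tag/head output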
|
548 |
+
|
549 |
+
|
550 |
+
def dot2img(dot_string, t="svg"):
|
551 |
+
"""
|
552 |
+
Create an image representation from dot_string, using the 'dot' program
|
553 |
+
from the Graphviz package.
|
554 |
+
|
555 |
+
Use the 't' argument to specify the image file format, for ex. 'jpeg', 'eps',
|
556 |
+
'json', 'png' or 'webp' (Running 'dot -T:' lists all available formats).
|
557 |
+
|
558 |
+
Note that the "capture_output" option of subprocess.run() is only available
|
559 |
+
with text formats (like svg), but not with binary image formats (like png).
|
560 |
+
"""
|
561 |
+
|
562 |
+
try:
|
563 |
+
find_binary("dot")
|
564 |
+
try:
|
565 |
+
if t in ["dot", "dot_json", "json", "svg"]:
|
566 |
+
proc = subprocess.run(
|
567 |
+
["dot", "-T%s" % t],
|
568 |
+
capture_output=True,
|
569 |
+
input=dot_string,
|
570 |
+
text=True,
|
571 |
+
)
|
572 |
+
else:
|
573 |
+
proc = subprocess.run(
|
574 |
+
["dot", "-T%s" % t],
|
575 |
+
input=bytes(dot_string, encoding="utf8"),
|
576 |
+
)
|
577 |
+
return proc.stdout
|
578 |
+
except:
|
579 |
+
raise Exception(
|
580 |
+
"Cannot create image representation by running dot from string: {}"
|
581 |
+
"".format(dot_string)
|
582 |
+
)
|
583 |
+
except OSError as e:
|
584 |
+
raise Exception("Cannot find the dot binary from Graphviz package") from e
|
585 |
+
|
586 |
+
|
587 |
+
class DependencyGraphError(Exception):
|
588 |
+
"""Dependency graph exception."""
|
589 |
+
|
590 |
+
|
591 |
+
def demo():
|
592 |
+
malt_demo()
|
593 |
+
conll_demo()
|
594 |
+
conll_file_demo()
|
595 |
+
cycle_finding_demo()
|
596 |
+
|
597 |
+
|
598 |
+
def malt_demo(nx=False):
|
599 |
+
"""
|
600 |
+
A demonstration of the result of reading a dependency
|
601 |
+
version of the first sentence of the Penn Treebank.
|
602 |
+
"""
|
603 |
+
dg = DependencyGraph(
|
604 |
+
"""Pierre NNP 2 NMOD
|
605 |
+
Vinken NNP 8 SUB
|
606 |
+
, , 2 P
|
607 |
+
61 CD 5 NMOD
|
608 |
+
years NNS 6 AMOD
|
609 |
+
old JJ 2 NMOD
|
610 |
+
, , 2 P
|
611 |
+
will MD 0 ROOT
|
612 |
+
join VB 8 VC
|
613 |
+
the DT 11 NMOD
|
614 |
+
board NN 9 OBJ
|
615 |
+
as IN 9 VMOD
|
616 |
+
a DT 15 NMOD
|
617 |
+
nonexecutive JJ 15 NMOD
|
618 |
+
director NN 12 PMOD
|
619 |
+
Nov. NNP 9 VMOD
|
620 |
+
29 CD 16 NMOD
|
621 |
+
. . 9 VMOD
|
622 |
+
"""
|
623 |
+
)
|
624 |
+
tree = dg.tree()
|
625 |
+
tree.pprint()
|
626 |
+
if nx:
|
627 |
+
# currently doesn't work
|
628 |
+
import networkx
|
629 |
+
from matplotlib import pylab
|
630 |
+
|
631 |
+
g = dg.nx_graph()
|
632 |
+
g.info()
|
633 |
+
pos = networkx.spring_layout(g, dim=1)
|
634 |
+
networkx.draw_networkx_nodes(g, pos, node_size=50)
|
635 |
+
# networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)
|
636 |
+
networkx.draw_networkx_labels(g, pos, dg.nx_labels)
|
637 |
+
pylab.xticks([])
|
638 |
+
pylab.yticks([])
|
639 |
+
pylab.savefig("tree.png")
|
640 |
+
pylab.show()
|
641 |
+
|
642 |
+
|
643 |
+
def conll_demo():
|
644 |
+
"""
|
645 |
+
A demonstration of how to read a string representation of
|
646 |
+
a CoNLL format dependency tree.
|
647 |
+
"""
|
648 |
+
dg = DependencyGraph(conll_data1)
|
649 |
+
tree = dg.tree()
|
650 |
+
tree.pprint()
|
651 |
+
print(dg)
|
652 |
+
print(dg.to_conll(4))
|
653 |
+
|
654 |
+
|
655 |
+
def conll_file_demo():
|
656 |
+
print("Mass conll_read demo...")
|
657 |
+
graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry]
|
658 |
+
for graph in graphs:
|
659 |
+
tree = graph.tree()
|
660 |
+
print("\n")
|
661 |
+
tree.pprint()
|
662 |
+
|
663 |
+
|
664 |
+
def cycle_finding_demo():
|
665 |
+
dg = DependencyGraph(treebank_data)
|
666 |
+
print(dg.contains_cycle())
|
667 |
+
cyclic_dg = DependencyGraph()
|
668 |
+
cyclic_dg.add_node({"word": None, "deps": [1], "rel": "TOP", "address": 0})
|
669 |
+
cyclic_dg.add_node({"word": None, "deps": [2], "rel": "NTOP", "address": 1})
|
670 |
+
cyclic_dg.add_node({"word": None, "deps": [4], "rel": "NTOP", "address": 2})
|
671 |
+
cyclic_dg.add_node({"word": None, "deps": [1], "rel": "NTOP", "address": 3})
|
672 |
+
cyclic_dg.add_node({"word": None, "deps": [3], "rel": "NTOP", "address": 4})
|
673 |
+
print(cyclic_dg.contains_cycle())
|
674 |
+
|
675 |
+
|
676 |
+
treebank_data = """Pierre NNP 2 NMOD
|
677 |
+
Vinken NNP 8 SUB
|
678 |
+
, , 2 P
|
679 |
+
61 CD 5 NMOD
|
680 |
+
years NNS 6 AMOD
|
681 |
+
old JJ 2 NMOD
|
682 |
+
, , 2 P
|
683 |
+
will MD 0 ROOT
|
684 |
+
join VB 8 VC
|
685 |
+
the DT 11 NMOD
|
686 |
+
board NN 9 OBJ
|
687 |
+
as IN 9 VMOD
|
688 |
+
a DT 15 NMOD
|
689 |
+
nonexecutive JJ 15 NMOD
|
690 |
+
director NN 12 PMOD
|
691 |
+
Nov. NNP 9 VMOD
|
692 |
+
29 CD 16 NMOD
|
693 |
+
. . 9 VMOD
|
694 |
+
"""
|
695 |
+
|
696 |
+
conll_data1 = """
|
697 |
+
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
|
698 |
+
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
|
699 |
+
3 met met Prep Prep voor 8 mod _ _
|
700 |
+
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
|
701 |
+
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
|
702 |
+
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
|
703 |
+
7 gaan ga V V hulp|inf 6 vc _ _
|
704 |
+
8 winkelen winkel V V intrans|inf 11 cnj _ _
|
705 |
+
9 , , Punc Punc komma 8 punct _ _
|
706 |
+
10 zwemmen zwem V V intrans|inf 11 cnj _ _
|
707 |
+
11 of of Conj Conj neven 7 vc _ _
|
708 |
+
12 terrassen terras N N soort|mv|neut 11 cnj _ _
|
709 |
+
13 . . Punc Punc punt 12 punct _ _
|
710 |
+
"""
|
711 |
+
|
712 |
+
conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
|
713 |
+
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
|
714 |
+
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
|
715 |
+
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
|
716 |
+
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
|
717 |
+
6 . . Punc Punc punt 5 punct _ _
|
718 |
+
|
719 |
+
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
|
720 |
+
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
|
721 |
+
3 met met Prep Prep voor 8 mod _ _
|
722 |
+
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
|
723 |
+
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
|
724 |
+
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
|
725 |
+
7 gaan ga V V hulp|inf 6 vc _ _
|
726 |
+
8 winkelen winkel V V intrans|inf 11 cnj _ _
|
727 |
+
9 , , Punc Punc komma 8 punct _ _
|
728 |
+
10 zwemmen zwem V V intrans|inf 11 cnj _ _
|
729 |
+
11 of of Conj Conj neven 7 vc _ _
|
730 |
+
12 terrassen terras N N soort|mv|neut 11 cnj _ _
|
731 |
+
13 . . Punc Punc punt 12 punct _ _
|
732 |
+
|
733 |
+
1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
|
734 |
+
2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
|
735 |
+
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
|
736 |
+
4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
|
737 |
+
5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
|
738 |
+
6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
|
739 |
+
7 . . Punc Punc punt 6 punct _ _
|
740 |
+
|
741 |
+
1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
|
742 |
+
2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
|
743 |
+
3 bij bij Prep Prep voor 2 ld _ _
|
744 |
+
4 de de Art Art bep|zijdofmv|neut 6 det _ _
|
745 |
+
5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
|
746 |
+
6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
|
747 |
+
7 die die Pron Pron betr|neut|zelfst 6 mod _ _
|
748 |
+
8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
|
749 |
+
9 ginds ginds Adv Adv gew|aanw 12 mod _ _
|
750 |
+
10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
|
751 |
+
11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
|
752 |
+
12 gelaten laat V V trans|verldw|onverv 11 vc _ _
|
753 |
+
13 . . Punc Punc punt 12 punct _ _
|
754 |
+
|
755 |
+
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
|
756 |
+
2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
|
757 |
+
3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
|
758 |
+
4 naast naast Prep Prep voor 11 mod _ _
|
759 |
+
5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
|
760 |
+
6 op op Prep Prep voor 11 ld _ _
|
761 |
+
7 de de Art Art bep|zijdofmv|neut 8 det _ _
|
762 |
+
8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
|
763 |
+
9 kunnen kan V V hulp|inf 2 vc _ _
|
764 |
+
10 gaan ga V V hulp|inf 9 vc _ _
|
765 |
+
11 liggen lig V V intrans|inf 10 vc _ _
|
766 |
+
12 . . Punc Punc punt 11 punct _ _
|
767 |
+
|
768 |
+
1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
|
769 |
+
2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
|
770 |
+
3 mams mams N N soort|ev|neut 4 det _ _
|
771 |
+
4 rug rug N N soort|ev|neut 5 obj1 _ _
|
772 |
+
5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
|
773 |
+
6 hebben heb V V hulp|inf 2 vc _ _
|
774 |
+
7 en en Conj Conj neven 0 ROOT _ _
|
775 |
+
8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
|
776 |
+
9 de de Art Art bep|zijdofmv|neut 10 det _ _
|
777 |
+
10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
|
778 |
+
11 . . Punc Punc punt 10 punct _ _
|
779 |
+
|
780 |
+
1 Of of Conj Conj onder|metfin 0 ROOT _ _
|
781 |
+
2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
|
782 |
+
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
|
783 |
+
4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
|
784 |
+
5 met met Prep Prep voor 10 mod _ _
|
785 |
+
6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
|
786 |
+
7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
|
787 |
+
8 rond rond Adv Adv deelv 10 svp _ _
|
788 |
+
9 kunnen kan V V hulp|inf 3 vc _ _
|
789 |
+
10 slenteren slenter V V intrans|inf 9 vc _ _
|
790 |
+
11 in in Prep Prep voor 10 mod _ _
|
791 |
+
12 de de Art Art bep|zijdofmv|neut 13 det _ _
|
792 |
+
13 buurt buurt N N soort|ev|neut 11 obj1 _ _
|
793 |
+
14 van van Prep Prep voor 13 mod _ _
|
794 |
+
15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
|
795 |
+
16 . . Punc Punc punt 15 punct _ _
|
796 |
+
"""
|
797 |
+
|
798 |
+
if __name__ == "__main__":
|
799 |
+
demo()
|
llmeval-env/lib/python3.10/site-packages/nltk/parse/generate.py
ADDED
@@ -0,0 +1,85 @@
1 |
+
# Natural Language Toolkit: Generating from a CFG
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Steven Bird <[email protected]>
|
5 |
+
# Peter Ljunglöf <[email protected]>
|
6 |
+
# URL: <https://www.nltk.org/>
|
7 |
+
# For license information, see LICENSE.TXT
|
8 |
+
#
|
9 |
+
|
10 |
+
import itertools
|
11 |
+
import sys
|
12 |
+
|
13 |
+
from nltk.grammar import Nonterminal
|
14 |
+
|
15 |
+
|
16 |
+
def generate(grammar, start=None, depth=None, n=None):
|
17 |
+
"""
|
18 |
+
Generates an iterator of all sentences from a CFG.
|
19 |
+
|
20 |
+
:param grammar: The Grammar used to generate sentences.
|
21 |
+
:param start: The Nonterminal from which to start generate sentences.
|
22 |
+
:param depth: The maximal depth of the generated tree.
|
23 |
+
:param n: The maximum number of sentences to return.
|
24 |
+
:return: An iterator of lists of terminal tokens.
|
25 |
+
"""
|
26 |
+
if not start:
|
27 |
+
start = grammar.start()
|
28 |
+
if depth is None:
|
29 |
+
depth = sys.maxsize
|
30 |
+
|
31 |
+
iter = _generate_all(grammar, [start], depth)
|
32 |
+
|
33 |
+
if n:
|
34 |
+
iter = itertools.islice(iter, n)
|
35 |
+
|
36 |
+
return iter
|
37 |
+
|
38 |
+
|
39 |
+
def _generate_all(grammar, items, depth):
|
40 |
+
if items:
|
41 |
+
try:
|
42 |
+
for frag1 in _generate_one(grammar, items[0], depth):
|
43 |
+
for frag2 in _generate_all(grammar, items[1:], depth):
|
44 |
+
yield frag1 + frag2
|
45 |
+
except RecursionError as error:
|
46 |
+
# Helpful error message while still showing the recursion stack.
|
47 |
+
raise RuntimeError(
|
48 |
+
"The grammar has rule(s) that yield infinite recursion!"
|
49 |
+
) from error
|
50 |
+
else:
|
51 |
+
yield []
|
52 |
+
|
53 |
+
|
54 |
+
def _generate_one(grammar, item, depth):
|
55 |
+
if depth > 0:
|
56 |
+
if isinstance(item, Nonterminal):
|
57 |
+
for prod in grammar.productions(lhs=item):
|
58 |
+
yield from _generate_all(grammar, prod.rhs(), depth - 1)
|
59 |
+
else:
|
60 |
+
yield [item]
|
61 |
+
|
62 |
+
|
63 |
+
demo_grammar = """
|
64 |
+
S -> NP VP
|
65 |
+
NP -> Det N
|
66 |
+
PP -> P NP
|
67 |
+
VP -> 'slept' | 'saw' NP | 'walked' PP
|
68 |
+
Det -> 'the' | 'a'
|
69 |
+
N -> 'man' | 'park' | 'dog'
|
70 |
+
P -> 'in' | 'with'
|
71 |
+
"""
|
72 |
+
|
73 |
+
|
74 |
+
def demo(N=23):
|
75 |
+
from nltk.grammar import CFG
|
76 |
+
|
77 |
+
print("Generating the first %d sentences for demo grammar:" % (N,))
|
78 |
+
print(demo_grammar)
|
79 |
+
grammar = CFG.fromstring(demo_grammar)
|
80 |
+
for n, sent in enumerate(generate(grammar, n=N), 1):
|
81 |
+
print("%3d. %s" % (n, " ".join(sent)))
|
82 |
+
|
83 |
+
|
84 |
+
if __name__ == "__main__":
|
85 |
+
demo()
|
llmeval-env/lib/python3.10/site-packages/nltk/parse/malt.py
ADDED
@@ -0,0 +1,393 @@
1 |
+
# Natural Language Toolkit: Interface to MaltParser
|
2 |
+
#
|
3 |
+
# Author: Dan Garrette <[email protected]>
|
4 |
+
# Contributor: Liling Tan, Mustufain, osamamukhtar11
|
5 |
+
#
|
6 |
+
# Copyright (C) 2001-2023 NLTK Project
|
7 |
+
# URL: <https://www.nltk.org/>
|
8 |
+
# For license information, see LICENSE.TXT
|
9 |
+
|
10 |
+
import inspect
|
11 |
+
import os
|
12 |
+
import subprocess
|
13 |
+
import sys
|
14 |
+
import tempfile
|
15 |
+
|
16 |
+
from nltk.data import ZipFilePathPointer
|
17 |
+
from nltk.internals import find_dir, find_file, find_jars_within_path
|
18 |
+
from nltk.parse.api import ParserI
|
19 |
+
from nltk.parse.dependencygraph import DependencyGraph
|
20 |
+
from nltk.parse.util import taggedsents_to_conll
|
21 |
+
|
22 |
+
|
23 |
+
def malt_regex_tagger():
|
24 |
+
from nltk.tag import RegexpTagger
|
25 |
+
|
26 |
+
_tagger = RegexpTagger(
|
27 |
+
[
|
28 |
+
(r"\.$", "."),
|
29 |
+
(r"\,$", ","),
|
30 |
+
(r"\?$", "?"), # fullstop, comma, Qmark
|
31 |
+
(r"\($", "("),
|
32 |
+
(r"\)$", ")"), # round brackets
|
33 |
+
(r"\[$", "["),
|
34 |
+
(r"\]$", "]"), # square brackets
|
35 |
+
(r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers
|
36 |
+
(r"(The|the|A|a|An|an)$", "DT"), # articles
|
37 |
+
(r"(He|he|She|she|It|it|I|me|Me|You|you)$", "PRP"), # pronouns
|
38 |
+
(r"(His|his|Her|her|Its|its)$", "PRP$"), # possessive
|
39 |
+
(r"(my|Your|your|Yours|yours)$", "PRP$"), # possessive
|
40 |
+
(r"(on|On|in|In|at|At|since|Since)$", "IN"), # time prepopsitions
|
41 |
+
(r"(for|For|ago|Ago|before|Before)$", "IN"), # time prepopsitions
|
42 |
+
(r"(till|Till|until|Until)$", "IN"), # time prepopsitions
|
43 |
+
(r"(by|By|beside|Beside)$", "IN"), # space prepopsitions
|
44 |
+
(r"(under|Under|below|Below)$", "IN"), # space prepopsitions
|
45 |
+
(r"(over|Over|above|Above)$", "IN"), # space prepopsitions
|
46 |
+
(r"(across|Across|through|Through)$", "IN"), # space prepopsitions
|
47 |
+
(r"(into|Into|towards|Towards)$", "IN"), # space prepopsitions
|
48 |
+
(r"(onto|Onto|from|From)$", "IN"), # space prepopsitions
|
49 |
+
(r".*able$", "JJ"), # adjectives
|
50 |
+
(r".*ness$", "NN"), # nouns formed from adjectives
|
51 |
+
(r".*ly$", "RB"), # adverbs
|
52 |
+
(r".*s$", "NNS"), # plural nouns
|
53 |
+
(r".*ing$", "VBG"), # gerunds
|
54 |
+
(r".*ed$", "VBD"), # past tense verbs
|
55 |
+
(r".*", "NN"), # nouns (default)
|
56 |
+
]
|
57 |
+
)
|
58 |
+
return _tagger.tag
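A quick sketch of the fallback tagger above; it assigns coarse Penn-style tags with regular expressions alone, so no trained model is required.

tag = malt_regex_tagger()
print(tag("I shot an elephant .".split()))
# [('I', 'PRP'), ('shot', 'NN'), ('an', 'DT'), ('elephant', 'NN'), ('.', '.')]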
|
59 |
+
|
60 |
+
|
61 |
+
def find_maltparser(parser_dirname):
|
62 |
+
"""
|
63 |
+
A module to find MaltParser .jar file and its dependencies.
|
64 |
+
"""
|
65 |
+
if os.path.exists(parser_dirname): # If a full path is given.
|
66 |
+
_malt_dir = parser_dirname
|
67 |
+
else: # Try to find path to maltparser directory in environment variables.
|
68 |
+
_malt_dir = find_dir(parser_dirname, env_vars=("MALT_PARSER",))
|
69 |
+
# Checks that the found directory contains all the necessary .jar files
|
70 |
+
malt_dependencies = ["", "", ""]
|
71 |
+
_malt_jars = set(find_jars_within_path(_malt_dir))
|
72 |
+
_jars = {os.path.split(jar)[1] for jar in _malt_jars}
|
73 |
+
malt_dependencies = {"log4j.jar", "libsvm.jar", "liblinear-1.8.jar"}
|
74 |
+
|
75 |
+
assert malt_dependencies.issubset(_jars)
|
76 |
+
assert any(
|
77 |
+
filter(lambda i: i.startswith("maltparser-") and i.endswith(".jar"), _jars)
|
78 |
+
)
|
79 |
+
return list(_malt_jars)
|
80 |
+
|
81 |
+
|
82 |
+
def find_malt_model(model_filename):
|
83 |
+
"""
|
84 |
+
A module to find pre-trained MaltParser model.
|
85 |
+
"""
|
86 |
+
if model_filename is None:
|
87 |
+
return "malt_temp.mco"
|
88 |
+
elif os.path.exists(model_filename): # If a full path is given.
|
89 |
+
return model_filename
|
90 |
+
else: # Try to find path to malt model in environment variables.
|
91 |
+
return find_file(model_filename, env_vars=("MALT_MODEL",), verbose=False)
|
92 |
+
|
93 |
+
|
94 |
+
class MaltParser(ParserI):
|
95 |
+
"""
|
96 |
+
A class for dependency parsing with MaltParser. The input is the paths to:
|
97 |
+
- (optionally) a maltparser directory
|
98 |
+
- (optionally) the path to a pre-trained MaltParser .mco model file
|
99 |
+
- (optionally) the tagger to use for POS tagging before parsing
|
100 |
+
- (optionally) additional Java arguments
|
101 |
+
|
102 |
+
Example:
|
103 |
+
>>> from nltk.parse import malt
|
104 |
+
>>> # With MALT_PARSER and MALT_MODEL environment set.
|
105 |
+
>>> mp = malt.MaltParser(model_filename='engmalt.linear-1.7.mco') # doctest: +SKIP
|
106 |
+
>>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP
|
107 |
+
(shot I (elephant an) (in (pajamas my)) .)
|
108 |
+
>>> # Without MALT_PARSER and MALT_MODEL environment.
|
109 |
+
>>> mp = malt.MaltParser('/home/user/maltparser-1.9.2/', '/home/user/engmalt.linear-1.7.mco') # doctest: +SKIP
|
110 |
+
>>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP
|
111 |
+
(shot I (elephant an) (in (pajamas my)) .)
|
112 |
+
"""
|
113 |
+
|
114 |
+
def __init__(
|
115 |
+
self,
|
116 |
+
parser_dirname="",
|
117 |
+
model_filename=None,
|
118 |
+
tagger=None,
|
119 |
+
additional_java_args=None,
|
120 |
+
):
|
121 |
+
"""
|
122 |
+
An interface for parsing with the Malt Parser.
|
123 |
+
|
124 |
+
:param parser_dirname: The path to the maltparser directory that
|
125 |
+
contains the maltparser-1.x.jar
|
126 |
+
:type parser_dirname: str
|
127 |
+
:param model_filename: The name of the pre-trained model with .mco file
|
128 |
+
extension. If provided, training will not be required.
|
129 |
+
(see http://www.maltparser.org/mco/mco.html and
|
130 |
+
see http://www.patful.com/chalk/node/185)
|
131 |
+
:type model_filename: str
|
132 |
+
:param tagger: The tagger used to POS tag the raw string before
|
133 |
+
formatting to CONLL format. It should behave like `nltk.pos_tag`
|
134 |
+
:type tagger: function
|
135 |
+
:param additional_java_args: Additional Java arguments to pass when
|
136 |
+
calling MaltParser, usually heap-size
|
137 |
+
limits, e.g. `additional_java_args=['-Xmx1024m']`
|
138 |
+
(see https://goo.gl/mpDBvQ)
|
139 |
+
:type additional_java_args: list
|
140 |
+
"""
|
141 |
+
|
142 |
+
# Find all the necessary jar files for MaltParser.
|
143 |
+
self.malt_jars = find_maltparser(parser_dirname)
|
144 |
+
# Initialize additional java arguments.
|
145 |
+
self.additional_java_args = (
|
146 |
+
additional_java_args if additional_java_args is not None else []
|
147 |
+
)
|
148 |
+
# Initialize model.
|
149 |
+
self.model = find_malt_model(model_filename)
|
150 |
+
self._trained = self.model != "malt_temp.mco"
|
151 |
+
# Set the working_dir parameters i.e. `-w` from MaltParser's option.
|
152 |
+
self.working_dir = tempfile.gettempdir()
|
153 |
+
# Initialize POS tagger.
|
154 |
+
self.tagger = tagger if tagger is not None else malt_regex_tagger()
|
155 |
+
|
156 |
+
def parse_tagged_sents(self, sentences, verbose=False, top_relation_label="null"):
|
157 |
+
"""
|
158 |
+
Use MaltParser to parse multiple POS tagged sentences. Takes multiple
|
159 |
+
sentences where each sentence is a list of (word, tag) tuples.
|
160 |
+
The sentences must have already been tokenized and tagged.
|
161 |
+
|
162 |
+
:param sentences: Input sentences to parse
|
163 |
+
:type sentences: list(list(tuple(str, str)))
|
164 |
+
:return: iter(iter(``DependencyGraph``)) the dependency graph
|
165 |
+
representation of each sentence
|
166 |
+
"""
|
167 |
+
if not self._trained:
|
168 |
+
raise Exception("Parser has not been trained. Call train() first.")
|
169 |
+
|
170 |
+
with tempfile.NamedTemporaryFile(
|
171 |
+
prefix="malt_input.conll.", dir=self.working_dir, mode="w", delete=False
|
172 |
+
) as input_file:
|
173 |
+
with tempfile.NamedTemporaryFile(
|
174 |
+
prefix="malt_output.conll.",
|
175 |
+
dir=self.working_dir,
|
176 |
+
mode="w",
|
177 |
+
delete=False,
|
178 |
+
) as output_file:
|
179 |
+
# Convert list of sentences to CONLL format.
|
180 |
+
for line in taggedsents_to_conll(sentences):
|
181 |
+
input_file.write(str(line))
|
182 |
+
input_file.close()
|
183 |
+
|
184 |
+
# Generate command to run maltparser.
|
185 |
+
cmd = self.generate_malt_command(
|
186 |
+
input_file.name, output_file.name, mode="parse"
|
187 |
+
)
|
188 |
+
|
189 |
+
# MaltParser quirk: it must be run from the directory that
|
190 |
+
# contains the model file; otherwise it fails with missing .jar
|
191 |
+
# errors or an odd -w working_dir problem.
|
192 |
+
_current_path = os.getcwd() # Remembers the current path.
|
193 |
+
try: # Change to modelfile path
|
194 |
+
os.chdir(os.path.split(self.model)[0])
|
195 |
+
except OSError:  # the model path may have no directory component
|
196 |
+
pass
|
197 |
+
ret = self._execute(cmd, verbose) # Run command.
|
198 |
+
os.chdir(_current_path) # Change back to current path.
|
199 |
+
|
200 |
+
if ret != 0:
|
201 |
+
raise Exception(
|
202 |
+
"MaltParser parsing (%s) failed with exit "
|
203 |
+
"code %d" % (" ".join(cmd), ret)
|
204 |
+
)
|
205 |
+
|
206 |
+
# Must return iter(iter(Tree))
|
207 |
+
with open(output_file.name) as infile:
|
208 |
+
for tree_str in infile.read().split("\n\n"):
|
209 |
+
yield (
|
210 |
+
iter(
|
211 |
+
[
|
212 |
+
DependencyGraph(
|
213 |
+
tree_str, top_relation_label=top_relation_label
|
214 |
+
)
|
215 |
+
]
|
216 |
+
)
|
217 |
+
)
|
218 |
+
|
219 |
+
os.remove(input_file.name)
|
220 |
+
os.remove(output_file.name)
|
221 |
+
|
222 |
+
def parse_sents(self, sentences, verbose=False, top_relation_label="null"):
|
223 |
+
"""
|
224 |
+
Use MaltParser to parse multiple sentences.
|
225 |
+
Takes a list of sentences, where each sentence is a list of words.
|
226 |
+
Each sentence will be automatically tagged with this
|
227 |
+
MaltParser instance's tagger.
|
228 |
+
|
229 |
+
:param sentences: Input sentences to parse
|
230 |
+
:type sentences: list(list(str))
|
231 |
+
:return: iter(DependencyGraph)
|
232 |
+
"""
|
233 |
+
tagged_sentences = (self.tagger(sentence) for sentence in sentences)
|
234 |
+
return self.parse_tagged_sents(
|
235 |
+
tagged_sentences, verbose, top_relation_label=top_relation_label
|
236 |
+
)
|
237 |
+
|
238 |
+
def generate_malt_command(self, inputfilename, outputfilename=None, mode=None):
|
239 |
+
"""
|
240 |
+
This function generates the maltparser command used at the terminal.
|
241 |
+
|
242 |
+
:param inputfilename: path to the input file
|
243 |
+
:type inputfilename: str
|
244 |
+
:param outputfilename: path to the output file
|
245 |
+
:type outputfilename: str
|
246 |
+
"""
|
247 |
+
|
248 |
+
cmd = ["java"]
|
249 |
+
cmd += self.additional_java_args # Adds additional java arguments
|
250 |
+
# Joins classpaths with ";" if on Windows and on Linux/Mac use ":"
|
251 |
+
classpaths_separator = ";" if sys.platform.startswith("win") else ":"
|
252 |
+
cmd += [
|
253 |
+
"-cp",
|
254 |
+
classpaths_separator.join(self.malt_jars),
|
255 |
+
] # Adds classpaths for jars
|
256 |
+
cmd += ["org.maltparser.Malt"] # Adds the main function.
|
257 |
+
|
258 |
+
# Adds the model file.
|
259 |
+
if os.path.exists(self.model): # when parsing
|
260 |
+
cmd += ["-c", os.path.split(self.model)[-1]]
|
261 |
+
else: # when learning
|
262 |
+
cmd += ["-c", self.model]
|
263 |
+
|
264 |
+
cmd += ["-i", inputfilename]
|
265 |
+
if mode == "parse":
|
266 |
+
cmd += ["-o", outputfilename]
|
267 |
+
cmd += ["-m", mode] # mode use to generate parses.
|
268 |
+
return cmd
|
269 |
+
|
270 |
+
@staticmethod
|
271 |
+
def _execute(cmd, verbose=False):
|
272 |
+
output = None if verbose else subprocess.PIPE
|
273 |
+
p = subprocess.Popen(cmd, stdout=output, stderr=output)
|
274 |
+
return p.wait()
|
275 |
+
|
276 |
+
def train(self, depgraphs, verbose=False):
|
277 |
+
"""
|
278 |
+
Train MaltParser from a list of ``DependencyGraph`` objects
|
279 |
+
|
280 |
+
:param depgraphs: list of ``DependencyGraph`` objects for training input data
|
281 |
+
:type depgraphs: list(DependencyGraph)
|
282 |
+
"""
|
283 |
+
|
284 |
+
# Write the conll_str to malt_train.conll file in /tmp/
|
285 |
+
with tempfile.NamedTemporaryFile(
|
286 |
+
prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False
|
287 |
+
) as input_file:
|
288 |
+
input_str = "\n".join(dg.to_conll(10) for dg in depgraphs)
|
289 |
+
input_file.write(str(input_str))
|
290 |
+
# Trains the model with the malt_train.conll
|
291 |
+
self.train_from_file(input_file.name, verbose=verbose)
|
292 |
+
# Removes the malt_train.conll once training finishes.
|
293 |
+
os.remove(input_file.name)
|
294 |
+
|
295 |
+
def train_from_file(self, conll_file, verbose=False):
|
296 |
+
"""
|
297 |
+
Train MaltParser from a file
|
298 |
+
:param conll_file: str for the filename of the training input data
|
299 |
+
:type conll_file: str
|
300 |
+
"""
|
301 |
+
|
302 |
+
# If conll_file is a ZipFilePathPointer,
|
303 |
+
# then we need to do some extra massaging
|
304 |
+
if isinstance(conll_file, ZipFilePathPointer):
|
305 |
+
with tempfile.NamedTemporaryFile(
|
306 |
+
prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False
|
307 |
+
) as input_file:
|
308 |
+
with conll_file.open() as conll_input_file:
|
309 |
+
conll_str = conll_input_file.read()
|
310 |
+
input_file.write(str(conll_str))
|
311 |
+
return self.train_from_file(input_file.name, verbose=verbose)
|
312 |
+
|
313 |
+
# Generate command to run maltparser.
|
314 |
+
cmd = self.generate_malt_command(conll_file, mode="learn")
|
315 |
+
ret = self._execute(cmd, verbose)
|
316 |
+
if ret != 0:
|
317 |
+
raise Exception(
|
318 |
+
"MaltParser training (%s) failed with exit "
|
319 |
+
"code %d" % (" ".join(cmd), ret)
|
320 |
+
)
|
321 |
+
self._trained = True
|
322 |
+
|
323 |
+
|
324 |
+
if __name__ == "__main__":
|
325 |
+
"""
|
326 |
+
A demonstration of how NLTK users can use the MaltParser API.
|
327 |
+
|
328 |
+
>>> from nltk import pos_tag
|
329 |
+
>>> assert 'MALT_PARSER' in os.environ, str(
|
330 |
+
... "Please set MALT_PARSER in your global environment, e.g.:\n"
|
331 |
+
... "$ export MALT_PARSER='/home/user/maltparser-1.9.2/'")
|
332 |
+
>>>
|
333 |
+
>>> assert 'MALT_MODEL' in os.environ, str(
|
334 |
+
... "Please set MALT_MODEL in your global environment, e.g.:\n"
|
335 |
+
... "$ export MALT_MODEL='/home/user/engmalt.linear-1.7.mco'")
|
336 |
+
>>>
|
337 |
+
>>> _dg1_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n"
|
338 |
+
... "2 sees _ VB _ _ 0 ROOT _ _\n"
|
339 |
+
... "3 a _ DT _ _ 4 SPEC _ _\n"
|
340 |
+
... "4 dog _ NN _ _ 2 OBJ _ _\n"
|
341 |
+
... "5 . _ . _ _ 2 PUNCT _ _\n")
|
342 |
+
>>>
|
343 |
+
>>>
|
344 |
+
>>> _dg2_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n"
|
345 |
+
... "2 walks _ VB _ _ 0 ROOT _ _\n"
|
346 |
+
... "3 . _ . _ _ 2 PUNCT _ _\n")
|
347 |
+
>>> dg1 = DependencyGraph(_dg1_str)
|
348 |
+
>>> dg2 = DependencyGraph(_dg2_str)
|
349 |
+
>>> # Initialize a MaltParser object
|
350 |
+
>>> mp = MaltParser()
|
351 |
+
>>>
|
352 |
+
>>> # Trains a model.
|
353 |
+
>>> mp.train([dg1,dg2], verbose=False)
|
354 |
+
>>> sent1 = ['John','sees','Mary', '.']
|
355 |
+
>>> sent2 = ['John', 'walks', 'a', 'dog', '.']
|
356 |
+
>>>
|
357 |
+
>>> # Parse a single sentence.
|
358 |
+
>>> parsed_sent1 = mp.parse_one(sent1)
|
359 |
+
>>> parsed_sent2 = mp.parse_one(sent2)
|
360 |
+
>>> print(parsed_sent1.tree())
|
361 |
+
(sees John Mary .)
|
362 |
+
>>> print(parsed_sent2.tree())
|
363 |
+
(walks John (dog a) .)
|
364 |
+
>>>
|
365 |
+
>>> # Parsing multiple sentences.
|
366 |
+
>>> sentences = [sent1,sent2]
|
367 |
+
>>> parsed_sents = mp.parse_sents(sentences)
|
368 |
+
>>> print(next(next(parsed_sents)).tree())
|
369 |
+
(sees John Mary .)
|
370 |
+
>>> print(next(next(parsed_sents)).tree())
|
371 |
+
(walks John (dog a) .)
|
372 |
+
>>>
|
373 |
+
>>> # Initialize a MaltParser object with an English pre-trained model.
|
374 |
+
>>> parser_dirname = 'maltparser-1.9.2'
|
375 |
+
>>> model_name = 'engmalt.linear-1.7.mco'
|
376 |
+
>>> mp = MaltParser(parser_dirname=parser_dirname, model_filename=model_name, tagger=pos_tag)
|
377 |
+
>>> sent1 = 'I shot an elephant in my pajamas .'.split()
|
378 |
+
>>> sent2 = 'Time flies like banana .'.split()
|
379 |
+
>>> # Parse a single sentence.
|
380 |
+
>>> print(mp.parse_one(sent1).tree())
|
381 |
+
(shot I (elephant an) (in (pajamas my)) .)
|
382 |
+
# Parsing multiple sentences
|
383 |
+
>>> sentences = [sent1,sent2]
|
384 |
+
>>> parsed_sents = mp.parse_sents(sentences)
|
385 |
+
>>> print(next(next(parsed_sents)).tree())
|
386 |
+
(shot I (elephant an) (in (pajamas my)) .)
|
387 |
+
>>> print(next(next(parsed_sents)).tree())
|
388 |
+
(flies Time (like banana) .)
|
389 |
+
"""
|
390 |
+
|
391 |
+
import doctest
|
392 |
+
|
393 |
+
doctest.testmod()
|
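Below is a minimal, self-contained sketch (not part of the diff above) of the kind of java invocation that MaltParser.generate_malt_command() assembles for parsing. The jar locations and model name are hypothetical placeholders; the class itself discovers them via find_maltparser() and find_malt_model().

import sys

# Hypothetical jar paths; the real class collects these with find_maltparser().
malt_jars = [
    "/home/user/maltparser-1.9.2/maltparser-1.9.2.jar",
    "/home/user/maltparser-1.9.2/lib/liblinear-1.8.jar",
]
# Classpath entries are joined with ";" on Windows and ":" elsewhere.
classpath_sep = ";" if sys.platform.startswith("win") else ":"
cmd = (
    ["java", "-Xmx1024m"]                     # additional_java_args
    + ["-cp", classpath_sep.join(malt_jars)]  # classpath of the discovered jars
    + ["org.maltparser.Malt"]                 # main class
    + ["-c", "engmalt.linear-1.7.mco"]        # model name passed to -c
    + ["-i", "input.conll", "-o", "output.conll", "-m", "parse"]
)
print(" ".join(cmd))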
llmeval-env/lib/python3.10/site-packages/nltk/parse/stanford.py
ADDED
@@ -0,0 +1,470 @@
1 |
+
# Natural Language Toolkit: Interface to the Stanford Parser
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Steven Xu <[email protected]>
|
5 |
+
#
|
6 |
+
# URL: <https://www.nltk.org/>
|
7 |
+
# For license information, see LICENSE.TXT
|
8 |
+
|
9 |
+
import os
|
10 |
+
import tempfile
|
11 |
+
import warnings
|
12 |
+
from subprocess import PIPE
|
13 |
+
|
14 |
+
from nltk.internals import (
|
15 |
+
_java_options,
|
16 |
+
config_java,
|
17 |
+
find_jar_iter,
|
18 |
+
find_jars_within_path,
|
19 |
+
java,
|
20 |
+
)
|
21 |
+
from nltk.parse.api import ParserI
|
22 |
+
from nltk.parse.dependencygraph import DependencyGraph
|
23 |
+
from nltk.tree import Tree
|
24 |
+
|
25 |
+
_stanford_url = "https://nlp.stanford.edu/software/lex-parser.shtml"
|
26 |
+
|
27 |
+
|
28 |
+
class GenericStanfordParser(ParserI):
|
29 |
+
"""Interface to the Stanford Parser"""
|
30 |
+
|
31 |
+
_MODEL_JAR_PATTERN = r"stanford-parser-(\d+)(\.(\d+))+-models\.jar"
|
32 |
+
_JAR = r"stanford-parser\.jar"
|
33 |
+
_MAIN_CLASS = "edu.stanford.nlp.parser.lexparser.LexicalizedParser"
|
34 |
+
|
35 |
+
_USE_STDIN = False
|
36 |
+
_DOUBLE_SPACED_OUTPUT = False
|
37 |
+
|
38 |
+
def __init__(
|
39 |
+
self,
|
40 |
+
path_to_jar=None,
|
41 |
+
path_to_models_jar=None,
|
42 |
+
model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz",
|
43 |
+
encoding="utf8",
|
44 |
+
verbose=False,
|
45 |
+
java_options="-mx4g",
|
46 |
+
corenlp_options="",
|
47 |
+
):
|
48 |
+
|
49 |
+
# find the most recent code and model jar
|
50 |
+
stanford_jar = max(
|
51 |
+
find_jar_iter(
|
52 |
+
self._JAR,
|
53 |
+
path_to_jar,
|
54 |
+
env_vars=("STANFORD_PARSER", "STANFORD_CORENLP"),
|
55 |
+
searchpath=(),
|
56 |
+
url=_stanford_url,
|
57 |
+
verbose=verbose,
|
58 |
+
is_regex=True,
|
59 |
+
),
|
60 |
+
key=lambda model_path: os.path.dirname(model_path),
|
61 |
+
)
|
62 |
+
|
63 |
+
model_jar = max(
|
64 |
+
find_jar_iter(
|
65 |
+
self._MODEL_JAR_PATTERN,
|
66 |
+
path_to_models_jar,
|
67 |
+
env_vars=("STANFORD_MODELS", "STANFORD_CORENLP"),
|
68 |
+
searchpath=(),
|
69 |
+
url=_stanford_url,
|
70 |
+
verbose=verbose,
|
71 |
+
is_regex=True,
|
72 |
+
),
|
73 |
+
key=lambda model_path: os.path.dirname(model_path),
|
74 |
+
)
|
75 |
+
|
76 |
+
# self._classpath = (stanford_jar, model_jar)
|
77 |
+
|
78 |
+
# Adding logging jar files to classpath
|
79 |
+
stanford_dir = os.path.split(stanford_jar)[0]
|
80 |
+
self._classpath = tuple([model_jar] + find_jars_within_path(stanford_dir))
|
81 |
+
|
82 |
+
self.model_path = model_path
|
83 |
+
self._encoding = encoding
|
84 |
+
self.corenlp_options = corenlp_options
|
85 |
+
self.java_options = java_options
|
86 |
+
|
87 |
+
def _parse_trees_output(self, output_):
|
88 |
+
res = []
|
89 |
+
cur_lines = []
|
90 |
+
cur_trees = []
|
91 |
+
blank = False
|
92 |
+
for line in output_.splitlines(False):
|
93 |
+
if line == "":
|
94 |
+
if blank:
|
95 |
+
res.append(iter(cur_trees))
|
96 |
+
cur_trees = []
|
97 |
+
blank = False
|
98 |
+
elif self._DOUBLE_SPACED_OUTPUT:
|
99 |
+
cur_trees.append(self._make_tree("\n".join(cur_lines)))
|
100 |
+
cur_lines = []
|
101 |
+
blank = True
|
102 |
+
else:
|
103 |
+
res.append(iter([self._make_tree("\n".join(cur_lines))]))
|
104 |
+
cur_lines = []
|
105 |
+
else:
|
106 |
+
cur_lines.append(line)
|
107 |
+
blank = False
|
108 |
+
return iter(res)
|
109 |
+
|
110 |
+
def parse_sents(self, sentences, verbose=False):
|
111 |
+
"""
|
112 |
+
Use StanfordParser to parse multiple sentences. Takes multiple sentences as a
|
113 |
+
list where each sentence is a list of words.
|
114 |
+
Each sentence will be automatically tagged by the Stanford
|
115 |
+
Parser.
|
116 |
+
If whitespace exists inside a token, then the token will be treated as
|
117 |
+
separate tokens.
|
118 |
+
|
119 |
+
:param sentences: Input sentences to parse
|
120 |
+
:type sentences: list(list(str))
|
121 |
+
:rtype: iter(iter(Tree))
|
122 |
+
"""
|
123 |
+
cmd = [
|
124 |
+
self._MAIN_CLASS,
|
125 |
+
"-model",
|
126 |
+
self.model_path,
|
127 |
+
"-sentences",
|
128 |
+
"newline",
|
129 |
+
"-outputFormat",
|
130 |
+
self._OUTPUT_FORMAT,
|
131 |
+
"-tokenized",
|
132 |
+
"-escaper",
|
133 |
+
"edu.stanford.nlp.process.PTBEscapingProcessor",
|
134 |
+
]
|
135 |
+
return self._parse_trees_output(
|
136 |
+
self._execute(
|
137 |
+
cmd, "\n".join(" ".join(sentence) for sentence in sentences), verbose
|
138 |
+
)
|
139 |
+
)
|
140 |
+
|
141 |
+
def raw_parse(self, sentence, verbose=False):
|
142 |
+
"""
|
143 |
+
Use StanfordParser to parse a sentence. Takes a sentence as a string;
|
144 |
+
before parsing, it will be automatically tokenized and tagged by
|
145 |
+
the Stanford Parser.
|
146 |
+
|
147 |
+
:param sentence: Input sentence to parse
|
148 |
+
:type sentence: str
|
149 |
+
:rtype: iter(Tree)
|
150 |
+
"""
|
151 |
+
return next(self.raw_parse_sents([sentence], verbose))
|
152 |
+
|
153 |
+
def raw_parse_sents(self, sentences, verbose=False):
|
154 |
+
"""
|
155 |
+
Use StanfordParser to parse multiple sentences. Takes multiple sentences as a
|
156 |
+
list of strings.
|
157 |
+
Each sentence will be automatically tokenized and tagged by the Stanford Parser.
|
158 |
+
|
159 |
+
:param sentences: Input sentences to parse
|
160 |
+
:type sentences: list(str)
|
161 |
+
:rtype: iter(iter(Tree))
|
162 |
+
"""
|
163 |
+
cmd = [
|
164 |
+
self._MAIN_CLASS,
|
165 |
+
"-model",
|
166 |
+
self.model_path,
|
167 |
+
"-sentences",
|
168 |
+
"newline",
|
169 |
+
"-outputFormat",
|
170 |
+
self._OUTPUT_FORMAT,
|
171 |
+
]
|
172 |
+
return self._parse_trees_output(
|
173 |
+
self._execute(cmd, "\n".join(sentences), verbose)
|
174 |
+
)
|
175 |
+
|
176 |
+
def tagged_parse(self, sentence, verbose=False):
|
177 |
+
"""
|
178 |
+
Use StanfordParser to parse a sentence. Takes a sentence as a list of
|
179 |
+
(word, tag) tuples; the sentence must have already been tokenized and
|
180 |
+
tagged.
|
181 |
+
|
182 |
+
:param sentence: Input sentence to parse
|
183 |
+
:type sentence: list(tuple(str, str))
|
184 |
+
:rtype: iter(Tree)
|
185 |
+
"""
|
186 |
+
return next(self.tagged_parse_sents([sentence], verbose))
|
187 |
+
|
188 |
+
def tagged_parse_sents(self, sentences, verbose=False):
|
189 |
+
"""
|
190 |
+
Use StanfordParser to parse multiple sentences. Takes multiple sentences
|
191 |
+
where each sentence is a list of (word, tag) tuples.
|
192 |
+
The sentences must have already been tokenized and tagged.
|
193 |
+
|
194 |
+
:param sentences: Input sentences to parse
|
195 |
+
:type sentences: list(list(tuple(str, str)))
|
196 |
+
:rtype: iter(iter(Tree))
|
197 |
+
"""
|
198 |
+
tag_separator = "/"
|
199 |
+
cmd = [
|
200 |
+
self._MAIN_CLASS,
|
201 |
+
"-model",
|
202 |
+
self.model_path,
|
203 |
+
"-sentences",
|
204 |
+
"newline",
|
205 |
+
"-outputFormat",
|
206 |
+
self._OUTPUT_FORMAT,
|
207 |
+
"-tokenized",
|
208 |
+
"-tagSeparator",
|
209 |
+
tag_separator,
|
210 |
+
"-tokenizerFactory",
|
211 |
+
"edu.stanford.nlp.process.WhitespaceTokenizer",
|
212 |
+
"-tokenizerMethod",
|
213 |
+
"newCoreLabelTokenizerFactory",
|
214 |
+
]
|
215 |
+
# We don't need to escape slashes as "splitting is done on the last instance of the character in the token"
|
216 |
+
return self._parse_trees_output(
|
217 |
+
self._execute(
|
218 |
+
cmd,
|
219 |
+
"\n".join(
|
220 |
+
" ".join(tag_separator.join(tagged) for tagged in sentence)
|
221 |
+
for sentence in sentences
|
222 |
+
),
|
223 |
+
verbose,
|
224 |
+
)
|
225 |
+
)
|
226 |
+
|
227 |
+
def _execute(self, cmd, input_, verbose=False):
|
228 |
+
encoding = self._encoding
|
229 |
+
cmd.extend(["-encoding", encoding])
|
230 |
+
if self.corenlp_options:
|
231 |
+
cmd.extend(self.corenlp_options.split())
|
232 |
+
|
233 |
+
default_options = " ".join(_java_options)
|
234 |
+
|
235 |
+
# Configure java.
|
236 |
+
config_java(options=self.java_options, verbose=verbose)
|
237 |
+
|
238 |
+
# Windows is incompatible with NamedTemporaryFile() without passing in delete=False.
|
239 |
+
with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file:
|
240 |
+
# Write the actual sentences to the temporary input file
|
241 |
+
if isinstance(input_, str) and encoding:
|
242 |
+
input_ = input_.encode(encoding)
|
243 |
+
input_file.write(input_)
|
244 |
+
input_file.flush()
|
245 |
+
|
246 |
+
# Run the tagger and get the output.
|
247 |
+
if self._USE_STDIN:
|
248 |
+
input_file.seek(0)
|
249 |
+
stdout, stderr = java(
|
250 |
+
cmd,
|
251 |
+
classpath=self._classpath,
|
252 |
+
stdin=input_file,
|
253 |
+
stdout=PIPE,
|
254 |
+
stderr=PIPE,
|
255 |
+
)
|
256 |
+
else:
|
257 |
+
cmd.append(input_file.name)
|
258 |
+
stdout, stderr = java(
|
259 |
+
cmd, classpath=self._classpath, stdout=PIPE, stderr=PIPE
|
260 |
+
)
|
261 |
+
|
262 |
+
stdout = stdout.replace(b"\xc2\xa0", b" ")
|
263 |
+
stdout = stdout.replace(b"\x00\xa0", b" ")
|
264 |
+
stdout = stdout.decode(encoding)
|
265 |
+
|
266 |
+
os.unlink(input_file.name)
|
267 |
+
|
268 |
+
# Return java configurations to their default values.
|
269 |
+
config_java(options=default_options, verbose=False)
|
270 |
+
|
271 |
+
return stdout
|
272 |
+
|
273 |
+
|
274 |
+
class StanfordParser(GenericStanfordParser):
|
275 |
+
"""
|
276 |
+
>>> parser=StanfordParser(
|
277 |
+
... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
|
278 |
+
... ) # doctest: +SKIP
|
279 |
+
|
280 |
+
>>> list(parser.raw_parse("the quick brown fox jumps over the lazy dog")) # doctest: +NORMALIZE_WHITESPACE +SKIP
|
281 |
+
[Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
|
282 |
+
Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']),
|
283 |
+
Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])])]
|
284 |
+
|
285 |
+
>>> sum([list(dep_graphs) for dep_graphs in parser.raw_parse_sents((
|
286 |
+
... "the quick brown fox jumps over the lazy dog",
|
287 |
+
... "the quick grey wolf jumps over the lazy fox"
|
288 |
+
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
|
289 |
+
[Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
|
290 |
+
Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']),
|
291 |
+
Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])]), Tree('ROOT', [Tree('NP',
|
292 |
+
[Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['grey']), Tree('NN', ['wolf'])]), Tree('NP',
|
293 |
+
[Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), Tree('NP', [Tree('DT', ['the']),
|
294 |
+
Tree('JJ', ['lazy']), Tree('NN', ['fox'])])])])])])]
|
295 |
+
|
296 |
+
>>> sum([list(dep_graphs) for dep_graphs in parser.parse_sents((
|
297 |
+
... "I 'm a dog".split(),
|
298 |
+
... "This is my friends ' cat ( the tabby )".split(),
|
299 |
+
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
|
300 |
+
[Tree('ROOT', [Tree('S', [Tree('NP', [Tree('PRP', ['I'])]), Tree('VP', [Tree('VBP', ["'m"]),
|
301 |
+
Tree('NP', [Tree('DT', ['a']), Tree('NN', ['dog'])])])])]), Tree('ROOT', [Tree('S', [Tree('NP',
|
302 |
+
[Tree('DT', ['This'])]), Tree('VP', [Tree('VBZ', ['is']), Tree('NP', [Tree('NP', [Tree('NP', [Tree('PRP$', ['my']),
|
303 |
+
Tree('NNS', ['friends']), Tree('POS', ["'"])]), Tree('NN', ['cat'])]), Tree('PRN', [Tree('-LRB-', [Tree('', []),
|
304 |
+
Tree('NP', [Tree('DT', ['the']), Tree('NN', ['tabby'])]), Tree('-RRB-', [])])])])])])])]
|
305 |
+
|
306 |
+
>>> sum([list(dep_graphs) for dep_graphs in parser.tagged_parse_sents((
|
307 |
+
... (
|
308 |
+
... ("The", "DT"),
|
309 |
+
... ("quick", "JJ"),
|
310 |
+
... ("brown", "JJ"),
|
311 |
+
... ("fox", "NN"),
|
312 |
+
... ("jumped", "VBD"),
|
313 |
+
... ("over", "IN"),
|
314 |
+
... ("the", "DT"),
|
315 |
+
... ("lazy", "JJ"),
|
316 |
+
... ("dog", "NN"),
|
317 |
+
... (".", "."),
|
318 |
+
... ),
|
319 |
+
... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP
|
320 |
+
[Tree('ROOT', [Tree('S', [Tree('NP', [Tree('DT', ['The']), Tree('JJ', ['quick']), Tree('JJ', ['brown']),
|
321 |
+
Tree('NN', ['fox'])]), Tree('VP', [Tree('VBD', ['jumped']), Tree('PP', [Tree('IN', ['over']), Tree('NP',
|
322 |
+
[Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])]), Tree('.', ['.'])])])]
|
323 |
+
"""
|
324 |
+
|
325 |
+
_OUTPUT_FORMAT = "penn"
|
326 |
+
|
327 |
+
def __init__(self, *args, **kwargs):
|
328 |
+
warnings.warn(
|
329 |
+
"The StanfordParser will be deprecated\n"
|
330 |
+
"Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.",
|
331 |
+
DeprecationWarning,
|
332 |
+
stacklevel=2,
|
333 |
+
)
|
334 |
+
|
335 |
+
super().__init__(*args, **kwargs)
|
336 |
+
|
337 |
+
def _make_tree(self, result):
|
338 |
+
return Tree.fromstring(result)
|
339 |
+
|
340 |
+
|
341 |
+
class StanfordDependencyParser(GenericStanfordParser):
|
342 |
+
|
343 |
+
"""
|
344 |
+
>>> dep_parser=StanfordDependencyParser(
|
345 |
+
... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
|
346 |
+
... ) # doctest: +SKIP
|
347 |
+
|
348 |
+
>>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
|
349 |
+
[Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])])]
|
350 |
+
|
351 |
+
>>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
|
352 |
+
[[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')),
|
353 |
+
((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')),
|
354 |
+
((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')),
|
355 |
+
((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]]
|
356 |
+
|
357 |
+
>>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents((
|
358 |
+
... "The quick brown fox jumps over the lazy dog.",
|
359 |
+
... "The quick grey wolf jumps over the lazy fox."
|
360 |
+
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
|
361 |
+
[Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])]),
|
362 |
+
Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']), Tree('fox', ['over', 'the', 'lazy'])])]
|
363 |
+
|
364 |
+
>>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents((
|
365 |
+
... "I 'm a dog".split(),
|
366 |
+
... "This is my friends ' cat ( the tabby )".split(),
|
367 |
+
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
|
368 |
+
[Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends', ['my', "'"]), Tree('tabby', ['the'])])]
|
369 |
+
|
370 |
+
>>> sum([[list(parse.triples()) for parse in dep_graphs] for dep_graphs in dep_parser.tagged_parse_sents((
|
371 |
+
... (
|
372 |
+
... ("The", "DT"),
|
373 |
+
... ("quick", "JJ"),
|
374 |
+
... ("brown", "JJ"),
|
375 |
+
... ("fox", "NN"),
|
376 |
+
... ("jumped", "VBD"),
|
377 |
+
... ("over", "IN"),
|
378 |
+
... ("the", "DT"),
|
379 |
+
... ("lazy", "JJ"),
|
380 |
+
... ("dog", "NN"),
|
381 |
+
... (".", "."),
|
382 |
+
... ),
|
383 |
+
... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP
|
384 |
+
[[((u'jumped', u'VBD'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')),
|
385 |
+
((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')),
|
386 |
+
((u'jumped', u'VBD'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')),
|
387 |
+
((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]]
|
388 |
+
|
389 |
+
"""
|
390 |
+
|
391 |
+
_OUTPUT_FORMAT = "conll2007"
|
392 |
+
|
393 |
+
def __init__(self, *args, **kwargs):
|
394 |
+
warnings.warn(
|
395 |
+
"The StanfordDependencyParser will be deprecated\n"
|
396 |
+
"Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.",
|
397 |
+
DeprecationWarning,
|
398 |
+
stacklevel=2,
|
399 |
+
)
|
400 |
+
|
401 |
+
super().__init__(*args, **kwargs)
|
402 |
+
|
403 |
+
def _make_tree(self, result):
|
404 |
+
return DependencyGraph(result, top_relation_label="root")
|
405 |
+
|
406 |
+
|
407 |
+
class StanfordNeuralDependencyParser(GenericStanfordParser):
|
408 |
+
"""
|
409 |
+
>>> from nltk.parse.stanford import StanfordNeuralDependencyParser # doctest: +SKIP
|
410 |
+
>>> dep_parser=StanfordNeuralDependencyParser(java_options='-mx4g')# doctest: +SKIP
|
411 |
+
|
412 |
+
>>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
|
413 |
+
[Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy']), '.'])]
|
414 |
+
|
415 |
+
>>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP
|
416 |
+
[[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det',
|
417 |
+
(u'The', u'DT')), ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'),
|
418 |
+
u'amod', (u'brown', u'JJ')), ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')),
|
419 |
+
((u'dog', u'NN'), u'case', (u'over', u'IN')), ((u'dog', u'NN'), u'det',
|
420 |
+
(u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ')), ((u'jumps', u'VBZ'),
|
421 |
+
u'punct', (u'.', u'.'))]]
|
422 |
+
|
423 |
+
>>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents((
|
424 |
+
... "The quick brown fox jumps over the lazy dog.",
|
425 |
+
... "The quick grey wolf jumps over the lazy fox."
|
426 |
+
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
|
427 |
+
[Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over',
|
428 |
+
'the', 'lazy']), '.']), Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']),
|
429 |
+
Tree('fox', ['over', 'the', 'lazy']), '.'])]
|
430 |
+
|
431 |
+
>>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents((
|
432 |
+
... "I 'm a dog".split(),
|
433 |
+
... "This is my friends ' cat ( the tabby )".split(),
|
434 |
+
... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP
|
435 |
+
[Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends',
|
436 |
+
['my', "'"]), Tree('tabby', ['-LRB-', 'the', '-RRB-'])])]
|
437 |
+
"""
|
438 |
+
|
439 |
+
_OUTPUT_FORMAT = "conll"
|
440 |
+
_MAIN_CLASS = "edu.stanford.nlp.pipeline.StanfordCoreNLP"
|
441 |
+
_JAR = r"stanford-corenlp-(\d+)(\.(\d+))+\.jar"
|
442 |
+
_MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)(\.(\d+))+-models\.jar"
|
443 |
+
_USE_STDIN = True
|
444 |
+
_DOUBLE_SPACED_OUTPUT = True
|
445 |
+
|
446 |
+
def __init__(self, *args, **kwargs):
|
447 |
+
warnings.warn(
|
448 |
+
"The StanfordNeuralDependencyParser will be deprecated\n"
|
449 |
+
"Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.",
|
450 |
+
DeprecationWarning,
|
451 |
+
stacklevel=2,
|
452 |
+
)
|
453 |
+
|
454 |
+
super().__init__(*args, **kwargs)
|
455 |
+
self.corenlp_options += "-annotators tokenize,ssplit,pos,depparse"
|
456 |
+
|
457 |
+
def tagged_parse_sents(self, sentences, verbose=False):
|
458 |
+
"""
|
459 |
+
Currently unimplemented because the neural dependency parser (and
|
460 |
+
the StanfordCoreNLP pipeline class) doesn't support passing in pre-
|
461 |
+
tagged tokens.
|
462 |
+
"""
|
463 |
+
raise NotImplementedError(
|
464 |
+
"tagged_parse[_sents] is not supported by "
|
465 |
+
"StanfordNeuralDependencyParser; use "
|
466 |
+
"parse[_sents] or raw_parse[_sents] instead."
|
467 |
+
)
|
468 |
+
|
469 |
+
def _make_tree(self, result):
|
470 |
+
return DependencyGraph(result, top_relation_label="ROOT")
|
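As a rough illustration of how GenericStanfordParser._parse_trees_output() turns the parser's "penn" output into nltk Tree objects, the sketch below splits an invented bracketed string on blank lines and rebuilds the trees; the real method also handles the double-spaced output produced by the CoreNLP pipeline.

from nltk.tree import Tree

# Invented "penn"-style output: one bracketed parse per block, blank-line separated.
penn_output = "(ROOT (NP (DT the) (NN dog)))\n\n(ROOT (NP (DT a) (NN cat)))"
trees = [Tree.fromstring(block) for block in penn_output.split("\n\n") if block.strip()]
print(trees[0].label(), trees[1].leaves())  # ROOT ['a', 'cat']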
llmeval-env/lib/python3.10/site-packages/nltk/stem/__init__.py
ADDED
@@ -0,0 +1,34 @@
1 |
+
# Natural Language Toolkit: Stemmers
|
2 |
+
#
|
3 |
+
# Copyright (C) 2001-2023 NLTK Project
|
4 |
+
# Author: Trevor Cohn <[email protected]>
|
5 |
+
# Edward Loper <[email protected]>
|
6 |
+
# Steven Bird <[email protected]>
|
7 |
+
# URL: <https://www.nltk.org/>
|
8 |
+
# For license information, see LICENSE.TXT
|
9 |
+
|
10 |
+
"""
|
11 |
+
NLTK Stemmers
|
12 |
+
|
13 |
+
Interfaces used to remove morphological affixes from words, leaving
|
14 |
+
only the word stem. Stemming algorithms aim to remove those affixes
|
15 |
+
required for e.g. grammatical role, tense, and derivational morphology,
|
16 |
+
leaving only the stem of the word. This is a difficult problem due to
|
17 |
+
irregular words (e.g. common verbs in English), complicated
|
18 |
+
morphological rules, and part-of-speech and sense ambiguities
|
19 |
+
(e.g. ``ceil-`` is not the stem of ``ceiling``).
|
20 |
+
|
21 |
+
StemmerI defines a standard interface for stemmers.
|
22 |
+
"""
|
23 |
+
|
24 |
+
from nltk.stem.api import StemmerI
|
25 |
+
from nltk.stem.arlstem import ARLSTem
|
26 |
+
from nltk.stem.arlstem2 import ARLSTem2
|
27 |
+
from nltk.stem.cistem import Cistem
|
28 |
+
from nltk.stem.isri import ISRIStemmer
|
29 |
+
from nltk.stem.lancaster import LancasterStemmer
|
30 |
+
from nltk.stem.porter import PorterStemmer
|
31 |
+
from nltk.stem.regexp import RegexpStemmer
|
32 |
+
from nltk.stem.rslp import RSLPStemmer
|
33 |
+
from nltk.stem.snowball import SnowballStemmer
|
34 |
+
from nltk.stem.wordnet import WordNetLemmatizer
|
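A short, illustrative usage sketch of the common StemmerI interface exported above: every stemmer exposes a stem() method, so implementations can be swapped behind the same call. PorterStemmer and LancasterStemmer are used here because they need no extra data downloads.

from nltk.stem import LancasterStemmer, PorterStemmer

# Each stemmer implements StemmerI.stem(); compare their outputs on a few words.
for stemmer in (PorterStemmer(), LancasterStemmer()):
    stems = [stemmer.stem(word) for word in ("running", "flies", "happily")]
    print(type(stemmer).__name__, stems)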
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.29 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc
ADDED
Binary file (827 Bytes)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc
ADDED
Binary file (8.3 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc
ADDED
Binary file (9.97 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc
ADDED
Binary file (6.33 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc
ADDED
Binary file (9.24 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc
ADDED
Binary file (6.41 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc
ADDED
Binary file (21.6 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc
ADDED
Binary file (1.73 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc
ADDED
Binary file (3.09 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc
ADDED
Binary file (97.4 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/util.cpython-310.pyc
ADDED
Binary file (687 Bytes)
llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/wordnet.cpython-310.pyc
ADDED
Binary file (1.86 kB)
llmeval-env/lib/python3.10/site-packages/nltk/stem/arlstem.py
ADDED
@@ -0,0 +1,361 @@
1 |
+
#
|
2 |
+
# Natural Language Toolkit: ARLSTem Stemmer
|
3 |
+
#
|
4 |
+
# Copyright (C) 2001-2023 NLTK Project
|
5 |
+
#
|
6 |
+
# Author: Kheireddine Abainia (x-programer) <[email protected]>
|
7 |
+
# Algorithms: Kheireddine Abainia <[email protected]>
|
8 |
+
# Siham Ouamour
|
9 |
+
# Halim Sayoud
|
10 |
+
# URL: <https://www.nltk.org/>
|
11 |
+
# For license information, see LICENSE.TXT
|
12 |
+
|
13 |
+
|
14 |
+
"""
|
15 |
+
ARLSTem Arabic Stemmer
|
16 |
+
The details about the implementation of this algorithm are described in:
|
17 |
+
K. Abainia, S. Ouamour and H. Sayoud, A Novel Robust Arabic Light Stemmer,
|
18 |
+
Journal of Experimental & Theoretical Artificial Intelligence (JETAI'17),
|
19 |
+
Vol. 29, No. 3, 2017, pp. 557-573.
|
20 |
+
The ARLSTem is a light Arabic stemmer that is based on removing the affixes
|
21 |
+
from the word (i.e. prefixes, suffixes and infixes). It was evaluated and
|
22 |
+
compared to several other stemmers using Paice's parameters (under-stemming
|
23 |
+
index, over-stemming index and stemming weight), and the results showed that
|
24 |
+
ARLSTem is promising and producing high performances. This stemmer is not
|
25 |
+
based on any dictionary and can be used on-line effectively.
|
26 |
+
"""
|
27 |
+
import re
|
28 |
+
|
29 |
+
from nltk.stem.api import StemmerI
|
30 |
+
|
31 |
+
|
32 |
+
class ARLSTem(StemmerI):
|
33 |
+
"""
|
34 |
+
ARLSTem stemmer : a light Arabic Stemming algorithm without any dictionary.
|
35 |
+
Department of Telecommunication & Information Processing. USTHB University,
|
36 |
+
Algiers, Algeria.
|
37 |
+
ARLSTem.stem(token) returns the Arabic stem for the input token.
|
38 |
+
The ARLSTem Stemmer requires that all tokens are encoded using Unicode
|
39 |
+
encoding.
|
40 |
+
"""
|
41 |
+
|
42 |
+
def __init__(self):
|
43 |
+
# different Alif with hamza
|
44 |
+
self.re_hamzated_alif = re.compile(r"[\u0622\u0623\u0625]")
|
45 |
+
self.re_alifMaqsura = re.compile(r"[\u0649]")
|
46 |
+
self.re_diacritics = re.compile(r"[\u064B-\u065F]")
|
47 |
+
|
48 |
+
# Alif Laam, Laam Laam, Fa Laam, Fa Ba
|
49 |
+
self.pr2 = ["\u0627\u0644", "\u0644\u0644", "\u0641\u0644", "\u0641\u0628"]
|
50 |
+
# Ba Alif Laam, Kaaf Alif Laam, Waaw Alif Laam
|
51 |
+
self.pr3 = ["\u0628\u0627\u0644", "\u0643\u0627\u0644", "\u0648\u0627\u0644"]
|
52 |
+
# Fa Laam Laam, Waaw Laam Laam
|
53 |
+
self.pr32 = ["\u0641\u0644\u0644", "\u0648\u0644\u0644"]
|
54 |
+
# Fa Ba Alif Laam, Waaw Ba Alif Laam, Fa Kaaf Alif Laam
|
55 |
+
self.pr4 = [
|
56 |
+
"\u0641\u0628\u0627\u0644",
|
57 |
+
"\u0648\u0628\u0627\u0644",
|
58 |
+
"\u0641\u0643\u0627\u0644",
|
59 |
+
]
|
60 |
+
|
61 |
+
# Kaf Yaa, Kaf Miim
|
62 |
+
self.su2 = ["\u0643\u064A", "\u0643\u0645"]
|
63 |
+
# Ha Alif, Ha Miim
|
64 |
+
self.su22 = ["\u0647\u0627", "\u0647\u0645"]
|
65 |
+
# Kaf Miim Alif, Kaf Noon Shadda
|
66 |
+
self.su3 = ["\u0643\u0645\u0627", "\u0643\u0646\u0651"]
|
67 |
+
# Ha Miim Alif, Ha Noon Shadda
|
68 |
+
self.su32 = ["\u0647\u0645\u0627", "\u0647\u0646\u0651"]
|
69 |
+
|
70 |
+
# Alif Noon, Ya Noon, Waaw Noon
|
71 |
+
self.pl_si2 = ["\u0627\u0646", "\u064A\u0646", "\u0648\u0646"]
|
72 |
+
# Taa Alif Noon, Taa Ya Noon
|
73 |
+
self.pl_si3 = ["\u062A\u0627\u0646", "\u062A\u064A\u0646"]
|
74 |
+
|
75 |
+
# Alif Noon, Waaw Noon
|
76 |
+
self.verb_su2 = ["\u0627\u0646", "\u0648\u0646"]
|
77 |
+
# Siin Taa, Siin Yaa
|
78 |
+
self.verb_pr2 = ["\u0633\u062A", "\u0633\u064A"]
|
79 |
+
# Siin Alif, Siin Noon
|
80 |
+
self.verb_pr22 = ["\u0633\u0627", "\u0633\u0646"]
|
81 |
+
# Lam Noon, Lam Taa, Lam Yaa, Lam Hamza
|
82 |
+
self.verb_pr33 = [
|
83 |
+
"\u0644\u0646",
|
84 |
+
"\u0644\u062A",
|
85 |
+
"\u0644\u064A",
|
86 |
+
"\u0644\u0623",
|
87 |
+
]
|
88 |
+
# Taa Miim Alif, Taa Noon Shadda
|
89 |
+
self.verb_suf3 = ["\u062A\u0645\u0627", "\u062A\u0646\u0651"]
|
90 |
+
# Noon Alif, Taa Miim, Taa Alif, Waaw Alif
|
91 |
+
self.verb_suf2 = [
|
92 |
+
"\u0646\u0627",
|
93 |
+
"\u062A\u0645",
|
94 |
+
"\u062A\u0627",
|
95 |
+
"\u0648\u0627",
|
96 |
+
]
|
97 |
+
# Taa, Alif, Noon
|
98 |
+
self.verb_suf1 = ["\u062A", "\u0627", "\u0646"]
|
99 |
+
|
100 |
+
def stem(self, token):
|
101 |
+
"""
|
102 |
+
Call this function to get the word's stem based on ARLSTem.
|
103 |
+
"""
|
104 |
+
try:
|
105 |
+
if token is None:
|
106 |
+
raise ValueError(
|
107 |
+
"The word could not be stemmed, because \
|
108 |
+
it is empty !"
|
109 |
+
)
|
110 |
+
# remove Arabic diacritics and replace some letters with others
|
111 |
+
token = self.norm(token)
|
112 |
+
# strip common prefixes of the nouns
|
113 |
+
pre = self.pref(token)
|
114 |
+
if pre is not None:
|
115 |
+
token = pre
|
116 |
+
# strip the suffixes which are common to nouns and verbs
|
117 |
+
token = self.suff(token)
|
118 |
+
# transform a plural noun to a singular noun
|
119 |
+
ps = self.plur2sing(token)
|
120 |
+
if ps is None:
|
121 |
+
# transform from the feminine form to the masculine form
|
122 |
+
fm = self.fem2masc(token)
|
123 |
+
if fm is not None:
|
124 |
+
return fm
|
125 |
+
else:
|
126 |
+
if pre is None: # if the prefixes are not stripped
|
127 |
+
# strip the verb prefixes and suffixes
|
128 |
+
return self.verb(token)
|
129 |
+
else:
|
130 |
+
return ps
|
131 |
+
return token
|
132 |
+
except ValueError as e:
|
133 |
+
print(e)
|
134 |
+
|
135 |
+
def norm(self, token):
|
136 |
+
"""
|
137 |
+
normalize the word by removing diacritics, replacing hamzated Alif
|
138 |
+
with Alif replacing AlifMaqsura with Yaa and removing Waaw at the
|
139 |
+
beginning.
|
140 |
+
"""
|
141 |
+
# strip Arabic diacritics
|
142 |
+
token = self.re_diacritics.sub("", token)
|
143 |
+
# replace Hamzated Alif with Alif bare
|
144 |
+
token = self.re_hamzated_alif.sub("\u0627", token)
|
145 |
+
# replace alifMaqsura with Yaa
|
146 |
+
token = self.re_alifMaqsura.sub("\u064A", token)
|
147 |
+
# strip the Waaw from the word beginning if the remaining is 3 letters
|
148 |
+
# at least
|
149 |
+
if token.startswith("\u0648") and len(token) > 3:
|
150 |
+
token = token[1:]
|
151 |
+
return token
|
152 |
+
|
153 |
+
def pref(self, token):
|
154 |
+
"""
|
155 |
+
remove prefixes from the words' beginning.
|
156 |
+
"""
|
157 |
+
if len(token) > 5:
|
158 |
+
for p3 in self.pr3:
|
159 |
+
if token.startswith(p3):
|
160 |
+
return token[3:]
|
161 |
+
if len(token) > 6:
|
162 |
+
for p4 in self.pr4:
|
163 |
+
if token.startswith(p4):
|
164 |
+
return token[4:]
|
165 |
+
if len(token) > 5:
|
166 |
+
for p3 in self.pr32:
|
167 |
+
if token.startswith(p3):
|
168 |
+
return token[3:]
|
169 |
+
if len(token) > 4:
|
170 |
+
for p2 in self.pr2:
|
171 |
+
if token.startswith(p2):
|
172 |
+
return token[2:]
|
173 |
+
|
174 |
+
def suff(self, token):
|
175 |
+
"""
|
176 |
+
remove suffixes from the word's end.
|
177 |
+
"""
|
178 |
+
if token.endswith("\u0643") and len(token) > 3:
|
179 |
+
return token[:-1]
|
180 |
+
if len(token) > 4:
|
181 |
+
for s2 in self.su2:
|
182 |
+
if token.endswith(s2):
|
183 |
+
return token[:-2]
|
184 |
+
if len(token) > 5:
|
185 |
+
for s3 in self.su3:
|
186 |
+
if token.endswith(s3):
|
187 |
+
return token[:-3]
|
188 |
+
if token.endswith("\u0647") and len(token) > 3:
|
189 |
+
token = token[:-1]
|
190 |
+
return token
|
191 |
+
if len(token) > 4:
|
192 |
+
for s2 in self.su22:
|
193 |
+
if token.endswith(s2):
|
194 |
+
return token[:-2]
|
195 |
+
if len(token) > 5:
|
196 |
+
for s3 in self.su32:
|
197 |
+
if token.endswith(s3):
|
198 |
+
return token[:-3]
|
199 |
+
if token.endswith("\u0646\u0627") and len(token) > 4:
|
200 |
+
return token[:-2]
|
201 |
+
return token
|
202 |
+
|
203 |
+
def fem2masc(self, token):
|
204 |
+
"""
|
205 |
+
transform the word from the feminine form to the masculine form.
|
206 |
+
"""
|
207 |
+
if token.endswith("\u0629") and len(token) > 3:
|
208 |
+
return token[:-1]
|
209 |
+
|
210 |
+
def plur2sing(self, token):
|
211 |
+
"""
|
212 |
+
transform the word from the plural form to the singular form.
|
213 |
+
"""
|
214 |
+
if len(token) > 4:
|
215 |
+
for ps2 in self.pl_si2:
|
216 |
+
if token.endswith(ps2):
|
217 |
+
return token[:-2]
|
218 |
+
if len(token) > 5:
|
219 |
+
for ps3 in self.pl_si3:
|
220 |
+
if token.endswith(ps3):
|
221 |
+
return token[:-3]
|
222 |
+
if len(token) > 3 and token.endswith("\u0627\u062A"):
|
223 |
+
return token[:-2]
|
224 |
+
if len(token) > 3 and token.startswith("\u0627") and token[2] == "\u0627":
|
225 |
+
return token[:2] + token[3:]
|
226 |
+
if len(token) > 4 and token.startswith("\u0627") and token[-2] == "\u0627":
|
227 |
+
return token[1:-2] + token[-1]
|
228 |
+
|
229 |
+
def verb(self, token):
|
230 |
+
"""
|
231 |
+
stem the verb prefixes and suffixes or both
|
232 |
+
"""
|
233 |
+
vb = self.verb_t1(token)
|
234 |
+
if vb is not None:
|
235 |
+
return vb
|
236 |
+
vb = self.verb_t2(token)
|
237 |
+
if vb is not None:
|
238 |
+
return vb
|
239 |
+
vb = self.verb_t3(token)
|
240 |
+
if vb is not None:
|
241 |
+
return vb
|
242 |
+
vb = self.verb_t4(token)
|
243 |
+
if vb is not None:
|
244 |
+
return vb
|
245 |
+
vb = self.verb_t5(token)
|
246 |
+
if vb is not None:
|
247 |
+
return vb
|
248 |
+
return self.verb_t6(token)
|
249 |
+
|
250 |
+
def verb_t1(self, token):
|
251 |
+
"""
|
252 |
+
stem the present prefixes and suffixes
|
253 |
+
"""
|
254 |
+
if len(token) > 5 and token.startswith("\u062A"): # Taa
|
255 |
+
for s2 in self.pl_si2:
|
256 |
+
if token.endswith(s2):
|
257 |
+
return token[1:-2]
|
258 |
+
if len(token) > 5 and token.startswith("\u064A"): # Yaa
|
259 |
+
for s2 in self.verb_su2:
|
260 |
+
if token.endswith(s2):
|
261 |
+
return token[1:-2]
|
262 |
+
if len(token) > 4 and token.startswith("\u0627"): # Alif
|
263 |
+
# Waaw Alif
|
264 |
+
if len(token) > 5 and token.endswith("\u0648\u0627"):
|
265 |
+
return token[1:-2]
|
266 |
+
# Yaa
|
267 |
+
if token.endswith("\u064A"):
|
268 |
+
return token[1:-1]
|
269 |
+
# Alif
|
270 |
+
if token.endswith("\u0627"):
|
271 |
+
return token[1:-1]
|
272 |
+
# Noon
|
273 |
+
if token.endswith("\u0646"):
|
274 |
+
return token[1:-1]
|
275 |
+
# ^Yaa, Noon$
|
276 |
+
if len(token) > 4 and token.startswith("\u064A") and token.endswith("\u0646"):
|
277 |
+
return token[1:-1]
|
278 |
+
# ^Taa, Noon$
|
279 |
+
if len(token) > 4 and token.startswith("\u062A") and token.endswith("\u0646"):
|
280 |
+
return token[1:-1]
|
281 |
+
|
282 |
+
def verb_t2(self, token):
|
283 |
+
"""
|
284 |
+
stem the future prefixes and suffixes
|
285 |
+
"""
|
286 |
+
if len(token) > 6:
|
287 |
+
for s2 in self.pl_si2:
|
288 |
+
# ^Siin Taa
|
289 |
+
if token.startswith(self.verb_pr2[0]) and token.endswith(s2):
|
290 |
+
return token[2:-2]
|
291 |
+
# ^Siin Yaa, Alif Noon$
|
292 |
+
if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[0]):
|
293 |
+
return token[2:-2]
|
294 |
+
# ^Siin Yaa, Waaw Noon$
|
295 |
+
if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[2]):
|
296 |
+
return token[2:-2]
|
297 |
+
# ^Siin Taa, Noon$
|
298 |
+
if (
|
299 |
+
len(token) > 5
|
300 |
+
and token.startswith(self.verb_pr2[0])
|
301 |
+
and token.endswith("\u0646")
|
302 |
+
):
|
303 |
+
return token[2:-1]
|
304 |
+
# ^Siin Yaa, Noon$
|
305 |
+
if (
|
306 |
+
len(token) > 5
|
307 |
+
and token.startswith(self.verb_pr2[1])
|
308 |
+
and token.endswith("\u0646")
|
309 |
+
):
|
310 |
+
return token[2:-1]
|
311 |
+
|
312 |
+
def verb_t3(self, token):
|
313 |
+
"""
|
314 |
+
stem the present suffixes
|
315 |
+
"""
|
316 |
+
if len(token) > 5:
|
317 |
+
for su3 in self.verb_suf3:
|
318 |
+
if token.endswith(su3):
|
319 |
+
return token[:-3]
|
320 |
+
if len(token) > 4:
|
321 |
+
for su2 in self.verb_suf2:
|
322 |
+
if token.endswith(su2):
|
323 |
+
return token[:-2]
|
324 |
+
if len(token) > 3:
|
325 |
+
for su1 in self.verb_suf1:
|
326 |
+
if token.endswith(su1):
|
327 |
+
return token[:-1]
|
328 |
+
|
329 |
+
def verb_t4(self, token):
|
330 |
+
"""
|
331 |
+
stem the present prefixes
|
332 |
+
"""
|
333 |
+
if len(token) > 3:
|
334 |
+
for pr1 in self.verb_suf1:
|
335 |
+
if token.startswith(pr1):
|
336 |
+
return token[1:]
|
337 |
+
if token.startswith("\u064A"):
|
338 |
+
return token[1:]
|
339 |
+
|
340 |
+
def verb_t5(self, token):
|
341 |
+
"""
|
342 |
+
stem the future prefixes
|
343 |
+
"""
|
344 |
+
if len(token) > 4:
|
345 |
+
for pr2 in self.verb_pr22:
|
346 |
+
if token.startswith(pr2):
|
347 |
+
return token[2:]
|
348 |
+
for pr2 in self.verb_pr2:
|
349 |
+
if token.startswith(pr2):
|
350 |
+
return token[2:]
|
351 |
+
return token
|
352 |
+
|
353 |
+
def verb_t6(self, token):
|
354 |
+
"""
|
355 |
+
stem the order prefixes
|
356 |
+
"""
|
357 |
+
if len(token) > 4:
|
358 |
+
for pr3 in self.verb_pr33:
|
359 |
+
if token.startswith(pr3):
|
360 |
+
return token[2:]
|
361 |
+
return token
|
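A small usage sketch for the stemmer defined above, assuming the module is importable as nltk.stem.arlstem; per the verb_t4 rule in the code, the present-tense Yaa prefix of the sample verb should be stripped, leaving the tri-literal root.

from nltk.stem.arlstem import ARLSTem

stemmer = ARLSTem()
word = "\u064A\u0639\u0645\u0644"  # Arabic verb meaning 'he works'
print(stemmer.stem(word))          # the leading Yaa is removed by verb_t4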
llmeval-env/lib/python3.10/site-packages/nltk/stem/arlstem2.py
ADDED
@@ -0,0 +1,457 @@
1 |
+
#
|
2 |
+
# Natural Language Toolkit: ARLSTem Stemmer v2
|
3 |
+
#
|
4 |
+
# Copyright (C) 2001-2023 NLTK Project
|
5 |
+
#
|
6 |
+
# Author: Kheireddine Abainia (x-programer) <[email protected]>
|
7 |
+
# Algorithms: Kheireddine Abainia <[email protected]>
|
8 |
+
# Hamza Rebbani <[email protected]>
|
9 |
+
# URL: <https://www.nltk.org/>
|
10 |
+
# For license information, see LICENSE.TXT
|
11 |
+
|
12 |
+
|
13 |
+
"""
|
14 |
+
ARLSTem2 Arabic Light Stemmer
|
15 |
+
The details about the implementation of this algorithm are described in:
|
16 |
+
K. Abainia and H. Rebbani, Comparing the Effectiveness of the Improved ARLSTem
|
17 |
+
Algorithm with Existing Arabic Light Stemmers, International Conference on
|
18 |
+
Theoretical and Applicative Aspects of Computer Science (ICTAACS'19), Skikda,
|
19 |
+
Algeria, December 15-16, 2019.
|
20 |
+
ARLSTem2 is an Arabic light stemmer based on removing the affixes from
|
21 |
+
the words (i.e. prefixes, suffixes and infixes). It is an improvement
|
22 |
+
of the previous Arabic light stemmer (ARLSTem). The new version was compared to
|
23 |
+
the original algorithm and several existing Arabic light stemmers, where the
|
24 |
+
results showed that the new version considerably improves the under-stemming
|
25 |
+
errors that are common to light stemmers. Both ARLSTem and ARLSTem2 can be run
|
26 |
+
online and do not use any dictionary.
|
27 |
+
"""
|
28 |
+
import re
|
29 |
+
|
30 |
+
from nltk.stem.api import StemmerI
|
31 |
+
|
32 |
+
|
33 |
+
class ARLSTem2(StemmerI):
|
34 |
+
"""
|
35 |
+
Return a stemmed Arabic word after removing affixes. This is an improved
|
36 |
+
version of the previous algorithm, which reduces under-stemming errors.
|
37 |
+
Typically used in Arabic search engines, information retrieval and NLP.
|
38 |
+
|
39 |
+
>>> from nltk.stem import arlstem2
|
40 |
+
>>> stemmer = ARLSTem2()
|
41 |
+
>>> word = stemmer.stem('يعمل')
|
42 |
+
>>> print(word)
|
43 |
+
عمل
|
44 |
+
|
45 |
+
:param token: The input Arabic word (unicode) to be stemmed
|
46 |
+
:type token: unicode
|
47 |
+
:return: A unicode Arabic word
|
48 |
+
"""
|
49 |
+
|
50 |
+
def __init__(self):
|
51 |
+
# different Alif with hamza
|
52 |
+
self.re_hamzated_alif = re.compile(r"[\u0622\u0623\u0625]")
|
53 |
+
self.re_alifMaqsura = re.compile(r"[\u0649]")
|
54 |
+
self.re_diacritics = re.compile(r"[\u064B-\u065F]")
|
55 |
+
|
56 |
+
# Alif Laam, Laam Laam, Fa Laam, Fa Ba
|
57 |
+
self.pr2 = ["\u0627\u0644", "\u0644\u0644", "\u0641\u0644", "\u0641\u0628"]
|
58 |
+
# Ba Alif Laam, Kaaf Alif Laam, Waaw Alif Laam
|
59 |
+
self.pr3 = ["\u0628\u0627\u0644", "\u0643\u0627\u0644", "\u0648\u0627\u0644"]
|
60 |
+
# Fa Laam Laam, Waaw Laam Laam
|
61 |
+
self.pr32 = ["\u0641\u0644\u0644", "\u0648\u0644\u0644"]
|
62 |
+
# Fa Ba Alif Laam, Waaw Ba Alif Laam, Fa Kaaf Alif Laam
|
63 |
+
self.pr4 = [
|
64 |
+
"\u0641\u0628\u0627\u0644",
|
65 |
+
"\u0648\u0628\u0627\u0644",
|
66 |
+
"\u0641\u0643\u0627\u0644",
|
67 |
+
]
|
68 |
+
|
69 |
+
# Kaf Yaa, Kaf Miim
|
70 |
+
self.su2 = ["\u0643\u064A", "\u0643\u0645"]
|
71 |
+
# Ha Alif, Ha Miim
|
72 |
+
self.su22 = ["\u0647\u0627", "\u0647\u0645"]
|
73 |
+
# Kaf Miim Alif, Kaf Noon Shadda
|
74 |
+
self.su3 = ["\u0643\u0645\u0627", "\u0643\u0646\u0651"]
|
75 |
+
# Ha Miim Alif, Ha Noon Shadda
|
76 |
+
self.su32 = ["\u0647\u0645\u0627", "\u0647\u0646\u0651"]
|
77 |
+
|
78 |
+
# Alif Noon, Ya Noon, Waaw Noon
|
79 |
+
self.pl_si2 = ["\u0627\u0646", "\u064A\u0646", "\u0648\u0646"]
|
80 |
+
# Taa Alif Noon, Taa Ya Noon
|
81 |
+
self.pl_si3 = ["\u062A\u0627\u0646", "\u062A\u064A\u0646"]
|
82 |
+
|
83 |
+
# Alif Noon, Waaw Noon
|
84 |
+
self.verb_su2 = ["\u0627\u0646", "\u0648\u0646"]
|
85 |
+
# Siin Taa, Siin Yaa
|
86 |
+
self.verb_pr2 = ["\u0633\u062A", "\u0633\u064A"]
|
87 |
+
# Siin Alif, Siin Noon
|
88 |
+
self.verb_pr22 = ["\u0633\u0627", "\u0633\u0646"]
|
89 |
+
# Lam Noon, Lam Taa, Lam Yaa, Lam Hamza
|
90 |
+
self.verb_pr33 = [
|
91 |
+
"\u0644\u0646",
|
92 |
+
"\u0644\u062A",
|
93 |
+
"\u0644\u064A",
|
94 |
+
"\u0644\u0623",
|
95 |
+
]
|
96 |
+
# Taa Miim Alif, Taa Noon Shadda
|
97 |
+
self.verb_suf3 = ["\u062A\u0645\u0627", "\u062A\u0646\u0651"]
|
98 |
+
# Noon Alif, Taa Miim, Taa Alif, Waaw Alif
|
99 |
+
self.verb_suf2 = [
|
100 |
+
"\u0646\u0627",
|
101 |
+
"\u062A\u0645",
|
102 |
+
"\u062A\u0627",
|
103 |
+
"\u0648\u0627",
|
104 |
+
]
|
105 |
+
# Taa, Alif, Noon
|
106 |
+
self.verb_suf1 = ["\u062A", "\u0627", "\u0646"]
|
107 |
+
|
108 |
+
def stem1(self, token):
|
109 |
+
"""
|
110 |
+
call this function to get the first stem
|
111 |
+
"""
|
112 |
+
try:
|
113 |
+
if token is None:
|
114 |
+
raise ValueError(
|
115 |
+
"The word could not be stemmed, because \
|
116 |
+
it is empty !"
|
117 |
+
)
|
118 |
+
self.is_verb = False
|
119 |
+
# remove Arabic diacritics and replace some letters with others
|
120 |
+
token = self.norm(token)
|
121 |
+
# strip the common noun prefixes
|
122 |
+
pre = self.pref(token)
|
123 |
+
if pre is not None:
|
124 |
+
token = pre
|
125 |
+
# transform the feminine form to masculine form
|
126 |
+
fm = self.fem2masc(token)
|
127 |
+
if fm is not None:
|
128 |
+
return fm
|
129 |
+
# strip the adjective affixes
|
130 |
+
adj = self.adjective(token)
|
131 |
+
if adj is not None:
|
132 |
+
return adj
|
133 |
+
# strip the suffixes that are common to nouns and verbs
|
134 |
+
token = self.suff(token)
|
135 |
+
# transform a plural noun to a singular noun
|
136 |
+
ps = self.plur2sing(token)
|
137 |
+
if ps is None:
|
138 |
+
if pre is None: # if the noun prefixes are not stripped
|
139 |
+
# strip the verb prefixes and suffixes
|
140 |
+
verb = self.verb(token)
|
141 |
+
if verb is not None:
|
142 |
+
self.is_verb = True
|
143 |
+
return verb
|
144 |
+
else:
|
145 |
+
return ps
|
146 |
+
return token
|
147 |
+
except ValueError as e:
|
148 |
+
print(e)
|
149 |
+
|
150 |
+
def stem(self, token):
|
151 |
+
# stem the input word
|
152 |
+
try:
|
153 |
+
if token is None:
|
154 |
+
raise ValueError(
|
155 |
+
"The word could not be stemmed, because \
|
156 |
+
it is empty !"
|
157 |
+
)
|
158 |
+
# run the first round of stemming
|
159 |
+
token = self.stem1(token)
|
160 |
+
# check if there is some additional noun affixes
|
161 |
+
if len(token) > 4:
|
162 |
+
# ^Taa, $Yaa + char
|
163 |
+
if token.startswith("\u062A") and token[-2] == "\u064A":
|
164 |
+
token = token[1:-2] + token[-1]
|
165 |
+
return token
|
166 |
+
# ^Miim, $Waaw + char
|
167 |
+
if token.startswith("\u0645") and token[-2] == "\u0648":
|
168 |
+
token = token[1:-2] + token[-1]
|
169 |
+
return token
|
170 |
+
if len(token) > 3:
|
171 |
+
# !^Alif, $Yaa
|
172 |
+
if not token.startswith("\u0627") and token.endswith("\u064A"):
|
173 |
+
token = token[:-1]
|
174 |
+
return token
|
175 |
+
# $Laam
|
176 |
+
if token.startswith("\u0644"):
|
177 |
+
return token[1:]
|
178 |
+
return token
|
179 |
+
except ValueError as e:
|
180 |
+
print(e)
|
181 |
+
|
182 |
+
def norm(self, token):
|
183 |
+
"""
|
184 |
+
normalize the word by removing diacritics, replace hamzated Alif
|
185 |
+
with Alif bare, replace AlifMaqsura with Yaa and remove Waaw at the
|
186 |
+
beginning.
|
187 |
+
"""
|
188 |
+
# strip Arabic diacritics
|
189 |
+
token = self.re_diacritics.sub("", token)
|
190 |
+
# replace Hamzated Alif with Alif bare
|
191 |
+
token = self.re_hamzated_alif.sub("\u0627", token)
|
192 |
+
# replace alifMaqsura with Yaa
|
193 |
+
token = self.re_alifMaqsura.sub("\u064A", token)
|
194 |
+
# strip the Waaw from the word beginning if the remaining is
|
195 |
+
# tri-literal at least
|
196 |
+
if token.startswith("\u0648") and len(token) > 3:
|
197 |
+
token = token[1:]
|
198 |
+
return token
|
199 |
+
|
200 |
+
def pref(self, token):
|
201 |
+
"""
|
202 |
+
remove prefixes from the words' beginning.
|
203 |
+
"""
|
204 |
+
if len(token) > 5:
|
205 |
+
for p3 in self.pr3:
|
206 |
+
if token.startswith(p3):
|
207 |
+
return token[3:]
|
208 |
+
if len(token) > 6:
|
209 |
+
for p4 in self.pr4:
|
210 |
+
if token.startswith(p4):
|
211 |
+
return token[4:]
|
212 |
+
if len(token) > 5:
|
213 |
+
for p3 in self.pr32:
|
214 |
+
if token.startswith(p3):
|
215 |
+
return token[3:]
|
216 |
+
if len(token) > 4:
|
217 |
+
for p2 in self.pr2:
|
218 |
+
if token.startswith(p2):
|
219 |
+
return token[2:]
|
220 |
+
|
221 |
+
def adjective(self, token):
|
222 |
+
"""
|
223 |
+
remove the infixes from adjectives
|
224 |
+
"""
|
225 |
+
# ^Alif, Alif, $Yaa
|
226 |
+
if len(token) > 5:
|
227 |
+
if (
|
228 |
+
token.startswith("\u0627")
|
229 |
+
and token[-3] == "\u0627"
|
230 |
+
and token.endswith("\u064A")
|
231 |
+
):
|
232 |
+
return token[:-3] + token[-2]
|
233 |
+
|
234 |
+
def suff(self, token):
|
235 |
+
"""
|
236 |
+
remove the suffixes from the word's ending.
|
237 |
+
"""
|
238 |
+
if token.endswith("\u0643") and len(token) > 3:
|
239 |
+
return token[:-1]
|
240 |
+
if len(token) > 4:
|
241 |
+
for s2 in self.su2:
|
242 |
+
if token.endswith(s2):
|
243 |
+
return token[:-2]
|
244 |
+
if len(token) > 5:
|
245 |
+
for s3 in self.su3:
|
246 |
+
if token.endswith(s3):
|
247 |
+
return token[:-3]
|
248 |
+
if token.endswith("\u0647") and len(token) > 3:
|
249 |
+
token = token[:-1]
|
250 |
+
return token
|
251 |
+
if len(token) > 4:
|
252 |
+
for s2 in self.su22:
|
253 |
+
if token.endswith(s2):
|
254 |
+
return token[:-2]
|
255 |
+
if len(token) > 5:
|
256 |
+
for s3 in self.su32:
|
257 |
+
if token.endswith(s3):
|
258 |
+
return token[:-3]
|
259 |
+
# $Noon and Alif
|
260 |
+
if token.endswith("\u0646\u0627") and len(token) > 4:
|
261 |
+
return token[:-2]
|
262 |
+
return token
|
263 |
+
|
264 |
+
def fem2masc(self, token):
|
265 |
+
"""
|
266 |
+
transform the word from the feminine form to the masculine form.
|
267 |
+
"""
|
268 |
+
if len(token) > 6:
|
269 |
+
# ^Taa, Yaa, $Yaa and Taa Marbuta
|
270 |
+
if (
|
271 |
+
token.startswith("\u062A")
|
272 |
+
and token[-4] == "\u064A"
|
273 |
+
and token.endswith("\u064A\u0629")
|
274 |
+
):
|
275 |
+
return token[1:-4] + token[-3]
|
276 |
+
# ^Alif, Yaa, $Yaa and Taa Marbuta
|
277 |
+
if (
|
278 |
+
token.startswith("\u0627")
|
279 |
+
and token[-4] == "\u0627"
|
280 |
+
and token.endswith("\u064A\u0629")
|
281 |
+
):
|
282 |
+
return token[:-4] + token[-3]
|
283 |
+
# $Alif, Yaa and Taa Marbuta
|
284 |
+
if token.endswith("\u0627\u064A\u0629") and len(token) > 5:
|
285 |
+
return token[:-2]
|
286 |
+
if len(token) > 4:
|
287 |
+
# Alif, $Taa Marbuta
|
288 |
+
if token[1] == "\u0627" and token.endswith("\u0629"):
|
289 |
+
return token[0] + token[2:-1]
|
290 |
+
# $Yaa and Taa Marbuta
|
291 |
+
if token.endswith("\u064A\u0629"):
|
292 |
+
return token[:-2]
|
293 |
+
# $Taa Marbuta
|
294 |
+
if token.endswith("\u0629") and len(token) > 3:
|
295 |
+
return token[:-1]
|
296 |
+
|
297 |
+
def plur2sing(self, token):
|
298 |
+
"""
|
299 |
+
transform the word from the plural form to the singular form.
|
300 |
+
"""
|
301 |
+
# ^Haa, $Noon, Waaw
|
302 |
+
if len(token) > 5:
|
303 |
+
if token.startswith("\u0645") and token.endswith("\u0648\u0646"):
|
304 |
+
return token[1:-2]
|
305 |
+
if len(token) > 4:
|
306 |
+
for ps2 in self.pl_si2:
|
307 |
+
if token.endswith(ps2):
|
308 |
+
return token[:-2]
|
309 |
+
if len(token) > 5:
|
310 |
+
for ps3 in self.pl_si3:
|
311 |
+
if token.endswith(ps3):
|
312 |
+
return token[:-3]
|
313 |
+
if len(token) > 4:
|
314 |
+
# $Alif, Taa
|
315 |
+
if token.endswith("\u0627\u062A"):
|
316 |
+
return token[:-2]
|
317 |
+
# ^Alif Alif
|
318 |
+
if token.startswith("\u0627") and token[2] == "\u0627":
|
319 |
+
return token[:2] + token[3:]
|
320 |
+
# ^Alif Alif
|
321 |
+
if token.startswith("\u0627") and token[-2] == "\u0627":
|
322 |
+
return token[1:-2] + token[-1]
|
323 |
+
|
324 |
+
def verb(self, token):
|
325 |
+
"""
|
326 |
+
stem the verb prefixes and suffixes or both
|
327 |
+
"""
|
328 |
+
vb = self.verb_t1(token)
|
329 |
+
if vb is not None:
|
330 |
+
return vb
|
331 |
+
vb = self.verb_t2(token)
|
332 |
+
if vb is not None:
|
333 |
+
return vb
|
334 |
+
vb = self.verb_t3(token)
|
335 |
+
if vb is not None:
|
336 |
+
return vb
|
337 |
+
vb = self.verb_t4(token)
|
338 |
+
if vb is not None:
|
339 |
+
return vb
|
340 |
+
vb = self.verb_t5(token)
|
341 |
+
if vb is not None:
|
342 |
+
return vb
|
343 |
+
vb = self.verb_t6(token)
|
344 |
+
return vb
|
345 |
+
|
346 |
+
def verb_t1(self, token):
|
347 |
+
"""
|
348 |
+
stem the present tense co-occurred prefixes and suffixes
|
349 |
+
"""
|
350 |
+
if len(token) > 5 and token.startswith("\u062A"): # Taa
|
351 |
+
for s2 in self.pl_si2:
|
352 |
+
if token.endswith(s2):
|
353 |
+
return token[1:-2]
|
354 |
+
if len(token) > 5 and token.startswith("\u064A"): # Yaa
|
355 |
+
for s2 in self.verb_su2:
|
356 |
+
if token.endswith(s2):
|
357 |
+
return token[1:-2]
|
358 |
+
if len(token) > 4 and token.startswith("\u0627"): # Alif
|
359 |
+
# Waaw Alif
|
360 |
+
if len(token) > 5 and token.endswith("\u0648\u0627"):
|
361 |
+
return token[1:-2]
|
362 |
+
# Yaa
|
363 |
+
if token.endswith("\u064A"):
|
364 |
+
return token[1:-1]
|
365 |
+
# Alif
|
366 |
+
if token.endswith("\u0627"):
|
367 |
+
return token[1:-1]
|
368 |
+
# Noon
|
369 |
+
if token.endswith("\u0646"):
|
370 |
+
return token[1:-1]
|
371 |
+
# ^Yaa, Noon$
|
372 |
+
if len(token) > 4 and token.startswith("\u064A") and token.endswith("\u0646"):
|
373 |
+
return token[1:-1]
|
374 |
+
# ^Taa, Noon$
|
375 |
+
if len(token) > 4 and token.startswith("\u062A") and token.endswith("\u0646"):
|
376 |
+
return token[1:-1]
|
377 |
+
|
378 |
+
def verb_t2(self, token):
|
379 |
+
"""
|
380 |
+
stem the future tense co-occurred prefixes and suffixes
|
381 |
+
"""
|
382 |
+
if len(token) > 6:
|
383 |
+
for s2 in self.pl_si2:
|
384 |
+
# ^Siin Taa
|
385 |
+
if token.startswith(self.verb_pr2[0]) and token.endswith(s2):
|
386 |
+
return token[2:-2]
|
387 |
+
# ^Siin Yaa, Alif Noon$
|
388 |
+
if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[0]):
|
389 |
+
return token[2:-2]
|
390 |
+
# ^Siin Yaa, Waaw Noon$
|
391 |
+
if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[2]):
|
392 |
+
return token[2:-2]
|
393 |
+
# ^Siin Taa, Noon$
|
394 |
+
if (
|
395 |
+
len(token) > 5
|
396 |
+
and token.startswith(self.verb_pr2[0])
|
397 |
+
and token.endswith("\u0646")
|
398 |
+
):
|
399 |
+
return token[2:-1]
|
400 |
+
# ^Siin Yaa, Noon$
|
401 |
+
if (
|
402 |
+
len(token) > 5
|
403 |
+
and token.startswith(self.verb_pr2[1])
|
404 |
+
and token.endswith("\u0646")
|
405 |
+
):
|
406 |
+
return token[2:-1]
|
407 |
+
|
408 |
+
def verb_t3(self, token):
|
409 |
+
"""
|
410 |
+
stem the present tense suffixes
|
411 |
+
"""
|
412 |
+
if len(token) > 5:
|
413 |
+
for su3 in self.verb_suf3:
|
414 |
+
if token.endswith(su3):
|
415 |
+
return token[:-3]
|
416 |
+
if len(token) > 4:
|
417 |
+
for su2 in self.verb_suf2:
|
418 |
+
if token.endswith(su2):
|
419 |
+
return token[:-2]
|
420 |
+
if len(token) > 3:
|
421 |
+
for su1 in self.verb_suf1:
|
422 |
+
if token.endswith(su1):
|
423 |
+
return token[:-1]
|
424 |
+
|
425 |
+
def verb_t4(self, token):
|
426 |
+
"""
|
427 |
+
stem the present tense prefixes
|
428 |
+
"""
|
429 |
+
if len(token) > 3:
|
430 |
+
for pr1 in self.verb_suf1:
|
431 |
+
if token.startswith(pr1):
|
432 |
+
return token[1:]
|
433 |
+
if token.startswith("\u064A"):
|
434 |
+
return token[1:]
|
435 |
+
|
436 |
+
def verb_t5(self, token):
|
437 |
+
"""
|
438 |
+
stem the future tense prefixes
|
439 |
+
"""
|
440 |
+
if len(token) > 4:
|
441 |
+
for pr2 in self.verb_pr22:
|
442 |
+
if token.startswith(pr2):
|
443 |
+
return token[2:]
|
444 |
+
for pr2 in self.verb_pr2:
|
445 |
+
if token.startswith(pr2):
|
446 |
+
return token[2:]
|
447 |
+
|
448 |
+
def verb_t6(self, token):
|
449 |
+
"""
|
450 |
+
stem the imperative tense prefixes
|
451 |
+
"""
|
452 |
+
if len(token) > 4:
|
453 |
+
for pr3 in self.verb_pr33:
|
454 |
+
if token.startswith(pr3):
|
455 |
+
return token[2:]
|
456 |
+
|
457 |
+
return token
|
llmeval-env/lib/python3.10/site-packages/nltk/stem/cistem.py
ADDED
@@ -0,0 +1,209 @@
# Natural Language Toolkit: CISTEM Stemmer for German
# Copyright (C) 2001-2023 NLTK Project
# Author: Leonie Weissweiler <[email protected]>
#         Tom Aarsen <> (modifications)
# Algorithm: Leonie Weissweiler <[email protected]>
#            Alexander Fraser <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

import re
from typing import Tuple

from nltk.stem.api import StemmerI


class Cistem(StemmerI):
    """
    CISTEM Stemmer for German

    This is the official Python implementation of the CISTEM stemmer.
    It is based on the paper
    Leonie Weissweiler, Alexander Fraser (2017). Developing a Stemmer for German
    Based on a Comparative Analysis of Publicly Available Stemmers.
    In Proceedings of the German Society for Computational Linguistics and Language
    Technology (GSCL)
    which can be read here:
    https://www.cis.lmu.de/~weissweiler/cistem/

    In the paper, we conducted an analysis of publicly available stemmers,
    developed two gold standards for German stemming and evaluated the stemmers
    based on the two gold standards. We then proposed the stemmer implemented here
    and show that it achieves slightly better f-measure than the other stemmers and
    is thrice as fast as the Snowball stemmer for German while being about as fast
    as most other stemmers.

    case_insensitive is a boolean specifying if case-insensitive stemming
    should be used. Case insensitivity improves performance only if words in the
    text may be incorrectly upper case. For all-lowercase and correctly cased
    text, best performance is achieved by setting case_insensitive to False.

    :param case_insensitive: if True, the stemming is case insensitive. False by default.
    :type case_insensitive: bool
    """

    strip_ge = re.compile(r"^ge(.{4,})")
    repl_xx = re.compile(r"(.)\1")
    strip_emr = re.compile(r"e[mr]$")
    strip_nd = re.compile(r"nd$")
    strip_t = re.compile(r"t$")
    strip_esn = re.compile(r"[esn]$")
    repl_xx_back = re.compile(r"(.)\*")

    def __init__(self, case_insensitive: bool = False):
        self._case_insensitive = case_insensitive

    @staticmethod
    def replace_to(word: str) -> str:
        word = word.replace("sch", "$")
        word = word.replace("ei", "%")
        word = word.replace("ie", "&")
        word = Cistem.repl_xx.sub(r"\1*", word)

        return word

    @staticmethod
    def replace_back(word: str) -> str:
        word = Cistem.repl_xx_back.sub(r"\1\1", word)
        word = word.replace("%", "ei")
        word = word.replace("&", "ie")
        word = word.replace("$", "sch")

        return word

    def stem(self, word: str) -> str:
        """Stems the input word.

        :param word: The word that is to be stemmed.
        :type word: str
        :return: The stemmed word.
        :rtype: str

        >>> from nltk.stem.cistem import Cistem
        >>> stemmer = Cistem()
        >>> s1 = "Speicherbehältern"
        >>> stemmer.stem(s1)
        'speicherbehalt'
        >>> s2 = "Grenzpostens"
        >>> stemmer.stem(s2)
        'grenzpost'
        >>> s3 = "Ausgefeiltere"
        >>> stemmer.stem(s3)
        'ausgefeilt'
        >>> stemmer = Cistem(True)
        >>> stemmer.stem(s1)
        'speicherbehal'
        >>> stemmer.stem(s2)
        'grenzpo'
        >>> stemmer.stem(s3)
        'ausgefeil'
        """
        if len(word) == 0:
            return word

        upper = word[0].isupper()
        word = word.lower()

        word = word.replace("ü", "u")
        word = word.replace("ö", "o")
        word = word.replace("ä", "a")
        word = word.replace("ß", "ss")

        word = Cistem.strip_ge.sub(r"\1", word)

        return self._segment_inner(word, upper)[0]

    def segment(self, word: str) -> Tuple[str, str]:
        """
        This method works very similarly to stem (:func:`cistem.stem`). The difference is that in
        addition to returning the stem, it also returns the rest that was removed at
        the end. To be able to return the stem unchanged so the stem and the rest
        can be concatenated to form the original word, all substitutions that altered
        the stem in any other way than by removing letters at the end were left out.

        :param word: The word that is to be stemmed.
        :type word: str
        :return: A tuple of the stemmed word and the removed suffix.
        :rtype: Tuple[str, str]

        >>> from nltk.stem.cistem import Cistem
        >>> stemmer = Cistem()
        >>> s1 = "Speicherbehältern"
        >>> stemmer.segment(s1)
        ('speicherbehält', 'ern')
        >>> s2 = "Grenzpostens"
        >>> stemmer.segment(s2)
        ('grenzpost', 'ens')
        >>> s3 = "Ausgefeiltere"
        >>> stemmer.segment(s3)
        ('ausgefeilt', 'ere')
        >>> stemmer = Cistem(True)
        >>> stemmer.segment(s1)
        ('speicherbehäl', 'tern')
        >>> stemmer.segment(s2)
        ('grenzpo', 'stens')
        >>> stemmer.segment(s3)
        ('ausgefeil', 'tere')
        """
        if len(word) == 0:
            return ("", "")

        upper = word[0].isupper()
        word = word.lower()

        return self._segment_inner(word, upper)

    def _segment_inner(self, word: str, upper: bool):
        """Inner method for iteratively applying the code stemming regexes.
        This method receives a pre-processed variant of the word to be stemmed,
        or the word to be segmented, and returns a tuple of the word and the
        removed suffix.

        :param word: A pre-processed variant of the word that is to be stemmed.
        :type word: str
        :param upper: Whether the original word started with a capital letter.
        :type upper: bool
        :return: A tuple of the stemmed word and the removed suffix.
        :rtype: Tuple[str, str]
        """

        rest_length = 0
        word_copy = word[:]

        # Pre-processing before applying the substitution patterns
        word = Cistem.replace_to(word)
        rest = ""

        # Apply the substitution patterns
        while len(word) > 3:
            if len(word) > 5:
                word, n = Cistem.strip_emr.subn("", word)
                if n != 0:
                    rest_length += 2
                    continue

                word, n = Cistem.strip_nd.subn("", word)
                if n != 0:
                    rest_length += 2
                    continue

            if not upper or self._case_insensitive:
                word, n = Cistem.strip_t.subn("", word)
                if n != 0:
                    rest_length += 1
                    continue

            word, n = Cistem.strip_esn.subn("", word)
            if n != 0:
                rest_length += 1
                continue
            else:
                break

        # Post-processing after applying the substitution patterns
        word = Cistem.replace_back(word)

        if rest_length:
            rest = word_copy[-rest_length:]

        return (word, rest)
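
A short sketch contrasting Cistem.stem() with Cistem.segment(). The sample words are taken from the doctests above; the loop and print layout are illustrative assumptions.

from nltk.stem.cistem import Cistem

stemmer = Cistem()                          # case-sensitive (default)
ci_stemmer = Cistem(case_insensitive=True)

for word in ["Speicherbehältern", "Grenzpostens", "Ausgefeiltere"]:
    stem, rest = stemmer.segment(word)
    # segment() skips the umlaut/"sch" substitutions, so stem + rest is the lowercased input
    print(word, stemmer.stem(word), ci_stemmer.stem(word), (stem, rest))

segment() is the variant to use when the removed ending has to be recovered, since the stem and the rest concatenate back to the lowercased input; stem() applies the full substitutions shown in its doctest.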
llmeval-env/lib/python3.10/site-packages/nltk/stem/isri.py
ADDED
@@ -0,0 +1,395 @@
#
# Natural Language Toolkit: The ISRI Arabic Stemmer
#
# Copyright (C) 2001-2023 NLTK Project
# Algorithm: Kazem Taghva, Rania Elkhoury, and Jeffrey Coombs (2005)
# Author: Hosam Algasaier <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
ISRI Arabic Stemmer

The algorithm for this stemmer is described in:

Taghva, K., Elkoury, R., and Coombs, J. 2005. Arabic Stemming without a root dictionary.
Information Science Research Institute. University of Nevada, Las Vegas, USA.

The Information Science Research Institute’s (ISRI) Arabic stemmer shares many features
with the Khoja stemmer. However, the main difference is that the ISRI stemmer does not use
a root dictionary. Also, if a root is not found, the ISRI stemmer returns a normalized form,
rather than returning the original unmodified word.

Additional adjustments were made to improve the algorithm:

1- Adding 60 stop words.
2- Adding the pattern (تفاعيل) to the ISRI pattern set.
3- Step 2 of the original algorithm normalized all hamza. This step is discarded because it
increases word ambiguities and changes the original root.

"""
import re

from nltk.stem.api import StemmerI


class ISRIStemmer(StemmerI):
    """
    ISRI Arabic stemmer based on the algorithm: Arabic Stemming without a root dictionary.
    Information Science Research Institute. University of Nevada, Las Vegas, USA.

    A few minor modifications have been made to the basic ISRI algorithm.
    See the source code of this module for more information.

    isri.stem(token) returns the Arabic root for the given token.

    The ISRI Stemmer requires that all tokens have Unicode string types.
    If you use Python IDLE on Arabic Windows you have to decode text first
    using Arabic '1256' coding.
    """

    def __init__(self):
|
52 |
+
# length three prefixes
|
53 |
+
self.p3 = [
|
54 |
+
"\u0643\u0627\u0644",
|
55 |
+
"\u0628\u0627\u0644",
|
56 |
+
"\u0648\u0644\u0644",
|
57 |
+
"\u0648\u0627\u0644",
|
58 |
+
]
|
59 |
+
|
60 |
+
# length two prefixes
|
61 |
+
self.p2 = ["\u0627\u0644", "\u0644\u0644"]
|
62 |
+
|
63 |
+
# length one prefixes
|
64 |
+
self.p1 = [
|
65 |
+
"\u0644",
|
66 |
+
"\u0628",
|
67 |
+
"\u0641",
|
68 |
+
"\u0633",
|
69 |
+
"\u0648",
|
70 |
+
"\u064a",
|
71 |
+
"\u062a",
|
72 |
+
"\u0646",
|
73 |
+
"\u0627",
|
74 |
+
]
|
75 |
+
|
76 |
+
# length three suffixes
|
77 |
+
self.s3 = [
|
78 |
+
"\u062a\u0645\u0644",
|
79 |
+
"\u0647\u0645\u0644",
|
80 |
+
"\u062a\u0627\u0646",
|
81 |
+
"\u062a\u064a\u0646",
|
82 |
+
"\u0643\u0645\u0644",
|
83 |
+
]
|
84 |
+
|
85 |
+
# length two suffixes
|
86 |
+
self.s2 = [
|
87 |
+
"\u0648\u0646",
|
88 |
+
"\u0627\u062a",
|
89 |
+
"\u0627\u0646",
|
90 |
+
"\u064a\u0646",
|
91 |
+
"\u062a\u0646",
|
92 |
+
"\u0643\u0645",
|
93 |
+
"\u0647\u0646",
|
94 |
+
"\u0646\u0627",
|
95 |
+
"\u064a\u0627",
|
96 |
+
"\u0647\u0627",
|
97 |
+
"\u062a\u0645",
|
98 |
+
"\u0643\u0646",
|
99 |
+
"\u0646\u064a",
|
100 |
+
"\u0648\u0627",
|
101 |
+
"\u0645\u0627",
|
102 |
+
"\u0647\u0645",
|
103 |
+
]
|
104 |
+
|
105 |
+
# length one suffixes
|
106 |
+
self.s1 = ["\u0629", "\u0647", "\u064a", "\u0643", "\u062a", "\u0627", "\u0646"]
|
107 |
+
|
108 |
+
# groups of length four patterns
|
109 |
+
self.pr4 = {
|
110 |
+
0: ["\u0645"],
|
111 |
+
1: ["\u0627"],
|
112 |
+
2: ["\u0627", "\u0648", "\u064A"],
|
113 |
+
3: ["\u0629"],
|
114 |
+
}
|
115 |
+
|
116 |
+
# Groups of length five patterns and length three roots
|
117 |
+
self.pr53 = {
|
118 |
+
0: ["\u0627", "\u062a"],
|
119 |
+
1: ["\u0627", "\u064a", "\u0648"],
|
120 |
+
2: ["\u0627", "\u062a", "\u0645"],
|
121 |
+
3: ["\u0645", "\u064a", "\u062a"],
|
122 |
+
4: ["\u0645", "\u062a"],
|
123 |
+
5: ["\u0627", "\u0648"],
|
124 |
+
6: ["\u0627", "\u0645"],
|
125 |
+
}
|
126 |
+
|
127 |
+
self.re_short_vowels = re.compile(r"[\u064B-\u0652]")
|
128 |
+
self.re_hamza = re.compile(r"[\u0621\u0624\u0626]")
|
129 |
+
self.re_initial_hamza = re.compile(r"^[\u0622\u0623\u0625]")
|
130 |
+
|
131 |
+
self.stop_words = [
|
132 |
+
"\u064a\u0643\u0648\u0646",
|
133 |
+
"\u0648\u0644\u064a\u0633",
|
134 |
+
"\u0648\u0643\u0627\u0646",
|
135 |
+
"\u0643\u0630\u0644\u0643",
|
136 |
+
"\u0627\u0644\u062a\u064a",
|
137 |
+
"\u0648\u0628\u064a\u0646",
|
138 |
+
"\u0639\u0644\u064a\u0647\u0627",
|
139 |
+
"\u0645\u0633\u0627\u0621",
|
140 |
+
"\u0627\u0644\u0630\u064a",
|
141 |
+
"\u0648\u0643\u0627\u0646\u062a",
|
142 |
+
"\u0648\u0644\u0643\u0646",
|
143 |
+
"\u0648\u0627\u0644\u062a\u064a",
|
144 |
+
"\u062a\u0643\u0648\u0646",
|
145 |
+
"\u0627\u0644\u064a\u0648\u0645",
|
146 |
+
"\u0627\u0644\u0644\u0630\u064a\u0646",
|
147 |
+
"\u0639\u0644\u064a\u0647",
|
148 |
+
"\u0643\u0627\u0646\u062a",
|
149 |
+
"\u0644\u0630\u0644\u0643",
|
150 |
+
"\u0623\u0645\u0627\u0645",
|
151 |
+
"\u0647\u0646\u0627\u0643",
|
152 |
+
"\u0645\u0646\u0647\u0627",
|
153 |
+
"\u0645\u0627\u0632\u0627\u0644",
|
154 |
+
"\u0644\u0627\u0632\u0627\u0644",
|
155 |
+
"\u0644\u0627\u064a\u0632\u0627\u0644",
|
156 |
+
"\u0645\u0627\u064a\u0632\u0627\u0644",
|
157 |
+
"\u0627\u0635\u0628\u062d",
|
158 |
+
"\u0623\u0635\u0628\u062d",
|
159 |
+
"\u0623\u0645\u0633\u0649",
|
160 |
+
"\u0627\u0645\u0633\u0649",
|
161 |
+
"\u0623\u0636\u062d\u0649",
|
162 |
+
"\u0627\u0636\u062d\u0649",
|
163 |
+
"\u0645\u0627\u0628\u0631\u062d",
|
164 |
+
"\u0645\u0627\u0641\u062a\u0626",
|
165 |
+
"\u0645\u0627\u0627\u0646\u0641\u0643",
|
166 |
+
"\u0644\u0627\u0633\u064a\u0645\u0627",
|
167 |
+
"\u0648\u0644\u0627\u064a\u0632\u0627\u0644",
|
168 |
+
"\u0627\u0644\u062d\u0627\u0644\u064a",
|
169 |
+
"\u0627\u0644\u064a\u0647\u0627",
|
170 |
+
"\u0627\u0644\u0630\u064a\u0646",
|
171 |
+
"\u0641\u0627\u0646\u0647",
|
172 |
+
"\u0648\u0627\u0644\u0630\u064a",
|
173 |
+
"\u0648\u0647\u0630\u0627",
|
174 |
+
"\u0644\u0647\u0630\u0627",
|
175 |
+
"\u0641\u0643\u0627\u0646",
|
176 |
+
"\u0633\u062a\u0643\u0648\u0646",
|
177 |
+
"\u0627\u0644\u064a\u0647",
|
178 |
+
"\u064a\u0645\u0643\u0646",
|
179 |
+
"\u0628\u0647\u0630\u0627",
|
180 |
+
"\u0627\u0644\u0630\u0649",
|
181 |
+
]
|
182 |
+
|
183 |
+
def stem(self, token):
|
184 |
+
"""
|
185 |
+
Stemming a word token using the ISRI stemmer.
|
186 |
+
"""
|
187 |
+
token = self.norm(
|
188 |
+
token, 1
|
189 |
+
) # remove diacritics which representing Arabic short vowels
|
190 |
+
if token in self.stop_words:
|
191 |
+
return token # exclude stop words from being processed
|
192 |
+
token = self.pre32(
|
193 |
+
token
|
194 |
+
) # remove length three and length two prefixes in this order
|
195 |
+
token = self.suf32(
|
196 |
+
token
|
197 |
+
) # remove length three and length two suffixes in this order
|
198 |
+
token = self.waw(
|
199 |
+
token
|
200 |
+
) # remove connective ‘و’ if it precedes a word beginning with ‘و’
|
201 |
+
token = self.norm(token, 2) # normalize initial hamza to bare alif
|
202 |
+
# if 4 <= word length <= 7, then stem; otherwise, no stemming
|
203 |
+
if len(token) == 4: # length 4 word
|
204 |
+
token = self.pro_w4(token)
|
205 |
+
elif len(token) == 5: # length 5 word
|
206 |
+
token = self.pro_w53(token)
|
207 |
+
token = self.end_w5(token)
|
208 |
+
elif len(token) == 6: # length 6 word
|
209 |
+
token = self.pro_w6(token)
|
210 |
+
token = self.end_w6(token)
|
211 |
+
elif len(token) == 7: # length 7 word
|
212 |
+
token = self.suf1(token)
|
213 |
+
if len(token) == 7:
|
214 |
+
token = self.pre1(token)
|
215 |
+
if len(token) == 6:
|
216 |
+
token = self.pro_w6(token)
|
217 |
+
token = self.end_w6(token)
|
218 |
+
return token
|
219 |
+
|
220 |
+
def norm(self, word, num=3):
|
221 |
+
"""
|
222 |
+
normalization:
|
223 |
+
num=1 normalize diacritics
|
224 |
+
num=2 normalize initial hamza
|
225 |
+
num=3 both 1&2
|
226 |
+
"""
|
227 |
+
if num == 1:
|
228 |
+
word = self.re_short_vowels.sub("", word)
|
229 |
+
elif num == 2:
|
230 |
+
word = self.re_initial_hamza.sub("\u0627", word)
|
231 |
+
elif num == 3:
|
232 |
+
word = self.re_short_vowels.sub("", word)
|
233 |
+
word = self.re_initial_hamza.sub("\u0627", word)
|
234 |
+
return word
|
235 |
+
|
236 |
+
def pre32(self, word):
|
237 |
+
"""remove length three and length two prefixes in this order"""
|
238 |
+
if len(word) >= 6:
|
239 |
+
for pre3 in self.p3:
|
240 |
+
if word.startswith(pre3):
|
241 |
+
return word[3:]
|
242 |
+
if len(word) >= 5:
|
243 |
+
for pre2 in self.p2:
|
244 |
+
if word.startswith(pre2):
|
245 |
+
return word[2:]
|
246 |
+
return word
|
247 |
+
|
248 |
+
def suf32(self, word):
|
249 |
+
"""remove length three and length two suffixes in this order"""
|
250 |
+
if len(word) >= 6:
|
251 |
+
for suf3 in self.s3:
|
252 |
+
if word.endswith(suf3):
|
253 |
+
return word[:-3]
|
254 |
+
if len(word) >= 5:
|
255 |
+
for suf2 in self.s2:
|
256 |
+
if word.endswith(suf2):
|
257 |
+
return word[:-2]
|
258 |
+
return word
|
259 |
+
|
260 |
+
def waw(self, word):
|
261 |
+
"""remove connective ‘و’ if it precedes a word beginning with ‘و’"""
|
262 |
+
if len(word) >= 4 and word[:2] == "\u0648\u0648":
|
263 |
+
word = word[1:]
|
264 |
+
return word
|
265 |
+
|
266 |
+
def pro_w4(self, word):
|
267 |
+
"""process length four patterns and extract length three roots"""
|
268 |
+
if word[0] in self.pr4[0]: # مفعل
|
269 |
+
word = word[1:]
|
270 |
+
elif word[1] in self.pr4[1]: # فاعل
|
271 |
+
word = word[:1] + word[2:]
|
272 |
+
elif word[2] in self.pr4[2]: # فعال - فعول - فعيل
|
273 |
+
word = word[:2] + word[3]
|
274 |
+
elif word[3] in self.pr4[3]: # فعلة
|
275 |
+
word = word[:-1]
|
276 |
+
else:
|
277 |
+
word = self.suf1(word) # do - normalize short sufix
|
278 |
+
if len(word) == 4:
|
279 |
+
word = self.pre1(word) # do - normalize short prefix
|
280 |
+
return word
|
281 |
+
|
282 |
+
def pro_w53(self, word):
|
283 |
+
"""process length five patterns and extract length three roots"""
|
284 |
+
if word[2] in self.pr53[0] and word[0] == "\u0627": # افتعل - افاعل
|
285 |
+
word = word[1] + word[3:]
|
286 |
+
elif word[3] in self.pr53[1] and word[0] == "\u0645": # مفعول - مفعال - مفعيل
|
287 |
+
word = word[1:3] + word[4]
|
288 |
+
elif word[0] in self.pr53[2] and word[4] == "\u0629": # مفعلة - تفعلة - افعلة
|
289 |
+
word = word[1:4]
|
290 |
+
elif word[0] in self.pr53[3] and word[2] == "\u062a": # مفتعل - يفتعل - تفتعل
|
291 |
+
word = word[1] + word[3:]
|
292 |
+
elif word[0] in self.pr53[4] and word[2] == "\u0627": # مفاعل - تفاعل
|
293 |
+
word = word[1] + word[3:]
|
294 |
+
elif word[2] in self.pr53[5] and word[4] == "\u0629": # فعولة - فعالة
|
295 |
+
word = word[:2] + word[3]
|
296 |
+
elif word[0] in self.pr53[6] and word[1] == "\u0646": # انفعل - منفعل
|
297 |
+
word = word[2:]
|
298 |
+
elif word[3] == "\u0627" and word[0] == "\u0627": # افعال
|
299 |
+
word = word[1:3] + word[4]
|
300 |
+
elif word[4] == "\u0646" and word[3] == "\u0627": # فعلان
|
301 |
+
word = word[:3]
|
302 |
+
elif word[3] == "\u064a" and word[0] == "\u062a": # تفعيل
|
303 |
+
word = word[1:3] + word[4]
|
304 |
+
elif word[3] == "\u0648" and word[1] == "\u0627": # فاعول
|
305 |
+
word = word[0] + word[2] + word[4]
|
306 |
+
elif word[2] == "\u0627" and word[1] == "\u0648": # فواعل
|
307 |
+
word = word[0] + word[3:]
|
308 |
+
elif word[3] == "\u0626" and word[2] == "\u0627": # فعائل
|
309 |
+
word = word[:2] + word[4]
|
310 |
+
elif word[4] == "\u0629" and word[1] == "\u0627": # فاعلة
|
311 |
+
word = word[0] + word[2:4]
|
312 |
+
elif word[4] == "\u064a" and word[2] == "\u0627": # فعالي
|
313 |
+
word = word[:2] + word[3]
|
314 |
+
else:
|
315 |
+
word = self.suf1(word) # do - normalize short sufix
|
316 |
+
if len(word) == 5:
|
317 |
+
word = self.pre1(word) # do - normalize short prefix
|
318 |
+
return word
|
319 |
+
|
320 |
+
def pro_w54(self, word):
|
321 |
+
"""process length five patterns and extract length four roots"""
|
322 |
+
if word[0] in self.pr53[2]: # تفعلل - افعلل - مفعلل
|
323 |
+
word = word[1:]
|
324 |
+
elif word[4] == "\u0629": # فعللة
|
325 |
+
word = word[:4]
|
326 |
+
elif word[2] == "\u0627": # فعالل
|
327 |
+
word = word[:2] + word[3:]
|
328 |
+
return word
|
329 |
+
|
330 |
+
def end_w5(self, word):
|
331 |
+
"""ending step (word of length five)"""
|
332 |
+
if len(word) == 4:
|
333 |
+
word = self.pro_w4(word)
|
334 |
+
elif len(word) == 5:
|
335 |
+
word = self.pro_w54(word)
|
336 |
+
return word
|
337 |
+
|
338 |
+
def pro_w6(self, word):
|
339 |
+
"""process length six patterns and extract length three roots"""
|
340 |
+
if word.startswith("\u0627\u0633\u062a") or word.startswith(
|
341 |
+
"\u0645\u0633\u062a"
|
342 |
+
): # مستفعل - استفعل
|
343 |
+
word = word[3:]
|
344 |
+
elif (
|
345 |
+
word[0] == "\u0645" and word[3] == "\u0627" and word[5] == "\u0629"
|
346 |
+
): # مفعالة
|
347 |
+
word = word[1:3] + word[4]
|
348 |
+
elif (
|
349 |
+
word[0] == "\u0627" and word[2] == "\u062a" and word[4] == "\u0627"
|
350 |
+
): # افتعال
|
351 |
+
word = word[1] + word[3] + word[5]
|
352 |
+
elif (
|
353 |
+
word[0] == "\u0627" and word[3] == "\u0648" and word[2] == word[4]
|
354 |
+
): # افعوعل
|
355 |
+
word = word[1] + word[4:]
|
356 |
+
elif (
|
357 |
+
word[0] == "\u062a" and word[2] == "\u0627" and word[4] == "\u064a"
|
358 |
+
): # تفاعيل new pattern
|
359 |
+
word = word[1] + word[3] + word[5]
|
360 |
+
else:
|
361 |
+
word = self.suf1(word) # do - normalize short sufix
|
362 |
+
if len(word) == 6:
|
363 |
+
word = self.pre1(word) # do - normalize short prefix
|
364 |
+
return word
|
365 |
+
|
366 |
+
def pro_w64(self, word):
|
367 |
+
"""process length six patterns and extract length four roots"""
|
368 |
+
if word[0] == "\u0627" and word[4] == "\u0627": # افعلال
|
369 |
+
word = word[1:4] + word[5]
|
370 |
+
elif word.startswith("\u0645\u062a"): # متفعلل
|
371 |
+
word = word[2:]
|
372 |
+
return word
|
373 |
+
|
374 |
+
def end_w6(self, word):
|
375 |
+
"""ending step (word of length six)"""
|
376 |
+
if len(word) == 5:
|
377 |
+
word = self.pro_w53(word)
|
378 |
+
word = self.end_w5(word)
|
379 |
+
elif len(word) == 6:
|
380 |
+
word = self.pro_w64(word)
|
381 |
+
return word
|
382 |
+
|
383 |
+
def suf1(self, word):
|
384 |
+
"""normalize short sufix"""
|
385 |
+
for sf1 in self.s1:
|
386 |
+
if word.endswith(sf1):
|
387 |
+
return word[:-1]
|
388 |
+
return word
|
389 |
+
|
390 |
+
def pre1(self, word):
|
391 |
+
"""normalize short prefix"""
|
392 |
+
for sp1 in self.p1:
|
393 |
+
if word.startswith(sp1):
|
394 |
+
return word[1:]
|
395 |
+
return word
|
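
A usage sketch for the ISRI stemmer above; the two-word sample phrase is an assumption, not taken from the file.

from nltk.stem.isri import ISRIStemmer

stemmer = ISRIStemmer()
# "الكتاب جميل" -- assumed sample tokens
tokens = "\u0627\u0644\u0643\u062a\u0627\u0628 \u062c\u0645\u064a\u0644".split()
print([stemmer.stem(t) for t in tokens])

Stop words from the list above are returned unchanged, and only tokens that are 4 to 7 letters long after prefix/suffix stripping go through the pattern-matching steps; shorter or longer tokens are only normalized and affix-stripped.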
llmeval-env/lib/python3.10/site-packages/nltk/stem/lancaster.py
ADDED
@@ -0,0 +1,343 @@
# Natural Language Toolkit: Stemmers
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Steven Tomcavage <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
A word stemmer based on the Lancaster (Paice/Husk) stemming algorithm.
Paice, Chris D. "Another Stemmer." ACM SIGIR Forum 24.3 (1990): 56-61.
"""
import re

from nltk.stem.api import StemmerI


class LancasterStemmer(StemmerI):
    """
    Lancaster Stemmer

    >>> from nltk.stem.lancaster import LancasterStemmer
    >>> st = LancasterStemmer()
    >>> st.stem('maximum')     # Remove "-um" when word is intact
    'maxim'
    >>> st.stem('presumably')  # Don't remove "-um" when word is not intact
    'presum'
    >>> st.stem('multiply')    # No action taken if word ends with "-ply"
    'multiply'
    >>> st.stem('provision')   # Replace "-sion" with "-j" to trigger "j" set of rules
    'provid'
    >>> st.stem('owed')        # Word starting with vowel must contain at least 2 letters
    'ow'
    >>> st.stem('ear')         # ditto
    'ear'
    >>> st.stem('saying')      # Words starting with consonant must contain at least 3
    'say'
    >>> st.stem('crying')      # letters and one of those letters must be a vowel
    'cry'
    >>> st.stem('string')      # ditto
    'string'
    >>> st.stem('meant')       # ditto
    'meant'
    >>> st.stem('cement')      # ditto
    'cem'
    >>> st_pre = LancasterStemmer(strip_prefix_flag=True)
    >>> st_pre.stem('kilometer') # Test Prefix
    'met'
    >>> st_custom = LancasterStemmer(rule_tuple=("ssen4>", "s1t."))
    >>> st_custom.stem("ness") # Change s to t
    'nest'
    """
|
52 |
+
|
53 |
+
# The rule list is static since it doesn't change between instances
|
54 |
+
default_rule_tuple = (
|
55 |
+
"ai*2.", # -ia > - if intact
|
56 |
+
"a*1.", # -a > - if intact
|
57 |
+
"bb1.", # -bb > -b
|
58 |
+
"city3s.", # -ytic > -ys
|
59 |
+
"ci2>", # -ic > -
|
60 |
+
"cn1t>", # -nc > -nt
|
61 |
+
"dd1.", # -dd > -d
|
62 |
+
"dei3y>", # -ied > -y
|
63 |
+
"deec2ss.", # -ceed >", -cess
|
64 |
+
"dee1.", # -eed > -ee
|
65 |
+
"de2>", # -ed > -
|
66 |
+
"dooh4>", # -hood > -
|
67 |
+
"e1>", # -e > -
|
68 |
+
"feil1v.", # -lief > -liev
|
69 |
+
"fi2>", # -if > -
|
70 |
+
"gni3>", # -ing > -
|
71 |
+
"gai3y.", # -iag > -y
|
72 |
+
"ga2>", # -ag > -
|
73 |
+
"gg1.", # -gg > -g
|
74 |
+
"ht*2.", # -th > - if intact
|
75 |
+
"hsiug5ct.", # -guish > -ct
|
76 |
+
"hsi3>", # -ish > -
|
77 |
+
"i*1.", # -i > - if intact
|
78 |
+
"i1y>", # -i > -y
|
79 |
+
"ji1d.", # -ij > -id -- see nois4j> & vis3j>
|
80 |
+
"juf1s.", # -fuj > -fus
|
81 |
+
"ju1d.", # -uj > -ud
|
82 |
+
"jo1d.", # -oj > -od
|
83 |
+
"jeh1r.", # -hej > -her
|
84 |
+
"jrev1t.", # -verj > -vert
|
85 |
+
"jsim2t.", # -misj > -mit
|
86 |
+
"jn1d.", # -nj > -nd
|
87 |
+
"j1s.", # -j > -s
|
88 |
+
"lbaifi6.", # -ifiabl > -
|
89 |
+
"lbai4y.", # -iabl > -y
|
90 |
+
"lba3>", # -abl > -
|
91 |
+
"lbi3.", # -ibl > -
|
92 |
+
"lib2l>", # -bil > -bl
|
93 |
+
"lc1.", # -cl > c
|
94 |
+
"lufi4y.", # -iful > -y
|
95 |
+
"luf3>", # -ful > -
|
96 |
+
"lu2.", # -ul > -
|
97 |
+
"lai3>", # -ial > -
|
98 |
+
"lau3>", # -ual > -
|
99 |
+
"la2>", # -al > -
|
100 |
+
"ll1.", # -ll > -l
|
101 |
+
"mui3.", # -ium > -
|
102 |
+
"mu*2.", # -um > - if intact
|
103 |
+
"msi3>", # -ism > -
|
104 |
+
"mm1.", # -mm > -m
|
105 |
+
"nois4j>", # -sion > -j
|
106 |
+
"noix4ct.", # -xion > -ct
|
107 |
+
"noi3>", # -ion > -
|
108 |
+
"nai3>", # -ian > -
|
109 |
+
"na2>", # -an > -
|
110 |
+
"nee0.", # protect -een
|
111 |
+
"ne2>", # -en > -
|
112 |
+
"nn1.", # -nn > -n
|
113 |
+
"pihs4>", # -ship > -
|
114 |
+
"pp1.", # -pp > -p
|
115 |
+
"re2>", # -er > -
|
116 |
+
"rae0.", # protect -ear
|
117 |
+
"ra2.", # -ar > -
|
118 |
+
"ro2>", # -or > -
|
119 |
+
"ru2>", # -ur > -
|
120 |
+
"rr1.", # -rr > -r
|
121 |
+
"rt1>", # -tr > -t
|
122 |
+
"rei3y>", # -ier > -y
|
123 |
+
"sei3y>", # -ies > -y
|
124 |
+
"sis2.", # -sis > -s
|
125 |
+
"si2>", # -is > -
|
126 |
+
"ssen4>", # -ness > -
|
127 |
+
"ss0.", # protect -ss
|
128 |
+
"suo3>", # -ous > -
|
129 |
+
"su*2.", # -us > - if intact
|
130 |
+
"s*1>", # -s > - if intact
|
131 |
+
"s0.", # -s > -s
|
132 |
+
"tacilp4y.", # -plicat > -ply
|
133 |
+
"ta2>", # -at > -
|
134 |
+
"tnem4>", # -ment > -
|
135 |
+
"tne3>", # -ent > -
|
136 |
+
"tna3>", # -ant > -
|
137 |
+
"tpir2b.", # -ript > -rib
|
138 |
+
"tpro2b.", # -orpt > -orb
|
139 |
+
"tcud1.", # -duct > -duc
|
140 |
+
"tpmus2.", # -sumpt > -sum
|
141 |
+
"tpec2iv.", # -cept > -ceiv
|
142 |
+
"tulo2v.", # -olut > -olv
|
143 |
+
"tsis0.", # protect -sist
|
144 |
+
"tsi3>", # -ist > -
|
145 |
+
"tt1.", # -tt > -t
|
146 |
+
"uqi3.", # -iqu > -
|
147 |
+
"ugo1.", # -ogu > -og
|
148 |
+
"vis3j>", # -siv > -j
|
149 |
+
"vie0.", # protect -eiv
|
150 |
+
"vi2>", # -iv > -
|
151 |
+
"ylb1>", # -bly > -bl
|
152 |
+
"yli3y>", # -ily > -y
|
153 |
+
"ylp0.", # protect -ply
|
154 |
+
"yl2>", # -ly > -
|
155 |
+
"ygo1.", # -ogy > -og
|
156 |
+
"yhp1.", # -phy > -ph
|
157 |
+
"ymo1.", # -omy > -om
|
158 |
+
"ypo1.", # -opy > -op
|
159 |
+
"yti3>", # -ity > -
|
160 |
+
"yte3>", # -ety > -
|
161 |
+
"ytl2.", # -lty > -l
|
162 |
+
"yrtsi5.", # -istry > -
|
163 |
+
"yra3>", # -ary > -
|
164 |
+
"yro3>", # -ory > -
|
165 |
+
"yfi3.", # -ify > -
|
166 |
+
"ycn2t>", # -ncy > -nt
|
167 |
+
"yca3>", # -acy > -
|
168 |
+
"zi2>", # -iz > -
|
169 |
+
"zy1s.", # -yz > -ys
|
170 |
+
)
|
171 |
+
|
172 |
+
def __init__(self, rule_tuple=None, strip_prefix_flag=False):
|
173 |
+
"""Create an instance of the Lancaster stemmer."""
|
174 |
+
# Setup an empty rule dictionary - this will be filled in later
|
175 |
+
self.rule_dictionary = {}
|
176 |
+
# Check if a user wants to strip prefix
|
177 |
+
self._strip_prefix = strip_prefix_flag
|
178 |
+
# Check if a user wants to use his/her own rule tuples.
|
179 |
+
self._rule_tuple = rule_tuple if rule_tuple else self.default_rule_tuple
|
180 |
+
|
181 |
+
def parseRules(self, rule_tuple=None):
|
182 |
+
"""Validate the set of rules used in this stemmer.
|
183 |
+
|
184 |
+
If this function is called as an individual method, without using stem
|
185 |
+
method, rule_tuple argument will be compiled into self.rule_dictionary.
|
186 |
+
If this function is called within stem, self._rule_tuple will be used.
|
187 |
+
|
188 |
+
"""
|
189 |
+
# If there is no argument for the function, use class' own rule tuple.
|
190 |
+
rule_tuple = rule_tuple if rule_tuple else self._rule_tuple
|
191 |
+
valid_rule = re.compile(r"^[a-z]+\*?\d[a-z]*[>\.]?$")
|
192 |
+
# Empty any old rules from the rule set before adding new ones
|
193 |
+
self.rule_dictionary = {}
|
194 |
+
|
195 |
+
for rule in rule_tuple:
|
196 |
+
if not valid_rule.match(rule):
|
197 |
+
raise ValueError(f"The rule {rule} is invalid")
|
198 |
+
first_letter = rule[0:1]
|
199 |
+
if first_letter in self.rule_dictionary:
|
200 |
+
self.rule_dictionary[first_letter].append(rule)
|
201 |
+
else:
|
202 |
+
self.rule_dictionary[first_letter] = [rule]
|
203 |
+
|
204 |
+
def stem(self, word):
|
205 |
+
"""Stem a word using the Lancaster stemmer."""
|
206 |
+
# Lower-case the word, since all the rules are lower-cased
|
207 |
+
word = word.lower()
|
208 |
+
word = self.__stripPrefix(word) if self._strip_prefix else word
|
209 |
+
|
210 |
+
# Save a copy of the original word
|
211 |
+
intact_word = word
|
212 |
+
|
213 |
+
# If rule dictionary is empty, parse rule tuple.
|
214 |
+
if not self.rule_dictionary:
|
215 |
+
self.parseRules()
|
216 |
+
|
217 |
+
return self.__doStemming(word, intact_word)
|
218 |
+
|
219 |
+
def __doStemming(self, word, intact_word):
|
220 |
+
"""Perform the actual word stemming"""
|
221 |
+
|
222 |
+
valid_rule = re.compile(r"^([a-z]+)(\*?)(\d)([a-z]*)([>\.]?)$")
|
223 |
+
|
224 |
+
proceed = True
|
225 |
+
|
226 |
+
while proceed:
|
227 |
+
|
228 |
+
# Find the position of the last letter of the word to be stemmed
|
229 |
+
last_letter_position = self.__getLastLetter(word)
|
230 |
+
|
231 |
+
# Only stem the word if it has a last letter and a rule matching that last letter
|
232 |
+
if (
|
233 |
+
last_letter_position < 0
|
234 |
+
or word[last_letter_position] not in self.rule_dictionary
|
235 |
+
):
|
236 |
+
proceed = False
|
237 |
+
|
238 |
+
else:
|
239 |
+
rule_was_applied = False
|
240 |
+
|
241 |
+
# Go through each rule that matches the word's final letter
|
242 |
+
for rule in self.rule_dictionary[word[last_letter_position]]:
|
243 |
+
rule_match = valid_rule.match(rule)
|
244 |
+
if rule_match:
|
245 |
+
(
|
246 |
+
ending_string,
|
247 |
+
intact_flag,
|
248 |
+
remove_total,
|
249 |
+
append_string,
|
250 |
+
cont_flag,
|
251 |
+
) = rule_match.groups()
|
252 |
+
|
253 |
+
# Convert the number of chars to remove when stemming
|
254 |
+
# from a string to an integer
|
255 |
+
remove_total = int(remove_total)
|
256 |
+
|
257 |
+
# Proceed if word's ending matches rule's word ending
|
258 |
+
if word.endswith(ending_string[::-1]):
|
259 |
+
if intact_flag:
|
260 |
+
if word == intact_word and self.__isAcceptable(
|
261 |
+
word, remove_total
|
262 |
+
):
|
263 |
+
word = self.__applyRule(
|
264 |
+
word, remove_total, append_string
|
265 |
+
)
|
266 |
+
rule_was_applied = True
|
267 |
+
if cont_flag == ".":
|
268 |
+
proceed = False
|
269 |
+
break
|
270 |
+
elif self.__isAcceptable(word, remove_total):
|
271 |
+
word = self.__applyRule(
|
272 |
+
word, remove_total, append_string
|
273 |
+
)
|
274 |
+
rule_was_applied = True
|
275 |
+
if cont_flag == ".":
|
276 |
+
proceed = False
|
277 |
+
break
|
278 |
+
# If no rules apply, the word doesn't need any more stemming
|
279 |
+
if rule_was_applied == False:
|
280 |
+
proceed = False
|
281 |
+
return word
|
282 |
+
|
283 |
+
def __getLastLetter(self, word):
|
284 |
+
"""Get the zero-based index of the last alphabetic character in this string"""
|
285 |
+
last_letter = -1
|
286 |
+
for position in range(len(word)):
|
287 |
+
if word[position].isalpha():
|
288 |
+
last_letter = position
|
289 |
+
else:
|
290 |
+
break
|
291 |
+
return last_letter
|
292 |
+
|
293 |
+
def __isAcceptable(self, word, remove_total):
|
294 |
+
"""Determine if the word is acceptable for stemming."""
|
295 |
+
word_is_acceptable = False
|
296 |
+
# If the word starts with a vowel, it must be at least 2
|
297 |
+
# characters long to be stemmed
|
298 |
+
if word[0] in "aeiouy":
|
299 |
+
if len(word) - remove_total >= 2:
|
300 |
+
word_is_acceptable = True
|
301 |
+
# If the word starts with a consonant, it must be at least 3
|
302 |
+
# characters long (including one vowel) to be stemmed
|
303 |
+
elif len(word) - remove_total >= 3:
|
304 |
+
if word[1] in "aeiouy":
|
305 |
+
word_is_acceptable = True
|
306 |
+
elif word[2] in "aeiouy":
|
307 |
+
word_is_acceptable = True
|
308 |
+
return word_is_acceptable
|
309 |
+
|
310 |
+
def __applyRule(self, word, remove_total, append_string):
|
311 |
+
"""Apply the stemming rule to the word"""
|
312 |
+
# Remove letters from the end of the word
|
313 |
+
new_word_length = len(word) - remove_total
|
314 |
+
word = word[0:new_word_length]
|
315 |
+
|
316 |
+
# And add new letters to the end of the truncated word
|
317 |
+
if append_string:
|
318 |
+
word += append_string
|
319 |
+
return word
|
320 |
+
|
321 |
+
def __stripPrefix(self, word):
|
322 |
+
"""Remove prefix from a word.
|
323 |
+
|
324 |
+
This function originally taken from Whoosh.
|
325 |
+
|
326 |
+
"""
|
327 |
+
for prefix in (
|
328 |
+
"kilo",
|
329 |
+
"micro",
|
330 |
+
"milli",
|
331 |
+
"intra",
|
332 |
+
"ultra",
|
333 |
+
"mega",
|
334 |
+
"nano",
|
335 |
+
"pico",
|
336 |
+
"pseudo",
|
337 |
+
):
|
338 |
+
if word.startswith(prefix):
|
339 |
+
return word[len(prefix) :]
|
340 |
+
return word
|
341 |
+
|
342 |
+
def __repr__(self):
|
343 |
+
return "<LancasterStemmer>"
|
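
Each rule in the tuple above stores the affix reversed, an optional '*' (apply only if the word is still intact), a digit giving how many characters to remove, an optional replacement string, and '>' (keep stemming) or '.' (stop). A small sketch that decodes a few of the default rules with the same pattern used in __doStemming; the chosen rules are just examples from the tuple above.

import re

rule_re = re.compile(r"^([a-z]+)(\*?)(\d)([a-z]*)([>\.]?)$")

for rule in ("ssen4>", "nois4j>", "mu*2."):
    ending, intact, remove, append, cont = rule_re.match(rule).groups()
    # reverse the stored ending to show the forward suffix, e.g. "ssen" -> "ness"
    print(f"{rule}: suffix -{ending[::-1]}, intact only={bool(intact)}, "
          f"remove {remove}, append {append!r}, flag {cont!r}")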
llmeval-env/lib/python3.10/site-packages/nltk/stem/porter.py
ADDED
@@ -0,0 +1,715 @@
"""
Porter Stemmer

This is the Porter stemming algorithm. It follows the algorithm
presented in

Porter, M. "An algorithm for suffix stripping." Program 14.3 (1980): 130-137.

with some optional deviations that can be turned on or off with the
`mode` argument to the constructor.

Martin Porter, the algorithm's inventor, maintains a web page about the
algorithm at

    https://www.tartarus.org/~martin/PorterStemmer/

which includes another Python implementation and other implementations
in many languages.
"""

__docformat__ = "plaintext"

import re

from nltk.stem.api import StemmerI


class PorterStemmer(StemmerI):
    """
    A word stemmer based on the Porter stemming algorithm.

        Porter, M. "An algorithm for suffix stripping."
        Program 14.3 (1980): 130-137.

    See https://www.tartarus.org/~martin/PorterStemmer/ for the homepage
    of the algorithm.

    Martin Porter has endorsed several modifications to the Porter
    algorithm since writing his original paper, and those extensions are
    included in the implementations on his website. Additionally, others
    have proposed further improvements to the algorithm, including NLTK
    contributors. There are thus three modes that can be selected by
    passing the appropriate constant to the class constructor's `mode`
    attribute:

    - PorterStemmer.ORIGINAL_ALGORITHM

        An implementation that is faithful to the original paper.

        Note that Martin Porter has deprecated this version of the
        algorithm. Martin distributes implementations of the Porter
        Stemmer in many languages, hosted at:

        https://www.tartarus.org/~martin/PorterStemmer/

        and all of these implementations include his extensions. He
        strongly recommends against using the original, published
        version of the algorithm; only use this mode if you clearly
        understand why you are choosing to do so.

    - PorterStemmer.MARTIN_EXTENSIONS

        An implementation that only uses the modifications to the
        algorithm that are included in the implementations on Martin
        Porter's website. He has declared Porter frozen, so the
        behaviour of those implementations should never change.

    - PorterStemmer.NLTK_EXTENSIONS (default)

        An implementation that includes further improvements devised by
        NLTK contributors or taken from other modified implementations
        found on the web.

    For the best stemming, you should use the default NLTK_EXTENSIONS
    version. However, if you need to get the same results as either the
    original algorithm or one of Martin Porter's hosted versions for
    compatibility with an existing implementation or dataset, you can use
    one of the other modes instead.
    """
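
A brief sketch of selecting the three modes described in the docstring above; the sample words are illustrative assumptions and no particular outputs are claimed here.

from nltk.stem.porter import PorterStemmer

default = PorterStemmer()                                      # NLTK_EXTENSIONS
martin = PorterStemmer(mode=PorterStemmer.MARTIN_EXTENSIONS)
original = PorterStemmer(mode=PorterStemmer.ORIGINAL_ALGORITHM)

for word in ["dying", "agreed", "controlling"]:
    print(word, default.stem(word), martin.stem(word), original.stem(word))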

    # Modes the Stemmer can be instantiated in
    NLTK_EXTENSIONS = "NLTK_EXTENSIONS"
    MARTIN_EXTENSIONS = "MARTIN_EXTENSIONS"
    ORIGINAL_ALGORITHM = "ORIGINAL_ALGORITHM"

    def __init__(self, mode=NLTK_EXTENSIONS):
        if mode not in (
            self.NLTK_EXTENSIONS,
            self.MARTIN_EXTENSIONS,
            self.ORIGINAL_ALGORITHM,
        ):
            raise ValueError(
                "Mode must be one of PorterStemmer.NLTK_EXTENSIONS, "
                "PorterStemmer.MARTIN_EXTENSIONS, or "
                "PorterStemmer.ORIGINAL_ALGORITHM"
            )

        self.mode = mode

        if self.mode == self.NLTK_EXTENSIONS:
            # This is a table of irregular forms. It is quite short,
            # but still reflects the errors actually drawn to Martin
            # Porter's attention over a 20 year period!
            irregular_forms = {
                "sky": ["sky", "skies"],
                "die": ["dying"],
                "lie": ["lying"],
                "tie": ["tying"],
                "news": ["news"],
                "inning": ["innings", "inning"],
                "outing": ["outings", "outing"],
                "canning": ["cannings", "canning"],
                "howe": ["howe"],
                "proceed": ["proceed"],
                "exceed": ["exceed"],
                "succeed": ["succeed"],
            }

            self.pool = {}
            for key in irregular_forms:
                for val in irregular_forms[key]:
                    self.pool[val] = key

        self.vowels = frozenset(["a", "e", "i", "o", "u"])

    def _is_consonant(self, word, i):
        """Returns True if word[i] is a consonant, False otherwise

        A consonant is defined in the paper as follows:

            A consonant in a word is a letter other than A, E, I, O or
            U, and other than Y preceded by a consonant. (The fact that
            the term `consonant' is defined to some extent in terms of
            itself does not make it ambiguous.) So in TOY the consonants
            are T and Y, and in SYZYGY they are S, Z and G. If a letter
            is not a consonant it is a vowel.
        """
        if word[i] in self.vowels:
            return False
        if word[i] == "y":
            if i == 0:
                return True
            else:
                return not self._is_consonant(word, i - 1)
        return True

    def _measure(self, stem):
        r"""Returns the 'measure' of stem, per definition in the paper

        From the paper:

            A consonant will be denoted by c, a vowel by v. A list
            ccc... of length greater than 0 will be denoted by C, and a
            list vvv... of length greater than 0 will be denoted by V.
            Any word, or part of a word, therefore has one of the four
            forms:

                CVCV ... C
                CVCV ... V
                VCVC ... C
                VCVC ... V

            These may all be represented by the single form

                [C]VCVC ... [V]

            where the square brackets denote arbitrary presence of their
            contents. Using (VC){m} to denote VC repeated m times, this
            may again be written as

                [C](VC){m}[V].

            m will be called the \measure\ of any word or word part when
            represented in this form. The case m = 0 covers the null
            word. Here are some examples:

                m=0    TR,  EE,  TREE,  Y,  BY.
                m=1    TROUBLE,  OATS,  TREES,  IVY.
                m=2    TROUBLES,  PRIVATE,  OATEN,  ORRERY.
        """
        cv_sequence = ""

        # Construct a string of 'c's and 'v's representing whether each
        # character in `stem` is a consonant or a vowel.
        # e.g. 'falafel' becomes 'cvcvcvc',
        # 'architecture' becomes 'vcccvcvccvcv'
        for i in range(len(stem)):
            if self._is_consonant(stem, i):
                cv_sequence += "c"
            else:
                cv_sequence += "v"

        # Count the number of 'vc' occurrences, which is equivalent to
        # the number of 'VC' occurrences in Porter's reduced form in the
        # docstring above, which is in turn equivalent to `m`
        return cv_sequence.count("vc")

    def _has_positive_measure(self, stem):
        return self._measure(stem) > 0

    def _contains_vowel(self, stem):
        """Returns True if stem contains a vowel, else False"""
        for i in range(len(stem)):
            if not self._is_consonant(stem, i):
                return True
        return False

    def _ends_double_consonant(self, word):
        """Implements condition *d from the paper

        Returns True if word ends with a double consonant
        """
        return (
            len(word) >= 2
            and word[-1] == word[-2]
            and self._is_consonant(word, len(word) - 1)
        )

    def _ends_cvc(self, word):
        """Implements condition *o from the paper

        From the paper:

            *o  - the stem ends cvc, where the second c is not W, X or Y
                  (e.g. -WIL, -HOP).
        """
        return (
            len(word) >= 3
            and self._is_consonant(word, len(word) - 3)
            and not self._is_consonant(word, len(word) - 2)
            and self._is_consonant(word, len(word) - 1)
            and word[-1] not in ("w", "x", "y")
        ) or (
            self.mode == self.NLTK_EXTENSIONS
            and len(word) == 2
            and not self._is_consonant(word, 0)
            and self._is_consonant(word, 1)
        )

    def _replace_suffix(self, word, suffix, replacement):
        """Replaces `suffix` of `word` with `replacement`"""
        assert word.endswith(suffix), "Given word doesn't end with given suffix"
        if suffix == "":
            return word + replacement
        else:
            return word[: -len(suffix)] + replacement

    def _apply_rule_list(self, word, rules):
        """Applies the first applicable suffix-removal rule to the word

        Takes a word and a list of suffix-removal rules represented as
        3-tuples, with the first element being the suffix to remove,
        the second element being the string to replace it with, and the
        final element being the condition for the rule to be applicable,
        or None if the rule is unconditional.
        """
        for rule in rules:
            suffix, replacement, condition = rule
            if suffix == "*d" and self._ends_double_consonant(word):
                stem = word[:-2]
                if condition is None or condition(stem):
                    return stem + replacement
                else:
                    # Don't try any further rules
                    return word
            if word.endswith(suffix):
                stem = self._replace_suffix(word, suffix, "")
                if condition is None or condition(stem):
                    return stem + replacement
                else:
                    # Don't try any further rules
                    return word

        return word

    def _step1a(self, word):
        """Implements Step 1a from "An algorithm for suffix stripping"

        From the paper:

            SSES -> SS                      caresses  ->  caress
            IES  -> I                       ponies    ->  poni
                                            ties      ->  ti
            SS   -> SS                      caress    ->  caress
            S    ->                         cats      ->  cat
        """
        # this NLTK-only rule extends the original algorithm, so
        # that 'flies'->'fli' but 'dies'->'die' etc
        if self.mode == self.NLTK_EXTENSIONS:
            if word.endswith("ies") and len(word) == 4:
                return self._replace_suffix(word, "ies", "ie")

        return self._apply_rule_list(
            word,
            [
                ("sses", "ss", None),  # SSES -> SS
                ("ies", "i", None),  # IES -> I
                ("ss", "ss", None),  # SS -> SS
                ("s", "", None),  # S ->
            ],
        )

    def _step1b(self, word):
        """Implements Step 1b from "An algorithm for suffix stripping"

        From the paper:

            (m>0) EED -> EE                 feed      ->  feed
                                            agreed    ->  agree
            (*v*) ED  ->                    plastered ->  plaster
                                            bled      ->  bled
            (*v*) ING ->                    motoring  ->  motor
                                            sing      ->  sing

        If the second or third of the rules in Step 1b is successful,
        the following is done:

            AT -> ATE                       conflat(ed)  ->  conflate
            BL -> BLE                       troubl(ed)   ->  trouble
            IZ -> IZE                       siz(ed)      ->  size
            (*d and not (*L or *S or *Z))
               -> single letter
                                            hopp(ing)    ->  hop
                                            tann(ed)     ->  tan
                                            fall(ing)    ->  fall
                                            hiss(ing)    ->  hiss
                                            fizz(ed)     ->  fizz
            (m=1 and *o) -> E               fail(ing)    ->  fail
                                            fil(ing)     ->  file

        The rule to map to a single letter causes the removal of one of
        the double letter pair. The -E is put back on -AT, -BL and -IZ,
        so that the suffixes -ATE, -BLE and -IZE can be recognised
        later. This E may be removed in step 4.
        """
        # this NLTK-only block extends the original algorithm, so that
        # 'spied'->'spi' but 'died'->'die' etc
        if self.mode == self.NLTK_EXTENSIONS:
            if word.endswith("ied"):
                if len(word) == 4:
                    return self._replace_suffix(word, "ied", "ie")
                else:
                    return self._replace_suffix(word, "ied", "i")

        # (m>0) EED -> EE
        if word.endswith("eed"):
            stem = self._replace_suffix(word, "eed", "")
            if self._measure(stem) > 0:
                return stem + "ee"
            else:
                return word

        rule_2_or_3_succeeded = False

        for suffix in ["ed", "ing"]:
            if word.endswith(suffix):
                intermediate_stem = self._replace_suffix(word, suffix, "")
                if self._contains_vowel(intermediate_stem):
                    rule_2_or_3_succeeded = True
                    break

        if not rule_2_or_3_succeeded:
            return word

        return self._apply_rule_list(
            intermediate_stem,
            [
                ("at", "ate", None),  # AT -> ATE
                ("bl", "ble", None),  # BL -> BLE
                ("iz", "ize", None),  # IZ -> IZE
                # (*d and not (*L or *S or *Z))
                # -> single letter
                (
                    "*d",
                    intermediate_stem[-1],
                    lambda stem: intermediate_stem[-1] not in ("l", "s", "z"),
                ),
                # (m=1 and *o) -> E
                (
                    "",
                    "e",
                    lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)),
                ),
            ],
        )

    def _step1c(self, word):
        """Implements Step 1c from "An algorithm for suffix stripping"

        From the paper:

        Step 1c

            (*v*) Y -> I                    happy        ->  happi
                                            sky          ->  sky
        """

        def nltk_condition(stem):
            """
            This has been modified from the original Porter algorithm so
            that y->i is only done when y is preceded by a consonant,
            but not if the stem is only a single consonant, i.e.

               (*c and not c) Y -> I

            So 'happy' -> 'happi', but
               'enjoy' -> 'enjoy'  etc

            This is a much better rule. Formerly 'enjoy'->'enjoi' and
            'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but
            with this modification that no longer really matters.

            Also, the removal of the contains_vowel(z) condition means
            that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and
            conflate with 'spied', 'tried', 'flies' ...
            """
            return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1)

        def original_condition(stem):
            return self._contains_vowel(stem)

        return self._apply_rule_list(
            word,
            [
                (
                    "y",
                    "i",
                    nltk_condition
                    if self.mode == self.NLTK_EXTENSIONS
                    else original_condition,
                )
            ],
        )

    def _step2(self, word):
        """Implements Step 2 from "An algorithm for suffix stripping"

        From the paper:

        Step 2

            (m>0) ATIONAL ->  ATE       relational     ->  relate
            (m>0) TIONAL  ->  TION      conditional    ->  condition
                                        rational       ->  rational
            (m>0) ENCI    ->  ENCE      valenci        ->  valence
            (m>0) ANCI    ->  ANCE      hesitanci      ->  hesitance
            (m>0) IZER    ->  IZE       digitizer      ->  digitize
            (m>0) ABLI    ->  ABLE      conformabli    ->  conformable
            (m>0) ALLI    ->  AL        radicalli      ->  radical
            (m>0) ENTLI   ->  ENT       differentli    ->  different
            (m>0) ELI     ->  E         vileli         ->  vile
            (m>0) OUSLI   ->  OUS       analogousli    ->  analogous
            (m>0) IZATION ->  IZE       vietnamization ->  vietnamize
            (m>0) ATION   ->  ATE       predication    ->  predicate
            (m>0) ATOR    ->  ATE       operator       ->  operate
            (m>0) ALISM   ->  AL        feudalism      ->  feudal
            (m>0) IVENESS ->  IVE       decisiveness   ->  decisive
            (m>0) FULNESS ->  FUL       hopefulness    ->  hopeful
            (m>0) OUSNESS ->  OUS       callousness    ->  callous
            (m>0) ALITI   ->  AL        formaliti      ->  formal
            (m>0) IVITI   ->  IVE       sensitiviti    ->  sensitive
            (m>0) BILITI  ->  BLE       sensibiliti    ->  sensible
        """

        if self.mode == self.NLTK_EXTENSIONS:
            # Instead of applying the ALLI -> AL rule after '(a)bli' per
            # the published algorithm, instead we apply it first, and,
            # if it succeeds, run the result through step2 again.
            if word.endswith("alli") and self._has_positive_measure(
                self._replace_suffix(word, "alli", "")
            ):
                return self._step2(self._replace_suffix(word, "alli", "al"))

        bli_rule = ("bli", "ble", self._has_positive_measure)
        abli_rule = ("abli", "able", self._has_positive_measure)

        rules = [
            ("ational", "ate", self._has_positive_measure),
            ("tional", "tion", self._has_positive_measure),
            ("enci", "ence", self._has_positive_measure),
            ("anci", "ance", self._has_positive_measure),
            ("izer", "ize", self._has_positive_measure),
            abli_rule if self.mode == self.ORIGINAL_ALGORITHM else bli_rule,
            ("alli", "al", self._has_positive_measure),
            ("entli", "ent", self._has_positive_measure),
            ("eli", "e", self._has_positive_measure),
            ("ousli", "ous", self._has_positive_measure),
            ("ization", "ize", self._has_positive_measure),
            ("ation", "ate", self._has_positive_measure),
            ("ator", "ate", self._has_positive_measure),
            ("alism", "al", self._has_positive_measure),
            ("iveness", "ive", self._has_positive_measure),
            ("fulness", "ful", self._has_positive_measure),
            ("ousness", "ous", self._has_positive_measure),
            ("aliti", "al", self._has_positive_measure),
            ("iviti", "ive", self._has_positive_measure),
            ("biliti", "ble", self._has_positive_measure),
        ]

        if self.mode == self.NLTK_EXTENSIONS:
            rules.append(("fulli", "ful", self._has_positive_measure))

            # The 'l' of the 'logi' -> 'log' rule is put with the stem,
            # so that short stems like 'geo' 'theo' etc work like
            # 'archaeo' 'philo' etc.
            rules.append(
                ("logi", "log", lambda stem: self._has_positive_measure(word[:-3]))
            )

        if self.mode == self.MARTIN_EXTENSIONS:
            rules.append(("logi", "log", self._has_positive_measure))

        return self._apply_rule_list(word, rules)

    def _step3(self, word):
        """Implements Step 3 from "An algorithm for suffix stripping"

        From the paper:

        Step 3

            (m>0) ICATE ->  IC          triplicate     ->  triplic
            (m>0) ATIVE ->              formative      ->  form
            (m>0) ALIZE ->  AL          formalize      ->  formal
            (m>0) ICITI ->  IC          electriciti    ->  electric
            (m>0) ICAL  ->  IC          electrical     ->  electric
            (m>0) FUL   ->              hopeful        ->  hope
            (m>0) NESS  ->              goodness       ->  good
        """
        return self._apply_rule_list(
            word,
            [
                ("icate", "ic", self._has_positive_measure),
                ("ative", "", self._has_positive_measure),
                ("alize", "al", self._has_positive_measure),
                ("iciti", "ic", self._has_positive_measure),
                ("ical", "ic", self._has_positive_measure),
                ("ful", "", self._has_positive_measure),
                ("ness", "", self._has_positive_measure),
            ],
        )

    def _step4(self, word):
        """Implements Step 4 from "An algorithm for suffix stripping"

        Step 4

            (m>1) AL    ->              revival        ->  reviv
            (m>1) ANCE  ->              allowance      ->  allow
            (m>1) ENCE  ->              inference      ->  infer
            (m>1) ER    ->              airliner       ->  airlin
            (m>1) IC    ->              gyroscopic     ->  gyroscop
            (m>1) ABLE  ->              adjustable     ->  adjust
            (m>1) IBLE  ->              defensible     ->  defens
            (m>1) ANT   ->              irritant       ->  irrit
            (m>1) EMENT ->              replacement    ->  replac
            (m>1) MENT  ->              adjustment     ->  adjust
            (m>1) ENT   ->              dependent      ->  depend
            (m>1 and (*S or *T)) ION -> adoption       ->  adopt
            (m>1) OU    ->              homologou      ->  homolog
            (m>1) ISM   ->              communism      ->  commun
            (m>1) ATE   ->              activate       ->  activ
            (m>1) ITI   ->              angulariti     ->  angular
            (m>1) OUS   ->              homologous     ->  homolog
            (m>1) IVE   ->              effective      ->  effect
            (m>1) IZE   ->              bowdlerize     ->  bowdler

        The suffixes are now removed. All that remains is a little
        tidying up.
        """
        measure_gt_1 = lambda stem: self._measure(stem) > 1

        return self._apply_rule_list(
            word,
            [
                ("al", "", measure_gt_1),
                ("ance", "", measure_gt_1),
                ("ence", "", measure_gt_1),
                ("er", "", measure_gt_1),
                ("ic", "", measure_gt_1),
                ("able", "", measure_gt_1),
                ("ible", "", measure_gt_1),
                ("ant", "", measure_gt_1),
                ("ement", "", measure_gt_1),
                ("ment", "", measure_gt_1),
                ("ent", "", measure_gt_1),
                # (m>1 and (*S or *T)) ION ->
                (
                    "ion",
                    "",
                    lambda stem: self._measure(stem) > 1 and stem[-1] in ("s", "t"),
                ),
                ("ou", "", measure_gt_1),
                ("ism", "", measure_gt_1),
                ("ate", "", measure_gt_1),
                ("iti", "", measure_gt_1),
                ("ous", "", measure_gt_1),
                ("ive", "", measure_gt_1),
                ("ize", "", measure_gt_1),
            ],
        )

    def _step5a(self, word):
        """Implements Step 5a from "An algorithm for suffix stripping"

        From the paper:

        Step 5a

            (m>1) E     ->              probate        ->  probat
                                        rate           ->  rate
            (m=1 and not *o) E ->       cease          ->  ceas
        """
        # Note that Martin's test vocabulary and reference
        # implementations are inconsistent in how they handle the case
        # where two rules both refer to a suffix that matches the word
        # to be stemmed, but only the condition of the second one is
        # true.
        # Earlier in step1b we had the rules:
        #     (m>0) EED -> EE
        #     (*v*) ED  ->
        # but the examples in the paper included "feed"->"feed", even
        # though (*v*) is true for "fe" and therefore the second rule
        # alone would map "feed"->"fe".
        # However, in THIS case, we need to handle the consecutive rules
        # differently and try both conditions (obviously; the second
        # rule here would be redundant otherwise). Martin's paper makes
        # no explicit mention of the inconsistency; you have to infer it
        # from the examples.
        # For this reason, we can't use _apply_rule_list here.
        if word.endswith("e"):
            stem = self._replace_suffix(word, "e", "")
            if self._measure(stem) > 1:
                return stem
            if self._measure(stem) == 1 and not self._ends_cvc(stem):
                return stem
        return word

    def _step5b(self, word):
        """Implements Step 5b from "An algorithm for suffix stripping"

        From the paper:

        Step 5b

            (m > 1 and *d and *L) -> single letter
                                        controll       ->  control
                                        roll           ->  roll
        """
        return self._apply_rule_list(
            word, [("ll", "l", lambda stem: self._measure(word[:-1]) > 1)]
        )

    def stem(self, word, to_lowercase=True):
        """
        :param to_lowercase: if `to_lowercase=True`, the word is always lowercased before stemming
        """
        stem = word.lower() if to_lowercase else word

        if self.mode == self.NLTK_EXTENSIONS and word in self.pool:
            return self.pool[stem]

        if self.mode != self.ORIGINAL_ALGORITHM and len(word) <= 2:
            # With this line, strings of length 1 or 2 don't go through
            # the stemming process, although no mention is made of this
            # in the published algorithm.
            return stem

        stem = self._step1a(stem)
        stem = self._step1b(stem)
        stem = self._step1c(stem)
        stem = self._step2(stem)
        stem = self._step3(stem)
        stem = self._step4(stem)
        stem = self._step5a(stem)
        stem = self._step5b(stem)

        return stem

    def __repr__(self):
        return "<PorterStemmer>"


def demo():
    """
    A demonstration of the porter stemmer on a sample from
    the Penn Treebank corpus.
    """

    from nltk import stem
    from nltk.corpus import treebank

    stemmer = stem.PorterStemmer()

    orig = []
    stemmed = []
    for item in treebank.fileids()[:3]:
        for (word, tag) in treebank.tagged_words(item):
            orig.append(word)
            stemmed.append(stemmer.stem(word))

    # Convert the results to a string, and word-wrap them.
    results = " ".join(stemmed)
    results = re.sub(r"(.{,70})\s", r"\1\n", results + " ").rstrip()

    # Convert the original to a string, and word wrap it.
    original = " ".join(orig)
    original = re.sub(r"(.{,70})\s", r"\1\n", original + " ").rstrip()

    # Print the results.
    print("-Original-".center(70).replace(" ", "*").replace("-", " "))
    print(original)
    print("-Results-".center(70).replace(" ", "*").replace("-", " "))
    print(results)
    print("*" * 70)
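A minimal usage sketch of the three modes documented in the class docstring above, assuming the module is importable as nltk.stem.porter; apart from the irregular-forms lookup, exact stems are not asserted here.

# Sketch only: compares the three PorterStemmer modes on the same inputs.
from nltk.stem.porter import PorterStemmer

nltk_stemmer = PorterStemmer()  # defaults to PorterStemmer.NLTK_EXTENSIONS
martin_stemmer = PorterStemmer(PorterStemmer.MARTIN_EXTENSIONS)
original_stemmer = PorterStemmer(PorterStemmer.ORIGINAL_ALGORITHM)

# 'dying' is in the NLTK_EXTENSIONS irregular-forms pool, so the default mode
# maps it straight to 'die'; the other modes run the full rule cascade and may
# return a different stem.
for stemmer in (nltk_stemmer, martin_stemmer, original_stemmer):
    print(stemmer.mode, stemmer.stem("dying"), stemmer.stem("multiplying"))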
llmeval-env/lib/python3.10/site-packages/nltk/stem/regexp.py
ADDED
@@ -0,0 +1,56 @@
# Natural Language Toolkit: Stemmers
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Trevor Cohn <[email protected]>
#         Edward Loper <[email protected]>
#         Steven Bird <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
import re

from nltk.stem.api import StemmerI


class RegexpStemmer(StemmerI):
    """
    A stemmer that uses regular expressions to identify morphological
    affixes. Any substrings that match the regular expressions will
    be removed.

        >>> from nltk.stem import RegexpStemmer
        >>> st = RegexpStemmer('ing$|s$|e$|able$', min=4)
        >>> st.stem('cars')
        'car'
        >>> st.stem('mass')
        'mas'
        >>> st.stem('was')
        'was'
        >>> st.stem('bee')
        'bee'
        >>> st.stem('compute')
        'comput'
        >>> st.stem('advisable')
        'advis'

    :type regexp: str or regexp
    :param regexp: The regular expression that should be used to
        identify morphological affixes.
    :type min: int
    :param min: The minimum length of string to stem
    """

    def __init__(self, regexp, min=0):

        if not hasattr(regexp, "pattern"):
            regexp = re.compile(regexp)
        self._regexp = regexp
        self._min = min

    def stem(self, word):
        if len(word) < self._min:
            return word
        else:
            return self._regexp.sub("", word)

    def __repr__(self):
        return f"<RegexpStemmer: {self._regexp.pattern!r}>"
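A short sketch of the two constructor behaviours visible above: anything with a .pattern attribute (such as a compiled regex) is used as-is, and words shorter than min are returned unchanged. The suffix pattern is invented for the example.

import re

from nltk.stem.regexp import RegexpStemmer

# Illustrative pattern only; RegexpStemmer accepts either this compiled object
# (it has a .pattern attribute) or the raw string "ing$|ed$".
suffix_re = re.compile(r"ing$|ed$")
st = RegexpStemmer(suffix_re, min=4)

print(st.stem("ceding"))  # 'ced' -- the 'ing' suffix is stripped
print(st.stem("bed"))     # 'bed' -- shorter than min=4, returned unchanged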
llmeval-env/lib/python3.10/site-packages/nltk/stem/rslp.py
ADDED
@@ -0,0 +1,137 @@
# Natural Language Toolkit: RSLP Stemmer
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Tiago Tresoldi <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

# This code is based on the algorithm presented in the paper "A Stemming
# Algorithm for the Portuguese Language" by Viviane Moreira Orengo and
# Christian Huyck, which unfortunately I had no access to. The code is a
# Python version, with some minor modifications of mine, to the description
# presented at https://www.webcitation.org/5NnvdIzOb and to the C source code
# available at http://www.inf.ufrgs.br/~arcoelho/rslp/integrando_rslp.html.
# Please note that this stemmer is intended for demonstration and educational
# purposes only. Feel free to write me for any comments, including the
# development of a different and/or better stemmer for Portuguese. I also
# suggest using NLTK's mailing list for Portuguese for any discussion.

# Este código é baseado no algoritmo apresentado no artigo "A Stemming
# Algorithm for the Portuguese Language" de Viviane Moreira Orengo e
# Christian Huyck, o qual infelizmente não tive a oportunidade de ler. O
# código é uma conversão para Python, com algumas pequenas modificações
# minhas, daquele apresentado em https://www.webcitation.org/5NnvdIzOb e do
# código para linguagem C disponível em
# http://www.inf.ufrgs.br/~arcoelho/rslp/integrando_rslp.html. Por favor,
# lembre-se de que este stemmer foi desenvolvido com finalidades unicamente
# de demonstração e didáticas. Sinta-se livre para me escrever para qualquer
# comentário, inclusive sobre o desenvolvimento de um stemmer diferente
# e/ou melhor para o português. Também sugiro utilizar-se a lista de discussão
# do NLTK para o português para qualquer debate.

from nltk.data import load
from nltk.stem.api import StemmerI


class RSLPStemmer(StemmerI):
    """
    A stemmer for Portuguese.

        >>> from nltk.stem import RSLPStemmer
        >>> st = RSLPStemmer()
        >>> # opening lines of Erico Verissimo's "Música ao Longe"
        >>> text = '''
        ... Clarissa risca com giz no quadro-negro a paisagem que os alunos
        ... devem copiar . Uma casinha de porta e janela , em cima duma
        ... coxilha .'''
        >>> for token in text.split(): # doctest: +NORMALIZE_WHITESPACE
        ...     print(st.stem(token))
        clariss risc com giz no quadro-negr a pais que os alun dev copi .
        uma cas de port e janel , em cim dum coxilh .
    """

    def __init__(self):
        self._model = []

        self._model.append(self.read_rule("step0.pt"))
        self._model.append(self.read_rule("step1.pt"))
        self._model.append(self.read_rule("step2.pt"))
        self._model.append(self.read_rule("step3.pt"))
        self._model.append(self.read_rule("step4.pt"))
        self._model.append(self.read_rule("step5.pt"))
        self._model.append(self.read_rule("step6.pt"))

    def read_rule(self, filename):
        rules = load("nltk:stemmers/rslp/" + filename, format="raw").decode("utf8")
        lines = rules.split("\n")

        lines = [line for line in lines if line != ""]  # remove blank lines
        lines = [line for line in lines if line[0] != "#"]  # remove comments

        # NOTE: a simple but ugly hack to make this parser happy with double '\t's
        lines = [line.replace("\t\t", "\t") for line in lines]

        # parse rules
        rules = []
        for line in lines:
            rule = []
            tokens = line.split("\t")

            # text to be searched for at the end of the string
            rule.append(tokens[0][1:-1])  # remove quotes

            # minimum stem size to perform the replacement
            rule.append(int(tokens[1]))

            # text to be replaced into
            rule.append(tokens[2][1:-1])  # remove quotes

            # exceptions to this rule
            rule.append([token[1:-1] for token in tokens[3].split(",")])

            # append to the results
            rules.append(rule)

        return rules

    def stem(self, word):
        word = word.lower()

        # the word ends in 's'? apply rule for plural reduction
        if word[-1] == "s":
            word = self.apply_rule(word, 0)

        # the word ends in 'a'? apply rule for feminine reduction
        if word[-1] == "a":
            word = self.apply_rule(word, 1)

        # augmentative reduction
        word = self.apply_rule(word, 3)

        # adverb reduction
        word = self.apply_rule(word, 2)

        # noun reduction
        prev_word = word
        word = self.apply_rule(word, 4)
        if word == prev_word:
            # verb reduction
            prev_word = word
            word = self.apply_rule(word, 5)
            if word == prev_word:
                # vowel removal
                word = self.apply_rule(word, 6)

        return word

    def apply_rule(self, word, rule_index):
        rules = self._model[rule_index]
        for rule in rules:
            suffix_length = len(rule[0])
            if word[-suffix_length:] == rule[0]:  # if suffix matches
                if len(word) >= suffix_length + rule[1]:  # if we have minimum size
                    if word not in rule[3]:  # if not an exception
                        word = word[:-suffix_length] + rule[2]
                        break

        return word
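The rule files parsed by read_rule reduce to four-element lists of the form [suffix, minimum_stem_size, replacement, exceptions], which apply_rule scans in order. The toy rule below is invented for illustration; the real rules live in the stemmers/rslp/step*.pt data files.

# Toy illustration of the parsed RSLP rule format; the rule is hypothetical
# and is not taken from the shipped step*.pt files.
def apply_toy_rule(word, rules):
    for suffix, min_stem_size, replacement, exceptions in rules:
        if word.endswith(suffix):
            if len(word) >= len(suffix) + min_stem_size and word not in exceptions:
                # Same effect as the break in RSLPStemmer.apply_rule: stop at
                # the first rule that fully applies.
                return word[: -len(suffix)] + replacement
    return word

toy_rules = [["inho", 3, "", ["caminho"]]]  # hypothetical diminutive rule
print(apply_toy_rule("livrinho", toy_rules))  # 'livr'
print(apply_toy_rule("caminho", toy_rules))   # 'caminho' (listed as an exception)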
llmeval-env/lib/python3.10/site-packages/nltk/stem/snowball.py
ADDED
The diff for this file is too large to render.
See raw diff
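Since the snowball.py diff is not rendered, the sketch below only shows the usual entry point that module provides, assuming the standard SnowballStemmer API; the printed values are expectations rather than asserted results.

from nltk.stem.snowball import SnowballStemmer

# "english" selects the Snowball ("Porter2") English algorithm, while "porter"
# selects the classic Porter stemmer implemented in porter.py above.
print(SnowballStemmer("english").stem("generously"))  # expected: 'generous'
print(SnowballStemmer("porter").stem("generously"))   # expected: 'gener'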
llmeval-env/lib/python3.10/site-packages/nltk/stem/wordnet.py
ADDED
@@ -0,0 +1,49 @@
# Natural Language Toolkit: WordNet stemmer interface
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Steven Bird <[email protected]>
#         Edward Loper <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

from nltk.corpus import wordnet as wn


class WordNetLemmatizer:
    """
    WordNet Lemmatizer

    Lemmatize using WordNet's built-in morphy function.
    Returns the input word unchanged if it cannot be found in WordNet.

        >>> from nltk.stem import WordNetLemmatizer
        >>> wnl = WordNetLemmatizer()
        >>> print(wnl.lemmatize('dogs'))
        dog
        >>> print(wnl.lemmatize('churches'))
        church
        >>> print(wnl.lemmatize('aardwolves'))
        aardwolf
        >>> print(wnl.lemmatize('abaci'))
        abacus
        >>> print(wnl.lemmatize('hardrock'))
        hardrock
    """

    def lemmatize(self, word: str, pos: str = "n") -> str:
        """Lemmatize `word` using WordNet's built-in morphy function.
        Returns the input word unchanged if it cannot be found in WordNet.

        :param word: The input word to lemmatize.
        :type word: str
        :param pos: The Part Of Speech tag. Valid options are `"n"` for nouns,
            `"v"` for verbs, `"a"` for adjectives, `"r"` for adverbs and `"s"`
            for satellite adjectives.
        :type pos: str
        :return: The lemma of `word`, for the given `pos`.
        """
        lemmas = wn._morphy(word, pos)
        return min(lemmas, key=len) if lemmas else word

    def __repr__(self):
        return "<WordNetLemmatizer>"
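A brief sketch of the pos parameter documented above; it assumes the WordNet corpus data has already been installed (for example via nltk.download('wordnet')).

from nltk.stem import WordNetLemmatizer

# Assumes the WordNet data is available locally.
wnl = WordNetLemmatizer()
print(wnl.lemmatize("running"))           # 'running' -- default pos is "n"
print(wnl.lemmatize("running", pos="v"))  # 'run'     -- verb reading
print(wnl.lemmatize("corpora"))           # 'corpus'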
llmeval-env/lib/python3.10/site-packages/nltk/translate/__init__.py
ADDED
@@ -0,0 +1,32 @@
# Natural Language Toolkit: Machine Translation
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Steven Bird <[email protected]>, Tah Wei Hoon <[email protected]>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT

"""
Experimental features for machine translation.
These interfaces are prone to change.

isort:skip_file
"""

from nltk.translate.api import AlignedSent, Alignment, PhraseTable
from nltk.translate.ibm_model import IBMModel
from nltk.translate.ibm1 import IBMModel1
from nltk.translate.ibm2 import IBMModel2
from nltk.translate.ibm3 import IBMModel3
from nltk.translate.ibm4 import IBMModel4
from nltk.translate.ibm5 import IBMModel5
from nltk.translate.bleu_score import sentence_bleu as bleu
from nltk.translate.ribes_score import sentence_ribes as ribes
from nltk.translate.meteor_score import meteor_score as meteor
from nltk.translate.metrics import alignment_error_rate
from nltk.translate.stack_decoder import StackDecoder
from nltk.translate.nist_score import sentence_nist as nist
from nltk.translate.chrf_score import sentence_chrf as chrf
from nltk.translate.gale_church import trace
from nltk.translate.gdfa import grow_diag_final_and
from nltk.translate.gleu_score import sentence_gleu as gleu
from nltk.translate.phrase_based import extract
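A minimal sketch of the bleu alias exported above; the sentences are invented for the example.

from nltk.translate import bleu  # alias for nltk.translate.bleu_score.sentence_bleu

# sentence_bleu expects a list of tokenized references and one tokenized hypothesis.
reference = "the cat sat on the mat".split()
hypothesis = "the cat sat on a mat".split()
print(bleu([reference], hypothesis))  # a score between 0 and 1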
llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.46 kB). View file
llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc
ADDED
Binary file (12.9 kB). View file
llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/bleu_score.cpython-310.pyc
ADDED
Binary file (26.7 kB). View file
llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc
ADDED
Binary file (7.89 kB). View file
llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gale_church.cpython-310.pyc
ADDED
Binary file (7.96 kB). View file
llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gdfa.cpython-310.pyc
ADDED
Binary file (5.15 kB). View file