diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6480b1b3f83b4cf5c41c33a744716e79f6dd6894 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e74d1ef614f9952753f82fa7b71abd6afdd174c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a8dec72030c15a7a00381debc550c0f2128bedd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ee816886766cb56264183c275bb4e6f8ada120c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7714135da523f407933a8a7e0939ab97c663db4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/collocations.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..048cda5ba41945b678071507b2bd2acfaab0e521 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/compat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dabd02027106fc9590ed46f9e668d0834321a3a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c78fcf6adcd047d78101dd9fe21de7b4a9a4d95 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ab4f771d1c9c394ac61cbf978c84d338b4322b6d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/downloader.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a42b6afcdf65549f8d9b031149c2a9de9aefe5e6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baf6874218d7ce48f64a5512c6ff87ea020e4977 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..804a0d301db38d992a07e6245152f007ee0781b5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb3b12d54d6815ab378dc98f65085af8f96aca40 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dfc9e701c371ff7202d8def1cac8fae09d22406 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/jsontags.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d105b0a1c527ab61e16b62958f8285af9684141 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a32927239f66f247633446d58c2d0811324f496 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fa481d11c2166dc23ef200d54be96c99f0dcd16 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0591c45d220326834863ac78993f149355a1fc91 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/text.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73b06c5d4d9c416a79e97bd90c5693f676d905e5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82ade879edb6b3a203f977704153f48c5fd7132c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/toolbox.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0e93bd057b8be55a13e6e7fbb0bee2da03800fe Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/treeprettyprinter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4824443fb7d18f0889964dba4fe2b27b3a5fd5b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/treetransforms.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2854b00a3f5f16d3ce8bb2a6c36245f9adf13a89 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..300927ee27289d7bc195b21e9627a349744fdb4f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..43c5876b74dcf07ea70c9d90c1dcd41971e515a4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__init__.py @@ -0,0 +1,34 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For license information, see LICENSE.TXT + +""" +Combinatory Categorial Grammar. 
+ +For more information see nltk/doc/contrib/ccg/ccg.pdf +""" + +from nltk.ccg.chart import CCGChart, CCGChartParser, CCGEdge, CCGLeafEdge +from nltk.ccg.combinator import ( + BackwardApplication, + BackwardBx, + BackwardCombinator, + BackwardComposition, + BackwardSx, + BackwardT, + DirectedBinaryCombinator, + ForwardApplication, + ForwardCombinator, + ForwardComposition, + ForwardSubstitution, + ForwardT, + UndirectedBinaryCombinator, + UndirectedComposition, + UndirectedFunctionApplication, + UndirectedSubstitution, + UndirectedTypeRaise, +) +from nltk.ccg.lexicon import CCGLexicon diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25d29d83dd5f8220a4401f204f5cbe00ee3b8941 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16c5b36add80c9ff8ae8ecd46eced3950d9e980a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7416ff4a16d2ce903815ceb04fba5cccf45bd164 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f628baeee2a835dada2dba8ca4898fe0996e37a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69314db211de7ddcd63e23d42a9ed8114afbf614 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efb06982dc81b499ce1fbf9891049f039a6477ce Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/api.py new file mode 100644 index 0000000000000000000000000000000000000000..f0d1355cfadca031cf0017584d819fe794ffaea3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/api.py @@ -0,0 +1,358 @@ +# Natural Language Toolkit: CCG Categories +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For 
license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod +from functools import total_ordering + +from nltk.internals import raise_unorderable_types + + +@total_ordering +class AbstractCCGCategory(metaclass=ABCMeta): + """ + Interface for categories in combinatory grammars. + """ + + @abstractmethod + def is_primitive(self): + """ + Returns true if the category is primitive. + """ + + @abstractmethod + def is_function(self): + """ + Returns true if the category is a function application. + """ + + @abstractmethod + def is_var(self): + """ + Returns true if the category is a variable. + """ + + @abstractmethod + def substitute(self, substitutions): + """ + Takes a set of (var, category) substitutions, and replaces every + occurrence of the variable with the corresponding category. + """ + + @abstractmethod + def can_unify(self, other): + """ + Determines whether two categories can be unified. + - Returns None if they cannot be unified + - Returns a list of necessary substitutions if they can. + """ + + # Utility functions: comparison, strings and hashing. + @abstractmethod + def __str__(self): + pass + + def __eq__(self, other): + return ( + self.__class__ is other.__class__ + and self._comparison_key == other._comparison_key + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, AbstractCCGCategory): + raise_unorderable_types("<", self, other) + if self.__class__ is other.__class__: + return self._comparison_key < other._comparison_key + else: + return self.__class__.__name__ < other.__class__.__name__ + + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self._comparison_key) + return self._hash + + +class CCGVar(AbstractCCGCategory): + """ + Class representing a variable CCG category. + Used for conjunctions (and possibly type-raising, if implemented as a + unary rule). + """ + + _maxID = 0 + + def __init__(self, prim_only=False): + """Initialize a variable (selects a new identifier) + + :param prim_only: a boolean that determines whether the variable is + restricted to primitives + :type prim_only: bool + """ + self._id = self.new_id() + self._prim_only = prim_only + self._comparison_key = self._id + + @classmethod + def new_id(cls): + """ + A class method allowing generation of unique variable identifiers. + """ + cls._maxID = cls._maxID + 1 + return cls._maxID - 1 + + @classmethod + def reset_id(cls): + cls._maxID = 0 + + def is_primitive(self): + return False + + def is_function(self): + return False + + def is_var(self): + return True + + def substitute(self, substitutions): + """If there is a substitution corresponding to this variable, + return the substituted category. + """ + for (var, cat) in substitutions: + if var == self: + return cat + return self + + def can_unify(self, other): + """If the variable can be replaced with other + a substitution is returned. + """ + if other.is_primitive() or not self._prim_only: + return [(self, other)] + return None + + def id(self): + return self._id + + def __str__(self): + return "_var" + str(self._id) + + +@total_ordering +class Direction: + """ + Class representing the direction of a function application. + Also contains maintains information as to which combinators + may be used with the category. 
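# A minimal usage sketch (not part of the NLTK sources) of the category
# interface above, assuming the CCGVar and PrimitiveCategory classes defined
# in this module (nltk.ccg.api):
from nltk.ccg.api import CCGVar, PrimitiveCategory

CCGVar.reset_id()
x = CCGVar()                                  # prints as "_var0"
subs = x.can_unify(PrimitiveCategory("NP"))   # [(x, NP)] -- a single substitution
x.substitute(subs)                            # resolves to the NP category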
+ """ + + def __init__(self, dir, restrictions): + self._dir = dir + self._restrs = restrictions + self._comparison_key = (dir, tuple(restrictions)) + + # Testing the application direction + def is_forward(self): + return self._dir == "/" + + def is_backward(self): + return self._dir == "\\" + + def dir(self): + return self._dir + + def restrs(self): + """A list of restrictions on the combinators. + '.' denotes that permuting operations are disallowed + ',' denotes that function composition is disallowed + '_' denotes that the direction has variable restrictions. + (This is redundant in the current implementation of type-raising) + """ + return self._restrs + + def is_variable(self): + return self._restrs == "_" + + # Unification and substitution of variable directions. + # Used only if type-raising is implemented as a unary rule, as it + # must inherit restrictions from the argument category. + def can_unify(self, other): + if other.is_variable(): + return [("_", self.restrs())] + elif self.is_variable(): + return [("_", other.restrs())] + else: + if self.restrs() == other.restrs(): + return [] + return None + + def substitute(self, subs): + if not self.is_variable(): + return self + + for (var, restrs) in subs: + if var == "_": + return Direction(self._dir, restrs) + return self + + # Testing permitted combinators + def can_compose(self): + return "," not in self._restrs + + def can_cross(self): + return "." not in self._restrs + + def __eq__(self, other): + return ( + self.__class__ is other.__class__ + and self._comparison_key == other._comparison_key + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, Direction): + raise_unorderable_types("<", self, other) + if self.__class__ is other.__class__: + return self._comparison_key < other._comparison_key + else: + return self.__class__.__name__ < other.__class__.__name__ + + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self._comparison_key) + return self._hash + + def __str__(self): + r_str = "" + for r in self._restrs: + r_str = r_str + "%s" % r + return f"{self._dir}{r_str}" + + # The negation operator reverses the direction of the application + def __neg__(self): + if self._dir == "/": + return Direction("\\", self._restrs) + else: + return Direction("/", self._restrs) + + +class PrimitiveCategory(AbstractCCGCategory): + """ + Class representing primitive categories. + Takes a string representation of the category, and a + list of strings specifying the morphological subcategories. + """ + + def __init__(self, categ, restrictions=[]): + self._categ = categ + self._restrs = restrictions + self._comparison_key = (categ, tuple(restrictions)) + + def is_primitive(self): + return True + + def is_function(self): + return False + + def is_var(self): + return False + + def restrs(self): + return self._restrs + + def categ(self): + return self._categ + + # Substitution does nothing to a primitive category + def substitute(self, subs): + return self + + # A primitive can be unified with a class of the same + # base category, given that the other category shares all + # of its subclasses, or with a variable. 
+ def can_unify(self, other): + if not other.is_primitive(): + return None + if other.is_var(): + return [(other, self)] + if other.categ() == self.categ(): + for restr in self._restrs: + if restr not in other.restrs(): + return None + return [] + return None + + def __str__(self): + if self._restrs == []: + return "%s" % self._categ + restrictions = "[%s]" % ",".join(repr(r) for r in self._restrs) + return f"{self._categ}{restrictions}" + + +class FunctionalCategory(AbstractCCGCategory): + """ + Class that represents a function application category. + Consists of argument and result categories, together with + an application direction. + """ + + def __init__(self, res, arg, dir): + self._res = res + self._arg = arg + self._dir = dir + self._comparison_key = (arg, dir, res) + + def is_primitive(self): + return False + + def is_function(self): + return True + + def is_var(self): + return False + + # Substitution returns the category consisting of the + # substitution applied to each of its constituents. + def substitute(self, subs): + sub_res = self._res.substitute(subs) + sub_dir = self._dir.substitute(subs) + sub_arg = self._arg.substitute(subs) + return FunctionalCategory(sub_res, sub_arg, self._dir) + + # A function can unify with another function, so long as its + # constituents can unify, or with an unrestricted variable. + def can_unify(self, other): + if other.is_var(): + return [(other, self)] + if other.is_function(): + sa = self._res.can_unify(other.res()) + sd = self._dir.can_unify(other.dir()) + if sa is not None and sd is not None: + sb = self._arg.substitute(sa).can_unify(other.arg().substitute(sa)) + if sb is not None: + return sa + sb + return None + + # Constituent accessors + def arg(self): + return self._arg + + def res(self): + return self._res + + def dir(self): + return self._dir + + def __str__(self): + return f"({self._res}{self._dir}{self._arg})" diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/chart.py b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/chart.py new file mode 100644 index 0000000000000000000000000000000000000000..bf9e61036199016f89e89a8b0980d38d856ac4dd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/chart.py @@ -0,0 +1,480 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For license information, see LICENSE.TXT + +""" +The lexicon is constructed by calling +``lexicon.fromstring()``. + +In order to construct a parser, you also need a rule set. +The standard English rules are provided in chart as +``chart.DefaultRuleSet``. + +The parser can then be constructed by calling, for example: +``parser = chart.CCGChartParser(, )`` + +Parsing is then performed by running +``parser.parse(.split())``. + +While this returns a list of trees, the default representation +of the produced trees is not very enlightening, particularly +given that it uses the same tree class as the CFG parsers. +It is probably better to call: +``chart.printCCGDerivation()`` +which should print a nice representation of the derivation. 
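# A minimal end-to-end sketch of the workflow described in this docstring.
# The grammar string below is illustrative (modelled on the demo lexicon at
# the end of this file), not part of the NLTK sources:
from nltk.ccg import chart, lexicon

lex = lexicon.fromstring(
    """
    :- S, NP, N, VP
    Det :: NP/N
    TV :: VP/NP
    Modal :: (S\\NP)/VP
    I => NP
    the => Det
    will => Modal
    eat => TV
    bacon => N
    """
)
parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
for parse in parser.parse("I will eat the bacon".split()):
    chart.printCCGDerivation(parse)
    break  # show just the first derivation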
+ +This entire process is shown far more clearly in the demonstration: +python chart.py +""" + +import itertools + +from nltk.ccg.combinator import * +from nltk.ccg.combinator import ( + BackwardApplication, + BackwardBx, + BackwardComposition, + BackwardSx, + BackwardT, + ForwardApplication, + ForwardComposition, + ForwardSubstitution, + ForwardT, +) +from nltk.ccg.lexicon import Token, fromstring +from nltk.ccg.logic import * +from nltk.parse import ParserI +from nltk.parse.chart import AbstractChartRule, Chart, EdgeI +from nltk.sem.logic import * +from nltk.tree import Tree + + +# Based on the EdgeI class from NLTK. +# A number of the properties of the EdgeI interface don't +# transfer well to CCGs, however. +class CCGEdge(EdgeI): + def __init__(self, span, categ, rule): + self._span = span + self._categ = categ + self._rule = rule + self._comparison_key = (span, categ, rule) + + # Accessors + def lhs(self): + return self._categ + + def span(self): + return self._span + + def start(self): + return self._span[0] + + def end(self): + return self._span[1] + + def length(self): + return self._span[1] - self.span[0] + + def rhs(self): + return () + + def dot(self): + return 0 + + def is_complete(self): + return True + + def is_incomplete(self): + return False + + def nextsym(self): + return None + + def categ(self): + return self._categ + + def rule(self): + return self._rule + + +class CCGLeafEdge(EdgeI): + """ + Class representing leaf edges in a CCG derivation. + """ + + def __init__(self, pos, token, leaf): + self._pos = pos + self._token = token + self._leaf = leaf + self._comparison_key = (pos, token.categ(), leaf) + + # Accessors + def lhs(self): + return self._token.categ() + + def span(self): + return (self._pos, self._pos + 1) + + def start(self): + return self._pos + + def end(self): + return self._pos + 1 + + def length(self): + return 1 + + def rhs(self): + return self._leaf + + def dot(self): + return 0 + + def is_complete(self): + return True + + def is_incomplete(self): + return False + + def nextsym(self): + return None + + def token(self): + return self._token + + def categ(self): + return self._token.categ() + + def leaf(self): + return self._leaf + + +class BinaryCombinatorRule(AbstractChartRule): + """ + Class implementing application of a binary combinator to a chart. + Takes the directed combinator to apply. + """ + + NUMEDGES = 2 + + def __init__(self, combinator): + self._combinator = combinator + + # Apply a combinator + def apply(self, chart, grammar, left_edge, right_edge): + # The left & right edges must be touching. + if not (left_edge.end() == right_edge.start()): + return + + # Check if the two edges are permitted to combine. + # If so, generate the corresponding edge. + if self._combinator.can_combine(left_edge.categ(), right_edge.categ()): + for res in self._combinator.combine(left_edge.categ(), right_edge.categ()): + new_edge = CCGEdge( + span=(left_edge.start(), right_edge.end()), + categ=res, + rule=self._combinator, + ) + if chart.insert(new_edge, (left_edge, right_edge)): + yield new_edge + + # The representation of the combinator (for printing derivations) + def __str__(self): + return "%s" % self._combinator + + +# Type-raising must be handled slightly differently to the other rules, as the +# resulting rules only span a single edge, rather than both edges. 
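# Sketch of what type-raising produces at the category level (this mirrors
# what the ForwardTypeRaiseRule below feeds into the chart); the categories
# are parsed here only for illustration:
from nltk.ccg.combinator import ForwardT
from nltk.ccg.lexicon import augParseCategory

prims = ["S", "NP"]
np, _ = augParseCategory("NP", prims, {})
tv, _ = augParseCategory("(S\\NP)/NP", prims, {})   # a transitive verb

for raised in ForwardT.combine(np, tv):
    print(raised)    # (S/(S\NP)) -- NP raised over the verb's innermost argument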
+ + +class ForwardTypeRaiseRule(AbstractChartRule): + """ + Class for applying forward type raising + """ + + NUMEDGES = 2 + + def __init__(self): + self._combinator = ForwardT + + def apply(self, chart, grammar, left_edge, right_edge): + if not (left_edge.end() == right_edge.start()): + return + + for res in self._combinator.combine(left_edge.categ(), right_edge.categ()): + new_edge = CCGEdge(span=left_edge.span(), categ=res, rule=self._combinator) + if chart.insert(new_edge, (left_edge,)): + yield new_edge + + def __str__(self): + return "%s" % self._combinator + + +class BackwardTypeRaiseRule(AbstractChartRule): + """ + Class for applying backward type raising. + """ + + NUMEDGES = 2 + + def __init__(self): + self._combinator = BackwardT + + def apply(self, chart, grammar, left_edge, right_edge): + if not (left_edge.end() == right_edge.start()): + return + + for res in self._combinator.combine(left_edge.categ(), right_edge.categ()): + new_edge = CCGEdge(span=right_edge.span(), categ=res, rule=self._combinator) + if chart.insert(new_edge, (right_edge,)): + yield new_edge + + def __str__(self): + return "%s" % self._combinator + + +# Common sets of combinators used for English derivations. +ApplicationRuleSet = [ + BinaryCombinatorRule(ForwardApplication), + BinaryCombinatorRule(BackwardApplication), +] +CompositionRuleSet = [ + BinaryCombinatorRule(ForwardComposition), + BinaryCombinatorRule(BackwardComposition), + BinaryCombinatorRule(BackwardBx), +] +SubstitutionRuleSet = [ + BinaryCombinatorRule(ForwardSubstitution), + BinaryCombinatorRule(BackwardSx), +] +TypeRaiseRuleSet = [ForwardTypeRaiseRule(), BackwardTypeRaiseRule()] + +# The standard English rule set. +DefaultRuleSet = ( + ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet + TypeRaiseRuleSet +) + + +class CCGChartParser(ParserI): + """ + Chart parser for CCGs. + Based largely on the ChartParser class from NLTK. + """ + + def __init__(self, lexicon, rules, trace=0): + self._lexicon = lexicon + self._rules = rules + self._trace = trace + + def lexicon(self): + return self._lexicon + + # Implements the CYK algorithm + def parse(self, tokens): + tokens = list(tokens) + chart = CCGChart(list(tokens)) + lex = self._lexicon + + # Initialize leaf edges. + for index in range(chart.num_leaves()): + for token in lex.categories(chart.leaf(index)): + new_edge = CCGLeafEdge(index, token, chart.leaf(index)) + chart.insert(new_edge, ()) + + # Select a span for the new edges + for span in range(2, chart.num_leaves() + 1): + for start in range(0, chart.num_leaves() - span + 1): + # Try all possible pairs of edges that could generate + # an edge for that span + for part in range(1, span): + lstart = start + mid = start + part + rend = start + span + + for left in chart.select(span=(lstart, mid)): + for right in chart.select(span=(mid, rend)): + # Generate all possible combinations of the two edges + for rule in self._rules: + edges_added_by_rule = 0 + for newedge in rule.apply(chart, lex, left, right): + edges_added_by_rule += 1 + + # Output the resulting parses + return chart.parses(lex.start()) + + +class CCGChart(Chart): + def __init__(self, tokens): + Chart.__init__(self, tokens) + + # Constructs the trees for a given parse. 
Unfortnunately, the parse trees need to be + # constructed slightly differently to those in the default Chart class, so it has to + # be reimplemented + def _trees(self, edge, complete, memo, tree_class): + assert complete, "CCGChart cannot build incomplete trees" + + if edge in memo: + return memo[edge] + + if isinstance(edge, CCGLeafEdge): + word = tree_class(edge.token(), [self._tokens[edge.start()]]) + leaf = tree_class((edge.token(), "Leaf"), [word]) + memo[edge] = [leaf] + return [leaf] + + memo[edge] = [] + trees = [] + + for cpl in self.child_pointer_lists(edge): + child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl] + for children in itertools.product(*child_choices): + lhs = ( + Token( + self._tokens[edge.start() : edge.end()], + edge.lhs(), + compute_semantics(children, edge), + ), + str(edge.rule()), + ) + trees.append(tree_class(lhs, children)) + + memo[edge] = trees + return trees + + +def compute_semantics(children, edge): + if children[0].label()[0].semantics() is None: + return None + + if len(children) == 2: + if isinstance(edge.rule(), BackwardCombinator): + children = [children[1], children[0]] + + combinator = edge.rule()._combinator + function = children[0].label()[0].semantics() + argument = children[1].label()[0].semantics() + + if isinstance(combinator, UndirectedFunctionApplication): + return compute_function_semantics(function, argument) + elif isinstance(combinator, UndirectedComposition): + return compute_composition_semantics(function, argument) + elif isinstance(combinator, UndirectedSubstitution): + return compute_substitution_semantics(function, argument) + else: + raise AssertionError("Unsupported combinator '" + combinator + "'") + else: + return compute_type_raised_semantics(children[0].label()[0].semantics()) + + +# -------- +# Displaying derivations +# -------- +def printCCGDerivation(tree): + # Get the leaves and initial categories + leafcats = tree.pos() + leafstr = "" + catstr = "" + + # Construct a string with both the leaf word and corresponding + # category aligned. + for (leaf, cat) in leafcats: + str_cat = "%s" % cat + nextlen = 2 + max(len(leaf), len(str_cat)) + lcatlen = (nextlen - len(str_cat)) // 2 + rcatlen = lcatlen + (nextlen - len(str_cat)) % 2 + catstr += " " * lcatlen + str_cat + " " * rcatlen + lleaflen = (nextlen - len(leaf)) // 2 + rleaflen = lleaflen + (nextlen - len(leaf)) % 2 + leafstr += " " * lleaflen + leaf + " " * rleaflen + print(leafstr.rstrip()) + print(catstr.rstrip()) + + # Display the derivation steps + printCCGTree(0, tree) + + +# Prints the sequence of derivation steps. +def printCCGTree(lwidth, tree): + rwidth = lwidth + + # Is a leaf (word). + # Increment the span by the space occupied by the leaf. + if not isinstance(tree, Tree): + return 2 + lwidth + len(tree) + + # Find the width of the current derivation step + for child in tree: + rwidth = max(rwidth, printCCGTree(rwidth, child)) + + # Is a leaf node. + # Don't print anything, but account for the space occupied. + if not isinstance(tree.label(), tuple): + return max( + rwidth, 2 + lwidth + len("%s" % tree.label()), 2 + lwidth + len(tree[0]) + ) + + (token, op) = tree.label() + + if op == "Leaf": + return rwidth + + # Pad to the left with spaces, followed by a sequence of '-' + # and the derivation rule. + print(lwidth * " " + (rwidth - lwidth) * "-" + "%s" % op) + # Print the resulting category on a new line. 
+ str_res = "%s" % (token.categ()) + if token.semantics() is not None: + str_res += " {" + str(token.semantics()) + "}" + respadlen = (rwidth - lwidth - len(str_res)) // 2 + lwidth + print(respadlen * " " + str_res) + return rwidth + + +### Demonstration code + +# Construct the lexicon +lex = fromstring( + """ + :- S, NP, N, VP # Primitive categories, S is the target primitive + + Det :: NP/N # Family of words + Pro :: NP + TV :: VP/NP + Modal :: (S\\NP)/VP # Backslashes need to be escaped + + I => Pro # Word -> Category mapping + you => Pro + + the => Det + + # Variables have the special keyword 'var' + # '.' prevents permutation + # ',' prevents composition + and => var\\.,var/.,var + + which => (N\\N)/(S/NP) + + will => Modal # Categories can be either explicit, or families. + might => Modal + + cook => TV + eat => TV + + mushrooms => N + parsnips => N + bacon => N + """ +) + + +def demo(): + parser = CCGChartParser(lex, DefaultRuleSet) + for parse in parser.parse("I might cook and eat the bacon".split()): + printCCGDerivation(parse) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/combinator.py b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/combinator.py new file mode 100644 index 0000000000000000000000000000000000000000..6efe6adf40d1aea7c98df1aceccdf9cf5c7b5c31 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/combinator.py @@ -0,0 +1,339 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For license information, see LICENSE.TXT +""" +CCG Combinators +""" + +from abc import ABCMeta, abstractmethod + +from nltk.ccg.api import FunctionalCategory + + +class UndirectedBinaryCombinator(metaclass=ABCMeta): + """ + Abstract class for representing a binary combinator. + Merely defines functions for checking if the function and argument + are able to be combined, and what the resulting category is. + + Note that as no assumptions are made as to direction, the unrestricted + combinators can perform all backward, forward and crossed variations + of the combinators; these restrictions must be added in the rule + class. + """ + + @abstractmethod + def can_combine(self, function, argument): + pass + + @abstractmethod + def combine(self, function, argument): + pass + + +class DirectedBinaryCombinator(metaclass=ABCMeta): + """ + Wrapper for the undirected binary combinator. + It takes left and right categories, and decides which is to be + the function, and which the argument. + It then decides whether or not they can be combined. + """ + + @abstractmethod + def can_combine(self, left, right): + pass + + @abstractmethod + def combine(self, left, right): + pass + + +class ForwardCombinator(DirectedBinaryCombinator): + """ + Class representing combinators where the primary functor is on the left. + + Takes an undirected combinator, and a predicate which adds constraints + restricting the cases in which it may apply. 
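# Sketch of the wrapper pattern described above: a directed combinator is an
# undirected combinator plus a restricting predicate. This mirrors how the
# ForwardApplication instance later in this file is built; the names here are
# illustrative only.
from nltk.ccg.combinator import ForwardCombinator, UndirectedFunctionApplication

def _left_is_forward(left, right):
    # restrict the rule to a forward-slash functor on the left
    return left.dir().is_forward()

MyForwardApplication = ForwardCombinator(
    UndirectedFunctionApplication(), _left_is_forward
)
print(MyForwardApplication)    # prints ">", the forward-application rule name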
+ """ + + def __init__(self, combinator, predicate, suffix=""): + self._combinator = combinator + self._predicate = predicate + self._suffix = suffix + + def can_combine(self, left, right): + return self._combinator.can_combine(left, right) and self._predicate( + left, right + ) + + def combine(self, left, right): + yield from self._combinator.combine(left, right) + + def __str__(self): + return f">{self._combinator}{self._suffix}" + + +class BackwardCombinator(DirectedBinaryCombinator): + """ + The backward equivalent of the ForwardCombinator class. + """ + + def __init__(self, combinator, predicate, suffix=""): + self._combinator = combinator + self._predicate = predicate + self._suffix = suffix + + def can_combine(self, left, right): + return self._combinator.can_combine(right, left) and self._predicate( + left, right + ) + + def combine(self, left, right): + yield from self._combinator.combine(right, left) + + def __str__(self): + return f"<{self._combinator}{self._suffix}" + + +class UndirectedFunctionApplication(UndirectedBinaryCombinator): + """ + Class representing function application. + Implements rules of the form: + X/Y Y -> X (>) + And the corresponding backwards application rule + """ + + def can_combine(self, function, argument): + if not function.is_function(): + return False + + return not function.arg().can_unify(argument) is None + + def combine(self, function, argument): + if not function.is_function(): + return + + subs = function.arg().can_unify(argument) + if subs is None: + return + + yield function.res().substitute(subs) + + def __str__(self): + return "" + + +# Predicates for function application. + +# Ensures the left functor takes an argument on the right +def forwardOnly(left, right): + return left.dir().is_forward() + + +# Ensures the right functor takes an argument on the left +def backwardOnly(left, right): + return right.dir().is_backward() + + +# Application combinator instances +ForwardApplication = ForwardCombinator(UndirectedFunctionApplication(), forwardOnly) +BackwardApplication = BackwardCombinator(UndirectedFunctionApplication(), backwardOnly) + + +class UndirectedComposition(UndirectedBinaryCombinator): + """ + Functional composition (harmonic) combinator. + Implements rules of the form + X/Y Y/Z -> X/Z (B>) + And the corresponding backwards and crossed variations. + """ + + def can_combine(self, function, argument): + # Can only combine two functions, and both functions must + # allow composition. + if not (function.is_function() and argument.is_function()): + return False + if function.dir().can_compose() and argument.dir().can_compose(): + return not function.arg().can_unify(argument.res()) is None + return False + + def combine(self, function, argument): + if not (function.is_function() and argument.is_function()): + return + if function.dir().can_compose() and argument.dir().can_compose(): + subs = function.arg().can_unify(argument.res()) + if subs is not None: + yield FunctionalCategory( + function.res().substitute(subs), + argument.arg().substitute(subs), + argument.dir(), + ) + + def __str__(self): + return "B" + + +# Predicates for restricting application of straight composition. 
+def bothForward(left, right): + return left.dir().is_forward() and right.dir().is_forward() + + +def bothBackward(left, right): + return left.dir().is_backward() and right.dir().is_backward() + + +# Predicates for crossed composition +def crossedDirs(left, right): + return left.dir().is_forward() and right.dir().is_backward() + + +def backwardBxConstraint(left, right): + # The functors must be crossed inwards + if not crossedDirs(left, right): + return False + # Permuting combinators must be allowed + if not left.dir().can_cross() and right.dir().can_cross(): + return False + # The resulting argument category is restricted to be primitive + return left.arg().is_primitive() + + +# Straight composition combinators +ForwardComposition = ForwardCombinator(UndirectedComposition(), forwardOnly) +BackwardComposition = BackwardCombinator(UndirectedComposition(), backwardOnly) + +# Backward crossed composition +BackwardBx = BackwardCombinator( + UndirectedComposition(), backwardBxConstraint, suffix="x" +) + + +class UndirectedSubstitution(UndirectedBinaryCombinator): + r""" + Substitution (permutation) combinator. + Implements rules of the form + Y/Z (X\Y)/Z -> X/Z ( N\N +def innermostFunction(categ): + while categ.res().is_function(): + categ = categ.res() + return categ + + +class UndirectedTypeRaise(UndirectedBinaryCombinator): + """ + Undirected combinator for type raising. + """ + + def can_combine(self, function, arg): + # The argument must be a function. + # The restriction that arg.res() must be a function + # merely reduces redundant type-raising; if arg.res() is + # primitive, we have: + # X Y\X =>((>) Y + # which is equivalent to + # X Y\X =>(<) Y + if not (arg.is_function() and arg.res().is_function()): + return False + + arg = innermostFunction(arg) + + # left, arg_categ are undefined! + subs = left.can_unify(arg_categ.arg()) + if subs is not None: + return True + return False + + def combine(self, function, arg): + if not ( + function.is_primitive() and arg.is_function() and arg.res().is_function() + ): + return + + # Type-raising matches only the innermost application. + arg = innermostFunction(arg) + + subs = function.can_unify(arg.arg()) + if subs is not None: + xcat = arg.res().substitute(subs) + yield FunctionalCategory( + xcat, FunctionalCategory(xcat, function, arg.dir()), -(arg.dir()) + ) + + def __str__(self): + return "T" + + +# Predicates for type-raising +# The direction of the innermost category must be towards +# the primary functor. +# The restriction that the variable must be primitive is not +# common to all versions of CCGs; some authors have other restrictions. 
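# Sketch of harmonic composition (X/Y Y/Z -> X/Z) using the ForwardComposition
# instance defined earlier in this file; the categories are illustrative:
from nltk.ccg.combinator import ForwardComposition
from nltk.ccg.lexicon import augParseCategory

prims = ["S", "NP", "N", "VP"]
verb, _ = augParseCategory("VP/NP", prims, {})
det, _ = augParseCategory("NP/N", prims, {})

for result in ForwardComposition.combine(verb, det):
    print(result)    # (VP/N) -- the NP "middle" category is composed away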
+def forwardTConstraint(left, right): + arg = innermostFunction(right) + return arg.dir().is_backward() and arg.res().is_primitive() + + +def backwardTConstraint(left, right): + arg = innermostFunction(left) + return arg.dir().is_forward() and arg.res().is_primitive() + + +# Instances of type-raising combinators +ForwardT = ForwardCombinator(UndirectedTypeRaise(), forwardTConstraint) +BackwardT = BackwardCombinator(UndirectedTypeRaise(), backwardTConstraint) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/lexicon.py b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/lexicon.py new file mode 100644 index 0000000000000000000000000000000000000000..da7d00ab6bcdfa190f49fe7c141a23542426ff20 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/lexicon.py @@ -0,0 +1,338 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For license information, see LICENSE.TXT +""" +CCG Lexicons +""" + +import re +from collections import defaultdict + +from nltk.ccg.api import CCGVar, Direction, FunctionalCategory, PrimitiveCategory +from nltk.internals import deprecated +from nltk.sem.logic import Expression + +# ------------ +# Regular expressions used for parsing components of the lexicon +# ------------ + +# Parses a primitive category and subscripts +PRIM_RE = re.compile(r"""([A-Za-z]+)(\[[A-Za-z,]+\])?""") + +# Separates the next primitive category from the remainder of the +# string +NEXTPRIM_RE = re.compile(r"""([A-Za-z]+(?:\[[A-Za-z,]+\])?)(.*)""") + +# Separates the next application operator from the remainder +APP_RE = re.compile(r"""([\\/])([.,]?)([.,]?)(.*)""") + +# Parses the definition of the right-hand side (rhs) of either a word or a family +LEX_RE = re.compile(r"""([\S_]+)\s*(::|[-=]+>)\s*(.+)""", re.UNICODE) + +# Parses the right hand side that contains category and maybe semantic predicate +RHS_RE = re.compile(r"""([^{}]*[^ {}])\s*(\{[^}]+\})?""", re.UNICODE) + +# Parses the semantic predicate +SEMANTICS_RE = re.compile(r"""\{([^}]+)\}""", re.UNICODE) + +# Strips comments from a line +COMMENTS_RE = re.compile("""([^#]*)(?:#.*)?""") + + +class Token: + """ + Class representing a token. + + token => category {semantics} + e.g. eat => S\\var[pl]/var {\\x y.eat(x,y)} + + * `token` (string) + * `categ` (string) + * `semantics` (Expression) + """ + + def __init__(self, token, categ, semantics=None): + self._token = token + self._categ = categ + self._semantics = semantics + + def categ(self): + return self._categ + + def semantics(self): + return self._semantics + + def __str__(self): + semantics_str = "" + if self._semantics is not None: + semantics_str = " {" + str(self._semantics) + "}" + return "" + str(self._categ) + semantics_str + + def __cmp__(self, other): + if not isinstance(other, Token): + return -1 + return cmp((self._categ, self._semantics), other.categ(), other.semantics()) + + +class CCGLexicon: + """ + Class representing a lexicon for CCG grammars. 
+ + * `primitives`: The list of primitive categories for the lexicon + * `families`: Families of categories + * `entries`: A mapping of words to possible categories + """ + + def __init__(self, start, primitives, families, entries): + self._start = PrimitiveCategory(start) + self._primitives = primitives + self._families = families + self._entries = entries + + def categories(self, word): + """ + Returns all the possible categories for a word + """ + return self._entries[word] + + def start(self): + """ + Return the target category for the parser + """ + return self._start + + def __str__(self): + """ + String representation of the lexicon. Used for debugging. + """ + string = "" + first = True + for ident in sorted(self._entries): + if not first: + string = string + "\n" + string = string + ident + " => " + + first = True + for cat in self._entries[ident]: + if not first: + string = string + " | " + else: + first = False + string = string + "%s" % cat + return string + + +# ----------- +# Parsing lexicons +# ----------- + + +def matchBrackets(string): + """ + Separate the contents matching the first set of brackets from the rest of + the input. + """ + rest = string[1:] + inside = "(" + + while rest != "" and not rest.startswith(")"): + if rest.startswith("("): + (part, rest) = matchBrackets(rest) + inside = inside + part + else: + inside = inside + rest[0] + rest = rest[1:] + if rest.startswith(")"): + return (inside + ")", rest[1:]) + raise AssertionError("Unmatched bracket in string '" + string + "'") + + +def nextCategory(string): + """ + Separate the string for the next portion of the category from the rest + of the string + """ + if string.startswith("("): + return matchBrackets(string) + return NEXTPRIM_RE.match(string).groups() + + +def parseApplication(app): + """ + Parse an application operator + """ + return Direction(app[0], app[1:]) + + +def parseSubscripts(subscr): + """ + Parse the subscripts for a primitive category + """ + if subscr: + return subscr[1:-1].split(",") + return [] + + +def parsePrimitiveCategory(chunks, primitives, families, var): + """ + Parse a primitive category + + If the primitive is the special category 'var', replace it with the + correct `CCGVar`. + """ + if chunks[0] == "var": + if chunks[1] is None: + if var is None: + var = CCGVar() + return (var, var) + + catstr = chunks[0] + if catstr in families: + (cat, cvar) = families[catstr] + if var is None: + var = cvar + else: + cat = cat.substitute([(cvar, var)]) + return (cat, var) + + if catstr in primitives: + subscrs = parseSubscripts(chunks[1]) + return (PrimitiveCategory(catstr, subscrs), var) + raise AssertionError( + "String '" + catstr + "' is neither a family nor primitive category." 
+ ) + + +def augParseCategory(line, primitives, families, var=None): + """ + Parse a string representing a category, and returns a tuple with + (possibly) the CCG variable for the category + """ + (cat_string, rest) = nextCategory(line) + + if cat_string.startswith("("): + (res, var) = augParseCategory(cat_string[1:-1], primitives, families, var) + + else: + (res, var) = parsePrimitiveCategory( + PRIM_RE.match(cat_string).groups(), primitives, families, var + ) + + while rest != "": + app = APP_RE.match(rest).groups() + direction = parseApplication(app[0:3]) + rest = app[3] + + (cat_string, rest) = nextCategory(rest) + if cat_string.startswith("("): + (arg, var) = augParseCategory(cat_string[1:-1], primitives, families, var) + else: + (arg, var) = parsePrimitiveCategory( + PRIM_RE.match(cat_string).groups(), primitives, families, var + ) + res = FunctionalCategory(res, arg, direction) + + return (res, var) + + +def fromstring(lex_str, include_semantics=False): + """ + Convert string representation into a lexicon for CCGs. + """ + CCGVar.reset_id() + primitives = [] + families = {} + entries = defaultdict(list) + for line in lex_str.splitlines(): + # Strip comments and leading/trailing whitespace. + line = COMMENTS_RE.match(line).groups()[0].strip() + if line == "": + continue + + if line.startswith(":-"): + # A line of primitive categories. + # The first one is the target category + # ie, :- S, N, NP, VP + primitives = primitives + [ + prim.strip() for prim in line[2:].strip().split(",") + ] + else: + # Either a family definition, or a word definition + (ident, sep, rhs) = LEX_RE.match(line).groups() + (catstr, semantics_str) = RHS_RE.match(rhs).groups() + (cat, var) = augParseCategory(catstr, primitives, families) + + if sep == "::": + # Family definition + # ie, Det :: NP/N + families[ident] = (cat, var) + else: + semantics = None + if include_semantics is True: + if semantics_str is None: + raise AssertionError( + line + + " must contain semantics because include_semantics is set to True" + ) + else: + semantics = Expression.fromstring( + SEMANTICS_RE.match(semantics_str).groups()[0] + ) + # Word definition + # ie, which => (N\N)/(S/NP) + entries[ident].append(Token(ident, cat, semantics)) + return CCGLexicon(primitives[0], primitives, families, entries) + + +@deprecated("Use fromstring() instead.") +def parseLexicon(lex_str): + return fromstring(lex_str) + + +openccg_tinytiny = fromstring( + """ + # Rather minimal lexicon based on the openccg `tinytiny' grammar. + # Only incorporates a subset of the morphological subcategories, however. 
+ :- S,NP,N # Primitive categories + Det :: NP/N # Determiners + Pro :: NP + IntransVsg :: S\\NP[sg] # Tensed intransitive verbs (singular) + IntransVpl :: S\\NP[pl] # Plural + TransVsg :: S\\NP[sg]/NP # Tensed transitive verbs (singular) + TransVpl :: S\\NP[pl]/NP # Plural + + the => NP[sg]/N[sg] + the => NP[pl]/N[pl] + + I => Pro + me => Pro + we => Pro + us => Pro + + book => N[sg] + books => N[pl] + + peach => N[sg] + peaches => N[pl] + + policeman => N[sg] + policemen => N[pl] + + boy => N[sg] + boys => N[pl] + + sleep => IntransVsg + sleep => IntransVpl + + eat => IntransVpl + eat => TransVpl + eats => IntransVsg + eats => TransVsg + + see => TransVpl + sees => TransVsg + """ +) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/ccg/logic.py b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/logic.py new file mode 100644 index 0000000000000000000000000000000000000000..2e347b7531f723b3d8fe0caa84c22e8fcb659a6c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/ccg/logic.py @@ -0,0 +1,60 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tanin Na Nakorn (@tanin) +# URL: +# For license information, see LICENSE.TXT +""" +Helper functions for CCG semantics computation +""" + +from nltk.sem.logic import * + + +def compute_type_raised_semantics(semantics): + core = semantics + parent = None + while isinstance(core, LambdaExpression): + parent = core + core = core.term + + var = Variable("F") + while var in core.free(): + var = unique_variable(pattern=var) + core = ApplicationExpression(FunctionVariableExpression(var), core) + + if parent is not None: + parent.term = core + else: + semantics = core + + return LambdaExpression(var, semantics) + + +def compute_function_semantics(function, argument): + return ApplicationExpression(function, argument).simplify() + + +def compute_composition_semantics(function, argument): + assert isinstance(argument, LambdaExpression), ( + "`" + str(argument) + "` must be a lambda expression" + ) + return LambdaExpression( + argument.variable, ApplicationExpression(function, argument.term).simplify() + ) + + +def compute_substitution_semantics(function, argument): + assert isinstance(function, LambdaExpression) and isinstance( + function.term, LambdaExpression + ), ("`" + str(function) + "` must be a lambda expression with 2 arguments") + assert isinstance(argument, LambdaExpression), ( + "`" + str(argument) + "` must be a lambda expression" + ) + + new_argument = ApplicationExpression( + argument, VariableExpression(function.variable) + ).simplify() + new_term = ApplicationExpression(function.term, new_argument).simplify() + + return LambdaExpression(function.variable, new_term) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/chunk/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/api.py new file mode 100644 index 0000000000000000000000000000000000000000..858490a7abb82375fba271d98037e53da6a17129 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/api.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: Chunk parsing API +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT + +##////////////////////////////////////////////////////// +## Chunk Parser Interface +##////////////////////////////////////////////////////// + +from nltk.chunk.util import ChunkScore +from nltk.internals import deprecated +from nltk.parse import ParserI + + +class 
ChunkParserI(ParserI): + """ + A processing interface for identifying non-overlapping groups in + unrestricted text. Typically, chunk parsers are used to find base + syntactic constituents, such as base noun phrases. Unlike + ``ParserI``, ``ChunkParserI`` guarantees that the ``parse()`` method + will always generate a parse. + """ + + def parse(self, tokens): + """ + Return the best chunk structure for the given tokens + and return a tree. + + :param tokens: The list of (word, tag) tokens to be chunked. + :type tokens: list(tuple) + :rtype: Tree + """ + raise NotImplementedError() + + @deprecated("Use accuracy(gold) instead.") + def evaluate(self, gold): + return self.accuracy(gold) + + def accuracy(self, gold): + """ + Score the accuracy of the chunker against the gold standard. + Remove the chunking the gold standard text, rechunk it using + the chunker, and return a ``ChunkScore`` object + reflecting the performance of this chunk parser. + + :type gold: list(Tree) + :param gold: The list of chunked sentences to score the chunker on. + :rtype: ChunkScore + """ + chunkscore = ChunkScore() + for correct in gold: + chunkscore.score(correct, self.parse(correct.leaves())) + return chunkscore diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/chunk/named_entity.py b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/named_entity.py new file mode 100644 index 0000000000000000000000000000000000000000..b8ab97742c9f0721a0bc1744703871ea278aba07 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/named_entity.py @@ -0,0 +1,352 @@ +# Natural Language Toolkit: Chunk parsing API +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Named entity chunker +""" + +import os +import pickle +import re +from xml.etree import ElementTree as ET + +from nltk.tag import ClassifierBasedTagger, pos_tag + +try: + from nltk.classify import MaxentClassifier +except ImportError: + pass + +from nltk.chunk.api import ChunkParserI +from nltk.chunk.util import ChunkScore +from nltk.data import find +from nltk.tokenize import word_tokenize +from nltk.tree import Tree + + +class NEChunkParserTagger(ClassifierBasedTagger): + """ + The IOB tagger used by the chunk parser. 
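# Sketch of the ChunkParserI contract defined above; the trivial chunker here
# is illustrative only (it wraps every sentence in a single flat S tree):
from nltk.chunk.api import ChunkParserI
from nltk.tree import Tree

class FlatChunker(ChunkParserI):
    def parse(self, tokens):
        # tokens is a list of (word, tag) pairs; a chunk tree is returned
        return Tree("S", list(tokens))

print(FlatChunker().parse([("the", "DT"), ("dog", "NN"), ("barked", "VBD")]))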
+ """ + + def __init__(self, train): + ClassifierBasedTagger.__init__( + self, train=train, classifier_builder=self._classifier_builder + ) + + def _classifier_builder(self, train): + return MaxentClassifier.train( + train, algorithm="megam", gaussian_prior_sigma=1, trace=2 + ) + + def _english_wordlist(self): + try: + wl = self._en_wordlist + except AttributeError: + from nltk.corpus import words + + self._en_wordlist = set(words.words("en-basic")) + wl = self._en_wordlist + return wl + + def _feature_detector(self, tokens, index, history): + word = tokens[index][0] + pos = simplify_pos(tokens[index][1]) + if index == 0: + prevword = prevprevword = None + prevpos = prevprevpos = None + prevshape = prevtag = prevprevtag = None + elif index == 1: + prevword = tokens[index - 1][0].lower() + prevprevword = None + prevpos = simplify_pos(tokens[index - 1][1]) + prevprevpos = None + prevtag = history[index - 1][0] + prevshape = prevprevtag = None + else: + prevword = tokens[index - 1][0].lower() + prevprevword = tokens[index - 2][0].lower() + prevpos = simplify_pos(tokens[index - 1][1]) + prevprevpos = simplify_pos(tokens[index - 2][1]) + prevtag = history[index - 1] + prevprevtag = history[index - 2] + prevshape = shape(prevword) + if index == len(tokens) - 1: + nextword = nextnextword = None + nextpos = nextnextpos = None + elif index == len(tokens) - 2: + nextword = tokens[index + 1][0].lower() + nextpos = tokens[index + 1][1].lower() + nextnextword = None + nextnextpos = None + else: + nextword = tokens[index + 1][0].lower() + nextpos = tokens[index + 1][1].lower() + nextnextword = tokens[index + 2][0].lower() + nextnextpos = tokens[index + 2][1].lower() + + # 89.6 + features = { + "bias": True, + "shape": shape(word), + "wordlen": len(word), + "prefix3": word[:3].lower(), + "suffix3": word[-3:].lower(), + "pos": pos, + "word": word, + "en-wordlist": (word in self._english_wordlist()), + "prevtag": prevtag, + "prevpos": prevpos, + "nextpos": nextpos, + "prevword": prevword, + "nextword": nextword, + "word+nextpos": f"{word.lower()}+{nextpos}", + "pos+prevtag": f"{pos}+{prevtag}", + "shape+prevtag": f"{prevshape}+{prevtag}", + } + + return features + + +class NEChunkParser(ChunkParserI): + """ + Expected input: list of pos-tagged words + """ + + def __init__(self, train): + self._train(train) + + def parse(self, tokens): + """ + Each token should be a pos-tagged word + """ + tagged = self._tagger.tag(tokens) + tree = self._tagged_to_parse(tagged) + return tree + + def _train(self, corpus): + # Convert to tagged sequence + corpus = [self._parse_to_tagged(s) for s in corpus] + + self._tagger = NEChunkParserTagger(train=corpus) + + def _tagged_to_parse(self, tagged_tokens): + """ + Convert a list of tagged tokens to a chunk-parse tree. + """ + sent = Tree("S", []) + + for (tok, tag) in tagged_tokens: + if tag == "O": + sent.append(tok) + elif tag.startswith("B-"): + sent.append(Tree(tag[2:], [tok])) + elif tag.startswith("I-"): + if sent and isinstance(sent[-1], Tree) and sent[-1].label() == tag[2:]: + sent[-1].append(tok) + else: + sent.append(Tree(tag[2:], [tok])) + return sent + + @staticmethod + def _parse_to_tagged(sent): + """ + Convert a chunk-parse tree to a list of tagged tokens. 
+ """ + toks = [] + for child in sent: + if isinstance(child, Tree): + if len(child) == 0: + print("Warning -- empty chunk in sentence") + continue + toks.append((child[0], f"B-{child.label()}")) + for tok in child[1:]: + toks.append((tok, f"I-{child.label()}")) + else: + toks.append((child, "O")) + return toks + + +def shape(word): + if re.match(r"[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$", word, re.UNICODE): + return "number" + elif re.match(r"\W+$", word, re.UNICODE): + return "punct" + elif re.match(r"\w+$", word, re.UNICODE): + if word.istitle(): + return "upcase" + elif word.islower(): + return "downcase" + else: + return "mixedcase" + else: + return "other" + + +def simplify_pos(s): + if s.startswith("V"): + return "V" + else: + return s.split("-")[0] + + +def postag_tree(tree): + # Part-of-speech tagging. + words = tree.leaves() + tag_iter = (pos for (word, pos) in pos_tag(words)) + newtree = Tree("S", []) + for child in tree: + if isinstance(child, Tree): + newtree.append(Tree(child.label(), [])) + for subchild in child: + newtree[-1].append((subchild, next(tag_iter))) + else: + newtree.append((child, next(tag_iter))) + return newtree + + +def load_ace_data(roots, fmt="binary", skip_bnews=True): + for root in roots: + for root, dirs, files in os.walk(root): + if root.endswith("bnews") and skip_bnews: + continue + for f in files: + if f.endswith(".sgm"): + yield from load_ace_file(os.path.join(root, f), fmt) + + +def load_ace_file(textfile, fmt): + print(f" - {os.path.split(textfile)[1]}") + annfile = textfile + ".tmx.rdc.xml" + + # Read the xml file, and get a list of entities + entities = [] + with open(annfile) as infile: + xml = ET.parse(infile).getroot() + for entity in xml.findall("document/entity"): + typ = entity.find("entity_type").text + for mention in entity.findall("entity_mention"): + if mention.get("TYPE") != "NAME": + continue # only NEs + s = int(mention.find("head/charseq/start").text) + e = int(mention.find("head/charseq/end").text) + 1 + entities.append((s, e, typ)) + + # Read the text file, and mark the entities. + with open(textfile) as infile: + text = infile.read() + + # Strip XML tags, since they don't count towards the indices + text = re.sub("<(?!/?TEXT)[^>]+>", "", text) + + # Blank out anything before/after + def subfunc(m): + return " " * (m.end() - m.start() - 6) + + text = re.sub(r"[\s\S]*", subfunc, text) + text = re.sub(r"[\s\S]*", "", text) + + # Simplify quotes + text = re.sub("``", ' "', text) + text = re.sub("''", '" ', text) + + entity_types = {typ for (s, e, typ) in entities} + + # Binary distinction (NE or not NE) + if fmt == "binary": + i = 0 + toks = Tree("S", []) + for (s, e, typ) in sorted(entities): + if s < i: + s = i # Overlapping! Deal with this better? + if e <= s: + continue + toks.extend(word_tokenize(text[i:s])) + toks.append(Tree("NE", text[s:e].split())) + i = e + toks.extend(word_tokenize(text[i:])) + yield toks + + # Multiclass distinction (NE type) + elif fmt == "multiclass": + i = 0 + toks = Tree("S", []) + for (s, e, typ) in sorted(entities): + if s < i: + s = i # Overlapping! Deal with this better? + if e <= s: + continue + toks.extend(word_tokenize(text[i:s])) + toks.append(Tree(typ, text[s:e].split())) + i = e + toks.extend(word_tokenize(text[i:])) + yield toks + + else: + raise ValueError("bad fmt value") + + +# This probably belongs in a more general-purpose location (as does +# the parse_to_tagged function). 
+def cmp_chunks(correct, guessed): + correct = NEChunkParser._parse_to_tagged(correct) + guessed = NEChunkParser._parse_to_tagged(guessed) + ellipsis = False + for (w, ct), (w, gt) in zip(correct, guessed): + if ct == gt == "O": + if not ellipsis: + print(f" {ct:15} {gt:15} {w}") + print(" {:15} {:15} {2}".format("...", "...", "...")) + ellipsis = True + else: + ellipsis = False + print(f" {ct:15} {gt:15} {w}") + + +def build_model(fmt="binary"): + print("Loading training data...") + train_paths = [ + find("corpora/ace_data/ace.dev"), + find("corpora/ace_data/ace.heldout"), + find("corpora/ace_data/bbn.dev"), + find("corpora/ace_data/muc.dev"), + ] + train_trees = load_ace_data(train_paths, fmt) + train_data = [postag_tree(t) for t in train_trees] + print("Training...") + cp = NEChunkParser(train_data) + del train_data + + print("Loading eval data...") + eval_paths = [find("corpora/ace_data/ace.eval")] + eval_trees = load_ace_data(eval_paths, fmt) + eval_data = [postag_tree(t) for t in eval_trees] + + print("Evaluating...") + chunkscore = ChunkScore() + for i, correct in enumerate(eval_data): + guess = cp.parse(correct.leaves()) + chunkscore.score(correct, guess) + if i < 3: + cmp_chunks(correct, guess) + print(chunkscore) + + outfilename = f"/tmp/ne_chunker_{fmt}.pickle" + print(f"Saving chunker to {outfilename}...") + + with open(outfilename, "wb") as outfile: + pickle.dump(cp, outfile, -1) + + return cp + + +if __name__ == "__main__": + # Make sure that the pickled object has the right class name: + from nltk.chunk.named_entity import build_model + + build_model("binary") + build_model("multiclass") diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/chunk/util.py b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/util.py new file mode 100644 index 0000000000000000000000000000000000000000..64ab90f52d1cecd133d3c6511c71e10e44b7bbf1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/util.py @@ -0,0 +1,643 @@ +# Natural Language Toolkit: Chunk format conversions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT + +import re + +from nltk.metrics import accuracy as _accuracy +from nltk.tag.mapping import map_tag +from nltk.tag.util import str2tuple +from nltk.tree import Tree + +##////////////////////////////////////////////////////// +## EVALUATION +##////////////////////////////////////////////////////// + + +def accuracy(chunker, gold): + """ + Score the accuracy of the chunker against the gold standard. + Strip the chunk information from the gold standard and rechunk it using + the chunker, then compute the accuracy score. + + :type chunker: ChunkParserI + :param chunker: The chunker being evaluated. + :type gold: tree + :param gold: The chunk structures to score the chunker on. + :rtype: float + """ + + gold_tags = [] + test_tags = [] + for gold_tree in gold: + test_tree = chunker.parse(gold_tree.flatten()) + gold_tags += tree2conlltags(gold_tree) + test_tags += tree2conlltags(test_tree) + + # print 'GOLD:', gold_tags[:50] + # print 'TEST:', test_tags[:50] + return _accuracy(gold_tags, test_tags) + + +# Patched for increased performance by Yoav Goldberg , 2006-01-13 +# -- statistics are evaluated only on demand, instead of at every sentence evaluation +# +# SB: use nltk.metrics for precision/recall scoring? +# +class ChunkScore: + """ + A utility class for scoring chunk parsers. 
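The module-level ``accuracy`` function above is typically driven with a real chunked corpus. A hedged sketch, assuming the CoNLL-2000 corpus has been installed (``nltk.download('conll2000')``) and using the stock ``nltk.RegexpParser`` with a book-style NP grammar:

from nltk import RegexpParser
from nltk.chunk.util import accuracy
from nltk.corpus import conll2000

# Score a simple regular-expression NP chunker by the fraction of IOB
# tags it reproduces on a slice of the CoNLL-2000 test set.
gold = conll2000.chunked_sents("test.txt", chunk_types=["NP"])[:100]
chunker = RegexpParser("NP: {<DT|JJ|NN.*>+}")
print(accuracy(chunker, gold))   # a float in [0, 1]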
``ChunkScore`` can + evaluate a chunk parser's output, based on a number of statistics + (precision, recall, f-measure, misssed chunks, incorrect chunks). + It can also combine the scores from the parsing of multiple texts; + this makes it significantly easier to evaluate a chunk parser that + operates one sentence at a time. + + Texts are evaluated with the ``score`` method. The results of + evaluation can be accessed via a number of accessor methods, such + as ``precision`` and ``f_measure``. A typical use of the + ``ChunkScore`` class is:: + + >>> chunkscore = ChunkScore() # doctest: +SKIP + >>> for correct in correct_sentences: # doctest: +SKIP + ... guess = chunkparser.parse(correct.leaves()) # doctest: +SKIP + ... chunkscore.score(correct, guess) # doctest: +SKIP + >>> print('F Measure:', chunkscore.f_measure()) # doctest: +SKIP + F Measure: 0.823 + + :ivar kwargs: Keyword arguments: + + - max_tp_examples: The maximum number actual examples of true + positives to record. This affects the ``correct`` member + function: ``correct`` will not return more than this number + of true positive examples. This does *not* affect any of + the numerical metrics (precision, recall, or f-measure) + + - max_fp_examples: The maximum number actual examples of false + positives to record. This affects the ``incorrect`` member + function and the ``guessed`` member function: ``incorrect`` + will not return more than this number of examples, and + ``guessed`` will not return more than this number of true + positive examples. This does *not* affect any of the + numerical metrics (precision, recall, or f-measure) + + - max_fn_examples: The maximum number actual examples of false + negatives to record. This affects the ``missed`` member + function and the ``correct`` member function: ``missed`` + will not return more than this number of examples, and + ``correct`` will not return more than this number of true + negative examples. This does *not* affect any of the + numerical metrics (precision, recall, or f-measure) + + - chunk_label: A regular expression indicating which chunks + should be compared. Defaults to ``'.*'`` (i.e., all chunks). + + :type _tp: list(Token) + :ivar _tp: List of true positives + :type _fp: list(Token) + :ivar _fp: List of false positives + :type _fn: list(Token) + :ivar _fn: List of false negatives + + :type _tp_num: int + :ivar _tp_num: Number of true positives + :type _fp_num: int + :ivar _fp_num: Number of false positives + :type _fn_num: int + :ivar _fn_num: Number of false negatives. + """ + + def __init__(self, **kwargs): + self._correct = set() + self._guessed = set() + self._tp = set() + self._fp = set() + self._fn = set() + self._max_tp = kwargs.get("max_tp_examples", 100) + self._max_fp = kwargs.get("max_fp_examples", 100) + self._max_fn = kwargs.get("max_fn_examples", 100) + self._chunk_label = kwargs.get("chunk_label", ".*") + self._tp_num = 0 + self._fp_num = 0 + self._fn_num = 0 + self._count = 0 + self._tags_correct = 0.0 + self._tags_total = 0.0 + + self._measuresNeedUpdate = False + + def _updateMeasures(self): + if self._measuresNeedUpdate: + self._tp = self._guessed & self._correct + self._fn = self._correct - self._guessed + self._fp = self._guessed - self._correct + self._tp_num = len(self._tp) + self._fp_num = len(self._fp) + self._fn_num = len(self._fn) + self._measuresNeedUpdate = False + + def score(self, correct, guessed): + """ + Given a correctly chunked sentence, score another chunked + version of the same sentence. 
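Once ``score()`` has accumulated the ``_correct`` and ``_guessed`` chunk sets, ``_updateMeasures`` above is plain set algebra; a toy check, with strings standing in for the chunk identifiers the class actually stores:

correct = {"c1", "c2", "c3"}          # gold chunks
guessed = {"c2", "c3", "c4"}          # chunker output
tp, fp, fn = guessed & correct, guessed - correct, correct - guessed
print(len(tp), len(fp), len(fn))      # 2 1 1  -> precision 2/3, recall 2/3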
+ + :type correct: chunk structure + :param correct: The known-correct ("gold standard") chunked + sentence. + :type guessed: chunk structure + :param guessed: The chunked sentence to be scored. + """ + self._correct |= _chunksets(correct, self._count, self._chunk_label) + self._guessed |= _chunksets(guessed, self._count, self._chunk_label) + self._count += 1 + self._measuresNeedUpdate = True + # Keep track of per-tag accuracy (if possible) + try: + correct_tags = tree2conlltags(correct) + guessed_tags = tree2conlltags(guessed) + except ValueError: + # This exception case is for nested chunk structures, + # where tree2conlltags will fail with a ValueError: "Tree + # is too deeply nested to be printed in CoNLL format." + correct_tags = guessed_tags = () + self._tags_total += len(correct_tags) + self._tags_correct += sum( + 1 for (t, g) in zip(guessed_tags, correct_tags) if t == g + ) + + def accuracy(self): + """ + Return the overall tag-based accuracy for all text that have + been scored by this ``ChunkScore``, using the IOB (conll2000) + tag encoding. + + :rtype: float + """ + if self._tags_total == 0: + return 1 + return self._tags_correct / self._tags_total + + def precision(self): + """ + Return the overall precision for all texts that have been + scored by this ``ChunkScore``. + + :rtype: float + """ + self._updateMeasures() + div = self._tp_num + self._fp_num + if div == 0: + return 0 + else: + return self._tp_num / div + + def recall(self): + """ + Return the overall recall for all texts that have been + scored by this ``ChunkScore``. + + :rtype: float + """ + self._updateMeasures() + div = self._tp_num + self._fn_num + if div == 0: + return 0 + else: + return self._tp_num / div + + def f_measure(self, alpha=0.5): + """ + Return the overall F measure for all texts that have been + scored by this ``ChunkScore``. + + :param alpha: the relative weighting of precision and recall. + Larger alpha biases the score towards the precision value, + while smaller alpha biases the score towards the recall + value. ``alpha`` should have a value in the range [0,1]. + :type alpha: float + :rtype: float + """ + self._updateMeasures() + p = self.precision() + r = self.recall() + if p == 0 or r == 0: # what if alpha is 0 or 1? + return 0 + return 1 / (alpha / p + (1 - alpha) / r) + + def missed(self): + """ + Return the chunks which were included in the + correct chunk structures, but not in the guessed chunk + structures, listed in input order. + + :rtype: list of chunks + """ + self._updateMeasures() + chunks = list(self._fn) + return [c[1] for c in chunks] # discard position information + + def incorrect(self): + """ + Return the chunks which were included in the guessed chunk structures, + but not in the correct chunk structures, listed in input order. + + :rtype: list of chunks + """ + self._updateMeasures() + chunks = list(self._fp) + return [c[1] for c in chunks] # discard position information + + def correct(self): + """ + Return the chunks which were included in the correct + chunk structures, listed in input order. + + :rtype: list of chunks + """ + chunks = list(self._correct) + return [c[1] for c in chunks] # discard position information + + def guessed(self): + """ + Return the chunks which were included in the guessed + chunk structures, listed in input order. 
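``f_measure`` above is the weighted harmonic mean 1 / (alpha/p + (1-alpha)/r); with the default alpha of 0.5 it reduces to the familiar 2pr/(p+r). A quick numeric check:

p, r, alpha = 0.80, 0.60, 0.5
f = 1 / (alpha / p + (1 - alpha) / r)
print(round(f, 4))                    # 0.6857
print(round(2 * p * r / (p + r), 4))  # 0.6857 -- same value when alpha == 0.5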
+ + :rtype: list of chunks + """ + chunks = list(self._guessed) + return [c[1] for c in chunks] # discard position information + + def __len__(self): + self._updateMeasures() + return self._tp_num + self._fn_num + + def __repr__(self): + """ + Return a concise representation of this ``ChunkScoring``. + + :rtype: str + """ + return "" + + def __str__(self): + """ + Return a verbose representation of this ``ChunkScoring``. + This representation includes the precision, recall, and + f-measure scores. For other information about the score, + use the accessor methods (e.g., ``missed()`` and ``incorrect()``). + + :rtype: str + """ + return ( + "ChunkParse score:\n" + + (f" IOB Accuracy: {self.accuracy() * 100:5.1f}%%\n") + + (f" Precision: {self.precision() * 100:5.1f}%%\n") + + (f" Recall: {self.recall() * 100:5.1f}%%\n") + + (f" F-Measure: {self.f_measure() * 100:5.1f}%%") + ) + + +# extract chunks, and assign unique id, the absolute position of +# the first word of the chunk +def _chunksets(t, count, chunk_label): + pos = 0 + chunks = [] + for child in t: + if isinstance(child, Tree): + if re.match(chunk_label, child.label()): + chunks.append(((count, pos), child.freeze())) + pos += len(child.leaves()) + else: + pos += 1 + return set(chunks) + + +def tagstr2tree( + s, chunk_label="NP", root_label="S", sep="/", source_tagset=None, target_tagset=None +): + """ + Divide a string of bracketted tagged text into + chunks and unchunked tokens, and produce a Tree. + Chunks are marked by square brackets (``[...]``). Words are + delimited by whitespace, and each word should have the form + ``text/tag``. Words that do not contain a slash are + assigned a ``tag`` of None. + + :param s: The string to be converted + :type s: str + :param chunk_label: The label to use for chunk nodes + :type chunk_label: str + :param root_label: The label to use for the root of the tree + :type root_label: str + :rtype: Tree + """ + + WORD_OR_BRACKET = re.compile(r"\[|\]|[^\[\]\s]+") + + stack = [Tree(root_label, [])] + for match in WORD_OR_BRACKET.finditer(s): + text = match.group() + if text[0] == "[": + if len(stack) != 1: + raise ValueError(f"Unexpected [ at char {match.start():d}") + chunk = Tree(chunk_label, []) + stack[-1].append(chunk) + stack.append(chunk) + elif text[0] == "]": + if len(stack) != 2: + raise ValueError(f"Unexpected ] at char {match.start():d}") + stack.pop() + else: + if sep is None: + stack[-1].append(text) + else: + word, tag = str2tuple(text, sep) + if source_tagset and target_tagset: + tag = map_tag(source_tagset, target_tagset, tag) + stack[-1].append((word, tag)) + + if len(stack) != 1: + raise ValueError(f"Expected ] at char {len(s):d}") + return stack[0] + + +### CONLL + +_LINE_RE = re.compile(r"(\S+)\s+(\S+)\s+([IOB])-?(\S+)?") + + +def conllstr2tree(s, chunk_types=("NP", "PP", "VP"), root_label="S"): + """ + Return a chunk structure for a single sentence + encoded in the given CONLL 2000 style string. + This function converts a CoNLL IOB string into a tree. + It uses the specified chunk types + (defaults to NP, PP and VP), and creates a tree rooted at a node + labeled S (by default). + + :param s: The CoNLL string to be converted. + :type s: str + :param chunk_types: The chunk types to be converted. + :type chunk_types: tuple + :param root_label: The node label to use for the root. + :type root_label: str + :rtype: Tree + """ + + stack = [Tree(root_label, [])] + + for lineno, line in enumerate(s.split("\n")): + if not line.strip(): + continue + + # Decode the line. 
+ match = _LINE_RE.match(line) + if match is None: + raise ValueError(f"Error on line {lineno:d}") + (word, tag, state, chunk_type) = match.groups() + + # If it's a chunk type we don't care about, treat it as O. + if chunk_types is not None and chunk_type not in chunk_types: + state = "O" + + # For "Begin"/"Outside", finish any completed chunks - + # also do so for "Inside" which don't match the previous token. + mismatch_I = state == "I" and chunk_type != stack[-1].label() + if state in "BO" or mismatch_I: + if len(stack) == 2: + stack.pop() + + # For "Begin", start a new chunk. + if state == "B" or mismatch_I: + chunk = Tree(chunk_type, []) + stack[-1].append(chunk) + stack.append(chunk) + + # Add the new word token. + stack[-1].append((word, tag)) + + return stack[0] + + +def tree2conlltags(t): + """ + Return a list of 3-tuples containing ``(word, tag, IOB-tag)``. + Convert a tree to the CoNLL IOB tag format. + + :param t: The tree to be converted. + :type t: Tree + :rtype: list(tuple) + """ + + tags = [] + for child in t: + try: + category = child.label() + prefix = "B-" + for contents in child: + if isinstance(contents, Tree): + raise ValueError( + "Tree is too deeply nested to be printed in CoNLL format" + ) + tags.append((contents[0], contents[1], prefix + category)) + prefix = "I-" + except AttributeError: + tags.append((child[0], child[1], "O")) + return tags + + +def conlltags2tree( + sentence, chunk_types=("NP", "PP", "VP"), root_label="S", strict=False +): + """ + Convert the CoNLL IOB format to a tree. + """ + tree = Tree(root_label, []) + for (word, postag, chunktag) in sentence: + if chunktag is None: + if strict: + raise ValueError("Bad conll tag sequence") + else: + # Treat as O + tree.append((word, postag)) + elif chunktag.startswith("B-"): + tree.append(Tree(chunktag[2:], [(word, postag)])) + elif chunktag.startswith("I-"): + if ( + len(tree) == 0 + or not isinstance(tree[-1], Tree) + or tree[-1].label() != chunktag[2:] + ): + if strict: + raise ValueError("Bad conll tag sequence") + else: + # Treat as B-* + tree.append(Tree(chunktag[2:], [(word, postag)])) + else: + tree[-1].append((word, postag)) + elif chunktag == "O": + tree.append((word, postag)) + else: + raise ValueError(f"Bad conll tag {chunktag!r}") + return tree + + +def tree2conllstr(t): + """ + Return a multiline string where each line contains a word, tag and IOB tag. + Convert a tree to the CoNLL IOB string format + + :param t: The tree to be converted. + :type t: Tree + :rtype: str + """ + lines = [" ".join(token) for token in tree2conlltags(t)] + return "\n".join(lines) + + +### IEER + +_IEER_DOC_RE = re.compile( + r"\s*" + r"(\s*(?P.+?)\s*\s*)?" + r"(\s*(?P.+?)\s*\s*)?" + r"(\s*(?P.+?)\s*\s*)?" + r"\s*" + r"(\s*(?P.+?)\s*\s*)?" + r"(?P.*?)\s*" + r"\s*\s*", + re.DOTALL, +) + +_IEER_TYPE_RE = re.compile(r']*?type="(?P\w+)"') + + +def _ieer_read_text(s, root_label): + stack = [Tree(root_label, [])] + # s will be None if there is no headline in the text + # return the empty list in place of a Tree + if s is None: + return [] + for piece_m in re.finditer(r"<[^>]+>|[^\s<]+", s): + piece = piece_m.group() + try: + if piece.startswith(".... + m = _IEER_DOC_RE.match(s) + if m: + return { + "text": _ieer_read_text(m.group("text"), root_label), + "docno": m.group("docno"), + "doctype": m.group("doctype"), + "date_time": m.group("date_time"), + #'headline': m.group('headline') + # we want to capture NEs in the headline too! 
+ "headline": _ieer_read_text(m.group("headline"), root_label), + } + else: + return _ieer_read_text(s, root_label) + + +def demo(): + + s = "[ Pierre/NNP Vinken/NNP ] ,/, [ 61/CD years/NNS ] old/JJ ,/, will/MD join/VB [ the/DT board/NN ] ./." + import nltk + + t = nltk.chunk.tagstr2tree(s, chunk_label="NP") + t.pprint() + print() + + s = """ +These DT B-NP +research NN I-NP +protocols NNS I-NP +offer VBP B-VP +to TO B-PP +the DT B-NP +patient NN I-NP +not RB O +only RB O +the DT B-NP +very RB I-NP +best JJS I-NP +therapy NN I-NP +which WDT B-NP +we PRP B-NP +have VBP B-VP +established VBN I-VP +today NN B-NP +but CC B-NP +also RB I-NP +the DT B-NP +hope NN I-NP +of IN B-PP +something NN B-NP +still RB B-ADJP +better JJR I-ADJP +. . O +""" + + conll_tree = conllstr2tree(s, chunk_types=("NP", "PP")) + conll_tree.pprint() + + # Demonstrate CoNLL output + print("CoNLL output:") + print(nltk.chunk.tree2conllstr(conll_tree)) + print() + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9df093cb0a7964ea43df052ac42fb46b6fbadee0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__init__.py @@ -0,0 +1,92 @@ +# Natural Language Toolkit: Clusterers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +""" +This module contains a number of basic clustering algorithms. Clustering +describes the task of discovering groups of similar items with a large +collection. It is also describe as unsupervised machine learning, as the data +from which it learns is unannotated with class information, as is the case for +supervised learning. Annotated data is difficult and expensive to obtain in +the quantities required for the majority of supervised learning algorithms. +This problem, the knowledge acquisition bottleneck, is common to most natural +language processing tasks, thus fueling the need for quality unsupervised +approaches. + +This module contains a k-means clusterer, E-M clusterer and a group average +agglomerative clusterer (GAAC). All these clusterers involve finding good +cluster groupings for a set of vectors in multi-dimensional space. + +The K-means clusterer starts with k arbitrary chosen means then allocates each +vector to the cluster with the closest mean. It then recalculates the means of +each cluster as the centroid of the vectors in the cluster. This process +repeats until the cluster memberships stabilise. This is a hill-climbing +algorithm which may converge to a local maximum. Hence the clustering is +often repeated with random initial means and the most commonly occurring +output means are chosen. + +The GAAC clusterer starts with each of the *N* vectors as singleton clusters. +It then iteratively merges pairs of clusters which have the closest centroids. +This continues until there is only one cluster. The order of merges gives rise +to a dendrogram - a tree with the earlier merges lower than later merges. The +membership of a given number of clusters *c*, *1 <= c <= N*, can be found by +cutting the dendrogram at depth *c*. + +The Gaussian EM clusterer models the vectors as being produced by a mixture +of k Gaussian sources. The parameters of these sources (prior probability, +mean and covariance matrix) are then found to maximise the likelihood of the +given data. 
This is done with the expectation maximisation algorithm. It +starts with k arbitrarily chosen means, priors and covariance matrices. It +then calculates the membership probabilities for each vector in each of the +clusters - this is the 'E' step. The cluster parameters are then updated in +the 'M' step using the maximum likelihood estimate from the cluster membership +probabilities. This process continues until the likelihood of the data does +not significantly increase. + +They all extend the ClusterI interface which defines common operations +available with each clusterer. These operations include: + +- cluster: clusters a sequence of vectors +- classify: assign a vector to a cluster +- classification_probdist: give the probability distribution over cluster memberships + +The current existing classifiers also extend cluster.VectorSpace, an +abstract class which allows for singular value decomposition (SVD) and vector +normalisation. SVD is used to reduce the dimensionality of the vector space in +such a manner as to preserve as much of the variation as possible, by +reparameterising the axes in order of variability and discarding all bar the +first d dimensions. Normalisation ensures that vectors fall in the unit +hypersphere. + +Usage example (see also demo()):: + + from nltk import cluster + from nltk.cluster import euclidean_distance + from numpy import array + + vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]] + + # initialise the clusterer (will also assign the vectors to clusters) + clusterer = cluster.KMeansClusterer(2, euclidean_distance) + clusterer.cluster(vectors, True) + + # classify a new vector + print(clusterer.classify(array([3, 3]))) + +Note that the vectors must use numpy array-like +objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for +efficiency when required. 
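As a follow-on to the usage example above, the same four vectors can be fed to the group-average agglomerative clusterer described earlier in this docstring; this is an illustrative sketch only (it needs numpy, like the rest of the package):

from numpy import array
from nltk.cluster import GAAClusterer

vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]]

# Build the dendrogram and cut it into two clusters.
clusterer = GAAClusterer(2)
assignments = clusterer.cluster(vectors, True)
print(assignments)                 # cluster index for each input vector
clusterer.dendrogram().show()      # ASCII picture of the merge order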
+""" + +from nltk.cluster.em import EMClusterer +from nltk.cluster.gaac import GAAClusterer +from nltk.cluster.kmeans import KMeansClusterer +from nltk.cluster.util import ( + Dendrogram, + VectorSpaceClusterer, + cosine_distance, + euclidean_distance, +) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..742a527525d4dd4c3ffaacd5b2fa67b62eee5408 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a09efd4cdd5738d7160edc579eb7faf4aa2b70d5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c39a757f722f2acd8c7d8751bd0c506f5cc683b5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd39f6bfa5efb4d8b9215750d675be55bec0ebf5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89b8b5ed4832e129064411ec511f258e91eabffe Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d85163c5edb334313578efea17cb5d09a5d9cdc2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ada17ef29e19763f8bc42d103436e7fa72d3cfd0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__init__.py @@ -0,0 +1,51 @@ +# Natural Language Toolkit: Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +# + +""" +NLTK Metrics + +Classes and methods for scoring processing modules. 
+""" + +from nltk.metrics.agreement import AnnotationTask +from nltk.metrics.aline import align +from nltk.metrics.association import ( + BigramAssocMeasures, + ContingencyMeasures, + NgramAssocMeasures, + QuadgramAssocMeasures, + TrigramAssocMeasures, +) +from nltk.metrics.confusionmatrix import ConfusionMatrix +from nltk.metrics.distance import ( + binary_distance, + custom_distance, + edit_distance, + edit_distance_align, + fractional_presence, + interval_distance, + jaccard_distance, + masi_distance, + presence, +) +from nltk.metrics.paice import Paice +from nltk.metrics.scores import ( + accuracy, + approxrand, + f_measure, + log_likelihood, + precision, + recall, +) +from nltk.metrics.segmentation import ghd, pk, windowdiff +from nltk.metrics.spearman import ( + ranks_from_scores, + ranks_from_sequence, + spearman_correlation, +) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f090b1706c71bda91a254a68b131fd1c722b278 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/confusionmatrix.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..689e06ceb4d64611740cf810c11a7424891be4ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/distance.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a918cf22e4c84da2c4335f83f1f00b7975e5f2e3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/paice.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c01b9b6acd9b756e204b3968a4cd4e5d3e361363 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/segmentation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6176691716ed917d00a8e8338b5fa6bdceef449 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/__pycache__/spearman.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/metrics/aline.py b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/aline.py new file mode 100644 index 0000000000000000000000000000000000000000..5bf8d9930228b2bba3d07b5c92201a011bb9ca25 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/aline.py @@ -0,0 +1,1354 @@ +# Natural Language Toolkit: ALINE +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Greg Kondrak +# Geoff Bacon (Python port) +# URL: +# For license 
information, see LICENSE.TXT + +""" +ALINE +https://webdocs.cs.ualberta.ca/~kondrak/ +Copyright 2002 by Grzegorz Kondrak. + +ALINE is an algorithm for aligning phonetic sequences, described in [1]. +This module is a port of Kondrak's (2002) ALINE. It provides functions for +phonetic sequence alignment and similarity analysis. These are useful in +historical linguistics, sociolinguistics and synchronic phonology. + +ALINE has parameters that can be tuned for desired output. These parameters are: +- C_skip, C_sub, C_exp, C_vwl +- Salience weights +- Segmental features + +In this implementation, some parameters have been changed from their default +values as described in [1], in order to replicate published results. All changes +are noted in comments. + +Example usage +------------- + +# Get optimal alignment of two phonetic sequences + +>>> align('θin', 'tenwis') # doctest: +SKIP +[[('θ', 't'), ('i', 'e'), ('n', 'n'), ('-', 'w'), ('-', 'i'), ('-', 's')]] + +[1] G. Kondrak. Algorithms for Language Reconstruction. PhD dissertation, +University of Toronto. +""" + +try: + import numpy as np +except ImportError: + np = None + +# === Constants === + +inf = float("inf") + +# Default values for maximum similarity scores (Kondrak 2002: 54) +C_skip = -10 # Indels +C_sub = 35 # Substitutions +C_exp = 45 # Expansions/compressions +C_vwl = 5 # Vowel/consonant relative weight (decreased from 10) + +consonants = [ + "B", + "N", + "R", + "b", + "c", + "d", + "f", + "g", + "h", + "j", + "k", + "l", + "m", + "n", + "p", + "q", + "r", + "s", + "t", + "v", + "x", + "z", + "ç", + "ð", + "ħ", + "ŋ", + "ɖ", + "ɟ", + "ɢ", + "ɣ", + "ɦ", + "ɬ", + "ɮ", + "ɰ", + "ɱ", + "ɲ", + "ɳ", + "ɴ", + "ɸ", + "ɹ", + "ɻ", + "ɽ", + "ɾ", + "ʀ", + "ʁ", + "ʂ", + "ʃ", + "ʈ", + "ʋ", + "ʐ ", + "ʒ", + "ʔ", + "ʕ", + "ʙ", + "ʝ", + "β", + "θ", + "χ", + "ʐ", + "w", +] + +# Relevant features for comparing consonants and vowels +R_c = [ + "aspirated", + "lateral", + "manner", + "nasal", + "place", + "retroflex", + "syllabic", + "voice", +] +# 'high' taken out of R_v because same as manner +R_v = [ + "back", + "lateral", + "long", + "manner", + "nasal", + "place", + "retroflex", + "round", + "syllabic", + "voice", +] + +# Flattened feature matrix (Kondrak 2002: 56) +similarity_matrix = { + # place + "bilabial": 1.0, + "labiodental": 0.95, + "dental": 0.9, + "alveolar": 0.85, + "retroflex": 0.8, + "palato-alveolar": 0.75, + "palatal": 0.7, + "velar": 0.6, + "uvular": 0.5, + "pharyngeal": 0.3, + "glottal": 0.1, + "labiovelar": 1.0, + "vowel": -1.0, # added 'vowel' + # manner + "stop": 1.0, + "affricate": 0.9, + "fricative": 0.85, # increased fricative from 0.8 + "trill": 0.7, + "tap": 0.65, + "approximant": 0.6, + "high vowel": 0.4, + "mid vowel": 0.2, + "low vowel": 0.0, + "vowel2": 0.5, # added vowel + # high + "high": 1.0, + "mid": 0.5, + "low": 0.0, + # back + "front": 1.0, + "central": 0.5, + "back": 0.0, + # binary features + "plus": 1.0, + "minus": 0.0, +} + +# Relative weights of phonetic features (Kondrak 2002: 55) +salience = { + "syllabic": 5, + "place": 40, + "manner": 50, + "voice": 5, # decreased from 10 + "nasal": 20, # increased from 10 + "retroflex": 10, + "lateral": 10, + "aspirated": 5, + "long": 0, # decreased from 1 + "high": 3, # decreased from 5 + "back": 2, # decreased from 5 + "round": 2, # decreased from 5 +} + +# (Kondrak 2002: 59-60) +feature_matrix = { + # Consonants + "p": { + "place": "bilabial", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": 
"minus", + "aspirated": "minus", + }, + "b": { + "place": "bilabial", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "t": { + "place": "alveolar", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "d": { + "place": "alveolar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʈ": { + "place": "retroflex", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɖ": { + "place": "retroflex", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "c": { + "place": "palatal", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɟ": { + "place": "palatal", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "k": { + "place": "velar", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "g": { + "place": "velar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "q": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɢ": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʔ": { + "place": "glottal", + "manner": "stop", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "m": { + "place": "bilabial", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɱ": { + "place": "labiodental", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "n": { + "place": "alveolar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɳ": { + "place": "retroflex", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɲ": { + "place": "palatal", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ŋ": { + "place": "velar", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɴ": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": 
"minus", + "lateral": "minus", + "aspirated": "minus", + }, + "N": { + "place": "uvular", + "manner": "stop", + "syllabic": "minus", + "voice": "plus", + "nasal": "plus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʙ": { + "place": "bilabial", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "B": { + "place": "bilabial", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "r": { + "place": "alveolar", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʀ": { + "place": "uvular", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "R": { + "place": "uvular", + "manner": "trill", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɾ": { + "place": "alveolar", + "manner": "tap", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɽ": { + "place": "retroflex", + "manner": "tap", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɸ": { + "place": "bilabial", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "β": { + "place": "bilabial", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "f": { + "place": "labiodental", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "v": { + "place": "labiodental", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "θ": { + "place": "dental", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ð": { + "place": "dental", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "s": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "z": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʃ": { + "place": "palato-alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʒ": { + "place": "palato-alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʂ": { + "place": 
"retroflex", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʐ": { + "place": "retroflex", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "ç": { + "place": "palatal", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʝ": { + "place": "palatal", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "x": { + "place": "velar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɣ": { + "place": "velar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "χ": { + "place": "uvular", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʁ": { + "place": "uvular", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ħ": { + "place": "pharyngeal", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ʕ": { + "place": "pharyngeal", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "h": { + "place": "glottal", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɦ": { + "place": "glottal", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɬ": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "minus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "plus", + "aspirated": "minus", + }, + "ɮ": { + "place": "alveolar", + "manner": "fricative", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "plus", + "aspirated": "minus", + }, + "ʋ": { + "place": "labiodental", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɹ": { + "place": "alveolar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɻ": { + "place": "retroflex", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "plus", + "lateral": "minus", + "aspirated": "minus", + }, + "j": { + "place": "palatal", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "ɰ": { + "place": "velar", + "manner": "approximant", + "syllabic": 
"minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + "l": { + "place": "alveolar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "plus", + "aspirated": "minus", + }, + "w": { + "place": "labiovelar", + "manner": "approximant", + "syllabic": "minus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "aspirated": "minus", + }, + # Vowels + "i": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "y": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "front", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "e": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "E": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "minus", + "long": "plus", + "aspirated": "minus", + }, + "ø": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "ɛ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "œ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "front", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "æ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "a": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "front", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "A": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "front", + "round": "minus", + "long": "plus", + "aspirated": "minus", + }, + "ɨ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "central", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "ʉ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + 
"lateral": "minus", + "high": "high", + "back": "central", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "ə": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "central", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "u": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "back", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "U": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "back", + "round": "plus", + "long": "plus", + "aspirated": "minus", + }, + "o": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "back", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "O": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "back", + "round": "plus", + "long": "plus", + "aspirated": "minus", + }, + "ɔ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "mid", + "back": "back", + "round": "plus", + "long": "minus", + "aspirated": "minus", + }, + "ɒ": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "low", + "back": "back", + "round": "minus", + "long": "minus", + "aspirated": "minus", + }, + "I": { + "place": "vowel", + "manner": "vowel2", + "syllabic": "plus", + "voice": "plus", + "nasal": "minus", + "retroflex": "minus", + "lateral": "minus", + "high": "high", + "back": "front", + "round": "minus", + "long": "plus", + "aspirated": "minus", + }, +} + +# === Algorithm === + + +def align(str1, str2, epsilon=0): + """ + Compute the alignment of two phonetic strings. + + :param str str1: First string to be aligned + :param str str2: Second string to be aligned + + :type epsilon: float (0.0 to 1.0) + :param epsilon: Adjusts threshold similarity score for near-optimal alignments + + :rtype: list(list(tuple(str, str))) + :return: Alignment(s) of str1 and str2 + + (Kondrak 2002: 51) + """ + if np is None: + raise ImportError("You need numpy in order to use the align function") + + assert 0.0 <= epsilon <= 1.0, "Epsilon must be between 0.0 and 1.0." + m = len(str1) + n = len(str2) + # This includes Kondrak's initialization of row 0 and column 0 to all 0s. + S = np.zeros((m + 1, n + 1), dtype=float) + + # If i <= 1 or j <= 1, don't allow expansions as it doesn't make sense, + # and breaks array and string indices. Make sure they never get chosen + # by setting them to -inf. 
+ for i in range(1, m + 1): + for j in range(1, n + 1): + edit1 = S[i - 1, j] + sigma_skip(str1[i - 1]) + edit2 = S[i, j - 1] + sigma_skip(str2[j - 1]) + edit3 = S[i - 1, j - 1] + sigma_sub(str1[i - 1], str2[j - 1]) + if i > 1: + edit4 = S[i - 2, j - 1] + sigma_exp(str2[j - 1], str1[i - 2 : i]) + else: + edit4 = -inf + if j > 1: + edit5 = S[i - 1, j - 2] + sigma_exp(str1[i - 1], str2[j - 2 : j]) + else: + edit5 = -inf + S[i, j] = max(edit1, edit2, edit3, edit4, edit5, 0) + + T = (1 - epsilon) * np.amax(S) # Threshold score for near-optimal alignments + + alignments = [] + for i in range(1, m + 1): + for j in range(1, n + 1): + if S[i, j] >= T: + alignments.append(_retrieve(i, j, 0, S, T, str1, str2, [])) + return alignments + + +def _retrieve(i, j, s, S, T, str1, str2, out): + """ + Retrieve the path through the similarity matrix S starting at (i, j). + + :rtype: list(tuple(str, str)) + :return: Alignment of str1 and str2 + """ + if S[i, j] == 0: + return out + else: + if j > 1 and S[i - 1, j - 2] + sigma_exp(str1[i - 1], str2[j - 2 : j]) + s >= T: + out.insert(0, (str1[i - 1], str2[j - 2 : j])) + _retrieve( + i - 1, + j - 2, + s + sigma_exp(str1[i - 1], str2[j - 2 : j]), + S, + T, + str1, + str2, + out, + ) + elif ( + i > 1 and S[i - 2, j - 1] + sigma_exp(str2[j - 1], str1[i - 2 : i]) + s >= T + ): + out.insert(0, (str1[i - 2 : i], str2[j - 1])) + _retrieve( + i - 2, + j - 1, + s + sigma_exp(str2[j - 1], str1[i - 2 : i]), + S, + T, + str1, + str2, + out, + ) + elif S[i, j - 1] + sigma_skip(str2[j - 1]) + s >= T: + out.insert(0, ("-", str2[j - 1])) + _retrieve(i, j - 1, s + sigma_skip(str2[j - 1]), S, T, str1, str2, out) + elif S[i - 1, j] + sigma_skip(str1[i - 1]) + s >= T: + out.insert(0, (str1[i - 1], "-")) + _retrieve(i - 1, j, s + sigma_skip(str1[i - 1]), S, T, str1, str2, out) + elif S[i - 1, j - 1] + sigma_sub(str1[i - 1], str2[j - 1]) + s >= T: + out.insert(0, (str1[i - 1], str2[j - 1])) + _retrieve( + i - 1, + j - 1, + s + sigma_sub(str1[i - 1], str2[j - 1]), + S, + T, + str1, + str2, + out, + ) + return out + + +def sigma_skip(p): + """ + Returns score of an indel of P. + + (Kondrak 2002: 54) + """ + return C_skip + + +def sigma_sub(p, q): + """ + Returns score of a substitution of P with Q. + + (Kondrak 2002: 54) + """ + return C_sub - delta(p, q) - V(p) - V(q) + + +def sigma_exp(p, q): + """ + Returns score of an expansion/compression. + + (Kondrak 2002: 54) + """ + q1 = q[0] + q2 = q[1] + return C_exp - delta(p, q1) - delta(p, q2) - V(p) - max(V(q1), V(q2)) + + +def delta(p, q): + """ + Return weighted sum of difference between P and Q. + + (Kondrak 2002: 54) + """ + features = R(p, q) + total = 0 + for f in features: + total += diff(p, q, f) * salience[f] + return total + + +def diff(p, q, f): + """ + Returns difference between phonetic segments P and Q for feature F. + + (Kondrak 2002: 52, 54) + """ + p_features, q_features = feature_matrix[p], feature_matrix[q] + return abs(similarity_matrix[p_features[f]] - similarity_matrix[q_features[f]]) + + +def R(p, q): + """ + Return relevant features for segment comparison. + + (Kondrak 2002: 54) + """ + if p in consonants or q in consonants: + return R_c + return R_v + + +def V(p): + """ + Return vowel weight if P is vowel. + + (Kondrak 2002: 54) + """ + if p in consonants: + return 0 + return C_vwl + + +# === Test === + + +def demo(): + """ + A demonstration of the result of aligning phonetic sequences + used in Kondrak's (2002) dissertation. 
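The scoring primitives just defined can be checked by hand against the weights above: a voiced/voiceless pair such as p/b differs only in the voice feature (salience 5), whereas a stop/vowel pair differs in place, manner, syllabic and voice. An illustrative check (imports refer to this module):

from nltk.metrics.aline import delta, sigma_sub

print(delta("p", "b"))       # 5.0   -- only the voice feature differs
print(delta("p", "a"))       # 115.0 -- place, manner, syllabic and voice all differ
print(sigma_sub("p", "b"))   # 30.0  -- C_sub (35) minus delta; V() is 0 for consonants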
+ """ + data = [pair.split(",") for pair in cognate_data.split("\n")] + for pair in data: + alignment = align(pair[0], pair[1])[0] + alignment = [f"({a[0]}, {a[1]})" for a in alignment] + alignment = " ".join(alignment) + print(f"{pair[0]} ~ {pair[1]} : {alignment}") + + +cognate_data = """jo,ʒə +tu,ty +nosotros,nu +kjen,ki +ke,kwa +todos,tu +una,ən +dos,dø +tres,trwa +ombre,om +arbol,arbrə +pluma,plym +kabeθa,kap +boka,buʃ +pje,pje +koraθon,kœr +ber,vwar +benir,vənir +deθir,dir +pobre,povrə +ðis,dIzes +ðæt,das +wat,vas +nat,nixt +loŋ,laŋ +mæn,man +fleʃ,flajʃ +bləd,blyt +feðər,fEdər +hær,hAr +ir,Or +aj,awgə +nowz,nAzə +mawθ,munt +təŋ,tsuŋə +fut,fys +nij,knI +hænd,hant +hart,herts +livər,lEbər +ænd,ante +æt,ad +blow,flAre +ir,awris +ijt,edere +fiʃ,piʃkis +flow,fluere +staɾ,stella +ful,plenus +græs,gramen +hart,kordis +horn,korny +aj,ego +nij,genU +məðər,mAter +mawntən,mons +nejm,nomen +njuw,nowus +wən,unus +rawnd,rotundus +sow,suere +sit,sedere +θrij,tres +tuwθ,dentis +θin,tenwis +kinwawa,kenuaʔ +nina,nenah +napewa,napɛw +wapimini,wapemen +namesa,namɛʔs +okimawa,okemaw +ʃiʃipa,seʔsep +ahkohkwa,ahkɛh +pematesiweni,pematesewen +asenja,aʔsɛn""" + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py new file mode 100644 index 0000000000000000000000000000000000000000..3cb6ee9b2a7e1a9b2235d9268d20fc1269908fe1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/confusionmatrix.py @@ -0,0 +1,353 @@ +# Natural Language Toolkit: Confusion Matrices +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +from nltk.probability import FreqDist + + +class ConfusionMatrix: + """ + The confusion matrix between a list of reference values and a + corresponding list of test values. Entry *[r,t]* of this + matrix is a count of the number of times that the reference value + *r* corresponds to the test value *t*. E.g.: + + >>> from nltk.metrics import ConfusionMatrix + >>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split() + >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split() + >>> cm = ConfusionMatrix(ref, test) + >>> print(cm['NN', 'NN']) + 3 + + Note that the diagonal entries *Ri=Tj* of this matrix + corresponds to correct values; and the off-diagonal entries + correspond to incorrect values. + """ + + def __init__(self, reference, test, sort_by_count=False): + """ + Construct a new confusion matrix from a list of reference + values and a corresponding list of test values. + + :type reference: list + :param reference: An ordered list of reference values. + :type test: list + :param test: A list of values to compare against the + corresponding reference values. + :raise ValueError: If ``reference`` and ``length`` do not have + the same length. + """ + if len(reference) != len(test): + raise ValueError("Lists must have the same length.") + + # Get a list of all values. + if sort_by_count: + ref_fdist = FreqDist(reference) + test_fdist = FreqDist(test) + + def key(v): + return -(ref_fdist[v] + test_fdist[v]) + + values = sorted(set(reference + test), key=key) + else: + values = sorted(set(reference + test)) + + # Construct a value->index dictionary + indices = {val: i for (i, val) in enumerate(values)} + + # Make a confusion matrix table. 
+ confusion = [[0 for _ in values] for _ in values] + max_conf = 0 # Maximum confusion + for w, g in zip(reference, test): + confusion[indices[w]][indices[g]] += 1 + max_conf = max(max_conf, confusion[indices[w]][indices[g]]) + + #: A list of all values in ``reference`` or ``test``. + self._values = values + #: A dictionary mapping values in ``self._values`` to their indices. + self._indices = indices + #: The confusion matrix itself (as a list of lists of counts). + self._confusion = confusion + #: The greatest count in ``self._confusion`` (used for printing). + self._max_conf = max_conf + #: The total number of values in the confusion matrix. + self._total = len(reference) + #: The number of correct (on-diagonal) values in the matrix. + self._correct = sum(confusion[i][i] for i in range(len(values))) + + def __getitem__(self, li_lj_tuple): + """ + :return: The number of times that value ``li`` was expected and + value ``lj`` was given. + :rtype: int + """ + (li, lj) = li_lj_tuple + i = self._indices[li] + j = self._indices[lj] + return self._confusion[i][j] + + def __repr__(self): + return f"" + + def __str__(self): + return self.pretty_format() + + def pretty_format( + self, + show_percents=False, + values_in_chart=True, + truncate=None, + sort_by_count=False, + ): + """ + :return: A multi-line string representation of this confusion matrix. + :type truncate: int + :param truncate: If specified, then only show the specified + number of values. Any sorting (e.g., sort_by_count) + will be performed before truncation. + :param sort_by_count: If true, then sort by the count of each + label in the reference data. I.e., labels that occur more + frequently in the reference label will be towards the left + edge of the matrix, and labels that occur less frequently + will be towards the right edge. + + @todo: add marginals? + """ + confusion = self._confusion + + values = self._values + if sort_by_count: + values = sorted( + values, key=lambda v: -sum(self._confusion[self._indices[v]]) + ) + + if truncate: + values = values[:truncate] + + if values_in_chart: + value_strings = ["%s" % val for val in values] + else: + value_strings = [str(n + 1) for n in range(len(values))] + + # Construct a format string for row values + valuelen = max(len(val) for val in value_strings) + value_format = "%" + repr(valuelen) + "s | " + # Construct a format string for matrix entries + if show_percents: + entrylen = 6 + entry_format = "%5.1f%%" + zerostr = " ." + else: + entrylen = len(repr(self._max_conf)) + entry_format = "%" + repr(entrylen) + "d" + zerostr = " " * (entrylen - 1) + "." + + # Write the column values. + s = "" + for i in range(valuelen): + s += (" " * valuelen) + " |" + for val in value_strings: + if i >= valuelen - len(val): + s += val[i - valuelen + len(val)].rjust(entrylen + 1) + else: + s += " " * (entrylen + 1) + s += " |\n" + + # Write a dividing line + s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values))) + + # Write the entries. 
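+ # One row per reference value; correct (diagonal) cells are wrapped in
+ # '<...>' so they stand out in the printed matrix.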
+ for val, li in zip(value_strings, values): + i = self._indices[li] + s += value_format % val + for lj in values: + j = self._indices[lj] + if confusion[i][j] == 0: + s += zerostr + elif show_percents: + s += entry_format % (100.0 * confusion[i][j] / self._total) + else: + s += entry_format % confusion[i][j] + if i == j: + prevspace = s.rfind(" ") + s = s[:prevspace] + "<" + s[prevspace + 1 :] + ">" + else: + s += " " + s += "|\n" + + # Write a dividing line + s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values))) + + # Write a key + s += "(row = reference; col = test)\n" + if not values_in_chart: + s += "Value key:\n" + for i, value in enumerate(values): + s += "%6d: %s\n" % (i + 1, value) + + return s + + def key(self): + values = self._values + str = "Value key:\n" + indexlen = len(repr(len(values) - 1)) + key_format = " %" + repr(indexlen) + "d: %s\n" + for i in range(len(values)): + str += key_format % (i, values[i]) + + return str + + def recall(self, value): + """Given a value in the confusion matrix, return the recall + that corresponds to this value. The recall is defined as: + + - *r* = true positive / (true positive + false positive) + + and can loosely be considered the ratio of how often ``value`` + was predicted correctly relative to how often ``value`` was + the true result. + + :param value: value used in the ConfusionMatrix + :return: the recall corresponding to ``value``. + :rtype: float + """ + # Number of times `value` was correct, and also predicted + TP = self[value, value] + # Number of times `value` was correct + TP_FN = sum(self[value, pred_value] for pred_value in self._values) + if TP_FN == 0: + return 0.0 + return TP / TP_FN + + def precision(self, value): + """Given a value in the confusion matrix, return the precision + that corresponds to this value. The precision is defined as: + + - *p* = true positive / (true positive + false negative) + + and can loosely be considered the ratio of how often ``value`` + was predicted correctly relative to the number of predictions + for ``value``. + + :param value: value used in the ConfusionMatrix + :return: the precision corresponding to ``value``. + :rtype: float + """ + # Number of times `value` was correct, and also predicted + TP = self[value, value] + # Number of times `value` was predicted + TP_FP = sum(self[real_value, value] for real_value in self._values) + if TP_FP == 0: + return 0.0 + return TP / TP_FP + + def f_measure(self, value, alpha=0.5): + """ + Given a value used in the confusion matrix, return the f-measure + that corresponds to this value. The f-measure is the harmonic mean + of the ``precision`` and ``recall``, weighted by ``alpha``. + In particular, given the precision *p* and recall *r* defined by: + + - *p* = true positive / (true positive + false negative) + - *r* = true positive / (true positive + false positive) + + The f-measure is: + + - *1/(alpha/p + (1-alpha)/r)* + + With ``alpha = 0.5``, this reduces to: + + - *2pr / (p + r)* + + :param value: value used in the ConfusionMatrix + :param alpha: Ratio of the cost of false negative compared to false + positives. Defaults to 0.5, where the costs are equal. + :type alpha: float + :return: the F-measure corresponding to ``value``. 
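+ Returns 0.0 if either the precision or the recall is zero.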
+ :rtype: float + """ + p = self.precision(value) + r = self.recall(value) + if p == 0.0 or r == 0.0: + return 0.0 + return 1.0 / (alpha / p + (1 - alpha) / r) + + def evaluate(self, alpha=0.5, truncate=None, sort_by_count=False): + """ + Tabulate the **recall**, **precision** and **f-measure** + for each value in this confusion matrix. + + >>> reference = "DET NN VB DET JJ NN NN IN DET NN".split() + >>> test = "DET VB VB DET NN NN NN IN DET NN".split() + >>> cm = ConfusionMatrix(reference, test) + >>> print(cm.evaluate()) + Tag | Prec. | Recall | F-measure + ----+--------+--------+----------- + DET | 1.0000 | 1.0000 | 1.0000 + IN | 1.0000 | 1.0000 | 1.0000 + JJ | 0.0000 | 0.0000 | 0.0000 + NN | 0.7500 | 0.7500 | 0.7500 + VB | 0.5000 | 1.0000 | 0.6667 + + + :param alpha: Ratio of the cost of false negative compared to false + positives, as used in the f-measure computation. Defaults to 0.5, + where the costs are equal. + :type alpha: float + :param truncate: If specified, then only show the specified + number of values. Any sorting (e.g., sort_by_count) + will be performed before truncation. Defaults to None + :type truncate: int, optional + :param sort_by_count: Whether to sort the outputs on frequency + in the reference label. Defaults to False. + :type sort_by_count: bool, optional + :return: A tabulated recall, precision and f-measure string + :rtype: str + """ + tags = self._values + + # Apply keyword parameters + if sort_by_count: + tags = sorted(tags, key=lambda v: -sum(self._confusion[self._indices[v]])) + if truncate: + tags = tags[:truncate] + + tag_column_len = max(max(len(tag) for tag in tags), 3) + + # Construct the header + s = ( + f"{' ' * (tag_column_len - 3)}Tag | Prec. | Recall | F-measure\n" + f"{'-' * tag_column_len}-+--------+--------+-----------\n" + ) + + # Construct the body + for tag in tags: + s += ( + f"{tag:>{tag_column_len}} | " + f"{self.precision(tag):<6.4f} | " + f"{self.recall(tag):<6.4f} | " + f"{self.f_measure(tag, alpha=alpha):.4f}\n" + ) + + return s + + +def demo(): + reference = "DET NN VB DET JJ NN NN IN DET NN".split() + test = "DET VB VB DET NN NN NN IN DET NN".split() + print("Reference =", reference) + print("Test =", test) + print("Confusion matrix:") + print(ConfusionMatrix(reference, test)) + print(ConfusionMatrix(reference, test).pretty_format(sort_by_count=True)) + + print(ConfusionMatrix(reference, test).recall("VB")) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/metrics/paice.py b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/paice.py new file mode 100644 index 0000000000000000000000000000000000000000..bf7de1930b61654f9120a2ec2cd5bf6ef090fc47 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/paice.py @@ -0,0 +1,389 @@ +# Natural Language Toolkit: Agreement Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Lauri Hallila +# URL: +# For license information, see LICENSE.TXT +# + +"""Counts Paice's performance statistics for evaluating stemming algorithms. + +What is required: + - A dictionary of words grouped by their real lemmas + - A dictionary of words grouped by stems from a stemming algorithm + +When these are given, Understemming Index (UI), Overstemming Index (OI), +Stemming Weight (SW) and Error-rate relative to truncation (ERRT) are counted. + +References: +Chris D. Paice (1994). An evaluation method for stemming algorithms. +In Proceedings of SIGIR, 42--50. 
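+
+In terms of the global totals computed here, UI = GUMT / GDMT,
+OI = GWMT / GDNT and SW = OI / UI. ERRT is the ratio OP / OT, where OP is
+the distance from the origin to (UI, OI) and OT is the distance from the
+origin to the truncation line along the same direction.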
+""" + +from math import sqrt + + +def get_words_from_dictionary(lemmas): + """ + Get original set of words used for analysis. + + :param lemmas: A dictionary where keys are lemmas and values are sets + or lists of words corresponding to that lemma. + :type lemmas: dict(str): list(str) + :return: Set of words that exist as values in the dictionary + :rtype: set(str) + """ + words = set() + for lemma in lemmas: + words.update(set(lemmas[lemma])) + return words + + +def _truncate(words, cutlength): + """Group words by stems defined by truncating them at given length. + + :param words: Set of words used for analysis + :param cutlength: Words are stemmed by cutting at this length. + :type words: set(str) or list(str) + :type cutlength: int + :return: Dictionary where keys are stems and values are sets of words + corresponding to that stem. + :rtype: dict(str): set(str) + """ + stems = {} + for word in words: + stem = word[:cutlength] + try: + stems[stem].update([word]) + except KeyError: + stems[stem] = {word} + return stems + + +# Reference: https://en.wikipedia.org/wiki/Line-line_intersection +def _count_intersection(l1, l2): + """Count intersection between two line segments defined by coordinate pairs. + + :param l1: Tuple of two coordinate pairs defining the first line segment + :param l2: Tuple of two coordinate pairs defining the second line segment + :type l1: tuple(float, float) + :type l2: tuple(float, float) + :return: Coordinates of the intersection + :rtype: tuple(float, float) + """ + x1, y1 = l1[0] + x2, y2 = l1[1] + x3, y3 = l2[0] + x4, y4 = l2[1] + + denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4) + + if denominator == 0.0: # lines are parallel + if x1 == x2 == x3 == x4 == 0.0: + # When lines are parallel, they must be on the y-axis. + # We can ignore x-axis because we stop counting the + # truncation line when we get there. + # There are no other options as UI (x-axis) grows and + # OI (y-axis) diminishes when we go along the truncation line. + return (0.0, y4) + + x = ( + (x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4) + ) / denominator + y = ( + (x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4) + ) / denominator + return (x, y) + + +def _get_derivative(coordinates): + """Get derivative of the line from (0,0) to given coordinates. + + :param coordinates: A coordinate pair + :type coordinates: tuple(float, float) + :return: Derivative; inf if x is zero + :rtype: float + """ + try: + return coordinates[1] / coordinates[0] + except ZeroDivisionError: + return float("inf") + + +def _calculate_cut(lemmawords, stems): + """Count understemmed and overstemmed pairs for (lemma, stem) pair with common words. + + :param lemmawords: Set or list of words corresponding to certain lemma. + :param stems: A dictionary where keys are stems and values are sets + or lists of words corresponding to that stem. + :type lemmawords: set(str) or list(str) + :type stems: dict(str): set(str) + :return: Amount of understemmed and overstemmed pairs contributed by words + existing in both lemmawords and stems. 
+ :rtype: tuple(float, float) + """ + umt, wmt = 0.0, 0.0 + for stem in stems: + cut = set(lemmawords) & set(stems[stem]) + if cut: + cutcount = len(cut) + stemcount = len(stems[stem]) + # Unachieved merge total + umt += cutcount * (len(lemmawords) - cutcount) + # Wrongly merged total + wmt += cutcount * (stemcount - cutcount) + return (umt, wmt) + + +def _calculate(lemmas, stems): + """Calculate actual and maximum possible amounts of understemmed and overstemmed word pairs. + + :param lemmas: A dictionary where keys are lemmas and values are sets + or lists of words corresponding to that lemma. + :param stems: A dictionary where keys are stems and values are sets + or lists of words corresponding to that stem. + :type lemmas: dict(str): list(str) + :type stems: dict(str): set(str) + :return: Global unachieved merge total (gumt), + global desired merge total (gdmt), + global wrongly merged total (gwmt) and + global desired non-merge total (gdnt). + :rtype: tuple(float, float, float, float) + """ + + n = sum(len(lemmas[word]) for word in lemmas) + + gdmt, gdnt, gumt, gwmt = (0.0, 0.0, 0.0, 0.0) + + for lemma in lemmas: + lemmacount = len(lemmas[lemma]) + + # Desired merge total + gdmt += lemmacount * (lemmacount - 1) + + # Desired non-merge total + gdnt += lemmacount * (n - lemmacount) + + # For each (lemma, stem) pair with common words, count how many + # pairs are understemmed and overstemmed. + umt, wmt = _calculate_cut(lemmas[lemma], stems) + + # Add to total undesired and wrongly-merged totals + gumt += umt + gwmt += wmt + + # Each object is counted twice, so divide by two + return (gumt / 2, gdmt / 2, gwmt / 2, gdnt / 2) + + +def _indexes(gumt, gdmt, gwmt, gdnt): + """Count Understemming Index (UI), Overstemming Index (OI) and Stemming Weight (SW). + + :param gumt, gdmt, gwmt, gdnt: Global unachieved merge total (gumt), + global desired merge total (gdmt), + global wrongly merged total (gwmt) and + global desired non-merge total (gdnt). + :type gumt, gdmt, gwmt, gdnt: float + :return: Understemming Index (UI), + Overstemming Index (OI) and + Stemming Weight (SW). + :rtype: tuple(float, float, float) + """ + # Calculate Understemming Index (UI), + # Overstemming Index (OI) and Stemming Weight (SW) + try: + ui = gumt / gdmt + except ZeroDivisionError: + # If GDMT (max merge total) is 0, define UI as 0 + ui = 0.0 + try: + oi = gwmt / gdnt + except ZeroDivisionError: + # IF GDNT (max non-merge total) is 0, define OI as 0 + oi = 0.0 + try: + sw = oi / ui + except ZeroDivisionError: + if oi == 0.0: + # OI and UI are 0, define SW as 'not a number' + sw = float("nan") + else: + # UI is 0, define SW as infinity + sw = float("inf") + return (ui, oi, sw) + + +class Paice: + """Class for storing lemmas, stems and evaluation metrics.""" + + def __init__(self, lemmas, stems): + """ + :param lemmas: A dictionary where keys are lemmas and values are sets + or lists of words corresponding to that lemma. + :param stems: A dictionary where keys are stems and values are sets + or lists of words corresponding to that stem. 
+ :type lemmas: dict(str): list(str) + :type stems: dict(str): set(str) + """ + self.lemmas = lemmas + self.stems = stems + self.coords = [] + self.gumt, self.gdmt, self.gwmt, self.gdnt = (None, None, None, None) + self.ui, self.oi, self.sw = (None, None, None) + self.errt = None + self.update() + + def __str__(self): + text = ["Global Unachieved Merge Total (GUMT): %s\n" % self.gumt] + text.append("Global Desired Merge Total (GDMT): %s\n" % self.gdmt) + text.append("Global Wrongly-Merged Total (GWMT): %s\n" % self.gwmt) + text.append("Global Desired Non-merge Total (GDNT): %s\n" % self.gdnt) + text.append("Understemming Index (GUMT / GDMT): %s\n" % self.ui) + text.append("Overstemming Index (GWMT / GDNT): %s\n" % self.oi) + text.append("Stemming Weight (OI / UI): %s\n" % self.sw) + text.append("Error-Rate Relative to Truncation (ERRT): %s\r\n" % self.errt) + coordinates = " ".join(["(%s, %s)" % item for item in self.coords]) + text.append("Truncation line: %s" % coordinates) + return "".join(text) + + def _get_truncation_indexes(self, words, cutlength): + """Count (UI, OI) when stemming is done by truncating words at \'cutlength\'. + + :param words: Words used for the analysis + :param cutlength: Words are stemmed by cutting them at this length + :type words: set(str) or list(str) + :type cutlength: int + :return: Understemming and overstemming indexes + :rtype: tuple(int, int) + """ + + truncated = _truncate(words, cutlength) + gumt, gdmt, gwmt, gdnt = _calculate(self.lemmas, truncated) + ui, oi = _indexes(gumt, gdmt, gwmt, gdnt)[:2] + return (ui, oi) + + def _get_truncation_coordinates(self, cutlength=0): + """Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line. + + :param cutlength: Optional parameter to start counting from (ui, oi) + coordinates gotten by stemming at this length. Useful for speeding up + the calculations when you know the approximate location of the + intersection. + :type cutlength: int + :return: List of coordinate pairs that define the truncation line + :rtype: list(tuple(float, float)) + """ + words = get_words_from_dictionary(self.lemmas) + maxlength = max(len(word) for word in words) + + # Truncate words from different points until (0, 0) - (ui, oi) segment crosses the truncation line + coords = [] + while cutlength <= maxlength: + # Get (UI, OI) pair of current truncation point + pair = self._get_truncation_indexes(words, cutlength) + + # Store only new coordinates so we'll have an actual + # line segment when counting the intersection point + if pair not in coords: + coords.append(pair) + if pair == (0.0, 0.0): + # Stop counting if truncation line goes through origo; + # length from origo to truncation line is 0 + return coords + if len(coords) >= 2 and pair[0] > 0.0: + derivative1 = _get_derivative(coords[-2]) + derivative2 = _get_derivative(coords[-1]) + # Derivative of the truncation line is a decreasing value; + # when it passes Stemming Weight, we've found the segment + # of truncation line intersecting with (0, 0) - (ui, oi) segment + if derivative1 >= self.sw >= derivative2: + return coords + cutlength += 1 + return coords + + def _errt(self): + """Count Error-Rate Relative to Truncation (ERRT). + + :return: ERRT, length of the line from origo to (UI, OI) divided by + the length of the line from origo to the point defined by the same + line when extended until the truncation line. 
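+ Returns ``float('inf')`` if the truncation line passes through the
+ origin while (UI, OI) does not, ``float('nan')`` if both coincide
+ with the origin, and 0.0 if (UI, OI) alone is at the origin.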
+ :rtype: float + """ + # Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line + self.coords = self._get_truncation_coordinates() + if (0.0, 0.0) in self.coords: + # Truncation line goes through origo, so ERRT cannot be counted + if (self.ui, self.oi) != (0.0, 0.0): + return float("inf") + else: + return float("nan") + if (self.ui, self.oi) == (0.0, 0.0): + # (ui, oi) is origo; define errt as 0.0 + return 0.0 + # Count the intersection point + # Note that (self.ui, self.oi) cannot be (0.0, 0.0) and self.coords has different coordinates + # so we have actual line segments instead of a line segment and a point + intersection = _count_intersection( + ((0, 0), (self.ui, self.oi)), self.coords[-2:] + ) + # Count OP (length of the line from origo to (ui, oi)) + op = sqrt(self.ui**2 + self.oi**2) + # Count OT (length of the line from origo to truncation line that goes through (ui, oi)) + ot = sqrt(intersection[0] ** 2 + intersection[1] ** 2) + # OP / OT tells how well the stemming algorithm works compared to just truncating words + return op / ot + + def update(self): + """Update statistics after lemmas and stems have been set.""" + self.gumt, self.gdmt, self.gwmt, self.gdnt = _calculate(self.lemmas, self.stems) + self.ui, self.oi, self.sw = _indexes(self.gumt, self.gdmt, self.gwmt, self.gdnt) + self.errt = self._errt() + + +def demo(): + """Demonstration of the module.""" + # Some words with their real lemmas + lemmas = { + "kneel": ["kneel", "knelt"], + "range": ["range", "ranged"], + "ring": ["ring", "rang", "rung"], + } + # Same words with stems from a stemming algorithm + stems = { + "kneel": ["kneel"], + "knelt": ["knelt"], + "rang": ["rang", "range", "ranged"], + "ring": ["ring"], + "rung": ["rung"], + } + print("Words grouped by their lemmas:") + for lemma in sorted(lemmas): + print("{} => {}".format(lemma, " ".join(lemmas[lemma]))) + print() + print("Same words grouped by a stemming algorithm:") + for stem in sorted(stems): + print("{} => {}".format(stem, " ".join(stems[stem]))) + print() + p = Paice(lemmas, stems) + print(p) + print() + # Let's "change" results from a stemming algorithm + stems = { + "kneel": ["kneel"], + "knelt": ["knelt"], + "rang": ["rang"], + "range": ["range", "ranged"], + "ring": ["ring"], + "rung": ["rung"], + } + print("Counting stats after changing stemming results:") + for stem in sorted(stems): + print("{} => {}".format(stem, " ".join(stems[stem]))) + print() + p.stems = stems + p.update() + print(p) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/metrics/segmentation.py b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..518197d35dff62ce5735b8e76fb5939b19ebedd2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/metrics/segmentation.py @@ -0,0 +1,222 @@ +# Natural Language Toolkit: Text Segmentation Metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# David Doukhan +# URL: +# For license information, see LICENSE.TXT + + +""" +Text Segmentation Metrics + +1. Windowdiff + +Pevzner, L., and Hearst, M., A Critique and Improvement of + an Evaluation Metric for Text Segmentation, + Computational Linguistics 28, 19-36 + + +2. Generalized Hamming Distance + +Bookstein A., Kulyukin V.A., Raita T. 
+Generalized Hamming Distance +Information Retrieval 5, 2002, pp 353-375 + +Baseline implementation in C++ +http://digital.cs.usu.edu/~vkulyukin/vkweb/software/ghd/ghd.html + +Study describing benefits of Generalized Hamming Distance Versus +WindowDiff for evaluating text segmentation tasks +Begsten, Y. Quel indice pour mesurer l'efficacite en segmentation de textes ? +TALN 2009 + + +3. Pk text segmentation metric + +Beeferman D., Berger A., Lafferty J. (1999) +Statistical Models for Text Segmentation +Machine Learning, 34, 177-210 +""" + +try: + import numpy as np +except ImportError: + pass + + +def windowdiff(seg1, seg2, k, boundary="1", weighted=False): + """ + Compute the windowdiff score for a pair of segmentations. A + segmentation is any sequence over a vocabulary of two items + (e.g. "0", "1"), where the specified boundary value is used to + mark the edge of a segmentation. + + >>> s1 = "000100000010" + >>> s2 = "000010000100" + >>> s3 = "100000010000" + >>> '%.2f' % windowdiff(s1, s1, 3) + '0.00' + >>> '%.2f' % windowdiff(s1, s2, 3) + '0.30' + >>> '%.2f' % windowdiff(s2, s3, 3) + '0.80' + + :param seg1: a segmentation + :type seg1: str or list + :param seg2: a segmentation + :type seg2: str or list + :param k: window width + :type k: int + :param boundary: boundary value + :type boundary: str or int or bool + :param weighted: use the weighted variant of windowdiff + :type weighted: boolean + :rtype: float + """ + + if len(seg1) != len(seg2): + raise ValueError("Segmentations have unequal length") + if k > len(seg1): + raise ValueError( + "Window width k should be smaller or equal than segmentation lengths" + ) + wd = 0 + for i in range(len(seg1) - k + 1): + ndiff = abs(seg1[i : i + k].count(boundary) - seg2[i : i + k].count(boundary)) + if weighted: + wd += ndiff + else: + wd += min(1, ndiff) + return wd / (len(seg1) - k + 1.0) + + +# Generalized Hamming Distance + + +def _init_mat(nrows, ncols, ins_cost, del_cost): + mat = np.empty((nrows, ncols)) + mat[0, :] = ins_cost * np.arange(ncols) + mat[:, 0] = del_cost * np.arange(nrows) + return mat + + +def _ghd_aux(mat, rowv, colv, ins_cost, del_cost, shift_cost_coeff): + for i, rowi in enumerate(rowv): + for j, colj in enumerate(colv): + shift_cost = shift_cost_coeff * abs(rowi - colj) + mat[i, j] + if rowi == colj: + # boundaries are at the same location, no transformation required + tcost = mat[i, j] + elif rowi > colj: + # boundary match through a deletion + tcost = del_cost + mat[i, j + 1] + else: + # boundary match through an insertion + tcost = ins_cost + mat[i + 1, j] + mat[i + 1, j + 1] = min(tcost, shift_cost) + + +def ghd(ref, hyp, ins_cost=2.0, del_cost=2.0, shift_cost_coeff=1.0, boundary="1"): + """ + Compute the Generalized Hamming Distance for a reference and a hypothetical + segmentation, corresponding to the cost related to the transformation + of the hypothetical segmentation into the reference segmentation + through boundary insertion, deletion and shift operations. + + A segmentation is any sequence over a vocabulary of two items + (e.g. "0", "1"), where the specified boundary value is used to + mark the edge of a segmentation. + + Recommended parameter values are a shift_cost_coeff of 2. + Associated with a ins_cost, and del_cost equal to the mean segment + length in the reference segmentation. 
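+ In other words, set ``ins_cost`` and ``del_cost`` to the mean segment
+ length of the reference segmentation and ``shift_cost_coeff`` to 2.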
+ + >>> # Same examples as Kulyukin C++ implementation + >>> ghd('1100100000', '1100010000', 1.0, 1.0, 0.5) + 0.5 + >>> ghd('1100100000', '1100000001', 1.0, 1.0, 0.5) + 2.0 + >>> ghd('011', '110', 1.0, 1.0, 0.5) + 1.0 + >>> ghd('1', '0', 1.0, 1.0, 0.5) + 1.0 + >>> ghd('111', '000', 1.0, 1.0, 0.5) + 3.0 + >>> ghd('000', '111', 1.0, 2.0, 0.5) + 6.0 + + :param ref: the reference segmentation + :type ref: str or list + :param hyp: the hypothetical segmentation + :type hyp: str or list + :param ins_cost: insertion cost + :type ins_cost: float + :param del_cost: deletion cost + :type del_cost: float + :param shift_cost_coeff: constant used to compute the cost of a shift. + ``shift cost = shift_cost_coeff * |i - j|`` where ``i`` and ``j`` + are the positions indicating the shift + :type shift_cost_coeff: float + :param boundary: boundary value + :type boundary: str or int or bool + :rtype: float + """ + + ref_idx = [i for (i, val) in enumerate(ref) if val == boundary] + hyp_idx = [i for (i, val) in enumerate(hyp) if val == boundary] + + nref_bound = len(ref_idx) + nhyp_bound = len(hyp_idx) + + if nref_bound == 0 and nhyp_bound == 0: + return 0.0 + elif nref_bound > 0 and nhyp_bound == 0: + return nref_bound * ins_cost + elif nref_bound == 0 and nhyp_bound > 0: + return nhyp_bound * del_cost + + mat = _init_mat(nhyp_bound + 1, nref_bound + 1, ins_cost, del_cost) + _ghd_aux(mat, hyp_idx, ref_idx, ins_cost, del_cost, shift_cost_coeff) + return mat[-1, -1] + + +# Beeferman's Pk text segmentation evaluation metric + + +def pk(ref, hyp, k=None, boundary="1"): + """ + Compute the Pk metric for a pair of segmentations A segmentation + is any sequence over a vocabulary of two items (e.g. "0", "1"), + where the specified boundary value is used to mark the edge of a + segmentation. + + >>> '%.2f' % pk('0100'*100, '1'*400, 2) + '0.50' + >>> '%.2f' % pk('0100'*100, '0'*400, 2) + '0.50' + >>> '%.2f' % pk('0100'*100, '0100'*100, 2) + '0.00' + + :param ref: the reference segmentation + :type ref: str or list + :param hyp: the segmentation to evaluate + :type hyp: str or list + :param k: window size, if None, set to half of the average reference segment length + :type boundary: str or int or bool + :param boundary: boundary value + :type boundary: str or int or bool + :rtype: float + """ + + if k is None: + k = int(round(len(ref) / (ref.count(boundary) * 2.0))) + + err = 0 + for i in range(len(ref) - k + 1): + r = ref[i : i + k].count(boundary) > 0 + h = hyp[i : i + k].count(boundary) > 0 + if r != h: + err += 1 + return err / (len(ref) - k + 1.0) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/stem/arlstem.py b/env-llmeval/lib/python3.10/site-packages/nltk/stem/arlstem.py new file mode 100644 index 0000000000000000000000000000000000000000..566a4dd36b37e148a24eb840b5e9478dd24d4b55 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/stem/arlstem.py @@ -0,0 +1,361 @@ +# +# Natural Language Toolkit: ARLSTem Stemmer +# +# Copyright (C) 2001-2023 NLTK Project +# +# Author: Kheireddine Abainia (x-programer) +# Algorithms: Kheireddine Abainia +# Siham Ouamour +# Halim Sayoud +# URL: +# For license information, see LICENSE.TXT + + +""" +ARLSTem Arabic Stemmer +The details about the implementation of this algorithm are described in: +K. Abainia, S. Ouamour and H. Sayoud, A Novel Robust Arabic Light Stemmer , +Journal of Experimental & Theoretical Artificial Intelligence (JETAI'17), +Vol. 29, No. 3, 2017, pp. 557-573. 
+The ARLSTem is a light Arabic stemmer that is based on removing the affixes +from the word (i.e. prefixes, suffixes and infixes). It was evaluated and +compared to several other stemmers using Paice's parameters (under-stemming +index, over-stemming index and stemming weight), and the results showed that +ARLSTem is promising and producing high performances. This stemmer is not +based on any dictionary and can be used on-line effectively. +""" +import re + +from nltk.stem.api import StemmerI + + +class ARLSTem(StemmerI): + """ + ARLSTem stemmer : a light Arabic Stemming algorithm without any dictionary. + Department of Telecommunication & Information Processing. USTHB University, + Algiers, Algeria. + ARLSTem.stem(token) returns the Arabic stem for the input token. + The ARLSTem Stemmer requires that all tokens are encoded using Unicode + encoding. + """ + + def __init__(self): + # different Alif with hamza + self.re_hamzated_alif = re.compile(r"[\u0622\u0623\u0625]") + self.re_alifMaqsura = re.compile(r"[\u0649]") + self.re_diacritics = re.compile(r"[\u064B-\u065F]") + + # Alif Laam, Laam Laam, Fa Laam, Fa Ba + self.pr2 = ["\u0627\u0644", "\u0644\u0644", "\u0641\u0644", "\u0641\u0628"] + # Ba Alif Laam, Kaaf Alif Laam, Waaw Alif Laam + self.pr3 = ["\u0628\u0627\u0644", "\u0643\u0627\u0644", "\u0648\u0627\u0644"] + # Fa Laam Laam, Waaw Laam Laam + self.pr32 = ["\u0641\u0644\u0644", "\u0648\u0644\u0644"] + # Fa Ba Alif Laam, Waaw Ba Alif Laam, Fa Kaaf Alif Laam + self.pr4 = [ + "\u0641\u0628\u0627\u0644", + "\u0648\u0628\u0627\u0644", + "\u0641\u0643\u0627\u0644", + ] + + # Kaf Yaa, Kaf Miim + self.su2 = ["\u0643\u064A", "\u0643\u0645"] + # Ha Alif, Ha Miim + self.su22 = ["\u0647\u0627", "\u0647\u0645"] + # Kaf Miim Alif, Kaf Noon Shadda + self.su3 = ["\u0643\u0645\u0627", "\u0643\u0646\u0651"] + # Ha Miim Alif, Ha Noon Shadda + self.su32 = ["\u0647\u0645\u0627", "\u0647\u0646\u0651"] + + # Alif Noon, Ya Noon, Waaw Noon + self.pl_si2 = ["\u0627\u0646", "\u064A\u0646", "\u0648\u0646"] + # Taa Alif Noon, Taa Ya Noon + self.pl_si3 = ["\u062A\u0627\u0646", "\u062A\u064A\u0646"] + + # Alif Noon, Waaw Noon + self.verb_su2 = ["\u0627\u0646", "\u0648\u0646"] + # Siin Taa, Siin Yaa + self.verb_pr2 = ["\u0633\u062A", "\u0633\u064A"] + # Siin Alif, Siin Noon + self.verb_pr22 = ["\u0633\u0627", "\u0633\u0646"] + # Lam Noon, Lam Taa, Lam Yaa, Lam Hamza + self.verb_pr33 = [ + "\u0644\u0646", + "\u0644\u062A", + "\u0644\u064A", + "\u0644\u0623", + ] + # Taa Miim Alif, Taa Noon Shadda + self.verb_suf3 = ["\u062A\u0645\u0627", "\u062A\u0646\u0651"] + # Noon Alif, Taa Miim, Taa Alif, Waaw Alif + self.verb_suf2 = [ + "\u0646\u0627", + "\u062A\u0645", + "\u062A\u0627", + "\u0648\u0627", + ] + # Taa, Alif, Noon + self.verb_suf1 = ["\u062A", "\u0627", "\u0646"] + + def stem(self, token): + """ + call this function to get the word's stem based on ARLSTem . + """ + try: + if token is None: + raise ValueError( + "The word could not be stemmed, because \ + it is empty !" 
+ ) + # remove Arabic diacritics and replace some letters with others + token = self.norm(token) + # strip common prefixes of the nouns + pre = self.pref(token) + if pre is not None: + token = pre + # strip the suffixes which are common to nouns and verbs + token = self.suff(token) + # transform a plural noun to a singular noun + ps = self.plur2sing(token) + if ps is None: + # transform from the feminine form to the masculine form + fm = self.fem2masc(token) + if fm is not None: + return fm + else: + if pre is None: # if the prefixes are not stripped + # strip the verb prefixes and suffixes + return self.verb(token) + else: + return ps + return token + except ValueError as e: + print(e) + + def norm(self, token): + """ + normalize the word by removing diacritics, replacing hamzated Alif + with Alif replacing AlifMaqsura with Yaa and removing Waaw at the + beginning. + """ + # strip Arabic diacritics + token = self.re_diacritics.sub("", token) + # replace Hamzated Alif with Alif bare + token = self.re_hamzated_alif.sub("\u0627", token) + # replace alifMaqsura with Yaa + token = self.re_alifMaqsura.sub("\u064A", token) + # strip the Waaw from the word beginning if the remaining is 3 letters + # at least + if token.startswith("\u0648") and len(token) > 3: + token = token[1:] + return token + + def pref(self, token): + """ + remove prefixes from the words' beginning. + """ + if len(token) > 5: + for p3 in self.pr3: + if token.startswith(p3): + return token[3:] + if len(token) > 6: + for p4 in self.pr4: + if token.startswith(p4): + return token[4:] + if len(token) > 5: + for p3 in self.pr32: + if token.startswith(p3): + return token[3:] + if len(token) > 4: + for p2 in self.pr2: + if token.startswith(p2): + return token[2:] + + def suff(self, token): + """ + remove suffixes from the word's end. + """ + if token.endswith("\u0643") and len(token) > 3: + return token[:-1] + if len(token) > 4: + for s2 in self.su2: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su3: + if token.endswith(s3): + return token[:-3] + if token.endswith("\u0647") and len(token) > 3: + token = token[:-1] + return token + if len(token) > 4: + for s2 in self.su22: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su32: + if token.endswith(s3): + return token[:-3] + if token.endswith("\u0646\u0627") and len(token) > 4: + return token[:-2] + return token + + def fem2masc(self, token): + """ + transform the word from the feminine form to the masculine form. + """ + if token.endswith("\u0629") and len(token) > 3: + return token[:-1] + + def plur2sing(self, token): + """ + transform the word from the plural form to the singular form. 
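+ Returns None if the token matches none of the plural patterns.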
+ """ + if len(token) > 4: + for ps2 in self.pl_si2: + if token.endswith(ps2): + return token[:-2] + if len(token) > 5: + for ps3 in self.pl_si3: + if token.endswith(ps3): + return token[:-3] + if len(token) > 3 and token.endswith("\u0627\u062A"): + return token[:-2] + if len(token) > 3 and token.startswith("\u0627") and token[2] == "\u0627": + return token[:2] + token[3:] + if len(token) > 4 and token.startswith("\u0627") and token[-2] == "\u0627": + return token[1:-2] + token[-1] + + def verb(self, token): + """ + stem the verb prefixes and suffixes or both + """ + vb = self.verb_t1(token) + if vb is not None: + return vb + vb = self.verb_t2(token) + if vb is not None: + return vb + vb = self.verb_t3(token) + if vb is not None: + return vb + vb = self.verb_t4(token) + if vb is not None: + return vb + vb = self.verb_t5(token) + if vb is not None: + return vb + return self.verb_t6(token) + + def verb_t1(self, token): + """ + stem the present prefixes and suffixes + """ + if len(token) > 5 and token.startswith("\u062A"): # Taa + for s2 in self.pl_si2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 5 and token.startswith("\u064A"): # Yaa + for s2 in self.verb_su2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 4 and token.startswith("\u0627"): # Alif + # Waaw Alif + if len(token) > 5 and token.endswith("\u0648\u0627"): + return token[1:-2] + # Yaa + if token.endswith("\u064A"): + return token[1:-1] + # Alif + if token.endswith("\u0627"): + return token[1:-1] + # Noon + if token.endswith("\u0646"): + return token[1:-1] + # ^Yaa, Noon$ + if len(token) > 4 and token.startswith("\u064A") and token.endswith("\u0646"): + return token[1:-1] + # ^Taa, Noon$ + if len(token) > 4 and token.startswith("\u062A") and token.endswith("\u0646"): + return token[1:-1] + + def verb_t2(self, token): + """ + stem the future prefixes and suffixes + """ + if len(token) > 6: + for s2 in self.pl_si2: + # ^Siin Taa + if token.startswith(self.verb_pr2[0]) and token.endswith(s2): + return token[2:-2] + # ^Siin Yaa, Alif Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[0]): + return token[2:-2] + # ^Siin Yaa, Waaw Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[2]): + return token[2:-2] + # ^Siin Taa, Noon$ + if ( + len(token) > 5 + and token.startswith(self.verb_pr2[0]) + and token.endswith("\u0646") + ): + return token[2:-1] + # ^Siin Yaa, Noon$ + if ( + len(token) > 5 + and token.startswith(self.verb_pr2[1]) + and token.endswith("\u0646") + ): + return token[2:-1] + + def verb_t3(self, token): + """ + stem the present suffixes + """ + if len(token) > 5: + for su3 in self.verb_suf3: + if token.endswith(su3): + return token[:-3] + if len(token) > 4: + for su2 in self.verb_suf2: + if token.endswith(su2): + return token[:-2] + if len(token) > 3: + for su1 in self.verb_suf1: + if token.endswith(su1): + return token[:-1] + + def verb_t4(self, token): + """ + stem the present prefixes + """ + if len(token) > 3: + for pr1 in self.verb_suf1: + if token.startswith(pr1): + return token[1:] + if token.startswith("\u064A"): + return token[1:] + + def verb_t5(self, token): + """ + stem the future prefixes + """ + if len(token) > 4: + for pr2 in self.verb_pr22: + if token.startswith(pr2): + return token[2:] + for pr2 in self.verb_pr2: + if token.startswith(pr2): + return token[2:] + return token + + def verb_t6(self, token): + """ + stem the order prefixes + """ + if len(token) > 4: + for pr3 in self.verb_pr33: + if token.startswith(pr3): + 
return token[2:] + return token diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/stem/arlstem2.py b/env-llmeval/lib/python3.10/site-packages/nltk/stem/arlstem2.py new file mode 100644 index 0000000000000000000000000000000000000000..a2d9e9551ecffff219821bb570f96b21f588a6f0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/stem/arlstem2.py @@ -0,0 +1,457 @@ +# +# Natural Language Toolkit: ARLSTem Stemmer v2 +# +# Copyright (C) 2001-2023 NLTK Project +# +# Author: Kheireddine Abainia (x-programer) +# Algorithms: Kheireddine Abainia +# Hamza Rebbani +# URL: +# For license information, see LICENSE.TXT + + +""" +ARLSTem2 Arabic Light Stemmer +The details about the implementation of this algorithm are described in: +K. Abainia and H. Rebbani, Comparing the Effectiveness of the Improved ARLSTem +Algorithm with Existing Arabic Light Stemmers, International Conference on +Theoretical and Applicative Aspects of Computer Science (ICTAACS'19), Skikda, +Algeria, December 15-16, 2019. +ARLSTem2 is an Arabic light stemmer based on removing the affixes from +the words (i.e. prefixes, suffixes and infixes). It is an improvement +of the previous Arabic light stemmer (ARLSTem). The new version was compared to +the original algorithm and several existing Arabic light stemmers, where the +results showed that the new version considerably improves the under-stemming +errors that are common to light stemmers. Both ARLSTem and ARLSTem2 can be run +online and do not use any dictionary. +""" +import re + +from nltk.stem.api import StemmerI + + +class ARLSTem2(StemmerI): + """ + Return a stemmed Arabic word after removing affixes. This an improved + version of the previous algorithm, which reduces under-stemming errors. + Typically used in Arabic search engine, information retrieval and NLP. 
+ + >>> from nltk.stem import arlstem2 + >>> stemmer = ARLSTem2() + >>> word = stemmer.stem('يعمل') + >>> print(word) + عمل + + :param token: The input Arabic word (unicode) to be stemmed + :type token: unicode + :return: A unicode Arabic word + """ + + def __init__(self): + # different Alif with hamza + self.re_hamzated_alif = re.compile(r"[\u0622\u0623\u0625]") + self.re_alifMaqsura = re.compile(r"[\u0649]") + self.re_diacritics = re.compile(r"[\u064B-\u065F]") + + # Alif Laam, Laam Laam, Fa Laam, Fa Ba + self.pr2 = ["\u0627\u0644", "\u0644\u0644", "\u0641\u0644", "\u0641\u0628"] + # Ba Alif Laam, Kaaf Alif Laam, Waaw Alif Laam + self.pr3 = ["\u0628\u0627\u0644", "\u0643\u0627\u0644", "\u0648\u0627\u0644"] + # Fa Laam Laam, Waaw Laam Laam + self.pr32 = ["\u0641\u0644\u0644", "\u0648\u0644\u0644"] + # Fa Ba Alif Laam, Waaw Ba Alif Laam, Fa Kaaf Alif Laam + self.pr4 = [ + "\u0641\u0628\u0627\u0644", + "\u0648\u0628\u0627\u0644", + "\u0641\u0643\u0627\u0644", + ] + + # Kaf Yaa, Kaf Miim + self.su2 = ["\u0643\u064A", "\u0643\u0645"] + # Ha Alif, Ha Miim + self.su22 = ["\u0647\u0627", "\u0647\u0645"] + # Kaf Miim Alif, Kaf Noon Shadda + self.su3 = ["\u0643\u0645\u0627", "\u0643\u0646\u0651"] + # Ha Miim Alif, Ha Noon Shadda + self.su32 = ["\u0647\u0645\u0627", "\u0647\u0646\u0651"] + + # Alif Noon, Ya Noon, Waaw Noon + self.pl_si2 = ["\u0627\u0646", "\u064A\u0646", "\u0648\u0646"] + # Taa Alif Noon, Taa Ya Noon + self.pl_si3 = ["\u062A\u0627\u0646", "\u062A\u064A\u0646"] + + # Alif Noon, Waaw Noon + self.verb_su2 = ["\u0627\u0646", "\u0648\u0646"] + # Siin Taa, Siin Yaa + self.verb_pr2 = ["\u0633\u062A", "\u0633\u064A"] + # Siin Alif, Siin Noon + self.verb_pr22 = ["\u0633\u0627", "\u0633\u0646"] + # Lam Noon, Lam Taa, Lam Yaa, Lam Hamza + self.verb_pr33 = [ + "\u0644\u0646", + "\u0644\u062A", + "\u0644\u064A", + "\u0644\u0623", + ] + # Taa Miim Alif, Taa Noon Shadda + self.verb_suf3 = ["\u062A\u0645\u0627", "\u062A\u0646\u0651"] + # Noon Alif, Taa Miim, Taa Alif, Waaw Alif + self.verb_suf2 = [ + "\u0646\u0627", + "\u062A\u0645", + "\u062A\u0627", + "\u0648\u0627", + ] + # Taa, Alif, Noon + self.verb_suf1 = ["\u062A", "\u0627", "\u0646"] + + def stem1(self, token): + """ + call this function to get the first stem + """ + try: + if token is None: + raise ValueError( + "The word could not be stemmed, because \ + it is empty !" + ) + self.is_verb = False + # remove Arabic diacritics and replace some letters with others + token = self.norm(token) + # strip the common noun prefixes + pre = self.pref(token) + if pre is not None: + token = pre + # transform the feminine form to masculine form + fm = self.fem2masc(token) + if fm is not None: + return fm + # strip the adjective affixes + adj = self.adjective(token) + if adj is not None: + return adj + # strip the suffixes that are common to nouns and verbs + token = self.suff(token) + # transform a plural noun to a singular noun + ps = self.plur2sing(token) + if ps is None: + if pre is None: # if the noun prefixes are not stripped + # strip the verb prefixes and suffixes + verb = self.verb(token) + if verb is not None: + self.is_verb = True + return verb + else: + return ps + return token + except ValueError as e: + print(e) + + def stem(self, token): + # stem the input word + try: + if token is None: + raise ValueError( + "The word could not be stemmed, because \ + it is empty !" 
+ ) + # run the first round of stemming + token = self.stem1(token) + # check if there is some additional noun affixes + if len(token) > 4: + # ^Taa, $Yaa + char + if token.startswith("\u062A") and token[-2] == "\u064A": + token = token[1:-2] + token[-1] + return token + # ^Miim, $Waaw + char + if token.startswith("\u0645") and token[-2] == "\u0648": + token = token[1:-2] + token[-1] + return token + if len(token) > 3: + # !^Alif, $Yaa + if not token.startswith("\u0627") and token.endswith("\u064A"): + token = token[:-1] + return token + # $Laam + if token.startswith("\u0644"): + return token[1:] + return token + except ValueError as e: + print(e) + + def norm(self, token): + """ + normalize the word by removing diacritics, replace hamzated Alif + with Alif bare, replace AlifMaqsura with Yaa and remove Waaw at the + beginning. + """ + # strip Arabic diacritics + token = self.re_diacritics.sub("", token) + # replace Hamzated Alif with Alif bare + token = self.re_hamzated_alif.sub("\u0627", token) + # replace alifMaqsura with Yaa + token = self.re_alifMaqsura.sub("\u064A", token) + # strip the Waaw from the word beginning if the remaining is + # tri-literal at least + if token.startswith("\u0648") and len(token) > 3: + token = token[1:] + return token + + def pref(self, token): + """ + remove prefixes from the words' beginning. + """ + if len(token) > 5: + for p3 in self.pr3: + if token.startswith(p3): + return token[3:] + if len(token) > 6: + for p4 in self.pr4: + if token.startswith(p4): + return token[4:] + if len(token) > 5: + for p3 in self.pr32: + if token.startswith(p3): + return token[3:] + if len(token) > 4: + for p2 in self.pr2: + if token.startswith(p2): + return token[2:] + + def adjective(self, token): + """ + remove the infixes from adjectives + """ + # ^Alif, Alif, $Yaa + if len(token) > 5: + if ( + token.startswith("\u0627") + and token[-3] == "\u0627" + and token.endswith("\u064A") + ): + return token[:-3] + token[-2] + + def suff(self, token): + """ + remove the suffixes from the word's ending. + """ + if token.endswith("\u0643") and len(token) > 3: + return token[:-1] + if len(token) > 4: + for s2 in self.su2: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su3: + if token.endswith(s3): + return token[:-3] + if token.endswith("\u0647") and len(token) > 3: + token = token[:-1] + return token + if len(token) > 4: + for s2 in self.su22: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su32: + if token.endswith(s3): + return token[:-3] + # $Noon and Alif + if token.endswith("\u0646\u0627") and len(token) > 4: + return token[:-2] + return token + + def fem2masc(self, token): + """ + transform the word from the feminine form to the masculine form. 
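+ Returns None if the token matches none of the feminine patterns.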
+ """ + if len(token) > 6: + # ^Taa, Yaa, $Yaa and Taa Marbuta + if ( + token.startswith("\u062A") + and token[-4] == "\u064A" + and token.endswith("\u064A\u0629") + ): + return token[1:-4] + token[-3] + # ^Alif, Yaa, $Yaa and Taa Marbuta + if ( + token.startswith("\u0627") + and token[-4] == "\u0627" + and token.endswith("\u064A\u0629") + ): + return token[:-4] + token[-3] + # $Alif, Yaa and Taa Marbuta + if token.endswith("\u0627\u064A\u0629") and len(token) > 5: + return token[:-2] + if len(token) > 4: + # Alif, $Taa Marbuta + if token[1] == "\u0627" and token.endswith("\u0629"): + return token[0] + token[2:-1] + # $Yaa and Taa Marbuta + if token.endswith("\u064A\u0629"): + return token[:-2] + # $Taa Marbuta + if token.endswith("\u0629") and len(token) > 3: + return token[:-1] + + def plur2sing(self, token): + """ + transform the word from the plural form to the singular form. + """ + # ^Haa, $Noon, Waaw + if len(token) > 5: + if token.startswith("\u0645") and token.endswith("\u0648\u0646"): + return token[1:-2] + if len(token) > 4: + for ps2 in self.pl_si2: + if token.endswith(ps2): + return token[:-2] + if len(token) > 5: + for ps3 in self.pl_si3: + if token.endswith(ps3): + return token[:-3] + if len(token) > 4: + # $Alif, Taa + if token.endswith("\u0627\u062A"): + return token[:-2] + # ^Alif Alif + if token.startswith("\u0627") and token[2] == "\u0627": + return token[:2] + token[3:] + # ^Alif Alif + if token.startswith("\u0627") and token[-2] == "\u0627": + return token[1:-2] + token[-1] + + def verb(self, token): + """ + stem the verb prefixes and suffixes or both + """ + vb = self.verb_t1(token) + if vb is not None: + return vb + vb = self.verb_t2(token) + if vb is not None: + return vb + vb = self.verb_t3(token) + if vb is not None: + return vb + vb = self.verb_t4(token) + if vb is not None: + return vb + vb = self.verb_t5(token) + if vb is not None: + return vb + vb = self.verb_t6(token) + return vb + + def verb_t1(self, token): + """ + stem the present tense co-occurred prefixes and suffixes + """ + if len(token) > 5 and token.startswith("\u062A"): # Taa + for s2 in self.pl_si2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 5 and token.startswith("\u064A"): # Yaa + for s2 in self.verb_su2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 4 and token.startswith("\u0627"): # Alif + # Waaw Alif + if len(token) > 5 and token.endswith("\u0648\u0627"): + return token[1:-2] + # Yaa + if token.endswith("\u064A"): + return token[1:-1] + # Alif + if token.endswith("\u0627"): + return token[1:-1] + # Noon + if token.endswith("\u0646"): + return token[1:-1] + # ^Yaa, Noon$ + if len(token) > 4 and token.startswith("\u064A") and token.endswith("\u0646"): + return token[1:-1] + # ^Taa, Noon$ + if len(token) > 4 and token.startswith("\u062A") and token.endswith("\u0646"): + return token[1:-1] + + def verb_t2(self, token): + """ + stem the future tense co-occurred prefixes and suffixes + """ + if len(token) > 6: + for s2 in self.pl_si2: + # ^Siin Taa + if token.startswith(self.verb_pr2[0]) and token.endswith(s2): + return token[2:-2] + # ^Siin Yaa, Alif Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[0]): + return token[2:-2] + # ^Siin Yaa, Waaw Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[2]): + return token[2:-2] + # ^Siin Taa, Noon$ + if ( + len(token) > 5 + and token.startswith(self.verb_pr2[0]) + and token.endswith("\u0646") + ): + return token[2:-1] + # ^Siin Yaa, Noon$ + if ( + len(token) > 5 + 
and token.startswith(self.verb_pr2[1]) + and token.endswith("\u0646") + ): + return token[2:-1] + + def verb_t3(self, token): + """ + stem the present tense suffixes + """ + if len(token) > 5: + for su3 in self.verb_suf3: + if token.endswith(su3): + return token[:-3] + if len(token) > 4: + for su2 in self.verb_suf2: + if token.endswith(su2): + return token[:-2] + if len(token) > 3: + for su1 in self.verb_suf1: + if token.endswith(su1): + return token[:-1] + + def verb_t4(self, token): + """ + stem the present tense prefixes + """ + if len(token) > 3: + for pr1 in self.verb_suf1: + if token.startswith(pr1): + return token[1:] + if token.startswith("\u064A"): + return token[1:] + + def verb_t5(self, token): + """ + stem the future tense prefixes + """ + if len(token) > 4: + for pr2 in self.verb_pr22: + if token.startswith(pr2): + return token[2:] + for pr2 in self.verb_pr2: + if token.startswith(pr2): + return token[2:] + + def verb_t6(self, token): + """ + stem the imperative tense prefixes + """ + if len(token) > 4: + for pr3 in self.verb_pr33: + if token.startswith(pr3): + return token[2:] + + return token diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/stem/cistem.py b/env-llmeval/lib/python3.10/site-packages/nltk/stem/cistem.py new file mode 100644 index 0000000000000000000000000000000000000000..69c07a42a373cec1eca9d75e9d474c4c1063e70b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/stem/cistem.py @@ -0,0 +1,209 @@ +# Natural Language Toolkit: CISTEM Stemmer for German +# Copyright (C) 2001-2023 NLTK Project +# Author: Leonie Weissweiler +# Tom Aarsen <> (modifications) +# Algorithm: Leonie Weissweiler +# Alexander Fraser +# URL: +# For license information, see LICENSE.TXT + +import re +from typing import Tuple + +from nltk.stem.api import StemmerI + + +class Cistem(StemmerI): + """ + CISTEM Stemmer for German + + This is the official Python implementation of the CISTEM stemmer. + It is based on the paper + Leonie Weissweiler, Alexander Fraser (2017). Developing a Stemmer for German + Based on a Comparative Analysis of Publicly Available Stemmers. + In Proceedings of the German Society for Computational Linguistics and Language + Technology (GSCL) + which can be read here: + https://www.cis.lmu.de/~weissweiler/cistem/ + + In the paper, we conducted an analysis of publicly available stemmers, + developed two gold standards for German stemming and evaluated the stemmers + based on the two gold standards. We then proposed the stemmer implemented here + and show that it achieves slightly better f-measure than the other stemmers and + is thrice as fast as the Snowball stemmer for German while being about as fast + as most other stemmers. + + case_insensitive is a a boolean specifying if case-insensitive stemming + should be used. Case insensitivity improves performance only if words in the + text may be incorrectly upper case. For all-lowercase and correctly cased + text, best performance is achieved by setting case_insensitive for false. + + :param case_insensitive: if True, the stemming is case insensitive. False by default. 
+ :type case_insensitive: bool + """ + + strip_ge = re.compile(r"^ge(.{4,})") + repl_xx = re.compile(r"(.)\1") + strip_emr = re.compile(r"e[mr]$") + strip_nd = re.compile(r"nd$") + strip_t = re.compile(r"t$") + strip_esn = re.compile(r"[esn]$") + repl_xx_back = re.compile(r"(.)\*") + + def __init__(self, case_insensitive: bool = False): + self._case_insensitive = case_insensitive + + @staticmethod + def replace_to(word: str) -> str: + word = word.replace("sch", "$") + word = word.replace("ei", "%") + word = word.replace("ie", "&") + word = Cistem.repl_xx.sub(r"\1*", word) + + return word + + @staticmethod + def replace_back(word: str) -> str: + word = Cistem.repl_xx_back.sub(r"\1\1", word) + word = word.replace("%", "ei") + word = word.replace("&", "ie") + word = word.replace("$", "sch") + + return word + + def stem(self, word: str) -> str: + """Stems the input word. + + :param word: The word that is to be stemmed. + :type word: str + :return: The stemmed word. + :rtype: str + + >>> from nltk.stem.cistem import Cistem + >>> stemmer = Cistem() + >>> s1 = "Speicherbehältern" + >>> stemmer.stem(s1) + 'speicherbehalt' + >>> s2 = "Grenzpostens" + >>> stemmer.stem(s2) + 'grenzpost' + >>> s3 = "Ausgefeiltere" + >>> stemmer.stem(s3) + 'ausgefeilt' + >>> stemmer = Cistem(True) + >>> stemmer.stem(s1) + 'speicherbehal' + >>> stemmer.stem(s2) + 'grenzpo' + >>> stemmer.stem(s3) + 'ausgefeil' + """ + if len(word) == 0: + return word + + upper = word[0].isupper() + word = word.lower() + + word = word.replace("ü", "u") + word = word.replace("ö", "o") + word = word.replace("ä", "a") + word = word.replace("ß", "ss") + + word = Cistem.strip_ge.sub(r"\1", word) + + return self._segment_inner(word, upper)[0] + + def segment(self, word: str) -> Tuple[str, str]: + """ + This method works very similarly to stem (:func:'cistem.stem'). The difference is that in + addition to returning the stem, it also returns the rest that was removed at + the end. To be able to return the stem unchanged so the stem and the rest + can be concatenated to form the original word, all subsitutions that altered + the stem in any other way than by removing letters at the end were left out. + + :param word: The word that is to be stemmed. + :type word: str + :return: A tuple of the stemmed word and the removed suffix. + :rtype: Tuple[str, str] + + >>> from nltk.stem.cistem import Cistem + >>> stemmer = Cistem() + >>> s1 = "Speicherbehältern" + >>> stemmer.segment(s1) + ('speicherbehält', 'ern') + >>> s2 = "Grenzpostens" + >>> stemmer.segment(s2) + ('grenzpost', 'ens') + >>> s3 = "Ausgefeiltere" + >>> stemmer.segment(s3) + ('ausgefeilt', 'ere') + >>> stemmer = Cistem(True) + >>> stemmer.segment(s1) + ('speicherbehäl', 'tern') + >>> stemmer.segment(s2) + ('grenzpo', 'stens') + >>> stemmer.segment(s3) + ('ausgefeil', 'tere') + """ + if len(word) == 0: + return ("", "") + + upper = word[0].isupper() + word = word.lower() + + return self._segment_inner(word, upper) + + def _segment_inner(self, word: str, upper: bool): + """Inner method for iteratively applying the code stemming regexes. + This method receives a pre-processed variant of the word to be stemmed, + or the word to be segmented, and returns a tuple of the word and the + removed suffix. + + :param word: A pre-processed variant of the word that is to be stemmed. + :type word: str + :param upper: Whether the original word started with a capital letter. + :type upper: bool + :return: A tuple of the stemmed word and the removed suffix. 
+ :rtype: Tuple[str, str] + """ + + rest_length = 0 + word_copy = word[:] + + # Pre-processing before applying the substitution patterns + word = Cistem.replace_to(word) + rest = "" + + # Apply the substitution patterns + while len(word) > 3: + if len(word) > 5: + word, n = Cistem.strip_emr.subn("", word) + if n != 0: + rest_length += 2 + continue + + word, n = Cistem.strip_nd.subn("", word) + if n != 0: + rest_length += 2 + continue + + if not upper or self._case_insensitive: + word, n = Cistem.strip_t.subn("", word) + if n != 0: + rest_length += 1 + continue + + word, n = Cistem.strip_esn.subn("", word) + if n != 0: + rest_length += 1 + continue + else: + break + + # Post-processing after applying the substitution patterns + word = Cistem.replace_back(word) + + if rest_length: + rest = word_copy[-rest_length:] + + return (word, rest) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/stem/lancaster.py b/env-llmeval/lib/python3.10/site-packages/nltk/stem/lancaster.py new file mode 100644 index 0000000000000000000000000000000000000000..40a87331848c9f25332e5e655bc24d85b563c2c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/stem/lancaster.py @@ -0,0 +1,343 @@ +# Natural Language Toolkit: Stemmers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Tomcavage +# URL: +# For license information, see LICENSE.TXT + +""" +A word stemmer based on the Lancaster (Paice/Husk) stemming algorithm. +Paice, Chris D. "Another Stemmer." ACM SIGIR Forum 24.3 (1990): 56-61. +""" +import re + +from nltk.stem.api import StemmerI + + +class LancasterStemmer(StemmerI): + """ + Lancaster Stemmer + + >>> from nltk.stem.lancaster import LancasterStemmer + >>> st = LancasterStemmer() + >>> st.stem('maximum') # Remove "-um" when word is intact + 'maxim' + >>> st.stem('presumably') # Don't remove "-um" when word is not intact + 'presum' + >>> st.stem('multiply') # No action taken if word ends with "-ply" + 'multiply' + >>> st.stem('provision') # Replace "-sion" with "-j" to trigger "j" set of rules + 'provid' + >>> st.stem('owed') # Word starting with vowel must contain at least 2 letters + 'ow' + >>> st.stem('ear') # ditto + 'ear' + >>> st.stem('saying') # Words starting with consonant must contain at least 3 + 'say' + >>> st.stem('crying') # letters and one of those letters must be a vowel + 'cry' + >>> st.stem('string') # ditto + 'string' + >>> st.stem('meant') # ditto + 'meant' + >>> st.stem('cement') # ditto + 'cem' + >>> st_pre = LancasterStemmer(strip_prefix_flag=True) + >>> st_pre.stem('kilometer') # Test Prefix + 'met' + >>> st_custom = LancasterStemmer(rule_tuple=("ssen4>", "s1t.")) + >>> st_custom.stem("ness") # Change s to t + 'nest' + """ + + # The rule list is static since it doesn't change between instances + default_rule_tuple = ( + "ai*2.", # -ia > - if intact + "a*1.", # -a > - if intact + "bb1.", # -bb > -b + "city3s.", # -ytic > -ys + "ci2>", # -ic > - + "cn1t>", # -nc > -nt + "dd1.", # -dd > -d + "dei3y>", # -ied > -y + "deec2ss.", # -ceed >", -cess + "dee1.", # -eed > -ee + "de2>", # -ed > - + "dooh4>", # -hood > - + "e1>", # -e > - + "feil1v.", # -lief > -liev + "fi2>", # -if > - + "gni3>", # -ing > - + "gai3y.", # -iag > -y + "ga2>", # -ag > - + "gg1.", # -gg > -g + "ht*2.", # -th > - if intact + "hsiug5ct.", # -guish > -ct + "hsi3>", # -ish > - + "i*1.", # -i > - if intact + "i1y>", # -i > -y + "ji1d.", # -ij > -id -- see nois4j> & vis3j> + "juf1s.", # -fuj > -fus + "ju1d.", # -uj > -ud + "jo1d.", # -oj > -od + "jeh1r.", # -hej > -her + "jrev1t.", # -verj > 
-vert + "jsim2t.", # -misj > -mit + "jn1d.", # -nj > -nd + "j1s.", # -j > -s + "lbaifi6.", # -ifiabl > - + "lbai4y.", # -iabl > -y + "lba3>", # -abl > - + "lbi3.", # -ibl > - + "lib2l>", # -bil > -bl + "lc1.", # -cl > c + "lufi4y.", # -iful > -y + "luf3>", # -ful > - + "lu2.", # -ul > - + "lai3>", # -ial > - + "lau3>", # -ual > - + "la2>", # -al > - + "ll1.", # -ll > -l + "mui3.", # -ium > - + "mu*2.", # -um > - if intact + "msi3>", # -ism > - + "mm1.", # -mm > -m + "nois4j>", # -sion > -j + "noix4ct.", # -xion > -ct + "noi3>", # -ion > - + "nai3>", # -ian > - + "na2>", # -an > - + "nee0.", # protect -een + "ne2>", # -en > - + "nn1.", # -nn > -n + "pihs4>", # -ship > - + "pp1.", # -pp > -p + "re2>", # -er > - + "rae0.", # protect -ear + "ra2.", # -ar > - + "ro2>", # -or > - + "ru2>", # -ur > - + "rr1.", # -rr > -r + "rt1>", # -tr > -t + "rei3y>", # -ier > -y + "sei3y>", # -ies > -y + "sis2.", # -sis > -s + "si2>", # -is > - + "ssen4>", # -ness > - + "ss0.", # protect -ss + "suo3>", # -ous > - + "su*2.", # -us > - if intact + "s*1>", # -s > - if intact + "s0.", # -s > -s + "tacilp4y.", # -plicat > -ply + "ta2>", # -at > - + "tnem4>", # -ment > - + "tne3>", # -ent > - + "tna3>", # -ant > - + "tpir2b.", # -ript > -rib + "tpro2b.", # -orpt > -orb + "tcud1.", # -duct > -duc + "tpmus2.", # -sumpt > -sum + "tpec2iv.", # -cept > -ceiv + "tulo2v.", # -olut > -olv + "tsis0.", # protect -sist + "tsi3>", # -ist > - + "tt1.", # -tt > -t + "uqi3.", # -iqu > - + "ugo1.", # -ogu > -og + "vis3j>", # -siv > -j + "vie0.", # protect -eiv + "vi2>", # -iv > - + "ylb1>", # -bly > -bl + "yli3y>", # -ily > -y + "ylp0.", # protect -ply + "yl2>", # -ly > - + "ygo1.", # -ogy > -og + "yhp1.", # -phy > -ph + "ymo1.", # -omy > -om + "ypo1.", # -opy > -op + "yti3>", # -ity > - + "yte3>", # -ety > - + "ytl2.", # -lty > -l + "yrtsi5.", # -istry > - + "yra3>", # -ary > - + "yro3>", # -ory > - + "yfi3.", # -ify > - + "ycn2t>", # -ncy > -nt + "yca3>", # -acy > - + "zi2>", # -iz > - + "zy1s.", # -yz > -ys + ) + + def __init__(self, rule_tuple=None, strip_prefix_flag=False): + """Create an instance of the Lancaster stemmer.""" + # Setup an empty rule dictionary - this will be filled in later + self.rule_dictionary = {} + # Check if a user wants to strip prefix + self._strip_prefix = strip_prefix_flag + # Check if a user wants to use his/her own rule tuples. + self._rule_tuple = rule_tuple if rule_tuple else self.default_rule_tuple + + def parseRules(self, rule_tuple=None): + """Validate the set of rules used in this stemmer. + + If this function is called as an individual method, without using stem + method, rule_tuple argument will be compiled into self.rule_dictionary. + If this function is called within stem, self._rule_tuple will be used. + + """ + # If there is no argument for the function, use class' own rule tuple. 
+ rule_tuple = rule_tuple if rule_tuple else self._rule_tuple + valid_rule = re.compile(r"^[a-z]+\*?\d[a-z]*[>\.]?$") + # Empty any old rules from the rule set before adding new ones + self.rule_dictionary = {} + + for rule in rule_tuple: + if not valid_rule.match(rule): + raise ValueError(f"The rule {rule} is invalid") + first_letter = rule[0:1] + if first_letter in self.rule_dictionary: + self.rule_dictionary[first_letter].append(rule) + else: + self.rule_dictionary[first_letter] = [rule] + + def stem(self, word): + """Stem a word using the Lancaster stemmer.""" + # Lower-case the word, since all the rules are lower-cased + word = word.lower() + word = self.__stripPrefix(word) if self._strip_prefix else word + + # Save a copy of the original word + intact_word = word + + # If rule dictionary is empty, parse rule tuple. + if not self.rule_dictionary: + self.parseRules() + + return self.__doStemming(word, intact_word) + + def __doStemming(self, word, intact_word): + """Perform the actual word stemming""" + + valid_rule = re.compile(r"^([a-z]+)(\*?)(\d)([a-z]*)([>\.]?)$") + + proceed = True + + while proceed: + + # Find the position of the last letter of the word to be stemmed + last_letter_position = self.__getLastLetter(word) + + # Only stem the word if it has a last letter and a rule matching that last letter + if ( + last_letter_position < 0 + or word[last_letter_position] not in self.rule_dictionary + ): + proceed = False + + else: + rule_was_applied = False + + # Go through each rule that matches the word's final letter + for rule in self.rule_dictionary[word[last_letter_position]]: + rule_match = valid_rule.match(rule) + if rule_match: + ( + ending_string, + intact_flag, + remove_total, + append_string, + cont_flag, + ) = rule_match.groups() + + # Convert the number of chars to remove when stemming + # from a string to an integer + remove_total = int(remove_total) + + # Proceed if word's ending matches rule's word ending + if word.endswith(ending_string[::-1]): + if intact_flag: + if word == intact_word and self.__isAcceptable( + word, remove_total + ): + word = self.__applyRule( + word, remove_total, append_string + ) + rule_was_applied = True + if cont_flag == ".": + proceed = False + break + elif self.__isAcceptable(word, remove_total): + word = self.__applyRule( + word, remove_total, append_string + ) + rule_was_applied = True + if cont_flag == ".": + proceed = False + break + # If no rules apply, the word doesn't need any more stemming + if rule_was_applied == False: + proceed = False + return word + + def __getLastLetter(self, word): + """Get the zero-based index of the last alphabetic character in this string""" + last_letter = -1 + for position in range(len(word)): + if word[position].isalpha(): + last_letter = position + else: + break + return last_letter + + def __isAcceptable(self, word, remove_total): + """Determine if the word is acceptable for stemming.""" + word_is_acceptable = False + # If the word starts with a vowel, it must be at least 2 + # characters long to be stemmed + if word[0] in "aeiouy": + if len(word) - remove_total >= 2: + word_is_acceptable = True + # If the word starts with a consonant, it must be at least 3 + # characters long (including one vowel) to be stemmed + elif len(word) - remove_total >= 3: + if word[1] in "aeiouy": + word_is_acceptable = True + elif word[2] in "aeiouy": + word_is_acceptable = True + return word_is_acceptable + + def __applyRule(self, word, remove_total, append_string): + """Apply the stemming rule to the word""" + # Remove 
letters from the end of the word + new_word_length = len(word) - remove_total + word = word[0:new_word_length] + + # And add new letters to the end of the truncated word + if append_string: + word += append_string + return word + + def __stripPrefix(self, word): + """Remove prefix from a word. + + This function originally taken from Whoosh. + + """ + for prefix in ( + "kilo", + "micro", + "milli", + "intra", + "ultra", + "mega", + "nano", + "pico", + "pseudo", + ): + if word.startswith(prefix): + return word[len(prefix) :] + return word + + def __repr__(self): + return "" diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/stem/porter.py b/env-llmeval/lib/python3.10/site-packages/nltk/stem/porter.py new file mode 100644 index 0000000000000000000000000000000000000000..c84402d8083677ea9e727f5f5b0998529ad96ba6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/stem/porter.py @@ -0,0 +1,715 @@ +""" +Porter Stemmer + +This is the Porter stemming algorithm. It follows the algorithm +presented in + +Porter, M. "An algorithm for suffix stripping." Program 14.3 (1980): 130-137. + +with some optional deviations that can be turned on or off with the +`mode` argument to the constructor. + +Martin Porter, the algorithm's inventor, maintains a web page about the +algorithm at + + https://www.tartarus.org/~martin/PorterStemmer/ + +which includes another Python implementation and other implementations +in many languages. +""" + +__docformat__ = "plaintext" + +import re + +from nltk.stem.api import StemmerI + + +class PorterStemmer(StemmerI): + """ + A word stemmer based on the Porter stemming algorithm. + + Porter, M. "An algorithm for suffix stripping." + Program 14.3 (1980): 130-137. + + See https://www.tartarus.org/~martin/PorterStemmer/ for the homepage + of the algorithm. + + Martin Porter has endorsed several modifications to the Porter + algorithm since writing his original paper, and those extensions are + included in the implementations on his website. Additionally, others + have proposed further improvements to the algorithm, including NLTK + contributors. There are thus three modes that can be selected by + passing the appropriate constant to the class constructor's `mode` + attribute: + + - PorterStemmer.ORIGINAL_ALGORITHM + + An implementation that is faithful to the original paper. + + Note that Martin Porter has deprecated this version of the + algorithm. Martin distributes implementations of the Porter + Stemmer in many languages, hosted at: + + https://www.tartarus.org/~martin/PorterStemmer/ + + and all of these implementations include his extensions. He + strongly recommends against using the original, published + version of the algorithm; only use this mode if you clearly + understand why you are choosing to do so. + + - PorterStemmer.MARTIN_EXTENSIONS + + An implementation that only uses the modifications to the + algorithm that are included in the implementations on Martin + Porter's website. He has declared Porter frozen, so the + behaviour of those implementations should never change. + + - PorterStemmer.NLTK_EXTENSIONS (default) + + An implementation that includes further improvements devised by + NLTK contributors or taken from other modified implementations + found on the web. + + For the best stemming, you should use the default NLTK_EXTENSIONS + version. 
However, if you need to get the same results as either the + original algorithm or one of Martin Porter's hosted versions for + compatibility with an existing implementation or dataset, you can use + one of the other modes instead. + """ + + # Modes the Stemmer can be instantiated in + NLTK_EXTENSIONS = "NLTK_EXTENSIONS" + MARTIN_EXTENSIONS = "MARTIN_EXTENSIONS" + ORIGINAL_ALGORITHM = "ORIGINAL_ALGORITHM" + + def __init__(self, mode=NLTK_EXTENSIONS): + if mode not in ( + self.NLTK_EXTENSIONS, + self.MARTIN_EXTENSIONS, + self.ORIGINAL_ALGORITHM, + ): + raise ValueError( + "Mode must be one of PorterStemmer.NLTK_EXTENSIONS, " + "PorterStemmer.MARTIN_EXTENSIONS, or " + "PorterStemmer.ORIGINAL_ALGORITHM" + ) + + self.mode = mode + + if self.mode == self.NLTK_EXTENSIONS: + # This is a table of irregular forms. It is quite short, + # but still reflects the errors actually drawn to Martin + # Porter's attention over a 20 year period! + irregular_forms = { + "sky": ["sky", "skies"], + "die": ["dying"], + "lie": ["lying"], + "tie": ["tying"], + "news": ["news"], + "inning": ["innings", "inning"], + "outing": ["outings", "outing"], + "canning": ["cannings", "canning"], + "howe": ["howe"], + "proceed": ["proceed"], + "exceed": ["exceed"], + "succeed": ["succeed"], + } + + self.pool = {} + for key in irregular_forms: + for val in irregular_forms[key]: + self.pool[val] = key + + self.vowels = frozenset(["a", "e", "i", "o", "u"]) + + def _is_consonant(self, word, i): + """Returns True if word[i] is a consonant, False otherwise + + A consonant is defined in the paper as follows: + + A consonant in a word is a letter other than A, E, I, O or + U, and other than Y preceded by a consonant. (The fact that + the term `consonant' is defined to some extent in terms of + itself does not make it ambiguous.) So in TOY the consonants + are T and Y, and in SYZYGY they are S, Z and G. If a letter + is not a consonant it is a vowel. + """ + if word[i] in self.vowels: + return False + if word[i] == "y": + if i == 0: + return True + else: + return not self._is_consonant(word, i - 1) + return True + + def _measure(self, stem): + r"""Returns the 'measure' of stem, per definition in the paper + + From the paper: + + A consonant will be denoted by c, a vowel by v. A list + ccc... of length greater than 0 will be denoted by C, and a + list vvv... of length greater than 0 will be denoted by V. + Any word, or part of a word, therefore has one of the four + forms: + + CVCV ... C + CVCV ... V + VCVC ... C + VCVC ... V + + These may all be represented by the single form + + [C]VCVC ... [V] + + where the square brackets denote arbitrary presence of their + contents. Using (VC){m} to denote VC repeated m times, this + may again be written as + + [C](VC){m}[V]. + + m will be called the \measure\ of any word or word part when + represented in this form. The case m = 0 covers the null + word. Here are some examples: + + m=0 TR, EE, TREE, Y, BY. + m=1 TROUBLE, OATS, TREES, IVY. + m=2 TROUBLES, PRIVATE, OATEN, ORRERY. + """ + cv_sequence = "" + + # Construct a string of 'c's and 'v's representing whether each + # character in `stem` is a consonant or a vowel. + # e.g. 
'falafel' becomes 'cvcvcvc', + # 'architecture' becomes 'vcccvcvccvcv' + for i in range(len(stem)): + if self._is_consonant(stem, i): + cv_sequence += "c" + else: + cv_sequence += "v" + + # Count the number of 'vc' occurrences, which is equivalent to + # the number of 'VC' occurrences in Porter's reduced form in the + # docstring above, which is in turn equivalent to `m` + return cv_sequence.count("vc") + + def _has_positive_measure(self, stem): + return self._measure(stem) > 0 + + def _contains_vowel(self, stem): + """Returns True if stem contains a vowel, else False""" + for i in range(len(stem)): + if not self._is_consonant(stem, i): + return True + return False + + def _ends_double_consonant(self, word): + """Implements condition *d from the paper + + Returns True if word ends with a double consonant + """ + return ( + len(word) >= 2 + and word[-1] == word[-2] + and self._is_consonant(word, len(word) - 1) + ) + + def _ends_cvc(self, word): + """Implements condition *o from the paper + + From the paper: + + *o - the stem ends cvc, where the second c is not W, X or Y + (e.g. -WIL, -HOP). + """ + return ( + len(word) >= 3 + and self._is_consonant(word, len(word) - 3) + and not self._is_consonant(word, len(word) - 2) + and self._is_consonant(word, len(word) - 1) + and word[-1] not in ("w", "x", "y") + ) or ( + self.mode == self.NLTK_EXTENSIONS + and len(word) == 2 + and not self._is_consonant(word, 0) + and self._is_consonant(word, 1) + ) + + def _replace_suffix(self, word, suffix, replacement): + """Replaces `suffix` of `word` with `replacement""" + assert word.endswith(suffix), "Given word doesn't end with given suffix" + if suffix == "": + return word + replacement + else: + return word[: -len(suffix)] + replacement + + def _apply_rule_list(self, word, rules): + """Applies the first applicable suffix-removal rule to the word + + Takes a word and a list of suffix-removal rules represented as + 3-tuples, with the first element being the suffix to remove, + the second element being the string to replace it with, and the + final element being the condition for the rule to be applicable, + or None if the rule is unconditional. 
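+
+ For example, the Step 2 rule ("ational", "ate", self._has_positive_measure)
+ stems "relational" to "relate": the "ational" suffix is stripped, the
+ condition is checked against the remaining stem "rel", and the
+ replacement "ate" is appended. The special suffix "*d" stands for any
+ double consonant, as used in Step 1b.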
+ """ + for rule in rules: + suffix, replacement, condition = rule + if suffix == "*d" and self._ends_double_consonant(word): + stem = word[:-2] + if condition is None or condition(stem): + return stem + replacement + else: + # Don't try any further rules + return word + if word.endswith(suffix): + stem = self._replace_suffix(word, suffix, "") + if condition is None or condition(stem): + return stem + replacement + else: + # Don't try any further rules + return word + + return word + + def _step1a(self, word): + """Implements Step 1a from "An algorithm for suffix stripping" + + From the paper: + + SSES -> SS caresses -> caress + IES -> I ponies -> poni + ties -> ti + SS -> SS caress -> caress + S -> cats -> cat + """ + # this NLTK-only rule extends the original algorithm, so + # that 'flies'->'fli' but 'dies'->'die' etc + if self.mode == self.NLTK_EXTENSIONS: + if word.endswith("ies") and len(word) == 4: + return self._replace_suffix(word, "ies", "ie") + + return self._apply_rule_list( + word, + [ + ("sses", "ss", None), # SSES -> SS + ("ies", "i", None), # IES -> I + ("ss", "ss", None), # SS -> SS + ("s", "", None), # S -> + ], + ) + + def _step1b(self, word): + """Implements Step 1b from "An algorithm for suffix stripping" + + From the paper: + + (m>0) EED -> EE feed -> feed + agreed -> agree + (*v*) ED -> plastered -> plaster + bled -> bled + (*v*) ING -> motoring -> motor + sing -> sing + + If the second or third of the rules in Step 1b is successful, + the following is done: + + AT -> ATE conflat(ed) -> conflate + BL -> BLE troubl(ed) -> trouble + IZ -> IZE siz(ed) -> size + (*d and not (*L or *S or *Z)) + -> single letter + hopp(ing) -> hop + tann(ed) -> tan + fall(ing) -> fall + hiss(ing) -> hiss + fizz(ed) -> fizz + (m=1 and *o) -> E fail(ing) -> fail + fil(ing) -> file + + The rule to map to a single letter causes the removal of one of + the double letter pair. The -E is put back on -AT, -BL and -IZ, + so that the suffixes -ATE, -BLE and -IZE can be recognised + later. This E may be removed in step 4. 
+ """ + # this NLTK-only block extends the original algorithm, so that + # 'spied'->'spi' but 'died'->'die' etc + if self.mode == self.NLTK_EXTENSIONS: + if word.endswith("ied"): + if len(word) == 4: + return self._replace_suffix(word, "ied", "ie") + else: + return self._replace_suffix(word, "ied", "i") + + # (m>0) EED -> EE + if word.endswith("eed"): + stem = self._replace_suffix(word, "eed", "") + if self._measure(stem) > 0: + return stem + "ee" + else: + return word + + rule_2_or_3_succeeded = False + + for suffix in ["ed", "ing"]: + if word.endswith(suffix): + intermediate_stem = self._replace_suffix(word, suffix, "") + if self._contains_vowel(intermediate_stem): + rule_2_or_3_succeeded = True + break + + if not rule_2_or_3_succeeded: + return word + + return self._apply_rule_list( + intermediate_stem, + [ + ("at", "ate", None), # AT -> ATE + ("bl", "ble", None), # BL -> BLE + ("iz", "ize", None), # IZ -> IZE + # (*d and not (*L or *S or *Z)) + # -> single letter + ( + "*d", + intermediate_stem[-1], + lambda stem: intermediate_stem[-1] not in ("l", "s", "z"), + ), + # (m=1 and *o) -> E + ( + "", + "e", + lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)), + ), + ], + ) + + def _step1c(self, word): + """Implements Step 1c from "An algorithm for suffix stripping" + + From the paper: + + Step 1c + + (*v*) Y -> I happy -> happi + sky -> sky + """ + + def nltk_condition(stem): + """ + This has been modified from the original Porter algorithm so + that y->i is only done when y is preceded by a consonant, + but not if the stem is only a single consonant, i.e. + + (*c and not c) Y -> I + + So 'happy' -> 'happi', but + 'enjoy' -> 'enjoy' etc + + This is a much better rule. Formerly 'enjoy'->'enjoi' and + 'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but + with this modification that no longer really matters. + + Also, the removal of the contains_vowel(z) condition means + that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and + conflate with 'spied', 'tried', 'flies' ... 
+ """ + return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1) + + def original_condition(stem): + return self._contains_vowel(stem) + + return self._apply_rule_list( + word, + [ + ( + "y", + "i", + nltk_condition + if self.mode == self.NLTK_EXTENSIONS + else original_condition, + ) + ], + ) + + def _step2(self, word): + """Implements Step 2 from "An algorithm for suffix stripping" + + From the paper: + + Step 2 + + (m>0) ATIONAL -> ATE relational -> relate + (m>0) TIONAL -> TION conditional -> condition + rational -> rational + (m>0) ENCI -> ENCE valenci -> valence + (m>0) ANCI -> ANCE hesitanci -> hesitance + (m>0) IZER -> IZE digitizer -> digitize + (m>0) ABLI -> ABLE conformabli -> conformable + (m>0) ALLI -> AL radicalli -> radical + (m>0) ENTLI -> ENT differentli -> different + (m>0) ELI -> E vileli - > vile + (m>0) OUSLI -> OUS analogousli -> analogous + (m>0) IZATION -> IZE vietnamization -> vietnamize + (m>0) ATION -> ATE predication -> predicate + (m>0) ATOR -> ATE operator -> operate + (m>0) ALISM -> AL feudalism -> feudal + (m>0) IVENESS -> IVE decisiveness -> decisive + (m>0) FULNESS -> FUL hopefulness -> hopeful + (m>0) OUSNESS -> OUS callousness -> callous + (m>0) ALITI -> AL formaliti -> formal + (m>0) IVITI -> IVE sensitiviti -> sensitive + (m>0) BILITI -> BLE sensibiliti -> sensible + """ + + if self.mode == self.NLTK_EXTENSIONS: + # Instead of applying the ALLI -> AL rule after '(a)bli' per + # the published algorithm, instead we apply it first, and, + # if it succeeds, run the result through step2 again. + if word.endswith("alli") and self._has_positive_measure( + self._replace_suffix(word, "alli", "") + ): + return self._step2(self._replace_suffix(word, "alli", "al")) + + bli_rule = ("bli", "ble", self._has_positive_measure) + abli_rule = ("abli", "able", self._has_positive_measure) + + rules = [ + ("ational", "ate", self._has_positive_measure), + ("tional", "tion", self._has_positive_measure), + ("enci", "ence", self._has_positive_measure), + ("anci", "ance", self._has_positive_measure), + ("izer", "ize", self._has_positive_measure), + abli_rule if self.mode == self.ORIGINAL_ALGORITHM else bli_rule, + ("alli", "al", self._has_positive_measure), + ("entli", "ent", self._has_positive_measure), + ("eli", "e", self._has_positive_measure), + ("ousli", "ous", self._has_positive_measure), + ("ization", "ize", self._has_positive_measure), + ("ation", "ate", self._has_positive_measure), + ("ator", "ate", self._has_positive_measure), + ("alism", "al", self._has_positive_measure), + ("iveness", "ive", self._has_positive_measure), + ("fulness", "ful", self._has_positive_measure), + ("ousness", "ous", self._has_positive_measure), + ("aliti", "al", self._has_positive_measure), + ("iviti", "ive", self._has_positive_measure), + ("biliti", "ble", self._has_positive_measure), + ] + + if self.mode == self.NLTK_EXTENSIONS: + rules.append(("fulli", "ful", self._has_positive_measure)) + + # The 'l' of the 'logi' -> 'log' rule is put with the stem, + # so that short stems like 'geo' 'theo' etc work like + # 'archaeo' 'philo' etc. 
+ rules.append( + ("logi", "log", lambda stem: self._has_positive_measure(word[:-3])) + ) + + if self.mode == self.MARTIN_EXTENSIONS: + rules.append(("logi", "log", self._has_positive_measure)) + + return self._apply_rule_list(word, rules) + + def _step3(self, word): + """Implements Step 3 from "An algorithm for suffix stripping" + + From the paper: + + Step 3 + + (m>0) ICATE -> IC triplicate -> triplic + (m>0) ATIVE -> formative -> form + (m>0) ALIZE -> AL formalize -> formal + (m>0) ICITI -> IC electriciti -> electric + (m>0) ICAL -> IC electrical -> electric + (m>0) FUL -> hopeful -> hope + (m>0) NESS -> goodness -> good + """ + return self._apply_rule_list( + word, + [ + ("icate", "ic", self._has_positive_measure), + ("ative", "", self._has_positive_measure), + ("alize", "al", self._has_positive_measure), + ("iciti", "ic", self._has_positive_measure), + ("ical", "ic", self._has_positive_measure), + ("ful", "", self._has_positive_measure), + ("ness", "", self._has_positive_measure), + ], + ) + + def _step4(self, word): + """Implements Step 4 from "An algorithm for suffix stripping" + + Step 4 + + (m>1) AL -> revival -> reviv + (m>1) ANCE -> allowance -> allow + (m>1) ENCE -> inference -> infer + (m>1) ER -> airliner -> airlin + (m>1) IC -> gyroscopic -> gyroscop + (m>1) ABLE -> adjustable -> adjust + (m>1) IBLE -> defensible -> defens + (m>1) ANT -> irritant -> irrit + (m>1) EMENT -> replacement -> replac + (m>1) MENT -> adjustment -> adjust + (m>1) ENT -> dependent -> depend + (m>1 and (*S or *T)) ION -> adoption -> adopt + (m>1) OU -> homologou -> homolog + (m>1) ISM -> communism -> commun + (m>1) ATE -> activate -> activ + (m>1) ITI -> angulariti -> angular + (m>1) OUS -> homologous -> homolog + (m>1) IVE -> effective -> effect + (m>1) IZE -> bowdlerize -> bowdler + + The suffixes are now removed. All that remains is a little + tidying up. + """ + measure_gt_1 = lambda stem: self._measure(stem) > 1 + + return self._apply_rule_list( + word, + [ + ("al", "", measure_gt_1), + ("ance", "", measure_gt_1), + ("ence", "", measure_gt_1), + ("er", "", measure_gt_1), + ("ic", "", measure_gt_1), + ("able", "", measure_gt_1), + ("ible", "", measure_gt_1), + ("ant", "", measure_gt_1), + ("ement", "", measure_gt_1), + ("ment", "", measure_gt_1), + ("ent", "", measure_gt_1), + # (m>1 and (*S or *T)) ION -> + ( + "ion", + "", + lambda stem: self._measure(stem) > 1 and stem[-1] in ("s", "t"), + ), + ("ou", "", measure_gt_1), + ("ism", "", measure_gt_1), + ("ate", "", measure_gt_1), + ("iti", "", measure_gt_1), + ("ous", "", measure_gt_1), + ("ive", "", measure_gt_1), + ("ize", "", measure_gt_1), + ], + ) + + def _step5a(self, word): + """Implements Step 5a from "An algorithm for suffix stripping" + + From the paper: + + Step 5a + + (m>1) E -> probate -> probat + rate -> rate + (m=1 and not *o) E -> cease -> ceas + """ + # Note that Martin's test vocabulary and reference + # implementations are inconsistent in how they handle the case + # where two rules both refer to a suffix that matches the word + # to be stemmed, but only the condition of the second one is + # true. + # Earlier in step2b we had the rules: + # (m>0) EED -> EE + # (*v*) ED -> + # but the examples in the paper included "feed"->"feed", even + # though (*v*) is true for "fe" and therefore the second rule + # alone would map "feed"->"fe". + # However, in THIS case, we need to handle the consecutive rules + # differently and try both conditions (obviously; the second + # rule here would be redundant otherwise). 
Martin's paper makes + # no explicit mention of the inconsistency; you have to infer it + # from the examples. + # For this reason, we can't use _apply_rule_list here. + if word.endswith("e"): + stem = self._replace_suffix(word, "e", "") + if self._measure(stem) > 1: + return stem + if self._measure(stem) == 1 and not self._ends_cvc(stem): + return stem + return word + + def _step5b(self, word): + """Implements Step 5a from "An algorithm for suffix stripping" + + From the paper: + + Step 5b + + (m > 1 and *d and *L) -> single letter + controll -> control + roll -> roll + """ + return self._apply_rule_list( + word, [("ll", "l", lambda stem: self._measure(word[:-1]) > 1)] + ) + + def stem(self, word, to_lowercase=True): + """ + :param to_lowercase: if `to_lowercase=True` the word always lowercase + """ + stem = word.lower() if to_lowercase else word + + if self.mode == self.NLTK_EXTENSIONS and word in self.pool: + return self.pool[stem] + + if self.mode != self.ORIGINAL_ALGORITHM and len(word) <= 2: + # With this line, strings of length 1 or 2 don't go through + # the stemming process, although no mention is made of this + # in the published algorithm. + return stem + + stem = self._step1a(stem) + stem = self._step1b(stem) + stem = self._step1c(stem) + stem = self._step2(stem) + stem = self._step3(stem) + stem = self._step4(stem) + stem = self._step5a(stem) + stem = self._step5b(stem) + + return stem + + def __repr__(self): + return "" + + +def demo(): + """ + A demonstration of the porter stemmer on a sample from + the Penn Treebank corpus. + """ + + from nltk import stem + from nltk.corpus import treebank + + stemmer = stem.PorterStemmer() + + orig = [] + stemmed = [] + for item in treebank.fileids()[:3]: + for (word, tag) in treebank.tagged_words(item): + orig.append(word) + stemmed.append(stemmer.stem(word)) + + # Convert the results to a string, and word-wrap them. + results = " ".join(stemmed) + results = re.sub(r"(.{,70})\s", r"\1\n", results + " ").rstrip() + + # Convert the original to a string, and word wrap it. + original = " ".join(orig) + original = re.sub(r"(.{,70})\s", r"\1\n", original + " ").rstrip() + + # Print the results. + print("-Original-".center(70).replace(" ", "*").replace("-", " ")) + print(original) + print("-Results-".center(70).replace(" ", "*").replace("-", " ")) + print(results) + print("*" * 70) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/stem/regexp.py b/env-llmeval/lib/python3.10/site-packages/nltk/stem/regexp.py new file mode 100644 index 0000000000000000000000000000000000000000..473b42bd4a194bc11a51db9db7a00178a945862a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/stem/regexp.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: Stemmers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT +import re + +from nltk.stem.api import StemmerI + + +class RegexpStemmer(StemmerI): + """ + A stemmer that uses regular expressions to identify morphological + affixes. Any substrings that match the regular expressions will + be removed. + + >>> from nltk.stem import RegexpStemmer + >>> st = RegexpStemmer('ing$|s$|e$|able$', min=4) + >>> st.stem('cars') + 'car' + >>> st.stem('mass') + 'mas' + >>> st.stem('was') + 'was' + >>> st.stem('bee') + 'bee' + >>> st.stem('compute') + 'comput' + >>> st.stem('advisable') + 'advis' + + :type regexp: str or regexp + :param regexp: The regular expression that should be used to + identify morphological affixes. 
+ :type min: int + :param min: The minimum length of string to stem + """ + + def __init__(self, regexp, min=0): + + if not hasattr(regexp, "pattern"): + regexp = re.compile(regexp) + self._regexp = regexp + self._min = min + + def stem(self, word): + if len(word) < self._min: + return word + else: + return self._regexp.sub("", word) + + def __repr__(self): + return f"" diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/stem/snowball.py b/env-llmeval/lib/python3.10/site-packages/nltk/stem/snowball.py new file mode 100644 index 0000000000000000000000000000000000000000..08cd9e76993213eafb0d1698f3f9b019af21068d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/stem/snowball.py @@ -0,0 +1,5946 @@ +# +# Natural Language Toolkit: Snowball Stemmer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Michael Stahl +# Peter Ljunglof (revisions) +# Lakhdar Benzahia (co-writer) +# Assem Chelli (reviewer arabicstemmer) +# Abdelkrim Aries (reviewer arabicstemmer) +# Algorithms: Dr Martin Porter +# Assem Chelli arabic stemming algorithm +# Benzahia Lakhdar +# URL: +# For license information, see LICENSE.TXT + +""" +Snowball stemmers + +This module provides a port of the Snowball stemmers +developed by Martin Porter. + +There is also a demo function: `snowball.demo()`. + +""" + +import re + +from nltk.corpus import stopwords +from nltk.stem import porter +from nltk.stem.api import StemmerI +from nltk.stem.util import prefix_replace, suffix_replace + + +class SnowballStemmer(StemmerI): + + """ + Snowball Stemmer + + The following languages are supported: + Arabic, Danish, Dutch, English, Finnish, French, German, + Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian, + Spanish and Swedish. + + The algorithm for English is documented here: + + Porter, M. \"An algorithm for suffix stripping.\" + Program 14.3 (1980): 130-137. + + The algorithms have been developed by Martin Porter. + These stemmers are called Snowball, because Porter created + a programming language with this name for creating + new stemming algorithms. There is more information available + at http://snowball.tartarus.org/ + + The stemmer is invoked as shown below: + + >>> from nltk.stem import SnowballStemmer # See which languages are supported + >>> print(" ".join(SnowballStemmer.languages)) # doctest: +NORMALIZE_WHITESPACE + arabic danish dutch english finnish french german hungarian + italian norwegian porter portuguese romanian russian + spanish swedish + >>> stemmer = SnowballStemmer("german") # Choose a language + >>> stemmer.stem("Autobahnen") # Stem a word + 'autobahn' + + Invoking the stemmers that way is useful if you do not know the + language to be stemmed at runtime. Alternatively, if you already know + the language, then you can invoke the language specific stemmer directly: + + >>> from nltk.stem.snowball import GermanStemmer + >>> stemmer = GermanStemmer() + >>> stemmer.stem("Autobahnen") + 'autobahn' + + :param language: The language whose subclass is instantiated. + :type language: str or unicode + :param ignore_stopwords: If set to True, stopwords are + not stemmed and returned unchanged. + Set to False by default. + :type ignore_stopwords: bool + :raise ValueError: If there is no stemmer for the specified + language, a ValueError is raised. 
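+
+ With ignore_stopwords=True, stopwords are passed through unchanged, as
+ this illustrative example shows (it assumes the NLTK English stopwords
+ corpus has been downloaded):
+
+ >>> stemmer = SnowballStemmer("english")
+ >>> stemmer.stem("having")
+ 'have'
+ >>> stemmer2 = SnowballStemmer("english", ignore_stopwords=True)
+ >>> stemmer2.stem("having")
+ 'having'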
+ """ + + languages = ( + "arabic", + "danish", + "dutch", + "english", + "finnish", + "french", + "german", + "hungarian", + "italian", + "norwegian", + "porter", + "portuguese", + "romanian", + "russian", + "spanish", + "swedish", + ) + + def __init__(self, language, ignore_stopwords=False): + if language not in self.languages: + raise ValueError(f"The language '{language}' is not supported.") + stemmerclass = globals()[language.capitalize() + "Stemmer"] + self.stemmer = stemmerclass(ignore_stopwords) + self.stem = self.stemmer.stem + self.stopwords = self.stemmer.stopwords + + def stem(self, token): + return self.stemmer.stem(self, token) + + +class _LanguageSpecificStemmer(StemmerI): + + """ + This helper subclass offers the possibility + to invoke a specific stemmer directly. + This is useful if you already know the language to be stemmed at runtime. + + Create an instance of the Snowball stemmer. + + :param ignore_stopwords: If set to True, stopwords are + not stemmed and returned unchanged. + Set to False by default. + :type ignore_stopwords: bool + """ + + def __init__(self, ignore_stopwords=False): + # The language is the name of the class, minus the final "Stemmer". + language = type(self).__name__.lower() + if language.endswith("stemmer"): + language = language[:-7] + + self.stopwords = set() + if ignore_stopwords: + try: + for word in stopwords.words(language): + self.stopwords.add(word) + except OSError as e: + raise ValueError( + "{!r} has no list of stopwords. Please set" + " 'ignore_stopwords' to 'False'.".format(self) + ) from e + + def __repr__(self): + """ + Print out the string representation of the respective class. + + """ + return f"<{type(self).__name__}>" + + +class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer): + """ + A word stemmer based on the original Porter stemming algorithm. + + Porter, M. \"An algorithm for suffix stripping.\" + Program 14.3 (1980): 130-137. + + A few minor modifications have been made to Porter's basic + algorithm. See the source code of the module + nltk.stem.porter for more information. + + """ + + def __init__(self, ignore_stopwords=False): + _LanguageSpecificStemmer.__init__(self, ignore_stopwords) + porter.PorterStemmer.__init__(self) + + +class _ScandinavianStemmer(_LanguageSpecificStemmer): + + """ + This subclass encapsulates a method for defining the string region R1. + It is used by the Danish, Norwegian, and Swedish stemmer. + + """ + + def _r1_scandinavian(self, word, vowels): + """ + Return the region R1 that is used by the Scandinavian stemmers. + + R1 is the region after the first non-vowel following a vowel, + or is the null region at the end of the word if there is no + such non-vowel. But then R1 is adjusted so that the region + before it contains at least three letters. + + :param word: The word whose region R1 is determined. + :type word: str or unicode + :param vowels: The vowels of the respective language that are + used to determine the region R1. + :type vowels: unicode + :return: the region R1 for the respective word. + :rtype: unicode + :note: This helper method is invoked by the respective stem method of + the subclasses DanishStemmer, NorwegianStemmer, and + SwedishStemmer. It is not to be invoked directly! 
+ + """ + r1 = "" + for i in range(1, len(word)): + if word[i] not in vowels and word[i - 1] in vowels: + if 3 > len(word[: i + 1]) > 0: + r1 = word[3:] + elif len(word[: i + 1]) >= 3: + r1 = word[i + 1 :] + else: + return word + break + + return r1 + + +class _StandardStemmer(_LanguageSpecificStemmer): + + """ + This subclass encapsulates two methods for defining the standard versions + of the string regions R1, R2, and RV. + + """ + + def _r1r2_standard(self, word, vowels): + """ + Return the standard interpretations of the string regions R1 and R2. + + R1 is the region after the first non-vowel following a vowel, + or is the null region at the end of the word if there is no + such non-vowel. + + R2 is the region after the first non-vowel following a vowel + in R1, or is the null region at the end of the word if there + is no such non-vowel. + + :param word: The word whose regions R1 and R2 are determined. + :type word: str or unicode + :param vowels: The vowels of the respective language that are + used to determine the regions R1 and R2. + :type vowels: unicode + :return: (r1,r2), the regions R1 and R2 for the respective word. + :rtype: tuple + :note: This helper method is invoked by the respective stem method of + the subclasses DutchStemmer, FinnishStemmer, + FrenchStemmer, GermanStemmer, ItalianStemmer, + PortugueseStemmer, RomanianStemmer, and SpanishStemmer. + It is not to be invoked directly! + :note: A detailed description of how to define R1 and R2 + can be found at http://snowball.tartarus.org/texts/r1r2.html + + """ + r1 = "" + r2 = "" + for i in range(1, len(word)): + if word[i] not in vowels and word[i - 1] in vowels: + r1 = word[i + 1 :] + break + + for i in range(1, len(r1)): + if r1[i] not in vowels and r1[i - 1] in vowels: + r2 = r1[i + 1 :] + break + + return (r1, r2) + + def _rv_standard(self, word, vowels): + """ + Return the standard interpretation of the string region RV. + + If the second letter is a consonant, RV is the region after the + next following vowel. If the first two letters are vowels, RV is + the region after the next following consonant. Otherwise, RV is + the region after the third letter. + + :param word: The word whose region RV is determined. + :type word: str or unicode + :param vowels: The vowels of the respective language that are + used to determine the region RV. + :type vowels: unicode + :return: the region RV for the respective word. + :rtype: unicode + :note: This helper method is invoked by the respective stem method of + the subclasses ItalianStemmer, PortugueseStemmer, + RomanianStemmer, and SpanishStemmer. It is not to be + invoked directly! 
+ + """ + rv = "" + if len(word) >= 2: + if word[1] not in vowels: + for i in range(2, len(word)): + if word[i] in vowels: + rv = word[i + 1 :] + break + + elif word[0] in vowels and word[1] in vowels: + for i in range(2, len(word)): + if word[i] not in vowels: + rv = word[i + 1 :] + break + else: + rv = word[3:] + + return rv + + +class ArabicStemmer(_StandardStemmer): + """ + https://github.com/snowballstem/snowball/blob/master/algorithms/arabic/stem_Unicode.sbl (Original Algorithm) + The Snowball Arabic light Stemmer + Algorithm: + + - Assem Chelli + - Abdelkrim Aries + - Lakhdar Benzahia + + NLTK Version Author: + + - Lakhdar Benzahia + """ + + # Normalize_pre stes + __vocalization = re.compile( + r"[\u064b-\u064c-\u064d-\u064e-\u064f-\u0650-\u0651-\u0652]" + ) # ً، ٌ، ٍ، َ، ُ، ِ، ّ، ْ + + __kasheeda = re.compile(r"[\u0640]") # ـ tatweel/kasheeda + + __arabic_punctuation_marks = re.compile(r"[\u060C-\u061B-\u061F]") # ؛ ، ؟ + + # Normalize_post + __last_hamzat = ("\u0623", "\u0625", "\u0622", "\u0624", "\u0626") # أ، إ، آ، ؤ، ئ + + # normalize other hamza's + __initial_hamzat = re.compile(r"^[\u0622\u0623\u0625]") # أ، إ، آ + + __waw_hamza = re.compile(r"[\u0624]") # ؤ + + __yeh_hamza = re.compile(r"[\u0626]") # ئ + + __alefat = re.compile(r"[\u0623\u0622\u0625]") # أ، إ، آ + + # Checks + __checks1 = ( + "\u0643\u0627\u0644", + "\u0628\u0627\u0644", # بال، كال + "\u0627\u0644", + "\u0644\u0644", # لل، ال + ) + + __checks2 = ("\u0629", "\u0627\u062a") # ة # female plural ات + + # Suffixes + __suffix_noun_step1a = ( + "\u064a", + "\u0643", + "\u0647", # ي، ك، ه + "\u0646\u0627", + "\u0643\u0645", + "\u0647\u0627", + "\u0647\u0646", + "\u0647\u0645", # نا، كم، ها، هن، هم + "\u0643\u0645\u0627", + "\u0647\u0645\u0627", # كما، هما + ) + + __suffix_noun_step1b = "\u0646" # ن + + __suffix_noun_step2a = ("\u0627", "\u064a", "\u0648") # ا، ي، و + + __suffix_noun_step2b = "\u0627\u062a" # ات + + __suffix_noun_step2c1 = "\u062a" # ت + + __suffix_noun_step2c2 = "\u0629" # ة + + __suffix_noun_step3 = "\u064a" # ي + + __suffix_verb_step1 = ( + "\u0647", + "\u0643", # ه، ك + "\u0646\u064a", + "\u0646\u0627", + "\u0647\u0627", + "\u0647\u0645", # ني، نا، ها، هم + "\u0647\u0646", + "\u0643\u0645", + "\u0643\u0646", # هن، كم، كن + "\u0647\u0645\u0627", + "\u0643\u0645\u0627", + "\u0643\u0645\u0648", # هما، كما، كمو + ) + + __suffix_verb_step2a = ( + "\u062a", + "\u0627", + "\u0646", + "\u064a", # ت، ا، ن، ي + "\u0646\u0627", + "\u062a\u0627", + "\u062a\u0646", # نا، تا، تن Past + "\u0627\u0646", + "\u0648\u0646", + "\u064a\u0646", # ان، هن، ين Present + "\u062a\u0645\u0627", # تما + ) + + __suffix_verb_step2b = ("\u0648\u0627", "\u062a\u0645") # وا، تم + + __suffix_verb_step2c = ("\u0648", "\u062a\u0645\u0648") # و # تمو + + __suffix_all_alef_maqsura = "\u0649" # ى + + # Prefixes + __prefix_step1 = ( + "\u0623", # أ + "\u0623\u0623", + "\u0623\u0622", + "\u0623\u0624", + "\u0623\u0627", + "\u0623\u0625", # أأ، أآ، أؤ، أا، أإ + ) + + __prefix_step2a = ("\u0641\u0627\u0644", "\u0648\u0627\u0644") # فال، وال + + __prefix_step2b = ("\u0641", "\u0648") # ف، و + + __prefix_step3a_noun = ( + "\u0627\u0644", + "\u0644\u0644", # لل، ال + "\u0643\u0627\u0644", + "\u0628\u0627\u0644", # بال، كال + ) + + __prefix_step3b_noun = ( + "\u0628", + "\u0643", + "\u0644", # ب، ك، ل + "\u0628\u0628", + "\u0643\u0643", # بب، كك + ) + + __prefix_step3_verb = ( + "\u0633\u064a", + "\u0633\u062a", + "\u0633\u0646", + "\u0633\u0623", + ) # سي، ست، سن، سأ + + __prefix_step4_verb = ( + "\u064a\u0633\u062a", + "\u0646\u0633\u062a", 
+ "\u062a\u0633\u062a", + ) # يست، نست، تست + + # Suffixes added due to Conjugation Verbs + __conjugation_suffix_verb_1 = ("\u0647", "\u0643") # ه، ك + + __conjugation_suffix_verb_2 = ( + "\u0646\u064a", + "\u0646\u0627", + "\u0647\u0627", # ني، نا، ها + "\u0647\u0645", + "\u0647\u0646", + "\u0643\u0645", # هم، هن، كم + "\u0643\u0646", # كن + ) + __conjugation_suffix_verb_3 = ( + "\u0647\u0645\u0627", + "\u0643\u0645\u0627", + "\u0643\u0645\u0648", + ) # هما، كما، كمو + + __conjugation_suffix_verb_4 = ("\u0627", "\u0646", "\u064a") # ا، ن، ي + + __conjugation_suffix_verb_past = ( + "\u0646\u0627", + "\u062a\u0627", + "\u062a\u0646", + ) # نا، تا، تن + + __conjugation_suffix_verb_present = ( + "\u0627\u0646", + "\u0648\u0646", + "\u064a\u0646", + ) # ان، ون، ين + + # Suffixes added due to derivation Names + __conjugation_suffix_noun_1 = ("\u064a", "\u0643", "\u0647") # ي، ك، ه + + __conjugation_suffix_noun_2 = ( + "\u0646\u0627", + "\u0643\u0645", # نا، كم + "\u0647\u0627", + "\u0647\u0646", + "\u0647\u0645", # ها، هن، هم + ) + + __conjugation_suffix_noun_3 = ( + "\u0643\u0645\u0627", + "\u0647\u0645\u0627", + ) # كما، هما + + # Prefixes added due to derivation Names + __prefixes1 = ("\u0648\u0627", "\u0641\u0627") # فا، وا + + __articles_3len = ("\u0643\u0627\u0644", "\u0628\u0627\u0644") # بال كال + + __articles_2len = ("\u0627\u0644", "\u0644\u0644") # ال لل + + # Prepositions letters + __prepositions1 = ("\u0643", "\u0644") # ك، ل + __prepositions2 = ("\u0628\u0628", "\u0643\u0643") # بب، كك + + is_verb = True + is_noun = True + is_defined = False + + suffixes_verb_step1_success = False + suffix_verb_step2a_success = False + suffix_verb_step2b_success = False + suffix_noun_step2c2_success = False + suffix_noun_step1a_success = False + suffix_noun_step2a_success = False + suffix_noun_step2b_success = False + suffixe_noun_step1b_success = False + prefix_step2a_success = False + prefix_step3a_noun_success = False + prefix_step3b_noun_success = False + + def __normalize_pre(self, token): + """ + :param token: string + :return: normalized token type string + """ + # strip diacritics + token = self.__vocalization.sub("", token) + # strip kasheeda + token = self.__kasheeda.sub("", token) + # strip punctuation marks + token = self.__arabic_punctuation_marks.sub("", token) + return token + + def __normalize_post(self, token): + # normalize last hamza + for hamza in self.__last_hamzat: + if token.endswith(hamza): + token = suffix_replace(token, hamza, "\u0621") + break + # normalize other hamzat + token = self.__initial_hamzat.sub("\u0627", token) + token = self.__waw_hamza.sub("\u0648", token) + token = self.__yeh_hamza.sub("\u064a", token) + token = self.__alefat.sub("\u0627", token) + return token + + def __checks_1(self, token): + for prefix in self.__checks1: + if token.startswith(prefix): + if prefix in self.__articles_3len and len(token) > 4: + self.is_noun = True + self.is_verb = False + self.is_defined = True + break + + if prefix in self.__articles_2len and len(token) > 3: + self.is_noun = True + self.is_verb = False + self.is_defined = True + break + + def __checks_2(self, token): + for suffix in self.__checks2: + if token.endswith(suffix): + if suffix == "\u0629" and len(token) > 2: + self.is_noun = True + self.is_verb = False + break + + if suffix == "\u0627\u062a" and len(token) > 3: + self.is_noun = True + self.is_verb = False + break + + def __Suffix_Verb_Step1(self, token): + for suffix in self.__suffix_verb_step1: + if token.endswith(suffix): + if suffix in 
self.__conjugation_suffix_verb_1 and len(token) >= 4: + token = token[:-1] + self.suffixes_verb_step1_success = True + break + + if suffix in self.__conjugation_suffix_verb_2 and len(token) >= 5: + token = token[:-2] + self.suffixes_verb_step1_success = True + break + + if suffix in self.__conjugation_suffix_verb_3 and len(token) >= 6: + token = token[:-3] + self.suffixes_verb_step1_success = True + break + return token + + def __Suffix_Verb_Step2a(self, token): + for suffix in self.__suffix_verb_step2a: + if token.endswith(suffix) and len(token) > 3: + if suffix == "\u062a" and len(token) >= 4: + token = token[:-1] + self.suffix_verb_step2a_success = True + break + + if suffix in self.__conjugation_suffix_verb_4 and len(token) >= 4: + token = token[:-1] + self.suffix_verb_step2a_success = True + break + + if suffix in self.__conjugation_suffix_verb_past and len(token) >= 5: + token = token[:-2] # past + self.suffix_verb_step2a_success = True + break + + if suffix in self.__conjugation_suffix_verb_present and len(token) > 5: + token = token[:-2] # present + self.suffix_verb_step2a_success = True + break + + if suffix == "\u062a\u0645\u0627" and len(token) >= 6: + token = token[:-3] + self.suffix_verb_step2a_success = True + break + return token + + def __Suffix_Verb_Step2c(self, token): + for suffix in self.__suffix_verb_step2c: + if token.endswith(suffix): + if suffix == "\u062a\u0645\u0648" and len(token) >= 6: + token = token[:-3] + break + + if suffix == "\u0648" and len(token) >= 4: + token = token[:-1] + break + return token + + def __Suffix_Verb_Step2b(self, token): + for suffix in self.__suffix_verb_step2b: + if token.endswith(suffix) and len(token) >= 5: + token = token[:-2] + self.suffix_verb_step2b_success = True + break + return token + + def __Suffix_Noun_Step2c2(self, token): + for suffix in self.__suffix_noun_step2c2: + if token.endswith(suffix) and len(token) >= 3: + token = token[:-1] + self.suffix_noun_step2c2_success = True + break + return token + + def __Suffix_Noun_Step1a(self, token): + for suffix in self.__suffix_noun_step1a: + if token.endswith(suffix): + if suffix in self.__conjugation_suffix_noun_1 and len(token) >= 4: + token = token[:-1] + self.suffix_noun_step1a_success = True + break + + if suffix in self.__conjugation_suffix_noun_2 and len(token) >= 5: + token = token[:-2] + self.suffix_noun_step1a_success = True + break + + if suffix in self.__conjugation_suffix_noun_3 and len(token) >= 6: + token = token[:-3] + self.suffix_noun_step1a_success = True + break + return token + + def __Suffix_Noun_Step2a(self, token): + for suffix in self.__suffix_noun_step2a: + if token.endswith(suffix) and len(token) > 4: + token = token[:-1] + self.suffix_noun_step2a_success = True + break + return token + + def __Suffix_Noun_Step2b(self, token): + for suffix in self.__suffix_noun_step2b: + if token.endswith(suffix) and len(token) >= 5: + token = token[:-2] + self.suffix_noun_step2b_success = True + break + return token + + def __Suffix_Noun_Step2c1(self, token): + for suffix in self.__suffix_noun_step2c1: + if token.endswith(suffix) and len(token) >= 4: + token = token[:-1] + break + return token + + def __Suffix_Noun_Step1b(self, token): + for suffix in self.__suffix_noun_step1b: + if token.endswith(suffix) and len(token) > 5: + token = token[:-1] + self.suffixe_noun_step1b_success = True + break + return token + + def __Suffix_Noun_Step3(self, token): + for suffix in self.__suffix_noun_step3: + if token.endswith(suffix) and len(token) >= 3: + token = token[:-1] # ya' 
nisbiya + break + return token + + def __Suffix_All_alef_maqsura(self, token): + for suffix in self.__suffix_all_alef_maqsura: + if token.endswith(suffix): + token = suffix_replace(token, suffix, "\u064a") + return token + + def __Prefix_Step1(self, token): + for prefix in self.__prefix_step1: + if token.startswith(prefix) and len(token) > 3: + if prefix == "\u0623\u0623": + token = prefix_replace(token, prefix, "\u0623") + break + + elif prefix == "\u0623\u0622": + token = prefix_replace(token, prefix, "\u0622") + break + + elif prefix == "\u0623\u0624": + token = prefix_replace(token, prefix, "\u0624") + break + + elif prefix == "\u0623\u0627": + token = prefix_replace(token, prefix, "\u0627") + break + + elif prefix == "\u0623\u0625": + token = prefix_replace(token, prefix, "\u0625") + break + return token + + def __Prefix_Step2a(self, token): + for prefix in self.__prefix_step2a: + if token.startswith(prefix) and len(token) > 5: + token = token[len(prefix) :] + self.prefix_step2a_success = True + break + return token + + def __Prefix_Step2b(self, token): + for prefix in self.__prefix_step2b: + if token.startswith(prefix) and len(token) > 3: + if token[:2] not in self.__prefixes1: + token = token[len(prefix) :] + break + return token + + def __Prefix_Step3a_Noun(self, token): + for prefix in self.__prefix_step3a_noun: + if token.startswith(prefix): + if prefix in self.__articles_2len and len(token) > 4: + token = token[len(prefix) :] + self.prefix_step3a_noun_success = True + break + if prefix in self.__articles_3len and len(token) > 5: + token = token[len(prefix) :] + break + return token + + def __Prefix_Step3b_Noun(self, token): + for prefix in self.__prefix_step3b_noun: + if token.startswith(prefix): + if len(token) > 3: + if prefix == "\u0628": + token = token[len(prefix) :] + self.prefix_step3b_noun_success = True + break + + if prefix in self.__prepositions2: + token = prefix_replace(token, prefix, prefix[1]) + self.prefix_step3b_noun_success = True + break + + if prefix in self.__prepositions1 and len(token) > 4: + token = token[len(prefix) :] # BUG: cause confusion + self.prefix_step3b_noun_success = True + break + return token + + def __Prefix_Step3_Verb(self, token): + for prefix in self.__prefix_step3_verb: + if token.startswith(prefix) and len(token) > 4: + token = prefix_replace(token, prefix, prefix[1]) + break + return token + + def __Prefix_Step4_Verb(self, token): + for prefix in self.__prefix_step4_verb: + if token.startswith(prefix) and len(token) > 4: + token = prefix_replace(token, prefix, "\u0627\u0633\u062a") + self.is_verb = True + self.is_noun = False + break + return token + + def stem(self, word): + """ + Stem an Arabic word and return the stemmed form. 
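+
+ The token is checked against the noun/verb heuristics, pre-normalized
+ (diacritics, kasheeda and punctuation marks are stripped) and returned
+ unchanged if it is a stopword or at most two characters long; otherwise
+ the verb or noun suffixes and then the common prefixes are removed,
+ and the hamza forms are normalized before the stem is returned.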
+ + :param word: string + :return: string + """ + # set initial values + self.is_verb = True + self.is_noun = True + self.is_defined = False + + self.suffix_verb_step2a_success = False + self.suffix_verb_step2b_success = False + self.suffix_noun_step2c2_success = False + self.suffix_noun_step1a_success = False + self.suffix_noun_step2a_success = False + self.suffix_noun_step2b_success = False + self.suffixe_noun_step1b_success = False + self.prefix_step2a_success = False + self.prefix_step3a_noun_success = False + self.prefix_step3b_noun_success = False + + modified_word = word + # guess type and properties + # checks1 + self.__checks_1(modified_word) + # checks2 + self.__checks_2(modified_word) + # Pre_Normalization + modified_word = self.__normalize_pre(modified_word) + # Avoid stopwords + if modified_word in self.stopwords or len(modified_word) <= 2: + return modified_word + # Start stemming + if self.is_verb: + modified_word = self.__Suffix_Verb_Step1(modified_word) + if self.suffixes_verb_step1_success: + modified_word = self.__Suffix_Verb_Step2a(modified_word) + if not self.suffix_verb_step2a_success: + modified_word = self.__Suffix_Verb_Step2c(modified_word) + # or next TODO: How to deal with or next instruction + else: + modified_word = self.__Suffix_Verb_Step2b(modified_word) + if not self.suffix_verb_step2b_success: + modified_word = self.__Suffix_Verb_Step2a(modified_word) + if self.is_noun: + modified_word = self.__Suffix_Noun_Step2c2(modified_word) + if not self.suffix_noun_step2c2_success: + if not self.is_defined: + modified_word = self.__Suffix_Noun_Step1a(modified_word) + # if self.suffix_noun_step1a_success: + modified_word = self.__Suffix_Noun_Step2a(modified_word) + if not self.suffix_noun_step2a_success: + modified_word = self.__Suffix_Noun_Step2b(modified_word) + if ( + not self.suffix_noun_step2b_success + and not self.suffix_noun_step2a_success + ): + modified_word = self.__Suffix_Noun_Step2c1(modified_word) + # or next ? todo : how to deal with or next + else: + modified_word = self.__Suffix_Noun_Step1b(modified_word) + if self.suffixe_noun_step1b_success: + modified_word = self.__Suffix_Noun_Step2a(modified_word) + if not self.suffix_noun_step2a_success: + modified_word = self.__Suffix_Noun_Step2b(modified_word) + if ( + not self.suffix_noun_step2b_success + and not self.suffix_noun_step2a_success + ): + modified_word = self.__Suffix_Noun_Step2c1(modified_word) + else: + if not self.is_defined: + modified_word = self.__Suffix_Noun_Step2a(modified_word) + modified_word = self.__Suffix_Noun_Step2b(modified_word) + modified_word = self.__Suffix_Noun_Step3(modified_word) + if not self.is_noun and self.is_verb: + modified_word = self.__Suffix_All_alef_maqsura(modified_word) + + # prefixes + modified_word = self.__Prefix_Step1(modified_word) + modified_word = self.__Prefix_Step2a(modified_word) + if not self.prefix_step2a_success: + modified_word = self.__Prefix_Step2b(modified_word) + modified_word = self.__Prefix_Step3a_Noun(modified_word) + if not self.prefix_step3a_noun_success and self.is_noun: + modified_word = self.__Prefix_Step3b_Noun(modified_word) + else: + if not self.prefix_step3b_noun_success and self.is_verb: + modified_word = self.__Prefix_Step3_Verb(modified_word) + modified_word = self.__Prefix_Step4_Verb(modified_word) + + # post normalization stemming + modified_word = self.__normalize_post(modified_word) + stemmed_word = modified_word + return stemmed_word + + +class DanishStemmer(_ScandinavianStemmer): + + """ + The Danish Snowball stemmer. 
+ + :cvar __vowels: The Danish vowels. + :type __vowels: unicode + :cvar __consonants: The Danish consonants. + :type __consonants: unicode + :cvar __double_consonants: The Danish double consonants. + :type __double_consonants: tuple + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Danish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/danish/stemmer.html + + """ + + # The language's vowels and other important characters are defined. + __vowels = "aeiouy\xE6\xE5\xF8" + __consonants = "bcdfghjklmnpqrstvwxz" + __double_consonants = ( + "bb", + "cc", + "dd", + "ff", + "gg", + "hh", + "jj", + "kk", + "ll", + "mm", + "nn", + "pp", + "qq", + "rr", + "ss", + "tt", + "vv", + "ww", + "xx", + "zz", + ) + __s_ending = "abcdfghjklmnoprtvyz\xE5" + + # The different suffixes, divided into the algorithm's steps + # and organized by length, are listed in tuples. + __step1_suffixes = ( + "erendes", + "erende", + "hedens", + "ethed", + "erede", + "heden", + "heder", + "endes", + "ernes", + "erens", + "erets", + "ered", + "ende", + "erne", + "eren", + "erer", + "heds", + "enes", + "eres", + "eret", + "hed", + "ene", + "ere", + "ens", + "ers", + "ets", + "en", + "er", + "es", + "et", + "e", + "s", + ) + __step2_suffixes = ("gd", "dt", "gt", "kt") + __step3_suffixes = ("elig", "l\xF8st", "lig", "els", "ig") + + def stem(self, word): + """ + Stem a Danish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + # Every word is put into lower case for normalization. + word = word.lower() + + if word in self.stopwords: + return word + + # After this, the required regions are generated + # by the respective helper method. + r1 = self._r1_scandinavian(word, self.__vowels) + + # Then the actual stemming process starts. + # Every new step is explicitly indicated + # according to the descriptions on the Snowball website. + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "s": + if word[-2] in self.__s_ending: + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 3 + if r1.endswith("igst"): + word = word[:-2] + r1 = r1[:-2] + + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix == "l\xF8st": + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + + if r1.endswith(self.__step2_suffixes): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 4: Undouble + for double_cons in self.__double_consonants: + if word.endswith(double_cons) and len(word) > 3: + word = word[:-1] + break + + return word + + +class DutchStemmer(_StandardStemmer): + + """ + The Dutch Snowball stemmer. + + :cvar __vowels: The Dutch vowels. + :type __vowels: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. 
+ :type __step1_suffixes: tuple + :cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm. + :type __step3b_suffixes: tuple + :note: A detailed description of the Dutch + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/dutch/stemmer.html + + """ + + __vowels = "aeiouy\xE8" + __step1_suffixes = ("heden", "ene", "en", "se", "s") + __step3b_suffixes = ("baar", "lijk", "bar", "end", "ing", "ig") + + def stem(self, word): + """ + Stem a Dutch word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step2_success = False + + # Vowel accents are removed. + word = ( + word.replace("\xE4", "a") + .replace("\xE1", "a") + .replace("\xEB", "e") + .replace("\xE9", "e") + .replace("\xED", "i") + .replace("\xEF", "i") + .replace("\xF6", "o") + .replace("\xF3", "o") + .replace("\xFC", "u") + .replace("\xFA", "u") + ) + + # An initial 'y', a 'y' after a vowel, + # and an 'i' between self.__vowels is put into upper case. + # As from now these are treated as consonants. + if word.startswith("y"): + word = "".join(("Y", word[1:])) + + for i in range(1, len(word)): + if word[i - 1] in self.__vowels and word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + for i in range(1, len(word) - 1): + if ( + word[i - 1] in self.__vowels + and word[i] == "i" + and word[i + 1] in self.__vowels + ): + word = "".join((word[:i], "I", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # R1 is adjusted so that the region before it + # contains at least 3 letters. + for i in range(1, len(word)): + if word[i] not in self.__vowels and word[i - 1] in self.__vowels: + if 3 > len(word[: i + 1]) > 0: + r1 = word[3:] + elif len(word[: i + 1]) == 0: + return word + break + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "heden": + word = suffix_replace(word, suffix, "heid") + r1 = suffix_replace(r1, suffix, "heid") + if r2.endswith("heden"): + r2 = suffix_replace(r2, suffix, "heid") + + elif ( + suffix in ("ene", "en") + and not word.endswith("heden") + and word[-len(suffix) - 1] not in self.__vowels + and word[-len(suffix) - 3 : -len(suffix)] != "gem" + ): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif ( + suffix in ("se", "s") + and word[-len(suffix) - 1] not in self.__vowels + and word[-len(suffix) - 1] != "j" + ): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 2 + if r1.endswith("e") and word[-2] not in self.__vowels: + step2_success = True + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + # STEP 3a + if r2.endswith("heid") and word[-5] != "c": + word = word[:-4] + r1 = r1[:-4] + r2 = r2[:-4] + + if ( + r1.endswith("en") + and word[-3] not in self.__vowels + and word[-5:-2] != "gem" + ): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + # STEP 3b: Derivational suffixes + for suffix in self.__step3b_suffixes: + if r2.endswith(suffix): + if suffix in ("end", "ing"): + word = word[:-3] + r2 = r2[:-3] + + if r2.endswith("ig") and word[-3] != "e": + word = word[:-2] + 
else: + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + + elif suffix == "ig" and word[-3] != "e": + word = word[:-2] + + elif suffix == "lijk": + word = word[:-4] + r1 = r1[:-4] + + if r1.endswith("e") and word[-2] not in self.__vowels: + word = word[:-1] + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + + elif suffix == "baar": + word = word[:-4] + + elif suffix == "bar" and step2_success: + word = word[:-3] + break + + # STEP 4: Undouble vowel + if len(word) >= 4: + if word[-1] not in self.__vowels and word[-1] != "I": + if word[-3:-1] in ("aa", "ee", "oo", "uu"): + if word[-4] not in self.__vowels: + word = "".join((word[:-3], word[-3], word[-1])) + + # All occurrences of 'I' and 'Y' are put back into lower case. + word = word.replace("I", "i").replace("Y", "y") + + return word + + +class EnglishStemmer(_StandardStemmer): + + """ + The English Snowball stemmer. + + :cvar __vowels: The English vowels. + :type __vowels: unicode + :cvar __double_consonants: The English double consonants. + :type __double_consonants: tuple + :cvar __li_ending: Letters that may directly appear before a word final 'li'. + :type __li_ending: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. + :type __step0_suffixes: tuple + :cvar __step1a_suffixes: Suffixes to be deleted in step 1a of the algorithm. + :type __step1a_suffixes: tuple + :cvar __step1b_suffixes: Suffixes to be deleted in step 1b of the algorithm. + :type __step1b_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm. + :type __step5_suffixes: tuple + :cvar __special_words: A dictionary containing words + which have to be stemmed specially. 
+ :type __special_words: dict + :note: A detailed description of the English + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/english/stemmer.html + """ + + __vowels = "aeiouy" + __double_consonants = ("bb", "dd", "ff", "gg", "mm", "nn", "pp", "rr", "tt") + __li_ending = "cdeghkmnrt" + __step0_suffixes = ("'s'", "'s", "'") + __step1a_suffixes = ("sses", "ied", "ies", "us", "ss", "s") + __step1b_suffixes = ("eedly", "ingly", "edly", "eed", "ing", "ed") + __step2_suffixes = ( + "ization", + "ational", + "fulness", + "ousness", + "iveness", + "tional", + "biliti", + "lessli", + "entli", + "ation", + "alism", + "aliti", + "ousli", + "iviti", + "fulli", + "enci", + "anci", + "abli", + "izer", + "ator", + "alli", + "bli", + "ogi", + "li", + ) + __step3_suffixes = ( + "ational", + "tional", + "alize", + "icate", + "iciti", + "ative", + "ical", + "ness", + "ful", + ) + __step4_suffixes = ( + "ement", + "ance", + "ence", + "able", + "ible", + "ment", + "ant", + "ent", + "ism", + "ate", + "iti", + "ous", + "ive", + "ize", + "ion", + "al", + "er", + "ic", + ) + __step5_suffixes = ("e", "l") + __special_words = { + "skis": "ski", + "skies": "sky", + "dying": "die", + "lying": "lie", + "tying": "tie", + "idly": "idl", + "gently": "gentl", + "ugly": "ugli", + "early": "earli", + "only": "onli", + "singly": "singl", + "sky": "sky", + "news": "news", + "howe": "howe", + "atlas": "atlas", + "cosmos": "cosmos", + "bias": "bias", + "andes": "andes", + "inning": "inning", + "innings": "inning", + "outing": "outing", + "outings": "outing", + "canning": "canning", + "cannings": "canning", + "herring": "herring", + "herrings": "herring", + "earring": "earring", + "earrings": "earring", + "proceed": "proceed", + "proceeds": "proceed", + "proceeded": "proceed", + "proceeding": "proceed", + "exceed": "exceed", + "exceeds": "exceed", + "exceeded": "exceed", + "exceeding": "exceed", + "succeed": "succeed", + "succeeds": "succeed", + "succeeded": "succeed", + "succeeding": "succeed", + } + + def stem(self, word): + + """ + Stem an English word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords or len(word) <= 2: + return word + + elif word in self.__special_words: + return self.__special_words[word] + + # Map the different apostrophe characters to a single consistent one + word = ( + word.replace("\u2019", "\x27") + .replace("\u2018", "\x27") + .replace("\u201B", "\x27") + ) + + if word.startswith("\x27"): + word = word[1:] + + if word.startswith("y"): + word = "".join(("Y", word[1:])) + + for i in range(1, len(word)): + if word[i - 1] in self.__vowels and word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + step1a_vowel_found = False + step1b_vowel_found = False + + r1 = "" + r2 = "" + + if word.startswith(("gener", "commun", "arsen")): + if word.startswith(("gener", "arsen")): + r1 = word[5:] + else: + r1 = word[6:] + + for i in range(1, len(r1)): + if r1[i] not in self.__vowels and r1[i - 1] in self.__vowels: + r2 = r1[i + 1 :] + break + else: + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # STEP 0 + for suffix in self.__step0_suffixes: + if word.endswith(suffix): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 1a + for suffix in self.__step1a_suffixes: + if word.endswith(suffix): + + if suffix == "sses": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix in ("ied", "ies"): + if len(word[: -len(suffix)]) > 1: + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + else: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif suffix == "s": + for letter in word[:-2]: + if letter in self.__vowels: + step1a_vowel_found = True + break + + if step1a_vowel_found: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + break + + # STEP 1b + for suffix in self.__step1b_suffixes: + if word.endswith(suffix): + if suffix in ("eed", "eedly"): + + if r1.endswith(suffix): + word = suffix_replace(word, suffix, "ee") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ee") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ee") + else: + r2 = "" + else: + for letter in word[: -len(suffix)]: + if letter in self.__vowels: + step1b_vowel_found = True + break + + if step1b_vowel_found: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + + if word.endswith(("at", "bl", "iz")): + word = "".join((word, "e")) + r1 = "".join((r1, "e")) + + if len(word) > 5 or len(r1) >= 3: + r2 = "".join((r2, "e")) + + elif word.endswith(self.__double_consonants): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif ( + r1 == "" + and len(word) >= 3 + and word[-1] not in self.__vowels + and word[-1] not in "wxY" + and word[-2] in self.__vowels + and word[-3] not in self.__vowels + ) or ( + r1 == "" + and len(word) == 2 + and word[0] in self.__vowels + and word[1] not in self.__vowels + ): + + word = "".join((word, "e")) + + if len(r1) > 0: + r1 = "".join((r1, "e")) + + if len(r2) > 0: + r2 = "".join((r2, "e")) + break + + # STEP 1c + if len(word) > 2 and word[-1] in "yY" and word[-2] not in self.__vowels: + word = "".join((word[:-1], "i")) + if len(r1) >= 1: + r1 = "".join((r1[:-1], "i")) + else: + r1 = "" + + if len(r2) >= 1: + r2 = "".join((r2[:-1], "i")) + else: + r2 = "" + + # STEP 2 + for suffix in self.__step2_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix == "tional": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix in ("enci", "anci", "abli"): + word = "".join((word[:-1], "e")) + + if len(r1) >= 1: + r1 = "".join((r1[:-1], 
"e")) + else: + r1 = "" + + if len(r2) >= 1: + r2 = "".join((r2[:-1], "e")) + else: + r2 = "" + + elif suffix == "entli": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix in ("izer", "ization"): + word = suffix_replace(word, suffix, "ize") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ize") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ize") + else: + r2 = "" + + elif suffix in ("ational", "ation", "ator"): + word = suffix_replace(word, suffix, "ate") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ate") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ate") + else: + r2 = "e" + + elif suffix in ("alism", "aliti", "alli"): + word = suffix_replace(word, suffix, "al") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "al") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "al") + else: + r2 = "" + + elif suffix == "fulness": + word = word[:-4] + r1 = r1[:-4] + r2 = r2[:-4] + + elif suffix in ("ousli", "ousness"): + word = suffix_replace(word, suffix, "ous") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ous") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ous") + else: + r2 = "" + + elif suffix in ("iveness", "iviti"): + word = suffix_replace(word, suffix, "ive") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ive") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ive") + else: + r2 = "e" + + elif suffix in ("biliti", "bli"): + word = suffix_replace(word, suffix, "ble") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ble") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ble") + else: + r2 = "" + + elif suffix == "ogi" and word[-4] == "l": + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif suffix in ("fulli", "lessli"): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "li" and word[-3] in self.__li_ending: + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + break + + # STEP 3 + for suffix in self.__step3_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix == "tional": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "ational": + word = suffix_replace(word, suffix, "ate") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ate") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ate") + else: + r2 = "" + + elif suffix == "alize": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + + elif suffix in ("icate", "iciti", "ical"): + word = suffix_replace(word, suffix, "ic") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ic") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ic") + else: + r2 = "" + + elif suffix in ("ful", "ness"): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + + elif suffix == "ative" and r2.endswith(suffix): + word = word[:-5] + r1 = r1[:-5] + r2 = r2[:-5] + break + + # STEP 4 + for suffix in self.__step4_suffixes: + if word.endswith(suffix): + if r2.endswith(suffix): + if suffix == "ion": + if word[-4] in "st": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 5 + if r2.endswith("l") and word[-2] == "l": + word = word[:-1] + elif 
r2.endswith("e"): + word = word[:-1] + elif r1.endswith("e"): + if len(word) >= 4 and ( + word[-2] in self.__vowels + or word[-2] in "wxY" + or word[-3] not in self.__vowels + or word[-4] in self.__vowels + ): + word = word[:-1] + + word = word.replace("Y", "y") + + return word + + +class FinnishStemmer(_StandardStemmer): + + """ + The Finnish Snowball stemmer. + + :cvar __vowels: The Finnish vowels. + :type __vowels: unicode + :cvar __restricted_vowels: A subset of the Finnish vowels. + :type __restricted_vowels: unicode + :cvar __long_vowels: The Finnish vowels in their long forms. + :type __long_vowels: tuple + :cvar __consonants: The Finnish consonants. + :type __consonants: unicode + :cvar __double_consonants: The Finnish double consonants. + :type __double_consonants: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :note: A detailed description of the Finnish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/finnish/stemmer.html + """ + + __vowels = "aeiouy\xE4\xF6" + __restricted_vowels = "aeiou\xE4\xF6" + __long_vowels = ("aa", "ee", "ii", "oo", "uu", "\xE4\xE4", "\xF6\xF6") + __consonants = "bcdfghjklmnpqrstvwxz" + __double_consonants = ( + "bb", + "cc", + "dd", + "ff", + "gg", + "hh", + "jj", + "kk", + "ll", + "mm", + "nn", + "pp", + "qq", + "rr", + "ss", + "tt", + "vv", + "ww", + "xx", + "zz", + ) + __step1_suffixes = ( + "kaan", + "k\xE4\xE4n", + "sti", + "kin", + "han", + "h\xE4n", + "ko", + "k\xF6", + "pa", + "p\xE4", + ) + __step2_suffixes = ("nsa", "ns\xE4", "mme", "nne", "si", "ni", "an", "\xE4n", "en") + __step3_suffixes = ( + "siin", + "tten", + "seen", + "han", + "hen", + "hin", + "hon", + "h\xE4n", + "h\xF6n", + "den", + "tta", + "tt\xE4", + "ssa", + "ss\xE4", + "sta", + "st\xE4", + "lla", + "ll\xE4", + "lta", + "lt\xE4", + "lle", + "ksi", + "ine", + "ta", + "t\xE4", + "na", + "n\xE4", + "a", + "\xE4", + "n", + ) + __step4_suffixes = ( + "impi", + "impa", + "imp\xE4", + "immi", + "imma", + "imm\xE4", + "mpi", + "mpa", + "mp\xE4", + "mmi", + "mma", + "mm\xE4", + "eja", + "ej\xE4", + ) + + def stem(self, word): + """ + Stem a Finnish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step3_success = False + + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # STEP 1: Particles etc. 
+ for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "sti": + if suffix in r2: + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + else: + if word[-len(suffix) - 1] in "ntaeiouy\xE4\xF6": + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 2: Possessives + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + if suffix == "si": + if word[-3] != "k": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "ni": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + if word.endswith("kse"): + word = suffix_replace(word, "kse", "ksi") + + if r1.endswith("kse"): + r1 = suffix_replace(r1, "kse", "ksi") + + if r2.endswith("kse"): + r2 = suffix_replace(r2, "kse", "ksi") + + elif suffix == "an": + if word[-4:-2] in ("ta", "na") or word[-5:-2] in ( + "ssa", + "sta", + "lla", + "lta", + ): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "\xE4n": + if word[-4:-2] in ("t\xE4", "n\xE4") or word[-5:-2] in ( + "ss\xE4", + "st\xE4", + "ll\xE4", + "lt\xE4", + ): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "en": + if word[-5:-2] in ("lle", "ine"): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + else: + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + break + + # STEP 3: Cases + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix in ("han", "hen", "hin", "hon", "h\xE4n", "h\xF6n"): + if ( + (suffix == "han" and word[-4] == "a") + or (suffix == "hen" and word[-4] == "e") + or (suffix == "hin" and word[-4] == "i") + or (suffix == "hon" and word[-4] == "o") + or (suffix == "h\xE4n" and word[-4] == "\xE4") + or (suffix == "h\xF6n" and word[-4] == "\xF6") + ): + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + step3_success = True + + elif suffix in ("siin", "den", "tten"): + if ( + word[-len(suffix) - 1] == "i" + and word[-len(suffix) - 2] in self.__restricted_vowels + ): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + step3_success = True + else: + continue + + elif suffix == "seen": + if word[-6:-4] in self.__long_vowels: + word = word[:-4] + r1 = r1[:-4] + r2 = r2[:-4] + step3_success = True + else: + continue + + elif suffix in ("a", "\xE4"): + if word[-2] in self.__vowels and word[-3] in self.__consonants: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + step3_success = True + + elif suffix in ("tta", "tt\xE4"): + if word[-4] == "e": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + step3_success = True + + elif suffix == "n": + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + step3_success = True + + if word[-2:] == "ie" or word[-2:] in self.__long_vowels: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + step3_success = True + break + + # STEP 4: Other endings + for suffix in self.__step4_suffixes: + if r2.endswith(suffix): + if suffix in ("mpi", "mpa", "mp\xE4", "mmi", "mma", "mm\xE4"): + if word[-5:-3] != "po": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 5: Plurals + if step3_success and len(r1) >= 1 and r1[-1] in "ij": + word = word[:-1] + r1 = r1[:-1] + + elif ( + not step3_success + and len(r1) >= 2 + and r1[-1] == "t" + and r1[-2] in self.__vowels + ): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + if r2.endswith("imma"): + word = word[:-4] + r1 = r1[:-4] + elif r2.endswith("mma") and r2[-5:-3] 
!= "po": + word = word[:-3] + r1 = r1[:-3] + + # STEP 6: Tidying up + if r1[-2:] in self.__long_vowels: + word = word[:-1] + r1 = r1[:-1] + + if len(r1) >= 2 and r1[-2] in self.__consonants and r1[-1] in "a\xE4ei": + word = word[:-1] + r1 = r1[:-1] + + if r1.endswith(("oj", "uj")): + word = word[:-1] + r1 = r1[:-1] + + if r1.endswith("jo"): + word = word[:-1] + r1 = r1[:-1] + + # If the word ends with a double consonant + # followed by zero or more vowels, the last consonant is removed. + for i in range(1, len(word)): + if word[-i] in self.__vowels: + continue + else: + if i == 1: + if word[-i - 1 :] in self.__double_consonants: + word = word[:-1] + else: + if word[-i - 1 : -i + 1] in self.__double_consonants: + word = "".join((word[:-i], word[-i + 1 :])) + break + + return word + + +class FrenchStemmer(_StandardStemmer): + + """ + The French Snowball stemmer. + + :cvar __vowels: The French vowels. + :type __vowels: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm. + :type __step2a_suffixes: tuple + :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm. + :type __step2b_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :note: A detailed description of the French + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/french/stemmer.html + """ + + __vowels = "aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9" + __step1_suffixes = ( + "issements", + "issement", + "atrices", + "atrice", + "ateurs", + "ations", + "logies", + "usions", + "utions", + "ements", + "amment", + "emment", + "ances", + "iqUes", + "ismes", + "ables", + "istes", + "ateur", + "ation", + "logie", + "usion", + "ution", + "ences", + "ement", + "euses", + "ments", + "ance", + "iqUe", + "isme", + "able", + "iste", + "ence", + "it\xE9s", + "ives", + "eaux", + "euse", + "ment", + "eux", + "it\xE9", + "ive", + "ifs", + "aux", + "if", + ) + __step2a_suffixes = ( + "issaIent", + "issantes", + "iraIent", + "issante", + "issants", + "issions", + "irions", + "issais", + "issait", + "issant", + "issent", + "issiez", + "issons", + "irais", + "irait", + "irent", + "iriez", + "irons", + "iront", + "isses", + "issez", + "\xEEmes", + "\xEEtes", + "irai", + "iras", + "irez", + "isse", + "ies", + "ira", + "\xEEt", + "ie", + "ir", + "is", + "it", + "i", + ) + __step2b_suffixes = ( + "eraIent", + "assions", + "erions", + "assent", + "assiez", + "\xE8rent", + "erais", + "erait", + "eriez", + "erons", + "eront", + "aIent", + "antes", + "asses", + "ions", + "erai", + "eras", + "erez", + "\xE2mes", + "\xE2tes", + "ante", + "ants", + "asse", + "\xE9es", + "era", + "iez", + "ais", + "ait", + "ant", + "\xE9e", + "\xE9s", + "er", + "ez", + "\xE2t", + "ai", + "as", + "\xE9", + "a", + ) + __step4_suffixes = ("i\xE8re", "I\xE8re", "ion", "ier", "Ier", "e", "\xEB") + + def stem(self, word): + """ + Stem a French word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + rv_ending_found = False + step2a_success = False + step2b_success = False + + # Every occurrence of 'u' after 'q' is put into upper case. 
+ for i in range(1, len(word)): + if word[i - 1] == "q" and word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + # Every occurrence of 'u' and 'i' + # between vowels is put into upper case. + # Every occurrence of 'y' preceded or + # followed by a vowel is also put into upper case. + for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "i": + word = "".join((word[:i], "I", word[i + 1 :])) + + if word[i - 1] in self.__vowels or word[i + 1] in self.__vowels: + if word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self.__rv_french(word, self.__vowels) + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix == "eaux": + word = word[:-1] + step1_success = True + + elif suffix in ("euse", "euses"): + if suffix in r2: + word = word[: -len(suffix)] + step1_success = True + + elif suffix in r1: + word = suffix_replace(word, suffix, "eux") + step1_success = True + + elif suffix in ("ement", "ements") and suffix in rv: + word = word[: -len(suffix)] + step1_success = True + + if word[-2:] == "iv" and "iv" in r2: + word = word[:-2] + + if word[-2:] == "at" and "at" in r2: + word = word[:-2] + + elif word[-3:] == "eus": + if "eus" in r2: + word = word[:-3] + elif "eus" in r1: + word = "".join((word[:-1], "x")) + + elif word[-3:] in ("abl", "iqU"): + if "abl" in r2 or "iqU" in r2: + word = word[:-3] + + elif word[-3:] in ("i\xE8r", "I\xE8r"): + if "i\xE8r" in rv or "I\xE8r" in rv: + word = "".join((word[:-3], "i")) + + elif suffix == "amment" and suffix in rv: + word = suffix_replace(word, "amment", "ant") + rv = suffix_replace(rv, "amment", "ant") + rv_ending_found = True + + elif suffix == "emment" and suffix in rv: + word = suffix_replace(word, "emment", "ent") + rv_ending_found = True + + elif ( + suffix in ("ment", "ments") + and suffix in rv + and not rv.startswith(suffix) + and rv[rv.rindex(suffix) - 1] in self.__vowels + ): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + rv_ending_found = True + + elif suffix == "aux" and suffix in r1: + word = "".join((word[:-2], "l")) + step1_success = True + + elif ( + suffix in ("issement", "issements") + and suffix in r1 + and word[-len(suffix) - 1] not in self.__vowels + ): + word = word[: -len(suffix)] + step1_success = True + + elif ( + suffix + in ( + "ance", + "iqUe", + "isme", + "able", + "iste", + "eux", + "ances", + "iqUes", + "ismes", + "ables", + "istes", + ) + and suffix in r2 + ): + word = word[: -len(suffix)] + step1_success = True + + elif ( + suffix + in ("atrice", "ateur", "ation", "atrices", "ateurs", "ations") + and suffix in r2 + ): + word = word[: -len(suffix)] + step1_success = True + + if word[-2:] == "ic": + if "ic" in r2: + word = word[:-2] + else: + word = "".join((word[:-2], "iqU")) + + elif suffix in ("logie", "logies") and suffix in r2: + word = suffix_replace(word, suffix, "log") + step1_success = True + + elif suffix in ("usion", "ution", "usions", "utions") and suffix in r2: + word = suffix_replace(word, suffix, "u") + step1_success = True + + elif suffix in ("ence", "ences") and suffix in r2: + word = suffix_replace(word, suffix, "ent") + step1_success = True + + elif suffix in ("it\xE9", "it\xE9s") and suffix in r2: + word = word[: -len(suffix)] + step1_success = True + + if word[-4:] == "abil": + if "abil" in r2: + word = 
word[:-4] + else: + word = "".join((word[:-2], "l")) + + elif word[-2:] == "ic": + if "ic" in r2: + word = word[:-2] + else: + word = "".join((word[:-2], "iqU")) + + elif word[-2:] == "iv": + if "iv" in r2: + word = word[:-2] + + elif suffix in ("if", "ive", "ifs", "ives") and suffix in r2: + word = word[: -len(suffix)] + step1_success = True + + if word[-2:] == "at" and "at" in r2: + word = word[:-2] + + if word[-2:] == "ic": + if "ic" in r2: + word = word[:-2] + else: + word = "".join((word[:-2], "iqU")) + break + + # STEP 2a: Verb suffixes beginning 'i' + if not step1_success or rv_ending_found: + for suffix in self.__step2a_suffixes: + if word.endswith(suffix): + if ( + suffix in rv + and len(rv) > len(suffix) + and rv[rv.rindex(suffix) - 1] not in self.__vowels + ): + word = word[: -len(suffix)] + step2a_success = True + break + + # STEP 2b: Other verb suffixes + if not step2a_success: + for suffix in self.__step2b_suffixes: + if rv.endswith(suffix): + if suffix == "ions" and "ions" in r2: + word = word[:-4] + step2b_success = True + + elif suffix in ( + "eraIent", + "erions", + "\xE8rent", + "erais", + "erait", + "eriez", + "erons", + "eront", + "erai", + "eras", + "erez", + "\xE9es", + "era", + "iez", + "\xE9e", + "\xE9s", + "er", + "ez", + "\xE9", + ): + word = word[: -len(suffix)] + step2b_success = True + + elif suffix in ( + "assions", + "assent", + "assiez", + "aIent", + "antes", + "asses", + "\xE2mes", + "\xE2tes", + "ante", + "ants", + "asse", + "ais", + "ait", + "ant", + "\xE2t", + "ai", + "as", + "a", + ): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + step2b_success = True + if rv.endswith("e"): + word = word[:-1] + break + + # STEP 3 + if step1_success or step2a_success or step2b_success: + if word[-1] == "Y": + word = "".join((word[:-1], "i")) + elif word[-1] == "\xE7": + word = "".join((word[:-1], "c")) + + # STEP 4: Residual suffixes + else: + if len(word) >= 2 and word[-1] == "s" and word[-2] not in "aiou\xE8s": + word = word[:-1] + + for suffix in self.__step4_suffixes: + if word.endswith(suffix): + if suffix in rv: + if suffix == "ion" and suffix in r2 and rv[-4] in "st": + word = word[:-3] + + elif suffix in ("ier", "i\xE8re", "Ier", "I\xE8re"): + word = suffix_replace(word, suffix, "i") + + elif suffix == "e": + word = word[:-1] + + elif suffix == "\xEB" and word[-3:-1] == "gu": + word = word[:-1] + break + + # STEP 5: Undouble + if word.endswith(("enn", "onn", "ett", "ell", "eill")): + word = word[:-1] + + # STEP 6: Un-accent + for i in range(1, len(word)): + if word[-i] not in self.__vowels: + i += 1 + else: + if i != 1 and word[-i] in ("\xE9", "\xE8"): + word = "".join((word[:-i], "e", word[-i + 1 :])) + break + + word = word.replace("I", "i").replace("U", "u").replace("Y", "y") + + return word + + def __rv_french(self, word, vowels): + """ + Return the region RV that is used by the French stemmer. + + If the word begins with two vowels, RV is the region after + the third letter. Otherwise, it is the region after the first + vowel not at the beginning of the word, or the end of the word + if these positions cannot be found. (Exceptionally, u'par', + u'col' or u'tap' at the beginning of a word is also taken to + define RV as the region to their right.) + + :param word: The French word whose region RV is determined. + :type word: str or unicode + :param vowels: The French vowels that are used to determine + the region RV. + :type vowels: unicode + :return: the region RV for the respective French word. 
+ :rtype: unicode + :note: This helper method is invoked by the stem method of + the subclass FrenchStemmer. It is not to be invoked directly! + + """ + rv = "" + if len(word) >= 2: + if word.startswith(("par", "col", "tap")) or ( + word[0] in vowels and word[1] in vowels + ): + rv = word[3:] + else: + for i in range(1, len(word)): + if word[i] in vowels: + rv = word[i + 1 :] + break + + return rv + + +class GermanStemmer(_StandardStemmer): + + """ + The German Snowball stemmer. + + :cvar __vowels: The German vowels. + :type __vowels: unicode + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __st_ending: Letter that may directly appear before a word final 'st'. + :type __st_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the German + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/german/stemmer.html + + """ + + __vowels = "aeiouy\xE4\xF6\xFC" + __s_ending = "bdfghklmnrt" + __st_ending = "bdfghklmnt" + + __step1_suffixes = ("ern", "em", "er", "en", "es", "e", "s") + __step2_suffixes = ("est", "en", "er", "st") + __step3_suffixes = ("isch", "lich", "heit", "keit", "end", "ung", "ig", "ik") + + def stem(self, word): + """ + Stem a German word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + word = word.replace("\xDF", "ss") + + # Every occurrence of 'u' and 'y' + # between vowels is put into upper case. + for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # R1 is adjusted so that the region before it + # contains at least 3 letters. 
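+ # Illustrative note (example chosen here, not from the Snowball test
+ # data): for "arbeiten" the standard R1 would start after "ar", but the
+ # adjustment below requires at least three letters before R1, so R1
+ # becomes "eiten" instead of "beiten".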
+ for i in range(1, len(word)): + if word[i] not in self.__vowels and word[i - 1] in self.__vowels: + if 3 > len(word[: i + 1]) > 0: + r1 = word[3:] + elif len(word[: i + 1]) == 0: + return word + break + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if ( + suffix in ("en", "es", "e") + and word[-len(suffix) - 4 : -len(suffix)] == "niss" + ): + word = word[: -len(suffix) - 1] + r1 = r1[: -len(suffix) - 1] + r2 = r2[: -len(suffix) - 1] + + elif suffix == "s": + if word[-2] in self.__s_ending: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + if suffix == "st": + if word[-3] in self.__st_ending and len(word[:-3]) >= 3: + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 3: Derivational suffixes + for suffix in self.__step3_suffixes: + if r2.endswith(suffix): + if suffix in ("end", "ung"): + if ( + "ig" in r2[-len(suffix) - 2 : -len(suffix)] + and "e" not in r2[-len(suffix) - 3 : -len(suffix) - 2] + ): + word = word[: -len(suffix) - 2] + else: + word = word[: -len(suffix)] + + elif ( + suffix in ("ig", "ik", "isch") + and "e" not in r2[-len(suffix) - 1 : -len(suffix)] + ): + word = word[: -len(suffix)] + + elif suffix in ("lich", "heit"): + if ( + "er" in r1[-len(suffix) - 2 : -len(suffix)] + or "en" in r1[-len(suffix) - 2 : -len(suffix)] + ): + word = word[: -len(suffix) - 2] + else: + word = word[: -len(suffix)] + + elif suffix == "keit": + if "lich" in r2[-len(suffix) - 4 : -len(suffix)]: + word = word[: -len(suffix) - 4] + + elif "ig" in r2[-len(suffix) - 2 : -len(suffix)]: + word = word[: -len(suffix) - 2] + else: + word = word[: -len(suffix)] + break + + # Umlaut accents are removed and + # 'u' and 'y' are put back into lower case. + word = ( + word.replace("\xE4", "a") + .replace("\xF6", "o") + .replace("\xFC", "u") + .replace("U", "u") + .replace("Y", "y") + ) + + return word + + +class HungarianStemmer(_LanguageSpecificStemmer): + + """ + The Hungarian Snowball stemmer. + + :cvar __vowels: The Hungarian vowels. + :type __vowels: unicode + :cvar __digraphs: The Hungarian digraphs. + :type __digraphs: tuple + :cvar __double_consonants: The Hungarian double consonants. + :type __double_consonants: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm. + :type __step5_suffixes: tuple + :cvar __step6_suffixes: Suffixes to be deleted in step 6 of the algorithm. + :type __step6_suffixes: tuple + :cvar __step7_suffixes: Suffixes to be deleted in step 7 of the algorithm. + :type __step7_suffixes: tuple + :cvar __step8_suffixes: Suffixes to be deleted in step 8 of the algorithm. + :type __step8_suffixes: tuple + :cvar __step9_suffixes: Suffixes to be deleted in step 9 of the algorithm. 
+ :type __step9_suffixes: tuple + :note: A detailed description of the Hungarian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/hungarian/stemmer.html + + """ + + __vowels = "aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB" + __digraphs = ("cs", "dz", "dzs", "gy", "ly", "ny", "ty", "zs") + __double_consonants = ( + "bb", + "cc", + "ccs", + "dd", + "ff", + "gg", + "ggy", + "jj", + "kk", + "ll", + "lly", + "mm", + "nn", + "nny", + "pp", + "rr", + "ss", + "ssz", + "tt", + "tty", + "vv", + "zz", + "zzs", + ) + + __step1_suffixes = ("al", "el") + __step2_suffixes = ( + "k\xE9ppen", + "onk\xE9nt", + "enk\xE9nt", + "ank\xE9nt", + "k\xE9pp", + "k\xE9nt", + "ban", + "ben", + "nak", + "nek", + "val", + "vel", + "t\xF3l", + "t\xF5l", + "r\xF3l", + "r\xF5l", + "b\xF3l", + "b\xF5l", + "hoz", + "hez", + "h\xF6z", + "n\xE1l", + "n\xE9l", + "\xE9rt", + "kor", + "ba", + "be", + "ra", + "re", + "ig", + "at", + "et", + "ot", + "\xF6t", + "ul", + "\xFCl", + "v\xE1", + "v\xE9", + "en", + "on", + "an", + "\xF6n", + "n", + "t", + ) + __step3_suffixes = ("\xE1nk\xE9nt", "\xE1n", "\xE9n") + __step4_suffixes = ( + "astul", + "est\xFCl", + "\xE1stul", + "\xE9st\xFCl", + "stul", + "st\xFCl", + ) + __step5_suffixes = ("\xE1", "\xE9") + __step6_suffixes = ( + "ok\xE9", + "\xF6k\xE9", + "ak\xE9", + "ek\xE9", + "\xE1k\xE9", + "\xE1\xE9i", + "\xE9k\xE9", + "\xE9\xE9i", + "k\xE9", + "\xE9i", + "\xE9\xE9", + "\xE9", + ) + __step7_suffixes = ( + "\xE1juk", + "\xE9j\xFCk", + "\xFCnk", + "unk", + "juk", + "j\xFCk", + "\xE1nk", + "\xE9nk", + "nk", + "uk", + "\xFCk", + "em", + "om", + "am", + "od", + "ed", + "ad", + "\xF6d", + "ja", + "je", + "\xE1m", + "\xE1d", + "\xE9m", + "\xE9d", + "m", + "d", + "a", + "e", + "o", + "\xE1", + "\xE9", + ) + __step8_suffixes = ( + "jaitok", + "jeitek", + "jaink", + "jeink", + "aitok", + "eitek", + "\xE1itok", + "\xE9itek", + "jaim", + "jeim", + "jaid", + "jeid", + "eink", + "aink", + "itek", + "jeik", + "jaik", + "\xE1ink", + "\xE9ink", + "aim", + "eim", + "aid", + "eid", + "jai", + "jei", + "ink", + "aik", + "eik", + "\xE1im", + "\xE1id", + "\xE1ik", + "\xE9im", + "\xE9id", + "\xE9ik", + "im", + "id", + "ai", + "ei", + "ik", + "\xE1i", + "\xE9i", + "i", + ) + __step9_suffixes = ("\xE1k", "\xE9k", "\xF6k", "ok", "ek", "ak", "k") + + def stem(self, word): + """ + Stem an Hungarian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs) + + # STEP 1: Remove instrumental case + if r1.endswith(self.__step1_suffixes): + for double_cons in self.__double_consonants: + if word[-2 - len(double_cons) : -2] == double_cons: + word = "".join((word[:-4], word[-3])) + + if r1[-2 - len(double_cons) : -2] == double_cons: + r1 = "".join((r1[:-4], r1[-3])) + break + + # STEP 2: Remove frequent cases + for suffix in self.__step2_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + + if r1.endswith("\xE1"): + word = "".join((word[:-1], "a")) + r1 = suffix_replace(r1, "\xE1", "a") + + elif r1.endswith("\xE9"): + word = "".join((word[:-1], "e")) + r1 = suffix_replace(r1, "\xE9", "e") + break + + # STEP 3: Remove special cases + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix == "\xE9n": + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + break + + # STEP 4: Remove other cases + for suffix in self.__step4_suffixes: + if r1.endswith(suffix): + if suffix == "\xE1stul": + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix == "\xE9st\xFCl": + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 5: Remove factive case + for suffix in self.__step5_suffixes: + if r1.endswith(suffix): + for double_cons in self.__double_consonants: + if word[-1 - len(double_cons) : -1] == double_cons: + word = "".join((word[:-3], word[-2])) + + if r1[-1 - len(double_cons) : -1] == double_cons: + r1 = "".join((r1[:-3], r1[-2])) + break + + # STEP 6: Remove owned + for suffix in self.__step6_suffixes: + if r1.endswith(suffix): + if suffix in ("\xE1k\xE9", "\xE1\xE9i"): + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix in ("\xE9k\xE9", "\xE9\xE9i", "\xE9\xE9"): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 7: Remove singular owner suffixes + for suffix in self.__step7_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix in ("\xE1nk", "\xE1juk", "\xE1m", "\xE1d", "\xE1"): + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix in ("\xE9nk", "\xE9j\xFCk", "\xE9m", "\xE9d", "\xE9"): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 8: Remove plural owner suffixes + for suffix in self.__step8_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix in ( + "\xE1im", + "\xE1id", + "\xE1i", + "\xE1ink", + "\xE1itok", + "\xE1ik", + ): + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix in ( + "\xE9im", + "\xE9id", + "\xE9i", + "\xE9ink", + "\xE9itek", + "\xE9ik", + ): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 9: Remove plural suffixes + for suffix in self.__step9_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if 
suffix == "\xE1k": + word = suffix_replace(word, suffix, "a") + elif suffix == "\xE9k": + word = suffix_replace(word, suffix, "e") + else: + word = word[: -len(suffix)] + break + + return word + + def __r1_hungarian(self, word, vowels, digraphs): + """ + Return the region R1 that is used by the Hungarian stemmer. + + If the word begins with a vowel, R1 is defined as the region + after the first consonant or digraph (= two letters stand for + one phoneme) in the word. If the word begins with a consonant, + it is defined as the region after the first vowel in the word. + If the word does not contain both a vowel and consonant, R1 + is the null region at the end of the word. + + :param word: The Hungarian word whose region R1 is determined. + :type word: str or unicode + :param vowels: The Hungarian vowels that are used to determine + the region R1. + :type vowels: unicode + :param digraphs: The digraphs that are used to determine the + region R1. + :type digraphs: tuple + :return: the region R1 for the respective word. + :rtype: unicode + :note: This helper method is invoked by the stem method of the subclass + HungarianStemmer. It is not to be invoked directly! + + """ + r1 = "" + if word[0] in vowels: + for digraph in digraphs: + if digraph in word[1:]: + r1 = word[word.index(digraph[-1]) + 1 :] + return r1 + + for i in range(1, len(word)): + if word[i] not in vowels: + r1 = word[i + 1 :] + break + else: + for i in range(1, len(word)): + if word[i] in vowels: + r1 = word[i + 1 :] + break + + return r1 + + +class ItalianStemmer(_StandardStemmer): + + """ + The Italian Snowball stemmer. + + :cvar __vowels: The Italian vowels. + :type __vowels: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. + :type __step0_suffixes: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. 
+ :type __step2_suffixes: tuple + :note: A detailed description of the Italian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/italian/stemmer.html + + """ + + __vowels = "aeiou\xE0\xE8\xEC\xF2\xF9" + __step0_suffixes = ( + "gliela", + "gliele", + "glieli", + "glielo", + "gliene", + "sene", + "mela", + "mele", + "meli", + "melo", + "mene", + "tela", + "tele", + "teli", + "telo", + "tene", + "cela", + "cele", + "celi", + "celo", + "cene", + "vela", + "vele", + "veli", + "velo", + "vene", + "gli", + "ci", + "la", + "le", + "li", + "lo", + "mi", + "ne", + "si", + "ti", + "vi", + ) + __step1_suffixes = ( + "atrice", + "atrici", + "azione", + "azioni", + "uzione", + "uzioni", + "usione", + "usioni", + "amento", + "amenti", + "imento", + "imenti", + "amente", + "abile", + "abili", + "ibile", + "ibili", + "mente", + "atore", + "atori", + "logia", + "logie", + "anza", + "anze", + "iche", + "ichi", + "ismo", + "ismi", + "ista", + "iste", + "isti", + "ist\xE0", + "ist\xE8", + "ist\xEC", + "ante", + "anti", + "enza", + "enze", + "ico", + "ici", + "ica", + "ice", + "oso", + "osi", + "osa", + "ose", + "it\xE0", + "ivo", + "ivi", + "iva", + "ive", + ) + __step2_suffixes = ( + "erebbero", + "irebbero", + "assero", + "assimo", + "eranno", + "erebbe", + "eremmo", + "ereste", + "eresti", + "essero", + "iranno", + "irebbe", + "iremmo", + "ireste", + "iresti", + "iscano", + "iscono", + "issero", + "arono", + "avamo", + "avano", + "avate", + "eremo", + "erete", + "erono", + "evamo", + "evano", + "evate", + "iremo", + "irete", + "irono", + "ivamo", + "ivano", + "ivate", + "ammo", + "ando", + "asse", + "assi", + "emmo", + "enda", + "ende", + "endi", + "endo", + "erai", + "erei", + "Yamo", + "iamo", + "immo", + "irai", + "irei", + "isca", + "isce", + "isci", + "isco", + "ano", + "are", + "ata", + "ate", + "ati", + "ato", + "ava", + "avi", + "avo", + "er\xE0", + "ere", + "er\xF2", + "ete", + "eva", + "evi", + "evo", + "ir\xE0", + "ire", + "ir\xF2", + "ita", + "ite", + "iti", + "ito", + "iva", + "ivi", + "ivo", + "ono", + "uta", + "ute", + "uti", + "uto", + "ar", + "ir", + ) + + def stem(self, word): + """ + Stem an Italian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + + # All acute accents are replaced by grave accents. + word = ( + word.replace("\xE1", "\xE0") + .replace("\xE9", "\xE8") + .replace("\xED", "\xEC") + .replace("\xF3", "\xF2") + .replace("\xFA", "\xF9") + ) + + # Every occurrence of 'u' after 'q' + # is put into upper case. + for i in range(1, len(word)): + if word[i - 1] == "q" and word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + # Every occurrence of 'u' and 'i' + # between vowels is put into upper case. 
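+ # Illustrative note (example chosen here, not from the Snowball test
+ # data): in "guaio" the "i" lies between the vowels "a" and "o", so it is
+ # rewritten as "I" and treated as a consonant while R1, R2 and RV are
+ # computed; the marking is undone at the end of stem().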
+ for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "i": + word = "".join((word[:i], "I", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 0: Attached pronoun + for suffix in self.__step0_suffixes: + if rv.endswith(suffix): + if rv[-len(suffix) - 4 : -len(suffix)] in ("ando", "endo"): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + elif rv[-len(suffix) - 2 : -len(suffix)] in ("ar", "er", "ir"): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + r2 = suffix_replace(r2, suffix, "e") + rv = suffix_replace(rv, suffix, "e") + break + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix == "amente" and r1.endswith(suffix): + step1_success = True + word = word[:-6] + r2 = r2[:-6] + rv = rv[:-6] + + if r2.endswith("iv"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(("os", "ic")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith("abil"): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("amento", "amenti", "imento", "imenti") and rv.endswith( + suffix + ): + step1_success = True + word = word[:-6] + rv = rv[:-6] + + elif r2.endswith(suffix): + step1_success = True + if suffix in ("azione", "azioni", "atore", "atori"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith("ic"): + word = word[:-2] + rv = rv[:-2] + + elif suffix in ("logia", "logie"): + word = word[:-2] + rv = word[:-2] + + elif suffix in ("uzione", "uzioni", "usione", "usioni"): + word = word[:-5] + rv = rv[:-5] + + elif suffix in ("enza", "enze"): + word = suffix_replace(word, suffix, "te") + rv = suffix_replace(rv, suffix, "te") + + elif suffix == "it\xE0": + word = word[:-3] + r2 = r2[:-3] + rv = rv[:-3] + + if r2.endswith(("ic", "iv")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith("abil"): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("ivo", "ivi", "iva", "ive"): + word = word[:-3] + r2 = r2[:-3] + rv = rv[:-3] + + if r2.endswith("at"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("ic"): + word = word[:-2] + rv = rv[:-2] + else: + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2: Verb suffixes + if not step1_success: + for suffix in self.__step2_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 3a + if rv.endswith(("a", "e", "i", "o", "\xE0", "\xE8", "\xEC", "\xF2")): + word = word[:-1] + rv = rv[:-1] + + if rv.endswith("i"): + word = word[:-1] + rv = rv[:-1] + + # STEP 3b + if rv.endswith(("ch", "gh")): + word = word[:-1] + + word = word.replace("I", "i").replace("U", "u") + + return word + + +class NorwegianStemmer(_ScandinavianStemmer): + + """ + The Norwegian Snowball stemmer. + + :cvar __vowels: The Norwegian vowels. + :type __vowels: unicode + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. 
+ :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Norwegian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/norwegian/stemmer.html + + """ + + __vowels = "aeiouy\xE6\xE5\xF8" + __s_ending = "bcdfghjlmnoprtvyz" + __step1_suffixes = ( + "hetenes", + "hetene", + "hetens", + "heter", + "heten", + "endes", + "ande", + "ende", + "edes", + "enes", + "erte", + "ede", + "ane", + "ene", + "ens", + "ers", + "ets", + "het", + "ast", + "ert", + "en", + "ar", + "er", + "as", + "es", + "et", + "a", + "e", + "s", + ) + + __step2_suffixes = ("dt", "vt") + + __step3_suffixes = ( + "hetslov", + "eleg", + "elig", + "elov", + "slov", + "leg", + "eig", + "lig", + "els", + "lov", + "ig", + ) + + def stem(self, word): + """ + Stem a Norwegian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + r1 = self._r1_scandinavian(word, self.__vowels) + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix in ("erte", "ert"): + word = suffix_replace(word, suffix, "er") + r1 = suffix_replace(r1, suffix, "er") + + elif suffix == "s": + if word[-2] in self.__s_ending or ( + word[-2] == "k" and word[-3] not in self.__vowels + ): + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 3 + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + word = word[: -len(suffix)] + break + + return word + + +class PortugueseStemmer(_StandardStemmer): + + """ + The Portuguese Snowball stemmer. + + :cvar __vowels: The Portuguese vowels. + :type __vowels: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. 
+ :type __step4_suffixes: tuple + :note: A detailed description of the Portuguese + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/portuguese/stemmer.html + + """ + + __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4" + __step1_suffixes = ( + "amentos", + "imentos", + "uço~es", + "amento", + "imento", + "adoras", + "adores", + "a\xE7o~es", + "logias", + "\xEAncias", + "amente", + "idades", + "an\xE7as", + "ismos", + "istas", + "adora", + "a\xE7a~o", + "antes", + "\xE2ncia", + "logia", + "uça~o", + "\xEAncia", + "mente", + "idade", + "an\xE7a", + "ezas", + "icos", + "icas", + "ismo", + "\xE1vel", + "\xEDvel", + "ista", + "osos", + "osas", + "ador", + "ante", + "ivas", + "ivos", + "iras", + "eza", + "ico", + "ica", + "oso", + "osa", + "iva", + "ivo", + "ira", + ) + __step2_suffixes = ( + "ar\xEDamos", + "er\xEDamos", + "ir\xEDamos", + "\xE1ssemos", + "\xEAssemos", + "\xEDssemos", + "ar\xEDeis", + "er\xEDeis", + "ir\xEDeis", + "\xE1sseis", + "\xE9sseis", + "\xEDsseis", + "\xE1ramos", + "\xE9ramos", + "\xEDramos", + "\xE1vamos", + "aremos", + "eremos", + "iremos", + "ariam", + "eriam", + "iriam", + "assem", + "essem", + "issem", + "ara~o", + "era~o", + "ira~o", + "arias", + "erias", + "irias", + "ardes", + "erdes", + "irdes", + "asses", + "esses", + "isses", + "astes", + "estes", + "istes", + "\xE1reis", + "areis", + "\xE9reis", + "ereis", + "\xEDreis", + "ireis", + "\xE1veis", + "\xEDamos", + "armos", + "ermos", + "irmos", + "aria", + "eria", + "iria", + "asse", + "esse", + "isse", + "aste", + "este", + "iste", + "arei", + "erei", + "irei", + "aram", + "eram", + "iram", + "avam", + "arem", + "erem", + "irem", + "ando", + "endo", + "indo", + "adas", + "idas", + "ar\xE1s", + "aras", + "er\xE1s", + "eras", + "ir\xE1s", + "avas", + "ares", + "eres", + "ires", + "\xEDeis", + "ados", + "idos", + "\xE1mos", + "amos", + "emos", + "imos", + "iras", + "ada", + "ida", + "ar\xE1", + "ara", + "er\xE1", + "era", + "ir\xE1", + "ava", + "iam", + "ado", + "ido", + "ias", + "ais", + "eis", + "ira", + "ia", + "ei", + "am", + "em", + "ar", + "er", + "ir", + "as", + "es", + "is", + "eu", + "iu", + "ou", + ) + __step4_suffixes = ("os", "a", "i", "o", "\xE1", "\xED", "\xF3") + + def stem(self, word): + """ + Stem a Portuguese word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + step2_success = False + + word = ( + word.replace("\xE3", "a~") + .replace("\xF5", "o~") + .replace("q\xFC", "qu") + .replace("g\xFC", "gu") + ) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix == "amente" and r1.endswith(suffix): + step1_success = True + + word = word[:-6] + r2 = r2[:-6] + rv = rv[:-6] + + if r2.endswith("iv"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(("os", "ic", "ad")): + word = word[:-2] + rv = rv[:-2] + + elif ( + suffix in ("ira", "iras") + and rv.endswith(suffix) + and word[-len(suffix) - 1 : -len(suffix)] == "e" + ): + step1_success = True + + word = suffix_replace(word, suffix, "ir") + rv = suffix_replace(rv, suffix, "ir") + + elif r2.endswith(suffix): + step1_success = True + + if suffix in ("logia", "logias"): + word = suffix_replace(word, suffix, "log") + rv = suffix_replace(rv, suffix, "log") + + elif suffix in ("uça~o", "uço~es"): + word = suffix_replace(word, suffix, "u") + rv = suffix_replace(rv, suffix, "u") + + elif suffix in ("\xEAncia", "\xEAncias"): + word = suffix_replace(word, suffix, "ente") + rv = suffix_replace(rv, suffix, "ente") + + elif suffix == "mente": + word = word[:-5] + r2 = r2[:-5] + rv = rv[:-5] + + if r2.endswith(("ante", "avel", "ivel")): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("idade", "idades"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith(("ic", "iv")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith("abil"): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("iva", "ivo", "ivas", "ivos"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + else: + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2: Verb suffixes + if not step1_success: + for suffix in self.__step2_suffixes: + if rv.endswith(suffix): + step2_success = True + + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 3 + if step1_success or step2_success: + if rv.endswith("i") and word[-2] == "c": + word = word[:-1] + rv = rv[:-1] + + ### STEP 4: Residual suffix + if not step1_success and not step2_success: + for suffix in self.__step4_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 5 + if rv.endswith(("e", "\xE9", "\xEA")): + word = word[:-1] + rv = rv[:-1] + + if (word.endswith("gu") and rv.endswith("u")) or ( + word.endswith("ci") and rv.endswith("i") + ): + word = word[:-1] + + elif word.endswith("\xE7"): + word = suffix_replace(word, "\xE7", "c") + + word = word.replace("a~", "\xE3").replace("o~", "\xF5") + + return word + + +class RomanianStemmer(_StandardStemmer): + + """ + The Romanian Snowball stemmer. + + :cvar __vowels: The Romanian vowels. + :type __vowels: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. + :type __step0_suffixes: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. 
+ :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Romanian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/romanian/stemmer.html + + """ + + __vowels = "aeiou\u0103\xE2\xEE" + __step0_suffixes = ( + "iilor", + "ului", + "elor", + "iile", + "ilor", + "atei", + "a\u0163ie", + "a\u0163ia", + "aua", + "ele", + "iua", + "iei", + "ile", + "ul", + "ea", + "ii", + ) + __step1_suffixes = ( + "abilitate", + "abilitati", + "abilit\u0103\u0163i", + "ibilitate", + "abilit\u0103i", + "ivitate", + "ivitati", + "ivit\u0103\u0163i", + "icitate", + "icitati", + "icit\u0103\u0163i", + "icatori", + "ivit\u0103i", + "icit\u0103i", + "icator", + "a\u0163iune", + "atoare", + "\u0103toare", + "i\u0163iune", + "itoare", + "iciva", + "icive", + "icivi", + "iciv\u0103", + "icala", + "icale", + "icali", + "ical\u0103", + "ativa", + "ative", + "ativi", + "ativ\u0103", + "atori", + "\u0103tori", + "itiva", + "itive", + "itivi", + "itiv\u0103", + "itori", + "iciv", + "ical", + "ativ", + "ator", + "\u0103tor", + "itiv", + "itor", + ) + __step2_suffixes = ( + "abila", + "abile", + "abili", + "abil\u0103", + "ibila", + "ibile", + "ibili", + "ibil\u0103", + "atori", + "itate", + "itati", + "it\u0103\u0163i", + "abil", + "ibil", + "oasa", + "oas\u0103", + "oase", + "anta", + "ante", + "anti", + "ant\u0103", + "ator", + "it\u0103i", + "iune", + "iuni", + "isme", + "ista", + "iste", + "isti", + "ist\u0103", + "i\u015Fti", + "ata", + "at\u0103", + "ati", + "ate", + "uta", + "ut\u0103", + "uti", + "ute", + "ita", + "it\u0103", + "iti", + "ite", + "ica", + "ice", + "ici", + "ic\u0103", + "osi", + "o\u015Fi", + "ant", + "iva", + "ive", + "ivi", + "iv\u0103", + "ism", + "ist", + "at", + "ut", + "it", + "ic", + "os", + "iv", + ) + __step3_suffixes = ( + "seser\u0103\u0163i", + "aser\u0103\u0163i", + "iser\u0103\u0163i", + "\xE2ser\u0103\u0163i", + "user\u0103\u0163i", + "seser\u0103m", + "aser\u0103m", + "iser\u0103m", + "\xE2ser\u0103m", + "user\u0103m", + "ser\u0103\u0163i", + "sese\u015Fi", + "seser\u0103", + "easc\u0103", + "ar\u0103\u0163i", + "ur\u0103\u0163i", + "ir\u0103\u0163i", + "\xE2r\u0103\u0163i", + "ase\u015Fi", + "aser\u0103", + "ise\u015Fi", + "iser\u0103", + "\xe2se\u015Fi", + "\xE2ser\u0103", + "use\u015Fi", + "user\u0103", + "ser\u0103m", + "sesem", + "indu", + "\xE2ndu", + "eaz\u0103", + "e\u015Fti", + "e\u015Fte", + "\u0103\u015Fti", + "\u0103\u015Fte", + "ea\u0163i", + "ia\u0163i", + "ar\u0103m", + "ur\u0103m", + "ir\u0103m", + "\xE2r\u0103m", + "asem", + "isem", + "\xE2sem", + "usem", + "se\u015Fi", + "ser\u0103", + "sese", + "are", + "ere", + "ire", + "\xE2re", + "ind", + "\xE2nd", + "eze", + "ezi", + "esc", + "\u0103sc", + "eam", + "eai", + "eau", + "iam", + "iai", + "iau", + "a\u015Fi", + "ar\u0103", + "u\u015Fi", + "ur\u0103", + "i\u015Fi", + "ir\u0103", + "\xE2\u015Fi", + "\xe2r\u0103", + "ase", + "ise", + "\xE2se", + "use", + "a\u0163i", + "e\u0163i", + "i\u0163i", + "\xe2\u0163i", + "sei", + "ez", + "am", + "ai", + "au", + "ea", + "ia", + "ui", + "\xE2i", + "\u0103m", + "em", + "im", + "\xE2m", + "se", + ) + + def stem(self, word): + """ + Stem a Romanian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + step2_success = False + + for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "i": + word = "".join((word[:i], "I", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 0: Removal of plurals and other simplifications + for suffix in self.__step0_suffixes: + if word.endswith(suffix): + if suffix in r1: + if suffix in ("ul", "ului"): + word = word[: -len(suffix)] + + if suffix in rv: + rv = rv[: -len(suffix)] + else: + rv = "" + + elif ( + suffix == "aua" + or suffix == "atei" + or (suffix == "ile" and word[-5:-3] != "ab") + ): + word = word[:-2] + + elif suffix in ("ea", "ele", "elor"): + word = suffix_replace(word, suffix, "e") + + if suffix in rv: + rv = suffix_replace(rv, suffix, "e") + else: + rv = "" + + elif suffix in ("ii", "iua", "iei", "iile", "iilor", "ilor"): + word = suffix_replace(word, suffix, "i") + + if suffix in rv: + rv = suffix_replace(rv, suffix, "i") + else: + rv = "" + + elif suffix in ("a\u0163ie", "a\u0163ia"): + word = word[:-1] + break + + # STEP 1: Reduction of combining suffixes + while True: + + replacement_done = False + + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix in r1: + step1_success = True + replacement_done = True + + if suffix in ( + "abilitate", + "abilitati", + "abilit\u0103i", + "abilit\u0103\u0163i", + ): + word = suffix_replace(word, suffix, "abil") + + elif suffix == "ibilitate": + word = word[:-5] + + elif suffix in ( + "ivitate", + "ivitati", + "ivit\u0103i", + "ivit\u0103\u0163i", + ): + word = suffix_replace(word, suffix, "iv") + + elif suffix in ( + "icitate", + "icitati", + "icit\u0103i", + "icit\u0103\u0163i", + "icator", + "icatori", + "iciv", + "iciva", + "icive", + "icivi", + "iciv\u0103", + "ical", + "icala", + "icale", + "icali", + "ical\u0103", + ): + word = suffix_replace(word, suffix, "ic") + + elif suffix in ( + "ativ", + "ativa", + "ative", + "ativi", + "ativ\u0103", + "a\u0163iune", + "atoare", + "ator", + "atori", + "\u0103toare", + "\u0103tor", + "\u0103tori", + ): + word = suffix_replace(word, suffix, "at") + + if suffix in r2: + r2 = suffix_replace(r2, suffix, "at") + + elif suffix in ( + "itiv", + "itiva", + "itive", + "itivi", + "itiv\u0103", + "i\u0163iune", + "itoare", + "itor", + "itori", + ): + word = suffix_replace(word, suffix, "it") + + if suffix in r2: + r2 = suffix_replace(r2, suffix, "it") + else: + step1_success = False + break + + if not replacement_done: + break + + # STEP 2: Removal of standard suffixes + for suffix in self.__step2_suffixes: + if word.endswith(suffix): + if suffix in r2: + step2_success = True + + if suffix in ("iune", "iuni"): + if word[-5] == "\u0163": + word = "".join((word[:-5], "t")) + + elif suffix in ( + "ism", + "isme", + "ist", + "ista", + "iste", + "isti", + "ist\u0103", + "i\u015Fti", + ): + word = suffix_replace(word, suffix, "ist") + + else: + word = word[: -len(suffix)] + break + + # STEP 3: Removal of verb suffixes + if not step1_success and not step2_success: + for suffix in self.__step3_suffixes: + if word.endswith(suffix): + if suffix in rv: + if suffix in ( + "seser\u0103\u0163i", + "seser\u0103m", + "ser\u0103\u0163i", + "sese\u015Fi", + "seser\u0103", + "ser\u0103m", + "sesem", + "se\u015Fi", + "ser\u0103", + 
"sese", + "a\u0163i", + "e\u0163i", + "i\u0163i", + "\xE2\u0163i", + "sei", + "\u0103m", + "em", + "im", + "\xE2m", + "se", + ): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + else: + if ( + not rv.startswith(suffix) + and rv[rv.index(suffix) - 1] not in "aeio\u0103\xE2\xEE" + ): + word = word[: -len(suffix)] + break + + # STEP 4: Removal of final vowel + for suffix in ("ie", "a", "e", "i", "\u0103"): + if word.endswith(suffix): + if suffix in rv: + word = word[: -len(suffix)] + break + + word = word.replace("I", "i").replace("U", "u") + + return word + + +class RussianStemmer(_LanguageSpecificStemmer): + + """ + The Russian Snowball stemmer. + + :cvar __perfective_gerund_suffixes: Suffixes to be deleted. + :type __perfective_gerund_suffixes: tuple + :cvar __adjectival_suffixes: Suffixes to be deleted. + :type __adjectival_suffixes: tuple + :cvar __reflexive_suffixes: Suffixes to be deleted. + :type __reflexive_suffixes: tuple + :cvar __verb_suffixes: Suffixes to be deleted. + :type __verb_suffixes: tuple + :cvar __noun_suffixes: Suffixes to be deleted. + :type __noun_suffixes: tuple + :cvar __superlative_suffixes: Suffixes to be deleted. + :type __superlative_suffixes: tuple + :cvar __derivational_suffixes: Suffixes to be deleted. + :type __derivational_suffixes: tuple + :note: A detailed description of the Russian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/russian/stemmer.html + + """ + + __perfective_gerund_suffixes = ( + "ivshis'", + "yvshis'", + "vshis'", + "ivshi", + "yvshi", + "vshi", + "iv", + "yv", + "v", + ) + __adjectival_suffixes = ( + "ui^ushchi^ui^u", + "ui^ushchi^ai^a", + "ui^ushchimi", + "ui^ushchymi", + "ui^ushchego", + "ui^ushchogo", + "ui^ushchemu", + "ui^ushchomu", + "ui^ushchikh", + "ui^ushchykh", + "ui^ushchui^u", + "ui^ushchaia", + "ui^ushchoi^u", + "ui^ushchei^u", + "i^ushchi^ui^u", + "i^ushchi^ai^a", + "ui^ushchee", + "ui^ushchie", + "ui^ushchye", + "ui^ushchoe", + "ui^ushchei`", + "ui^ushchii`", + "ui^ushchyi`", + "ui^ushchoi`", + "ui^ushchem", + "ui^ushchim", + "ui^ushchym", + "ui^ushchom", + "i^ushchimi", + "i^ushchymi", + "i^ushchego", + "i^ushchogo", + "i^ushchemu", + "i^ushchomu", + "i^ushchikh", + "i^ushchykh", + "i^ushchui^u", + "i^ushchai^a", + "i^ushchoi^u", + "i^ushchei^u", + "i^ushchee", + "i^ushchie", + "i^ushchye", + "i^ushchoe", + "i^ushchei`", + "i^ushchii`", + "i^ushchyi`", + "i^ushchoi`", + "i^ushchem", + "i^ushchim", + "i^ushchym", + "i^ushchom", + "shchi^ui^u", + "shchi^ai^a", + "ivshi^ui^u", + "ivshi^ai^a", + "yvshi^ui^u", + "yvshi^ai^a", + "shchimi", + "shchymi", + "shchego", + "shchogo", + "shchemu", + "shchomu", + "shchikh", + "shchykh", + "shchui^u", + "shchai^a", + "shchoi^u", + "shchei^u", + "ivshimi", + "ivshymi", + "ivshego", + "ivshogo", + "ivshemu", + "ivshomu", + "ivshikh", + "ivshykh", + "ivshui^u", + "ivshai^a", + "ivshoi^u", + "ivshei^u", + "yvshimi", + "yvshymi", + "yvshego", + "yvshogo", + "yvshemu", + "yvshomu", + "yvshikh", + "yvshykh", + "yvshui^u", + "yvshai^a", + "yvshoi^u", + "yvshei^u", + "vshi^ui^u", + "vshi^ai^a", + "shchee", + "shchie", + "shchye", + "shchoe", + "shchei`", + "shchii`", + "shchyi`", + "shchoi`", + "shchem", + "shchim", + "shchym", + "shchom", + "ivshee", + "ivshie", + "ivshye", + "ivshoe", + "ivshei`", + "ivshii`", + "ivshyi`", + "ivshoi`", + "ivshem", + "ivshim", + "ivshym", + "ivshom", + "yvshee", + "yvshie", + "yvshye", + "yvshoe", + "yvshei`", + "yvshii`", + "yvshyi`", + "yvshoi`", + "yvshem", + "yvshim", + "yvshym", + "yvshom", + "vshimi", + 
"vshymi", + "vshego", + "vshogo", + "vshemu", + "vshomu", + "vshikh", + "vshykh", + "vshui^u", + "vshai^a", + "vshoi^u", + "vshei^u", + "emi^ui^u", + "emi^ai^a", + "nni^ui^u", + "nni^ai^a", + "vshee", + "vshie", + "vshye", + "vshoe", + "vshei`", + "vshii`", + "vshyi`", + "vshoi`", + "vshem", + "vshim", + "vshym", + "vshom", + "emimi", + "emymi", + "emego", + "emogo", + "ememu", + "emomu", + "emikh", + "emykh", + "emui^u", + "emai^a", + "emoi^u", + "emei^u", + "nnimi", + "nnymi", + "nnego", + "nnogo", + "nnemu", + "nnomu", + "nnikh", + "nnykh", + "nnui^u", + "nnai^a", + "nnoi^u", + "nnei^u", + "emee", + "emie", + "emye", + "emoe", + "emei`", + "emii`", + "emyi`", + "emoi`", + "emem", + "emim", + "emym", + "emom", + "nnee", + "nnie", + "nnye", + "nnoe", + "nnei`", + "nnii`", + "nnyi`", + "nnoi`", + "nnem", + "nnim", + "nnym", + "nnom", + "i^ui^u", + "i^ai^a", + "imi", + "ymi", + "ego", + "ogo", + "emu", + "omu", + "ikh", + "ykh", + "ui^u", + "ai^a", + "oi^u", + "ei^u", + "ee", + "ie", + "ye", + "oe", + "ei`", + "ii`", + "yi`", + "oi`", + "em", + "im", + "ym", + "om", + ) + __reflexive_suffixes = ("si^a", "s'") + __verb_suffixes = ( + "esh'", + "ei`te", + "ui`te", + "ui^ut", + "ish'", + "ete", + "i`te", + "i^ut", + "nno", + "ila", + "yla", + "ena", + "ite", + "ili", + "yli", + "ilo", + "ylo", + "eno", + "i^at", + "uet", + "eny", + "it'", + "yt'", + "ui^u", + "la", + "na", + "li", + "em", + "lo", + "no", + "et", + "ny", + "t'", + "ei`", + "ui`", + "il", + "yl", + "im", + "ym", + "en", + "it", + "yt", + "i^u", + "i`", + "l", + "n", + ) + __noun_suffixes = ( + "ii^ami", + "ii^akh", + "i^ami", + "ii^am", + "i^akh", + "ami", + "iei`", + "i^am", + "iem", + "akh", + "ii^u", + "'i^u", + "ii^a", + "'i^a", + "ev", + "ov", + "ie", + "'e", + "ei", + "ii", + "ei`", + "oi`", + "ii`", + "em", + "am", + "om", + "i^u", + "i^a", + "a", + "e", + "i", + "i`", + "o", + "u", + "y", + "'", + ) + __superlative_suffixes = ("ei`she", "ei`sh") + __derivational_suffixes = ("ost'", "ost") + + def stem(self, word): + """ + Stem a Russian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + if word in self.stopwords: + return word + + chr_exceeded = False + for i in range(len(word)): + if ord(word[i]) > 255: + chr_exceeded = True + break + + if not chr_exceeded: + return word + + word = self.__cyrillic_to_roman(word) + + step1_success = False + adjectival_removed = False + verb_removed = False + undouble_success = False + superlative_removed = False + + rv, r2 = self.__regions_russian(word) + + # Step 1 + for suffix in self.__perfective_gerund_suffixes: + if rv.endswith(suffix): + if suffix in ("v", "vshi", "vshis'"): + if ( + rv[-len(suffix) - 3 : -len(suffix)] == "i^a" + or rv[-len(suffix) - 1 : -len(suffix)] == "a" + ): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + step1_success = True + break + else: + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + step1_success = True + break + + if not step1_success: + for suffix in self.__reflexive_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + for suffix in self.__adjectival_suffixes: + if rv.endswith(suffix): + if suffix in ( + "i^ushchi^ui^u", + "i^ushchi^ai^a", + "i^ushchui^u", + "i^ushchai^a", + "i^ushchoi^u", + "i^ushchei^u", + "i^ushchimi", + "i^ushchymi", + "i^ushchego", + "i^ushchogo", + "i^ushchemu", + "i^ushchomu", + "i^ushchikh", + "i^ushchykh", + "shchi^ui^u", + "shchi^ai^a", + "i^ushchee", + "i^ushchie", + "i^ushchye", + "i^ushchoe", + "i^ushchei`", + "i^ushchii`", + "i^ushchyi`", + "i^ushchoi`", + "i^ushchem", + "i^ushchim", + "i^ushchym", + "i^ushchom", + "vshi^ui^u", + "vshi^ai^a", + "shchui^u", + "shchai^a", + "shchoi^u", + "shchei^u", + "emi^ui^u", + "emi^ai^a", + "nni^ui^u", + "nni^ai^a", + "shchimi", + "shchymi", + "shchego", + "shchogo", + "shchemu", + "shchomu", + "shchikh", + "shchykh", + "vshui^u", + "vshai^a", + "vshoi^u", + "vshei^u", + "shchee", + "shchie", + "shchye", + "shchoe", + "shchei`", + "shchii`", + "shchyi`", + "shchoi`", + "shchem", + "shchim", + "shchym", + "shchom", + "vshimi", + "vshymi", + "vshego", + "vshogo", + "vshemu", + "vshomu", + "vshikh", + "vshykh", + "emui^u", + "emai^a", + "emoi^u", + "emei^u", + "nnui^u", + "nnai^a", + "nnoi^u", + "nnei^u", + "vshee", + "vshie", + "vshye", + "vshoe", + "vshei`", + "vshii`", + "vshyi`", + "vshoi`", + "vshem", + "vshim", + "vshym", + "vshom", + "emimi", + "emymi", + "emego", + "emogo", + "ememu", + "emomu", + "emikh", + "emykh", + "nnimi", + "nnymi", + "nnego", + "nnogo", + "nnemu", + "nnomu", + "nnikh", + "nnykh", + "emee", + "emie", + "emye", + "emoe", + "emei`", + "emii`", + "emyi`", + "emoi`", + "emem", + "emim", + "emym", + "emom", + "nnee", + "nnie", + "nnye", + "nnoe", + "nnei`", + "nnii`", + "nnyi`", + "nnoi`", + "nnem", + "nnim", + "nnym", + "nnom", + ): + if ( + rv[-len(suffix) - 3 : -len(suffix)] == "i^a" + or rv[-len(suffix) - 1 : -len(suffix)] == "a" + ): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + adjectival_removed = True + break + else: + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + adjectival_removed = True + break + + if not adjectival_removed: + for suffix in self.__verb_suffixes: + if rv.endswith(suffix): + if suffix in ( + "la", + "na", + "ete", + "i`te", + "li", + "i`", + "l", + "em", + "n", + "lo", + "no", + "et", + "i^ut", + "ny", + "t'", + "esh'", + "nno", + ): + if ( + rv[-len(suffix) - 3 : -len(suffix)] == "i^a" + or rv[-len(suffix) - 1 : -len(suffix)] == "a" + ): + 
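+                            # Group-1 verb endings (per the Snowball Russian
+                            # description) are removed only when preceded by
+                            # 'a' or 'i^a' (the transliteration of 'я').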
word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + verb_removed = True + break + else: + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + verb_removed = True + break + + if not adjectival_removed and not verb_removed: + for suffix in self.__noun_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # Step 2 + if rv.endswith("i"): + word = word[:-1] + r2 = r2[:-1] + + # Step 3 + for suffix in self.__derivational_suffixes: + if r2.endswith(suffix): + word = word[: -len(suffix)] + break + + # Step 4 + if word.endswith("nn"): + word = word[:-1] + undouble_success = True + + if not undouble_success: + for suffix in self.__superlative_suffixes: + if word.endswith(suffix): + word = word[: -len(suffix)] + superlative_removed = True + break + if word.endswith("nn"): + word = word[:-1] + + if not undouble_success and not superlative_removed: + if word.endswith("'"): + word = word[:-1] + + word = self.__roman_to_cyrillic(word) + + return word + + def __regions_russian(self, word): + """ + Return the regions RV and R2 which are used by the Russian stemmer. + + In any word, RV is the region after the first vowel, + or the end of the word if it contains no vowel. + + R2 is the region after the first non-vowel following + a vowel in R1, or the end of the word if there is no such non-vowel. + + R1 is the region after the first non-vowel following a vowel, + or the end of the word if there is no such non-vowel. + + :param word: The Russian word whose regions RV and R2 are determined. + :type word: str or unicode + :return: the regions RV and R2 for the respective Russian word. + :rtype: tuple + :note: This helper method is invoked by the stem method of the subclass + RussianStemmer. It is not to be invoked directly! + + """ + r1 = "" + r2 = "" + rv = "" + + vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y") + word = word.replace("i^a", "A").replace("i^u", "U").replace("e`", "E") + + for i in range(1, len(word)): + if word[i] not in vowels and word[i - 1] in vowels: + r1 = word[i + 1 :] + break + + for i in range(1, len(r1)): + if r1[i] not in vowels and r1[i - 1] in vowels: + r2 = r1[i + 1 :] + break + + for i in range(len(word)): + if word[i] in vowels: + rv = word[i + 1 :] + break + + r2 = r2.replace("A", "i^a").replace("U", "i^u").replace("E", "e`") + rv = rv.replace("A", "i^a").replace("U", "i^u").replace("E", "e`") + + return (rv, r2) + + def __cyrillic_to_roman(self, word): + """ + Transliterate a Russian word into the Roman alphabet. + + A Russian word whose letters consist of the Cyrillic + alphabet are transliterated into the Roman alphabet + in order to ease the forthcoming stemming process. + + :param word: The word that is transliterated. + :type word: unicode + :return: the transliterated word. + :rtype: unicode + :note: This helper method is invoked by the stem method of the subclass + RussianStemmer. It is not to be invoked directly! 
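+            The multi-letter codes produced here (e.g. ``i^a`` for я,
+            ``i^u`` for ю, ``shch`` for щ) are the same codes that appear
+            in the suffix tuples of this class.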
+ + """ + word = ( + word.replace("\u0410", "a") + .replace("\u0430", "a") + .replace("\u0411", "b") + .replace("\u0431", "b") + .replace("\u0412", "v") + .replace("\u0432", "v") + .replace("\u0413", "g") + .replace("\u0433", "g") + .replace("\u0414", "d") + .replace("\u0434", "d") + .replace("\u0415", "e") + .replace("\u0435", "e") + .replace("\u0401", "e") + .replace("\u0451", "e") + .replace("\u0416", "zh") + .replace("\u0436", "zh") + .replace("\u0417", "z") + .replace("\u0437", "z") + .replace("\u0418", "i") + .replace("\u0438", "i") + .replace("\u0419", "i`") + .replace("\u0439", "i`") + .replace("\u041A", "k") + .replace("\u043A", "k") + .replace("\u041B", "l") + .replace("\u043B", "l") + .replace("\u041C", "m") + .replace("\u043C", "m") + .replace("\u041D", "n") + .replace("\u043D", "n") + .replace("\u041E", "o") + .replace("\u043E", "o") + .replace("\u041F", "p") + .replace("\u043F", "p") + .replace("\u0420", "r") + .replace("\u0440", "r") + .replace("\u0421", "s") + .replace("\u0441", "s") + .replace("\u0422", "t") + .replace("\u0442", "t") + .replace("\u0423", "u") + .replace("\u0443", "u") + .replace("\u0424", "f") + .replace("\u0444", "f") + .replace("\u0425", "kh") + .replace("\u0445", "kh") + .replace("\u0426", "t^s") + .replace("\u0446", "t^s") + .replace("\u0427", "ch") + .replace("\u0447", "ch") + .replace("\u0428", "sh") + .replace("\u0448", "sh") + .replace("\u0429", "shch") + .replace("\u0449", "shch") + .replace("\u042A", "''") + .replace("\u044A", "''") + .replace("\u042B", "y") + .replace("\u044B", "y") + .replace("\u042C", "'") + .replace("\u044C", "'") + .replace("\u042D", "e`") + .replace("\u044D", "e`") + .replace("\u042E", "i^u") + .replace("\u044E", "i^u") + .replace("\u042F", "i^a") + .replace("\u044F", "i^a") + ) + + return word + + def __roman_to_cyrillic(self, word): + """ + Transliterate a Russian word back into the Cyrillic alphabet. + + A Russian word formerly transliterated into the Roman alphabet + in order to ease the stemming process, is transliterated back + into the Cyrillic alphabet, its original form. + + :param word: The word that is transliterated. + :type word: str or unicode + :return: word, the transliterated word. + :rtype: unicode + :note: This helper method is invoked by the stem method of the subclass + RussianStemmer. It is not to be invoked directly! + + """ + word = ( + word.replace("i^u", "\u044E") + .replace("i^a", "\u044F") + .replace("shch", "\u0449") + .replace("kh", "\u0445") + .replace("t^s", "\u0446") + .replace("ch", "\u0447") + .replace("e`", "\u044D") + .replace("i`", "\u0439") + .replace("sh", "\u0448") + .replace("k", "\u043A") + .replace("e", "\u0435") + .replace("zh", "\u0436") + .replace("a", "\u0430") + .replace("b", "\u0431") + .replace("v", "\u0432") + .replace("g", "\u0433") + .replace("d", "\u0434") + .replace("e", "\u0435") + .replace("z", "\u0437") + .replace("i", "\u0438") + .replace("l", "\u043B") + .replace("m", "\u043C") + .replace("n", "\u043D") + .replace("o", "\u043E") + .replace("p", "\u043F") + .replace("r", "\u0440") + .replace("s", "\u0441") + .replace("t", "\u0442") + .replace("u", "\u0443") + .replace("f", "\u0444") + .replace("''", "\u044A") + .replace("y", "\u044B") + .replace("'", "\u044C") + ) + + return word + + +class SpanishStemmer(_StandardStemmer): + + """ + The Spanish Snowball stemmer. + + :cvar __vowels: The Spanish vowels. + :type __vowels: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. 
+ :type __step0_suffixes: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm. + :type __step2a_suffixes: tuple + :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm. + :type __step2b_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Spanish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/spanish/stemmer.html + + """ + + __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xFC" + __step0_suffixes = ( + "selas", + "selos", + "sela", + "selo", + "las", + "les", + "los", + "nos", + "me", + "se", + "la", + "le", + "lo", + ) + __step1_suffixes = ( + "amientos", + "imientos", + "amiento", + "imiento", + "acion", + "aciones", + "uciones", + "adoras", + "adores", + "ancias", + "log\xEDas", + "encias", + "amente", + "idades", + "anzas", + "ismos", + "ables", + "ibles", + "istas", + "adora", + "aci\xF3n", + "antes", + "ancia", + "log\xEDa", + "uci\xf3n", + "encia", + "mente", + "anza", + "icos", + "icas", + "ismo", + "able", + "ible", + "ista", + "osos", + "osas", + "ador", + "ante", + "idad", + "ivas", + "ivos", + "ico", + "ica", + "oso", + "osa", + "iva", + "ivo", + ) + __step2a_suffixes = ( + "yeron", + "yendo", + "yamos", + "yais", + "yan", + "yen", + "yas", + "yes", + "ya", + "ye", + "yo", + "y\xF3", + ) + __step2b_suffixes = ( + "ar\xEDamos", + "er\xEDamos", + "ir\xEDamos", + "i\xE9ramos", + "i\xE9semos", + "ar\xEDais", + "aremos", + "er\xEDais", + "eremos", + "ir\xEDais", + "iremos", + "ierais", + "ieseis", + "asteis", + "isteis", + "\xE1bamos", + "\xE1ramos", + "\xE1semos", + "ar\xEDan", + "ar\xEDas", + "ar\xE9is", + "er\xEDan", + "er\xEDas", + "er\xE9is", + "ir\xEDan", + "ir\xEDas", + "ir\xE9is", + "ieran", + "iesen", + "ieron", + "iendo", + "ieras", + "ieses", + "abais", + "arais", + "aseis", + "\xE9amos", + "ar\xE1n", + "ar\xE1s", + "ar\xEDa", + "er\xE1n", + "er\xE1s", + "er\xEDa", + "ir\xE1n", + "ir\xE1s", + "ir\xEDa", + "iera", + "iese", + "aste", + "iste", + "aban", + "aran", + "asen", + "aron", + "ando", + "abas", + "adas", + "idas", + "aras", + "ases", + "\xEDais", + "ados", + "idos", + "amos", + "imos", + "emos", + "ar\xE1", + "ar\xE9", + "er\xE1", + "er\xE9", + "ir\xE1", + "ir\xE9", + "aba", + "ada", + "ida", + "ara", + "ase", + "\xEDan", + "ado", + "ido", + "\xEDas", + "\xE1is", + "\xE9is", + "\xEDa", + "ad", + "ed", + "id", + "an", + "i\xF3", + "ar", + "er", + "ir", + "as", + "\xEDs", + "en", + "es", + ) + __step3_suffixes = ("os", "a", "e", "o", "\xE1", "\xE9", "\xED", "\xF3") + + def stem(self, word): + """ + Stem a Spanish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 0: Attached pronoun + for suffix in self.__step0_suffixes: + if not (word.endswith(suffix) and rv.endswith(suffix)): + continue + + if ( + rv[: -len(suffix)].endswith( + ( + "ando", + "\xE1ndo", + "ar", + "\xE1r", + "er", + "\xE9r", + "iendo", + "i\xE9ndo", + "ir", + "\xEDr", + ) + ) + ) or ( + rv[: -len(suffix)].endswith("yendo") + and word[: -len(suffix)].endswith("uyendo") + ): + + word = self.__replace_accented(word[: -len(suffix)]) + r1 = self.__replace_accented(r1[: -len(suffix)]) + r2 = self.__replace_accented(r2[: -len(suffix)]) + rv = self.__replace_accented(rv[: -len(suffix)]) + break + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if not word.endswith(suffix): + continue + + if suffix == "amente" and r1.endswith(suffix): + step1_success = True + word = word[:-6] + r2 = r2[:-6] + rv = rv[:-6] + + if r2.endswith("iv"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(("os", "ic", "ad")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(suffix): + step1_success = True + if suffix in ( + "adora", + "ador", + "aci\xF3n", + "adoras", + "adores", + "acion", + "aciones", + "ante", + "antes", + "ancia", + "ancias", + ): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith("ic"): + word = word[:-2] + rv = rv[:-2] + + elif suffix in ("log\xEDa", "log\xEDas"): + word = suffix_replace(word, suffix, "log") + rv = suffix_replace(rv, suffix, "log") + + elif suffix in ("uci\xF3n", "uciones"): + word = suffix_replace(word, suffix, "u") + rv = suffix_replace(rv, suffix, "u") + + elif suffix in ("encia", "encias"): + word = suffix_replace(word, suffix, "ente") + rv = suffix_replace(rv, suffix, "ente") + + elif suffix == "mente": + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith(("ante", "able", "ible")): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("idad", "idades"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + for pre_suff in ("abil", "ic", "iv"): + if r2.endswith(pre_suff): + word = word[: -len(pre_suff)] + rv = rv[: -len(pre_suff)] + + elif suffix in ("ivo", "iva", "ivos", "ivas"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + else: + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2a: Verb suffixes beginning 'y' + if not step1_success: + for suffix in self.__step2a_suffixes: + if rv.endswith(suffix) and word[-len(suffix) - 1 : -len(suffix)] == "u": + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2b: Other verb suffixes + for suffix in self.__step2b_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + if suffix in ("en", "es", "\xE9is", "emos"): + if word.endswith("gu"): + word = word[:-1] + + if rv.endswith("gu"): + rv = rv[:-1] + break + + # STEP 3: Residual suffix + for suffix in self.__step3_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + if suffix in ("e", "\xE9"): + rv = rv[: -len(suffix)] + + if word[-2:] == "gu" and rv.endswith("u"): + word = word[:-1] + break + + word = 
self.__replace_accented(word) + + return word + + def __replace_accented(self, word): + """ + Replaces all accented letters on a word with their non-accented + counterparts. + + :param word: A spanish word, with or without accents + :type word: str or unicode + :return: a word with the accented letters (á, é, í, ó, ú) replaced with + their non-accented counterparts (a, e, i, o, u) + :rtype: str or unicode + """ + return ( + word.replace("\xE1", "a") + .replace("\xE9", "e") + .replace("\xED", "i") + .replace("\xF3", "o") + .replace("\xFA", "u") + ) + + +class SwedishStemmer(_ScandinavianStemmer): + + """ + The Swedish Snowball stemmer. + + :cvar __vowels: The Swedish vowels. + :type __vowels: unicode + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Swedish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/swedish/stemmer.html + + """ + + __vowels = "aeiouy\xE4\xE5\xF6" + __s_ending = "bcdfghjklmnoprtvy" + __step1_suffixes = ( + "heterna", + "hetens", + "heter", + "heten", + "anden", + "arnas", + "ernas", + "ornas", + "andes", + "andet", + "arens", + "arna", + "erna", + "orna", + "ande", + "arne", + "aste", + "aren", + "ades", + "erns", + "ade", + "are", + "ern", + "ens", + "het", + "ast", + "ad", + "en", + "ar", + "er", + "or", + "as", + "es", + "at", + "a", + "e", + "s", + ) + __step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt") + __step3_suffixes = ("fullt", "l\xF6st", "els", "lig", "ig") + + def stem(self, word): + """ + Stem a Swedish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + r1 = self._r1_scandinavian(word, self.__vowels) + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "s": + if word[-2] in self.__s_ending: + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 3 + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix in ("els", "lig", "ig"): + word = word[: -len(suffix)] + elif suffix in ("fullt", "l\xF6st"): + word = word[:-1] + break + + return word + + +def demo(): + """ + This function provides a demonstration of the Snowball stemmers. + + After invoking this function and specifying a language, + it stems an excerpt of the Universal Declaration of Human Rights + (which is a part of the NLTK corpus collection) and then prints + out the original and the stemmed text. 
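+
+    Example (interactive; assumes the ``udhr`` corpus has already been
+    downloaded, e.g. via ``nltk.download('udhr')``)::
+
+        >>> demo()                      # doctest: +SKIP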
+ + """ + + from nltk.corpus import udhr + + udhr_corpus = { + "arabic": "Arabic_Alarabia-Arabic", + "danish": "Danish_Dansk-Latin1", + "dutch": "Dutch_Nederlands-Latin1", + "english": "English-Latin1", + "finnish": "Finnish_Suomi-Latin1", + "french": "French_Francais-Latin1", + "german": "German_Deutsch-Latin1", + "hungarian": "Hungarian_Magyar-UTF8", + "italian": "Italian_Italiano-Latin1", + "norwegian": "Norwegian-Latin1", + "porter": "English-Latin1", + "portuguese": "Portuguese_Portugues-Latin1", + "romanian": "Romanian_Romana-Latin2", + "russian": "Russian-UTF8", + "spanish": "Spanish-Latin1", + "swedish": "Swedish_Svenska-Latin1", + } + + print("\n") + print("******************************") + print("Demo for the Snowball stemmers") + print("******************************") + + while True: + + language = input( + "Please enter the name of the language " + + "to be demonstrated\n" + + "/".join(SnowballStemmer.languages) + + "\n" + + "(enter 'exit' in order to leave): " + ) + + if language == "exit": + break + + if language not in SnowballStemmer.languages: + print( + "\nOops, there is no stemmer for this language. " + + "Please try again.\n" + ) + continue + + stemmer = SnowballStemmer(language) + excerpt = udhr.words(udhr_corpus[language])[:300] + + stemmed = " ".join(stemmer.stem(word) for word in excerpt) + stemmed = re.sub(r"(.{,70})\s", r"\1\n", stemmed + " ").rstrip() + excerpt = " ".join(excerpt) + excerpt = re.sub(r"(.{,70})\s", r"\1\n", excerpt + " ").rstrip() + + print("\n") + print("-" * 70) + print("ORIGINAL".center(70)) + print(excerpt) + print("\n\n") + print("STEMMED RESULTS".center(70)) + print(stemmed) + print("-" * 70) + print("\n") diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/stem/util.py b/env-llmeval/lib/python3.10/site-packages/nltk/stem/util.py new file mode 100644 index 0000000000000000000000000000000000000000..71d3a661e582e5dd60ef0a2bdcb2674ef29d472c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/stem/util.py @@ -0,0 +1,25 @@ +# Natural Language Toolkit: Stemmer Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Helder +# URL: +# For license information, see LICENSE.TXT + + +def suffix_replace(original, old, new): + """ + Replaces the old suffix of the original string by a new suffix + """ + return original[: -len(old)] + new + + +def prefix_replace(original, old, new): + """ + Replaces the old prefix of the original string by a new suffix + + :param original: string + :param old: string + :param new: string + :return: string + """ + return new + original[len(old) :] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/stem/wordnet.py b/env-llmeval/lib/python3.10/site-packages/nltk/stem/wordnet.py new file mode 100644 index 0000000000000000000000000000000000000000..0ccb056f7e2172f46d03236d76942c3080d2f107 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/stem/wordnet.py @@ -0,0 +1,49 @@ +# Natural Language Toolkit: WordNet stemmer interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus import wordnet as wn + + +class WordNetLemmatizer: + """ + WordNet Lemmatizer + + Lemmatize using WordNet's built-in morphy function. + Returns the input word unchanged if it cannot be found in WordNet. 
+ + >>> from nltk.stem import WordNetLemmatizer + >>> wnl = WordNetLemmatizer() + >>> print(wnl.lemmatize('dogs')) + dog + >>> print(wnl.lemmatize('churches')) + church + >>> print(wnl.lemmatize('aardwolves')) + aardwolf + >>> print(wnl.lemmatize('abaci')) + abacus + >>> print(wnl.lemmatize('hardrock')) + hardrock + """ + + def lemmatize(self, word: str, pos: str = "n") -> str: + """Lemmatize `word` using WordNet's built-in morphy function. + Returns the input word unchanged if it cannot be found in WordNet. + + :param word: The input word to lemmatize. + :type word: str + :param pos: The Part Of Speech tag. Valid options are `"n"` for nouns, + `"v"` for verbs, `"a"` for adjectives, `"r"` for adverbs and `"s"` + for satellite adjectives. + :param pos: str + :return: The lemma of `word`, for the given `pos`. + """ + lemmas = wn._morphy(word, pos) + return min(lemmas, key=len) if lemmas else word + + def __repr__(self): + return "" diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff372697a122673bd395f99efeefba397efab455 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21c4ce62ac3555b22e2f51412305fa407e8a2f1f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80ee4eea9562a880374923e1de6f29530155398d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitter_demo.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitter_demo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f8d7544d7e0b0d1947c77e393b2969897056c7a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitter_demo.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitterclient.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitterclient.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de8a7b404e3425f06a71bd163e6f27f526e42897 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitterclient.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..247b68e9d7509a38be4a40dc33743a669a99f04c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__pycache__/util.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/common.py b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/common.py new file mode 100644 index 0000000000000000000000000000000000000000..d9428724cfa8cae69e14d899cb73eee5607475d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/common.py @@ -0,0 +1,270 @@ +# Natural Language Toolkit: Twitter client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +Utility functions for the `twitterclient` module which do not require +the `twython` library to have been installed. +""" +import csv +import gzip +import json + +from nltk.internals import deprecated + +HIER_SEPARATOR = "." + + +def extract_fields(tweet, fields): + """ + Extract field values from a full tweet and return them as a list + + :param json tweet: The tweet in JSON format + :param list fields: The fields to be extracted from the tweet + :rtype: list(str) + """ + out = [] + for field in fields: + try: + _add_field_to_out(tweet, field, out) + except TypeError as e: + raise RuntimeError( + "Fatal error when extracting fields. Cannot find field ", field + ) from e + return out + + +def _add_field_to_out(json, field, out): + if _is_composed_key(field): + key, value = _get_key_value_composed(field) + _add_field_to_out(json[key], value, out) + else: + out += [json[field]] + + +def _is_composed_key(field): + return HIER_SEPARATOR in field + + +def _get_key_value_composed(field): + out = field.split(HIER_SEPARATOR) + # there could be up to 3 levels + key = out[0] + value = HIER_SEPARATOR.join(out[1:]) + return key, value + + +def _get_entity_recursive(json, entity): + if not json: + return None + elif isinstance(json, dict): + for key, value in json.items(): + if key == entity: + return value + # 'entities' and 'extended_entities' are wrappers in Twitter json + # structure that contain other Twitter objects. See: + # https://dev.twitter.com/overview/api/entities-in-twitter-objects + + if key == "entities" or key == "extended_entities": + candidate = _get_entity_recursive(value, entity) + if candidate is not None: + return candidate + return None + elif isinstance(json, list): + for item in json: + candidate = _get_entity_recursive(item, entity) + if candidate is not None: + return candidate + return None + else: + return None + + +def json2csv( + fp, outfile, fields, encoding="utf8", errors="replace", gzip_compress=False +): + """ + Extract selected fields from a file of line-separated JSON tweets and + write to a file in CSV format. + + This utility function allows a file of full tweets to be easily converted + to a CSV file for easier processing. For example, just TweetIDs or + just the text content of the Tweets can be extracted. + + Additionally, the function allows combinations of fields of other Twitter + objects (mainly the users, see below). + + For Twitter entities (e.g. hashtags of a Tweet), and for geolocation, see + `json2csv_entities` + + :param str infile: The name of the file containing full tweets + + :param str outfile: The name of the text file where results should be\ + written + + :param list fields: The list of fields to be extracted. Useful examples\ + are 'id_str' for the tweetID and 'text' for the text of the tweet. See\ + for a full list of fields.\ + e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count']\ + Additionally, it allows IDs from other Twitter objects, e. 
g.,\ + ['id', 'text', 'user.id', 'user.followers_count', 'user.friends_count'] + + :param error: Behaviour for encoding errors, see\ + https://docs.python.org/3/library/codecs.html#codec-base-classes + + :param gzip_compress: if `True`, output files are compressed with gzip + """ + (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) + # write the list of fields as header + writer.writerow(fields) + # process the file + for line in fp: + tweet = json.loads(line) + row = extract_fields(tweet, fields) + writer.writerow(row) + outf.close() + + +@deprecated("Use open() and csv.writer() directly instead.") +def outf_writer_compat(outfile, encoding, errors, gzip_compress=False): + """Get a CSV writer with optional compression.""" + return _outf_writer(outfile, encoding, errors, gzip_compress) + + +def _outf_writer(outfile, encoding, errors, gzip_compress=False): + if gzip_compress: + outf = gzip.open(outfile, "wt", newline="", encoding=encoding, errors=errors) + else: + outf = open(outfile, "w", newline="", encoding=encoding, errors=errors) + writer = csv.writer(outf) + return (writer, outf) + + +def json2csv_entities( + tweets_file, + outfile, + main_fields, + entity_type, + entity_fields, + encoding="utf8", + errors="replace", + gzip_compress=False, +): + """ + Extract selected fields from a file of line-separated JSON tweets and + write to a file in CSV format. + + This utility function allows a file of full Tweets to be easily converted + to a CSV file for easier processing of Twitter entities. For example, the + hashtags or media elements of a tweet can be extracted. + + It returns one line per entity of a Tweet, e.g. if a tweet has two hashtags + there will be two lines in the output file, one per hashtag + + :param tweets_file: the file-like object containing full Tweets + + :param str outfile: The path of the text file where results should be\ + written + + :param list main_fields: The list of fields to be extracted from the main\ + object, usually the tweet. Useful examples: 'id_str' for the tweetID. See\ + for a full list of fields. + e. g.: ['id_str'], ['id', 'text', 'favorite_count', 'retweet_count'] + If `entity_type` is expressed with hierarchy, then it is the list of\ + fields of the object that corresponds to the key of the entity_type,\ + (e.g., for entity_type='user.urls', the fields in the main_fields list\ + belong to the user object; for entity_type='place.bounding_box', the\ + files in the main_field list belong to the place object of the tweet). + + :param list entity_type: The name of the entity: 'hashtags', 'media',\ + 'urls' and 'user_mentions' for the tweet object. For a user object,\ + this needs to be expressed with a hierarchy: `'user.urls'`. For the\ + bounding box of the Tweet location, use `'place.bounding_box'`. + + :param list entity_fields: The list of fields to be extracted from the\ + entity. E.g. 
`['text']` (of the Tweet) + + :param error: Behaviour for encoding errors, see\ + https://docs.python.org/3/library/codecs.html#codec-base-classes + + :param gzip_compress: if `True`, output files are compressed with gzip + """ + + (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) + header = get_header_field_list(main_fields, entity_type, entity_fields) + writer.writerow(header) + for line in tweets_file: + tweet = json.loads(line) + if _is_composed_key(entity_type): + key, value = _get_key_value_composed(entity_type) + object_json = _get_entity_recursive(tweet, key) + if not object_json: + # this can happen in the case of "place" + continue + object_fields = extract_fields(object_json, main_fields) + items = _get_entity_recursive(object_json, value) + _write_to_file(object_fields, items, entity_fields, writer) + else: + tweet_fields = extract_fields(tweet, main_fields) + items = _get_entity_recursive(tweet, entity_type) + _write_to_file(tweet_fields, items, entity_fields, writer) + outf.close() + + +def get_header_field_list(main_fields, entity_type, entity_fields): + if _is_composed_key(entity_type): + key, value = _get_key_value_composed(entity_type) + main_entity = key + sub_entity = value + else: + main_entity = None + sub_entity = entity_type + + if main_entity: + output1 = [HIER_SEPARATOR.join([main_entity, x]) for x in main_fields] + else: + output1 = main_fields + output2 = [HIER_SEPARATOR.join([sub_entity, x]) for x in entity_fields] + return output1 + output2 + + +def _write_to_file(object_fields, items, entity_fields, writer): + if not items: + # it could be that the entity is just not present for the tweet + # e.g. tweet hashtag is always present, even as [], however + # tweet media may not be present + return + if isinstance(items, dict): + # this happens e.g. for "place" of a tweet + row = object_fields + # there might be composed keys in de list of required fields + entity_field_values = [x for x in entity_fields if not _is_composed_key(x)] + entity_field_composed = [x for x in entity_fields if _is_composed_key(x)] + for field in entity_field_values: + value = items[field] + if isinstance(value, list): + row += value + else: + row += [value] + # now check required dictionaries + for d in entity_field_composed: + kd, vd = _get_key_value_composed(d) + json_dict = items[kd] + if not isinstance(json_dict, dict): + raise RuntimeError( + """Key {} does not contain a dictionary + in the json file""".format( + kd + ) + ) + row += [json_dict[vd]] + writer.writerow(row) + return + # in general it is a list + for item in items: + row = object_fields + extract_fields(item, entity_fields) + writer.writerow(row) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/twitter_demo.py b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/twitter_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..554bdfef511190b28504f9ded8dc8a6098e16ed9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/twitter_demo.py @@ -0,0 +1,306 @@ +# Natural Language Toolkit: Twitter client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +Examples to demo the :py:mod:`twitterclient` code. + +These demo functions should all run, with the following caveats: + +* You must have obtained API keys from Twitter, and installed them according to + the instructions in the `twitter HOWTO `_. 
+ +* If you are on a slow network, some of the calls to the Twitter API may + timeout. + +* If you are being rate limited while searching, you will receive a 420 + error response. + +* Your terminal window / console must be able to display UTF-8 encoded characters. + +For documentation about the Twitter APIs, see `The Streaming APIs Overview +`_ and `The REST APIs Overview +`_. + +For error codes see Twitter's +`Error Codes and Responses ` +""" + +import datetime +import json +from functools import wraps +from io import StringIO + +from nltk.twitter import ( + Query, + Streamer, + TweetViewer, + TweetWriter, + Twitter, + credsfromfile, +) + +SPACER = "###################################" + + +def verbose(func): + """Decorator for demo functions""" + + @wraps(func) + def with_formatting(*args, **kwargs): + print() + print(SPACER) + print("Using %s" % (func.__name__)) + print(SPACER) + return func(*args, **kwargs) + + return with_formatting + + +def yesterday(): + """ + Get yesterday's datetime as a 5-tuple. + """ + date = datetime.datetime.now() + date -= datetime.timedelta(days=1) + date_tuple = date.timetuple()[:6] + return date_tuple + + +def setup(): + """ + Initialize global variables for the demos. + """ + global USERIDS, FIELDS + + USERIDS = ["759251", "612473", "15108702", "6017542", "2673523800"] + # UserIDs corresponding to\ + # @CNN, @BBCNews, @ReutersLive, @BreakingNews, @AJELive + FIELDS = ["id_str"] + + +@verbose +def twitterclass_demo(): + """ + Use the simplified :class:`Twitter` class to write some tweets to a file. + """ + tw = Twitter() + print("Track from the public stream\n") + tw.tweets(keywords="love, hate", limit=10) # public stream + print(SPACER) + print("Search past Tweets\n") + tw = Twitter() + tw.tweets(keywords="love, hate", stream=False, limit=10) # search past tweets + print(SPACER) + print( + "Follow two accounts in the public stream" + + " -- be prepared to wait a few minutes\n" + ) + tw = Twitter() + tw.tweets(follow=["759251", "6017542"], stream=True, limit=5) # public stream + + +@verbose +def sampletoscreen_demo(limit=20): + """ + Sample from the Streaming API and send output to terminal. + """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetViewer(limit=limit)) + client.sample() + + +@verbose +def tracktoscreen_demo(track="taylor swift", limit=10): + """ + Track keywords from the public Streaming API and send output to terminal. + """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetViewer(limit=limit)) + client.filter(track=track) + + +@verbose +def search_demo(keywords="nltk"): + """ + Use the REST API to search for past tweets containing a given keyword. + """ + oauth = credsfromfile() + client = Query(**oauth) + for tweet in client.search_tweets(keywords=keywords, limit=10): + print(tweet["text"]) + + +@verbose +def tweets_by_user_demo(user="NLTK_org", count=200): + """ + Use the REST API to search for past tweets by a given user. + """ + oauth = credsfromfile() + client = Query(**oauth) + client.register(TweetWriter()) + client.user_tweets(user, count) + + +@verbose +def lookup_by_userid_demo(): + """ + Use the REST API to convert a userID to a screen name. 
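    The demo wraps `Query.user_info_from_id` (defined in `twitterclient.py`). A
    minimal sketch of calling that method directly, assuming a valid credentials
    file is in place; the ID shown is one of the USERIDS initialised in `setup()`:

        oauth = credsfromfile()
        client = Query(**oauth)
        for info in client.user_info_from_id(["759251"]):
            print(info["screen_name"], info["followers_count"])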
+ """ + oauth = credsfromfile() + client = Query(**oauth) + user_info = client.user_info_from_id(USERIDS) + for info in user_info: + name = info["screen_name"] + followers = info["followers_count"] + following = info["friends_count"] + print(f"{name}, followers: {followers}, following: {following}") + + +@verbose +def followtoscreen_demo(limit=10): + """ + Using the Streaming API, select just the tweets from a specified list of + userIDs. + + This is will only give results in a reasonable time if the users in + question produce a high volume of tweets, and may even so show some delay. + """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetViewer(limit=limit)) + client.statuses.filter(follow=USERIDS) + + +@verbose +def streamtofile_demo(limit=20): + """ + Write 20 tweets sampled from the public Streaming API to a file. + """ + oauth = credsfromfile() + client = Streamer(**oauth) + client.register(TweetWriter(limit=limit, repeat=False)) + client.statuses.sample() + + +@verbose +def limit_by_time_demo(keywords="nltk"): + """ + Query the REST API for Tweets about NLTK since yesterday and send + the output to terminal. + + This example makes the assumption that there are sufficient Tweets since + yesterday for the date to be an effective cut-off. + """ + date = yesterday() + dt_date = datetime.datetime(*date) + oauth = credsfromfile() + client = Query(**oauth) + client.register(TweetViewer(limit=100, lower_date_limit=date)) + + print(f"Cutoff date: {dt_date}\n") + + for tweet in client.search_tweets(keywords=keywords): + print("{} ".format(tweet["created_at"]), end="") + client.handler.handle(tweet) + + +@verbose +def corpusreader_demo(): + """ + Use `TwitterCorpusReader` tp read a file of tweets, and print out + + * some full tweets in JSON format; + * some raw strings from the tweets (i.e., the value of the `text` field); and + * the result of tokenising the raw strings. + + """ + from nltk.corpus import twitter_samples as tweets + + print() + print("Complete tweet documents") + print(SPACER) + for tweet in tweets.docs("tweets.20150430-223406.json")[:1]: + print(json.dumps(tweet, indent=1, sort_keys=True)) + + print() + print("Raw tweet strings:") + print(SPACER) + for text in tweets.strings("tweets.20150430-223406.json")[:15]: + print(text) + + print() + print("Tokenized tweet strings:") + print(SPACER) + for toks in tweets.tokenized("tweets.20150430-223406.json")[:15]: + print(toks) + + +@verbose +def expand_tweetids_demo(): + """ + Given a file object containing a list of Tweet IDs, fetch the + corresponding full Tweets, if available. + + """ + ids_f = StringIO( + """\ + 588665495492124672 + 588665495487909888 + 588665495508766721 + 588665495513006080 + 588665495517200384 + 588665495487811584 + 588665495525588992 + 588665495487844352 + 588665495492014081 + 588665495512948737""" + ) + oauth = credsfromfile() + client = Query(**oauth) + hydrated = client.expand_tweetids(ids_f) + + for tweet in hydrated: + id_str = tweet["id_str"] + print(f"id: {id_str}") + text = tweet["text"] + if text.startswith("@null"): + text = "[Tweet not available]" + print(text + "\n") + + +ALL = [ + twitterclass_demo, + sampletoscreen_demo, + tracktoscreen_demo, + search_demo, + tweets_by_user_demo, + lookup_by_userid_demo, + followtoscreen_demo, + streamtofile_demo, + limit_by_time_demo, + corpusreader_demo, + expand_tweetids_demo, +] + +""" +Select demo functions to run. E.g. replace the following line with "DEMOS = +ALL[8:]" to execute only the final three demos. 
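Alternatively, assign an explicit list, e.g. "DEMOS = [search_demo, corpusreader_demo]",
to run just those two demos.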
+""" +DEMOS = ALL[:] + +if __name__ == "__main__": + setup() + + for demo in DEMOS: + demo() + + print("\n" + SPACER) + print("All demos completed") + print(SPACER) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/twitterclient.py b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/twitterclient.py new file mode 100644 index 0000000000000000000000000000000000000000..d556738e0849faf35454166cec8a5949fcca93dc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/twitterclient.py @@ -0,0 +1,564 @@ +# Natural Language Toolkit: Twitter client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + + +""" +NLTK Twitter client + +This module offers methods for collecting and processing Tweets. Most of the +functionality depends on access to the Twitter APIs, and this is handled via +the third party Twython library. + +If one of the methods below returns an integer, it is probably a `Twitter +error code `_. For +example, the response of '420' means that you have reached the limit of the +requests you can currently make to the Twitter API. Currently, `rate limits +for the search API `_ are +divided into 15 minute windows. +""" + +import datetime +import gzip +import itertools +import json +import os +import time + +import requests +from twython import Twython, TwythonStreamer +from twython.exceptions import TwythonError, TwythonRateLimitError + +from nltk.twitter.api import BasicTweetHandler, TweetHandlerI +from nltk.twitter.util import credsfromfile, guess_path + + +class Streamer(TwythonStreamer): + """ + Retrieve data from the Twitter Streaming API. + + The streaming API requires + `OAuth 1.0 `_ authentication. + """ + + def __init__(self, app_key, app_secret, oauth_token, oauth_token_secret): + + self.handler = None + self.do_continue = True + TwythonStreamer.__init__( + self, app_key, app_secret, oauth_token, oauth_token_secret + ) + + def register(self, handler): + """ + Register a method for handling Tweets. + + :param TweetHandlerI handler: method for viewing + """ + self.handler = handler + + def on_success(self, data): + """ + :param data: response from Twitter API + """ + if self.do_continue: + if self.handler is not None: + if "text" in data: + self.handler.counter += 1 + self.handler.handle(data) + self.do_continue = self.handler.do_continue() + else: + raise ValueError("No data handler has been registered.") + else: + self.disconnect() + self.handler.on_finish() + + def on_error(self, status_code, data): + """ + :param status_code: The status code returned by the Twitter API + :param data: The response from Twitter API + + """ + print(status_code) + + def sample(self): + """ + Wrapper for 'statuses / sample' API call + """ + while self.do_continue: + + # Stream in an endless loop until limit is reached. 
See twython + # issue 288: https://github.com/ryanmcgrath/twython/issues/288 + # colditzjb commented on 9 Dec 2014 + + try: + self.statuses.sample() + except requests.exceptions.ChunkedEncodingError as e: + if e is not None: + print(f"Error (stream will continue): {e}") + continue + + def filter(self, track="", follow="", lang="en"): + """ + Wrapper for 'statuses / filter' API call + """ + while self.do_continue: + # Stream in an endless loop until limit is reached + + try: + if track == "" and follow == "": + msg = "Please supply a value for 'track', 'follow'" + raise ValueError(msg) + self.statuses.filter(track=track, follow=follow, lang=lang) + except requests.exceptions.ChunkedEncodingError as e: + if e is not None: + print(f"Error (stream will continue): {e}") + continue + + +class Query(Twython): + """ + Retrieve data from the Twitter REST API. + """ + + def __init__(self, app_key, app_secret, oauth_token, oauth_token_secret): + """ + :param app_key: (optional) Your applications key + :param app_secret: (optional) Your applications secret key + :param oauth_token: (optional) When using **OAuth 1**, combined with + oauth_token_secret to make authenticated calls + :param oauth_token_secret: (optional) When using **OAuth 1** combined + with oauth_token to make authenticated calls + """ + self.handler = None + self.do_continue = True + Twython.__init__(self, app_key, app_secret, oauth_token, oauth_token_secret) + + def register(self, handler): + """ + Register a method for handling Tweets. + + :param TweetHandlerI handler: method for viewing or writing Tweets to a file. + """ + self.handler = handler + + def expand_tweetids(self, ids_f, verbose=True): + """ + Given a file object containing a list of Tweet IDs, fetch the + corresponding full Tweets from the Twitter API. + + The API call `statuses/lookup` will fail to retrieve a Tweet if the + user has deleted it. + + This call to the Twitter API is rate-limited. See + for details. + + :param ids_f: input file object consisting of Tweet IDs, one to a line + :return: iterable of Tweet objects in JSON format + """ + ids = [line.strip() for line in ids_f if line] + + if verbose: + print(f"Counted {len(ids)} Tweet IDs in {ids_f}.") + + # The Twitter endpoint takes lists of up to 100 ids, so we chunk the + # ids. + id_chunks = [ids[i : i + 100] for i in range(0, len(ids), 100)] + + chunked_tweets = (self.lookup_status(id=chunk) for chunk in id_chunks) + + return itertools.chain.from_iterable(chunked_tweets) + + def _search_tweets(self, keywords, limit=100, lang="en"): + """ + Assumes that the handler has been informed. Fetches Tweets from + search_tweets generator output and passses them to handler + + :param str keywords: A list of query terms to search for, written as\ + a comma-separated string. + :param int limit: Number of Tweets to process + :param str lang: language + """ + while True: + tweets = self.search_tweets( + keywords=keywords, limit=limit, lang=lang, max_id=self.handler.max_id + ) + for tweet in tweets: + self.handler.handle(tweet) + if not (self.handler.do_continue() and self.handler.repeat): + break + self.handler.on_finish() + + def search_tweets( + self, + keywords, + limit=100, + lang="en", + max_id=None, + retries_after_twython_exception=0, + ): + """ + Call the REST API ``'search/tweets'`` endpoint with some plausible + defaults. See `the Twitter search documentation + `_ for more information + about admissible search parameters. 
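        A minimal usage sketch, assuming valid credentials are available via
        `credsfromfile` (with no handler registered, the default
        `BasicTweetHandler` simply enforces the limit):

            oauth = credsfromfile()
            client = Query(**oauth)
            for tweet in client.search_tweets(keywords="nltk", limit=10):
                print(tweet["text"])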
+ + :param str keywords: A list of query terms to search for, written as\ + a comma-separated string + :param int limit: Number of Tweets to process + :param str lang: language + :param int max_id: id of the last tweet fetched + :param int retries_after_twython_exception: number of retries when\ + searching Tweets before raising an exception + :rtype: python generator + """ + if not self.handler: + # if no handler is provided, `BasicTweetHandler` provides minimum + # functionality for limiting the number of Tweets retrieved + self.handler = BasicTweetHandler(limit=limit) + + count_from_query = 0 + if max_id: + self.handler.max_id = max_id + else: + results = self.search( + q=keywords, count=min(100, limit), lang=lang, result_type="recent" + ) + count = len(results["statuses"]) + if count == 0: + print("No Tweets available through REST API for those keywords") + return + count_from_query = count + self.handler.max_id = results["statuses"][count - 1]["id"] - 1 + + for result in results["statuses"]: + yield result + self.handler.counter += 1 + if self.handler.do_continue() == False: + return + + # Pagination loop: keep fetching Tweets until the desired count is + # reached while dealing with Twitter rate limits. + retries = 0 + while count_from_query < limit: + try: + mcount = min(100, limit - count_from_query) + results = self.search( + q=keywords, + count=mcount, + lang=lang, + max_id=self.handler.max_id, + result_type="recent", + ) + except TwythonRateLimitError as e: + print(f"Waiting for 15 minutes -{e}") + time.sleep(15 * 60) # wait 15 minutes + continue + except TwythonError as e: + print(f"Fatal error in Twython request -{e}") + if retries_after_twython_exception == retries: + raise e + retries += 1 + + count = len(results["statuses"]) + if count == 0: + print("No more Tweets available through rest api") + return + count_from_query += count + # the max_id is also present in the Tweet metadata + # results['search_metadata']['next_results'], but as part of a + # query and difficult to fetch. This is doing the equivalent + # (last tweet id minus one) + self.handler.max_id = results["statuses"][count - 1]["id"] - 1 + + for result in results["statuses"]: + yield result + self.handler.counter += 1 + if self.handler.do_continue() == False: + return + + def user_info_from_id(self, userids): + """ + Convert a list of userIDs into a variety of information about the users. + + See . + + :param list userids: A list of integer strings corresponding to Twitter userIDs + :rtype: list(json) + """ + return [self.show_user(user_id=userid) for userid in userids] + + def user_tweets(self, screen_name, limit, include_rts="false"): + """ + Return a collection of the most recent Tweets posted by the user + + :param str user: The user's screen name; the initial '@' symbol\ + should be omitted + :param int limit: The number of Tweets to recover; 200 is the maximum allowed + :param str include_rts: Whether to include statuses which have been\ + retweeted by the user; possible values are 'true' and 'false' + """ + data = self.get_user_timeline( + screen_name=screen_name, count=limit, include_rts=include_rts + ) + for item in data: + self.handler.handle(item) + + +class Twitter: + """ + Wrapper class with restricted functionality and fewer options. 
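    A hedged sketch of typical usage, mirroring `twitterclass_demo` in
    `twitter_demo.py` (credentials are read from the file located via the
    TWITTER environment variable):

        tw = Twitter()
        tw.tweets(keywords="love, hate", limit=10)                # sample the live public stream
        tw.tweets(keywords="love, hate", stream=False, limit=10)  # search past Tweets instead
        tw.tweets(follow=["759251", "6017542"], stream=True, limit=5)  # filter the stream by user ID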
+ """ + + def __init__(self): + self._oauth = credsfromfile() + self.streamer = Streamer(**self._oauth) + self.query = Query(**self._oauth) + + def tweets( + self, + keywords="", + follow="", + to_screen=True, + stream=True, + limit=100, + date_limit=None, + lang="en", + repeat=False, + gzip_compress=False, + ): + """ + Process some Tweets in a simple manner. + + :param str keywords: Keywords to use for searching or filtering + :param list follow: UserIDs to use for filtering Tweets from the public stream + :param bool to_screen: If `True`, display the tweet texts on the screen,\ + otherwise print to a file + + :param bool stream: If `True`, use the live public stream,\ + otherwise search past public Tweets + + :param int limit: The number of data items to process in the current\ + round of processing. + + :param tuple date_limit: The date at which to stop collecting\ + new data. This should be entered as a tuple which can serve as the\ + argument to `datetime.datetime`.\ + E.g. `date_limit=(2015, 4, 1, 12, 40)` for 12:40 pm on April 1 2015. + Note that, in the case of streaming, this is the maximum date, i.e.\ + a date in the future; if not, it is the minimum date, i.e. a date\ + in the past. + + :param str lang: language + + :param bool repeat: A flag to determine whether multiple files should\ + be written. If `True`, the length of each file will be set by the\ + value of `limit`. Use only if `to_screen` is `False`. See also + :py:func:`handle`. + + :param gzip_compress: if `True`, output files are compressed with gzip. + """ + if stream: + upper_date_limit = date_limit + lower_date_limit = None + else: + upper_date_limit = None + lower_date_limit = date_limit + + if to_screen: + handler = TweetViewer( + limit=limit, + upper_date_limit=upper_date_limit, + lower_date_limit=lower_date_limit, + ) + else: + handler = TweetWriter( + limit=limit, + upper_date_limit=upper_date_limit, + lower_date_limit=lower_date_limit, + repeat=repeat, + gzip_compress=gzip_compress, + ) + + if stream: + self.streamer.register(handler) + if keywords == "" and follow == "": + self.streamer.sample() + else: + self.streamer.filter(track=keywords, follow=follow, lang=lang) + else: + self.query.register(handler) + if keywords == "": + raise ValueError("Please supply at least one keyword to search for.") + else: + self.query._search_tweets(keywords, limit=limit, lang=lang) + + +class TweetViewer(TweetHandlerI): + """ + Handle data by sending it to the terminal. + """ + + def handle(self, data): + """ + Direct data to `sys.stdout` + + :return: return ``False`` if processing should cease, otherwise return ``True``. + :rtype: bool + :param data: Tweet object returned by Twitter API + """ + text = data["text"] + print(text) + + self.check_date_limit(data) + if self.do_stop: + return + + def on_finish(self): + print(f"Written {self.counter} Tweets") + + +class TweetWriter(TweetHandlerI): + """ + Handle data by writing it to a file.
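    A minimal sketch of combining `TweetWriter` with `Streamer`, based on
    `streamtofile_demo` in `twitter_demo.py` (the optional `gzip_compress`
    parameter below can be added to compress the output files):

        oauth = credsfromfile()
        client = Streamer(**oauth)
        client.register(TweetWriter(limit=100, repeat=False))
        client.statuses.sample()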
+ """ + + def __init__( + self, + limit=2000, + upper_date_limit=None, + lower_date_limit=None, + fprefix="tweets", + subdir="twitter-files", + repeat=False, + gzip_compress=False, + ): + """ + The difference between the upper and lower date limits depends on + whether Tweets are coming in an ascending date order (i.e. when + streaming) or descending date order (i.e. when searching past Tweets). + + :param int limit: number of data items to process in the current\ + round of processing. + + :param tuple upper_date_limit: The date at which to stop collecting new\ + data. This should be entered as a tuple which can serve as the\ + argument to `datetime.datetime`. E.g. `upper_date_limit=(2015, 4, 1, 12,\ + 40)` for 12:40 pm on April 1 2015. + + :param tuple lower_date_limit: The date at which to stop collecting new\ + data. See `upper_date_limit` for formatting. + + :param str fprefix: The prefix to use in creating file names for Tweet\ + collections. + + :param str subdir: The name of the directory where Tweet collection\ + files should be stored. + + :param bool repeat: flag to determine whether multiple files should be\ + written. If `True`, the length of each file will be set by the value\ + of `limit`. See also :py:func:`handle`. + + :param gzip_compress: if `True`, output files are compressed with gzip. + """ + self.fprefix = fprefix + self.subdir = guess_path(subdir) + self.gzip_compress = gzip_compress + self.fname = self.timestamped_file() + self.repeat = repeat + self.output = None + TweetHandlerI.__init__(self, limit, upper_date_limit, lower_date_limit) + + def timestamped_file(self): + """ + :return: timestamped file name + :rtype: str + """ + subdir = self.subdir + fprefix = self.fprefix + if subdir: + if not os.path.exists(subdir): + os.mkdir(subdir) + + fname = os.path.join(subdir, fprefix) + fmt = "%Y%m%d-%H%M%S" + timestamp = datetime.datetime.now().strftime(fmt) + if self.gzip_compress: + suffix = ".gz" + else: + suffix = "" + outfile = f"{fname}.{timestamp}.json{suffix}" + return outfile + + def handle(self, data): + """ + Write Twitter data as line-delimited JSON into one or more files. + + :return: return `False` if processing should cease, otherwise return `True`. + :param data: tweet object returned by Twitter API + """ + if self.startingup: + if self.gzip_compress: + self.output = gzip.open(self.fname, "w") + else: + self.output = open(self.fname, "w") + print(f"Writing to {self.fname}") + + json_data = json.dumps(data) + if self.gzip_compress: + self.output.write((json_data + "\n").encode("utf-8")) + else: + self.output.write(json_data + "\n") + + self.check_date_limit(data) + if self.do_stop: + return + + self.startingup = False + + def on_finish(self): + print(f"Written {self.counter} Tweets") + if self.output: + self.output.close() + + def do_continue(self): + if not self.repeat: + return TweetHandlerI.do_continue(self) + + if self.do_stop: + # stop for a functional cause (e.g.
date limit) + return False + + if self.counter == self.limit: + # repeat is True, thus close output file and + # create a new one + self._restart_file() + return True + + def _restart_file(self): + self.on_finish() + self.fname = self.timestamped_file() + self.startingup = True + self.counter = 0 diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/util.py b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/util.py new file mode 100644 index 0000000000000000000000000000000000000000..adfa08853867280da85642778c3e9fb89a532574 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/util.py @@ -0,0 +1,147 @@ +# Natural Language Toolkit: Twitter client +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +Authentication utilities to accompany `twitterclient`. +""" + +import os +import pprint + +from twython import Twython + + +def credsfromfile(creds_file=None, subdir=None, verbose=False): + """ + Convenience function for authentication + """ + return Authenticate().load_creds( + creds_file=creds_file, subdir=subdir, verbose=verbose + ) + + +class Authenticate: + """ + Methods for authenticating with Twitter. + """ + + def __init__(self): + self.creds_file = "credentials.txt" + self.creds_fullpath = None + + self.oauth = {} + try: + self.twitter_dir = os.environ["TWITTER"] + self.creds_subdir = self.twitter_dir + except KeyError: + self.twitter_dir = None + self.creds_subdir = None + + def load_creds(self, creds_file=None, subdir=None, verbose=False): + """ + Read OAuth credentials from a text file. + + File format for OAuth 1:: + + app_key=YOUR_APP_KEY + app_secret=YOUR_APP_SECRET + oauth_token=OAUTH_TOKEN + oauth_token_secret=OAUTH_TOKEN_SECRET + + + File format for OAuth 2:: + + app_key=YOUR_APP_KEY + app_secret=YOUR_APP_SECRET + access_token=ACCESS_TOKEN + + :param str file_name: File containing credentials. ``None`` (default) reads + data from `TWITTER/'credentials.txt'` + """ + if creds_file is not None: + self.creds_file = creds_file + + if subdir is None: + if self.creds_subdir is None: + msg = ( + "Supply a value to the 'subdir' parameter or" + + " set the TWITTER environment variable." 
+ ) + raise ValueError(msg) + else: + self.creds_subdir = subdir + + self.creds_fullpath = os.path.normpath( + os.path.join(self.creds_subdir, self.creds_file) + ) + + if not os.path.isfile(self.creds_fullpath): + raise OSError(f"Cannot find file {self.creds_fullpath}") + + with open(self.creds_fullpath) as infile: + if verbose: + print(f"Reading credentials file {self.creds_fullpath}") + + for line in infile: + if "=" in line: + name, value = line.split("=", 1) + self.oauth[name.strip()] = value.strip() + + self._validate_creds_file(verbose=verbose) + + return self.oauth + + def _validate_creds_file(self, verbose=False): + """Check validity of a credentials file.""" + oauth1 = False + oauth1_keys = ["app_key", "app_secret", "oauth_token", "oauth_token_secret"] + oauth2 = False + oauth2_keys = ["app_key", "app_secret", "access_token"] + if all(k in self.oauth for k in oauth1_keys): + oauth1 = True + elif all(k in self.oauth for k in oauth2_keys): + oauth2 = True + + if not (oauth1 or oauth2): + msg = f"Missing or incorrect entries in {self.creds_file}\n" + msg += pprint.pformat(self.oauth) + raise ValueError(msg) + elif verbose: + print(f'Credentials file "{self.creds_file}" looks good') + + +def add_access_token(creds_file=None): + """ + For OAuth 2, retrieve an access token for an app and append it to a + credentials file. + """ + if creds_file is None: + path = os.path.dirname(__file__) + creds_file = os.path.join(path, "credentials2.txt") + oauth2 = credsfromfile(creds_file=creds_file) + app_key = oauth2["app_key"] + app_secret = oauth2["app_secret"] + + twitter = Twython(app_key, app_secret, oauth_version=2) + access_token = twitter.obtain_access_token() + tok = f"access_token={access_token}\n" + with open(creds_file, "a") as infile: + print(tok, file=infile) + + +def guess_path(pth): + """ + If the path is not absolute, guess that it is a subdirectory of the + user's home directory. + + :param str pth: The pathname of the directory where files of tweets should be written + """ + if os.path.isabs(pth): + return pth + else: + return os.path.expanduser(os.path.join("~", pth))
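Taken together, these utilities assume a plain-text credentials file. A hedged
end-to-end sketch (the file name and the TWITTER environment variable follow the
conventions described in `load_creds` above; the key values are placeholders):

    # $TWITTER/credentials.txt
    #   app_key=YOUR_APP_KEY
    #   app_secret=YOUR_APP_SECRET
    #   oauth_token=OAUTH_TOKEN
    #   oauth_token_secret=OAUTH_TOKEN_SECRET

    from nltk.twitter import Query, TweetViewer, credsfromfile

    oauth = credsfromfile()             # reads $TWITTER/credentials.txt by default
    client = Query(**oauth)
    client.register(TweetViewer(limit=10))
    client.user_tweets("NLTK_org", 10)  # prints the text of the user's recent Tweets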