diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/ccg/lexicon.py b/llmeval-env/lib/python3.10/site-packages/nltk/ccg/lexicon.py new file mode 100644 index 0000000000000000000000000000000000000000..da7d00ab6bcdfa190f49fe7c141a23542426ff20 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/ccg/lexicon.py @@ -0,0 +1,338 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For license information, see LICENSE.TXT +""" +CCG Lexicons +""" + +import re +from collections import defaultdict + +from nltk.ccg.api import CCGVar, Direction, FunctionalCategory, PrimitiveCategory +from nltk.internals import deprecated +from nltk.sem.logic import Expression + +# ------------ +# Regular expressions used for parsing components of the lexicon +# ------------ + +# Parses a primitive category and subscripts +PRIM_RE = re.compile(r"""([A-Za-z]+)(\[[A-Za-z,]+\])?""") + +# Separates the next primitive category from the remainder of the +# string +NEXTPRIM_RE = re.compile(r"""([A-Za-z]+(?:\[[A-Za-z,]+\])?)(.*)""") + +# Separates the next application operator from the remainder +APP_RE = re.compile(r"""([\\/])([.,]?)([.,]?)(.*)""") + +# Parses the definition of the right-hand side (rhs) of either a word or a family +LEX_RE = re.compile(r"""([\S_]+)\s*(::|[-=]+>)\s*(.+)""", re.UNICODE) + +# Parses the right hand side that contains category and maybe semantic predicate +RHS_RE = re.compile(r"""([^{}]*[^ {}])\s*(\{[^}]+\})?""", re.UNICODE) + +# Parses the semantic predicate +SEMANTICS_RE = re.compile(r"""\{([^}]+)\}""", re.UNICODE) + +# Strips comments from a line +COMMENTS_RE = re.compile("""([^#]*)(?:#.*)?""") + + +class Token: + """ + Class representing a token. + + token => category {semantics} + e.g. eat => S\\var[pl]/var {\\x y.eat(x,y)} + + * `token` (string) + * `categ` (string) + * `semantics` (Expression) + """ + + def __init__(self, token, categ, semantics=None): + self._token = token + self._categ = categ + self._semantics = semantics + + def categ(self): + return self._categ + + def semantics(self): + return self._semantics + + def __str__(self): + semantics_str = "" + if self._semantics is not None: + semantics_str = " {" + str(self._semantics) + "}" + return "" + str(self._categ) + semantics_str + + def __cmp__(self, other): + if not isinstance(other, Token): + return -1 + return cmp((self._categ, self._semantics), other.categ(), other.semantics()) + + +class CCGLexicon: + """ + Class representing a lexicon for CCG grammars. + + * `primitives`: The list of primitive categories for the lexicon + * `families`: Families of categories + * `entries`: A mapping of words to possible categories + """ + + def __init__(self, start, primitives, families, entries): + self._start = PrimitiveCategory(start) + self._primitives = primitives + self._families = families + self._entries = entries + + def categories(self, word): + """ + Returns all the possible categories for a word + """ + return self._entries[word] + + def start(self): + """ + Return the target category for the parser + """ + return self._start + + def __str__(self): + """ + String representation of the lexicon. Used for debugging. 
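+
+        A rough sketch of the layout, assuming entries like those in the
+        ``openccg_tinytiny`` lexicon defined at the bottom of this module
+        (the exact category rendering comes from ``nltk.ccg.api``)::
+
+            I => NP
+            book => N[sg]
+            the => (NP[sg]/N[sg]) | (NP[pl]/N[pl])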
+ """ + string = "" + first = True + for ident in sorted(self._entries): + if not first: + string = string + "\n" + string = string + ident + " => " + + first = True + for cat in self._entries[ident]: + if not first: + string = string + " | " + else: + first = False + string = string + "%s" % cat + return string + + +# ----------- +# Parsing lexicons +# ----------- + + +def matchBrackets(string): + """ + Separate the contents matching the first set of brackets from the rest of + the input. + """ + rest = string[1:] + inside = "(" + + while rest != "" and not rest.startswith(")"): + if rest.startswith("("): + (part, rest) = matchBrackets(rest) + inside = inside + part + else: + inside = inside + rest[0] + rest = rest[1:] + if rest.startswith(")"): + return (inside + ")", rest[1:]) + raise AssertionError("Unmatched bracket in string '" + string + "'") + + +def nextCategory(string): + """ + Separate the string for the next portion of the category from the rest + of the string + """ + if string.startswith("("): + return matchBrackets(string) + return NEXTPRIM_RE.match(string).groups() + + +def parseApplication(app): + """ + Parse an application operator + """ + return Direction(app[0], app[1:]) + + +def parseSubscripts(subscr): + """ + Parse the subscripts for a primitive category + """ + if subscr: + return subscr[1:-1].split(",") + return [] + + +def parsePrimitiveCategory(chunks, primitives, families, var): + """ + Parse a primitive category + + If the primitive is the special category 'var', replace it with the + correct `CCGVar`. + """ + if chunks[0] == "var": + if chunks[1] is None: + if var is None: + var = CCGVar() + return (var, var) + + catstr = chunks[0] + if catstr in families: + (cat, cvar) = families[catstr] + if var is None: + var = cvar + else: + cat = cat.substitute([(cvar, var)]) + return (cat, var) + + if catstr in primitives: + subscrs = parseSubscripts(chunks[1]) + return (PrimitiveCategory(catstr, subscrs), var) + raise AssertionError( + "String '" + catstr + "' is neither a family nor primitive category." + ) + + +def augParseCategory(line, primitives, families, var=None): + """ + Parse a string representing a category, and returns a tuple with + (possibly) the CCG variable for the category + """ + (cat_string, rest) = nextCategory(line) + + if cat_string.startswith("("): + (res, var) = augParseCategory(cat_string[1:-1], primitives, families, var) + + else: + (res, var) = parsePrimitiveCategory( + PRIM_RE.match(cat_string).groups(), primitives, families, var + ) + + while rest != "": + app = APP_RE.match(rest).groups() + direction = parseApplication(app[0:3]) + rest = app[3] + + (cat_string, rest) = nextCategory(rest) + if cat_string.startswith("("): + (arg, var) = augParseCategory(cat_string[1:-1], primitives, families, var) + else: + (arg, var) = parsePrimitiveCategory( + PRIM_RE.match(cat_string).groups(), primitives, families, var + ) + res = FunctionalCategory(res, arg, direction) + + return (res, var) + + +def fromstring(lex_str, include_semantics=False): + """ + Convert string representation into a lexicon for CCGs. + """ + CCGVar.reset_id() + primitives = [] + families = {} + entries = defaultdict(list) + for line in lex_str.splitlines(): + # Strip comments and leading/trailing whitespace. + line = COMMENTS_RE.match(line).groups()[0].strip() + if line == "": + continue + + if line.startswith(":-"): + # A line of primitive categories. 
+ # The first one is the target category + # ie, :- S, N, NP, VP + primitives = primitives + [ + prim.strip() for prim in line[2:].strip().split(",") + ] + else: + # Either a family definition, or a word definition + (ident, sep, rhs) = LEX_RE.match(line).groups() + (catstr, semantics_str) = RHS_RE.match(rhs).groups() + (cat, var) = augParseCategory(catstr, primitives, families) + + if sep == "::": + # Family definition + # ie, Det :: NP/N + families[ident] = (cat, var) + else: + semantics = None + if include_semantics is True: + if semantics_str is None: + raise AssertionError( + line + + " must contain semantics because include_semantics is set to True" + ) + else: + semantics = Expression.fromstring( + SEMANTICS_RE.match(semantics_str).groups()[0] + ) + # Word definition + # ie, which => (N\N)/(S/NP) + entries[ident].append(Token(ident, cat, semantics)) + return CCGLexicon(primitives[0], primitives, families, entries) + + +@deprecated("Use fromstring() instead.") +def parseLexicon(lex_str): + return fromstring(lex_str) + + +openccg_tinytiny = fromstring( + """ + # Rather minimal lexicon based on the openccg `tinytiny' grammar. + # Only incorporates a subset of the morphological subcategories, however. + :- S,NP,N # Primitive categories + Det :: NP/N # Determiners + Pro :: NP + IntransVsg :: S\\NP[sg] # Tensed intransitive verbs (singular) + IntransVpl :: S\\NP[pl] # Plural + TransVsg :: S\\NP[sg]/NP # Tensed transitive verbs (singular) + TransVpl :: S\\NP[pl]/NP # Plural + + the => NP[sg]/N[sg] + the => NP[pl]/N[pl] + + I => Pro + me => Pro + we => Pro + us => Pro + + book => N[sg] + books => N[pl] + + peach => N[sg] + peaches => N[pl] + + policeman => N[sg] + policemen => N[pl] + + boy => N[sg] + boys => N[pl] + + sleep => IntransVsg + sleep => IntransVpl + + eat => IntransVpl + eat => TransVpl + eats => IntransVsg + eats => TransVsg + + see => TransVpl + sees => TransVsg + """ +) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9df093cb0a7964ea43df052ac42fb46b6fbadee0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__init__.py @@ -0,0 +1,92 @@ +# Natural Language Toolkit: Clusterers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +""" +This module contains a number of basic clustering algorithms. Clustering +describes the task of discovering groups of similar items with a large +collection. It is also describe as unsupervised machine learning, as the data +from which it learns is unannotated with class information, as is the case for +supervised learning. Annotated data is difficult and expensive to obtain in +the quantities required for the majority of supervised learning algorithms. +This problem, the knowledge acquisition bottleneck, is common to most natural +language processing tasks, thus fueling the need for quality unsupervised +approaches. + +This module contains a k-means clusterer, E-M clusterer and a group average +agglomerative clusterer (GAAC). All these clusterers involve finding good +cluster groupings for a set of vectors in multi-dimensional space. + +The K-means clusterer starts with k arbitrary chosen means then allocates each +vector to the cluster with the closest mean. It then recalculates the means of +each cluster as the centroid of the vectors in the cluster. 
This process +repeats until the cluster memberships stabilise. This is a hill-climbing +algorithm which may converge to a local maximum. Hence the clustering is +often repeated with random initial means and the most commonly occurring +output means are chosen. + +The GAAC clusterer starts with each of the *N* vectors as singleton clusters. +It then iteratively merges pairs of clusters which have the closest centroids. +This continues until there is only one cluster. The order of merges gives rise +to a dendrogram - a tree with the earlier merges lower than later merges. The +membership of a given number of clusters *c*, *1 <= c <= N*, can be found by +cutting the dendrogram at depth *c*. + +The Gaussian EM clusterer models the vectors as being produced by a mixture +of k Gaussian sources. The parameters of these sources (prior probability, +mean and covariance matrix) are then found to maximise the likelihood of the +given data. This is done with the expectation maximisation algorithm. It +starts with k arbitrarily chosen means, priors and covariance matrices. It +then calculates the membership probabilities for each vector in each of the +clusters - this is the 'E' step. The cluster parameters are then updated in +the 'M' step using the maximum likelihood estimate from the cluster membership +probabilities. This process continues until the likelihood of the data does +not significantly increase. + +They all extend the ClusterI interface which defines common operations +available with each clusterer. These operations include: + +- cluster: clusters a sequence of vectors +- classify: assign a vector to a cluster +- classification_probdist: give the probability distribution over cluster memberships + +The current existing classifiers also extend cluster.VectorSpace, an +abstract class which allows for singular value decomposition (SVD) and vector +normalisation. SVD is used to reduce the dimensionality of the vector space in +such a manner as to preserve as much of the variation as possible, by +reparameterising the axes in order of variability and discarding all bar the +first d dimensions. Normalisation ensures that vectors fall in the unit +hypersphere. + +Usage example (see also demo()):: + + from nltk import cluster + from nltk.cluster import euclidean_distance + from numpy import array + + vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]] + + # initialise the clusterer (will also assign the vectors to clusters) + clusterer = cluster.KMeansClusterer(2, euclidean_distance) + clusterer.cluster(vectors, True) + + # classify a new vector + print(clusterer.classify(array([3, 3]))) + +Note that the vectors must use numpy array-like +objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for +efficiency when required. 
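+
+A similar sketch for the EM clusterer, mirroring ``nltk.cluster.em.demo()``
+(the vectors, initial means and bias below are illustrative values only)::
+
+    from nltk.cluster import EMClusterer
+    from numpy import array
+
+    vectors = [array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]
+
+    # two Gaussian sources, seeded with two nearby initial means
+    clusterer = EMClusterer([[4, 2], [4, 2.01]], bias=0.1)
+    clusters = clusterer.cluster(vectors, True, trace=False)
+
+    # probability distribution over cluster memberships for a new vector
+    pdist = clusterer.classification_probdist(array([2, 2]))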
+""" + +from nltk.cluster.em import EMClusterer +from nltk.cluster.gaac import GAAClusterer +from nltk.cluster.kmeans import KMeansClusterer +from nltk.cluster.util import ( + Dendrogram, + VectorSpaceClusterer, + cosine_distance, + euclidean_distance, +) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1224978dd92638e71d33b634c5dc30caf7f7dbb5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c10b6a121f963c7b55c9808198f3fc228191948 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f62747d3a601cf96c916d63b032e8f1507fb815 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1d179b6aeab050cd8b9666ea690a3af7dd947ac Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3235d84f5352551bf24586f9c797d874a60405fb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..351cadbec2209dfd24a3dcd8aaffd83613ce5a44 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/api.py b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/api.py new file mode 100644 index 0000000000000000000000000000000000000000..8da588408f83894b512166334197ec43b6899631 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/api.py @@ -0,0 +1,74 @@ +# Natural Language Toolkit: Clusterer Interfaces +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Porting: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod + +from nltk.probability import DictionaryProbDist + + +class ClusterI(metaclass=ABCMeta): + """ + Interface covering basic clustering functionality. 
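+
+    A minimal conforming implementation, purely for illustration (the real
+    clusterers in this package extend ``VectorSpaceClusterer`` instead)::
+
+        class SingleClusterer(ClusterI):
+            # trivially assigns every vector to a single cluster
+            def cluster(self, vectors, assign_clusters=False):
+                return [0] * len(vectors) if assign_clusters else None
+
+            def classify(self, token):
+                return 0
+
+            def num_clusters(self):
+                return 1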
+ """ + + @abstractmethod + def cluster(self, vectors, assign_clusters=False): + """ + Assigns the vectors to clusters, learning the clustering parameters + from the data. Returns a cluster identifier for each vector. + """ + + @abstractmethod + def classify(self, token): + """ + Classifies the token into a cluster, setting the token's CLUSTER + parameter to that cluster identifier. + """ + + def likelihood(self, vector, label): + """ + Returns the likelihood (a float) of the token having the + corresponding cluster. + """ + if self.classify(vector) == label: + return 1.0 + else: + return 0.0 + + def classification_probdist(self, vector): + """ + Classifies the token into a cluster, returning + a probability distribution over the cluster identifiers. + """ + likelihoods = {} + sum = 0.0 + for cluster in self.cluster_names(): + likelihoods[cluster] = self.likelihood(vector, cluster) + sum += likelihoods[cluster] + for cluster in self.cluster_names(): + likelihoods[cluster] /= sum + return DictionaryProbDist(likelihoods) + + @abstractmethod + def num_clusters(self): + """ + Returns the number of clusters. + """ + + def cluster_names(self): + """ + Returns the names of the clusters. + :rtype: list + """ + return list(range(self.num_clusters())) + + def cluster_name(self, index): + """ + Returns the names of the cluster at index. + """ + return index diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/em.py b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/em.py new file mode 100644 index 0000000000000000000000000000000000000000..cb46fe35700afed79b728336bd1f07c33ed50dcb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/em.py @@ -0,0 +1,219 @@ +# Natural Language Toolkit: Expectation Maximization Clusterer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +try: + import numpy +except ImportError: + pass + +from nltk.cluster.util import VectorSpaceClusterer + + +class EMClusterer(VectorSpaceClusterer): + """ + The Gaussian EM clusterer models the vectors as being produced by + a mixture of k Gaussian sources. The parameters of these sources + (prior probability, mean and covariance matrix) are then found to + maximise the likelihood of the given data. This is done with the + expectation maximisation algorithm. It starts with k arbitrarily + chosen means, priors and covariance matrices. It then calculates + the membership probabilities for each vector in each of the + clusters; this is the 'E' step. The cluster parameters are then + updated in the 'M' step using the maximum likelihood estimate from + the cluster membership probabilities. This process continues until + the likelihood of the data does not significantly increase. + """ + + def __init__( + self, + initial_means, + priors=None, + covariance_matrices=None, + conv_threshold=1e-6, + bias=0.1, + normalise=False, + svd_dimensions=None, + ): + """ + Creates an EM clusterer with the given starting parameters, + convergence threshold and vector mangling parameters. 
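+
+        A typical construction, mirroring ``demo()`` at the bottom of this
+        module (the means and bias are illustrative values, not defaults)::
+
+            clusterer = EMClusterer([[4, 2], [4, 2.01]], bias=0.1)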
+ + :param initial_means: the means of the gaussian cluster centers + :type initial_means: [seq of] numpy array or seq of SparseArray + :param priors: the prior probability for each cluster + :type priors: numpy array or seq of float + :param covariance_matrices: the covariance matrix for each cluster + :type covariance_matrices: [seq of] numpy array + :param conv_threshold: maximum change in likelihood before deemed + convergent + :type conv_threshold: int or float + :param bias: variance bias used to ensure non-singular covariance + matrices + :type bias: float + :param normalise: should vectors be normalised to length 1 + :type normalise: boolean + :param svd_dimensions: number of dimensions to use in reducing vector + dimensionsionality with SVD + :type svd_dimensions: int + """ + VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) + self._means = numpy.array(initial_means, numpy.float64) + self._num_clusters = len(initial_means) + self._conv_threshold = conv_threshold + self._covariance_matrices = covariance_matrices + self._priors = priors + self._bias = bias + + def num_clusters(self): + return self._num_clusters + + def cluster_vectorspace(self, vectors, trace=False): + assert len(vectors) > 0 + + # set the parameters to initial values + dimensions = len(vectors[0]) + means = self._means + priors = self._priors + if not priors: + priors = self._priors = ( + numpy.ones(self._num_clusters, numpy.float64) / self._num_clusters + ) + covariances = self._covariance_matrices + if not covariances: + covariances = self._covariance_matrices = [ + numpy.identity(dimensions, numpy.float64) + for i in range(self._num_clusters) + ] + + # do the E and M steps until the likelihood plateaus + lastl = self._loglikelihood(vectors, priors, means, covariances) + converged = False + + while not converged: + if trace: + print("iteration; loglikelihood", lastl) + # E-step, calculate hidden variables, h[i,j] + h = numpy.zeros((len(vectors), self._num_clusters), numpy.float64) + for i in range(len(vectors)): + for j in range(self._num_clusters): + h[i, j] = priors[j] * self._gaussian( + means[j], covariances[j], vectors[i] + ) + h[i, :] /= sum(h[i, :]) + + # M-step, update parameters - cvm, p, mean + for j in range(self._num_clusters): + covariance_before = covariances[j] + new_covariance = numpy.zeros((dimensions, dimensions), numpy.float64) + new_mean = numpy.zeros(dimensions, numpy.float64) + sum_hj = 0.0 + for i in range(len(vectors)): + delta = vectors[i] - means[j] + new_covariance += h[i, j] * numpy.multiply.outer(delta, delta) + sum_hj += h[i, j] + new_mean += h[i, j] * vectors[i] + covariances[j] = new_covariance / sum_hj + means[j] = new_mean / sum_hj + priors[j] = sum_hj / len(vectors) + + # bias term to stop covariance matrix being singular + covariances[j] += self._bias * numpy.identity(dimensions, numpy.float64) + + # calculate likelihood - FIXME: may be broken + l = self._loglikelihood(vectors, priors, means, covariances) + + # check for convergence + if abs(lastl - l) < self._conv_threshold: + converged = True + lastl = l + + def classify_vectorspace(self, vector): + best = None + for j in range(self._num_clusters): + p = self._priors[j] * self._gaussian( + self._means[j], self._covariance_matrices[j], vector + ) + if not best or p > best[0]: + best = (p, j) + return best[1] + + def likelihood_vectorspace(self, vector, cluster): + cid = self.cluster_names().index(cluster) + return self._priors[cluster] * self._gaussian( + self._means[cluster], self._covariance_matrices[cluster], 
vector + ) + + def _gaussian(self, mean, cvm, x): + m = len(mean) + assert cvm.shape == (m, m), "bad sized covariance matrix, %s" % str(cvm.shape) + try: + det = numpy.linalg.det(cvm) + inv = numpy.linalg.inv(cvm) + a = det**-0.5 * (2 * numpy.pi) ** (-m / 2.0) + dx = x - mean + print(dx, inv) + b = -0.5 * numpy.dot(numpy.dot(dx, inv), dx) + return a * numpy.exp(b) + except OverflowError: + # happens when the exponent is negative infinity - i.e. b = 0 + # i.e. the inverse of cvm is huge (cvm is almost zero) + return 0 + + def _loglikelihood(self, vectors, priors, means, covariances): + llh = 0.0 + for vector in vectors: + p = 0 + for j in range(len(priors)): + p += priors[j] * self._gaussian(means[j], covariances[j], vector) + llh += numpy.log(p) + return llh + + def __repr__(self): + return "" % list(self._means) + + +def demo(): + """ + Non-interactive demonstration of the clusterers with simple 2-D data. + """ + + from nltk import cluster + + # example from figure 14.10, page 519, Manning and Schutze + + vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]] + means = [[4, 2], [4, 2.01]] + + clusterer = cluster.EMClusterer(means, bias=0.1) + clusters = clusterer.cluster(vectors, True, trace=True) + + print("Clustered:", vectors) + print("As: ", clusters) + print() + + for c in range(2): + print("Cluster:", c) + print("Prior: ", clusterer._priors[c]) + print("Mean: ", clusterer._means[c]) + print("Covar: ", clusterer._covariance_matrices[c]) + print() + + # classify a new vector + vector = numpy.array([2, 2]) + print("classify(%s):" % vector, end=" ") + print(clusterer.classify(vector)) + + # show the classification probabilities + vector = numpy.array([2, 2]) + print("classification_probdist(%s):" % vector) + pdist = clusterer.classification_probdist(vector) + for sample in pdist.samples(): + print(f"{sample} => {pdist.prob(sample) * 100:.0f}%") + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/gaac.py b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/gaac.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb9e2c51141ba915bf4defe2d8cdeadaa14e6b0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/gaac.py @@ -0,0 +1,170 @@ +# Natural Language Toolkit: Group Average Agglomerative Clusterer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +try: + import numpy +except ImportError: + pass + +from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance + + +class GAAClusterer(VectorSpaceClusterer): + """ + The Group Average Agglomerative starts with each of the N vectors as singleton + clusters. It then iteratively merges pairs of clusters which have the + closest centroids. This continues until there is only one cluster. The + order of merges gives rise to a dendrogram: a tree with the earlier merges + lower than later merges. The membership of a given number of clusters c, 1 + <= c <= N, can be found by cutting the dendrogram at depth c. + + This clusterer uses the cosine similarity metric only, which allows for + efficient speed-up in the clustering process. 
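+
+    A short usage sketch, following ``demo()`` at the bottom of this module
+    (the cluster count and input vectors are illustrative)::
+
+        clusterer = GAAClusterer(4)
+        clusters = clusterer.cluster(vectors, True)  # vectors: list of numpy arrays
+        clusterer.dendrogram().show()                # ASCII art of the merge order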
+ """ + + def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None): + VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) + self._num_clusters = num_clusters + self._dendrogram = None + self._groups_values = None + + def cluster(self, vectors, assign_clusters=False, trace=False): + # stores the merge order + self._dendrogram = Dendrogram( + [numpy.array(vector, numpy.float64) for vector in vectors] + ) + return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace) + + def cluster_vectorspace(self, vectors, trace=False): + # variables describing the initial situation + N = len(vectors) + cluster_len = [1] * N + cluster_count = N + index_map = numpy.arange(N) + + # construct the similarity matrix + dims = (N, N) + dist = numpy.ones(dims, dtype=float) * numpy.inf + for i in range(N): + for j in range(i + 1, N): + dist[i, j] = cosine_distance(vectors[i], vectors[j]) + + while cluster_count > max(self._num_clusters, 1): + i, j = numpy.unravel_index(dist.argmin(), dims) + if trace: + print("merging %d and %d" % (i, j)) + + # update similarities for merging i and j + self._merge_similarities(dist, cluster_len, i, j) + + # remove j + dist[:, j] = numpy.inf + dist[j, :] = numpy.inf + + # merge the clusters + cluster_len[i] = cluster_len[i] + cluster_len[j] + self._dendrogram.merge(index_map[i], index_map[j]) + cluster_count -= 1 + + # update the index map to reflect the indexes if we + # had removed j + index_map[j + 1 :] -= 1 + index_map[j] = N + + self.update_clusters(self._num_clusters) + + def _merge_similarities(self, dist, cluster_len, i, j): + # the new cluster i merged from i and j adopts the average of + # i and j's similarity to each other cluster, weighted by the + # number of points in the clusters i and j + i_weight = cluster_len[i] + j_weight = cluster_len[j] + weight_sum = i_weight + j_weight + + # update for x 0 + if self._should_normalise: + centroid = self._normalise(cluster[0]) + else: + centroid = numpy.array(cluster[0]) + for vector in cluster[1:]: + if self._should_normalise: + centroid += self._normalise(vector) + else: + centroid += vector + centroid /= len(cluster) + self._centroids.append(centroid) + self._num_clusters = len(self._centroids) + + def classify_vectorspace(self, vector): + best = None + for i in range(self._num_clusters): + centroid = self._centroids[i] + dist = cosine_distance(vector, centroid) + if not best or dist < best[0]: + best = (dist, i) + return best[1] + + def dendrogram(self): + """ + :return: The dendrogram representing the current clustering + :rtype: Dendrogram + """ + return self._dendrogram + + def num_clusters(self): + return self._num_clusters + + def __repr__(self): + return "" % self._num_clusters + + +def demo(): + """ + Non-interactive demonstration of the clusterers with simple 2-D data. 
+ """ + + from nltk.cluster import GAAClusterer + + # use a set of tokens with 2D indices + vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]] + + # test the GAAC clusterer with 4 clusters + clusterer = GAAClusterer(4) + clusters = clusterer.cluster(vectors, True) + + print("Clusterer:", clusterer) + print("Clustered:", vectors) + print("As:", clusters) + print() + + # show the dendrogram + clusterer.dendrogram().show() + + # classify a new vector + vector = numpy.array([3, 3]) + print("classify(%s):" % vector, end=" ") + print(clusterer.classify(vector)) + print() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/kmeans.py b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/kmeans.py new file mode 100644 index 0000000000000000000000000000000000000000..6b0d02f7dc0178f5bb1406d7a71a07ae46acaa93 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/kmeans.py @@ -0,0 +1,231 @@ +# Natural Language Toolkit: K-Means Clusterer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +import copy +import random +import sys + +try: + import numpy +except ImportError: + pass + + +from nltk.cluster.util import VectorSpaceClusterer + + +class KMeansClusterer(VectorSpaceClusterer): + """ + The K-means clusterer starts with k arbitrary chosen means then allocates + each vector to the cluster with the closest mean. It then recalculates the + means of each cluster as the centroid of the vectors in the cluster. This + process repeats until the cluster memberships stabilise. This is a + hill-climbing algorithm which may converge to a local maximum. Hence the + clustering is often repeated with random initial means and the most + commonly occurring output means are chosen. 
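+
+    A short usage sketch, following ``demo()`` at the bottom of this module
+    (two means, euclidean distance, ten random restarts; all illustrative)::
+
+        from nltk.cluster import KMeansClusterer, euclidean_distance
+
+        clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
+        clusters = clusterer.cluster(vectors, True)  # vectors: list of numpy arrays
+        means = clusterer.means()                    # the two learned means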
+ """ + + def __init__( + self, + num_means, + distance, + repeats=1, + conv_test=1e-6, + initial_means=None, + normalise=False, + svd_dimensions=None, + rng=None, + avoid_empty_clusters=False, + ): + + """ + :param num_means: the number of means to use (may use fewer) + :type num_means: int + :param distance: measure of distance between two vectors + :type distance: function taking two vectors and returning a float + :param repeats: number of randomised clustering trials to use + :type repeats: int + :param conv_test: maximum variation in mean differences before + deemed convergent + :type conv_test: number + :param initial_means: set of k initial means + :type initial_means: sequence of vectors + :param normalise: should vectors be normalised to length 1 + :type normalise: boolean + :param svd_dimensions: number of dimensions to use in reducing vector + dimensionsionality with SVD + :type svd_dimensions: int + :param rng: random number generator (or None) + :type rng: Random + :param avoid_empty_clusters: include current centroid in computation + of next one; avoids undefined behavior + when clusters become empty + :type avoid_empty_clusters: boolean + """ + VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) + self._num_means = num_means + self._distance = distance + self._max_difference = conv_test + assert not initial_means or len(initial_means) == num_means + self._means = initial_means + assert repeats >= 1 + assert not (initial_means and repeats > 1) + self._repeats = repeats + self._rng = rng if rng else random.Random() + self._avoid_empty_clusters = avoid_empty_clusters + + def cluster_vectorspace(self, vectors, trace=False): + if self._means and self._repeats > 1: + print("Warning: means will be discarded for subsequent trials") + + meanss = [] + for trial in range(self._repeats): + if trace: + print("k-means trial", trial) + if not self._means or trial > 1: + self._means = self._rng.sample(list(vectors), self._num_means) + self._cluster_vectorspace(vectors, trace) + meanss.append(self._means) + + if len(meanss) > 1: + # sort the means first (so that different cluster numbering won't + # effect the distance comparison) + for means in meanss: + means.sort(key=sum) + + # find the set of means that's minimally different from the others + min_difference = min_means = None + for i in range(len(meanss)): + d = 0 + for j in range(len(meanss)): + if i != j: + d += self._sum_distances(meanss[i], meanss[j]) + if min_difference is None or d < min_difference: + min_difference, min_means = d, meanss[i] + + # use the best means + self._means = min_means + + def _cluster_vectorspace(self, vectors, trace=False): + if self._num_means < len(vectors): + # perform k-means clustering + converged = False + while not converged: + # assign the tokens to clusters based on minimum distance to + # the cluster means + clusters = [[] for m in range(self._num_means)] + for vector in vectors: + index = self.classify_vectorspace(vector) + clusters[index].append(vector) + + if trace: + print("iteration") + # for i in range(self._num_means): + # print ' mean', i, 'allocated', len(clusters[i]), 'vectors' + + # recalculate cluster means by computing the centroid of each cluster + new_means = list(map(self._centroid, clusters, self._means)) + + # measure the degree of change from the previous step for convergence + difference = self._sum_distances(self._means, new_means) + if difference < self._max_difference: + converged = True + + # remember the new means + self._means = new_means + + def 
classify_vectorspace(self, vector): + # finds the closest cluster centroid + # returns that cluster's index + best_distance = best_index = None + for index in range(len(self._means)): + mean = self._means[index] + dist = self._distance(vector, mean) + if best_distance is None or dist < best_distance: + best_index, best_distance = index, dist + return best_index + + def num_clusters(self): + if self._means: + return len(self._means) + else: + return self._num_means + + def means(self): + """ + The means used for clustering. + """ + return self._means + + def _sum_distances(self, vectors1, vectors2): + difference = 0.0 + for u, v in zip(vectors1, vectors2): + difference += self._distance(u, v) + return difference + + def _centroid(self, cluster, mean): + if self._avoid_empty_clusters: + centroid = copy.copy(mean) + for vector in cluster: + centroid += vector + return centroid / (1 + len(cluster)) + else: + if not len(cluster): + sys.stderr.write("Error: no centroid defined for empty cluster.\n") + sys.stderr.write( + "Try setting argument 'avoid_empty_clusters' to True\n" + ) + assert False + centroid = copy.copy(cluster[0]) + for vector in cluster[1:]: + centroid += vector + return centroid / len(cluster) + + def __repr__(self): + return "" % (self._means, self._repeats) + + +################################################################################# + + +def demo(): + # example from figure 14.9, page 517, Manning and Schutze + + from nltk.cluster import KMeansClusterer, euclidean_distance + + vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]] + means = [[4, 3], [5, 5]] + + clusterer = KMeansClusterer(2, euclidean_distance, initial_means=means) + clusters = clusterer.cluster(vectors, True, trace=True) + + print("Clustered:", vectors) + print("As:", clusters) + print("Means:", clusterer.means()) + print() + + vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]] + + # test k-means using the euclidean distance metric, 2 means and repeat + # clustering 10 times with random seeds + + clusterer = KMeansClusterer(2, euclidean_distance, repeats=10) + clusters = clusterer.cluster(vectors, True) + print("Clustered:", vectors) + print("As:", clusters) + print("Means:", clusterer.means()) + print() + + # classify a new vector + vector = numpy.array([3, 3]) + print("classify(%s):" % vector, end=" ") + print(clusterer.classify(vector)) + print() + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/cluster/util.py b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/util.py new file mode 100644 index 0000000000000000000000000000000000000000..8b8ed5e9f0b97be7ce80eef87d36fdbf8c59bdfb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/cluster/util.py @@ -0,0 +1,300 @@ +# Natural Language Toolkit: Clusterer Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Contributor: J Richard Snape +# URL: +# For license information, see LICENSE.TXT +import copy +from abc import abstractmethod +from math import sqrt +from sys import stdout + +try: + import numpy +except ImportError: + pass + +from nltk.cluster.api import ClusterI + + +class VectorSpaceClusterer(ClusterI): + """ + Abstract clusterer which takes tokens and maps them into a vector space. + Optionally performs singular value decomposition to reduce the + dimensionality. 
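+
+    Subclasses implement ``cluster_vectorspace()`` and ``classify_vectorspace()``;
+    this base class applies the optional unit-length normalisation and SVD
+    projection before delegating to them. A construction sketch (argument
+    values are illustrative)::
+
+        clusterer = KMeansClusterer(2, euclidean_distance,
+                                    normalise=True, svd_dimensions=2)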
+ """ + + def __init__(self, normalise=False, svd_dimensions=None): + """ + :param normalise: should vectors be normalised to length 1 + :type normalise: boolean + :param svd_dimensions: number of dimensions to use in reducing vector + dimensionsionality with SVD + :type svd_dimensions: int + """ + self._Tt = None + self._should_normalise = normalise + self._svd_dimensions = svd_dimensions + + def cluster(self, vectors, assign_clusters=False, trace=False): + assert len(vectors) > 0 + + # normalise the vectors + if self._should_normalise: + vectors = list(map(self._normalise, vectors)) + + # use SVD to reduce the dimensionality + if self._svd_dimensions and self._svd_dimensions < len(vectors[0]): + [u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors))) + S = d[: self._svd_dimensions] * numpy.identity( + self._svd_dimensions, numpy.float64 + ) + T = u[:, : self._svd_dimensions] + Dt = vt[: self._svd_dimensions, :] + vectors = numpy.transpose(numpy.dot(S, Dt)) + self._Tt = numpy.transpose(T) + + # call abstract method to cluster the vectors + self.cluster_vectorspace(vectors, trace) + + # assign the vectors to clusters + if assign_clusters: + return [self.classify(vector) for vector in vectors] + + @abstractmethod + def cluster_vectorspace(self, vectors, trace): + """ + Finds the clusters using the given set of vectors. + """ + + def classify(self, vector): + if self._should_normalise: + vector = self._normalise(vector) + if self._Tt is not None: + vector = numpy.dot(self._Tt, vector) + cluster = self.classify_vectorspace(vector) + return self.cluster_name(cluster) + + @abstractmethod + def classify_vectorspace(self, vector): + """ + Returns the index of the appropriate cluster for the vector. + """ + + def likelihood(self, vector, label): + if self._should_normalise: + vector = self._normalise(vector) + if self._Tt is not None: + vector = numpy.dot(self._Tt, vector) + return self.likelihood_vectorspace(vector, label) + + def likelihood_vectorspace(self, vector, cluster): + """ + Returns the likelihood of the vector belonging to the cluster. + """ + predicted = self.classify_vectorspace(vector) + return 1.0 if cluster == predicted else 0.0 + + def vector(self, vector): + """ + Returns the vector after normalisation and dimensionality reduction + """ + if self._should_normalise: + vector = self._normalise(vector) + if self._Tt is not None: + vector = numpy.dot(self._Tt, vector) + return vector + + def _normalise(self, vector): + """ + Normalises the vector to unit length. + """ + return vector / sqrt(numpy.dot(vector, vector)) + + +def euclidean_distance(u, v): + """ + Returns the euclidean distance between vectors u and v. This is equivalent + to the length of the vector (u - v). + """ + diff = u - v + return sqrt(numpy.dot(diff, diff)) + + +def cosine_distance(u, v): + """ + Returns 1 minus the cosine of the angle between vectors v and u. This is + equal to ``1 - (u.v / |u||v|)``. 
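+
+    For example, orthogonal vectors such as ``[1, 0]`` and ``[0, 1]`` have a
+    cosine distance of 1.0, while parallel vectors such as ``[1, 0]`` and
+    ``[2, 0]`` have a cosine distance of 0.0.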
+ """ + return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v)))) + + +class _DendrogramNode: + """Tree node of a dendrogram.""" + + def __init__(self, value, *children): + self._value = value + self._children = children + + def leaves(self, values=True): + if self._children: + leaves = [] + for child in self._children: + leaves.extend(child.leaves(values)) + return leaves + elif values: + return [self._value] + else: + return [self] + + def groups(self, n): + queue = [(self._value, self)] + + while len(queue) < n: + priority, node = queue.pop() + if not node._children: + queue.push((priority, node)) + break + for child in node._children: + if child._children: + queue.append((child._value, child)) + else: + queue.append((0, child)) + # makes the earliest merges at the start, latest at the end + queue.sort() + + groups = [] + for priority, node in queue: + groups.append(node.leaves()) + return groups + + def __lt__(self, comparator): + return cosine_distance(self._value, comparator._value) < 0 + + +class Dendrogram: + """ + Represents a dendrogram, a tree with a specified branching order. This + must be initialised with the leaf items, then iteratively call merge for + each branch. This class constructs a tree representing the order of calls + to the merge function. + """ + + def __init__(self, items=[]): + """ + :param items: the items at the leaves of the dendrogram + :type items: sequence of (any) + """ + self._items = [_DendrogramNode(item) for item in items] + self._original_items = copy.copy(self._items) + self._merge = 1 + + def merge(self, *indices): + """ + Merges nodes at given indices in the dendrogram. The nodes will be + combined which then replaces the first node specified. All other nodes + involved in the merge will be removed. + + :param indices: indices of the items to merge (at least two) + :type indices: seq of int + """ + assert len(indices) >= 2 + node = _DendrogramNode(self._merge, *(self._items[i] for i in indices)) + self._merge += 1 + self._items[indices[0]] = node + for i in indices[1:]: + del self._items[i] + + def groups(self, n): + """ + Finds the n-groups of items (leaves) reachable from a cut at depth n. + :param n: number of groups + :type n: int + """ + if len(self._items) > 1: + root = _DendrogramNode(self._merge, *self._items) + else: + root = self._items[0] + return root.groups(n) + + def show(self, leaf_labels=[]): + """ + Print the dendrogram in ASCII art to standard out. 
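+
+        A small illustrative sequence (the grouping noted in the comments is
+        what ``merge()`` and ``groups()`` above produce for this input)::
+
+            d = Dendrogram(['a', 'b', 'c'])
+            d.merge(0, 1)   # 'a' and 'b' are merged first
+            d.groups(2)     # puts 'a' and 'b' together, 'c' on its own
+            d.show()        # prints the merge tree in ASCII art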
+ + :param leaf_labels: an optional list of strings to use for labeling the + leaves + :type leaf_labels: list + """ + + # ASCII rendering characters + JOIN, HLINK, VLINK = "+", "-", "|" + + # find the root (or create one) + if len(self._items) > 1: + root = _DendrogramNode(self._merge, *self._items) + else: + root = self._items[0] + leaves = self._original_items + + if leaf_labels: + last_row = leaf_labels + else: + last_row = ["%s" % leaf._value for leaf in leaves] + + # find the bottom row and the best cell width + width = max(map(len, last_row)) + 1 + lhalf = width // 2 + rhalf = int(width - lhalf - 1) + + # display functions + def format(centre, left=" ", right=" "): + return f"{lhalf * left}{centre}{right * rhalf}" + + def display(str): + stdout.write(str) + + # for each merge, top down + queue = [(root._value, root)] + verticals = [format(" ") for leaf in leaves] + while queue: + priority, node = queue.pop() + child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children)) + indices = list(map(leaves.index, child_left_leaf)) + if child_left_leaf: + min_idx = min(indices) + max_idx = max(indices) + for i in range(len(leaves)): + if leaves[i] in child_left_leaf: + if i == min_idx: + display(format(JOIN, " ", HLINK)) + elif i == max_idx: + display(format(JOIN, HLINK, " ")) + else: + display(format(JOIN, HLINK, HLINK)) + verticals[i] = format(VLINK) + elif min_idx <= i <= max_idx: + display(format(HLINK, HLINK, HLINK)) + else: + display(verticals[i]) + display("\n") + for child in node._children: + if child._children: + queue.append((child._value, child)) + queue.sort() + + for vertical in verticals: + display(vertical) + display("\n") + + # finally, display the last line + display("".join(item.center(width) for item in last_row)) + display("\n") + + def __repr__(self): + if len(self._items) > 1: + root = _DendrogramNode(self._merge, *self._items) + else: + root = self._items[0] + leaves = root.leaves(False) + return "" % len(leaves) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..82600563c78bd7fb762777967a43454ffd7ab226 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__init__.py @@ -0,0 +1,102 @@ +# Natural Language Toolkit: Parsers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +# + +""" +NLTK Parsers + +Classes and interfaces for producing tree structures that represent +the internal organization of a text. This task is known as "parsing" +the text, and the resulting tree structures are called the text's +"parses". Typically, the text is a single sentence, and the tree +structure represents the syntactic structure of the sentence. +However, parsers can also be used in other domains. For example, +parsers can be used to derive the morphological structure of the +morphemes that make up a word, or to derive the discourse structure +for a set of utterances. + +Sometimes, a single piece of text can be represented by more than one +tree structure. Texts represented by more than one tree structure are +called "ambiguous" texts. Note that there are actually two ways in +which a text can be ambiguous: + + - The text has multiple correct parses. + - There is not enough information to decide which of several + candidate parses is correct. 
+ +However, the parser module does *not* distinguish these two types of +ambiguity. + +The parser module defines ``ParserI``, a standard interface for parsing +texts; and two simple implementations of that interface, +``ShiftReduceParser`` and ``RecursiveDescentParser``. It also contains +three sub-modules for specialized kinds of parsing: + + - ``nltk.parser.chart`` defines chart parsing, which uses dynamic + programming to efficiently parse texts. + - ``nltk.parser.probabilistic`` defines probabilistic parsing, which + associates a probability with each parse. +""" + +from nltk.parse.api import ParserI +from nltk.parse.bllip import BllipParser +from nltk.parse.chart import ( + BottomUpChartParser, + BottomUpLeftCornerChartParser, + ChartParser, + LeftCornerChartParser, + SteppingChartParser, + TopDownChartParser, +) +from nltk.parse.corenlp import CoreNLPDependencyParser, CoreNLPParser +from nltk.parse.dependencygraph import DependencyGraph +from nltk.parse.earleychart import ( + EarleyChartParser, + FeatureEarleyChartParser, + FeatureIncrementalBottomUpChartParser, + FeatureIncrementalBottomUpLeftCornerChartParser, + FeatureIncrementalChartParser, + FeatureIncrementalTopDownChartParser, + IncrementalBottomUpChartParser, + IncrementalBottomUpLeftCornerChartParser, + IncrementalChartParser, + IncrementalLeftCornerChartParser, + IncrementalTopDownChartParser, +) +from nltk.parse.evaluate import DependencyEvaluator +from nltk.parse.featurechart import ( + FeatureBottomUpChartParser, + FeatureBottomUpLeftCornerChartParser, + FeatureChartParser, + FeatureTopDownChartParser, +) +from nltk.parse.malt import MaltParser +from nltk.parse.nonprojectivedependencyparser import ( + NaiveBayesDependencyScorer, + NonprojectiveDependencyParser, + ProbabilisticNonprojectiveParser, +) +from nltk.parse.pchart import ( + BottomUpProbabilisticChartParser, + InsideChartParser, + LongestChartParser, + RandomChartParser, + UnsortedChartParser, +) +from nltk.parse.projectivedependencyparser import ( + ProbabilisticProjectiveDependencyParser, + ProjectiveDependencyParser, +) +from nltk.parse.recursivedescent import ( + RecursiveDescentParser, + SteppingRecursiveDescentParser, +) +from nltk.parse.shiftreduce import ShiftReduceParser, SteppingShiftReduceParser +from nltk.parse.transitionparser import TransitionParser +from nltk.parse.util import TestGrammar, extract_test_sentences, load_parser +from nltk.parse.viterbi import ViterbiParser diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/api.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/api.py new file mode 100644 index 0000000000000000000000000000000000000000..280c1a5a8225e7832ecb6f80e4e96feb25ca4f8d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/api.py @@ -0,0 +1,72 @@ +# Natural Language Toolkit: Parser API +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +# + +import itertools + +from nltk.internals import overridden + + +class ParserI: + """ + A processing class for deriving trees that represent possible + structures for a sequence of tokens. These tree structures are + known as "parses". Typically, parsers are used to derive syntax + trees for sentences. But parsers can also be used to derive other + kinds of tree structure, such as morphological trees and discourse + structures. + + Subclasses must define: + - at least one of: ``parse()``, ``parse_sents()``. 
+ + Subclasses may define: + - ``grammar()`` + """ + + def grammar(self): + """ + :return: The grammar used by this parser. + """ + raise NotImplementedError() + + def parse(self, sent, *args, **kwargs): + """ + :return: An iterator that generates parse trees for the sentence. + When possible this list is sorted from most likely to least likely. + + :param sent: The sentence to be parsed + :type sent: list(str) + :rtype: iter(Tree) + """ + if overridden(self.parse_sents): + return next(self.parse_sents([sent], *args, **kwargs)) + elif overridden(self.parse_one): + return ( + tree + for tree in [self.parse_one(sent, *args, **kwargs)] + if tree is not None + ) + elif overridden(self.parse_all): + return iter(self.parse_all(sent, *args, **kwargs)) + else: + raise NotImplementedError() + + def parse_sents(self, sents, *args, **kwargs): + """ + Apply ``self.parse()`` to each element of ``sents``. + :rtype: iter(iter(Tree)) + """ + return (self.parse(sent, *args, **kwargs) for sent in sents) + + def parse_all(self, sent, *args, **kwargs): + """:rtype: list(Tree)""" + return list(self.parse(sent, *args, **kwargs)) + + def parse_one(self, sent, *args, **kwargs): + """:rtype: Tree or None""" + return next(self.parse(sent, *args, **kwargs), None) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/dependencygraph.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/dependencygraph.py new file mode 100644 index 0000000000000000000000000000000000000000..7300f0596baf3a6fdae5a4183aca862c1aa09ea8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/dependencygraph.py @@ -0,0 +1,799 @@ +# Natural Language Toolkit: Dependency Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jason Narad +# Steven Bird (modifications) +# +# URL: +# For license information, see LICENSE.TXT +# + +""" +Tools for reading and writing dependency trees. +The input is assumed to be in Malt-TAB format +(https://stp.lingfil.uu.se/~nivre/research/MaltXML.html). +""" + +import subprocess +import warnings +from collections import defaultdict +from itertools import chain +from pprint import pformat + +from nltk.internals import find_binary +from nltk.tree import Tree + +################################################################# +# DependencyGraph Class +################################################################# + + +class DependencyGraph: + """ + A container for the nodes and labelled edges of a dependency structure. + """ + + def __init__( + self, + tree_str=None, + cell_extractor=None, + zero_based=False, + cell_separator=None, + top_relation_label="ROOT", + ): + """Dependency graph. + + We place a dummy `TOP` node with the index 0, since the root node is + often assigned 0 as its head. This also means that the indexing of the + nodes corresponds directly to the Malt-TAB format, which starts at 1. + + If zero-based is True, then Malt-TAB-like input with node numbers + starting at 0 and the root node assigned -1 (as produced by, e.g., + zpar). + + :param str cell_separator: the cell separator. If not provided, cells + are split by whitespace. + + :param str top_relation_label: the label by which the top relation is + identified, for examlple, `ROOT`, `null` or `TOP`. 
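+
+        A small construction sketch using the same three-token Malt-TAB input
+        as the ``to_dot()`` doctest below (each line is ``word tag head``)::
+
+            dg = DependencyGraph('John N 2\\nloves V 0\\nMary N 2')
+            dg.root['word']   # 'loves', the token whose head is 0
+            dg.tree()         # Tree('loves', ['John', 'Mary'])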
+ """ + self.nodes = defaultdict( + lambda: { + "address": None, + "word": None, + "lemma": None, + "ctag": None, + "tag": None, + "feats": None, + "head": None, + "deps": defaultdict(list), + "rel": None, + } + ) + + self.nodes[0].update({"ctag": "TOP", "tag": "TOP", "address": 0}) + + self.root = None + + if tree_str: + self._parse( + tree_str, + cell_extractor=cell_extractor, + zero_based=zero_based, + cell_separator=cell_separator, + top_relation_label=top_relation_label, + ) + + def remove_by_address(self, address): + """ + Removes the node with the given address. References + to this node in others will still exist. + """ + del self.nodes[address] + + def redirect_arcs(self, originals, redirect): + """ + Redirects arcs to any of the nodes in the originals list + to the redirect node address. + """ + for node in self.nodes.values(): + new_deps = [] + for dep in node["deps"]: + if dep in originals: + new_deps.append(redirect) + else: + new_deps.append(dep) + node["deps"] = new_deps + + def add_arc(self, head_address, mod_address): + """ + Adds an arc from the node specified by head_address to the + node specified by the mod address. + """ + relation = self.nodes[mod_address]["rel"] + self.nodes[head_address]["deps"].setdefault(relation, []) + self.nodes[head_address]["deps"][relation].append(mod_address) + # self.nodes[head_address]['deps'].append(mod_address) + + def connect_graph(self): + """ + Fully connects all non-root nodes. All nodes are set to be dependents + of the root node. + """ + for node1 in self.nodes.values(): + for node2 in self.nodes.values(): + if node1["address"] != node2["address"] and node2["rel"] != "TOP": + relation = node2["rel"] + node1["deps"].setdefault(relation, []) + node1["deps"][relation].append(node2["address"]) + # node1['deps'].append(node2['address']) + + def get_by_address(self, node_address): + """Return the node with the given address.""" + return self.nodes[node_address] + + def contains_address(self, node_address): + """ + Returns true if the graph contains a node with the given node + address, false otherwise. + """ + return node_address in self.nodes + + def to_dot(self): + """Return a dot representation suitable for using with Graphviz. + + >>> dg = DependencyGraph( + ... 'John N 2\\n' + ... 'loves V 0\\n' + ... 'Mary N 2' + ... ) + >>> print(dg.to_dot()) + digraph G{ + edge [dir=forward] + node [shape=plaintext] + + 0 [label="0 (None)"] + 0 -> 2 [label="ROOT"] + 1 [label="1 (John)"] + 2 [label="2 (loves)"] + 2 -> 1 [label=""] + 2 -> 3 [label=""] + 3 [label="3 (Mary)"] + } + + """ + # Start the digraph specification + s = "digraph G{\n" + s += "edge [dir=forward]\n" + s += "node [shape=plaintext]\n" + + # Draw the remaining nodes + for node in sorted(self.nodes.values(), key=lambda v: v["address"]): + s += '\n{} [label="{} ({})"]'.format( + node["address"], + node["address"], + node["word"], + ) + for rel, deps in node["deps"].items(): + for dep in deps: + if rel is not None: + s += '\n{} -> {} [label="{}"]'.format(node["address"], dep, rel) + else: + s += "\n{} -> {} ".format(node["address"], dep) + s += "\n}" + + return s + + def _repr_svg_(self): + """Show SVG representation of the transducer (IPython magic). + >>> from nltk.test.setup_fixt import check_binary + >>> check_binary('dot') + >>> dg = DependencyGraph( + ... 'John N 2\\n' + ... 'loves V 0\\n' + ... 'Mary N 2' + ... 
) + >>> dg._repr_svg_().split('\\n')[0] + '' + + """ + dot_string = self.to_dot() + return dot2img(dot_string) + + def __str__(self): + return pformat(self.nodes) + + def __repr__(self): + return f"" + + @staticmethod + def load( + filename, zero_based=False, cell_separator=None, top_relation_label="ROOT" + ): + """ + :param filename: a name of a file in Malt-TAB format + :param zero_based: nodes in the input file are numbered starting from 0 + rather than 1 (as produced by, e.g., zpar) + :param str cell_separator: the cell separator. If not provided, cells + are split by whitespace. + :param str top_relation_label: the label by which the top relation is + identified, for examlple, `ROOT`, `null` or `TOP`. + + :return: a list of DependencyGraphs + + """ + with open(filename) as infile: + return [ + DependencyGraph( + tree_str, + zero_based=zero_based, + cell_separator=cell_separator, + top_relation_label=top_relation_label, + ) + for tree_str in infile.read().split("\n\n") + ] + + def left_children(self, node_index): + """ + Returns the number of left children under the node specified + by the given address. + """ + children = chain.from_iterable(self.nodes[node_index]["deps"].values()) + index = self.nodes[node_index]["address"] + return sum(1 for c in children if c < index) + + def right_children(self, node_index): + """ + Returns the number of right children under the node specified + by the given address. + """ + children = chain.from_iterable(self.nodes[node_index]["deps"].values()) + index = self.nodes[node_index]["address"] + return sum(1 for c in children if c > index) + + def add_node(self, node): + if not self.contains_address(node["address"]): + self.nodes[node["address"]].update(node) + + def _parse( + self, + input_, + cell_extractor=None, + zero_based=False, + cell_separator=None, + top_relation_label="ROOT", + ): + """Parse a sentence. + + :param extractor: a function that given a tuple of cells returns a + 7-tuple, where the values are ``word, lemma, ctag, tag, feats, head, + rel``. + + :param str cell_separator: the cell separator. If not provided, cells + are split by whitespace. + + :param str top_relation_label: the label by which the top relation is + identified, for examlple, `ROOT`, `null` or `TOP`. 
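+
+        A sketch of a custom ``cell_extractor`` (illustrative only): the
+        newer calling convention passes ``(cells, index)`` and expects an
+        8-tuple that additionally starts with the node index. The assumed
+        two-column ``word head`` layout below is made up for the example.
+
+        >>> def extract_2_cells(cells, index):
+        ...     word, head = cells
+        ...     rel = 'ROOT' if head == '0' else 'dep'
+        ...     return index, word, word, '_', '_', '', head, rel
+        >>> dg = DependencyGraph('John 2\\nloves 0\\nMary 2',
+        ...                      cell_extractor=extract_2_cells)
+        >>> print(dg.tree())
+        (loves John Mary)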
+ + """ + + def extract_3_cells(cells, index): + word, tag, head = cells + return index, word, word, tag, tag, "", head, "" + + def extract_4_cells(cells, index): + word, tag, head, rel = cells + return index, word, word, tag, tag, "", head, rel + + def extract_7_cells(cells, index): + line_index, word, lemma, tag, _, head, rel = cells + try: + index = int(line_index) + except ValueError: + # index can't be parsed as an integer, use default + pass + return index, word, lemma, tag, tag, "", head, rel + + def extract_10_cells(cells, index): + line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells + try: + index = int(line_index) + except ValueError: + # index can't be parsed as an integer, use default + pass + return index, word, lemma, ctag, tag, feats, head, rel + + extractors = { + 3: extract_3_cells, + 4: extract_4_cells, + 7: extract_7_cells, + 10: extract_10_cells, + } + + if isinstance(input_, str): + input_ = (line for line in input_.split("\n")) + + lines = (l.rstrip() for l in input_) + lines = (l for l in lines if l) + + cell_number = None + for index, line in enumerate(lines, start=1): + cells = line.split(cell_separator) + if cell_number is None: + cell_number = len(cells) + else: + assert cell_number == len(cells) + + if cell_extractor is None: + try: + cell_extractor = extractors[cell_number] + except KeyError as e: + raise ValueError( + "Number of tab-delimited fields ({}) not supported by " + "CoNLL(10) or Malt-Tab(4) format".format(cell_number) + ) from e + + try: + index, word, lemma, ctag, tag, feats, head, rel = cell_extractor( + cells, index + ) + except (TypeError, ValueError): + # cell_extractor doesn't take 2 arguments or doesn't return 8 + # values; assume the cell_extractor is an older external + # extractor and doesn't accept or return an index. + word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells) + + if head == "_": + continue + + head = int(head) + if zero_based: + head += 1 + + self.nodes[index].update( + { + "address": index, + "word": word, + "lemma": lemma, + "ctag": ctag, + "tag": tag, + "feats": feats, + "head": head, + "rel": rel, + } + ) + + # Make sure that the fake root node has labeled dependencies. + if (cell_number == 3) and (head == 0): + rel = top_relation_label + self.nodes[head]["deps"][rel].append(index) + + if self.nodes[0]["deps"][top_relation_label]: + root_address = self.nodes[0]["deps"][top_relation_label][0] + self.root = self.nodes[root_address] + self.top_relation_label = top_relation_label + else: + warnings.warn( + "The graph doesn't contain a node " "that depends on the root element." + ) + + def _word(self, node, filter=True): + w = node["word"] + if filter: + if w != ",": + return w + return w + + def _tree(self, i): + """Turn dependency graphs into NLTK trees. + + :param int i: index of a node + :return: either a word (if the indexed node is a leaf) or a ``Tree``. + """ + node = self.get_by_address(i) + word = node["word"] + deps = sorted(chain.from_iterable(node["deps"].values())) + + if deps: + return Tree(word, [self._tree(dep) for dep in deps]) + else: + return word + + def tree(self): + """ + Starting with the ``root`` node, build a dependency tree using the NLTK + ``Tree`` constructor. Dependency labels are omitted. 
+ """ + node = self.root + + word = node["word"] + deps = sorted(chain.from_iterable(node["deps"].values())) + return Tree(word, [self._tree(dep) for dep in deps]) + + def triples(self, node=None): + """ + Extract dependency triples of the form: + ((head word, head tag), rel, (dep word, dep tag)) + """ + + if not node: + node = self.root + + head = (node["word"], node["ctag"]) + for i in sorted(chain.from_iterable(node["deps"].values())): + dep = self.get_by_address(i) + yield (head, dep["rel"], (dep["word"], dep["ctag"])) + yield from self.triples(node=dep) + + def _hd(self, i): + try: + return self.nodes[i]["head"] + except IndexError: + return None + + def _rel(self, i): + try: + return self.nodes[i]["rel"] + except IndexError: + return None + + # what's the return type? Boolean or list? + def contains_cycle(self): + """Check whether there are cycles. + + >>> dg = DependencyGraph(treebank_data) + >>> dg.contains_cycle() + False + + >>> cyclic_dg = DependencyGraph() + >>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0} + >>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1} + >>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2} + >>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3} + >>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4} + >>> cyclic_dg.nodes = { + ... 0: top, + ... 1: child1, + ... 2: child2, + ... 3: child3, + ... 4: child4, + ... } + >>> cyclic_dg.root = top + + >>> cyclic_dg.contains_cycle() + [1, 2, 4, 3] + + """ + distances = {} + + for node in self.nodes.values(): + for dep in node["deps"]: + key = tuple([node["address"], dep]) + distances[key] = 1 + + for _ in self.nodes: + new_entries = {} + + for pair1 in distances: + for pair2 in distances: + if pair1[1] == pair2[0]: + key = tuple([pair1[0], pair2[1]]) + new_entries[key] = distances[pair1] + distances[pair2] + + for pair in new_entries: + distances[pair] = new_entries[pair] + if pair[0] == pair[1]: + path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0]) + return path + + return False # return []? + + def get_cycle_path(self, curr_node, goal_node_index): + for dep in curr_node["deps"]: + if dep == goal_node_index: + return [curr_node["address"]] + for dep in curr_node["deps"]: + path = self.get_cycle_path(self.get_by_address(dep), goal_node_index) + if len(path) > 0: + path.insert(0, curr_node["address"]) + return path + return [] + + def to_conll(self, style): + """ + The dependency graph in CoNLL format. 
+ + :param style: the style to use for the format (3, 4, 10 columns) + :type style: int + :rtype: str + """ + + if style == 3: + template = "{word}\t{tag}\t{head}\n" + elif style == 4: + template = "{word}\t{tag}\t{head}\t{rel}\n" + elif style == 10: + template = ( + "{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n" + ) + else: + raise ValueError( + "Number of tab-delimited fields ({}) not supported by " + "CoNLL(10) or Malt-Tab(4) format".format(style) + ) + + return "".join( + template.format(i=i, **node) + for i, node in sorted(self.nodes.items()) + if node["tag"] != "TOP" + ) + + def nx_graph(self): + """Convert the data in a ``nodelist`` into a networkx labeled directed graph.""" + import networkx + + nx_nodelist = list(range(1, len(self.nodes))) + nx_edgelist = [ + (n, self._hd(n), self._rel(n)) for n in nx_nodelist if self._hd(n) + ] + self.nx_labels = {} + for n in nx_nodelist: + self.nx_labels[n] = self.nodes[n]["word"] + + g = networkx.MultiDiGraph() + g.add_nodes_from(nx_nodelist) + g.add_edges_from(nx_edgelist) + + return g + + +def dot2img(dot_string, t="svg"): + """ + Create image representation fom dot_string, using the 'dot' program + from the Graphviz package. + + Use the 't' argument to specify the image file format, for ex. 'jpeg', 'eps', + 'json', 'png' or 'webp' (Running 'dot -T:' lists all available formats). + + Note that the "capture_output" option of subprocess.run() is only available + with text formats (like svg), but not with binary image formats (like png). + """ + + try: + find_binary("dot") + try: + if t in ["dot", "dot_json", "json", "svg"]: + proc = subprocess.run( + ["dot", "-T%s" % t], + capture_output=True, + input=dot_string, + text=True, + ) + else: + proc = subprocess.run( + ["dot", "-T%s" % t], + input=bytes(dot_string, encoding="utf8"), + ) + return proc.stdout + except: + raise Exception( + "Cannot create image representation by running dot from string: {}" + "".format(dot_string) + ) + except OSError as e: + raise Exception("Cannot find the dot binary from Graphviz package") from e + + +class DependencyGraphError(Exception): + """Dependency graph exception.""" + + +def demo(): + malt_demo() + conll_demo() + conll_file_demo() + cycle_finding_demo() + + +def malt_demo(nx=False): + """ + A demonstration of the result of reading a dependency + version of the first sentence of the Penn Treebank. + """ + dg = DependencyGraph( + """Pierre NNP 2 NMOD +Vinken NNP 8 SUB +, , 2 P +61 CD 5 NMOD +years NNS 6 AMOD +old JJ 2 NMOD +, , 2 P +will MD 0 ROOT +join VB 8 VC +the DT 11 NMOD +board NN 9 OBJ +as IN 9 VMOD +a DT 15 NMOD +nonexecutive JJ 15 NMOD +director NN 12 PMOD +Nov. NNP 9 VMOD +29 CD 16 NMOD +. . 9 VMOD +""" + ) + tree = dg.tree() + tree.pprint() + if nx: + # currently doesn't work + import networkx + from matplotlib import pylab + + g = dg.nx_graph() + g.info() + pos = networkx.spring_layout(g, dim=1) + networkx.draw_networkx_nodes(g, pos, node_size=50) + # networkx.draw_networkx_edges(g, pos, edge_color='k', width=8) + networkx.draw_networkx_labels(g, pos, dg.nx_labels) + pylab.xticks([]) + pylab.yticks([]) + pylab.savefig("tree.png") + pylab.show() + + +def conll_demo(): + """ + A demonstration of how to read a string representation of + a CoNLL format dependency tree. 
+ """ + dg = DependencyGraph(conll_data1) + tree = dg.tree() + tree.pprint() + print(dg) + print(dg.to_conll(4)) + + +def conll_file_demo(): + print("Mass conll_read demo...") + graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] + for graph in graphs: + tree = graph.tree() + print("\n") + tree.pprint() + + +def cycle_finding_demo(): + dg = DependencyGraph(treebank_data) + print(dg.contains_cycle()) + cyclic_dg = DependencyGraph() + cyclic_dg.add_node({"word": None, "deps": [1], "rel": "TOP", "address": 0}) + cyclic_dg.add_node({"word": None, "deps": [2], "rel": "NTOP", "address": 1}) + cyclic_dg.add_node({"word": None, "deps": [4], "rel": "NTOP", "address": 2}) + cyclic_dg.add_node({"word": None, "deps": [1], "rel": "NTOP", "address": 3}) + cyclic_dg.add_node({"word": None, "deps": [3], "rel": "NTOP", "address": 4}) + print(cyclic_dg.contains_cycle()) + + +treebank_data = """Pierre NNP 2 NMOD +Vinken NNP 8 SUB +, , 2 P +61 CD 5 NMOD +years NNS 6 AMOD +old JJ 2 NMOD +, , 2 P +will MD 0 ROOT +join VB 8 VC +the DT 11 NMOD +board NN 9 OBJ +as IN 9 VMOD +a DT 15 NMOD +nonexecutive JJ 15 NMOD +director NN 12 PMOD +Nov. NNP 9 VMOD +29 CD 16 NMOD +. . 9 VMOD +""" + +conll_data1 = """ +1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _ +2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _ +3 met met Prep Prep voor 8 mod _ _ +4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _ +5 moeder moeder N N soort|ev|neut 3 obj1 _ _ +6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _ +7 gaan ga V V hulp|inf 6 vc _ _ +8 winkelen winkel V V intrans|inf 11 cnj _ _ +9 , , Punc Punc komma 8 punct _ _ +10 zwemmen zwem V V intrans|inf 11 cnj _ _ +11 of of Conj Conj neven 7 vc _ _ +12 terrassen terras N N soort|mv|neut 11 cnj _ _ +13 . . Punc Punc punt 12 punct _ _ +""" + +conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _ +2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _ +3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _ +4 wild wild Adj Adj attr|stell|onverv 5 mod _ _ +5 zwaaien zwaai N N soort|mv|neut 2 vc _ _ +6 . . Punc Punc punt 5 punct _ _ + +1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _ +2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _ +3 met met Prep Prep voor 8 mod _ _ +4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _ +5 moeder moeder N N soort|ev|neut 3 obj1 _ _ +6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _ +7 gaan ga V V hulp|inf 6 vc _ _ +8 winkelen winkel V V intrans|inf 11 cnj _ _ +9 , , Punc Punc komma 8 punct _ _ +10 zwemmen zwem V V intrans|inf 11 cnj _ _ +11 of of Conj Conj neven 7 vc _ _ +12 terrassen terras N N soort|mv|neut 11 cnj _ _ +13 . . Punc Punc punt 12 punct _ _ + +1 Dat dat Pron Pron aanw|neut|attr 2 det _ _ +2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _ +3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _ +4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _ +5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _ +6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _ +7 . . Punc Punc punt 6 punct _ _ + +1 Het het Pron Pron onbep|neut|zelfst 2 su _ _ +2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _ +3 bij bij Prep Prep voor 2 ld _ _ +4 de de Art Art bep|zijdofmv|neut 6 det _ _ +5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _ +6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _ +7 die die Pron Pron betr|neut|zelfst 6 mod _ _ +8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _ +9 ginds ginds Adv Adv gew|aanw 12 mod _ _ +10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _ +11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _ +12 gelaten laat V V trans|verldw|onverv 11 vc _ _ +13 . . 
Punc Punc punt 12 punct _ _ + +1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _ +2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _ +3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _ +4 naast naast Prep Prep voor 11 mod _ _ +5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _ +6 op op Prep Prep voor 11 ld _ _ +7 de de Art Art bep|zijdofmv|neut 8 det _ _ +8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _ +9 kunnen kan V V hulp|inf 2 vc _ _ +10 gaan ga V V hulp|inf 9 vc _ _ +11 liggen lig V V intrans|inf 10 vc _ _ +12 . . Punc Punc punt 11 punct _ _ + +1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _ +2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _ +3 mams mams N N soort|ev|neut 4 det _ _ +4 rug rug N N soort|ev|neut 5 obj1 _ _ +5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _ +6 hebben heb V V hulp|inf 2 vc _ _ +7 en en Conj Conj neven 0 ROOT _ _ +8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _ +9 de de Art Art bep|zijdofmv|neut 10 det _ _ +10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _ +11 . . Punc Punc punt 10 punct _ _ + +1 Of of Conj Conj onder|metfin 0 ROOT _ _ +2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _ +3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _ +4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _ +5 met met Prep Prep voor 10 mod _ _ +6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _ +7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _ +8 rond rond Adv Adv deelv 10 svp _ _ +9 kunnen kan V V hulp|inf 3 vc _ _ +10 slenteren slenter V V intrans|inf 9 vc _ _ +11 in in Prep Prep voor 10 mod _ _ +12 de de Art Art bep|zijdofmv|neut 13 det _ _ +13 buurt buurt N N soort|ev|neut 11 obj1 _ _ +14 van van Prep Prep voor 13 mod _ _ +15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _ +16 . . Punc Punc punt 15 punct _ _ +""" + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/generate.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/generate.py new file mode 100644 index 0000000000000000000000000000000000000000..fb2f4e9fa03ee09d5de2c25bf15d728033b577e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/generate.py @@ -0,0 +1,85 @@ +# Natural Language Toolkit: Generating from a CFG +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Peter Ljunglöf +# URL: +# For license information, see LICENSE.TXT +# + +import itertools +import sys + +from nltk.grammar import Nonterminal + + +def generate(grammar, start=None, depth=None, n=None): + """ + Generates an iterator of all sentences from a CFG. + + :param grammar: The Grammar used to generate sentences. + :param start: The Nonterminal from which to start generate sentences. + :param depth: The maximal depth of the generated tree. + :param n: The maximum number of sentences to return. + :return: An iterator of lists of terminal tokens. + """ + if not start: + start = grammar.start() + if depth is None: + depth = sys.maxsize + + iter = _generate_all(grammar, [start], depth) + + if n: + iter = itertools.islice(iter, n) + + return iter + + +def _generate_all(grammar, items, depth): + if items: + try: + for frag1 in _generate_one(grammar, items[0], depth): + for frag2 in _generate_all(grammar, items[1:], depth): + yield frag1 + frag2 + except RecursionError as error: + # Helpful error message while still showing the recursion stack. + raise RuntimeError( + "The grammar has rule(s) that yield infinite recursion!" 
+ ) from error + else: + yield [] + + +def _generate_one(grammar, item, depth): + if depth > 0: + if isinstance(item, Nonterminal): + for prod in grammar.productions(lhs=item): + yield from _generate_all(grammar, prod.rhs(), depth - 1) + else: + yield [item] + + +demo_grammar = """ + S -> NP VP + NP -> Det N + PP -> P NP + VP -> 'slept' | 'saw' NP | 'walked' PP + Det -> 'the' | 'a' + N -> 'man' | 'park' | 'dog' + P -> 'in' | 'with' +""" + + +def demo(N=23): + from nltk.grammar import CFG + + print("Generating the first %d sentences for demo grammar:" % (N,)) + print(demo_grammar) + grammar = CFG.fromstring(demo_grammar) + for n, sent in enumerate(generate(grammar, n=N), 1): + print("%3d. %s" % (n, " ".join(sent))) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/malt.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/malt.py new file mode 100644 index 0000000000000000000000000000000000000000..229e8242719dc4645763706b58363b546bc7e6ae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/malt.py @@ -0,0 +1,393 @@ +# Natural Language Toolkit: Interface to MaltParser +# +# Author: Dan Garrette +# Contributor: Liling Tan, Mustufain, osamamukhtar11 +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import inspect +import os +import subprocess +import sys +import tempfile + +from nltk.data import ZipFilePathPointer +from nltk.internals import find_dir, find_file, find_jars_within_path +from nltk.parse.api import ParserI +from nltk.parse.dependencygraph import DependencyGraph +from nltk.parse.util import taggedsents_to_conll + + +def malt_regex_tagger(): + from nltk.tag import RegexpTagger + + _tagger = RegexpTagger( + [ + (r"\.$", "."), + (r"\,$", ","), + (r"\?$", "?"), # fullstop, comma, Qmark + (r"\($", "("), + (r"\)$", ")"), # round brackets + (r"\[$", "["), + (r"\]$", "]"), # square brackets + (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers + (r"(The|the|A|a|An|an)$", "DT"), # articles + (r"(He|he|She|she|It|it|I|me|Me|You|you)$", "PRP"), # pronouns + (r"(His|his|Her|her|Its|its)$", "PRP$"), # possessive + (r"(my|Your|your|Yours|yours)$", "PRP$"), # possessive + (r"(on|On|in|In|at|At|since|Since)$", "IN"), # time prepopsitions + (r"(for|For|ago|Ago|before|Before)$", "IN"), # time prepopsitions + (r"(till|Till|until|Until)$", "IN"), # time prepopsitions + (r"(by|By|beside|Beside)$", "IN"), # space prepopsitions + (r"(under|Under|below|Below)$", "IN"), # space prepopsitions + (r"(over|Over|above|Above)$", "IN"), # space prepopsitions + (r"(across|Across|through|Through)$", "IN"), # space prepopsitions + (r"(into|Into|towards|Towards)$", "IN"), # space prepopsitions + (r"(onto|Onto|from|From)$", "IN"), # space prepopsitions + (r".*able$", "JJ"), # adjectives + (r".*ness$", "NN"), # nouns formed from adjectives + (r".*ly$", "RB"), # adverbs + (r".*s$", "NNS"), # plural nouns + (r".*ing$", "VBG"), # gerunds + (r".*ed$", "VBD"), # past tense verbs + (r".*", "NN"), # nouns (default) + ] + ) + return _tagger.tag + + +def find_maltparser(parser_dirname): + """ + A module to find MaltParser .jar file and its dependencies. + """ + if os.path.exists(parser_dirname): # If a full path is given. + _malt_dir = parser_dirname + else: # Try to find path to maltparser directory in environment variables. 
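+        # find_dir() falls back to the MALT_PARSER environment variable,
+        # e.g. export MALT_PARSER='/home/user/maltparser-1.9.2/'
+        # (the path above is only an example, as in the doctest below).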
+ _malt_dir = find_dir(parser_dirname, env_vars=("MALT_PARSER",)) + # Checks that that the found directory contains all the necessary .jar + malt_dependencies = ["", "", ""] + _malt_jars = set(find_jars_within_path(_malt_dir)) + _jars = {os.path.split(jar)[1] for jar in _malt_jars} + malt_dependencies = {"log4j.jar", "libsvm.jar", "liblinear-1.8.jar"} + + assert malt_dependencies.issubset(_jars) + assert any( + filter(lambda i: i.startswith("maltparser-") and i.endswith(".jar"), _jars) + ) + return list(_malt_jars) + + +def find_malt_model(model_filename): + """ + A module to find pre-trained MaltParser model. + """ + if model_filename is None: + return "malt_temp.mco" + elif os.path.exists(model_filename): # If a full path is given. + return model_filename + else: # Try to find path to malt model in environment variables. + return find_file(model_filename, env_vars=("MALT_MODEL",), verbose=False) + + +class MaltParser(ParserI): + """ + A class for dependency parsing with MaltParser. The input is the paths to: + - (optionally) a maltparser directory + - (optionally) the path to a pre-trained MaltParser .mco model file + - (optionally) the tagger to use for POS tagging before parsing + - (optionally) additional Java arguments + + Example: + >>> from nltk.parse import malt + >>> # With MALT_PARSER and MALT_MODEL environment set. + >>> mp = malt.MaltParser(model_filename='engmalt.linear-1.7.mco') # doctest: +SKIP + >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP + (shot I (elephant an) (in (pajamas my)) .) + >>> # Without MALT_PARSER and MALT_MODEL environment. + >>> mp = malt.MaltParser('/home/user/maltparser-1.9.2/', '/home/user/engmalt.linear-1.7.mco') # doctest: +SKIP + >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP + (shot I (elephant an) (in (pajamas my)) .) + """ + + def __init__( + self, + parser_dirname="", + model_filename=None, + tagger=None, + additional_java_args=None, + ): + """ + An interface for parsing with the Malt Parser. + + :param parser_dirname: The path to the maltparser directory that + contains the maltparser-1.x.jar + :type parser_dirname: str + :param model_filename: The name of the pre-trained model with .mco file + extension. If provided, training will not be required. + (see http://www.maltparser.org/mco/mco.html and + see http://www.patful.com/chalk/node/185) + :type model_filename: str + :param tagger: The tagger used to POS tag the raw string before + formatting to CONLL format. It should behave like `nltk.pos_tag` + :type tagger: function + :param additional_java_args: This is the additional Java arguments that + one can use when calling Maltparser, usually this is the heapsize + limits, e.g. `additional_java_args=['-Xmx1024m']` + (see https://goo.gl/mpDBvQ) + :type additional_java_args: list + """ + + # Find all the necessary jar files for MaltParser. + self.malt_jars = find_maltparser(parser_dirname) + # Initialize additional java arguments. + self.additional_java_args = ( + additional_java_args if additional_java_args is not None else [] + ) + # Initialize model. + self.model = find_malt_model(model_filename) + self._trained = self.model != "malt_temp.mco" + # Set the working_dir parameters i.e. `-w` from MaltParser's option. + self.working_dir = tempfile.gettempdir() + # Initialize POS tagger. 
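+        # If no tagger is given, fall back to the heuristic, English-oriented
+        # regex tagger defined at the top of this module (malt_regex_tagger).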
+ self.tagger = tagger if tagger is not None else malt_regex_tagger() + + def parse_tagged_sents(self, sentences, verbose=False, top_relation_label="null"): + """ + Use MaltParser to parse multiple POS tagged sentences. Takes multiple + sentences where each sentence is a list of (word, tag) tuples. + The sentences must have already been tokenized and tagged. + + :param sentences: Input sentences to parse + :type sentence: list(list(tuple(str, str))) + :return: iter(iter(``DependencyGraph``)) the dependency graph + representation of each sentence + """ + if not self._trained: + raise Exception("Parser has not been trained. Call train() first.") + + with tempfile.NamedTemporaryFile( + prefix="malt_input.conll.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + with tempfile.NamedTemporaryFile( + prefix="malt_output.conll.", + dir=self.working_dir, + mode="w", + delete=False, + ) as output_file: + # Convert list of sentences to CONLL format. + for line in taggedsents_to_conll(sentences): + input_file.write(str(line)) + input_file.close() + + # Generate command to run maltparser. + cmd = self.generate_malt_command( + input_file.name, output_file.name, mode="parse" + ) + + # This is a maltparser quirk, it needs to be run + # where the model file is. otherwise it goes into an awkward + # missing .jars or strange -w working_dir problem. + _current_path = os.getcwd() # Remembers the current path. + try: # Change to modelfile path + os.chdir(os.path.split(self.model)[0]) + except: + pass + ret = self._execute(cmd, verbose) # Run command. + os.chdir(_current_path) # Change back to current path. + + if ret != 0: + raise Exception( + "MaltParser parsing (%s) failed with exit " + "code %d" % (" ".join(cmd), ret) + ) + + # Must return iter(iter(Tree)) + with open(output_file.name) as infile: + for tree_str in infile.read().split("\n\n"): + yield ( + iter( + [ + DependencyGraph( + tree_str, top_relation_label=top_relation_label + ) + ] + ) + ) + + os.remove(input_file.name) + os.remove(output_file.name) + + def parse_sents(self, sentences, verbose=False, top_relation_label="null"): + """ + Use MaltParser to parse multiple sentences. + Takes a list of sentences, where each sentence is a list of words. + Each sentence will be automatically tagged with this + MaltParser instance's tagger. + + :param sentences: Input sentences to parse + :type sentence: list(list(str)) + :return: iter(DependencyGraph) + """ + tagged_sentences = (self.tagger(sentence) for sentence in sentences) + return self.parse_tagged_sents( + tagged_sentences, verbose, top_relation_label=top_relation_label + ) + + def generate_malt_command(self, inputfilename, outputfilename=None, mode=None): + """ + This function generates the maltparser command use at the terminal. + + :param inputfilename: path to the input file + :type inputfilename: str + :param outputfilename: path to the output file + :type outputfilename: str + """ + + cmd = ["java"] + cmd += self.additional_java_args # Adds additional java arguments + # Joins classpaths with ";" if on Windows and on Linux/Mac use ":" + classpaths_separator = ";" if sys.platform.startswith("win") else ":" + cmd += [ + "-cp", + classpaths_separator.join(self.malt_jars), + ] # Adds classpaths for jars + cmd += ["org.maltparser.Malt"] # Adds the main function. + + # Adds the model file. 
+ if os.path.exists(self.model): # when parsing + cmd += ["-c", os.path.split(self.model)[-1]] + else: # when learning + cmd += ["-c", self.model] + + cmd += ["-i", inputfilename] + if mode == "parse": + cmd += ["-o", outputfilename] + cmd += ["-m", mode] # mode use to generate parses. + return cmd + + @staticmethod + def _execute(cmd, verbose=False): + output = None if verbose else subprocess.PIPE + p = subprocess.Popen(cmd, stdout=output, stderr=output) + return p.wait() + + def train(self, depgraphs, verbose=False): + """ + Train MaltParser from a list of ``DependencyGraph`` objects + + :param depgraphs: list of ``DependencyGraph`` objects for training input data + :type depgraphs: DependencyGraph + """ + + # Write the conll_str to malt_train.conll file in /tmp/ + with tempfile.NamedTemporaryFile( + prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + input_str = "\n".join(dg.to_conll(10) for dg in depgraphs) + input_file.write(str(input_str)) + # Trains the model with the malt_train.conll + self.train_from_file(input_file.name, verbose=verbose) + # Removes the malt_train.conll once training finishes. + os.remove(input_file.name) + + def train_from_file(self, conll_file, verbose=False): + """ + Train MaltParser from a file + :param conll_file: str for the filename of the training input data + :type conll_file: str + """ + + # If conll_file is a ZipFilePathPointer, + # then we need to do some extra massaging + if isinstance(conll_file, ZipFilePathPointer): + with tempfile.NamedTemporaryFile( + prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + with conll_file.open() as conll_input_file: + conll_str = conll_input_file.read() + input_file.write(str(conll_str)) + return self.train_from_file(input_file.name, verbose=verbose) + + # Generate command to run maltparser. + cmd = self.generate_malt_command(conll_file, mode="learn") + ret = self._execute(cmd, verbose) + if ret != 0: + raise Exception( + "MaltParser training (%s) failed with exit " + "code %d" % (" ".join(cmd), ret) + ) + self._trained = True + + +if __name__ == "__main__": + """ + A demonstration function to show how NLTK users can use the malt parser API. + + >>> from nltk import pos_tag + >>> assert 'MALT_PARSER' in os.environ, str( + ... "Please set MALT_PARSER in your global environment, e.g.:\n" + ... "$ export MALT_PARSER='/home/user/maltparser-1.9.2/'") + >>> + >>> assert 'MALT_MODEL' in os.environ, str( + ... "Please set MALT_MODEL in your global environment, e.g.:\n" + ... "$ export MALT_MODEL='/home/user/engmalt.linear-1.7.mco'") + >>> + >>> _dg1_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n" + ... "2 sees _ VB _ _ 0 ROOT _ _\n" + ... "3 a _ DT _ _ 4 SPEC _ _\n" + ... "4 dog _ NN _ _ 2 OBJ _ _\n" + ... "5 . _ . _ _ 2 PUNCT _ _\n") + >>> + >>> + >>> _dg2_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n" + ... "2 walks _ VB _ _ 0 ROOT _ _\n" + ... "3 . _ . _ _ 2 PUNCT _ _\n") + >>> dg1 = DependencyGraph(_dg1_str) + >>> dg2 = DependencyGraph(_dg2_str) + >>> # Initialize a MaltParser object + >>> mp = MaltParser() + >>> + >>> # Trains a model. + >>> mp.train([dg1,dg2], verbose=False) + >>> sent1 = ['John','sees','Mary', '.'] + >>> sent2 = ['John', 'walks', 'a', 'dog', '.'] + >>> + >>> # Parse a single sentence. + >>> parsed_sent1 = mp.parse_one(sent1) + >>> parsed_sent2 = mp.parse_one(sent2) + >>> print(parsed_sent1.tree()) + (sees John Mary .) + >>> print(parsed_sent2.tree()) + (walks John (dog a) .) + >>> + >>> # Parsing multiple sentences. 
+ >>> sentences = [sent1,sent2] + >>> parsed_sents = mp.parse_sents(sentences) + >>> print(next(next(parsed_sents)).tree()) + (sees John Mary .) + >>> print(next(next(parsed_sents)).tree()) + (walks John (dog a) .) + >>> + >>> # Initialize a MaltParser object with an English pre-trained model. + >>> parser_dirname = 'maltparser-1.9.2' + >>> model_name = 'engmalt.linear-1.7.mco' + >>> mp = MaltParser(parser_dirname=parser_dirname, model_filename=model_name, tagger=pos_tag) + >>> sent1 = 'I shot an elephant in my pajamas .'.split() + >>> sent2 = 'Time flies like banana .'.split() + >>> # Parse a single sentence. + >>> print(mp.parse_one(sent1).tree()) + (shot I (elephant an) (in (pajamas my)) .) + # Parsing multiple sentences + >>> sentences = [sent1,sent2] + >>> parsed_sents = mp.parse_sents(sentences) + >>> print(next(next(parsed_sents)).tree()) + (shot I (elephant an) (in (pajamas my)) .) + >>> print(next(next(parsed_sents)).tree()) + (flies Time (like banana) .) + """ + + import doctest + + doctest.testmod() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/stanford.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/stanford.py new file mode 100644 index 0000000000000000000000000000000000000000..c5ed0f9eab042dfdb7d91679ac5502f495a328ac --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/stanford.py @@ -0,0 +1,470 @@ +# Natural Language Toolkit: Interface to the Stanford Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Xu +# +# URL: +# For license information, see LICENSE.TXT + +import os +import tempfile +import warnings +from subprocess import PIPE + +from nltk.internals import ( + _java_options, + config_java, + find_jar_iter, + find_jars_within_path, + java, +) +from nltk.parse.api import ParserI +from nltk.parse.dependencygraph import DependencyGraph +from nltk.tree import Tree + +_stanford_url = "https://nlp.stanford.edu/software/lex-parser.shtml" + + +class GenericStanfordParser(ParserI): + """Interface to the Stanford Parser""" + + _MODEL_JAR_PATTERN = r"stanford-parser-(\d+)(\.(\d+))+-models\.jar" + _JAR = r"stanford-parser\.jar" + _MAIN_CLASS = "edu.stanford.nlp.parser.lexparser.LexicalizedParser" + + _USE_STDIN = False + _DOUBLE_SPACED_OUTPUT = False + + def __init__( + self, + path_to_jar=None, + path_to_models_jar=None, + model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz", + encoding="utf8", + verbose=False, + java_options="-mx4g", + corenlp_options="", + ): + + # find the most recent code and model jar + stanford_jar = max( + find_jar_iter( + self._JAR, + path_to_jar, + env_vars=("STANFORD_PARSER", "STANFORD_CORENLP"), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ), + key=lambda model_path: os.path.dirname(model_path), + ) + + model_jar = max( + find_jar_iter( + self._MODEL_JAR_PATTERN, + path_to_models_jar, + env_vars=("STANFORD_MODELS", "STANFORD_CORENLP"), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ), + key=lambda model_path: os.path.dirname(model_path), + ) + + # self._classpath = (stanford_jar, model_jar) + + # Adding logging jar files to classpath + stanford_dir = os.path.split(stanford_jar)[0] + self._classpath = tuple([model_jar] + find_jars_within_path(stanford_dir)) + + self.model_path = model_path + self._encoding = encoding + self.corenlp_options = corenlp_options + self.java_options = java_options + + def _parse_trees_output(self, output_): + res = [] + cur_lines = [] + cur_trees = [] + blank = False + for line in 
output_.splitlines(False): + if line == "": + if blank: + res.append(iter(cur_trees)) + cur_trees = [] + blank = False + elif self._DOUBLE_SPACED_OUTPUT: + cur_trees.append(self._make_tree("\n".join(cur_lines))) + cur_lines = [] + blank = True + else: + res.append(iter([self._make_tree("\n".join(cur_lines))])) + cur_lines = [] + else: + cur_lines.append(line) + blank = False + return iter(res) + + def parse_sents(self, sentences, verbose=False): + """ + Use StanfordParser to parse multiple sentences. Takes multiple sentences as a + list where each sentence is a list of words. + Each sentence will be automatically tagged with this StanfordParser instance's + tagger. + If whitespaces exists inside a token, then the token will be treated as + separate tokens. + + :param sentences: Input sentences to parse + :type sentences: list(list(str)) + :rtype: iter(iter(Tree)) + """ + cmd = [ + self._MAIN_CLASS, + "-model", + self.model_path, + "-sentences", + "newline", + "-outputFormat", + self._OUTPUT_FORMAT, + "-tokenized", + "-escaper", + "edu.stanford.nlp.process.PTBEscapingProcessor", + ] + return self._parse_trees_output( + self._execute( + cmd, "\n".join(" ".join(sentence) for sentence in sentences), verbose + ) + ) + + def raw_parse(self, sentence, verbose=False): + """ + Use StanfordParser to parse a sentence. Takes a sentence as a string; + before parsing, it will be automatically tokenized and tagged by + the Stanford Parser. + + :param sentence: Input sentence to parse + :type sentence: str + :rtype: iter(Tree) + """ + return next(self.raw_parse_sents([sentence], verbose)) + + def raw_parse_sents(self, sentences, verbose=False): + """ + Use StanfordParser to parse multiple sentences. Takes multiple sentences as a + list of strings. + Each sentence will be automatically tokenized and tagged by the Stanford Parser. + + :param sentences: Input sentences to parse + :type sentences: list(str) + :rtype: iter(iter(Tree)) + """ + cmd = [ + self._MAIN_CLASS, + "-model", + self.model_path, + "-sentences", + "newline", + "-outputFormat", + self._OUTPUT_FORMAT, + ] + return self._parse_trees_output( + self._execute(cmd, "\n".join(sentences), verbose) + ) + + def tagged_parse(self, sentence, verbose=False): + """ + Use StanfordParser to parse a sentence. Takes a sentence as a list of + (word, tag) tuples; the sentence must have already been tokenized and + tagged. + + :param sentence: Input sentence to parse + :type sentence: list(tuple(str, str)) + :rtype: iter(Tree) + """ + return next(self.tagged_parse_sents([sentence], verbose)) + + def tagged_parse_sents(self, sentences, verbose=False): + """ + Use StanfordParser to parse multiple sentences. Takes multiple sentences + where each sentence is a list of (word, tag) tuples. + The sentences must have already been tokenized and tagged. 
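+
+        A usage sketch, mirroring the class docstrings below (skipped because
+        it needs the Stanford Parser jars and models installed locally):
+
+        >>> parser = StanfordParser(model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz") # doctest: +SKIP
+        >>> sum([list(parses) for parses in parser.tagged_parse_sents([
+        ...     [("The", "DT"), ("dog", "NN"), ("barks", "VBZ")]
+        ... ])], []) # doctest: +SKIP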
+ + :param sentences: Input sentences to parse + :type sentences: list(list(tuple(str, str))) + :rtype: iter(iter(Tree)) + """ + tag_separator = "/" + cmd = [ + self._MAIN_CLASS, + "-model", + self.model_path, + "-sentences", + "newline", + "-outputFormat", + self._OUTPUT_FORMAT, + "-tokenized", + "-tagSeparator", + tag_separator, + "-tokenizerFactory", + "edu.stanford.nlp.process.WhitespaceTokenizer", + "-tokenizerMethod", + "newCoreLabelTokenizerFactory", + ] + # We don't need to escape slashes as "splitting is done on the last instance of the character in the token" + return self._parse_trees_output( + self._execute( + cmd, + "\n".join( + " ".join(tag_separator.join(tagged) for tagged in sentence) + for sentence in sentences + ), + verbose, + ) + ) + + def _execute(self, cmd, input_, verbose=False): + encoding = self._encoding + cmd.extend(["-encoding", encoding]) + if self.corenlp_options: + cmd.extend(self.corenlp_options.split()) + + default_options = " ".join(_java_options) + + # Configure java. + config_java(options=self.java_options, verbose=verbose) + + # Windows is incompatible with NamedTemporaryFile() without passing in delete=False. + with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file: + # Write the actual sentences to the temporary input file + if isinstance(input_, str) and encoding: + input_ = input_.encode(encoding) + input_file.write(input_) + input_file.flush() + + # Run the tagger and get the output. + if self._USE_STDIN: + input_file.seek(0) + stdout, stderr = java( + cmd, + classpath=self._classpath, + stdin=input_file, + stdout=PIPE, + stderr=PIPE, + ) + else: + cmd.append(input_file.name) + stdout, stderr = java( + cmd, classpath=self._classpath, stdout=PIPE, stderr=PIPE + ) + + stdout = stdout.replace(b"\xc2\xa0", b" ") + stdout = stdout.replace(b"\x00\xa0", b" ") + stdout = stdout.decode(encoding) + + os.unlink(input_file.name) + + # Return java configurations to their default values. + config_java(options=default_options, verbose=False) + + return stdout + + +class StanfordParser(GenericStanfordParser): + """ + >>> parser=StanfordParser( + ... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz" + ... ) # doctest: +SKIP + + >>> list(parser.raw_parse("the quick brown fox jumps over the lazy dog")) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']), + Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), + Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])])] + + >>> sum([list(dep_graphs) for dep_graphs in parser.raw_parse_sents(( + ... "the quick brown fox jumps over the lazy dog", + ... "the quick grey wolf jumps over the lazy fox" + ... 
))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']), + Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), + Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])]), Tree('ROOT', [Tree('NP', + [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['grey']), Tree('NN', ['wolf'])]), Tree('NP', + [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), Tree('NP', [Tree('DT', ['the']), + Tree('JJ', ['lazy']), Tree('NN', ['fox'])])])])])])] + + >>> sum([list(dep_graphs) for dep_graphs in parser.parse_sents(( + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('S', [Tree('NP', [Tree('PRP', ['I'])]), Tree('VP', [Tree('VBP', ["'m"]), + Tree('NP', [Tree('DT', ['a']), Tree('NN', ['dog'])])])])]), Tree('ROOT', [Tree('S', [Tree('NP', + [Tree('DT', ['This'])]), Tree('VP', [Tree('VBZ', ['is']), Tree('NP', [Tree('NP', [Tree('NP', [Tree('PRP$', ['my']), + Tree('NNS', ['friends']), Tree('POS', ["'"])]), Tree('NN', ['cat'])]), Tree('PRN', [Tree('-LRB-', [Tree('', []), + Tree('NP', [Tree('DT', ['the']), Tree('NN', ['tabby'])]), Tree('-RRB-', [])])])])])])])] + + >>> sum([list(dep_graphs) for dep_graphs in parser.tagged_parse_sents(( + ... ( + ... ("The", "DT"), + ... ("quick", "JJ"), + ... ("brown", "JJ"), + ... ("fox", "NN"), + ... ("jumped", "VBD"), + ... ("over", "IN"), + ... ("the", "DT"), + ... ("lazy", "JJ"), + ... ("dog", "NN"), + ... (".", "."), + ... ), + ... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('S', [Tree('NP', [Tree('DT', ['The']), Tree('JJ', ['quick']), Tree('JJ', ['brown']), + Tree('NN', ['fox'])]), Tree('VP', [Tree('VBD', ['jumped']), Tree('PP', [Tree('IN', ['over']), Tree('NP', + [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])]), Tree('.', ['.'])])])] + """ + + _OUTPUT_FORMAT = "penn" + + def __init__(self, *args, **kwargs): + warnings.warn( + "The StanfordParser will be deprecated\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__(*args, **kwargs) + + def _make_tree(self, result): + return Tree.fromstring(result) + + +class StanfordDependencyParser(GenericStanfordParser): + + """ + >>> dep_parser=StanfordDependencyParser( + ... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz" + ... ) # doctest: +SKIP + + >>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])])] + + >>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')), + ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')), + ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')), + ((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents(( + ... "The quick brown fox jumps over the lazy dog.", + ... 
"The quick grey wolf jumps over the lazy fox." + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])]), + Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']), Tree('fox', ['over', 'the', 'lazy'])])] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents(( + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends', ['my', "'"]), Tree('tabby', ['the'])])] + + >>> sum([[list(parse.triples()) for parse in dep_graphs] for dep_graphs in dep_parser.tagged_parse_sents(( + ... ( + ... ("The", "DT"), + ... ("quick", "JJ"), + ... ("brown", "JJ"), + ... ("fox", "NN"), + ... ("jumped", "VBD"), + ... ("over", "IN"), + ... ("the", "DT"), + ... ("lazy", "JJ"), + ... ("dog", "NN"), + ... (".", "."), + ... ), + ... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP + [[((u'jumped', u'VBD'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')), + ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')), + ((u'jumped', u'VBD'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')), + ((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]] + + """ + + _OUTPUT_FORMAT = "conll2007" + + def __init__(self, *args, **kwargs): + warnings.warn( + "The StanfordDependencyParser will be deprecated\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__(*args, **kwargs) + + def _make_tree(self, result): + return DependencyGraph(result, top_relation_label="root") + + +class StanfordNeuralDependencyParser(GenericStanfordParser): + """ + >>> from nltk.parse.stanford import StanfordNeuralDependencyParser # doctest: +SKIP + >>> dep_parser=StanfordNeuralDependencyParser(java_options='-mx4g')# doctest: +SKIP + + >>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy']), '.'])] + + >>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', + (u'The', u'DT')), ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), + u'amod', (u'brown', u'JJ')), ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')), + ((u'dog', u'NN'), u'case', (u'over', u'IN')), ((u'dog', u'NN'), u'det', + (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ')), ((u'jumps', u'VBZ'), + u'punct', (u'.', u'.'))]] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents(( + ... "The quick brown fox jumps over the lazy dog.", + ... "The quick grey wolf jumps over the lazy fox." + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', + 'the', 'lazy']), '.']), Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']), + Tree('fox', ['over', 'the', 'lazy']), '.'])] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents(( + ... "I 'm a dog".split(), + ... 
"This is my friends ' cat ( the tabby )".split(), + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends', + ['my', "'"]), Tree('tabby', ['-LRB-', 'the', '-RRB-'])])] + """ + + _OUTPUT_FORMAT = "conll" + _MAIN_CLASS = "edu.stanford.nlp.pipeline.StanfordCoreNLP" + _JAR = r"stanford-corenlp-(\d+)(\.(\d+))+\.jar" + _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)(\.(\d+))+-models\.jar" + _USE_STDIN = True + _DOUBLE_SPACED_OUTPUT = True + + def __init__(self, *args, **kwargs): + warnings.warn( + "The StanfordNeuralDependencyParser will be deprecated\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__(*args, **kwargs) + self.corenlp_options += "-annotators tokenize,ssplit,pos,depparse" + + def tagged_parse_sents(self, sentences, verbose=False): + """ + Currently unimplemented because the neural dependency parser (and + the StanfordCoreNLP pipeline class) doesn't support passing in pre- + tagged tokens. + """ + raise NotImplementedError( + "tagged_parse[_sents] is not supported by " + "StanfordNeuralDependencyParser; use " + "parse[_sents] or raw_parse[_sents] instead." + ) + + def _make_tree(self, result): + return DependencyGraph(result, top_relation_label="ROOT") diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f46ec0c26303eea6837bc070d8e77b56b48e29f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__init__.py @@ -0,0 +1,34 @@ +# Natural Language Toolkit: Stemmers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Stemmers + +Interfaces used to remove morphological affixes from words, leaving +only the word stem. Stemming algorithms aim to remove those affixes +required for eg. grammatical role, tense, derivational morphology +leaving only the stem of the word. This is a difficult problem due to +irregular words (eg. common verbs in English), complicated +morphological rules, and part-of-speech and sense ambiguities +(eg. ``ceil-`` is not the stem of ``ceiling``). + +StemmerI defines a standard interface for stemmers. 
+""" + +from nltk.stem.api import StemmerI +from nltk.stem.arlstem import ARLSTem +from nltk.stem.arlstem2 import ARLSTem2 +from nltk.stem.cistem import Cistem +from nltk.stem.isri import ISRIStemmer +from nltk.stem.lancaster import LancasterStemmer +from nltk.stem.porter import PorterStemmer +from nltk.stem.regexp import RegexpStemmer +from nltk.stem.rslp import RSLPStemmer +from nltk.stem.snowball import SnowballStemmer +from nltk.stem.wordnet import WordNetLemmatizer diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7d46443e2551d4f25acf646fdb61f840e5320bb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30e7ea2f8745f8dcd45c9c4893552b4fbb6543dc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7fbd4f86f7f39c151aacb6693e52864af952aca Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05ec64b95b28a717ff2ae9e58f7e51b0e29e0a43 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/arlstem2.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2eadff77e5401a900d5266a1b1280b2ccc89003f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/cistem.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee1c20de8065f4283566dd89924ab83b414785bd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/isri.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ef4241b067af23b8d7174ca07392acd3d349061 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/lancaster.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f1fd425565a52f311bad6ea5b0e0f2af3372d51 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/porter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12a7a53ed23c688c17dcb3320f10af7d09d981e1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/regexp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70a98b46c310cd1d5fd2da73e1a5ed3785d4dfdb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/rslp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2509a2ecc2bf16fcb16acd5b2cf85479bcbd2903 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/snowball.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b93c255431cd5f8bdbea90744fad8c3b57b8740c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/wordnet.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/wordnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83cc20052fb82752075fa48b8a25c1b97ae43a69 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/stem/__pycache__/wordnet.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/arlstem.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/arlstem.py new file mode 100644 index 0000000000000000000000000000000000000000..566a4dd36b37e148a24eb840b5e9478dd24d4b55 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/arlstem.py @@ -0,0 +1,361 @@ +# +# Natural Language Toolkit: ARLSTem Stemmer +# +# Copyright (C) 2001-2023 NLTK Project +# +# Author: Kheireddine Abainia (x-programer) +# Algorithms: Kheireddine Abainia +# Siham Ouamour +# Halim Sayoud +# URL: +# For license information, see LICENSE.TXT + + +""" +ARLSTem Arabic Stemmer +The details about the implementation of this algorithm are described in: +K. Abainia, S. Ouamour and H. Sayoud, A Novel Robust Arabic Light Stemmer , +Journal of Experimental & Theoretical Artificial Intelligence (JETAI'17), +Vol. 29, No. 3, 2017, pp. 557-573. +The ARLSTem is a light Arabic stemmer that is based on removing the affixes +from the word (i.e. prefixes, suffixes and infixes). 
It was evaluated and +compared to several other stemmers using Paice's parameters (under-stemming +index, over-stemming index and stemming weight), and the results showed that +ARLSTem is promising and producing high performances. This stemmer is not +based on any dictionary and can be used on-line effectively. +""" +import re + +from nltk.stem.api import StemmerI + + +class ARLSTem(StemmerI): + """ + ARLSTem stemmer : a light Arabic Stemming algorithm without any dictionary. + Department of Telecommunication & Information Processing. USTHB University, + Algiers, Algeria. + ARLSTem.stem(token) returns the Arabic stem for the input token. + The ARLSTem Stemmer requires that all tokens are encoded using Unicode + encoding. + """ + + def __init__(self): + # different Alif with hamza + self.re_hamzated_alif = re.compile(r"[\u0622\u0623\u0625]") + self.re_alifMaqsura = re.compile(r"[\u0649]") + self.re_diacritics = re.compile(r"[\u064B-\u065F]") + + # Alif Laam, Laam Laam, Fa Laam, Fa Ba + self.pr2 = ["\u0627\u0644", "\u0644\u0644", "\u0641\u0644", "\u0641\u0628"] + # Ba Alif Laam, Kaaf Alif Laam, Waaw Alif Laam + self.pr3 = ["\u0628\u0627\u0644", "\u0643\u0627\u0644", "\u0648\u0627\u0644"] + # Fa Laam Laam, Waaw Laam Laam + self.pr32 = ["\u0641\u0644\u0644", "\u0648\u0644\u0644"] + # Fa Ba Alif Laam, Waaw Ba Alif Laam, Fa Kaaf Alif Laam + self.pr4 = [ + "\u0641\u0628\u0627\u0644", + "\u0648\u0628\u0627\u0644", + "\u0641\u0643\u0627\u0644", + ] + + # Kaf Yaa, Kaf Miim + self.su2 = ["\u0643\u064A", "\u0643\u0645"] + # Ha Alif, Ha Miim + self.su22 = ["\u0647\u0627", "\u0647\u0645"] + # Kaf Miim Alif, Kaf Noon Shadda + self.su3 = ["\u0643\u0645\u0627", "\u0643\u0646\u0651"] + # Ha Miim Alif, Ha Noon Shadda + self.su32 = ["\u0647\u0645\u0627", "\u0647\u0646\u0651"] + + # Alif Noon, Ya Noon, Waaw Noon + self.pl_si2 = ["\u0627\u0646", "\u064A\u0646", "\u0648\u0646"] + # Taa Alif Noon, Taa Ya Noon + self.pl_si3 = ["\u062A\u0627\u0646", "\u062A\u064A\u0646"] + + # Alif Noon, Waaw Noon + self.verb_su2 = ["\u0627\u0646", "\u0648\u0646"] + # Siin Taa, Siin Yaa + self.verb_pr2 = ["\u0633\u062A", "\u0633\u064A"] + # Siin Alif, Siin Noon + self.verb_pr22 = ["\u0633\u0627", "\u0633\u0646"] + # Lam Noon, Lam Taa, Lam Yaa, Lam Hamza + self.verb_pr33 = [ + "\u0644\u0646", + "\u0644\u062A", + "\u0644\u064A", + "\u0644\u0623", + ] + # Taa Miim Alif, Taa Noon Shadda + self.verb_suf3 = ["\u062A\u0645\u0627", "\u062A\u0646\u0651"] + # Noon Alif, Taa Miim, Taa Alif, Waaw Alif + self.verb_suf2 = [ + "\u0646\u0627", + "\u062A\u0645", + "\u062A\u0627", + "\u0648\u0627", + ] + # Taa, Alif, Noon + self.verb_suf1 = ["\u062A", "\u0627", "\u0646"] + + def stem(self, token): + """ + call this function to get the word's stem based on ARLSTem . + """ + try: + if token is None: + raise ValueError( + "The word could not be stemmed, because \ + it is empty !" 
+ ) + # remove Arabic diacritics and replace some letters with others + token = self.norm(token) + # strip common prefixes of the nouns + pre = self.pref(token) + if pre is not None: + token = pre + # strip the suffixes which are common to nouns and verbs + token = self.suff(token) + # transform a plural noun to a singular noun + ps = self.plur2sing(token) + if ps is None: + # transform from the feminine form to the masculine form + fm = self.fem2masc(token) + if fm is not None: + return fm + else: + if pre is None: # if the prefixes are not stripped + # strip the verb prefixes and suffixes + return self.verb(token) + else: + return ps + return token + except ValueError as e: + print(e) + + def norm(self, token): + """ + normalize the word by removing diacritics, replacing hamzated Alif + with Alif replacing AlifMaqsura with Yaa and removing Waaw at the + beginning. + """ + # strip Arabic diacritics + token = self.re_diacritics.sub("", token) + # replace Hamzated Alif with Alif bare + token = self.re_hamzated_alif.sub("\u0627", token) + # replace alifMaqsura with Yaa + token = self.re_alifMaqsura.sub("\u064A", token) + # strip the Waaw from the word beginning if the remaining is 3 letters + # at least + if token.startswith("\u0648") and len(token) > 3: + token = token[1:] + return token + + def pref(self, token): + """ + remove prefixes from the words' beginning. + """ + if len(token) > 5: + for p3 in self.pr3: + if token.startswith(p3): + return token[3:] + if len(token) > 6: + for p4 in self.pr4: + if token.startswith(p4): + return token[4:] + if len(token) > 5: + for p3 in self.pr32: + if token.startswith(p3): + return token[3:] + if len(token) > 4: + for p2 in self.pr2: + if token.startswith(p2): + return token[2:] + + def suff(self, token): + """ + remove suffixes from the word's end. + """ + if token.endswith("\u0643") and len(token) > 3: + return token[:-1] + if len(token) > 4: + for s2 in self.su2: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su3: + if token.endswith(s3): + return token[:-3] + if token.endswith("\u0647") and len(token) > 3: + token = token[:-1] + return token + if len(token) > 4: + for s2 in self.su22: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su32: + if token.endswith(s3): + return token[:-3] + if token.endswith("\u0646\u0627") and len(token) > 4: + return token[:-2] + return token + + def fem2masc(self, token): + """ + transform the word from the feminine form to the masculine form. + """ + if token.endswith("\u0629") and len(token) > 3: + return token[:-1] + + def plur2sing(self, token): + """ + transform the word from the plural form to the singular form. 
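+
+        A doctest-style sketch of the sound-plural rule (a final Waaw+Noon is
+        stripped); the example word is chosen here for illustration:
+
+        >>> from nltk.stem.arlstem import ARLSTem
+        >>> ARLSTem().plur2sing('مسلمون')
+        'مسلم'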
+ """ + if len(token) > 4: + for ps2 in self.pl_si2: + if token.endswith(ps2): + return token[:-2] + if len(token) > 5: + for ps3 in self.pl_si3: + if token.endswith(ps3): + return token[:-3] + if len(token) > 3 and token.endswith("\u0627\u062A"): + return token[:-2] + if len(token) > 3 and token.startswith("\u0627") and token[2] == "\u0627": + return token[:2] + token[3:] + if len(token) > 4 and token.startswith("\u0627") and token[-2] == "\u0627": + return token[1:-2] + token[-1] + + def verb(self, token): + """ + stem the verb prefixes and suffixes or both + """ + vb = self.verb_t1(token) + if vb is not None: + return vb + vb = self.verb_t2(token) + if vb is not None: + return vb + vb = self.verb_t3(token) + if vb is not None: + return vb + vb = self.verb_t4(token) + if vb is not None: + return vb + vb = self.verb_t5(token) + if vb is not None: + return vb + return self.verb_t6(token) + + def verb_t1(self, token): + """ + stem the present prefixes and suffixes + """ + if len(token) > 5 and token.startswith("\u062A"): # Taa + for s2 in self.pl_si2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 5 and token.startswith("\u064A"): # Yaa + for s2 in self.verb_su2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 4 and token.startswith("\u0627"): # Alif + # Waaw Alif + if len(token) > 5 and token.endswith("\u0648\u0627"): + return token[1:-2] + # Yaa + if token.endswith("\u064A"): + return token[1:-1] + # Alif + if token.endswith("\u0627"): + return token[1:-1] + # Noon + if token.endswith("\u0646"): + return token[1:-1] + # ^Yaa, Noon$ + if len(token) > 4 and token.startswith("\u064A") and token.endswith("\u0646"): + return token[1:-1] + # ^Taa, Noon$ + if len(token) > 4 and token.startswith("\u062A") and token.endswith("\u0646"): + return token[1:-1] + + def verb_t2(self, token): + """ + stem the future prefixes and suffixes + """ + if len(token) > 6: + for s2 in self.pl_si2: + # ^Siin Taa + if token.startswith(self.verb_pr2[0]) and token.endswith(s2): + return token[2:-2] + # ^Siin Yaa, Alif Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[0]): + return token[2:-2] + # ^Siin Yaa, Waaw Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[2]): + return token[2:-2] + # ^Siin Taa, Noon$ + if ( + len(token) > 5 + and token.startswith(self.verb_pr2[0]) + and token.endswith("\u0646") + ): + return token[2:-1] + # ^Siin Yaa, Noon$ + if ( + len(token) > 5 + and token.startswith(self.verb_pr2[1]) + and token.endswith("\u0646") + ): + return token[2:-1] + + def verb_t3(self, token): + """ + stem the present suffixes + """ + if len(token) > 5: + for su3 in self.verb_suf3: + if token.endswith(su3): + return token[:-3] + if len(token) > 4: + for su2 in self.verb_suf2: + if token.endswith(su2): + return token[:-2] + if len(token) > 3: + for su1 in self.verb_suf1: + if token.endswith(su1): + return token[:-1] + + def verb_t4(self, token): + """ + stem the present prefixes + """ + if len(token) > 3: + for pr1 in self.verb_suf1: + if token.startswith(pr1): + return token[1:] + if token.startswith("\u064A"): + return token[1:] + + def verb_t5(self, token): + """ + stem the future prefixes + """ + if len(token) > 4: + for pr2 in self.verb_pr22: + if token.startswith(pr2): + return token[2:] + for pr2 in self.verb_pr2: + if token.startswith(pr2): + return token[2:] + return token + + def verb_t6(self, token): + """ + stem the order prefixes + """ + if len(token) > 4: + for pr3 in self.verb_pr33: + if token.startswith(pr3): + 
return token[2:] + return token diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/arlstem2.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/arlstem2.py new file mode 100644 index 0000000000000000000000000000000000000000..a2d9e9551ecffff219821bb570f96b21f588a6f0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/arlstem2.py @@ -0,0 +1,457 @@ +# +# Natural Language Toolkit: ARLSTem Stemmer v2 +# +# Copyright (C) 2001-2023 NLTK Project +# +# Author: Kheireddine Abainia (x-programer) +# Algorithms: Kheireddine Abainia +# Hamza Rebbani +# URL: +# For license information, see LICENSE.TXT + + +""" +ARLSTem2 Arabic Light Stemmer +The details about the implementation of this algorithm are described in: +K. Abainia and H. Rebbani, Comparing the Effectiveness of the Improved ARLSTem +Algorithm with Existing Arabic Light Stemmers, International Conference on +Theoretical and Applicative Aspects of Computer Science (ICTAACS'19), Skikda, +Algeria, December 15-16, 2019. +ARLSTem2 is an Arabic light stemmer based on removing the affixes from +the words (i.e. prefixes, suffixes and infixes). It is an improvement +of the previous Arabic light stemmer (ARLSTem). The new version was compared to +the original algorithm and several existing Arabic light stemmers, where the +results showed that the new version considerably improves the under-stemming +errors that are common to light stemmers. Both ARLSTem and ARLSTem2 can be run +online and do not use any dictionary. +""" +import re + +from nltk.stem.api import StemmerI + + +class ARLSTem2(StemmerI): + """ + Return a stemmed Arabic word after removing affixes. This an improved + version of the previous algorithm, which reduces under-stemming errors. + Typically used in Arabic search engine, information retrieval and NLP. 
+ + >>> from nltk.stem import arlstem2 + >>> stemmer = ARLSTem2() + >>> word = stemmer.stem('يعمل') + >>> print(word) + عمل + + :param token: The input Arabic word (unicode) to be stemmed + :type token: unicode + :return: A unicode Arabic word + """ + + def __init__(self): + # different Alif with hamza + self.re_hamzated_alif = re.compile(r"[\u0622\u0623\u0625]") + self.re_alifMaqsura = re.compile(r"[\u0649]") + self.re_diacritics = re.compile(r"[\u064B-\u065F]") + + # Alif Laam, Laam Laam, Fa Laam, Fa Ba + self.pr2 = ["\u0627\u0644", "\u0644\u0644", "\u0641\u0644", "\u0641\u0628"] + # Ba Alif Laam, Kaaf Alif Laam, Waaw Alif Laam + self.pr3 = ["\u0628\u0627\u0644", "\u0643\u0627\u0644", "\u0648\u0627\u0644"] + # Fa Laam Laam, Waaw Laam Laam + self.pr32 = ["\u0641\u0644\u0644", "\u0648\u0644\u0644"] + # Fa Ba Alif Laam, Waaw Ba Alif Laam, Fa Kaaf Alif Laam + self.pr4 = [ + "\u0641\u0628\u0627\u0644", + "\u0648\u0628\u0627\u0644", + "\u0641\u0643\u0627\u0644", + ] + + # Kaf Yaa, Kaf Miim + self.su2 = ["\u0643\u064A", "\u0643\u0645"] + # Ha Alif, Ha Miim + self.su22 = ["\u0647\u0627", "\u0647\u0645"] + # Kaf Miim Alif, Kaf Noon Shadda + self.su3 = ["\u0643\u0645\u0627", "\u0643\u0646\u0651"] + # Ha Miim Alif, Ha Noon Shadda + self.su32 = ["\u0647\u0645\u0627", "\u0647\u0646\u0651"] + + # Alif Noon, Ya Noon, Waaw Noon + self.pl_si2 = ["\u0627\u0646", "\u064A\u0646", "\u0648\u0646"] + # Taa Alif Noon, Taa Ya Noon + self.pl_si3 = ["\u062A\u0627\u0646", "\u062A\u064A\u0646"] + + # Alif Noon, Waaw Noon + self.verb_su2 = ["\u0627\u0646", "\u0648\u0646"] + # Siin Taa, Siin Yaa + self.verb_pr2 = ["\u0633\u062A", "\u0633\u064A"] + # Siin Alif, Siin Noon + self.verb_pr22 = ["\u0633\u0627", "\u0633\u0646"] + # Lam Noon, Lam Taa, Lam Yaa, Lam Hamza + self.verb_pr33 = [ + "\u0644\u0646", + "\u0644\u062A", + "\u0644\u064A", + "\u0644\u0623", + ] + # Taa Miim Alif, Taa Noon Shadda + self.verb_suf3 = ["\u062A\u0645\u0627", "\u062A\u0646\u0651"] + # Noon Alif, Taa Miim, Taa Alif, Waaw Alif + self.verb_suf2 = [ + "\u0646\u0627", + "\u062A\u0645", + "\u062A\u0627", + "\u0648\u0627", + ] + # Taa, Alif, Noon + self.verb_suf1 = ["\u062A", "\u0627", "\u0646"] + + def stem1(self, token): + """ + call this function to get the first stem + """ + try: + if token is None: + raise ValueError( + "The word could not be stemmed, because \ + it is empty !" + ) + self.is_verb = False + # remove Arabic diacritics and replace some letters with others + token = self.norm(token) + # strip the common noun prefixes + pre = self.pref(token) + if pre is not None: + token = pre + # transform the feminine form to masculine form + fm = self.fem2masc(token) + if fm is not None: + return fm + # strip the adjective affixes + adj = self.adjective(token) + if adj is not None: + return adj + # strip the suffixes that are common to nouns and verbs + token = self.suff(token) + # transform a plural noun to a singular noun + ps = self.plur2sing(token) + if ps is None: + if pre is None: # if the noun prefixes are not stripped + # strip the verb prefixes and suffixes + verb = self.verb(token) + if verb is not None: + self.is_verb = True + return verb + else: + return ps + return token + except ValueError as e: + print(e) + + def stem(self, token): + # stem the input word + try: + if token is None: + raise ValueError( + "The word could not be stemmed, because \ + it is empty !" 
+ ) + # run the first round of stemming + token = self.stem1(token) + # check if there is some additional noun affixes + if len(token) > 4: + # ^Taa, $Yaa + char + if token.startswith("\u062A") and token[-2] == "\u064A": + token = token[1:-2] + token[-1] + return token + # ^Miim, $Waaw + char + if token.startswith("\u0645") and token[-2] == "\u0648": + token = token[1:-2] + token[-1] + return token + if len(token) > 3: + # !^Alif, $Yaa + if not token.startswith("\u0627") and token.endswith("\u064A"): + token = token[:-1] + return token + # $Laam + if token.startswith("\u0644"): + return token[1:] + return token + except ValueError as e: + print(e) + + def norm(self, token): + """ + normalize the word by removing diacritics, replace hamzated Alif + with Alif bare, replace AlifMaqsura with Yaa and remove Waaw at the + beginning. + """ + # strip Arabic diacritics + token = self.re_diacritics.sub("", token) + # replace Hamzated Alif with Alif bare + token = self.re_hamzated_alif.sub("\u0627", token) + # replace alifMaqsura with Yaa + token = self.re_alifMaqsura.sub("\u064A", token) + # strip the Waaw from the word beginning if the remaining is + # tri-literal at least + if token.startswith("\u0648") and len(token) > 3: + token = token[1:] + return token + + def pref(self, token): + """ + remove prefixes from the words' beginning. + """ + if len(token) > 5: + for p3 in self.pr3: + if token.startswith(p3): + return token[3:] + if len(token) > 6: + for p4 in self.pr4: + if token.startswith(p4): + return token[4:] + if len(token) > 5: + for p3 in self.pr32: + if token.startswith(p3): + return token[3:] + if len(token) > 4: + for p2 in self.pr2: + if token.startswith(p2): + return token[2:] + + def adjective(self, token): + """ + remove the infixes from adjectives + """ + # ^Alif, Alif, $Yaa + if len(token) > 5: + if ( + token.startswith("\u0627") + and token[-3] == "\u0627" + and token.endswith("\u064A") + ): + return token[:-3] + token[-2] + + def suff(self, token): + """ + remove the suffixes from the word's ending. + """ + if token.endswith("\u0643") and len(token) > 3: + return token[:-1] + if len(token) > 4: + for s2 in self.su2: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su3: + if token.endswith(s3): + return token[:-3] + if token.endswith("\u0647") and len(token) > 3: + token = token[:-1] + return token + if len(token) > 4: + for s2 in self.su22: + if token.endswith(s2): + return token[:-2] + if len(token) > 5: + for s3 in self.su32: + if token.endswith(s3): + return token[:-3] + # $Noon and Alif + if token.endswith("\u0646\u0627") and len(token) > 4: + return token[:-2] + return token + + def fem2masc(self, token): + """ + transform the word from the feminine form to the masculine form. 
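+
+        A doctest-style sketch of the simplest case (a final Taa Marbuta is
+        dropped); the example word is chosen here for illustration:
+
+        >>> from nltk.stem.arlstem2 import ARLSTem2
+        >>> ARLSTem2().fem2masc('مدرسة')
+        'مدرس'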
+ """ + if len(token) > 6: + # ^Taa, Yaa, $Yaa and Taa Marbuta + if ( + token.startswith("\u062A") + and token[-4] == "\u064A" + and token.endswith("\u064A\u0629") + ): + return token[1:-4] + token[-3] + # ^Alif, Yaa, $Yaa and Taa Marbuta + if ( + token.startswith("\u0627") + and token[-4] == "\u0627" + and token.endswith("\u064A\u0629") + ): + return token[:-4] + token[-3] + # $Alif, Yaa and Taa Marbuta + if token.endswith("\u0627\u064A\u0629") and len(token) > 5: + return token[:-2] + if len(token) > 4: + # Alif, $Taa Marbuta + if token[1] == "\u0627" and token.endswith("\u0629"): + return token[0] + token[2:-1] + # $Yaa and Taa Marbuta + if token.endswith("\u064A\u0629"): + return token[:-2] + # $Taa Marbuta + if token.endswith("\u0629") and len(token) > 3: + return token[:-1] + + def plur2sing(self, token): + """ + transform the word from the plural form to the singular form. + """ + # ^Haa, $Noon, Waaw + if len(token) > 5: + if token.startswith("\u0645") and token.endswith("\u0648\u0646"): + return token[1:-2] + if len(token) > 4: + for ps2 in self.pl_si2: + if token.endswith(ps2): + return token[:-2] + if len(token) > 5: + for ps3 in self.pl_si3: + if token.endswith(ps3): + return token[:-3] + if len(token) > 4: + # $Alif, Taa + if token.endswith("\u0627\u062A"): + return token[:-2] + # ^Alif Alif + if token.startswith("\u0627") and token[2] == "\u0627": + return token[:2] + token[3:] + # ^Alif Alif + if token.startswith("\u0627") and token[-2] == "\u0627": + return token[1:-2] + token[-1] + + def verb(self, token): + """ + stem the verb prefixes and suffixes or both + """ + vb = self.verb_t1(token) + if vb is not None: + return vb + vb = self.verb_t2(token) + if vb is not None: + return vb + vb = self.verb_t3(token) + if vb is not None: + return vb + vb = self.verb_t4(token) + if vb is not None: + return vb + vb = self.verb_t5(token) + if vb is not None: + return vb + vb = self.verb_t6(token) + return vb + + def verb_t1(self, token): + """ + stem the present tense co-occurred prefixes and suffixes + """ + if len(token) > 5 and token.startswith("\u062A"): # Taa + for s2 in self.pl_si2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 5 and token.startswith("\u064A"): # Yaa + for s2 in self.verb_su2: + if token.endswith(s2): + return token[1:-2] + if len(token) > 4 and token.startswith("\u0627"): # Alif + # Waaw Alif + if len(token) > 5 and token.endswith("\u0648\u0627"): + return token[1:-2] + # Yaa + if token.endswith("\u064A"): + return token[1:-1] + # Alif + if token.endswith("\u0627"): + return token[1:-1] + # Noon + if token.endswith("\u0646"): + return token[1:-1] + # ^Yaa, Noon$ + if len(token) > 4 and token.startswith("\u064A") and token.endswith("\u0646"): + return token[1:-1] + # ^Taa, Noon$ + if len(token) > 4 and token.startswith("\u062A") and token.endswith("\u0646"): + return token[1:-1] + + def verb_t2(self, token): + """ + stem the future tense co-occurred prefixes and suffixes + """ + if len(token) > 6: + for s2 in self.pl_si2: + # ^Siin Taa + if token.startswith(self.verb_pr2[0]) and token.endswith(s2): + return token[2:-2] + # ^Siin Yaa, Alif Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[0]): + return token[2:-2] + # ^Siin Yaa, Waaw Noon$ + if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[2]): + return token[2:-2] + # ^Siin Taa, Noon$ + if ( + len(token) > 5 + and token.startswith(self.verb_pr2[0]) + and token.endswith("\u0646") + ): + return token[2:-1] + # ^Siin Yaa, Noon$ + if ( + len(token) > 5 + 
and token.startswith(self.verb_pr2[1]) + and token.endswith("\u0646") + ): + return token[2:-1] + + def verb_t3(self, token): + """ + stem the present tense suffixes + """ + if len(token) > 5: + for su3 in self.verb_suf3: + if token.endswith(su3): + return token[:-3] + if len(token) > 4: + for su2 in self.verb_suf2: + if token.endswith(su2): + return token[:-2] + if len(token) > 3: + for su1 in self.verb_suf1: + if token.endswith(su1): + return token[:-1] + + def verb_t4(self, token): + """ + stem the present tense prefixes + """ + if len(token) > 3: + for pr1 in self.verb_suf1: + if token.startswith(pr1): + return token[1:] + if token.startswith("\u064A"): + return token[1:] + + def verb_t5(self, token): + """ + stem the future tense prefixes + """ + if len(token) > 4: + for pr2 in self.verb_pr22: + if token.startswith(pr2): + return token[2:] + for pr2 in self.verb_pr2: + if token.startswith(pr2): + return token[2:] + + def verb_t6(self, token): + """ + stem the imperative tense prefixes + """ + if len(token) > 4: + for pr3 in self.verb_pr33: + if token.startswith(pr3): + return token[2:] + + return token diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/cistem.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/cistem.py new file mode 100644 index 0000000000000000000000000000000000000000..69c07a42a373cec1eca9d75e9d474c4c1063e70b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/cistem.py @@ -0,0 +1,209 @@ +# Natural Language Toolkit: CISTEM Stemmer for German +# Copyright (C) 2001-2023 NLTK Project +# Author: Leonie Weissweiler +# Tom Aarsen <> (modifications) +# Algorithm: Leonie Weissweiler +# Alexander Fraser +# URL: +# For license information, see LICENSE.TXT + +import re +from typing import Tuple + +from nltk.stem.api import StemmerI + + +class Cistem(StemmerI): + """ + CISTEM Stemmer for German + + This is the official Python implementation of the CISTEM stemmer. + It is based on the paper + Leonie Weissweiler, Alexander Fraser (2017). Developing a Stemmer for German + Based on a Comparative Analysis of Publicly Available Stemmers. + In Proceedings of the German Society for Computational Linguistics and Language + Technology (GSCL) + which can be read here: + https://www.cis.lmu.de/~weissweiler/cistem/ + + In the paper, we conducted an analysis of publicly available stemmers, + developed two gold standards for German stemming and evaluated the stemmers + based on the two gold standards. We then proposed the stemmer implemented here + and show that it achieves slightly better f-measure than the other stemmers and + is thrice as fast as the Snowball stemmer for German while being about as fast + as most other stemmers. + + case_insensitive is a a boolean specifying if case-insensitive stemming + should be used. Case insensitivity improves performance only if words in the + text may be incorrectly upper case. For all-lowercase and correctly cased + text, best performance is achieved by setting case_insensitive for false. + + :param case_insensitive: if True, the stemming is case insensitive. False by default. 
+ :type case_insensitive: bool + """ + + strip_ge = re.compile(r"^ge(.{4,})") + repl_xx = re.compile(r"(.)\1") + strip_emr = re.compile(r"e[mr]$") + strip_nd = re.compile(r"nd$") + strip_t = re.compile(r"t$") + strip_esn = re.compile(r"[esn]$") + repl_xx_back = re.compile(r"(.)\*") + + def __init__(self, case_insensitive: bool = False): + self._case_insensitive = case_insensitive + + @staticmethod + def replace_to(word: str) -> str: + word = word.replace("sch", "$") + word = word.replace("ei", "%") + word = word.replace("ie", "&") + word = Cistem.repl_xx.sub(r"\1*", word) + + return word + + @staticmethod + def replace_back(word: str) -> str: + word = Cistem.repl_xx_back.sub(r"\1\1", word) + word = word.replace("%", "ei") + word = word.replace("&", "ie") + word = word.replace("$", "sch") + + return word + + def stem(self, word: str) -> str: + """Stems the input word. + + :param word: The word that is to be stemmed. + :type word: str + :return: The stemmed word. + :rtype: str + + >>> from nltk.stem.cistem import Cistem + >>> stemmer = Cistem() + >>> s1 = "Speicherbehältern" + >>> stemmer.stem(s1) + 'speicherbehalt' + >>> s2 = "Grenzpostens" + >>> stemmer.stem(s2) + 'grenzpost' + >>> s3 = "Ausgefeiltere" + >>> stemmer.stem(s3) + 'ausgefeilt' + >>> stemmer = Cistem(True) + >>> stemmer.stem(s1) + 'speicherbehal' + >>> stemmer.stem(s2) + 'grenzpo' + >>> stemmer.stem(s3) + 'ausgefeil' + """ + if len(word) == 0: + return word + + upper = word[0].isupper() + word = word.lower() + + word = word.replace("ü", "u") + word = word.replace("ö", "o") + word = word.replace("ä", "a") + word = word.replace("ß", "ss") + + word = Cistem.strip_ge.sub(r"\1", word) + + return self._segment_inner(word, upper)[0] + + def segment(self, word: str) -> Tuple[str, str]: + """ + This method works very similarly to stem (:func:'cistem.stem'). The difference is that in + addition to returning the stem, it also returns the rest that was removed at + the end. To be able to return the stem unchanged so the stem and the rest + can be concatenated to form the original word, all subsitutions that altered + the stem in any other way than by removing letters at the end were left out. + + :param word: The word that is to be stemmed. + :type word: str + :return: A tuple of the stemmed word and the removed suffix. + :rtype: Tuple[str, str] + + >>> from nltk.stem.cistem import Cistem + >>> stemmer = Cistem() + >>> s1 = "Speicherbehältern" + >>> stemmer.segment(s1) + ('speicherbehält', 'ern') + >>> s2 = "Grenzpostens" + >>> stemmer.segment(s2) + ('grenzpost', 'ens') + >>> s3 = "Ausgefeiltere" + >>> stemmer.segment(s3) + ('ausgefeilt', 'ere') + >>> stemmer = Cistem(True) + >>> stemmer.segment(s1) + ('speicherbehäl', 'tern') + >>> stemmer.segment(s2) + ('grenzpo', 'stens') + >>> stemmer.segment(s3) + ('ausgefeil', 'tere') + """ + if len(word) == 0: + return ("", "") + + upper = word[0].isupper() + word = word.lower() + + return self._segment_inner(word, upper) + + def _segment_inner(self, word: str, upper: bool): + """Inner method for iteratively applying the code stemming regexes. + This method receives a pre-processed variant of the word to be stemmed, + or the word to be segmented, and returns a tuple of the word and the + removed suffix. + + :param word: A pre-processed variant of the word that is to be stemmed. + :type word: str + :param upper: Whether the original word started with a capital letter. + :type upper: bool + :return: A tuple of the stemmed word and the removed suffix. 
+ :rtype: Tuple[str, str] + """ + + rest_length = 0 + word_copy = word[:] + + # Pre-processing before applying the substitution patterns + word = Cistem.replace_to(word) + rest = "" + + # Apply the substitution patterns + while len(word) > 3: + if len(word) > 5: + word, n = Cistem.strip_emr.subn("", word) + if n != 0: + rest_length += 2 + continue + + word, n = Cistem.strip_nd.subn("", word) + if n != 0: + rest_length += 2 + continue + + if not upper or self._case_insensitive: + word, n = Cistem.strip_t.subn("", word) + if n != 0: + rest_length += 1 + continue + + word, n = Cistem.strip_esn.subn("", word) + if n != 0: + rest_length += 1 + continue + else: + break + + # Post-processing after applying the substitution patterns + word = Cistem.replace_back(word) + + if rest_length: + rest = word_copy[-rest_length:] + + return (word, rest) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/isri.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/isri.py new file mode 100644 index 0000000000000000000000000000000000000000..4ae91f1fafaf713330ce78696873e258487d2d0a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/isri.py @@ -0,0 +1,395 @@ +# +# Natural Language Toolkit: The ISRI Arabic Stemmer +# +# Copyright (C) 2001-2023 NLTK Project +# Algorithm: Kazem Taghva, Rania Elkhoury, and Jeffrey Coombs (2005) +# Author: Hosam Algasaier +# URL: +# For license information, see LICENSE.TXT + +""" +ISRI Arabic Stemmer + +The algorithm for this stemmer is described in: + +Taghva, K., Elkoury, R., and Coombs, J. 2005. Arabic Stemming without a root dictionary. +Information Science Research Institute. University of Nevada, Las Vegas, USA. + +The Information Science Research Institute’s (ISRI) Arabic stemmer shares many features +with the Khoja stemmer. However, the main difference is that ISRI stemmer does not use root +dictionary. Also, if a root is not found, ISRI stemmer returned normalized form, rather than +returning the original unmodified word. + +Additional adjustments were made to improve the algorithm: + +1- Adding 60 stop words. +2- Adding the pattern (تفاعيل) to ISRI pattern set. +3- The step 2 in the original algorithm was normalizing all hamza. This step is discarded because it +increases the word ambiguities and changes the original root. + +""" +import re + +from nltk.stem.api import StemmerI + + +class ISRIStemmer(StemmerI): + """ + ISRI Arabic stemmer based on algorithm: Arabic Stemming without a root dictionary. + Information Science Research Institute. University of Nevada, Las Vegas, USA. + + A few minor modifications have been made to ISRI basic algorithm. + See the source code of this module for more information. + + isri.stem(token) returns Arabic root for the given token. + + The ISRI Stemmer requires that all tokens have Unicode string types. + If you use Python IDLE on Arabic Windows you have to decode text first + using Arabic '1256' coding. 
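+
+    A doctest-style sketch; the example word is chosen here for illustration
+    (a four-letter verb reduced to its tri-literal root by the short-prefix
+    rule):
+
+    >>> from nltk.stem.isri import ISRIStemmer
+    >>> ISRIStemmer().stem('يعمل')
+    'عمل'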
+ """ + + def __init__(self): + # length three prefixes + self.p3 = [ + "\u0643\u0627\u0644", + "\u0628\u0627\u0644", + "\u0648\u0644\u0644", + "\u0648\u0627\u0644", + ] + + # length two prefixes + self.p2 = ["\u0627\u0644", "\u0644\u0644"] + + # length one prefixes + self.p1 = [ + "\u0644", + "\u0628", + "\u0641", + "\u0633", + "\u0648", + "\u064a", + "\u062a", + "\u0646", + "\u0627", + ] + + # length three suffixes + self.s3 = [ + "\u062a\u0645\u0644", + "\u0647\u0645\u0644", + "\u062a\u0627\u0646", + "\u062a\u064a\u0646", + "\u0643\u0645\u0644", + ] + + # length two suffixes + self.s2 = [ + "\u0648\u0646", + "\u0627\u062a", + "\u0627\u0646", + "\u064a\u0646", + "\u062a\u0646", + "\u0643\u0645", + "\u0647\u0646", + "\u0646\u0627", + "\u064a\u0627", + "\u0647\u0627", + "\u062a\u0645", + "\u0643\u0646", + "\u0646\u064a", + "\u0648\u0627", + "\u0645\u0627", + "\u0647\u0645", + ] + + # length one suffixes + self.s1 = ["\u0629", "\u0647", "\u064a", "\u0643", "\u062a", "\u0627", "\u0646"] + + # groups of length four patterns + self.pr4 = { + 0: ["\u0645"], + 1: ["\u0627"], + 2: ["\u0627", "\u0648", "\u064A"], + 3: ["\u0629"], + } + + # Groups of length five patterns and length three roots + self.pr53 = { + 0: ["\u0627", "\u062a"], + 1: ["\u0627", "\u064a", "\u0648"], + 2: ["\u0627", "\u062a", "\u0645"], + 3: ["\u0645", "\u064a", "\u062a"], + 4: ["\u0645", "\u062a"], + 5: ["\u0627", "\u0648"], + 6: ["\u0627", "\u0645"], + } + + self.re_short_vowels = re.compile(r"[\u064B-\u0652]") + self.re_hamza = re.compile(r"[\u0621\u0624\u0626]") + self.re_initial_hamza = re.compile(r"^[\u0622\u0623\u0625]") + + self.stop_words = [ + "\u064a\u0643\u0648\u0646", + "\u0648\u0644\u064a\u0633", + "\u0648\u0643\u0627\u0646", + "\u0643\u0630\u0644\u0643", + "\u0627\u0644\u062a\u064a", + "\u0648\u0628\u064a\u0646", + "\u0639\u0644\u064a\u0647\u0627", + "\u0645\u0633\u0627\u0621", + "\u0627\u0644\u0630\u064a", + "\u0648\u0643\u0627\u0646\u062a", + "\u0648\u0644\u0643\u0646", + "\u0648\u0627\u0644\u062a\u064a", + "\u062a\u0643\u0648\u0646", + "\u0627\u0644\u064a\u0648\u0645", + "\u0627\u0644\u0644\u0630\u064a\u0646", + "\u0639\u0644\u064a\u0647", + "\u0643\u0627\u0646\u062a", + "\u0644\u0630\u0644\u0643", + "\u0623\u0645\u0627\u0645", + "\u0647\u0646\u0627\u0643", + "\u0645\u0646\u0647\u0627", + "\u0645\u0627\u0632\u0627\u0644", + "\u0644\u0627\u0632\u0627\u0644", + "\u0644\u0627\u064a\u0632\u0627\u0644", + "\u0645\u0627\u064a\u0632\u0627\u0644", + "\u0627\u0635\u0628\u062d", + "\u0623\u0635\u0628\u062d", + "\u0623\u0645\u0633\u0649", + "\u0627\u0645\u0633\u0649", + "\u0623\u0636\u062d\u0649", + "\u0627\u0636\u062d\u0649", + "\u0645\u0627\u0628\u0631\u062d", + "\u0645\u0627\u0641\u062a\u0626", + "\u0645\u0627\u0627\u0646\u0641\u0643", + "\u0644\u0627\u0633\u064a\u0645\u0627", + "\u0648\u0644\u0627\u064a\u0632\u0627\u0644", + "\u0627\u0644\u062d\u0627\u0644\u064a", + "\u0627\u0644\u064a\u0647\u0627", + "\u0627\u0644\u0630\u064a\u0646", + "\u0641\u0627\u0646\u0647", + "\u0648\u0627\u0644\u0630\u064a", + "\u0648\u0647\u0630\u0627", + "\u0644\u0647\u0630\u0627", + "\u0641\u0643\u0627\u0646", + "\u0633\u062a\u0643\u0648\u0646", + "\u0627\u0644\u064a\u0647", + "\u064a\u0645\u0643\u0646", + "\u0628\u0647\u0630\u0627", + "\u0627\u0644\u0630\u0649", + ] + + def stem(self, token): + """ + Stemming a word token using the ISRI stemmer. 
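+
+        Stop words are returned unchanged; a doctest-style illustration using
+        one of the stop words listed above:
+
+        >>> from nltk.stem.isri import ISRIStemmer
+        >>> ISRIStemmer().stem('الذي')
+        'الذي'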
+ """ + token = self.norm( + token, 1 + ) # remove diacritics which representing Arabic short vowels + if token in self.stop_words: + return token # exclude stop words from being processed + token = self.pre32( + token + ) # remove length three and length two prefixes in this order + token = self.suf32( + token + ) # remove length three and length two suffixes in this order + token = self.waw( + token + ) # remove connective ‘و’ if it precedes a word beginning with ‘و’ + token = self.norm(token, 2) # normalize initial hamza to bare alif + # if 4 <= word length <= 7, then stem; otherwise, no stemming + if len(token) == 4: # length 4 word + token = self.pro_w4(token) + elif len(token) == 5: # length 5 word + token = self.pro_w53(token) + token = self.end_w5(token) + elif len(token) == 6: # length 6 word + token = self.pro_w6(token) + token = self.end_w6(token) + elif len(token) == 7: # length 7 word + token = self.suf1(token) + if len(token) == 7: + token = self.pre1(token) + if len(token) == 6: + token = self.pro_w6(token) + token = self.end_w6(token) + return token + + def norm(self, word, num=3): + """ + normalization: + num=1 normalize diacritics + num=2 normalize initial hamza + num=3 both 1&2 + """ + if num == 1: + word = self.re_short_vowels.sub("", word) + elif num == 2: + word = self.re_initial_hamza.sub("\u0627", word) + elif num == 3: + word = self.re_short_vowels.sub("", word) + word = self.re_initial_hamza.sub("\u0627", word) + return word + + def pre32(self, word): + """remove length three and length two prefixes in this order""" + if len(word) >= 6: + for pre3 in self.p3: + if word.startswith(pre3): + return word[3:] + if len(word) >= 5: + for pre2 in self.p2: + if word.startswith(pre2): + return word[2:] + return word + + def suf32(self, word): + """remove length three and length two suffixes in this order""" + if len(word) >= 6: + for suf3 in self.s3: + if word.endswith(suf3): + return word[:-3] + if len(word) >= 5: + for suf2 in self.s2: + if word.endswith(suf2): + return word[:-2] + return word + + def waw(self, word): + """remove connective ‘و’ if it precedes a word beginning with ‘و’""" + if len(word) >= 4 and word[:2] == "\u0648\u0648": + word = word[1:] + return word + + def pro_w4(self, word): + """process length four patterns and extract length three roots""" + if word[0] in self.pr4[0]: # مفعل + word = word[1:] + elif word[1] in self.pr4[1]: # فاعل + word = word[:1] + word[2:] + elif word[2] in self.pr4[2]: # فعال - فعول - فعيل + word = word[:2] + word[3] + elif word[3] in self.pr4[3]: # فعلة + word = word[:-1] + else: + word = self.suf1(word) # do - normalize short sufix + if len(word) == 4: + word = self.pre1(word) # do - normalize short prefix + return word + + def pro_w53(self, word): + """process length five patterns and extract length three roots""" + if word[2] in self.pr53[0] and word[0] == "\u0627": # افتعل - افاعل + word = word[1] + word[3:] + elif word[3] in self.pr53[1] and word[0] == "\u0645": # مفعول - مفعال - مفعيل + word = word[1:3] + word[4] + elif word[0] in self.pr53[2] and word[4] == "\u0629": # مفعلة - تفعلة - افعلة + word = word[1:4] + elif word[0] in self.pr53[3] and word[2] == "\u062a": # مفتعل - يفتعل - تفتعل + word = word[1] + word[3:] + elif word[0] in self.pr53[4] and word[2] == "\u0627": # مفاعل - تفاعل + word = word[1] + word[3:] + elif word[2] in self.pr53[5] and word[4] == "\u0629": # فعولة - فعالة + word = word[:2] + word[3] + elif word[0] in self.pr53[6] and word[1] == "\u0646": # انفعل - منفعل + word = word[2:] + elif word[3] == 
"\u0627" and word[0] == "\u0627": # افعال + word = word[1:3] + word[4] + elif word[4] == "\u0646" and word[3] == "\u0627": # فعلان + word = word[:3] + elif word[3] == "\u064a" and word[0] == "\u062a": # تفعيل + word = word[1:3] + word[4] + elif word[3] == "\u0648" and word[1] == "\u0627": # فاعول + word = word[0] + word[2] + word[4] + elif word[2] == "\u0627" and word[1] == "\u0648": # فواعل + word = word[0] + word[3:] + elif word[3] == "\u0626" and word[2] == "\u0627": # فعائل + word = word[:2] + word[4] + elif word[4] == "\u0629" and word[1] == "\u0627": # فاعلة + word = word[0] + word[2:4] + elif word[4] == "\u064a" and word[2] == "\u0627": # فعالي + word = word[:2] + word[3] + else: + word = self.suf1(word) # do - normalize short sufix + if len(word) == 5: + word = self.pre1(word) # do - normalize short prefix + return word + + def pro_w54(self, word): + """process length five patterns and extract length four roots""" + if word[0] in self.pr53[2]: # تفعلل - افعلل - مفعلل + word = word[1:] + elif word[4] == "\u0629": # فعللة + word = word[:4] + elif word[2] == "\u0627": # فعالل + word = word[:2] + word[3:] + return word + + def end_w5(self, word): + """ending step (word of length five)""" + if len(word) == 4: + word = self.pro_w4(word) + elif len(word) == 5: + word = self.pro_w54(word) + return word + + def pro_w6(self, word): + """process length six patterns and extract length three roots""" + if word.startswith("\u0627\u0633\u062a") or word.startswith( + "\u0645\u0633\u062a" + ): # مستفعل - استفعل + word = word[3:] + elif ( + word[0] == "\u0645" and word[3] == "\u0627" and word[5] == "\u0629" + ): # مفعالة + word = word[1:3] + word[4] + elif ( + word[0] == "\u0627" and word[2] == "\u062a" and word[4] == "\u0627" + ): # افتعال + word = word[1] + word[3] + word[5] + elif ( + word[0] == "\u0627" and word[3] == "\u0648" and word[2] == word[4] + ): # افعوعل + word = word[1] + word[4:] + elif ( + word[0] == "\u062a" and word[2] == "\u0627" and word[4] == "\u064a" + ): # تفاعيل new pattern + word = word[1] + word[3] + word[5] + else: + word = self.suf1(word) # do - normalize short sufix + if len(word) == 6: + word = self.pre1(word) # do - normalize short prefix + return word + + def pro_w64(self, word): + """process length six patterns and extract length four roots""" + if word[0] == "\u0627" and word[4] == "\u0627": # افعلال + word = word[1:4] + word[5] + elif word.startswith("\u0645\u062a"): # متفعلل + word = word[2:] + return word + + def end_w6(self, word): + """ending step (word of length six)""" + if len(word) == 5: + word = self.pro_w53(word) + word = self.end_w5(word) + elif len(word) == 6: + word = self.pro_w64(word) + return word + + def suf1(self, word): + """normalize short sufix""" + for sf1 in self.s1: + if word.endswith(sf1): + return word[:-1] + return word + + def pre1(self, word): + """normalize short prefix""" + for sp1 in self.p1: + if word.startswith(sp1): + return word[1:] + return word diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/lancaster.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/lancaster.py new file mode 100644 index 0000000000000000000000000000000000000000..40a87331848c9f25332e5e655bc24d85b563c2c2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/lancaster.py @@ -0,0 +1,343 @@ +# Natural Language Toolkit: Stemmers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Tomcavage +# URL: +# For license information, see LICENSE.TXT + +""" +A word stemmer based on the Lancaster (Paice/Husk) stemming algorithm. 
+Paice, Chris D. "Another Stemmer." ACM SIGIR Forum 24.3 (1990): 56-61. +""" +import re + +from nltk.stem.api import StemmerI + + +class LancasterStemmer(StemmerI): + """ + Lancaster Stemmer + + >>> from nltk.stem.lancaster import LancasterStemmer + >>> st = LancasterStemmer() + >>> st.stem('maximum') # Remove "-um" when word is intact + 'maxim' + >>> st.stem('presumably') # Don't remove "-um" when word is not intact + 'presum' + >>> st.stem('multiply') # No action taken if word ends with "-ply" + 'multiply' + >>> st.stem('provision') # Replace "-sion" with "-j" to trigger "j" set of rules + 'provid' + >>> st.stem('owed') # Word starting with vowel must contain at least 2 letters + 'ow' + >>> st.stem('ear') # ditto + 'ear' + >>> st.stem('saying') # Words starting with consonant must contain at least 3 + 'say' + >>> st.stem('crying') # letters and one of those letters must be a vowel + 'cry' + >>> st.stem('string') # ditto + 'string' + >>> st.stem('meant') # ditto + 'meant' + >>> st.stem('cement') # ditto + 'cem' + >>> st_pre = LancasterStemmer(strip_prefix_flag=True) + >>> st_pre.stem('kilometer') # Test Prefix + 'met' + >>> st_custom = LancasterStemmer(rule_tuple=("ssen4>", "s1t.")) + >>> st_custom.stem("ness") # Change s to t + 'nest' + """ + + # The rule list is static since it doesn't change between instances + default_rule_tuple = ( + "ai*2.", # -ia > - if intact + "a*1.", # -a > - if intact + "bb1.", # -bb > -b + "city3s.", # -ytic > -ys + "ci2>", # -ic > - + "cn1t>", # -nc > -nt + "dd1.", # -dd > -d + "dei3y>", # -ied > -y + "deec2ss.", # -ceed >", -cess + "dee1.", # -eed > -ee + "de2>", # -ed > - + "dooh4>", # -hood > - + "e1>", # -e > - + "feil1v.", # -lief > -liev + "fi2>", # -if > - + "gni3>", # -ing > - + "gai3y.", # -iag > -y + "ga2>", # -ag > - + "gg1.", # -gg > -g + "ht*2.", # -th > - if intact + "hsiug5ct.", # -guish > -ct + "hsi3>", # -ish > - + "i*1.", # -i > - if intact + "i1y>", # -i > -y + "ji1d.", # -ij > -id -- see nois4j> & vis3j> + "juf1s.", # -fuj > -fus + "ju1d.", # -uj > -ud + "jo1d.", # -oj > -od + "jeh1r.", # -hej > -her + "jrev1t.", # -verj > -vert + "jsim2t.", # -misj > -mit + "jn1d.", # -nj > -nd + "j1s.", # -j > -s + "lbaifi6.", # -ifiabl > - + "lbai4y.", # -iabl > -y + "lba3>", # -abl > - + "lbi3.", # -ibl > - + "lib2l>", # -bil > -bl + "lc1.", # -cl > c + "lufi4y.", # -iful > -y + "luf3>", # -ful > - + "lu2.", # -ul > - + "lai3>", # -ial > - + "lau3>", # -ual > - + "la2>", # -al > - + "ll1.", # -ll > -l + "mui3.", # -ium > - + "mu*2.", # -um > - if intact + "msi3>", # -ism > - + "mm1.", # -mm > -m + "nois4j>", # -sion > -j + "noix4ct.", # -xion > -ct + "noi3>", # -ion > - + "nai3>", # -ian > - + "na2>", # -an > - + "nee0.", # protect -een + "ne2>", # -en > - + "nn1.", # -nn > -n + "pihs4>", # -ship > - + "pp1.", # -pp > -p + "re2>", # -er > - + "rae0.", # protect -ear + "ra2.", # -ar > - + "ro2>", # -or > - + "ru2>", # -ur > - + "rr1.", # -rr > -r + "rt1>", # -tr > -t + "rei3y>", # -ier > -y + "sei3y>", # -ies > -y + "sis2.", # -sis > -s + "si2>", # -is > - + "ssen4>", # -ness > - + "ss0.", # protect -ss + "suo3>", # -ous > - + "su*2.", # -us > - if intact + "s*1>", # -s > - if intact + "s0.", # -s > -s + "tacilp4y.", # -plicat > -ply + "ta2>", # -at > - + "tnem4>", # -ment > - + "tne3>", # -ent > - + "tna3>", # -ant > - + "tpir2b.", # -ript > -rib + "tpro2b.", # -orpt > -orb + "tcud1.", # -duct > -duc + "tpmus2.", # -sumpt > -sum + "tpec2iv.", # -cept > -ceiv + "tulo2v.", # -olut > -olv + "tsis0.", # protect -sist + "tsi3>", # -ist > - + "tt1.", # -tt > 
-t + "uqi3.", # -iqu > - + "ugo1.", # -ogu > -og + "vis3j>", # -siv > -j + "vie0.", # protect -eiv + "vi2>", # -iv > - + "ylb1>", # -bly > -bl + "yli3y>", # -ily > -y + "ylp0.", # protect -ply + "yl2>", # -ly > - + "ygo1.", # -ogy > -og + "yhp1.", # -phy > -ph + "ymo1.", # -omy > -om + "ypo1.", # -opy > -op + "yti3>", # -ity > - + "yte3>", # -ety > - + "ytl2.", # -lty > -l + "yrtsi5.", # -istry > - + "yra3>", # -ary > - + "yro3>", # -ory > - + "yfi3.", # -ify > - + "ycn2t>", # -ncy > -nt + "yca3>", # -acy > - + "zi2>", # -iz > - + "zy1s.", # -yz > -ys + ) + + def __init__(self, rule_tuple=None, strip_prefix_flag=False): + """Create an instance of the Lancaster stemmer.""" + # Setup an empty rule dictionary - this will be filled in later + self.rule_dictionary = {} + # Check if a user wants to strip prefix + self._strip_prefix = strip_prefix_flag + # Check if a user wants to use his/her own rule tuples. + self._rule_tuple = rule_tuple if rule_tuple else self.default_rule_tuple + + def parseRules(self, rule_tuple=None): + """Validate the set of rules used in this stemmer. + + If this function is called as an individual method, without using stem + method, rule_tuple argument will be compiled into self.rule_dictionary. + If this function is called within stem, self._rule_tuple will be used. + + """ + # If there is no argument for the function, use class' own rule tuple. + rule_tuple = rule_tuple if rule_tuple else self._rule_tuple + valid_rule = re.compile(r"^[a-z]+\*?\d[a-z]*[>\.]?$") + # Empty any old rules from the rule set before adding new ones + self.rule_dictionary = {} + + for rule in rule_tuple: + if not valid_rule.match(rule): + raise ValueError(f"The rule {rule} is invalid") + first_letter = rule[0:1] + if first_letter in self.rule_dictionary: + self.rule_dictionary[first_letter].append(rule) + else: + self.rule_dictionary[first_letter] = [rule] + + def stem(self, word): + """Stem a word using the Lancaster stemmer.""" + # Lower-case the word, since all the rules are lower-cased + word = word.lower() + word = self.__stripPrefix(word) if self._strip_prefix else word + + # Save a copy of the original word + intact_word = word + + # If rule dictionary is empty, parse rule tuple. 
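+        # (parseRules() fills self.rule_dictionary from self._rule_tuple,
+        # grouping the rules by their first character, i.e. the last letter
+        # of the suffix they match.)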
+ if not self.rule_dictionary: + self.parseRules() + + return self.__doStemming(word, intact_word) + + def __doStemming(self, word, intact_word): + """Perform the actual word stemming""" + + valid_rule = re.compile(r"^([a-z]+)(\*?)(\d)([a-z]*)([>\.]?)$") + + proceed = True + + while proceed: + + # Find the position of the last letter of the word to be stemmed + last_letter_position = self.__getLastLetter(word) + + # Only stem the word if it has a last letter and a rule matching that last letter + if ( + last_letter_position < 0 + or word[last_letter_position] not in self.rule_dictionary + ): + proceed = False + + else: + rule_was_applied = False + + # Go through each rule that matches the word's final letter + for rule in self.rule_dictionary[word[last_letter_position]]: + rule_match = valid_rule.match(rule) + if rule_match: + ( + ending_string, + intact_flag, + remove_total, + append_string, + cont_flag, + ) = rule_match.groups() + + # Convert the number of chars to remove when stemming + # from a string to an integer + remove_total = int(remove_total) + + # Proceed if word's ending matches rule's word ending + if word.endswith(ending_string[::-1]): + if intact_flag: + if word == intact_word and self.__isAcceptable( + word, remove_total + ): + word = self.__applyRule( + word, remove_total, append_string + ) + rule_was_applied = True + if cont_flag == ".": + proceed = False + break + elif self.__isAcceptable(word, remove_total): + word = self.__applyRule( + word, remove_total, append_string + ) + rule_was_applied = True + if cont_flag == ".": + proceed = False + break + # If no rules apply, the word doesn't need any more stemming + if rule_was_applied == False: + proceed = False + return word + + def __getLastLetter(self, word): + """Get the zero-based index of the last alphabetic character in this string""" + last_letter = -1 + for position in range(len(word)): + if word[position].isalpha(): + last_letter = position + else: + break + return last_letter + + def __isAcceptable(self, word, remove_total): + """Determine if the word is acceptable for stemming.""" + word_is_acceptable = False + # If the word starts with a vowel, it must be at least 2 + # characters long to be stemmed + if word[0] in "aeiouy": + if len(word) - remove_total >= 2: + word_is_acceptable = True + # If the word starts with a consonant, it must be at least 3 + # characters long (including one vowel) to be stemmed + elif len(word) - remove_total >= 3: + if word[1] in "aeiouy": + word_is_acceptable = True + elif word[2] in "aeiouy": + word_is_acceptable = True + return word_is_acceptable + + def __applyRule(self, word, remove_total, append_string): + """Apply the stemming rule to the word""" + # Remove letters from the end of the word + new_word_length = len(word) - remove_total + word = word[0:new_word_length] + + # And add new letters to the end of the truncated word + if append_string: + word += append_string + return word + + def __stripPrefix(self, word): + """Remove prefix from a word. + + This function originally taken from Whoosh. 
+ + """ + for prefix in ( + "kilo", + "micro", + "milli", + "intra", + "ultra", + "mega", + "nano", + "pico", + "pseudo", + ): + if word.startswith(prefix): + return word[len(prefix) :] + return word + + def __repr__(self): + return "" diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/porter.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/porter.py new file mode 100644 index 0000000000000000000000000000000000000000..c84402d8083677ea9e727f5f5b0998529ad96ba6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/porter.py @@ -0,0 +1,715 @@ +""" +Porter Stemmer + +This is the Porter stemming algorithm. It follows the algorithm +presented in + +Porter, M. "An algorithm for suffix stripping." Program 14.3 (1980): 130-137. + +with some optional deviations that can be turned on or off with the +`mode` argument to the constructor. + +Martin Porter, the algorithm's inventor, maintains a web page about the +algorithm at + + https://www.tartarus.org/~martin/PorterStemmer/ + +which includes another Python implementation and other implementations +in many languages. +""" + +__docformat__ = "plaintext" + +import re + +from nltk.stem.api import StemmerI + + +class PorterStemmer(StemmerI): + """ + A word stemmer based on the Porter stemming algorithm. + + Porter, M. "An algorithm for suffix stripping." + Program 14.3 (1980): 130-137. + + See https://www.tartarus.org/~martin/PorterStemmer/ for the homepage + of the algorithm. + + Martin Porter has endorsed several modifications to the Porter + algorithm since writing his original paper, and those extensions are + included in the implementations on his website. Additionally, others + have proposed further improvements to the algorithm, including NLTK + contributors. There are thus three modes that can be selected by + passing the appropriate constant to the class constructor's `mode` + attribute: + + - PorterStemmer.ORIGINAL_ALGORITHM + + An implementation that is faithful to the original paper. + + Note that Martin Porter has deprecated this version of the + algorithm. Martin distributes implementations of the Porter + Stemmer in many languages, hosted at: + + https://www.tartarus.org/~martin/PorterStemmer/ + + and all of these implementations include his extensions. He + strongly recommends against using the original, published + version of the algorithm; only use this mode if you clearly + understand why you are choosing to do so. + + - PorterStemmer.MARTIN_EXTENSIONS + + An implementation that only uses the modifications to the + algorithm that are included in the implementations on Martin + Porter's website. He has declared Porter frozen, so the + behaviour of those implementations should never change. + + - PorterStemmer.NLTK_EXTENSIONS (default) + + An implementation that includes further improvements devised by + NLTK contributors or taken from other modified implementations + found on the web. + + For the best stemming, you should use the default NLTK_EXTENSIONS + version. However, if you need to get the same results as either the + original algorithm or one of Martin Porter's hosted versions for + compatibility with an existing implementation or dataset, you can use + one of the other modes instead. 
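+
+    A doctest-style sketch of the default mode (illustrative; 'running' is an
+    example chosen here, 'dying' is one of the irregular forms handled by the
+    NLTK_EXTENSIONS pool):
+
+    >>> from nltk.stem.porter import PorterStemmer
+    >>> PorterStemmer().stem('running')
+    'run'
+    >>> PorterStemmer().stem('dying')
+    'die'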
+ """ + + # Modes the Stemmer can be instantiated in + NLTK_EXTENSIONS = "NLTK_EXTENSIONS" + MARTIN_EXTENSIONS = "MARTIN_EXTENSIONS" + ORIGINAL_ALGORITHM = "ORIGINAL_ALGORITHM" + + def __init__(self, mode=NLTK_EXTENSIONS): + if mode not in ( + self.NLTK_EXTENSIONS, + self.MARTIN_EXTENSIONS, + self.ORIGINAL_ALGORITHM, + ): + raise ValueError( + "Mode must be one of PorterStemmer.NLTK_EXTENSIONS, " + "PorterStemmer.MARTIN_EXTENSIONS, or " + "PorterStemmer.ORIGINAL_ALGORITHM" + ) + + self.mode = mode + + if self.mode == self.NLTK_EXTENSIONS: + # This is a table of irregular forms. It is quite short, + # but still reflects the errors actually drawn to Martin + # Porter's attention over a 20 year period! + irregular_forms = { + "sky": ["sky", "skies"], + "die": ["dying"], + "lie": ["lying"], + "tie": ["tying"], + "news": ["news"], + "inning": ["innings", "inning"], + "outing": ["outings", "outing"], + "canning": ["cannings", "canning"], + "howe": ["howe"], + "proceed": ["proceed"], + "exceed": ["exceed"], + "succeed": ["succeed"], + } + + self.pool = {} + for key in irregular_forms: + for val in irregular_forms[key]: + self.pool[val] = key + + self.vowels = frozenset(["a", "e", "i", "o", "u"]) + + def _is_consonant(self, word, i): + """Returns True if word[i] is a consonant, False otherwise + + A consonant is defined in the paper as follows: + + A consonant in a word is a letter other than A, E, I, O or + U, and other than Y preceded by a consonant. (The fact that + the term `consonant' is defined to some extent in terms of + itself does not make it ambiguous.) So in TOY the consonants + are T and Y, and in SYZYGY they are S, Z and G. If a letter + is not a consonant it is a vowel. + """ + if word[i] in self.vowels: + return False + if word[i] == "y": + if i == 0: + return True + else: + return not self._is_consonant(word, i - 1) + return True + + def _measure(self, stem): + r"""Returns the 'measure' of stem, per definition in the paper + + From the paper: + + A consonant will be denoted by c, a vowel by v. A list + ccc... of length greater than 0 will be denoted by C, and a + list vvv... of length greater than 0 will be denoted by V. + Any word, or part of a word, therefore has one of the four + forms: + + CVCV ... C + CVCV ... V + VCVC ... C + VCVC ... V + + These may all be represented by the single form + + [C]VCVC ... [V] + + where the square brackets denote arbitrary presence of their + contents. Using (VC){m} to denote VC repeated m times, this + may again be written as + + [C](VC){m}[V]. + + m will be called the \measure\ of any word or word part when + represented in this form. The case m = 0 covers the null + word. Here are some examples: + + m=0 TR, EE, TREE, Y, BY. + m=1 TROUBLE, OATS, TREES, IVY. + m=2 TROUBLES, PRIVATE, OATEN, ORRERY. + """ + cv_sequence = "" + + # Construct a string of 'c's and 'v's representing whether each + # character in `stem` is a consonant or a vowel. + # e.g. 
'falafel' becomes 'cvcvcvc', + # 'architecture' becomes 'vcccvcvccvcv' + for i in range(len(stem)): + if self._is_consonant(stem, i): + cv_sequence += "c" + else: + cv_sequence += "v" + + # Count the number of 'vc' occurrences, which is equivalent to + # the number of 'VC' occurrences in Porter's reduced form in the + # docstring above, which is in turn equivalent to `m` + return cv_sequence.count("vc") + + def _has_positive_measure(self, stem): + return self._measure(stem) > 0 + + def _contains_vowel(self, stem): + """Returns True if stem contains a vowel, else False""" + for i in range(len(stem)): + if not self._is_consonant(stem, i): + return True + return False + + def _ends_double_consonant(self, word): + """Implements condition *d from the paper + + Returns True if word ends with a double consonant + """ + return ( + len(word) >= 2 + and word[-1] == word[-2] + and self._is_consonant(word, len(word) - 1) + ) + + def _ends_cvc(self, word): + """Implements condition *o from the paper + + From the paper: + + *o - the stem ends cvc, where the second c is not W, X or Y + (e.g. -WIL, -HOP). + """ + return ( + len(word) >= 3 + and self._is_consonant(word, len(word) - 3) + and not self._is_consonant(word, len(word) - 2) + and self._is_consonant(word, len(word) - 1) + and word[-1] not in ("w", "x", "y") + ) or ( + self.mode == self.NLTK_EXTENSIONS + and len(word) == 2 + and not self._is_consonant(word, 0) + and self._is_consonant(word, 1) + ) + + def _replace_suffix(self, word, suffix, replacement): + """Replaces `suffix` of `word` with `replacement""" + assert word.endswith(suffix), "Given word doesn't end with given suffix" + if suffix == "": + return word + replacement + else: + return word[: -len(suffix)] + replacement + + def _apply_rule_list(self, word, rules): + """Applies the first applicable suffix-removal rule to the word + + Takes a word and a list of suffix-removal rules represented as + 3-tuples, with the first element being the suffix to remove, + the second element being the string to replace it with, and the + final element being the condition for the rule to be applicable, + or None if the rule is unconditional. 
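+
+        A doctest-style sketch using one of the Step 1a rules defined below
+        (illustrative only):
+
+        >>> from nltk.stem.porter import PorterStemmer
+        >>> PorterStemmer()._apply_rule_list('caresses', [('sses', 'ss', None)])
+        'caress'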
+ """ + for rule in rules: + suffix, replacement, condition = rule + if suffix == "*d" and self._ends_double_consonant(word): + stem = word[:-2] + if condition is None or condition(stem): + return stem + replacement + else: + # Don't try any further rules + return word + if word.endswith(suffix): + stem = self._replace_suffix(word, suffix, "") + if condition is None or condition(stem): + return stem + replacement + else: + # Don't try any further rules + return word + + return word + + def _step1a(self, word): + """Implements Step 1a from "An algorithm for suffix stripping" + + From the paper: + + SSES -> SS caresses -> caress + IES -> I ponies -> poni + ties -> ti + SS -> SS caress -> caress + S -> cats -> cat + """ + # this NLTK-only rule extends the original algorithm, so + # that 'flies'->'fli' but 'dies'->'die' etc + if self.mode == self.NLTK_EXTENSIONS: + if word.endswith("ies") and len(word) == 4: + return self._replace_suffix(word, "ies", "ie") + + return self._apply_rule_list( + word, + [ + ("sses", "ss", None), # SSES -> SS + ("ies", "i", None), # IES -> I + ("ss", "ss", None), # SS -> SS + ("s", "", None), # S -> + ], + ) + + def _step1b(self, word): + """Implements Step 1b from "An algorithm for suffix stripping" + + From the paper: + + (m>0) EED -> EE feed -> feed + agreed -> agree + (*v*) ED -> plastered -> plaster + bled -> bled + (*v*) ING -> motoring -> motor + sing -> sing + + If the second or third of the rules in Step 1b is successful, + the following is done: + + AT -> ATE conflat(ed) -> conflate + BL -> BLE troubl(ed) -> trouble + IZ -> IZE siz(ed) -> size + (*d and not (*L or *S or *Z)) + -> single letter + hopp(ing) -> hop + tann(ed) -> tan + fall(ing) -> fall + hiss(ing) -> hiss + fizz(ed) -> fizz + (m=1 and *o) -> E fail(ing) -> fail + fil(ing) -> file + + The rule to map to a single letter causes the removal of one of + the double letter pair. The -E is put back on -AT, -BL and -IZ, + so that the suffixes -ATE, -BLE and -IZE can be recognised + later. This E may be removed in step 4. 
+ """ + # this NLTK-only block extends the original algorithm, so that + # 'spied'->'spi' but 'died'->'die' etc + if self.mode == self.NLTK_EXTENSIONS: + if word.endswith("ied"): + if len(word) == 4: + return self._replace_suffix(word, "ied", "ie") + else: + return self._replace_suffix(word, "ied", "i") + + # (m>0) EED -> EE + if word.endswith("eed"): + stem = self._replace_suffix(word, "eed", "") + if self._measure(stem) > 0: + return stem + "ee" + else: + return word + + rule_2_or_3_succeeded = False + + for suffix in ["ed", "ing"]: + if word.endswith(suffix): + intermediate_stem = self._replace_suffix(word, suffix, "") + if self._contains_vowel(intermediate_stem): + rule_2_or_3_succeeded = True + break + + if not rule_2_or_3_succeeded: + return word + + return self._apply_rule_list( + intermediate_stem, + [ + ("at", "ate", None), # AT -> ATE + ("bl", "ble", None), # BL -> BLE + ("iz", "ize", None), # IZ -> IZE + # (*d and not (*L or *S or *Z)) + # -> single letter + ( + "*d", + intermediate_stem[-1], + lambda stem: intermediate_stem[-1] not in ("l", "s", "z"), + ), + # (m=1 and *o) -> E + ( + "", + "e", + lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)), + ), + ], + ) + + def _step1c(self, word): + """Implements Step 1c from "An algorithm for suffix stripping" + + From the paper: + + Step 1c + + (*v*) Y -> I happy -> happi + sky -> sky + """ + + def nltk_condition(stem): + """ + This has been modified from the original Porter algorithm so + that y->i is only done when y is preceded by a consonant, + but not if the stem is only a single consonant, i.e. + + (*c and not c) Y -> I + + So 'happy' -> 'happi', but + 'enjoy' -> 'enjoy' etc + + This is a much better rule. Formerly 'enjoy'->'enjoi' and + 'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but + with this modification that no longer really matters. + + Also, the removal of the contains_vowel(z) condition means + that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and + conflate with 'spied', 'tried', 'flies' ... 
+ """ + return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1) + + def original_condition(stem): + return self._contains_vowel(stem) + + return self._apply_rule_list( + word, + [ + ( + "y", + "i", + nltk_condition + if self.mode == self.NLTK_EXTENSIONS + else original_condition, + ) + ], + ) + + def _step2(self, word): + """Implements Step 2 from "An algorithm for suffix stripping" + + From the paper: + + Step 2 + + (m>0) ATIONAL -> ATE relational -> relate + (m>0) TIONAL -> TION conditional -> condition + rational -> rational + (m>0) ENCI -> ENCE valenci -> valence + (m>0) ANCI -> ANCE hesitanci -> hesitance + (m>0) IZER -> IZE digitizer -> digitize + (m>0) ABLI -> ABLE conformabli -> conformable + (m>0) ALLI -> AL radicalli -> radical + (m>0) ENTLI -> ENT differentli -> different + (m>0) ELI -> E vileli - > vile + (m>0) OUSLI -> OUS analogousli -> analogous + (m>0) IZATION -> IZE vietnamization -> vietnamize + (m>0) ATION -> ATE predication -> predicate + (m>0) ATOR -> ATE operator -> operate + (m>0) ALISM -> AL feudalism -> feudal + (m>0) IVENESS -> IVE decisiveness -> decisive + (m>0) FULNESS -> FUL hopefulness -> hopeful + (m>0) OUSNESS -> OUS callousness -> callous + (m>0) ALITI -> AL formaliti -> formal + (m>0) IVITI -> IVE sensitiviti -> sensitive + (m>0) BILITI -> BLE sensibiliti -> sensible + """ + + if self.mode == self.NLTK_EXTENSIONS: + # Instead of applying the ALLI -> AL rule after '(a)bli' per + # the published algorithm, instead we apply it first, and, + # if it succeeds, run the result through step2 again. + if word.endswith("alli") and self._has_positive_measure( + self._replace_suffix(word, "alli", "") + ): + return self._step2(self._replace_suffix(word, "alli", "al")) + + bli_rule = ("bli", "ble", self._has_positive_measure) + abli_rule = ("abli", "able", self._has_positive_measure) + + rules = [ + ("ational", "ate", self._has_positive_measure), + ("tional", "tion", self._has_positive_measure), + ("enci", "ence", self._has_positive_measure), + ("anci", "ance", self._has_positive_measure), + ("izer", "ize", self._has_positive_measure), + abli_rule if self.mode == self.ORIGINAL_ALGORITHM else bli_rule, + ("alli", "al", self._has_positive_measure), + ("entli", "ent", self._has_positive_measure), + ("eli", "e", self._has_positive_measure), + ("ousli", "ous", self._has_positive_measure), + ("ization", "ize", self._has_positive_measure), + ("ation", "ate", self._has_positive_measure), + ("ator", "ate", self._has_positive_measure), + ("alism", "al", self._has_positive_measure), + ("iveness", "ive", self._has_positive_measure), + ("fulness", "ful", self._has_positive_measure), + ("ousness", "ous", self._has_positive_measure), + ("aliti", "al", self._has_positive_measure), + ("iviti", "ive", self._has_positive_measure), + ("biliti", "ble", self._has_positive_measure), + ] + + if self.mode == self.NLTK_EXTENSIONS: + rules.append(("fulli", "ful", self._has_positive_measure)) + + # The 'l' of the 'logi' -> 'log' rule is put with the stem, + # so that short stems like 'geo' 'theo' etc work like + # 'archaeo' 'philo' etc. 
+ rules.append( + ("logi", "log", lambda stem: self._has_positive_measure(word[:-3])) + ) + + if self.mode == self.MARTIN_EXTENSIONS: + rules.append(("logi", "log", self._has_positive_measure)) + + return self._apply_rule_list(word, rules) + + def _step3(self, word): + """Implements Step 3 from "An algorithm for suffix stripping" + + From the paper: + + Step 3 + + (m>0) ICATE -> IC triplicate -> triplic + (m>0) ATIVE -> formative -> form + (m>0) ALIZE -> AL formalize -> formal + (m>0) ICITI -> IC electriciti -> electric + (m>0) ICAL -> IC electrical -> electric + (m>0) FUL -> hopeful -> hope + (m>0) NESS -> goodness -> good + """ + return self._apply_rule_list( + word, + [ + ("icate", "ic", self._has_positive_measure), + ("ative", "", self._has_positive_measure), + ("alize", "al", self._has_positive_measure), + ("iciti", "ic", self._has_positive_measure), + ("ical", "ic", self._has_positive_measure), + ("ful", "", self._has_positive_measure), + ("ness", "", self._has_positive_measure), + ], + ) + + def _step4(self, word): + """Implements Step 4 from "An algorithm for suffix stripping" + + Step 4 + + (m>1) AL -> revival -> reviv + (m>1) ANCE -> allowance -> allow + (m>1) ENCE -> inference -> infer + (m>1) ER -> airliner -> airlin + (m>1) IC -> gyroscopic -> gyroscop + (m>1) ABLE -> adjustable -> adjust + (m>1) IBLE -> defensible -> defens + (m>1) ANT -> irritant -> irrit + (m>1) EMENT -> replacement -> replac + (m>1) MENT -> adjustment -> adjust + (m>1) ENT -> dependent -> depend + (m>1 and (*S or *T)) ION -> adoption -> adopt + (m>1) OU -> homologou -> homolog + (m>1) ISM -> communism -> commun + (m>1) ATE -> activate -> activ + (m>1) ITI -> angulariti -> angular + (m>1) OUS -> homologous -> homolog + (m>1) IVE -> effective -> effect + (m>1) IZE -> bowdlerize -> bowdler + + The suffixes are now removed. All that remains is a little + tidying up. + """ + measure_gt_1 = lambda stem: self._measure(stem) > 1 + + return self._apply_rule_list( + word, + [ + ("al", "", measure_gt_1), + ("ance", "", measure_gt_1), + ("ence", "", measure_gt_1), + ("er", "", measure_gt_1), + ("ic", "", measure_gt_1), + ("able", "", measure_gt_1), + ("ible", "", measure_gt_1), + ("ant", "", measure_gt_1), + ("ement", "", measure_gt_1), + ("ment", "", measure_gt_1), + ("ent", "", measure_gt_1), + # (m>1 and (*S or *T)) ION -> + ( + "ion", + "", + lambda stem: self._measure(stem) > 1 and stem[-1] in ("s", "t"), + ), + ("ou", "", measure_gt_1), + ("ism", "", measure_gt_1), + ("ate", "", measure_gt_1), + ("iti", "", measure_gt_1), + ("ous", "", measure_gt_1), + ("ive", "", measure_gt_1), + ("ize", "", measure_gt_1), + ], + ) + + def _step5a(self, word): + """Implements Step 5a from "An algorithm for suffix stripping" + + From the paper: + + Step 5a + + (m>1) E -> probate -> probat + rate -> rate + (m=1 and not *o) E -> cease -> ceas + """ + # Note that Martin's test vocabulary and reference + # implementations are inconsistent in how they handle the case + # where two rules both refer to a suffix that matches the word + # to be stemmed, but only the condition of the second one is + # true. + # Earlier in step2b we had the rules: + # (m>0) EED -> EE + # (*v*) ED -> + # but the examples in the paper included "feed"->"feed", even + # though (*v*) is true for "fe" and therefore the second rule + # alone would map "feed"->"fe". + # However, in THIS case, we need to handle the consecutive rules + # differently and try both conditions (obviously; the second + # rule here would be redundant otherwise). 
Martin's paper makes + # no explicit mention of the inconsistency; you have to infer it + # from the examples. + # For this reason, we can't use _apply_rule_list here. + if word.endswith("e"): + stem = self._replace_suffix(word, "e", "") + if self._measure(stem) > 1: + return stem + if self._measure(stem) == 1 and not self._ends_cvc(stem): + return stem + return word + + def _step5b(self, word): + """Implements Step 5a from "An algorithm for suffix stripping" + + From the paper: + + Step 5b + + (m > 1 and *d and *L) -> single letter + controll -> control + roll -> roll + """ + return self._apply_rule_list( + word, [("ll", "l", lambda stem: self._measure(word[:-1]) > 1)] + ) + + def stem(self, word, to_lowercase=True): + """ + :param to_lowercase: if `to_lowercase=True` the word always lowercase + """ + stem = word.lower() if to_lowercase else word + + if self.mode == self.NLTK_EXTENSIONS and word in self.pool: + return self.pool[stem] + + if self.mode != self.ORIGINAL_ALGORITHM and len(word) <= 2: + # With this line, strings of length 1 or 2 don't go through + # the stemming process, although no mention is made of this + # in the published algorithm. + return stem + + stem = self._step1a(stem) + stem = self._step1b(stem) + stem = self._step1c(stem) + stem = self._step2(stem) + stem = self._step3(stem) + stem = self._step4(stem) + stem = self._step5a(stem) + stem = self._step5b(stem) + + return stem + + def __repr__(self): + return "" + + +def demo(): + """ + A demonstration of the porter stemmer on a sample from + the Penn Treebank corpus. + """ + + from nltk import stem + from nltk.corpus import treebank + + stemmer = stem.PorterStemmer() + + orig = [] + stemmed = [] + for item in treebank.fileids()[:3]: + for (word, tag) in treebank.tagged_words(item): + orig.append(word) + stemmed.append(stemmer.stem(word)) + + # Convert the results to a string, and word-wrap them. + results = " ".join(stemmed) + results = re.sub(r"(.{,70})\s", r"\1\n", results + " ").rstrip() + + # Convert the original to a string, and word wrap it. + original = " ".join(orig) + original = re.sub(r"(.{,70})\s", r"\1\n", original + " ").rstrip() + + # Print the results. + print("-Original-".center(70).replace(" ", "*").replace("-", " ")) + print(original) + print("-Results-".center(70).replace(" ", "*").replace("-", " ")) + print(results) + print("*" * 70) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/regexp.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/regexp.py new file mode 100644 index 0000000000000000000000000000000000000000..473b42bd4a194bc11a51db9db7a00178a945862a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/regexp.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: Stemmers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT +import re + +from nltk.stem.api import StemmerI + + +class RegexpStemmer(StemmerI): + """ + A stemmer that uses regular expressions to identify morphological + affixes. Any substrings that match the regular expressions will + be removed. + + >>> from nltk.stem import RegexpStemmer + >>> st = RegexpStemmer('ing$|s$|e$|able$', min=4) + >>> st.stem('cars') + 'car' + >>> st.stem('mass') + 'mas' + >>> st.stem('was') + 'was' + >>> st.stem('bee') + 'bee' + >>> st.stem('compute') + 'comput' + >>> st.stem('advisable') + 'advis' + + :type regexp: str or regexp + :param regexp: The regular expression that should be used to + identify morphological affixes. 
+ :type min: int + :param min: The minimum length of string to stem + """ + + def __init__(self, regexp, min=0): + + if not hasattr(regexp, "pattern"): + regexp = re.compile(regexp) + self._regexp = regexp + self._min = min + + def stem(self, word): + if len(word) < self._min: + return word + else: + return self._regexp.sub("", word) + + def __repr__(self): + return f"" diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/rslp.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/rslp.py new file mode 100644 index 0000000000000000000000000000000000000000..b1dfeb35e09643e2e75af68cac3bcc7632fc2245 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/rslp.py @@ -0,0 +1,137 @@ +# Natural Language Toolkit: RSLP Stemmer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tiago Tresoldi +# URL: +# For license information, see LICENSE.TXT + +# This code is based on the algorithm presented in the paper "A Stemming +# Algorithm for the Portuguese Language" by Viviane Moreira Orengo and +# Christian Huyck, which unfortunately I had no access to. The code is a +# Python version, with some minor modifications of mine, to the description +# presented at https://www.webcitation.org/5NnvdIzOb and to the C source code +# available at http://www.inf.ufrgs.br/~arcoelho/rslp/integrando_rslp.html. +# Please note that this stemmer is intended for demonstration and educational +# purposes only. Feel free to write me for any comments, including the +# development of a different and/or better stemmer for Portuguese. I also +# suggest using NLTK's mailing list for Portuguese for any discussion. + +# Este código é baseado no algoritmo apresentado no artigo "A Stemming +# Algorithm for the Portuguese Language" de Viviane Moreira Orengo e +# Christian Huyck, o qual infelizmente não tive a oportunidade de ler. O +# código é uma conversão para Python, com algumas pequenas modificações +# minhas, daquele apresentado em https://www.webcitation.org/5NnvdIzOb e do +# código para linguagem C disponível em +# http://www.inf.ufrgs.br/~arcoelho/rslp/integrando_rslp.html. Por favor, +# lembre-se de que este stemmer foi desenvolvido com finalidades unicamente +# de demonstração e didáticas. Sinta-se livre para me escrever para qualquer +# comentário, inclusive sobre o desenvolvimento de um stemmer diferente +# e/ou melhor para o português. Também sugiro utilizar-se a lista de discussão +# do NLTK para o português para qualquer debate. + +from nltk.data import load +from nltk.stem.api import StemmerI + + +class RSLPStemmer(StemmerI): + """ + A stemmer for Portuguese. + + >>> from nltk.stem import RSLPStemmer + >>> st = RSLPStemmer() + >>> # opening lines of Erico Verissimo's "Música ao Longe" + >>> text = ''' + ... Clarissa risca com giz no quadro-negro a paisagem que os alunos + ... devem copiar . Uma casinha de porta e janela , em cima duma + ... coxilha .''' + >>> for token in text.split(): # doctest: +NORMALIZE_WHITESPACE + ... print(st.stem(token)) + clariss risc com giz no quadro-negr a pais que os alun dev copi . + uma cas de port e janel , em cim dum coxilh . 
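+
+    The rule files step0.pt .. step6.pt are applied by stem() in the
+    order: plural reduction (0, only for words ending in 's'), feminine
+    reduction (1, only for words ending in 'a'), augmentative (3),
+    adverb (2), then noun (4); verb reduction (5) and final vowel
+    removal (6) are only attempted when the preceding step left the
+    word unchanged.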
+ """ + + def __init__(self): + self._model = [] + + self._model.append(self.read_rule("step0.pt")) + self._model.append(self.read_rule("step1.pt")) + self._model.append(self.read_rule("step2.pt")) + self._model.append(self.read_rule("step3.pt")) + self._model.append(self.read_rule("step4.pt")) + self._model.append(self.read_rule("step5.pt")) + self._model.append(self.read_rule("step6.pt")) + + def read_rule(self, filename): + rules = load("nltk:stemmers/rslp/" + filename, format="raw").decode("utf8") + lines = rules.split("\n") + + lines = [line for line in lines if line != ""] # remove blank lines + lines = [line for line in lines if line[0] != "#"] # remove comments + + # NOTE: a simple but ugly hack to make this parser happy with double '\t's + lines = [line.replace("\t\t", "\t") for line in lines] + + # parse rules + rules = [] + for line in lines: + rule = [] + tokens = line.split("\t") + + # text to be searched for at the end of the string + rule.append(tokens[0][1:-1]) # remove quotes + + # minimum stem size to perform the replacement + rule.append(int(tokens[1])) + + # text to be replaced into + rule.append(tokens[2][1:-1]) # remove quotes + + # exceptions to this rule + rule.append([token[1:-1] for token in tokens[3].split(",")]) + + # append to the results + rules.append(rule) + + return rules + + def stem(self, word): + word = word.lower() + + # the word ends in 's'? apply rule for plural reduction + if word[-1] == "s": + word = self.apply_rule(word, 0) + + # the word ends in 'a'? apply rule for feminine reduction + if word[-1] == "a": + word = self.apply_rule(word, 1) + + # augmentative reduction + word = self.apply_rule(word, 3) + + # adverb reduction + word = self.apply_rule(word, 2) + + # noun reduction + prev_word = word + word = self.apply_rule(word, 4) + if word == prev_word: + # verb reduction + prev_word = word + word = self.apply_rule(word, 5) + if word == prev_word: + # vowel removal + word = self.apply_rule(word, 6) + + return word + + def apply_rule(self, word, rule_index): + rules = self._model[rule_index] + for rule in rules: + suffix_length = len(rule[0]) + if word[-suffix_length:] == rule[0]: # if suffix matches + if len(word) >= suffix_length + rule[1]: # if we have minimum size + if word not in rule[3]: # if not an exception + word = word[:-suffix_length] + rule[2] + break + + return word diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/snowball.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/snowball.py new file mode 100644 index 0000000000000000000000000000000000000000..08cd9e76993213eafb0d1698f3f9b019af21068d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/snowball.py @@ -0,0 +1,5946 @@ +# +# Natural Language Toolkit: Snowball Stemmer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Michael Stahl +# Peter Ljunglof (revisions) +# Lakhdar Benzahia (co-writer) +# Assem Chelli (reviewer arabicstemmer) +# Abdelkrim Aries (reviewer arabicstemmer) +# Algorithms: Dr Martin Porter +# Assem Chelli arabic stemming algorithm +# Benzahia Lakhdar +# URL: +# For license information, see LICENSE.TXT + +""" +Snowball stemmers + +This module provides a port of the Snowball stemmers +developed by Martin Porter. + +There is also a demo function: `snowball.demo()`. 
+ +""" + +import re + +from nltk.corpus import stopwords +from nltk.stem import porter +from nltk.stem.api import StemmerI +from nltk.stem.util import prefix_replace, suffix_replace + + +class SnowballStemmer(StemmerI): + + """ + Snowball Stemmer + + The following languages are supported: + Arabic, Danish, Dutch, English, Finnish, French, German, + Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian, + Spanish and Swedish. + + The algorithm for English is documented here: + + Porter, M. \"An algorithm for suffix stripping.\" + Program 14.3 (1980): 130-137. + + The algorithms have been developed by Martin Porter. + These stemmers are called Snowball, because Porter created + a programming language with this name for creating + new stemming algorithms. There is more information available + at http://snowball.tartarus.org/ + + The stemmer is invoked as shown below: + + >>> from nltk.stem import SnowballStemmer # See which languages are supported + >>> print(" ".join(SnowballStemmer.languages)) # doctest: +NORMALIZE_WHITESPACE + arabic danish dutch english finnish french german hungarian + italian norwegian porter portuguese romanian russian + spanish swedish + >>> stemmer = SnowballStemmer("german") # Choose a language + >>> stemmer.stem("Autobahnen") # Stem a word + 'autobahn' + + Invoking the stemmers that way is useful if you do not know the + language to be stemmed at runtime. Alternatively, if you already know + the language, then you can invoke the language specific stemmer directly: + + >>> from nltk.stem.snowball import GermanStemmer + >>> stemmer = GermanStemmer() + >>> stemmer.stem("Autobahnen") + 'autobahn' + + :param language: The language whose subclass is instantiated. + :type language: str or unicode + :param ignore_stopwords: If set to True, stopwords are + not stemmed and returned unchanged. + Set to False by default. + :type ignore_stopwords: bool + :raise ValueError: If there is no stemmer for the specified + language, a ValueError is raised. + """ + + languages = ( + "arabic", + "danish", + "dutch", + "english", + "finnish", + "french", + "german", + "hungarian", + "italian", + "norwegian", + "porter", + "portuguese", + "romanian", + "russian", + "spanish", + "swedish", + ) + + def __init__(self, language, ignore_stopwords=False): + if language not in self.languages: + raise ValueError(f"The language '{language}' is not supported.") + stemmerclass = globals()[language.capitalize() + "Stemmer"] + self.stemmer = stemmerclass(ignore_stopwords) + self.stem = self.stemmer.stem + self.stopwords = self.stemmer.stopwords + + def stem(self, token): + return self.stemmer.stem(self, token) + + +class _LanguageSpecificStemmer(StemmerI): + + """ + This helper subclass offers the possibility + to invoke a specific stemmer directly. + This is useful if you already know the language to be stemmed at runtime. + + Create an instance of the Snowball stemmer. + + :param ignore_stopwords: If set to True, stopwords are + not stemmed and returned unchanged. + Set to False by default. + :type ignore_stopwords: bool + """ + + def __init__(self, ignore_stopwords=False): + # The language is the name of the class, minus the final "Stemmer". + language = type(self).__name__.lower() + if language.endswith("stemmer"): + language = language[:-7] + + self.stopwords = set() + if ignore_stopwords: + try: + for word in stopwords.words(language): + self.stopwords.add(word) + except OSError as e: + raise ValueError( + "{!r} has no list of stopwords. 
Please set" + " 'ignore_stopwords' to 'False'.".format(self) + ) from e + + def __repr__(self): + """ + Print out the string representation of the respective class. + + """ + return f"<{type(self).__name__}>" + + +class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer): + """ + A word stemmer based on the original Porter stemming algorithm. + + Porter, M. \"An algorithm for suffix stripping.\" + Program 14.3 (1980): 130-137. + + A few minor modifications have been made to Porter's basic + algorithm. See the source code of the module + nltk.stem.porter for more information. + + """ + + def __init__(self, ignore_stopwords=False): + _LanguageSpecificStemmer.__init__(self, ignore_stopwords) + porter.PorterStemmer.__init__(self) + + +class _ScandinavianStemmer(_LanguageSpecificStemmer): + + """ + This subclass encapsulates a method for defining the string region R1. + It is used by the Danish, Norwegian, and Swedish stemmer. + + """ + + def _r1_scandinavian(self, word, vowels): + """ + Return the region R1 that is used by the Scandinavian stemmers. + + R1 is the region after the first non-vowel following a vowel, + or is the null region at the end of the word if there is no + such non-vowel. But then R1 is adjusted so that the region + before it contains at least three letters. + + :param word: The word whose region R1 is determined. + :type word: str or unicode + :param vowels: The vowels of the respective language that are + used to determine the region R1. + :type vowels: unicode + :return: the region R1 for the respective word. + :rtype: unicode + :note: This helper method is invoked by the respective stem method of + the subclasses DanishStemmer, NorwegianStemmer, and + SwedishStemmer. It is not to be invoked directly! + + """ + r1 = "" + for i in range(1, len(word)): + if word[i] not in vowels and word[i - 1] in vowels: + if 3 > len(word[: i + 1]) > 0: + r1 = word[3:] + elif len(word[: i + 1]) >= 3: + r1 = word[i + 1 :] + else: + return word + break + + return r1 + + +class _StandardStemmer(_LanguageSpecificStemmer): + + """ + This subclass encapsulates two methods for defining the standard versions + of the string regions R1, R2, and RV. + + """ + + def _r1r2_standard(self, word, vowels): + """ + Return the standard interpretations of the string regions R1 and R2. + + R1 is the region after the first non-vowel following a vowel, + or is the null region at the end of the word if there is no + such non-vowel. + + R2 is the region after the first non-vowel following a vowel + in R1, or is the null region at the end of the word if there + is no such non-vowel. + + :param word: The word whose regions R1 and R2 are determined. + :type word: str or unicode + :param vowels: The vowels of the respective language that are + used to determine the regions R1 and R2. + :type vowels: unicode + :return: (r1,r2), the regions R1 and R2 for the respective word. + :rtype: tuple + :note: This helper method is invoked by the respective stem method of + the subclasses DutchStemmer, FinnishStemmer, + FrenchStemmer, GermanStemmer, ItalianStemmer, + PortugueseStemmer, RomanianStemmer, and SpanishStemmer. + It is not to be invoked directly! 
+ :note: A detailed description of how to define R1 and R2 + can be found at http://snowball.tartarus.org/texts/r1r2.html + + """ + r1 = "" + r2 = "" + for i in range(1, len(word)): + if word[i] not in vowels and word[i - 1] in vowels: + r1 = word[i + 1 :] + break + + for i in range(1, len(r1)): + if r1[i] not in vowels and r1[i - 1] in vowels: + r2 = r1[i + 1 :] + break + + return (r1, r2) + + def _rv_standard(self, word, vowels): + """ + Return the standard interpretation of the string region RV. + + If the second letter is a consonant, RV is the region after the + next following vowel. If the first two letters are vowels, RV is + the region after the next following consonant. Otherwise, RV is + the region after the third letter. + + :param word: The word whose region RV is determined. + :type word: str or unicode + :param vowels: The vowels of the respective language that are + used to determine the region RV. + :type vowels: unicode + :return: the region RV for the respective word. + :rtype: unicode + :note: This helper method is invoked by the respective stem method of + the subclasses ItalianStemmer, PortugueseStemmer, + RomanianStemmer, and SpanishStemmer. It is not to be + invoked directly! + + """ + rv = "" + if len(word) >= 2: + if word[1] not in vowels: + for i in range(2, len(word)): + if word[i] in vowels: + rv = word[i + 1 :] + break + + elif word[0] in vowels and word[1] in vowels: + for i in range(2, len(word)): + if word[i] not in vowels: + rv = word[i + 1 :] + break + else: + rv = word[3:] + + return rv + + +class ArabicStemmer(_StandardStemmer): + """ + https://github.com/snowballstem/snowball/blob/master/algorithms/arabic/stem_Unicode.sbl (Original Algorithm) + The Snowball Arabic light Stemmer + Algorithm: + + - Assem Chelli + - Abdelkrim Aries + - Lakhdar Benzahia + + NLTK Version Author: + + - Lakhdar Benzahia + """ + + # Normalize_pre stes + __vocalization = re.compile( + r"[\u064b-\u064c-\u064d-\u064e-\u064f-\u0650-\u0651-\u0652]" + ) # ً، ٌ، ٍ، َ، ُ، ِ، ّ، ْ + + __kasheeda = re.compile(r"[\u0640]") # ـ tatweel/kasheeda + + __arabic_punctuation_marks = re.compile(r"[\u060C-\u061B-\u061F]") # ؛ ، ؟ + + # Normalize_post + __last_hamzat = ("\u0623", "\u0625", "\u0622", "\u0624", "\u0626") # أ، إ، آ، ؤ، ئ + + # normalize other hamza's + __initial_hamzat = re.compile(r"^[\u0622\u0623\u0625]") # أ، إ، آ + + __waw_hamza = re.compile(r"[\u0624]") # ؤ + + __yeh_hamza = re.compile(r"[\u0626]") # ئ + + __alefat = re.compile(r"[\u0623\u0622\u0625]") # أ، إ، آ + + # Checks + __checks1 = ( + "\u0643\u0627\u0644", + "\u0628\u0627\u0644", # بال، كال + "\u0627\u0644", + "\u0644\u0644", # لل، ال + ) + + __checks2 = ("\u0629", "\u0627\u062a") # ة # female plural ات + + # Suffixes + __suffix_noun_step1a = ( + "\u064a", + "\u0643", + "\u0647", # ي، ك، ه + "\u0646\u0627", + "\u0643\u0645", + "\u0647\u0627", + "\u0647\u0646", + "\u0647\u0645", # نا، كم، ها، هن، هم + "\u0643\u0645\u0627", + "\u0647\u0645\u0627", # كما، هما + ) + + __suffix_noun_step1b = "\u0646" # ن + + __suffix_noun_step2a = ("\u0627", "\u064a", "\u0648") # ا، ي، و + + __suffix_noun_step2b = "\u0627\u062a" # ات + + __suffix_noun_step2c1 = "\u062a" # ت + + __suffix_noun_step2c2 = "\u0629" # ة + + __suffix_noun_step3 = "\u064a" # ي + + __suffix_verb_step1 = ( + "\u0647", + "\u0643", # ه، ك + "\u0646\u064a", + "\u0646\u0627", + "\u0647\u0627", + "\u0647\u0645", # ني، نا، ها، هم + "\u0647\u0646", + "\u0643\u0645", + "\u0643\u0646", # هن، كم، كن + "\u0647\u0645\u0627", + "\u0643\u0645\u0627", + "\u0643\u0645\u0648", # هما، 
كما، كمو + ) + + __suffix_verb_step2a = ( + "\u062a", + "\u0627", + "\u0646", + "\u064a", # ت، ا، ن، ي + "\u0646\u0627", + "\u062a\u0627", + "\u062a\u0646", # نا، تا، تن Past + "\u0627\u0646", + "\u0648\u0646", + "\u064a\u0646", # ان، هن، ين Present + "\u062a\u0645\u0627", # تما + ) + + __suffix_verb_step2b = ("\u0648\u0627", "\u062a\u0645") # وا، تم + + __suffix_verb_step2c = ("\u0648", "\u062a\u0645\u0648") # و # تمو + + __suffix_all_alef_maqsura = "\u0649" # ى + + # Prefixes + __prefix_step1 = ( + "\u0623", # أ + "\u0623\u0623", + "\u0623\u0622", + "\u0623\u0624", + "\u0623\u0627", + "\u0623\u0625", # أأ، أآ، أؤ، أا، أإ + ) + + __prefix_step2a = ("\u0641\u0627\u0644", "\u0648\u0627\u0644") # فال، وال + + __prefix_step2b = ("\u0641", "\u0648") # ف، و + + __prefix_step3a_noun = ( + "\u0627\u0644", + "\u0644\u0644", # لل، ال + "\u0643\u0627\u0644", + "\u0628\u0627\u0644", # بال، كال + ) + + __prefix_step3b_noun = ( + "\u0628", + "\u0643", + "\u0644", # ب، ك، ل + "\u0628\u0628", + "\u0643\u0643", # بب، كك + ) + + __prefix_step3_verb = ( + "\u0633\u064a", + "\u0633\u062a", + "\u0633\u0646", + "\u0633\u0623", + ) # سي، ست، سن، سأ + + __prefix_step4_verb = ( + "\u064a\u0633\u062a", + "\u0646\u0633\u062a", + "\u062a\u0633\u062a", + ) # يست، نست، تست + + # Suffixes added due to Conjugation Verbs + __conjugation_suffix_verb_1 = ("\u0647", "\u0643") # ه، ك + + __conjugation_suffix_verb_2 = ( + "\u0646\u064a", + "\u0646\u0627", + "\u0647\u0627", # ني، نا، ها + "\u0647\u0645", + "\u0647\u0646", + "\u0643\u0645", # هم، هن، كم + "\u0643\u0646", # كن + ) + __conjugation_suffix_verb_3 = ( + "\u0647\u0645\u0627", + "\u0643\u0645\u0627", + "\u0643\u0645\u0648", + ) # هما، كما، كمو + + __conjugation_suffix_verb_4 = ("\u0627", "\u0646", "\u064a") # ا، ن، ي + + __conjugation_suffix_verb_past = ( + "\u0646\u0627", + "\u062a\u0627", + "\u062a\u0646", + ) # نا، تا، تن + + __conjugation_suffix_verb_present = ( + "\u0627\u0646", + "\u0648\u0646", + "\u064a\u0646", + ) # ان، ون، ين + + # Suffixes added due to derivation Names + __conjugation_suffix_noun_1 = ("\u064a", "\u0643", "\u0647") # ي، ك، ه + + __conjugation_suffix_noun_2 = ( + "\u0646\u0627", + "\u0643\u0645", # نا، كم + "\u0647\u0627", + "\u0647\u0646", + "\u0647\u0645", # ها، هن، هم + ) + + __conjugation_suffix_noun_3 = ( + "\u0643\u0645\u0627", + "\u0647\u0645\u0627", + ) # كما، هما + + # Prefixes added due to derivation Names + __prefixes1 = ("\u0648\u0627", "\u0641\u0627") # فا، وا + + __articles_3len = ("\u0643\u0627\u0644", "\u0628\u0627\u0644") # بال كال + + __articles_2len = ("\u0627\u0644", "\u0644\u0644") # ال لل + + # Prepositions letters + __prepositions1 = ("\u0643", "\u0644") # ك، ل + __prepositions2 = ("\u0628\u0628", "\u0643\u0643") # بب، كك + + is_verb = True + is_noun = True + is_defined = False + + suffixes_verb_step1_success = False + suffix_verb_step2a_success = False + suffix_verb_step2b_success = False + suffix_noun_step2c2_success = False + suffix_noun_step1a_success = False + suffix_noun_step2a_success = False + suffix_noun_step2b_success = False + suffixe_noun_step1b_success = False + prefix_step2a_success = False + prefix_step3a_noun_success = False + prefix_step3b_noun_success = False + + def __normalize_pre(self, token): + """ + :param token: string + :return: normalized token type string + """ + # strip diacritics + token = self.__vocalization.sub("", token) + # strip kasheeda + token = self.__kasheeda.sub("", token) + # strip punctuation marks + token = self.__arabic_punctuation_marks.sub("", token) + return token + + def 
__normalize_post(self, token): + # normalize last hamza + for hamza in self.__last_hamzat: + if token.endswith(hamza): + token = suffix_replace(token, hamza, "\u0621") + break + # normalize other hamzat + token = self.__initial_hamzat.sub("\u0627", token) + token = self.__waw_hamza.sub("\u0648", token) + token = self.__yeh_hamza.sub("\u064a", token) + token = self.__alefat.sub("\u0627", token) + return token + + def __checks_1(self, token): + for prefix in self.__checks1: + if token.startswith(prefix): + if prefix in self.__articles_3len and len(token) > 4: + self.is_noun = True + self.is_verb = False + self.is_defined = True + break + + if prefix in self.__articles_2len and len(token) > 3: + self.is_noun = True + self.is_verb = False + self.is_defined = True + break + + def __checks_2(self, token): + for suffix in self.__checks2: + if token.endswith(suffix): + if suffix == "\u0629" and len(token) > 2: + self.is_noun = True + self.is_verb = False + break + + if suffix == "\u0627\u062a" and len(token) > 3: + self.is_noun = True + self.is_verb = False + break + + def __Suffix_Verb_Step1(self, token): + for suffix in self.__suffix_verb_step1: + if token.endswith(suffix): + if suffix in self.__conjugation_suffix_verb_1 and len(token) >= 4: + token = token[:-1] + self.suffixes_verb_step1_success = True + break + + if suffix in self.__conjugation_suffix_verb_2 and len(token) >= 5: + token = token[:-2] + self.suffixes_verb_step1_success = True + break + + if suffix in self.__conjugation_suffix_verb_3 and len(token) >= 6: + token = token[:-3] + self.suffixes_verb_step1_success = True + break + return token + + def __Suffix_Verb_Step2a(self, token): + for suffix in self.__suffix_verb_step2a: + if token.endswith(suffix) and len(token) > 3: + if suffix == "\u062a" and len(token) >= 4: + token = token[:-1] + self.suffix_verb_step2a_success = True + break + + if suffix in self.__conjugation_suffix_verb_4 and len(token) >= 4: + token = token[:-1] + self.suffix_verb_step2a_success = True + break + + if suffix in self.__conjugation_suffix_verb_past and len(token) >= 5: + token = token[:-2] # past + self.suffix_verb_step2a_success = True + break + + if suffix in self.__conjugation_suffix_verb_present and len(token) > 5: + token = token[:-2] # present + self.suffix_verb_step2a_success = True + break + + if suffix == "\u062a\u0645\u0627" and len(token) >= 6: + token = token[:-3] + self.suffix_verb_step2a_success = True + break + return token + + def __Suffix_Verb_Step2c(self, token): + for suffix in self.__suffix_verb_step2c: + if token.endswith(suffix): + if suffix == "\u062a\u0645\u0648" and len(token) >= 6: + token = token[:-3] + break + + if suffix == "\u0648" and len(token) >= 4: + token = token[:-1] + break + return token + + def __Suffix_Verb_Step2b(self, token): + for suffix in self.__suffix_verb_step2b: + if token.endswith(suffix) and len(token) >= 5: + token = token[:-2] + self.suffix_verb_step2b_success = True + break + return token + + def __Suffix_Noun_Step2c2(self, token): + for suffix in self.__suffix_noun_step2c2: + if token.endswith(suffix) and len(token) >= 3: + token = token[:-1] + self.suffix_noun_step2c2_success = True + break + return token + + def __Suffix_Noun_Step1a(self, token): + for suffix in self.__suffix_noun_step1a: + if token.endswith(suffix): + if suffix in self.__conjugation_suffix_noun_1 and len(token) >= 4: + token = token[:-1] + self.suffix_noun_step1a_success = True + break + + if suffix in self.__conjugation_suffix_noun_2 and len(token) >= 5: + token = token[:-2] + 
self.suffix_noun_step1a_success = True + break + + if suffix in self.__conjugation_suffix_noun_3 and len(token) >= 6: + token = token[:-3] + self.suffix_noun_step1a_success = True + break + return token + + def __Suffix_Noun_Step2a(self, token): + for suffix in self.__suffix_noun_step2a: + if token.endswith(suffix) and len(token) > 4: + token = token[:-1] + self.suffix_noun_step2a_success = True + break + return token + + def __Suffix_Noun_Step2b(self, token): + for suffix in self.__suffix_noun_step2b: + if token.endswith(suffix) and len(token) >= 5: + token = token[:-2] + self.suffix_noun_step2b_success = True + break + return token + + def __Suffix_Noun_Step2c1(self, token): + for suffix in self.__suffix_noun_step2c1: + if token.endswith(suffix) and len(token) >= 4: + token = token[:-1] + break + return token + + def __Suffix_Noun_Step1b(self, token): + for suffix in self.__suffix_noun_step1b: + if token.endswith(suffix) and len(token) > 5: + token = token[:-1] + self.suffixe_noun_step1b_success = True + break + return token + + def __Suffix_Noun_Step3(self, token): + for suffix in self.__suffix_noun_step3: + if token.endswith(suffix) and len(token) >= 3: + token = token[:-1] # ya' nisbiya + break + return token + + def __Suffix_All_alef_maqsura(self, token): + for suffix in self.__suffix_all_alef_maqsura: + if token.endswith(suffix): + token = suffix_replace(token, suffix, "\u064a") + return token + + def __Prefix_Step1(self, token): + for prefix in self.__prefix_step1: + if token.startswith(prefix) and len(token) > 3: + if prefix == "\u0623\u0623": + token = prefix_replace(token, prefix, "\u0623") + break + + elif prefix == "\u0623\u0622": + token = prefix_replace(token, prefix, "\u0622") + break + + elif prefix == "\u0623\u0624": + token = prefix_replace(token, prefix, "\u0624") + break + + elif prefix == "\u0623\u0627": + token = prefix_replace(token, prefix, "\u0627") + break + + elif prefix == "\u0623\u0625": + token = prefix_replace(token, prefix, "\u0625") + break + return token + + def __Prefix_Step2a(self, token): + for prefix in self.__prefix_step2a: + if token.startswith(prefix) and len(token) > 5: + token = token[len(prefix) :] + self.prefix_step2a_success = True + break + return token + + def __Prefix_Step2b(self, token): + for prefix in self.__prefix_step2b: + if token.startswith(prefix) and len(token) > 3: + if token[:2] not in self.__prefixes1: + token = token[len(prefix) :] + break + return token + + def __Prefix_Step3a_Noun(self, token): + for prefix in self.__prefix_step3a_noun: + if token.startswith(prefix): + if prefix in self.__articles_2len and len(token) > 4: + token = token[len(prefix) :] + self.prefix_step3a_noun_success = True + break + if prefix in self.__articles_3len and len(token) > 5: + token = token[len(prefix) :] + break + return token + + def __Prefix_Step3b_Noun(self, token): + for prefix in self.__prefix_step3b_noun: + if token.startswith(prefix): + if len(token) > 3: + if prefix == "\u0628": + token = token[len(prefix) :] + self.prefix_step3b_noun_success = True + break + + if prefix in self.__prepositions2: + token = prefix_replace(token, prefix, prefix[1]) + self.prefix_step3b_noun_success = True + break + + if prefix in self.__prepositions1 and len(token) > 4: + token = token[len(prefix) :] # BUG: cause confusion + self.prefix_step3b_noun_success = True + break + return token + + def __Prefix_Step3_Verb(self, token): + for prefix in self.__prefix_step3_verb: + if token.startswith(prefix) and len(token) > 4: + token = prefix_replace(token, prefix, 
prefix[1]) + break + return token + + def __Prefix_Step4_Verb(self, token): + for prefix in self.__prefix_step4_verb: + if token.startswith(prefix) and len(token) > 4: + token = prefix_replace(token, prefix, "\u0627\u0633\u062a") + self.is_verb = True + self.is_noun = False + break + return token + + def stem(self, word): + """ + Stem an Arabic word and return the stemmed form. + + :param word: string + :return: string + """ + # set initial values + self.is_verb = True + self.is_noun = True + self.is_defined = False + + self.suffix_verb_step2a_success = False + self.suffix_verb_step2b_success = False + self.suffix_noun_step2c2_success = False + self.suffix_noun_step1a_success = False + self.suffix_noun_step2a_success = False + self.suffix_noun_step2b_success = False + self.suffixe_noun_step1b_success = False + self.prefix_step2a_success = False + self.prefix_step3a_noun_success = False + self.prefix_step3b_noun_success = False + + modified_word = word + # guess type and properties + # checks1 + self.__checks_1(modified_word) + # checks2 + self.__checks_2(modified_word) + # Pre_Normalization + modified_word = self.__normalize_pre(modified_word) + # Avoid stopwords + if modified_word in self.stopwords or len(modified_word) <= 2: + return modified_word + # Start stemming + if self.is_verb: + modified_word = self.__Suffix_Verb_Step1(modified_word) + if self.suffixes_verb_step1_success: + modified_word = self.__Suffix_Verb_Step2a(modified_word) + if not self.suffix_verb_step2a_success: + modified_word = self.__Suffix_Verb_Step2c(modified_word) + # or next TODO: How to deal with or next instruction + else: + modified_word = self.__Suffix_Verb_Step2b(modified_word) + if not self.suffix_verb_step2b_success: + modified_word = self.__Suffix_Verb_Step2a(modified_word) + if self.is_noun: + modified_word = self.__Suffix_Noun_Step2c2(modified_word) + if not self.suffix_noun_step2c2_success: + if not self.is_defined: + modified_word = self.__Suffix_Noun_Step1a(modified_word) + # if self.suffix_noun_step1a_success: + modified_word = self.__Suffix_Noun_Step2a(modified_word) + if not self.suffix_noun_step2a_success: + modified_word = self.__Suffix_Noun_Step2b(modified_word) + if ( + not self.suffix_noun_step2b_success + and not self.suffix_noun_step2a_success + ): + modified_word = self.__Suffix_Noun_Step2c1(modified_word) + # or next ? 
todo : how to deal with or next + else: + modified_word = self.__Suffix_Noun_Step1b(modified_word) + if self.suffixe_noun_step1b_success: + modified_word = self.__Suffix_Noun_Step2a(modified_word) + if not self.suffix_noun_step2a_success: + modified_word = self.__Suffix_Noun_Step2b(modified_word) + if ( + not self.suffix_noun_step2b_success + and not self.suffix_noun_step2a_success + ): + modified_word = self.__Suffix_Noun_Step2c1(modified_word) + else: + if not self.is_defined: + modified_word = self.__Suffix_Noun_Step2a(modified_word) + modified_word = self.__Suffix_Noun_Step2b(modified_word) + modified_word = self.__Suffix_Noun_Step3(modified_word) + if not self.is_noun and self.is_verb: + modified_word = self.__Suffix_All_alef_maqsura(modified_word) + + # prefixes + modified_word = self.__Prefix_Step1(modified_word) + modified_word = self.__Prefix_Step2a(modified_word) + if not self.prefix_step2a_success: + modified_word = self.__Prefix_Step2b(modified_word) + modified_word = self.__Prefix_Step3a_Noun(modified_word) + if not self.prefix_step3a_noun_success and self.is_noun: + modified_word = self.__Prefix_Step3b_Noun(modified_word) + else: + if not self.prefix_step3b_noun_success and self.is_verb: + modified_word = self.__Prefix_Step3_Verb(modified_word) + modified_word = self.__Prefix_Step4_Verb(modified_word) + + # post normalization stemming + modified_word = self.__normalize_post(modified_word) + stemmed_word = modified_word + return stemmed_word + + +class DanishStemmer(_ScandinavianStemmer): + + """ + The Danish Snowball stemmer. + + :cvar __vowels: The Danish vowels. + :type __vowels: unicode + :cvar __consonants: The Danish consonants. + :type __consonants: unicode + :cvar __double_consonants: The Danish double consonants. + :type __double_consonants: tuple + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Danish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/danish/stemmer.html + + """ + + # The language's vowels and other important characters are defined. + __vowels = "aeiouy\xE6\xE5\xF8" + __consonants = "bcdfghjklmnpqrstvwxz" + __double_consonants = ( + "bb", + "cc", + "dd", + "ff", + "gg", + "hh", + "jj", + "kk", + "ll", + "mm", + "nn", + "pp", + "qq", + "rr", + "ss", + "tt", + "vv", + "ww", + "xx", + "zz", + ) + __s_ending = "abcdfghjklmnoprtvyz\xE5" + + # The different suffixes, divided into the algorithm's steps + # and organized by length, are listed in tuples. + __step1_suffixes = ( + "erendes", + "erende", + "hedens", + "ethed", + "erede", + "heden", + "heder", + "endes", + "ernes", + "erens", + "erets", + "ered", + "ende", + "erne", + "eren", + "erer", + "heds", + "enes", + "eres", + "eret", + "hed", + "ene", + "ere", + "ens", + "ers", + "ets", + "en", + "er", + "es", + "et", + "e", + "s", + ) + __step2_suffixes = ("gd", "dt", "gt", "kt") + __step3_suffixes = ("elig", "l\xF8st", "lig", "els", "ig") + + def stem(self, word): + """ + Stem a Danish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + # Every word is put into lower case for normalization. + word = word.lower() + + if word in self.stopwords: + return word + + # After this, the required regions are generated + # by the respective helper method. + r1 = self._r1_scandinavian(word, self.__vowels) + + # Then the actual stemming process starts. + # Every new step is explicitly indicated + # according to the descriptions on the Snowball website. + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "s": + if word[-2] in self.__s_ending: + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 3 + if r1.endswith("igst"): + word = word[:-2] + r1 = r1[:-2] + + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix == "l\xF8st": + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + + if r1.endswith(self.__step2_suffixes): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 4: Undouble + for double_cons in self.__double_consonants: + if word.endswith(double_cons) and len(word) > 3: + word = word[:-1] + break + + return word + + +class DutchStemmer(_StandardStemmer): + + """ + The Dutch Snowball stemmer. + + :cvar __vowels: The Dutch vowels. + :type __vowels: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm. + :type __step3b_suffixes: tuple + :note: A detailed description of the Dutch + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/dutch/stemmer.html + + """ + + __vowels = "aeiouy\xE8" + __step1_suffixes = ("heden", "ene", "en", "se", "s") + __step3b_suffixes = ("baar", "lijk", "bar", "end", "ing", "ig") + + def stem(self, word): + """ + Stem a Dutch word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step2_success = False + + # Vowel accents are removed. + word = ( + word.replace("\xE4", "a") + .replace("\xE1", "a") + .replace("\xEB", "e") + .replace("\xE9", "e") + .replace("\xED", "i") + .replace("\xEF", "i") + .replace("\xF6", "o") + .replace("\xF3", "o") + .replace("\xFC", "u") + .replace("\xFA", "u") + ) + + # An initial 'y', a 'y' after a vowel, + # and an 'i' between self.__vowels is put into upper case. + # As from now these are treated as consonants. + if word.startswith("y"): + word = "".join(("Y", word[1:])) + + for i in range(1, len(word)): + if word[i - 1] in self.__vowels and word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + for i in range(1, len(word) - 1): + if ( + word[i - 1] in self.__vowels + and word[i] == "i" + and word[i + 1] in self.__vowels + ): + word = "".join((word[:i], "I", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # R1 is adjusted so that the region before it + # contains at least 3 letters. 
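+        # (Illustrative: for a word such as 'ogen' the unadjusted R1
+        # would be 'en'; the loop below shortens it to 'n' so that at
+        # least three letters precede R1.)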
+ for i in range(1, len(word)): + if word[i] not in self.__vowels and word[i - 1] in self.__vowels: + if 3 > len(word[: i + 1]) > 0: + r1 = word[3:] + elif len(word[: i + 1]) == 0: + return word + break + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "heden": + word = suffix_replace(word, suffix, "heid") + r1 = suffix_replace(r1, suffix, "heid") + if r2.endswith("heden"): + r2 = suffix_replace(r2, suffix, "heid") + + elif ( + suffix in ("ene", "en") + and not word.endswith("heden") + and word[-len(suffix) - 1] not in self.__vowels + and word[-len(suffix) - 3 : -len(suffix)] != "gem" + ): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif ( + suffix in ("se", "s") + and word[-len(suffix) - 1] not in self.__vowels + and word[-len(suffix) - 1] != "j" + ): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 2 + if r1.endswith("e") and word[-2] not in self.__vowels: + step2_success = True + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + # STEP 3a + if r2.endswith("heid") and word[-5] != "c": + word = word[:-4] + r1 = r1[:-4] + r2 = r2[:-4] + + if ( + r1.endswith("en") + and word[-3] not in self.__vowels + and word[-5:-2] != "gem" + ): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + # STEP 3b: Derivational suffixes + for suffix in self.__step3b_suffixes: + if r2.endswith(suffix): + if suffix in ("end", "ing"): + word = word[:-3] + r2 = r2[:-3] + + if r2.endswith("ig") and word[-3] != "e": + word = word[:-2] + else: + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + + elif suffix == "ig" and word[-3] != "e": + word = word[:-2] + + elif suffix == "lijk": + word = word[:-4] + r1 = r1[:-4] + + if r1.endswith("e") and word[-2] not in self.__vowels: + word = word[:-1] + if word.endswith(("kk", "dd", "tt")): + word = word[:-1] + + elif suffix == "baar": + word = word[:-4] + + elif suffix == "bar" and step2_success: + word = word[:-3] + break + + # STEP 4: Undouble vowel + if len(word) >= 4: + if word[-1] not in self.__vowels and word[-1] != "I": + if word[-3:-1] in ("aa", "ee", "oo", "uu"): + if word[-4] not in self.__vowels: + word = "".join((word[:-3], word[-3], word[-1])) + + # All occurrences of 'I' and 'Y' are put back into lower case. + word = word.replace("I", "i").replace("Y", "y") + + return word + + +class EnglishStemmer(_StandardStemmer): + + """ + The English Snowball stemmer. + + :cvar __vowels: The English vowels. + :type __vowels: unicode + :cvar __double_consonants: The English double consonants. + :type __double_consonants: tuple + :cvar __li_ending: Letters that may directly appear before a word final 'li'. + :type __li_ending: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. + :type __step0_suffixes: tuple + :cvar __step1a_suffixes: Suffixes to be deleted in step 1a of the algorithm. + :type __step1a_suffixes: tuple + :cvar __step1b_suffixes: Suffixes to be deleted in step 1b of the algorithm. + :type __step1b_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. 
+ :type __step3_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm. + :type __step5_suffixes: tuple + :cvar __special_words: A dictionary containing words + which have to be stemmed specially. + :type __special_words: dict + :note: A detailed description of the English + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/english/stemmer.html + """ + + __vowels = "aeiouy" + __double_consonants = ("bb", "dd", "ff", "gg", "mm", "nn", "pp", "rr", "tt") + __li_ending = "cdeghkmnrt" + __step0_suffixes = ("'s'", "'s", "'") + __step1a_suffixes = ("sses", "ied", "ies", "us", "ss", "s") + __step1b_suffixes = ("eedly", "ingly", "edly", "eed", "ing", "ed") + __step2_suffixes = ( + "ization", + "ational", + "fulness", + "ousness", + "iveness", + "tional", + "biliti", + "lessli", + "entli", + "ation", + "alism", + "aliti", + "ousli", + "iviti", + "fulli", + "enci", + "anci", + "abli", + "izer", + "ator", + "alli", + "bli", + "ogi", + "li", + ) + __step3_suffixes = ( + "ational", + "tional", + "alize", + "icate", + "iciti", + "ative", + "ical", + "ness", + "ful", + ) + __step4_suffixes = ( + "ement", + "ance", + "ence", + "able", + "ible", + "ment", + "ant", + "ent", + "ism", + "ate", + "iti", + "ous", + "ive", + "ize", + "ion", + "al", + "er", + "ic", + ) + __step5_suffixes = ("e", "l") + __special_words = { + "skis": "ski", + "skies": "sky", + "dying": "die", + "lying": "lie", + "tying": "tie", + "idly": "idl", + "gently": "gentl", + "ugly": "ugli", + "early": "earli", + "only": "onli", + "singly": "singl", + "sky": "sky", + "news": "news", + "howe": "howe", + "atlas": "atlas", + "cosmos": "cosmos", + "bias": "bias", + "andes": "andes", + "inning": "inning", + "innings": "inning", + "outing": "outing", + "outings": "outing", + "canning": "canning", + "cannings": "canning", + "herring": "herring", + "herrings": "herring", + "earring": "earring", + "earrings": "earring", + "proceed": "proceed", + "proceeds": "proceed", + "proceeded": "proceed", + "proceeding": "proceed", + "exceed": "exceed", + "exceeds": "exceed", + "exceeded": "exceed", + "exceeding": "exceed", + "succeed": "succeed", + "succeeds": "succeed", + "succeeded": "succeed", + "succeeding": "succeed", + } + + def stem(self, word): + + """ + Stem an English word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords or len(word) <= 2: + return word + + elif word in self.__special_words: + return self.__special_words[word] + + # Map the different apostrophe characters to a single consistent one + word = ( + word.replace("\u2019", "\x27") + .replace("\u2018", "\x27") + .replace("\u201B", "\x27") + ) + + if word.startswith("\x27"): + word = word[1:] + + if word.startswith("y"): + word = "".join(("Y", word[1:])) + + for i in range(1, len(word)): + if word[i - 1] in self.__vowels and word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + step1a_vowel_found = False + step1b_vowel_found = False + + r1 = "" + r2 = "" + + if word.startswith(("gener", "commun", "arsen")): + if word.startswith(("gener", "arsen")): + r1 = word[5:] + else: + r1 = word[6:] + + for i in range(1, len(r1)): + if r1[i] not in self.__vowels and r1[i - 1] in self.__vowels: + r2 = r1[i + 1 :] + break + else: + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # STEP 0 + for suffix in self.__step0_suffixes: + if word.endswith(suffix): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 1a + for suffix in self.__step1a_suffixes: + if word.endswith(suffix): + + if suffix == "sses": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix in ("ied", "ies"): + if len(word[: -len(suffix)]) > 1: + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + else: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif suffix == "s": + for letter in word[:-2]: + if letter in self.__vowels: + step1a_vowel_found = True + break + + if step1a_vowel_found: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + break + + # STEP 1b + for suffix in self.__step1b_suffixes: + if word.endswith(suffix): + if suffix in ("eed", "eedly"): + + if r1.endswith(suffix): + word = suffix_replace(word, suffix, "ee") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ee") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ee") + else: + r2 = "" + else: + for letter in word[: -len(suffix)]: + if letter in self.__vowels: + step1b_vowel_found = True + break + + if step1b_vowel_found: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + + if word.endswith(("at", "bl", "iz")): + word = "".join((word, "e")) + r1 = "".join((r1, "e")) + + if len(word) > 5 or len(r1) >= 3: + r2 = "".join((r2, "e")) + + elif word.endswith(self.__double_consonants): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif ( + r1 == "" + and len(word) >= 3 + and word[-1] not in self.__vowels + and word[-1] not in "wxY" + and word[-2] in self.__vowels + and word[-3] not in self.__vowels + ) or ( + r1 == "" + and len(word) == 2 + and word[0] in self.__vowels + and word[1] not in self.__vowels + ): + + word = "".join((word, "e")) + + if len(r1) > 0: + r1 = "".join((r1, "e")) + + if len(r2) > 0: + r2 = "".join((r2, "e")) + break + + # STEP 1c + if len(word) > 2 and word[-1] in "yY" and word[-2] not in self.__vowels: + word = "".join((word[:-1], "i")) + if len(r1) >= 1: + r1 = "".join((r1[:-1], "i")) + else: + r1 = "" + + if len(r2) >= 1: + r2 = "".join((r2[:-1], "i")) + else: + r2 = "" + + # STEP 2 + for suffix in self.__step2_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix == "tional": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix in ("enci", "anci", "abli"): + word = "".join((word[:-1], "e")) + + if len(r1) >= 1: + r1 = "".join((r1[:-1], 
"e")) + else: + r1 = "" + + if len(r2) >= 1: + r2 = "".join((r2[:-1], "e")) + else: + r2 = "" + + elif suffix == "entli": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix in ("izer", "ization"): + word = suffix_replace(word, suffix, "ize") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ize") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ize") + else: + r2 = "" + + elif suffix in ("ational", "ation", "ator"): + word = suffix_replace(word, suffix, "ate") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ate") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ate") + else: + r2 = "e" + + elif suffix in ("alism", "aliti", "alli"): + word = suffix_replace(word, suffix, "al") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "al") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "al") + else: + r2 = "" + + elif suffix == "fulness": + word = word[:-4] + r1 = r1[:-4] + r2 = r2[:-4] + + elif suffix in ("ousli", "ousness"): + word = suffix_replace(word, suffix, "ous") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ous") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ous") + else: + r2 = "" + + elif suffix in ("iveness", "iviti"): + word = suffix_replace(word, suffix, "ive") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ive") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ive") + else: + r2 = "e" + + elif suffix in ("biliti", "bli"): + word = suffix_replace(word, suffix, "ble") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ble") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ble") + else: + r2 = "" + + elif suffix == "ogi" and word[-4] == "l": + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + + elif suffix in ("fulli", "lessli"): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "li" and word[-3] in self.__li_ending: + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + break + + # STEP 3 + for suffix in self.__step3_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix == "tional": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "ational": + word = suffix_replace(word, suffix, "ate") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ate") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ate") + else: + r2 = "" + + elif suffix == "alize": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + + elif suffix in ("icate", "iciti", "ical"): + word = suffix_replace(word, suffix, "ic") + + if len(r1) >= len(suffix): + r1 = suffix_replace(r1, suffix, "ic") + else: + r1 = "" + + if len(r2) >= len(suffix): + r2 = suffix_replace(r2, suffix, "ic") + else: + r2 = "" + + elif suffix in ("ful", "ness"): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + + elif suffix == "ative" and r2.endswith(suffix): + word = word[:-5] + r1 = r1[:-5] + r2 = r2[:-5] + break + + # STEP 4 + for suffix in self.__step4_suffixes: + if word.endswith(suffix): + if r2.endswith(suffix): + if suffix == "ion": + if word[-4] in "st": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 5 + if r2.endswith("l") and word[-2] == "l": + word = word[:-1] + elif 
r2.endswith("e"): + word = word[:-1] + elif r1.endswith("e"): + if len(word) >= 4 and ( + word[-2] in self.__vowels + or word[-2] in "wxY" + or word[-3] not in self.__vowels + or word[-4] in self.__vowels + ): + word = word[:-1] + + word = word.replace("Y", "y") + + return word + + +class FinnishStemmer(_StandardStemmer): + + """ + The Finnish Snowball stemmer. + + :cvar __vowels: The Finnish vowels. + :type __vowels: unicode + :cvar __restricted_vowels: A subset of the Finnish vowels. + :type __restricted_vowels: unicode + :cvar __long_vowels: The Finnish vowels in their long forms. + :type __long_vowels: tuple + :cvar __consonants: The Finnish consonants. + :type __consonants: unicode + :cvar __double_consonants: The Finnish double consonants. + :type __double_consonants: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :note: A detailed description of the Finnish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/finnish/stemmer.html + """ + + __vowels = "aeiouy\xE4\xF6" + __restricted_vowels = "aeiou\xE4\xF6" + __long_vowels = ("aa", "ee", "ii", "oo", "uu", "\xE4\xE4", "\xF6\xF6") + __consonants = "bcdfghjklmnpqrstvwxz" + __double_consonants = ( + "bb", + "cc", + "dd", + "ff", + "gg", + "hh", + "jj", + "kk", + "ll", + "mm", + "nn", + "pp", + "qq", + "rr", + "ss", + "tt", + "vv", + "ww", + "xx", + "zz", + ) + __step1_suffixes = ( + "kaan", + "k\xE4\xE4n", + "sti", + "kin", + "han", + "h\xE4n", + "ko", + "k\xF6", + "pa", + "p\xE4", + ) + __step2_suffixes = ("nsa", "ns\xE4", "mme", "nne", "si", "ni", "an", "\xE4n", "en") + __step3_suffixes = ( + "siin", + "tten", + "seen", + "han", + "hen", + "hin", + "hon", + "h\xE4n", + "h\xF6n", + "den", + "tta", + "tt\xE4", + "ssa", + "ss\xE4", + "sta", + "st\xE4", + "lla", + "ll\xE4", + "lta", + "lt\xE4", + "lle", + "ksi", + "ine", + "ta", + "t\xE4", + "na", + "n\xE4", + "a", + "\xE4", + "n", + ) + __step4_suffixes = ( + "impi", + "impa", + "imp\xE4", + "immi", + "imma", + "imm\xE4", + "mpi", + "mpa", + "mp\xE4", + "mmi", + "mma", + "mm\xE4", + "eja", + "ej\xE4", + ) + + def stem(self, word): + """ + Stem a Finnish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step3_success = False + + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # STEP 1: Particles etc. 
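+        # Descriptive note on the loop below: at most one particle ending is
+        # removed, and only when it appears at the end of R1.  "sti" is
+        # deleted only if it also lies in R2; the other endings (e.g. "kin",
+        # "han", "pa") are deleted only if the letter preceding them is "n",
+        # "t" or a vowel.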
+ for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "sti": + if suffix in r2: + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + else: + if word[-len(suffix) - 1] in "ntaeiouy\xE4\xF6": + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 2: Possessives + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + if suffix == "si": + if word[-3] != "k": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "ni": + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + if word.endswith("kse"): + word = suffix_replace(word, "kse", "ksi") + + if r1.endswith("kse"): + r1 = suffix_replace(r1, "kse", "ksi") + + if r2.endswith("kse"): + r2 = suffix_replace(r2, "kse", "ksi") + + elif suffix == "an": + if word[-4:-2] in ("ta", "na") or word[-5:-2] in ( + "ssa", + "sta", + "lla", + "lta", + ): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "\xE4n": + if word[-4:-2] in ("t\xE4", "n\xE4") or word[-5:-2] in ( + "ss\xE4", + "st\xE4", + "ll\xE4", + "lt\xE4", + ): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + + elif suffix == "en": + if word[-5:-2] in ("lle", "ine"): + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + else: + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + break + + # STEP 3: Cases + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix in ("han", "hen", "hin", "hon", "h\xE4n", "h\xF6n"): + if ( + (suffix == "han" and word[-4] == "a") + or (suffix == "hen" and word[-4] == "e") + or (suffix == "hin" and word[-4] == "i") + or (suffix == "hon" and word[-4] == "o") + or (suffix == "h\xE4n" and word[-4] == "\xE4") + or (suffix == "h\xF6n" and word[-4] == "\xF6") + ): + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + step3_success = True + + elif suffix in ("siin", "den", "tten"): + if ( + word[-len(suffix) - 1] == "i" + and word[-len(suffix) - 2] in self.__restricted_vowels + ): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + step3_success = True + else: + continue + + elif suffix == "seen": + if word[-6:-4] in self.__long_vowels: + word = word[:-4] + r1 = r1[:-4] + r2 = r2[:-4] + step3_success = True + else: + continue + + elif suffix in ("a", "\xE4"): + if word[-2] in self.__vowels and word[-3] in self.__consonants: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + step3_success = True + + elif suffix in ("tta", "tt\xE4"): + if word[-4] == "e": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + step3_success = True + + elif suffix == "n": + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + step3_success = True + + if word[-2:] == "ie" or word[-2:] in self.__long_vowels: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + step3_success = True + break + + # STEP 4: Other endings + for suffix in self.__step4_suffixes: + if r2.endswith(suffix): + if suffix in ("mpi", "mpa", "mp\xE4", "mmi", "mma", "mm\xE4"): + if word[-5:-3] != "po": + word = word[:-3] + r1 = r1[:-3] + r2 = r2[:-3] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 5: Plurals + if step3_success and len(r1) >= 1 and r1[-1] in "ij": + word = word[:-1] + r1 = r1[:-1] + + elif ( + not step3_success + and len(r1) >= 2 + and r1[-1] == "t" + and r1[-2] in self.__vowels + ): + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + if r2.endswith("imma"): + word = word[:-4] + r1 = r1[:-4] + elif r2.endswith("mma") and r2[-5:-3] 
!= "po": + word = word[:-3] + r1 = r1[:-3] + + # STEP 6: Tidying up + if r1[-2:] in self.__long_vowels: + word = word[:-1] + r1 = r1[:-1] + + if len(r1) >= 2 and r1[-2] in self.__consonants and r1[-1] in "a\xE4ei": + word = word[:-1] + r1 = r1[:-1] + + if r1.endswith(("oj", "uj")): + word = word[:-1] + r1 = r1[:-1] + + if r1.endswith("jo"): + word = word[:-1] + r1 = r1[:-1] + + # If the word ends with a double consonant + # followed by zero or more vowels, the last consonant is removed. + for i in range(1, len(word)): + if word[-i] in self.__vowels: + continue + else: + if i == 1: + if word[-i - 1 :] in self.__double_consonants: + word = word[:-1] + else: + if word[-i - 1 : -i + 1] in self.__double_consonants: + word = "".join((word[:-i], word[-i + 1 :])) + break + + return word + + +class FrenchStemmer(_StandardStemmer): + + """ + The French Snowball stemmer. + + :cvar __vowels: The French vowels. + :type __vowels: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm. + :type __step2a_suffixes: tuple + :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm. + :type __step2b_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :note: A detailed description of the French + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/french/stemmer.html + """ + + __vowels = "aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9" + __step1_suffixes = ( + "issements", + "issement", + "atrices", + "atrice", + "ateurs", + "ations", + "logies", + "usions", + "utions", + "ements", + "amment", + "emment", + "ances", + "iqUes", + "ismes", + "ables", + "istes", + "ateur", + "ation", + "logie", + "usion", + "ution", + "ences", + "ement", + "euses", + "ments", + "ance", + "iqUe", + "isme", + "able", + "iste", + "ence", + "it\xE9s", + "ives", + "eaux", + "euse", + "ment", + "eux", + "it\xE9", + "ive", + "ifs", + "aux", + "if", + ) + __step2a_suffixes = ( + "issaIent", + "issantes", + "iraIent", + "issante", + "issants", + "issions", + "irions", + "issais", + "issait", + "issant", + "issent", + "issiez", + "issons", + "irais", + "irait", + "irent", + "iriez", + "irons", + "iront", + "isses", + "issez", + "\xEEmes", + "\xEEtes", + "irai", + "iras", + "irez", + "isse", + "ies", + "ira", + "\xEEt", + "ie", + "ir", + "is", + "it", + "i", + ) + __step2b_suffixes = ( + "eraIent", + "assions", + "erions", + "assent", + "assiez", + "\xE8rent", + "erais", + "erait", + "eriez", + "erons", + "eront", + "aIent", + "antes", + "asses", + "ions", + "erai", + "eras", + "erez", + "\xE2mes", + "\xE2tes", + "ante", + "ants", + "asse", + "\xE9es", + "era", + "iez", + "ais", + "ait", + "ant", + "\xE9e", + "\xE9s", + "er", + "ez", + "\xE2t", + "ai", + "as", + "\xE9", + "a", + ) + __step4_suffixes = ("i\xE8re", "I\xE8re", "ion", "ier", "Ier", "e", "\xEB") + + def stem(self, word): + """ + Stem a French word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + rv_ending_found = False + step2a_success = False + step2b_success = False + + # Every occurrence of 'u' after 'q' is put into upper case. 
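+        # Descriptive note: the upper-case markers inserted below ("U", "I",
+        # "Y") flag letters that should not be counted as vowels while R1, R2
+        # and RV are computed or suffix conditions are checked; they are
+        # mapped back to lower case at the end of stem().  For example, in a
+        # word such as "quand" (illustrative) the "u" after "q" becomes "U"
+        # and is therefore not treated as a vowel.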
+ for i in range(1, len(word)): + if word[i - 1] == "q" and word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + # Every occurrence of 'u' and 'i' + # between vowels is put into upper case. + # Every occurrence of 'y' preceded or + # followed by a vowel is also put into upper case. + for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "i": + word = "".join((word[:i], "I", word[i + 1 :])) + + if word[i - 1] in self.__vowels or word[i + 1] in self.__vowels: + if word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self.__rv_french(word, self.__vowels) + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix == "eaux": + word = word[:-1] + step1_success = True + + elif suffix in ("euse", "euses"): + if suffix in r2: + word = word[: -len(suffix)] + step1_success = True + + elif suffix in r1: + word = suffix_replace(word, suffix, "eux") + step1_success = True + + elif suffix in ("ement", "ements") and suffix in rv: + word = word[: -len(suffix)] + step1_success = True + + if word[-2:] == "iv" and "iv" in r2: + word = word[:-2] + + if word[-2:] == "at" and "at" in r2: + word = word[:-2] + + elif word[-3:] == "eus": + if "eus" in r2: + word = word[:-3] + elif "eus" in r1: + word = "".join((word[:-1], "x")) + + elif word[-3:] in ("abl", "iqU"): + if "abl" in r2 or "iqU" in r2: + word = word[:-3] + + elif word[-3:] in ("i\xE8r", "I\xE8r"): + if "i\xE8r" in rv or "I\xE8r" in rv: + word = "".join((word[:-3], "i")) + + elif suffix == "amment" and suffix in rv: + word = suffix_replace(word, "amment", "ant") + rv = suffix_replace(rv, "amment", "ant") + rv_ending_found = True + + elif suffix == "emment" and suffix in rv: + word = suffix_replace(word, "emment", "ent") + rv_ending_found = True + + elif ( + suffix in ("ment", "ments") + and suffix in rv + and not rv.startswith(suffix) + and rv[rv.rindex(suffix) - 1] in self.__vowels + ): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + rv_ending_found = True + + elif suffix == "aux" and suffix in r1: + word = "".join((word[:-2], "l")) + step1_success = True + + elif ( + suffix in ("issement", "issements") + and suffix in r1 + and word[-len(suffix) - 1] not in self.__vowels + ): + word = word[: -len(suffix)] + step1_success = True + + elif ( + suffix + in ( + "ance", + "iqUe", + "isme", + "able", + "iste", + "eux", + "ances", + "iqUes", + "ismes", + "ables", + "istes", + ) + and suffix in r2 + ): + word = word[: -len(suffix)] + step1_success = True + + elif ( + suffix + in ("atrice", "ateur", "ation", "atrices", "ateurs", "ations") + and suffix in r2 + ): + word = word[: -len(suffix)] + step1_success = True + + if word[-2:] == "ic": + if "ic" in r2: + word = word[:-2] + else: + word = "".join((word[:-2], "iqU")) + + elif suffix in ("logie", "logies") and suffix in r2: + word = suffix_replace(word, suffix, "log") + step1_success = True + + elif suffix in ("usion", "ution", "usions", "utions") and suffix in r2: + word = suffix_replace(word, suffix, "u") + step1_success = True + + elif suffix in ("ence", "ences") and suffix in r2: + word = suffix_replace(word, suffix, "ent") + step1_success = True + + elif suffix in ("it\xE9", "it\xE9s") and suffix in r2: + word = word[: -len(suffix)] + step1_success = True + + if word[-4:] == "abil": + if "abil" in r2: + word = 
word[:-4] + else: + word = "".join((word[:-2], "l")) + + elif word[-2:] == "ic": + if "ic" in r2: + word = word[:-2] + else: + word = "".join((word[:-2], "iqU")) + + elif word[-2:] == "iv": + if "iv" in r2: + word = word[:-2] + + elif suffix in ("if", "ive", "ifs", "ives") and suffix in r2: + word = word[: -len(suffix)] + step1_success = True + + if word[-2:] == "at" and "at" in r2: + word = word[:-2] + + if word[-2:] == "ic": + if "ic" in r2: + word = word[:-2] + else: + word = "".join((word[:-2], "iqU")) + break + + # STEP 2a: Verb suffixes beginning 'i' + if not step1_success or rv_ending_found: + for suffix in self.__step2a_suffixes: + if word.endswith(suffix): + if ( + suffix in rv + and len(rv) > len(suffix) + and rv[rv.rindex(suffix) - 1] not in self.__vowels + ): + word = word[: -len(suffix)] + step2a_success = True + break + + # STEP 2b: Other verb suffixes + if not step2a_success: + for suffix in self.__step2b_suffixes: + if rv.endswith(suffix): + if suffix == "ions" and "ions" in r2: + word = word[:-4] + step2b_success = True + + elif suffix in ( + "eraIent", + "erions", + "\xE8rent", + "erais", + "erait", + "eriez", + "erons", + "eront", + "erai", + "eras", + "erez", + "\xE9es", + "era", + "iez", + "\xE9e", + "\xE9s", + "er", + "ez", + "\xE9", + ): + word = word[: -len(suffix)] + step2b_success = True + + elif suffix in ( + "assions", + "assent", + "assiez", + "aIent", + "antes", + "asses", + "\xE2mes", + "\xE2tes", + "ante", + "ants", + "asse", + "ais", + "ait", + "ant", + "\xE2t", + "ai", + "as", + "a", + ): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + step2b_success = True + if rv.endswith("e"): + word = word[:-1] + break + + # STEP 3 + if step1_success or step2a_success or step2b_success: + if word[-1] == "Y": + word = "".join((word[:-1], "i")) + elif word[-1] == "\xE7": + word = "".join((word[:-1], "c")) + + # STEP 4: Residual suffixes + else: + if len(word) >= 2 and word[-1] == "s" and word[-2] not in "aiou\xE8s": + word = word[:-1] + + for suffix in self.__step4_suffixes: + if word.endswith(suffix): + if suffix in rv: + if suffix == "ion" and suffix in r2 and rv[-4] in "st": + word = word[:-3] + + elif suffix in ("ier", "i\xE8re", "Ier", "I\xE8re"): + word = suffix_replace(word, suffix, "i") + + elif suffix == "e": + word = word[:-1] + + elif suffix == "\xEB" and word[-3:-1] == "gu": + word = word[:-1] + break + + # STEP 5: Undouble + if word.endswith(("enn", "onn", "ett", "ell", "eill")): + word = word[:-1] + + # STEP 6: Un-accent + for i in range(1, len(word)): + if word[-i] not in self.__vowels: + i += 1 + else: + if i != 1 and word[-i] in ("\xE9", "\xE8"): + word = "".join((word[:-i], "e", word[-i + 1 :])) + break + + word = word.replace("I", "i").replace("U", "u").replace("Y", "y") + + return word + + def __rv_french(self, word, vowels): + """ + Return the region RV that is used by the French stemmer. + + If the word begins with two vowels, RV is the region after + the third letter. Otherwise, it is the region after the first + vowel not at the beginning of the word, or the end of the word + if these positions cannot be found. (Exceptionally, u'par', + u'col' or u'tap' at the beginning of a word is also taken to + define RV as the region to their right.) + + :param word: The French word whose region RV is determined. + :type word: str or unicode + :param vowels: The French vowels that are used to determine + the region RV. + :type vowels: unicode + :return: the region RV for the respective French word. 
+ :rtype: unicode + :note: This helper method is invoked by the stem method of + the subclass FrenchStemmer. It is not to be invoked directly! + + """ + rv = "" + if len(word) >= 2: + if word.startswith(("par", "col", "tap")) or ( + word[0] in vowels and word[1] in vowels + ): + rv = word[3:] + else: + for i in range(1, len(word)): + if word[i] in vowels: + rv = word[i + 1 :] + break + + return rv + + +class GermanStemmer(_StandardStemmer): + + """ + The German Snowball stemmer. + + :cvar __vowels: The German vowels. + :type __vowels: unicode + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __st_ending: Letter that may directly appear before a word final 'st'. + :type __st_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the German + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/german/stemmer.html + + """ + + __vowels = "aeiouy\xE4\xF6\xFC" + __s_ending = "bdfghklmnrt" + __st_ending = "bdfghklmnt" + + __step1_suffixes = ("ern", "em", "er", "en", "es", "e", "s") + __step2_suffixes = ("est", "en", "er", "st") + __step3_suffixes = ("isch", "lich", "heit", "keit", "end", "ung", "ig", "ik") + + def stem(self, word): + """ + Stem a German word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + word = word.replace("\xDF", "ss") + + # Every occurrence of 'u' and 'y' + # between vowels is put into upper case. + for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "y": + word = "".join((word[:i], "Y", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + + # R1 is adjusted so that the region before it + # contains at least 3 letters. 
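+        # Illustrative example (not from the source): for "opern" the
+        # standard R1 would be "ern", but only two letters precede it, so the
+        # loop below resets R1 to word[3:], i.e. "rn".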
+ for i in range(1, len(word)): + if word[i] not in self.__vowels and word[i - 1] in self.__vowels: + if 3 > len(word[: i + 1]) > 0: + r1 = word[3:] + elif len(word[: i + 1]) == 0: + return word + break + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if ( + suffix in ("en", "es", "e") + and word[-len(suffix) - 4 : -len(suffix)] == "niss" + ): + word = word[: -len(suffix) - 1] + r1 = r1[: -len(suffix) - 1] + r2 = r2[: -len(suffix) - 1] + + elif suffix == "s": + if word[-2] in self.__s_ending: + word = word[:-1] + r1 = r1[:-1] + r2 = r2[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + if suffix == "st": + if word[-3] in self.__st_ending and len(word[:-3]) >= 3: + word = word[:-2] + r1 = r1[:-2] + r2 = r2[:-2] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + break + + # STEP 3: Derivational suffixes + for suffix in self.__step3_suffixes: + if r2.endswith(suffix): + if suffix in ("end", "ung"): + if ( + "ig" in r2[-len(suffix) - 2 : -len(suffix)] + and "e" not in r2[-len(suffix) - 3 : -len(suffix) - 2] + ): + word = word[: -len(suffix) - 2] + else: + word = word[: -len(suffix)] + + elif ( + suffix in ("ig", "ik", "isch") + and "e" not in r2[-len(suffix) - 1 : -len(suffix)] + ): + word = word[: -len(suffix)] + + elif suffix in ("lich", "heit"): + if ( + "er" in r1[-len(suffix) - 2 : -len(suffix)] + or "en" in r1[-len(suffix) - 2 : -len(suffix)] + ): + word = word[: -len(suffix) - 2] + else: + word = word[: -len(suffix)] + + elif suffix == "keit": + if "lich" in r2[-len(suffix) - 4 : -len(suffix)]: + word = word[: -len(suffix) - 4] + + elif "ig" in r2[-len(suffix) - 2 : -len(suffix)]: + word = word[: -len(suffix) - 2] + else: + word = word[: -len(suffix)] + break + + # Umlaut accents are removed and + # 'u' and 'y' are put back into lower case. + word = ( + word.replace("\xE4", "a") + .replace("\xF6", "o") + .replace("\xFC", "u") + .replace("U", "u") + .replace("Y", "y") + ) + + return word + + +class HungarianStemmer(_LanguageSpecificStemmer): + + """ + The Hungarian Snowball stemmer. + + :cvar __vowels: The Hungarian vowels. + :type __vowels: unicode + :cvar __digraphs: The Hungarian digraphs. + :type __digraphs: tuple + :cvar __double_consonants: The Hungarian double consonants. + :type __double_consonants: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. + :type __step4_suffixes: tuple + :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm. + :type __step5_suffixes: tuple + :cvar __step6_suffixes: Suffixes to be deleted in step 6 of the algorithm. + :type __step6_suffixes: tuple + :cvar __step7_suffixes: Suffixes to be deleted in step 7 of the algorithm. + :type __step7_suffixes: tuple + :cvar __step8_suffixes: Suffixes to be deleted in step 8 of the algorithm. + :type __step8_suffixes: tuple + :cvar __step9_suffixes: Suffixes to be deleted in step 9 of the algorithm. 
+ :type __step9_suffixes: tuple + :note: A detailed description of the Hungarian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/hungarian/stemmer.html + + """ + + __vowels = "aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB" + __digraphs = ("cs", "dz", "dzs", "gy", "ly", "ny", "ty", "zs") + __double_consonants = ( + "bb", + "cc", + "ccs", + "dd", + "ff", + "gg", + "ggy", + "jj", + "kk", + "ll", + "lly", + "mm", + "nn", + "nny", + "pp", + "rr", + "ss", + "ssz", + "tt", + "tty", + "vv", + "zz", + "zzs", + ) + + __step1_suffixes = ("al", "el") + __step2_suffixes = ( + "k\xE9ppen", + "onk\xE9nt", + "enk\xE9nt", + "ank\xE9nt", + "k\xE9pp", + "k\xE9nt", + "ban", + "ben", + "nak", + "nek", + "val", + "vel", + "t\xF3l", + "t\xF5l", + "r\xF3l", + "r\xF5l", + "b\xF3l", + "b\xF5l", + "hoz", + "hez", + "h\xF6z", + "n\xE1l", + "n\xE9l", + "\xE9rt", + "kor", + "ba", + "be", + "ra", + "re", + "ig", + "at", + "et", + "ot", + "\xF6t", + "ul", + "\xFCl", + "v\xE1", + "v\xE9", + "en", + "on", + "an", + "\xF6n", + "n", + "t", + ) + __step3_suffixes = ("\xE1nk\xE9nt", "\xE1n", "\xE9n") + __step4_suffixes = ( + "astul", + "est\xFCl", + "\xE1stul", + "\xE9st\xFCl", + "stul", + "st\xFCl", + ) + __step5_suffixes = ("\xE1", "\xE9") + __step6_suffixes = ( + "ok\xE9", + "\xF6k\xE9", + "ak\xE9", + "ek\xE9", + "\xE1k\xE9", + "\xE1\xE9i", + "\xE9k\xE9", + "\xE9\xE9i", + "k\xE9", + "\xE9i", + "\xE9\xE9", + "\xE9", + ) + __step7_suffixes = ( + "\xE1juk", + "\xE9j\xFCk", + "\xFCnk", + "unk", + "juk", + "j\xFCk", + "\xE1nk", + "\xE9nk", + "nk", + "uk", + "\xFCk", + "em", + "om", + "am", + "od", + "ed", + "ad", + "\xF6d", + "ja", + "je", + "\xE1m", + "\xE1d", + "\xE9m", + "\xE9d", + "m", + "d", + "a", + "e", + "o", + "\xE1", + "\xE9", + ) + __step8_suffixes = ( + "jaitok", + "jeitek", + "jaink", + "jeink", + "aitok", + "eitek", + "\xE1itok", + "\xE9itek", + "jaim", + "jeim", + "jaid", + "jeid", + "eink", + "aink", + "itek", + "jeik", + "jaik", + "\xE1ink", + "\xE9ink", + "aim", + "eim", + "aid", + "eid", + "jai", + "jei", + "ink", + "aik", + "eik", + "\xE1im", + "\xE1id", + "\xE1ik", + "\xE9im", + "\xE9id", + "\xE9ik", + "im", + "id", + "ai", + "ei", + "ik", + "\xE1i", + "\xE9i", + "i", + ) + __step9_suffixes = ("\xE1k", "\xE9k", "\xF6k", "ok", "ek", "ak", "k") + + def stem(self, word): + """ + Stem an Hungarian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs) + + # STEP 1: Remove instrumental case + if r1.endswith(self.__step1_suffixes): + for double_cons in self.__double_consonants: + if word[-2 - len(double_cons) : -2] == double_cons: + word = "".join((word[:-4], word[-3])) + + if r1[-2 - len(double_cons) : -2] == double_cons: + r1 = "".join((r1[:-4], r1[-3])) + break + + # STEP 2: Remove frequent cases + for suffix in self.__step2_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + + if r1.endswith("\xE1"): + word = "".join((word[:-1], "a")) + r1 = suffix_replace(r1, "\xE1", "a") + + elif r1.endswith("\xE9"): + word = "".join((word[:-1], "e")) + r1 = suffix_replace(r1, "\xE9", "e") + break + + # STEP 3: Remove special cases + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix == "\xE9n": + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + break + + # STEP 4: Remove other cases + for suffix in self.__step4_suffixes: + if r1.endswith(suffix): + if suffix == "\xE1stul": + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix == "\xE9st\xFCl": + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 5: Remove factive case + for suffix in self.__step5_suffixes: + if r1.endswith(suffix): + for double_cons in self.__double_consonants: + if word[-1 - len(double_cons) : -1] == double_cons: + word = "".join((word[:-3], word[-2])) + + if r1[-1 - len(double_cons) : -1] == double_cons: + r1 = "".join((r1[:-3], r1[-2])) + break + + # STEP 6: Remove owned + for suffix in self.__step6_suffixes: + if r1.endswith(suffix): + if suffix in ("\xE1k\xE9", "\xE1\xE9i"): + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix in ("\xE9k\xE9", "\xE9\xE9i", "\xE9\xE9"): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 7: Remove singular owner suffixes + for suffix in self.__step7_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix in ("\xE1nk", "\xE1juk", "\xE1m", "\xE1d", "\xE1"): + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix in ("\xE9nk", "\xE9j\xFCk", "\xE9m", "\xE9d", "\xE9"): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 8: Remove plural owner suffixes + for suffix in self.__step8_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if suffix in ( + "\xE1im", + "\xE1id", + "\xE1i", + "\xE1ink", + "\xE1itok", + "\xE1ik", + ): + word = suffix_replace(word, suffix, "a") + r1 = suffix_replace(r1, suffix, "a") + + elif suffix in ( + "\xE9im", + "\xE9id", + "\xE9i", + "\xE9ink", + "\xE9itek", + "\xE9ik", + ): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 9: Remove plural suffixes + for suffix in self.__step9_suffixes: + if word.endswith(suffix): + if r1.endswith(suffix): + if 
suffix == "\xE1k": + word = suffix_replace(word, suffix, "a") + elif suffix == "\xE9k": + word = suffix_replace(word, suffix, "e") + else: + word = word[: -len(suffix)] + break + + return word + + def __r1_hungarian(self, word, vowels, digraphs): + """ + Return the region R1 that is used by the Hungarian stemmer. + + If the word begins with a vowel, R1 is defined as the region + after the first consonant or digraph (= two letters stand for + one phoneme) in the word. If the word begins with a consonant, + it is defined as the region after the first vowel in the word. + If the word does not contain both a vowel and consonant, R1 + is the null region at the end of the word. + + :param word: The Hungarian word whose region R1 is determined. + :type word: str or unicode + :param vowels: The Hungarian vowels that are used to determine + the region R1. + :type vowels: unicode + :param digraphs: The digraphs that are used to determine the + region R1. + :type digraphs: tuple + :return: the region R1 for the respective word. + :rtype: unicode + :note: This helper method is invoked by the stem method of the subclass + HungarianStemmer. It is not to be invoked directly! + + """ + r1 = "" + if word[0] in vowels: + for digraph in digraphs: + if digraph in word[1:]: + r1 = word[word.index(digraph[-1]) + 1 :] + return r1 + + for i in range(1, len(word)): + if word[i] not in vowels: + r1 = word[i + 1 :] + break + else: + for i in range(1, len(word)): + if word[i] in vowels: + r1 = word[i + 1 :] + break + + return r1 + + +class ItalianStemmer(_StandardStemmer): + + """ + The Italian Snowball stemmer. + + :cvar __vowels: The Italian vowels. + :type __vowels: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. + :type __step0_suffixes: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. 
+ :type __step2_suffixes: tuple + :note: A detailed description of the Italian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/italian/stemmer.html + + """ + + __vowels = "aeiou\xE0\xE8\xEC\xF2\xF9" + __step0_suffixes = ( + "gliela", + "gliele", + "glieli", + "glielo", + "gliene", + "sene", + "mela", + "mele", + "meli", + "melo", + "mene", + "tela", + "tele", + "teli", + "telo", + "tene", + "cela", + "cele", + "celi", + "celo", + "cene", + "vela", + "vele", + "veli", + "velo", + "vene", + "gli", + "ci", + "la", + "le", + "li", + "lo", + "mi", + "ne", + "si", + "ti", + "vi", + ) + __step1_suffixes = ( + "atrice", + "atrici", + "azione", + "azioni", + "uzione", + "uzioni", + "usione", + "usioni", + "amento", + "amenti", + "imento", + "imenti", + "amente", + "abile", + "abili", + "ibile", + "ibili", + "mente", + "atore", + "atori", + "logia", + "logie", + "anza", + "anze", + "iche", + "ichi", + "ismo", + "ismi", + "ista", + "iste", + "isti", + "ist\xE0", + "ist\xE8", + "ist\xEC", + "ante", + "anti", + "enza", + "enze", + "ico", + "ici", + "ica", + "ice", + "oso", + "osi", + "osa", + "ose", + "it\xE0", + "ivo", + "ivi", + "iva", + "ive", + ) + __step2_suffixes = ( + "erebbero", + "irebbero", + "assero", + "assimo", + "eranno", + "erebbe", + "eremmo", + "ereste", + "eresti", + "essero", + "iranno", + "irebbe", + "iremmo", + "ireste", + "iresti", + "iscano", + "iscono", + "issero", + "arono", + "avamo", + "avano", + "avate", + "eremo", + "erete", + "erono", + "evamo", + "evano", + "evate", + "iremo", + "irete", + "irono", + "ivamo", + "ivano", + "ivate", + "ammo", + "ando", + "asse", + "assi", + "emmo", + "enda", + "ende", + "endi", + "endo", + "erai", + "erei", + "Yamo", + "iamo", + "immo", + "irai", + "irei", + "isca", + "isce", + "isci", + "isco", + "ano", + "are", + "ata", + "ate", + "ati", + "ato", + "ava", + "avi", + "avo", + "er\xE0", + "ere", + "er\xF2", + "ete", + "eva", + "evi", + "evo", + "ir\xE0", + "ire", + "ir\xF2", + "ita", + "ite", + "iti", + "ito", + "iva", + "ivi", + "ivo", + "ono", + "uta", + "ute", + "uti", + "uto", + "ar", + "ir", + ) + + def stem(self, word): + """ + Stem an Italian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + + # All acute accents are replaced by grave accents. + word = ( + word.replace("\xE1", "\xE0") + .replace("\xE9", "\xE8") + .replace("\xED", "\xEC") + .replace("\xF3", "\xF2") + .replace("\xFA", "\xF9") + ) + + # Every occurrence of 'u' after 'q' + # is put into upper case. + for i in range(1, len(word)): + if word[i - 1] == "q" and word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + # Every occurrence of 'u' and 'i' + # between vowels is put into upper case. 
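+        # Descriptive note: as in the French stemmer, the markers "U" and "I"
+        # flag letters that should not be counted as vowels while R1, R2 and
+        # RV are computed or suffixes are matched; they are lowered again at
+        # the end of stem().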
+ for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "i": + word = "".join((word[:i], "I", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 0: Attached pronoun + for suffix in self.__step0_suffixes: + if rv.endswith(suffix): + if rv[-len(suffix) - 4 : -len(suffix)] in ("ando", "endo"): + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + elif rv[-len(suffix) - 2 : -len(suffix)] in ("ar", "er", "ir"): + word = suffix_replace(word, suffix, "e") + r1 = suffix_replace(r1, suffix, "e") + r2 = suffix_replace(r2, suffix, "e") + rv = suffix_replace(rv, suffix, "e") + break + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix == "amente" and r1.endswith(suffix): + step1_success = True + word = word[:-6] + r2 = r2[:-6] + rv = rv[:-6] + + if r2.endswith("iv"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(("os", "ic")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith("abil"): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("amento", "amenti", "imento", "imenti") and rv.endswith( + suffix + ): + step1_success = True + word = word[:-6] + rv = rv[:-6] + + elif r2.endswith(suffix): + step1_success = True + if suffix in ("azione", "azioni", "atore", "atori"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith("ic"): + word = word[:-2] + rv = rv[:-2] + + elif suffix in ("logia", "logie"): + word = word[:-2] + rv = word[:-2] + + elif suffix in ("uzione", "uzioni", "usione", "usioni"): + word = word[:-5] + rv = rv[:-5] + + elif suffix in ("enza", "enze"): + word = suffix_replace(word, suffix, "te") + rv = suffix_replace(rv, suffix, "te") + + elif suffix == "it\xE0": + word = word[:-3] + r2 = r2[:-3] + rv = rv[:-3] + + if r2.endswith(("ic", "iv")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith("abil"): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("ivo", "ivi", "iva", "ive"): + word = word[:-3] + r2 = r2[:-3] + rv = rv[:-3] + + if r2.endswith("at"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("ic"): + word = word[:-2] + rv = rv[:-2] + else: + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2: Verb suffixes + if not step1_success: + for suffix in self.__step2_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 3a + if rv.endswith(("a", "e", "i", "o", "\xE0", "\xE8", "\xEC", "\xF2")): + word = word[:-1] + rv = rv[:-1] + + if rv.endswith("i"): + word = word[:-1] + rv = rv[:-1] + + # STEP 3b + if rv.endswith(("ch", "gh")): + word = word[:-1] + + word = word.replace("I", "i").replace("U", "u") + + return word + + +class NorwegianStemmer(_ScandinavianStemmer): + + """ + The Norwegian Snowball stemmer. + + :cvar __vowels: The Norwegian vowels. + :type __vowels: unicode + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. 
+ :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Norwegian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/norwegian/stemmer.html + + """ + + __vowels = "aeiouy\xE6\xE5\xF8" + __s_ending = "bcdfghjlmnoprtvyz" + __step1_suffixes = ( + "hetenes", + "hetene", + "hetens", + "heter", + "heten", + "endes", + "ande", + "ende", + "edes", + "enes", + "erte", + "ede", + "ane", + "ene", + "ens", + "ers", + "ets", + "het", + "ast", + "ert", + "en", + "ar", + "er", + "as", + "es", + "et", + "a", + "e", + "s", + ) + + __step2_suffixes = ("dt", "vt") + + __step3_suffixes = ( + "hetslov", + "eleg", + "elig", + "elov", + "slov", + "leg", + "eig", + "lig", + "els", + "lov", + "ig", + ) + + def stem(self, word): + """ + Stem a Norwegian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + r1 = self._r1_scandinavian(word, self.__vowels) + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix in ("erte", "ert"): + word = suffix_replace(word, suffix, "er") + r1 = suffix_replace(r1, suffix, "er") + + elif suffix == "s": + if word[-2] in self.__s_ending or ( + word[-2] == "k" and word[-3] not in self.__vowels + ): + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 3 + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + word = word[: -len(suffix)] + break + + return word + + +class PortugueseStemmer(_StandardStemmer): + + """ + The Portuguese Snowball stemmer. + + :cvar __vowels: The Portuguese vowels. + :type __vowels: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm. 
+ :type __step4_suffixes: tuple + :note: A detailed description of the Portuguese + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/portuguese/stemmer.html + + """ + + __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4" + __step1_suffixes = ( + "amentos", + "imentos", + "uço~es", + "amento", + "imento", + "adoras", + "adores", + "a\xE7o~es", + "logias", + "\xEAncias", + "amente", + "idades", + "an\xE7as", + "ismos", + "istas", + "adora", + "a\xE7a~o", + "antes", + "\xE2ncia", + "logia", + "uça~o", + "\xEAncia", + "mente", + "idade", + "an\xE7a", + "ezas", + "icos", + "icas", + "ismo", + "\xE1vel", + "\xEDvel", + "ista", + "osos", + "osas", + "ador", + "ante", + "ivas", + "ivos", + "iras", + "eza", + "ico", + "ica", + "oso", + "osa", + "iva", + "ivo", + "ira", + ) + __step2_suffixes = ( + "ar\xEDamos", + "er\xEDamos", + "ir\xEDamos", + "\xE1ssemos", + "\xEAssemos", + "\xEDssemos", + "ar\xEDeis", + "er\xEDeis", + "ir\xEDeis", + "\xE1sseis", + "\xE9sseis", + "\xEDsseis", + "\xE1ramos", + "\xE9ramos", + "\xEDramos", + "\xE1vamos", + "aremos", + "eremos", + "iremos", + "ariam", + "eriam", + "iriam", + "assem", + "essem", + "issem", + "ara~o", + "era~o", + "ira~o", + "arias", + "erias", + "irias", + "ardes", + "erdes", + "irdes", + "asses", + "esses", + "isses", + "astes", + "estes", + "istes", + "\xE1reis", + "areis", + "\xE9reis", + "ereis", + "\xEDreis", + "ireis", + "\xE1veis", + "\xEDamos", + "armos", + "ermos", + "irmos", + "aria", + "eria", + "iria", + "asse", + "esse", + "isse", + "aste", + "este", + "iste", + "arei", + "erei", + "irei", + "aram", + "eram", + "iram", + "avam", + "arem", + "erem", + "irem", + "ando", + "endo", + "indo", + "adas", + "idas", + "ar\xE1s", + "aras", + "er\xE1s", + "eras", + "ir\xE1s", + "avas", + "ares", + "eres", + "ires", + "\xEDeis", + "ados", + "idos", + "\xE1mos", + "amos", + "emos", + "imos", + "iras", + "ada", + "ida", + "ar\xE1", + "ara", + "er\xE1", + "era", + "ir\xE1", + "ava", + "iam", + "ado", + "ido", + "ias", + "ais", + "eis", + "ira", + "ia", + "ei", + "am", + "em", + "ar", + "er", + "ir", + "as", + "es", + "is", + "eu", + "iu", + "ou", + ) + __step4_suffixes = ("os", "a", "i", "o", "\xE1", "\xED", "\xF3") + + def stem(self, word): + """ + Stem a Portuguese word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + step2_success = False + + word = ( + word.replace("\xE3", "a~") + .replace("\xF5", "o~") + .replace("q\xFC", "qu") + .replace("g\xFC", "gu") + ) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix == "amente" and r1.endswith(suffix): + step1_success = True + + word = word[:-6] + r2 = r2[:-6] + rv = rv[:-6] + + if r2.endswith("iv"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(("os", "ic", "ad")): + word = word[:-2] + rv = rv[:-2] + + elif ( + suffix in ("ira", "iras") + and rv.endswith(suffix) + and word[-len(suffix) - 1 : -len(suffix)] == "e" + ): + step1_success = True + + word = suffix_replace(word, suffix, "ir") + rv = suffix_replace(rv, suffix, "ir") + + elif r2.endswith(suffix): + step1_success = True + + if suffix in ("logia", "logias"): + word = suffix_replace(word, suffix, "log") + rv = suffix_replace(rv, suffix, "log") + + elif suffix in ("uça~o", "uço~es"): + word = suffix_replace(word, suffix, "u") + rv = suffix_replace(rv, suffix, "u") + + elif suffix in ("\xEAncia", "\xEAncias"): + word = suffix_replace(word, suffix, "ente") + rv = suffix_replace(rv, suffix, "ente") + + elif suffix == "mente": + word = word[:-5] + r2 = r2[:-5] + rv = rv[:-5] + + if r2.endswith(("ante", "avel", "ivel")): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("idade", "idades"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith(("ic", "iv")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith("abil"): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("iva", "ivo", "ivas", "ivos"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + else: + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2: Verb suffixes + if not step1_success: + for suffix in self.__step2_suffixes: + if rv.endswith(suffix): + step2_success = True + + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 3 + if step1_success or step2_success: + if rv.endswith("i") and word[-2] == "c": + word = word[:-1] + rv = rv[:-1] + + ### STEP 4: Residual suffix + if not step1_success and not step2_success: + for suffix in self.__step4_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 5 + if rv.endswith(("e", "\xE9", "\xEA")): + word = word[:-1] + rv = rv[:-1] + + if (word.endswith("gu") and rv.endswith("u")) or ( + word.endswith("ci") and rv.endswith("i") + ): + word = word[:-1] + + elif word.endswith("\xE7"): + word = suffix_replace(word, "\xE7", "c") + + word = word.replace("a~", "\xE3").replace("o~", "\xF5") + + return word + + +class RomanianStemmer(_StandardStemmer): + + """ + The Romanian Snowball stemmer. + + :cvar __vowels: The Romanian vowels. + :type __vowels: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. + :type __step0_suffixes: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. 
+ :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Romanian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/romanian/stemmer.html + + """ + + __vowels = "aeiou\u0103\xE2\xEE" + __step0_suffixes = ( + "iilor", + "ului", + "elor", + "iile", + "ilor", + "atei", + "a\u0163ie", + "a\u0163ia", + "aua", + "ele", + "iua", + "iei", + "ile", + "ul", + "ea", + "ii", + ) + __step1_suffixes = ( + "abilitate", + "abilitati", + "abilit\u0103\u0163i", + "ibilitate", + "abilit\u0103i", + "ivitate", + "ivitati", + "ivit\u0103\u0163i", + "icitate", + "icitati", + "icit\u0103\u0163i", + "icatori", + "ivit\u0103i", + "icit\u0103i", + "icator", + "a\u0163iune", + "atoare", + "\u0103toare", + "i\u0163iune", + "itoare", + "iciva", + "icive", + "icivi", + "iciv\u0103", + "icala", + "icale", + "icali", + "ical\u0103", + "ativa", + "ative", + "ativi", + "ativ\u0103", + "atori", + "\u0103tori", + "itiva", + "itive", + "itivi", + "itiv\u0103", + "itori", + "iciv", + "ical", + "ativ", + "ator", + "\u0103tor", + "itiv", + "itor", + ) + __step2_suffixes = ( + "abila", + "abile", + "abili", + "abil\u0103", + "ibila", + "ibile", + "ibili", + "ibil\u0103", + "atori", + "itate", + "itati", + "it\u0103\u0163i", + "abil", + "ibil", + "oasa", + "oas\u0103", + "oase", + "anta", + "ante", + "anti", + "ant\u0103", + "ator", + "it\u0103i", + "iune", + "iuni", + "isme", + "ista", + "iste", + "isti", + "ist\u0103", + "i\u015Fti", + "ata", + "at\u0103", + "ati", + "ate", + "uta", + "ut\u0103", + "uti", + "ute", + "ita", + "it\u0103", + "iti", + "ite", + "ica", + "ice", + "ici", + "ic\u0103", + "osi", + "o\u015Fi", + "ant", + "iva", + "ive", + "ivi", + "iv\u0103", + "ism", + "ist", + "at", + "ut", + "it", + "ic", + "os", + "iv", + ) + __step3_suffixes = ( + "seser\u0103\u0163i", + "aser\u0103\u0163i", + "iser\u0103\u0163i", + "\xE2ser\u0103\u0163i", + "user\u0103\u0163i", + "seser\u0103m", + "aser\u0103m", + "iser\u0103m", + "\xE2ser\u0103m", + "user\u0103m", + "ser\u0103\u0163i", + "sese\u015Fi", + "seser\u0103", + "easc\u0103", + "ar\u0103\u0163i", + "ur\u0103\u0163i", + "ir\u0103\u0163i", + "\xE2r\u0103\u0163i", + "ase\u015Fi", + "aser\u0103", + "ise\u015Fi", + "iser\u0103", + "\xe2se\u015Fi", + "\xE2ser\u0103", + "use\u015Fi", + "user\u0103", + "ser\u0103m", + "sesem", + "indu", + "\xE2ndu", + "eaz\u0103", + "e\u015Fti", + "e\u015Fte", + "\u0103\u015Fti", + "\u0103\u015Fte", + "ea\u0163i", + "ia\u0163i", + "ar\u0103m", + "ur\u0103m", + "ir\u0103m", + "\xE2r\u0103m", + "asem", + "isem", + "\xE2sem", + "usem", + "se\u015Fi", + "ser\u0103", + "sese", + "are", + "ere", + "ire", + "\xE2re", + "ind", + "\xE2nd", + "eze", + "ezi", + "esc", + "\u0103sc", + "eam", + "eai", + "eau", + "iam", + "iai", + "iau", + "a\u015Fi", + "ar\u0103", + "u\u015Fi", + "ur\u0103", + "i\u015Fi", + "ir\u0103", + "\xE2\u015Fi", + "\xe2r\u0103", + "ase", + "ise", + "\xE2se", + "use", + "a\u0163i", + "e\u0163i", + "i\u0163i", + "\xe2\u0163i", + "sei", + "ez", + "am", + "ai", + "au", + "ea", + "ia", + "ui", + "\xE2i", + "\u0103m", + "em", + "im", + "\xE2m", + "se", + ) + + def stem(self, word): + """ + Stem a Romanian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + step2_success = False + + for i in range(1, len(word) - 1): + if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: + if word[i] == "u": + word = "".join((word[:i], "U", word[i + 1 :])) + + elif word[i] == "i": + word = "".join((word[:i], "I", word[i + 1 :])) + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 0: Removal of plurals and other simplifications + for suffix in self.__step0_suffixes: + if word.endswith(suffix): + if suffix in r1: + if suffix in ("ul", "ului"): + word = word[: -len(suffix)] + + if suffix in rv: + rv = rv[: -len(suffix)] + else: + rv = "" + + elif ( + suffix == "aua" + or suffix == "atei" + or (suffix == "ile" and word[-5:-3] != "ab") + ): + word = word[:-2] + + elif suffix in ("ea", "ele", "elor"): + word = suffix_replace(word, suffix, "e") + + if suffix in rv: + rv = suffix_replace(rv, suffix, "e") + else: + rv = "" + + elif suffix in ("ii", "iua", "iei", "iile", "iilor", "ilor"): + word = suffix_replace(word, suffix, "i") + + if suffix in rv: + rv = suffix_replace(rv, suffix, "i") + else: + rv = "" + + elif suffix in ("a\u0163ie", "a\u0163ia"): + word = word[:-1] + break + + # STEP 1: Reduction of combining suffixes + while True: + + replacement_done = False + + for suffix in self.__step1_suffixes: + if word.endswith(suffix): + if suffix in r1: + step1_success = True + replacement_done = True + + if suffix in ( + "abilitate", + "abilitati", + "abilit\u0103i", + "abilit\u0103\u0163i", + ): + word = suffix_replace(word, suffix, "abil") + + elif suffix == "ibilitate": + word = word[:-5] + + elif suffix in ( + "ivitate", + "ivitati", + "ivit\u0103i", + "ivit\u0103\u0163i", + ): + word = suffix_replace(word, suffix, "iv") + + elif suffix in ( + "icitate", + "icitati", + "icit\u0103i", + "icit\u0103\u0163i", + "icator", + "icatori", + "iciv", + "iciva", + "icive", + "icivi", + "iciv\u0103", + "ical", + "icala", + "icale", + "icali", + "ical\u0103", + ): + word = suffix_replace(word, suffix, "ic") + + elif suffix in ( + "ativ", + "ativa", + "ative", + "ativi", + "ativ\u0103", + "a\u0163iune", + "atoare", + "ator", + "atori", + "\u0103toare", + "\u0103tor", + "\u0103tori", + ): + word = suffix_replace(word, suffix, "at") + + if suffix in r2: + r2 = suffix_replace(r2, suffix, "at") + + elif suffix in ( + "itiv", + "itiva", + "itive", + "itivi", + "itiv\u0103", + "i\u0163iune", + "itoare", + "itor", + "itori", + ): + word = suffix_replace(word, suffix, "it") + + if suffix in r2: + r2 = suffix_replace(r2, suffix, "it") + else: + step1_success = False + break + + if not replacement_done: + break + + # STEP 2: Removal of standard suffixes + for suffix in self.__step2_suffixes: + if word.endswith(suffix): + if suffix in r2: + step2_success = True + + if suffix in ("iune", "iuni"): + if word[-5] == "\u0163": + word = "".join((word[:-5], "t")) + + elif suffix in ( + "ism", + "isme", + "ist", + "ista", + "iste", + "isti", + "ist\u0103", + "i\u015Fti", + ): + word = suffix_replace(word, suffix, "ist") + + else: + word = word[: -len(suffix)] + break + + # STEP 3: Removal of verb suffixes + if not step1_success and not step2_success: + for suffix in self.__step3_suffixes: + if word.endswith(suffix): + if suffix in rv: + if suffix in ( + "seser\u0103\u0163i", + "seser\u0103m", + "ser\u0103\u0163i", + "sese\u015Fi", + "seser\u0103", + "ser\u0103m", + "sesem", + "se\u015Fi", + "ser\u0103", + 
"sese", + "a\u0163i", + "e\u0163i", + "i\u0163i", + "\xE2\u0163i", + "sei", + "\u0103m", + "em", + "im", + "\xE2m", + "se", + ): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + else: + if ( + not rv.startswith(suffix) + and rv[rv.index(suffix) - 1] not in "aeio\u0103\xE2\xEE" + ): + word = word[: -len(suffix)] + break + + # STEP 4: Removal of final vowel + for suffix in ("ie", "a", "e", "i", "\u0103"): + if word.endswith(suffix): + if suffix in rv: + word = word[: -len(suffix)] + break + + word = word.replace("I", "i").replace("U", "u") + + return word + + +class RussianStemmer(_LanguageSpecificStemmer): + + """ + The Russian Snowball stemmer. + + :cvar __perfective_gerund_suffixes: Suffixes to be deleted. + :type __perfective_gerund_suffixes: tuple + :cvar __adjectival_suffixes: Suffixes to be deleted. + :type __adjectival_suffixes: tuple + :cvar __reflexive_suffixes: Suffixes to be deleted. + :type __reflexive_suffixes: tuple + :cvar __verb_suffixes: Suffixes to be deleted. + :type __verb_suffixes: tuple + :cvar __noun_suffixes: Suffixes to be deleted. + :type __noun_suffixes: tuple + :cvar __superlative_suffixes: Suffixes to be deleted. + :type __superlative_suffixes: tuple + :cvar __derivational_suffixes: Suffixes to be deleted. + :type __derivational_suffixes: tuple + :note: A detailed description of the Russian + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/russian/stemmer.html + + """ + + __perfective_gerund_suffixes = ( + "ivshis'", + "yvshis'", + "vshis'", + "ivshi", + "yvshi", + "vshi", + "iv", + "yv", + "v", + ) + __adjectival_suffixes = ( + "ui^ushchi^ui^u", + "ui^ushchi^ai^a", + "ui^ushchimi", + "ui^ushchymi", + "ui^ushchego", + "ui^ushchogo", + "ui^ushchemu", + "ui^ushchomu", + "ui^ushchikh", + "ui^ushchykh", + "ui^ushchui^u", + "ui^ushchaia", + "ui^ushchoi^u", + "ui^ushchei^u", + "i^ushchi^ui^u", + "i^ushchi^ai^a", + "ui^ushchee", + "ui^ushchie", + "ui^ushchye", + "ui^ushchoe", + "ui^ushchei`", + "ui^ushchii`", + "ui^ushchyi`", + "ui^ushchoi`", + "ui^ushchem", + "ui^ushchim", + "ui^ushchym", + "ui^ushchom", + "i^ushchimi", + "i^ushchymi", + "i^ushchego", + "i^ushchogo", + "i^ushchemu", + "i^ushchomu", + "i^ushchikh", + "i^ushchykh", + "i^ushchui^u", + "i^ushchai^a", + "i^ushchoi^u", + "i^ushchei^u", + "i^ushchee", + "i^ushchie", + "i^ushchye", + "i^ushchoe", + "i^ushchei`", + "i^ushchii`", + "i^ushchyi`", + "i^ushchoi`", + "i^ushchem", + "i^ushchim", + "i^ushchym", + "i^ushchom", + "shchi^ui^u", + "shchi^ai^a", + "ivshi^ui^u", + "ivshi^ai^a", + "yvshi^ui^u", + "yvshi^ai^a", + "shchimi", + "shchymi", + "shchego", + "shchogo", + "shchemu", + "shchomu", + "shchikh", + "shchykh", + "shchui^u", + "shchai^a", + "shchoi^u", + "shchei^u", + "ivshimi", + "ivshymi", + "ivshego", + "ivshogo", + "ivshemu", + "ivshomu", + "ivshikh", + "ivshykh", + "ivshui^u", + "ivshai^a", + "ivshoi^u", + "ivshei^u", + "yvshimi", + "yvshymi", + "yvshego", + "yvshogo", + "yvshemu", + "yvshomu", + "yvshikh", + "yvshykh", + "yvshui^u", + "yvshai^a", + "yvshoi^u", + "yvshei^u", + "vshi^ui^u", + "vshi^ai^a", + "shchee", + "shchie", + "shchye", + "shchoe", + "shchei`", + "shchii`", + "shchyi`", + "shchoi`", + "shchem", + "shchim", + "shchym", + "shchom", + "ivshee", + "ivshie", + "ivshye", + "ivshoe", + "ivshei`", + "ivshii`", + "ivshyi`", + "ivshoi`", + "ivshem", + "ivshim", + "ivshym", + "ivshom", + "yvshee", + "yvshie", + "yvshye", + "yvshoe", + "yvshei`", + "yvshii`", + "yvshyi`", + "yvshoi`", + "yvshem", + "yvshim", + "yvshym", + "yvshom", + "vshimi", + 
"vshymi", + "vshego", + "vshogo", + "vshemu", + "vshomu", + "vshikh", + "vshykh", + "vshui^u", + "vshai^a", + "vshoi^u", + "vshei^u", + "emi^ui^u", + "emi^ai^a", + "nni^ui^u", + "nni^ai^a", + "vshee", + "vshie", + "vshye", + "vshoe", + "vshei`", + "vshii`", + "vshyi`", + "vshoi`", + "vshem", + "vshim", + "vshym", + "vshom", + "emimi", + "emymi", + "emego", + "emogo", + "ememu", + "emomu", + "emikh", + "emykh", + "emui^u", + "emai^a", + "emoi^u", + "emei^u", + "nnimi", + "nnymi", + "nnego", + "nnogo", + "nnemu", + "nnomu", + "nnikh", + "nnykh", + "nnui^u", + "nnai^a", + "nnoi^u", + "nnei^u", + "emee", + "emie", + "emye", + "emoe", + "emei`", + "emii`", + "emyi`", + "emoi`", + "emem", + "emim", + "emym", + "emom", + "nnee", + "nnie", + "nnye", + "nnoe", + "nnei`", + "nnii`", + "nnyi`", + "nnoi`", + "nnem", + "nnim", + "nnym", + "nnom", + "i^ui^u", + "i^ai^a", + "imi", + "ymi", + "ego", + "ogo", + "emu", + "omu", + "ikh", + "ykh", + "ui^u", + "ai^a", + "oi^u", + "ei^u", + "ee", + "ie", + "ye", + "oe", + "ei`", + "ii`", + "yi`", + "oi`", + "em", + "im", + "ym", + "om", + ) + __reflexive_suffixes = ("si^a", "s'") + __verb_suffixes = ( + "esh'", + "ei`te", + "ui`te", + "ui^ut", + "ish'", + "ete", + "i`te", + "i^ut", + "nno", + "ila", + "yla", + "ena", + "ite", + "ili", + "yli", + "ilo", + "ylo", + "eno", + "i^at", + "uet", + "eny", + "it'", + "yt'", + "ui^u", + "la", + "na", + "li", + "em", + "lo", + "no", + "et", + "ny", + "t'", + "ei`", + "ui`", + "il", + "yl", + "im", + "ym", + "en", + "it", + "yt", + "i^u", + "i`", + "l", + "n", + ) + __noun_suffixes = ( + "ii^ami", + "ii^akh", + "i^ami", + "ii^am", + "i^akh", + "ami", + "iei`", + "i^am", + "iem", + "akh", + "ii^u", + "'i^u", + "ii^a", + "'i^a", + "ev", + "ov", + "ie", + "'e", + "ei", + "ii", + "ei`", + "oi`", + "ii`", + "em", + "am", + "om", + "i^u", + "i^a", + "a", + "e", + "i", + "i`", + "o", + "u", + "y", + "'", + ) + __superlative_suffixes = ("ei`she", "ei`sh") + __derivational_suffixes = ("ost'", "ost") + + def stem(self, word): + """ + Stem a Russian word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + if word in self.stopwords: + return word + + chr_exceeded = False + for i in range(len(word)): + if ord(word[i]) > 255: + chr_exceeded = True + break + + if not chr_exceeded: + return word + + word = self.__cyrillic_to_roman(word) + + step1_success = False + adjectival_removed = False + verb_removed = False + undouble_success = False + superlative_removed = False + + rv, r2 = self.__regions_russian(word) + + # Step 1 + for suffix in self.__perfective_gerund_suffixes: + if rv.endswith(suffix): + if suffix in ("v", "vshi", "vshis'"): + if ( + rv[-len(suffix) - 3 : -len(suffix)] == "i^a" + or rv[-len(suffix) - 1 : -len(suffix)] == "a" + ): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + step1_success = True + break + else: + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + step1_success = True + break + + if not step1_success: + for suffix in self.__reflexive_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + for suffix in self.__adjectival_suffixes: + if rv.endswith(suffix): + if suffix in ( + "i^ushchi^ui^u", + "i^ushchi^ai^a", + "i^ushchui^u", + "i^ushchai^a", + "i^ushchoi^u", + "i^ushchei^u", + "i^ushchimi", + "i^ushchymi", + "i^ushchego", + "i^ushchogo", + "i^ushchemu", + "i^ushchomu", + "i^ushchikh", + "i^ushchykh", + "shchi^ui^u", + "shchi^ai^a", + "i^ushchee", + "i^ushchie", + "i^ushchye", + "i^ushchoe", + "i^ushchei`", + "i^ushchii`", + "i^ushchyi`", + "i^ushchoi`", + "i^ushchem", + "i^ushchim", + "i^ushchym", + "i^ushchom", + "vshi^ui^u", + "vshi^ai^a", + "shchui^u", + "shchai^a", + "shchoi^u", + "shchei^u", + "emi^ui^u", + "emi^ai^a", + "nni^ui^u", + "nni^ai^a", + "shchimi", + "shchymi", + "shchego", + "shchogo", + "shchemu", + "shchomu", + "shchikh", + "shchykh", + "vshui^u", + "vshai^a", + "vshoi^u", + "vshei^u", + "shchee", + "shchie", + "shchye", + "shchoe", + "shchei`", + "shchii`", + "shchyi`", + "shchoi`", + "shchem", + "shchim", + "shchym", + "shchom", + "vshimi", + "vshymi", + "vshego", + "vshogo", + "vshemu", + "vshomu", + "vshikh", + "vshykh", + "emui^u", + "emai^a", + "emoi^u", + "emei^u", + "nnui^u", + "nnai^a", + "nnoi^u", + "nnei^u", + "vshee", + "vshie", + "vshye", + "vshoe", + "vshei`", + "vshii`", + "vshyi`", + "vshoi`", + "vshem", + "vshim", + "vshym", + "vshom", + "emimi", + "emymi", + "emego", + "emogo", + "ememu", + "emomu", + "emikh", + "emykh", + "nnimi", + "nnymi", + "nnego", + "nnogo", + "nnemu", + "nnomu", + "nnikh", + "nnykh", + "emee", + "emie", + "emye", + "emoe", + "emei`", + "emii`", + "emyi`", + "emoi`", + "emem", + "emim", + "emym", + "emom", + "nnee", + "nnie", + "nnye", + "nnoe", + "nnei`", + "nnii`", + "nnyi`", + "nnoi`", + "nnem", + "nnim", + "nnym", + "nnom", + ): + if ( + rv[-len(suffix) - 3 : -len(suffix)] == "i^a" + or rv[-len(suffix) - 1 : -len(suffix)] == "a" + ): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + adjectival_removed = True + break + else: + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + adjectival_removed = True + break + + if not adjectival_removed: + for suffix in self.__verb_suffixes: + if rv.endswith(suffix): + if suffix in ( + "la", + "na", + "ete", + "i`te", + "li", + "i`", + "l", + "em", + "n", + "lo", + "no", + "et", + "i^ut", + "ny", + "t'", + "esh'", + "nno", + ): + if ( + rv[-len(suffix) - 3 : -len(suffix)] == "i^a" + or rv[-len(suffix) - 1 : -len(suffix)] == "a" + ): + 
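+                                # These verb endings (group 1 in the Snowball
+                                # description) are removed only when preceded by
+                                # "a" or "i^a" (the transliteration of Cyrillic а/я),
+                                # which is what the check above enforces.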
word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + verb_removed = True + break + else: + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + verb_removed = True + break + + if not adjectival_removed and not verb_removed: + for suffix in self.__noun_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # Step 2 + if rv.endswith("i"): + word = word[:-1] + r2 = r2[:-1] + + # Step 3 + for suffix in self.__derivational_suffixes: + if r2.endswith(suffix): + word = word[: -len(suffix)] + break + + # Step 4 + if word.endswith("nn"): + word = word[:-1] + undouble_success = True + + if not undouble_success: + for suffix in self.__superlative_suffixes: + if word.endswith(suffix): + word = word[: -len(suffix)] + superlative_removed = True + break + if word.endswith("nn"): + word = word[:-1] + + if not undouble_success and not superlative_removed: + if word.endswith("'"): + word = word[:-1] + + word = self.__roman_to_cyrillic(word) + + return word + + def __regions_russian(self, word): + """ + Return the regions RV and R2 which are used by the Russian stemmer. + + In any word, RV is the region after the first vowel, + or the end of the word if it contains no vowel. + + R2 is the region after the first non-vowel following + a vowel in R1, or the end of the word if there is no such non-vowel. + + R1 is the region after the first non-vowel following a vowel, + or the end of the word if there is no such non-vowel. + + :param word: The Russian word whose regions RV and R2 are determined. + :type word: str or unicode + :return: the regions RV and R2 for the respective Russian word. + :rtype: tuple + :note: This helper method is invoked by the stem method of the subclass + RussianStemmer. It is not to be invoked directly! + + """ + r1 = "" + r2 = "" + rv = "" + + vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y") + word = word.replace("i^a", "A").replace("i^u", "U").replace("e`", "E") + + for i in range(1, len(word)): + if word[i] not in vowels and word[i - 1] in vowels: + r1 = word[i + 1 :] + break + + for i in range(1, len(r1)): + if r1[i] not in vowels and r1[i - 1] in vowels: + r2 = r1[i + 1 :] + break + + for i in range(len(word)): + if word[i] in vowels: + rv = word[i + 1 :] + break + + r2 = r2.replace("A", "i^a").replace("U", "i^u").replace("E", "e`") + rv = rv.replace("A", "i^a").replace("U", "i^u").replace("E", "e`") + + return (rv, r2) + + def __cyrillic_to_roman(self, word): + """ + Transliterate a Russian word into the Roman alphabet. + + A Russian word whose letters consist of the Cyrillic + alphabet are transliterated into the Roman alphabet + in order to ease the forthcoming stemming process. + + :param word: The word that is transliterated. + :type word: unicode + :return: the transliterated word. + :rtype: unicode + :note: This helper method is invoked by the stem method of the subclass + RussianStemmer. It is not to be invoked directly! 
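+
+        An illustrative example: under the character table below, the
+        Cyrillic input "вперед" is transliterated to "vpered".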
+ + """ + word = ( + word.replace("\u0410", "a") + .replace("\u0430", "a") + .replace("\u0411", "b") + .replace("\u0431", "b") + .replace("\u0412", "v") + .replace("\u0432", "v") + .replace("\u0413", "g") + .replace("\u0433", "g") + .replace("\u0414", "d") + .replace("\u0434", "d") + .replace("\u0415", "e") + .replace("\u0435", "e") + .replace("\u0401", "e") + .replace("\u0451", "e") + .replace("\u0416", "zh") + .replace("\u0436", "zh") + .replace("\u0417", "z") + .replace("\u0437", "z") + .replace("\u0418", "i") + .replace("\u0438", "i") + .replace("\u0419", "i`") + .replace("\u0439", "i`") + .replace("\u041A", "k") + .replace("\u043A", "k") + .replace("\u041B", "l") + .replace("\u043B", "l") + .replace("\u041C", "m") + .replace("\u043C", "m") + .replace("\u041D", "n") + .replace("\u043D", "n") + .replace("\u041E", "o") + .replace("\u043E", "o") + .replace("\u041F", "p") + .replace("\u043F", "p") + .replace("\u0420", "r") + .replace("\u0440", "r") + .replace("\u0421", "s") + .replace("\u0441", "s") + .replace("\u0422", "t") + .replace("\u0442", "t") + .replace("\u0423", "u") + .replace("\u0443", "u") + .replace("\u0424", "f") + .replace("\u0444", "f") + .replace("\u0425", "kh") + .replace("\u0445", "kh") + .replace("\u0426", "t^s") + .replace("\u0446", "t^s") + .replace("\u0427", "ch") + .replace("\u0447", "ch") + .replace("\u0428", "sh") + .replace("\u0448", "sh") + .replace("\u0429", "shch") + .replace("\u0449", "shch") + .replace("\u042A", "''") + .replace("\u044A", "''") + .replace("\u042B", "y") + .replace("\u044B", "y") + .replace("\u042C", "'") + .replace("\u044C", "'") + .replace("\u042D", "e`") + .replace("\u044D", "e`") + .replace("\u042E", "i^u") + .replace("\u044E", "i^u") + .replace("\u042F", "i^a") + .replace("\u044F", "i^a") + ) + + return word + + def __roman_to_cyrillic(self, word): + """ + Transliterate a Russian word back into the Cyrillic alphabet. + + A Russian word formerly transliterated into the Roman alphabet + in order to ease the stemming process, is transliterated back + into the Cyrillic alphabet, its original form. + + :param word: The word that is transliterated. + :type word: str or unicode + :return: word, the transliterated word. + :rtype: unicode + :note: This helper method is invoked by the stem method of the subclass + RussianStemmer. It is not to be invoked directly! + + """ + word = ( + word.replace("i^u", "\u044E") + .replace("i^a", "\u044F") + .replace("shch", "\u0449") + .replace("kh", "\u0445") + .replace("t^s", "\u0446") + .replace("ch", "\u0447") + .replace("e`", "\u044D") + .replace("i`", "\u0439") + .replace("sh", "\u0448") + .replace("k", "\u043A") + .replace("e", "\u0435") + .replace("zh", "\u0436") + .replace("a", "\u0430") + .replace("b", "\u0431") + .replace("v", "\u0432") + .replace("g", "\u0433") + .replace("d", "\u0434") + .replace("e", "\u0435") + .replace("z", "\u0437") + .replace("i", "\u0438") + .replace("l", "\u043B") + .replace("m", "\u043C") + .replace("n", "\u043D") + .replace("o", "\u043E") + .replace("p", "\u043F") + .replace("r", "\u0440") + .replace("s", "\u0441") + .replace("t", "\u0442") + .replace("u", "\u0443") + .replace("f", "\u0444") + .replace("''", "\u044A") + .replace("y", "\u044B") + .replace("'", "\u044C") + ) + + return word + + +class SpanishStemmer(_StandardStemmer): + + """ + The Spanish Snowball stemmer. + + :cvar __vowels: The Spanish vowels. + :type __vowels: unicode + :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm. 
+ :type __step0_suffixes: tuple + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm. + :type __step2a_suffixes: tuple + :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm. + :type __step2b_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Spanish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/spanish/stemmer.html + + """ + + __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xFC" + __step0_suffixes = ( + "selas", + "selos", + "sela", + "selo", + "las", + "les", + "los", + "nos", + "me", + "se", + "la", + "le", + "lo", + ) + __step1_suffixes = ( + "amientos", + "imientos", + "amiento", + "imiento", + "acion", + "aciones", + "uciones", + "adoras", + "adores", + "ancias", + "log\xEDas", + "encias", + "amente", + "idades", + "anzas", + "ismos", + "ables", + "ibles", + "istas", + "adora", + "aci\xF3n", + "antes", + "ancia", + "log\xEDa", + "uci\xf3n", + "encia", + "mente", + "anza", + "icos", + "icas", + "ismo", + "able", + "ible", + "ista", + "osos", + "osas", + "ador", + "ante", + "idad", + "ivas", + "ivos", + "ico", + "ica", + "oso", + "osa", + "iva", + "ivo", + ) + __step2a_suffixes = ( + "yeron", + "yendo", + "yamos", + "yais", + "yan", + "yen", + "yas", + "yes", + "ya", + "ye", + "yo", + "y\xF3", + ) + __step2b_suffixes = ( + "ar\xEDamos", + "er\xEDamos", + "ir\xEDamos", + "i\xE9ramos", + "i\xE9semos", + "ar\xEDais", + "aremos", + "er\xEDais", + "eremos", + "ir\xEDais", + "iremos", + "ierais", + "ieseis", + "asteis", + "isteis", + "\xE1bamos", + "\xE1ramos", + "\xE1semos", + "ar\xEDan", + "ar\xEDas", + "ar\xE9is", + "er\xEDan", + "er\xEDas", + "er\xE9is", + "ir\xEDan", + "ir\xEDas", + "ir\xE9is", + "ieran", + "iesen", + "ieron", + "iendo", + "ieras", + "ieses", + "abais", + "arais", + "aseis", + "\xE9amos", + "ar\xE1n", + "ar\xE1s", + "ar\xEDa", + "er\xE1n", + "er\xE1s", + "er\xEDa", + "ir\xE1n", + "ir\xE1s", + "ir\xEDa", + "iera", + "iese", + "aste", + "iste", + "aban", + "aran", + "asen", + "aron", + "ando", + "abas", + "adas", + "idas", + "aras", + "ases", + "\xEDais", + "ados", + "idos", + "amos", + "imos", + "emos", + "ar\xE1", + "ar\xE9", + "er\xE1", + "er\xE9", + "ir\xE1", + "ir\xE9", + "aba", + "ada", + "ida", + "ara", + "ase", + "\xEDan", + "ado", + "ido", + "\xEDas", + "\xE1is", + "\xE9is", + "\xEDa", + "ad", + "ed", + "id", + "an", + "i\xF3", + "ar", + "er", + "ir", + "as", + "\xEDs", + "en", + "es", + ) + __step3_suffixes = ("os", "a", "e", "o", "\xE1", "\xE9", "\xED", "\xF3") + + def stem(self, word): + """ + Stem a Spanish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. 
+ :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + step1_success = False + + r1, r2 = self._r1r2_standard(word, self.__vowels) + rv = self._rv_standard(word, self.__vowels) + + # STEP 0: Attached pronoun + for suffix in self.__step0_suffixes: + if not (word.endswith(suffix) and rv.endswith(suffix)): + continue + + if ( + rv[: -len(suffix)].endswith( + ( + "ando", + "\xE1ndo", + "ar", + "\xE1r", + "er", + "\xE9r", + "iendo", + "i\xE9ndo", + "ir", + "\xEDr", + ) + ) + ) or ( + rv[: -len(suffix)].endswith("yendo") + and word[: -len(suffix)].endswith("uyendo") + ): + + word = self.__replace_accented(word[: -len(suffix)]) + r1 = self.__replace_accented(r1[: -len(suffix)]) + r2 = self.__replace_accented(r2[: -len(suffix)]) + rv = self.__replace_accented(rv[: -len(suffix)]) + break + + # STEP 1: Standard suffix removal + for suffix in self.__step1_suffixes: + if not word.endswith(suffix): + continue + + if suffix == "amente" and r1.endswith(suffix): + step1_success = True + word = word[:-6] + r2 = r2[:-6] + rv = rv[:-6] + + if r2.endswith("iv"): + word = word[:-2] + r2 = r2[:-2] + rv = rv[:-2] + + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(("os", "ic", "ad")): + word = word[:-2] + rv = rv[:-2] + + elif r2.endswith(suffix): + step1_success = True + if suffix in ( + "adora", + "ador", + "aci\xF3n", + "adoras", + "adores", + "acion", + "aciones", + "ante", + "antes", + "ancia", + "ancias", + ): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith("ic"): + word = word[:-2] + rv = rv[:-2] + + elif suffix in ("log\xEDa", "log\xEDas"): + word = suffix_replace(word, suffix, "log") + rv = suffix_replace(rv, suffix, "log") + + elif suffix in ("uci\xF3n", "uciones"): + word = suffix_replace(word, suffix, "u") + rv = suffix_replace(rv, suffix, "u") + + elif suffix in ("encia", "encias"): + word = suffix_replace(word, suffix, "ente") + rv = suffix_replace(rv, suffix, "ente") + + elif suffix == "mente": + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + if r2.endswith(("ante", "able", "ible")): + word = word[:-4] + rv = rv[:-4] + + elif suffix in ("idad", "idades"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + + for pre_suff in ("abil", "ic", "iv"): + if r2.endswith(pre_suff): + word = word[: -len(pre_suff)] + rv = rv[: -len(pre_suff)] + + elif suffix in ("ivo", "iva", "ivos", "ivas"): + word = word[: -len(suffix)] + r2 = r2[: -len(suffix)] + rv = rv[: -len(suffix)] + if r2.endswith("at"): + word = word[:-2] + rv = rv[:-2] + else: + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2a: Verb suffixes beginning 'y' + if not step1_success: + for suffix in self.__step2a_suffixes: + if rv.endswith(suffix) and word[-len(suffix) - 1 : -len(suffix)] == "u": + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + break + + # STEP 2b: Other verb suffixes + for suffix in self.__step2b_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + rv = rv[: -len(suffix)] + if suffix in ("en", "es", "\xE9is", "emos"): + if word.endswith("gu"): + word = word[:-1] + + if rv.endswith("gu"): + rv = rv[:-1] + break + + # STEP 3: Residual suffix + for suffix in self.__step3_suffixes: + if rv.endswith(suffix): + word = word[: -len(suffix)] + if suffix in ("e", "\xE9"): + rv = rv[: -len(suffix)] + + if word[-2:] == "gu" and rv.endswith("u"): + word = word[:-1] + break + + word = 
self.__replace_accented(word) + + return word + + def __replace_accented(self, word): + """ + Replaces all accented letters on a word with their non-accented + counterparts. + + :param word: A spanish word, with or without accents + :type word: str or unicode + :return: a word with the accented letters (á, é, í, ó, ú) replaced with + their non-accented counterparts (a, e, i, o, u) + :rtype: str or unicode + """ + return ( + word.replace("\xE1", "a") + .replace("\xE9", "e") + .replace("\xED", "i") + .replace("\xF3", "o") + .replace("\xFA", "u") + ) + + +class SwedishStemmer(_ScandinavianStemmer): + + """ + The Swedish Snowball stemmer. + + :cvar __vowels: The Swedish vowels. + :type __vowels: unicode + :cvar __s_ending: Letters that may directly appear before a word final 's'. + :type __s_ending: unicode + :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm. + :type __step1_suffixes: tuple + :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm. + :type __step2_suffixes: tuple + :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm. + :type __step3_suffixes: tuple + :note: A detailed description of the Swedish + stemming algorithm can be found under + http://snowball.tartarus.org/algorithms/swedish/stemmer.html + + """ + + __vowels = "aeiouy\xE4\xE5\xF6" + __s_ending = "bcdfghjklmnoprtvy" + __step1_suffixes = ( + "heterna", + "hetens", + "heter", + "heten", + "anden", + "arnas", + "ernas", + "ornas", + "andes", + "andet", + "arens", + "arna", + "erna", + "orna", + "ande", + "arne", + "aste", + "aren", + "ades", + "erns", + "ade", + "are", + "ern", + "ens", + "het", + "ast", + "ad", + "en", + "ar", + "er", + "or", + "as", + "es", + "at", + "a", + "e", + "s", + ) + __step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt") + __step3_suffixes = ("fullt", "l\xF6st", "els", "lig", "ig") + + def stem(self, word): + """ + Stem a Swedish word and return the stemmed form. + + :param word: The word that is stemmed. + :type word: str or unicode + :return: The stemmed form. + :rtype: unicode + + """ + word = word.lower() + + if word in self.stopwords: + return word + + r1 = self._r1_scandinavian(word, self.__vowels) + + # STEP 1 + for suffix in self.__step1_suffixes: + if r1.endswith(suffix): + if suffix == "s": + if word[-2] in self.__s_ending: + word = word[:-1] + r1 = r1[:-1] + else: + word = word[: -len(suffix)] + r1 = r1[: -len(suffix)] + break + + # STEP 2 + for suffix in self.__step2_suffixes: + if r1.endswith(suffix): + word = word[:-1] + r1 = r1[:-1] + break + + # STEP 3 + for suffix in self.__step3_suffixes: + if r1.endswith(suffix): + if suffix in ("els", "lig", "ig"): + word = word[: -len(suffix)] + elif suffix in ("fullt", "l\xF6st"): + word = word[:-1] + break + + return word + + +def demo(): + """ + This function provides a demonstration of the Snowball stemmers. + + After invoking this function and specifying a language, + it stems an excerpt of the Universal Declaration of Human Rights + (which is a part of the NLTK corpus collection) and then prints + out the original and the stemmed text. 
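+
+    The demo is interactive: it reads the language name from standard input
+    via ``input()``, so it has to be run from a console session.  For a quick
+    non-interactive check, a stemmer can also be used directly, for example
+    ``SnowballStemmer("english").stem("running")``, which is expected to
+    return ``"run"``.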
+ + """ + + from nltk.corpus import udhr + + udhr_corpus = { + "arabic": "Arabic_Alarabia-Arabic", + "danish": "Danish_Dansk-Latin1", + "dutch": "Dutch_Nederlands-Latin1", + "english": "English-Latin1", + "finnish": "Finnish_Suomi-Latin1", + "french": "French_Francais-Latin1", + "german": "German_Deutsch-Latin1", + "hungarian": "Hungarian_Magyar-UTF8", + "italian": "Italian_Italiano-Latin1", + "norwegian": "Norwegian-Latin1", + "porter": "English-Latin1", + "portuguese": "Portuguese_Portugues-Latin1", + "romanian": "Romanian_Romana-Latin2", + "russian": "Russian-UTF8", + "spanish": "Spanish-Latin1", + "swedish": "Swedish_Svenska-Latin1", + } + + print("\n") + print("******************************") + print("Demo for the Snowball stemmers") + print("******************************") + + while True: + + language = input( + "Please enter the name of the language " + + "to be demonstrated\n" + + "/".join(SnowballStemmer.languages) + + "\n" + + "(enter 'exit' in order to leave): " + ) + + if language == "exit": + break + + if language not in SnowballStemmer.languages: + print( + "\nOops, there is no stemmer for this language. " + + "Please try again.\n" + ) + continue + + stemmer = SnowballStemmer(language) + excerpt = udhr.words(udhr_corpus[language])[:300] + + stemmed = " ".join(stemmer.stem(word) for word in excerpt) + stemmed = re.sub(r"(.{,70})\s", r"\1\n", stemmed + " ").rstrip() + excerpt = " ".join(excerpt) + excerpt = re.sub(r"(.{,70})\s", r"\1\n", excerpt + " ").rstrip() + + print("\n") + print("-" * 70) + print("ORIGINAL".center(70)) + print(excerpt) + print("\n\n") + print("STEMMED RESULTS".center(70)) + print(stemmed) + print("-" * 70) + print("\n") diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/stem/wordnet.py b/llmeval-env/lib/python3.10/site-packages/nltk/stem/wordnet.py new file mode 100644 index 0000000000000000000000000000000000000000..0ccb056f7e2172f46d03236d76942c3080d2f107 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/stem/wordnet.py @@ -0,0 +1,49 @@ +# Natural Language Toolkit: WordNet stemmer interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus import wordnet as wn + + +class WordNetLemmatizer: + """ + WordNet Lemmatizer + + Lemmatize using WordNet's built-in morphy function. + Returns the input word unchanged if it cannot be found in WordNet. + + >>> from nltk.stem import WordNetLemmatizer + >>> wnl = WordNetLemmatizer() + >>> print(wnl.lemmatize('dogs')) + dog + >>> print(wnl.lemmatize('churches')) + church + >>> print(wnl.lemmatize('aardwolves')) + aardwolf + >>> print(wnl.lemmatize('abaci')) + abacus + >>> print(wnl.lemmatize('hardrock')) + hardrock + """ + + def lemmatize(self, word: str, pos: str = "n") -> str: + """Lemmatize `word` using WordNet's built-in morphy function. + Returns the input word unchanged if it cannot be found in WordNet. + + :param word: The input word to lemmatize. + :type word: str + :param pos: The Part Of Speech tag. Valid options are `"n"` for nouns, + `"v"` for verbs, `"a"` for adjectives, `"r"` for adverbs and `"s"` + for satellite adjectives. + :param pos: str + :return: The lemma of `word`, for the given `pos`. 
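+
+        A short illustration of how `pos` changes the lookup (assuming the
+        WordNet data is installed):
+
+        >>> from nltk.stem import WordNetLemmatizer
+        >>> print(WordNetLemmatizer().lemmatize("are", pos="v"))
+        be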
+ """ + lemmas = wn._morphy(word, pos) + return min(lemmas, key=len) if lemmas else word + + def __repr__(self): + return "" diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0059c1e19003bc946f699ca5895f9932ed4ec341 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__init__.py @@ -0,0 +1,32 @@ +# Natural Language Toolkit: Machine Translation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird , Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Experimental features for machine translation. +These interfaces are prone to change. + +isort:skip_file +""" + +from nltk.translate.api import AlignedSent, Alignment, PhraseTable +from nltk.translate.ibm_model import IBMModel +from nltk.translate.ibm1 import IBMModel1 +from nltk.translate.ibm2 import IBMModel2 +from nltk.translate.ibm3 import IBMModel3 +from nltk.translate.ibm4 import IBMModel4 +from nltk.translate.ibm5 import IBMModel5 +from nltk.translate.bleu_score import sentence_bleu as bleu +from nltk.translate.ribes_score import sentence_ribes as ribes +from nltk.translate.meteor_score import meteor_score as meteor +from nltk.translate.metrics import alignment_error_rate +from nltk.translate.stack_decoder import StackDecoder +from nltk.translate.nist_score import sentence_nist as nist +from nltk.translate.chrf_score import sentence_chrf as chrf +from nltk.translate.gale_church import trace +from nltk.translate.gdfa import grow_diag_final_and +from nltk.translate.gleu_score import sentence_gleu as gleu +from nltk.translate.phrase_based import extract diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58c1d4db5aecc2043789cde688af3ed7fc791f87 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0db716d109c4fda22a2f1f01faebb99439d1dbd5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/bleu_score.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/bleu_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..067718dc33bbb7fc3d3a673385c10b3925fddb72 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/bleu_score.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..091c572eca0bbb67ab15b2850f4c29fca087257d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gale_church.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gale_church.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b1c0691594de07a093c8688817645fac8e72323 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gale_church.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gdfa.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gdfa.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2cfbd250148c352a8e69f1ad43a4251a07b9392 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gdfa.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gleu_score.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gleu_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21c889d2618494dd86477444ffc28c30ddeab50e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/gleu_score.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9e55ef3eac3928ef86a32cb5326256ac0b42863 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm2.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fddde9497a421645505c4f95a0b0ec913e9b277d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm2.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1497d206dac9ba9ee9c2f67e5a40f45d6f230851 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm4.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm4.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac37ae23d02ce8569088ce22b60d0dfa31183b5d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm4.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cd116a9fb0d4e689871e33a1b1f9c13390c8a1b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm_model.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cd1c4164de311ead5c97c6d94a5b780779498ae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm_model.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/meteor_score.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/meteor_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e0c21fa584bccb117ec615015496a8b0a645515 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/meteor_score.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/metrics.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d32be4212625b7a573d962db3f034b5d024f39b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/metrics.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/nist_score.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/nist_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06a61d71b75bbd5ab223b94a87e76e8007ac58bd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/nist_score.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/phrase_based.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/phrase_based.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3176a8480cf7482c4de8df279a2aaa5bc1cc34e0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/phrase_based.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c749b908edf8f0baa0a8be54746e53c480654dc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/stack_decoder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/stack_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4af45df3dcac8ee52ec883a27de6ad8df69fa0b9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/translate/__pycache__/stack_decoder.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/api.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/api.py new file mode 100644 index 0000000000000000000000000000000000000000..cf00f2b52f00cd7bf6df82d9b8d4557bb0592079 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/api.py @@ -0,0 +1,334 @@ +# Natural Language Toolkit: API for alignment and translation objects +# +# 
Copyright (C) 2001-2023 NLTK Project +# Author: Will Zhang +# Guan Gui +# Steven Bird +# Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +import subprocess +from collections import namedtuple + + +class AlignedSent: + """ + Return an aligned sentence object, which encapsulates two sentences + along with an ``Alignment`` between them. + + Typically used in machine translation to represent a sentence and + its translation. + + >>> from nltk.translate import AlignedSent, Alignment + >>> algnsent = AlignedSent(['klein', 'ist', 'das', 'Haus'], + ... ['the', 'house', 'is', 'small'], Alignment.fromstring('0-3 1-2 2-0 3-1')) + >>> algnsent.words + ['klein', 'ist', 'das', 'Haus'] + >>> algnsent.mots + ['the', 'house', 'is', 'small'] + >>> algnsent.alignment + Alignment([(0, 3), (1, 2), (2, 0), (3, 1)]) + >>> from nltk.corpus import comtrans + >>> print(comtrans.aligned_sents()[54]) + 'So why should EU arm...'> + >>> print(comtrans.aligned_sents()[54].alignment) + 0-0 0-1 1-0 2-2 3-4 3-5 4-7 5-8 6-3 7-9 8-9 9-10 9-11 10-12 11-6 12-6 13-13 + + :param words: Words in the target language sentence + :type words: list(str) + :param mots: Words in the source language sentence + :type mots: list(str) + :param alignment: Word-level alignments between ``words`` and ``mots``. + Each alignment is represented as a 2-tuple (words_index, mots_index). + :type alignment: Alignment + """ + + def __init__(self, words, mots, alignment=None): + self._words = words + self._mots = mots + if alignment is None: + self.alignment = Alignment([]) + else: + assert type(alignment) is Alignment + self.alignment = alignment + + @property + def words(self): + return self._words + + @property + def mots(self): + return self._mots + + def _get_alignment(self): + return self._alignment + + def _set_alignment(self, alignment): + _check_alignment(len(self.words), len(self.mots), alignment) + self._alignment = alignment + + alignment = property(_get_alignment, _set_alignment) + + def __repr__(self): + """ + Return a string representation for this ``AlignedSent``. + + :rtype: str + """ + words = "[%s]" % (", ".join("'%s'" % w for w in self._words)) + mots = "[%s]" % (", ".join("'%s'" % w for w in self._mots)) + + return f"AlignedSent({words}, {mots}, {self._alignment!r})" + + def _to_dot(self): + """ + Dot representation of the aligned sentence + """ + s = "graph align {\n" + s += "node[shape=plaintext]\n" + + # Declare node + for w in self._words: + s += f'"{w}_source" [label="{w}"] \n' + + for w in self._mots: + s += f'"{w}_target" [label="{w}"] \n' + + # Alignment + for u, v in self._alignment: + s += f'"{self._words[u]}_source" -- "{self._mots[v]}_target" \n' + + # Connect the source words + for i in range(len(self._words) - 1): + s += '"{}_source" -- "{}_source" [style=invis]\n'.format( + self._words[i], + self._words[i + 1], + ) + + # Connect the target words + for i in range(len(self._mots) - 1): + s += '"{}_target" -- "{}_target" [style=invis]\n'.format( + self._mots[i], + self._mots[i + 1], + ) + + # Put it in the same rank + s += "{rank = same; %s}\n" % (" ".join('"%s_source"' % w for w in self._words)) + s += "{rank = same; %s}\n" % (" ".join('"%s_target"' % w for w in self._mots)) + + s += "}" + + return s + + def _repr_svg_(self): + """ + Ipython magic : show SVG representation of this ``AlignedSent``. 
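+
+        Requires the Graphviz ``dot`` executable to be available on the
+        system path; an exception is raised if it cannot be found.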
+ """ + dot_string = self._to_dot().encode("utf8") + output_format = "svg" + try: + process = subprocess.Popen( + ["dot", "-T%s" % output_format], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + except OSError as e: + raise Exception("Cannot find the dot binary from Graphviz package") from e + out, err = process.communicate(dot_string) + + return out.decode("utf8") + + def __str__(self): + """ + Return a human-readable string representation for this ``AlignedSent``. + + :rtype: str + """ + source = " ".join(self._words)[:20] + "..." + target = " ".join(self._mots)[:20] + "..." + return f" '{target}'>" + + def invert(self): + """ + Return the aligned sentence pair, reversing the directionality + + :rtype: AlignedSent + """ + return AlignedSent(self._mots, self._words, self._alignment.invert()) + + +class Alignment(frozenset): + """ + A storage class for representing alignment between two sequences, s1, s2. + In general, an alignment is a set of tuples of the form (i, j, ...) + representing an alignment between the i-th element of s1 and the + j-th element of s2. Tuples are extensible (they might contain + additional data, such as a boolean to indicate sure vs possible alignments). + + >>> from nltk.translate import Alignment + >>> a = Alignment([(0, 0), (0, 1), (1, 2), (2, 2)]) + >>> a.invert() + Alignment([(0, 0), (1, 0), (2, 1), (2, 2)]) + >>> print(a.invert()) + 0-0 1-0 2-1 2-2 + >>> a[0] + [(0, 1), (0, 0)] + >>> a.invert()[2] + [(2, 1), (2, 2)] + >>> b = Alignment([(0, 0), (0, 1)]) + >>> b.issubset(a) + True + >>> c = Alignment.fromstring('0-0 0-1') + >>> b == c + True + """ + + def __new__(cls, pairs): + self = frozenset.__new__(cls, pairs) + self._len = max(p[0] for p in self) if self != frozenset([]) else 0 + self._index = None + return self + + @classmethod + def fromstring(cls, s): + """ + Read a giza-formatted string and return an Alignment object. + + >>> Alignment.fromstring('0-0 2-1 9-2 21-3 10-4 7-5') + Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10, 4), (21, 3)]) + + :type s: str + :param s: the positional alignments in giza format + :rtype: Alignment + :return: An Alignment object corresponding to the string representation ``s``. + """ + + return Alignment([_giza2pair(a) for a in s.split()]) + + def __getitem__(self, key): + """ + Look up the alignments that map from a given index or slice. + """ + if not self._index: + self._build_index() + return self._index.__getitem__(key) + + def invert(self): + """ + Return an Alignment object, being the inverted mapping. + """ + return Alignment(((p[1], p[0]) + p[2:]) for p in self) + + def range(self, positions=None): + """ + Work out the range of the mapping from the given positions. + If no positions are specified, compute the range of the entire mapping. + """ + image = set() + if not self._index: + self._build_index() + if not positions: + positions = list(range(len(self._index))) + for p in positions: + image.update(f for _, f in self._index[p]) + return sorted(image) + + def __repr__(self): + """ + Produce a Giza-formatted string representing the alignment. + """ + return "Alignment(%r)" % sorted(self) + + def __str__(self): + """ + Produce a Giza-formatted string representing the alignment. + """ + return " ".join("%d-%d" % p[:2] for p in sorted(self)) + + def _build_index(self): + """ + Build a list self._index such that self._index[i] is a list + of the alignments originating from word i. 
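+
+        For example, for ``Alignment([(0, 0), (0, 1), (1, 2), (2, 2)])`` the
+        index is ``[[(0, 0), (0, 1)], [(1, 2)], [(2, 2)]]`` (the order of the
+        pairs within each bucket is not significant).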
+ """ + self._index = [[] for _ in range(self._len + 1)] + for p in self: + self._index[p[0]].append(p) + + +def _giza2pair(pair_string): + i, j = pair_string.split("-") + return int(i), int(j) + + +def _naacl2pair(pair_string): + i, j, p = pair_string.split("-") + return int(i), int(j) + + +def _check_alignment(num_words, num_mots, alignment): + """ + Check whether the alignments are legal. + + :param num_words: the number of source language words + :type num_words: int + :param num_mots: the number of target language words + :type num_mots: int + :param alignment: alignment to be checked + :type alignment: Alignment + :raise IndexError: if alignment falls outside the sentence + """ + + assert type(alignment) is Alignment + + if not all(0 <= pair[0] < num_words for pair in alignment): + raise IndexError("Alignment is outside boundary of words") + if not all(pair[1] is None or 0 <= pair[1] < num_mots for pair in alignment): + raise IndexError("Alignment is outside boundary of mots") + + +PhraseTableEntry = namedtuple("PhraseTableEntry", ["trg_phrase", "log_prob"]) + + +class PhraseTable: + """ + In-memory store of translations for a given phrase, and the log + probability of the those translations + """ + + def __init__(self): + self.src_phrases = dict() + + def translations_for(self, src_phrase): + """ + Get the translations for a source language phrase + + :param src_phrase: Source language phrase of interest + :type src_phrase: tuple(str) + + :return: A list of target language phrases that are translations + of ``src_phrase``, ordered in decreasing order of + likelihood. Each list element is a tuple of the target + phrase and its log probability. + :rtype: list(PhraseTableEntry) + """ + return self.src_phrases[src_phrase] + + def add(self, src_phrase, trg_phrase, log_prob): + """ + :type src_phrase: tuple(str) + :type trg_phrase: tuple(str) + + :param log_prob: Log probability that given ``src_phrase``, + ``trg_phrase`` is its translation + :type log_prob: float + """ + entry = PhraseTableEntry(trg_phrase=trg_phrase, log_prob=log_prob) + if src_phrase not in self.src_phrases: + self.src_phrases[src_phrase] = [] + self.src_phrases[src_phrase].append(entry) + self.src_phrases[src_phrase].sort(key=lambda e: e.log_prob, reverse=True) + + def __contains__(self, src_phrase): + return src_phrase in self.src_phrases diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/bleu_score.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/bleu_score.py new file mode 100644 index 0000000000000000000000000000000000000000..1b2cc949db964b029f4e7324cbbc7236d3ff9248 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/bleu_score.py @@ -0,0 +1,685 @@ +# Natural Language Toolkit: BLEU Score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim +# Contributors: Björn Mattsson, Dmitrijs Milajevs, Liling Tan +# URL: +# For license information, see LICENSE.TXT + +"""BLEU score implementation.""" + +import math +import sys +import warnings +from collections import Counter +from fractions import Fraction + +from nltk.util import ngrams + + +def sentence_bleu( + references, + hypothesis, + weights=(0.25, 0.25, 0.25, 0.25), + smoothing_function=None, + auto_reweigh=False, +): + """ + Calculate BLEU score (Bilingual Evaluation Understudy) from + Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. + "BLEU: a method for automatic evaluation of machine translation." + In Proceedings of ACL. 
https://www.aclweb.org/anthology/P02-1040.pdf + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', + ... 'forever', 'hearing', 'the', 'activity', 'guidebook', + ... 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS + 0.5045... + + If there is no ngrams overlap for any order of n-grams, BLEU returns the + value 0. This is because the precision for the order of n-grams without + overlap is 0, and the geometric mean in the final BLEU score computation + multiplies the 0 with the precision of other n-grams. This results in 0 + (independently of the precision of the other n-gram orders). The following + example has zero 3-gram and 4-gram overlaps: + + >>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS + 0.0 + + To avoid this harsh behaviour when no ngram overlaps are found a smoothing + function can be used. + + >>> chencherry = SmoothingFunction() + >>> sentence_bleu([reference1, reference2, reference3], hypothesis2, + ... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS + 0.0370... + + The default BLEU calculates a score for up to 4-grams using uniform + weights (this is called BLEU-4). To evaluate your translations with + higher/lower order ngrams, use customized weights. E.g. when accounting + for up to 5-grams with uniform weights (this is called BLEU-5) use: + + >>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.) + >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS + 0.3920... + + Multiple BLEU scores can be computed at once, by supplying a list of weights. + E.g. for computing BLEU-2, BLEU-3 *and* BLEU-4 in one computation, use: + >>> weights = [ + ... (1./2., 1./2.), + ... (1./3., 1./3., 1./3.), + ... (1./4., 1./4., 1./4., 1./4.) + ... ] + >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS + [0.7453..., 0.6240..., 0.5045...] + + :param references: reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights) + :type weights: tuple(float) / list(tuple(float)) + :param smoothing_function: + :type smoothing_function: SmoothingFunction + :param auto_reweigh: Option to re-normalize the weights uniformly. + :type auto_reweigh: bool + :return: The sentence-level BLEU score. Returns a list if multiple weights were supplied. 
+ :rtype: float / list(float) + """ + return corpus_bleu( + [references], [hypothesis], weights, smoothing_function, auto_reweigh + ) + + +def corpus_bleu( + list_of_references, + hypotheses, + weights=(0.25, 0.25, 0.25, 0.25), + smoothing_function=None, + auto_reweigh=False, +): + """ + Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all + the hypotheses and their respective references. + + Instead of averaging the sentence level BLEU scores (i.e. macro-average + precision), the original BLEU metric (Papineni et al. 2002) accounts for + the micro-average precision (i.e. summing the numerators and denominators + for each hypothesis-reference(s) pairs before the division). + + >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', + ... 'interested', 'in', 'world', 'history'] + >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', + ... 'because', 'he', 'read', 'the', 'book'] + + >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] + >>> hypotheses = [hyp1, hyp2] + >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS + 0.5920... + + The example below show that corpus_bleu() is different from averaging + sentence_bleu() for hypotheses + + >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1) + >>> score2 = sentence_bleu([ref2a], hyp2) + >>> (score1 + score2) / 2 # doctest: +ELLIPSIS + 0.6223... + + Custom weights may be supplied to fine-tune the BLEU score further. + A tuple of float weights for unigrams, bigrams, trigrams and so on can be given. + >>> weights = (0.1, 0.3, 0.5, 0.1) + >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS + 0.5818... + + This particular weight gave extra value to trigrams. + Furthermore, multiple weights can be given, resulting in multiple BLEU scores. + >>> weights = [ + ... (0.5, 0.5), + ... (0.333, 0.333, 0.334), + ... (0.25, 0.25, 0.25, 0.25), + ... (0.2, 0.2, 0.2, 0.2, 0.2) + ... ] + >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS + [0.8242..., 0.7067..., 0.5920..., 0.4719...] + + :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses + :type list_of_references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights) + :type weights: tuple(float) / list(tuple(float)) + :param smoothing_function: + :type smoothing_function: SmoothingFunction + :param auto_reweigh: Option to re-normalize the weights uniformly. + :type auto_reweigh: bool + :return: The corpus-level BLEU score. + :rtype: float + """ + # Before proceeding to compute BLEU, perform sanity checks. + + p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches. 
+ p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref. + hyp_lengths, ref_lengths = 0, 0 + + assert len(list_of_references) == len(hypotheses), ( + "The number of hypotheses and their reference(s) should be the " "same " + ) + + try: + weights[0][0] + except TypeError: + weights = [weights] + max_weight_length = max(len(weight) for weight in weights) + + # Iterate through each hypothesis and their corresponding references. + for references, hypothesis in zip(list_of_references, hypotheses): + # For each order of ngram, calculate the numerator and + # denominator for the corpus-level modified precision. + for i in range(1, max_weight_length + 1): + p_i = modified_precision(references, hypothesis, i) + p_numerators[i] += p_i.numerator + p_denominators[i] += p_i.denominator + + # Calculate the hypothesis length and the closest reference length. + # Adds them to the corpus-level hypothesis and reference counts. + hyp_len = len(hypothesis) + hyp_lengths += hyp_len + ref_lengths += closest_ref_length(references, hyp_len) + + # Calculate corpus-level brevity penalty. + bp = brevity_penalty(ref_lengths, hyp_lengths) + + # Collects the various precision values for the different ngram orders. + p_n = [ + Fraction(p_numerators[i], p_denominators[i], _normalize=False) + for i in range(1, max_weight_length + 1) + ] + + # Returns 0 if there's no matching n-grams + # We only need to check for p_numerators[1] == 0, since if there's + # no unigrams, there won't be any higher order ngrams. + if p_numerators[1] == 0: + return 0 if len(weights) == 1 else [0] * len(weights) + + # If there's no smoothing, set use method0 from SmoothinFunction class. + if not smoothing_function: + smoothing_function = SmoothingFunction().method0 + # Smoothen the modified precision. + # Note: smoothing_function() may convert values into floats; + # it tries to retain the Fraction object as much as the + # smoothing method allows. + p_n = smoothing_function( + p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths + ) + + bleu_scores = [] + for weight in weights: + # Uniformly re-weighting based on maximum hypothesis lengths if largest + # order of n-grams < 4 and weights is set at default. + if auto_reweigh: + if hyp_lengths < 4 and weight == (0.25, 0.25, 0.25, 0.25): + weight = (1 / hyp_lengths,) * hyp_lengths + + s = (w_i * math.log(p_i) for w_i, p_i in zip(weight, p_n) if p_i > 0) + s = bp * math.exp(math.fsum(s)) + bleu_scores.append(s) + return bleu_scores[0] if len(weights) == 1 else bleu_scores + + +def modified_precision(references, hypothesis, n): + """ + Calculate modified ngram precision. + + The normal precision method may lead to some wrong translations with + high-precision, e.g., the translation, in which a word of reference + repeats several times, has very high precision. + + This function only returns the Fraction object that contains the numerator + and denominator necessary to calculate the corpus-level precision. + To calculate the modified precision for a single pair of hypothesis and + references, cast the Fraction object into a float. + + The famous "the the the ... " example shows that you can get BLEU precision + by duplicating high frequency words. + + >>> reference1 = 'the cat is on the mat'.split() + >>> reference2 = 'there is a cat on the mat'.split() + >>> hypothesis1 = 'the the the the the the the'.split() + >>> references = [reference1, reference2] + >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS + 0.2857... 
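+
+    Here the unigram "the" occurs at most twice in any single reference, so
+    its hypothesis count of 7 is clipped to 2, giving 2/7.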
+ + In the modified n-gram precision, a reference word will be considered + exhausted after a matching hypothesis word is identified, e.g. + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', + ... 'forever', 'heed', 'Party', 'commands'] + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + >>> hypothesis = 'of the'.split() + >>> references = [reference1, reference2, reference3] + >>> float(modified_precision(references, hypothesis, n=1)) + 1.0 + >>> float(modified_precision(references, hypothesis, n=2)) + 1.0 + + An example of a normal machine translation hypothesis: + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', + ... 'forever', 'hearing', 'the', 'activity', 'guidebook', + ... 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', + ... 'forever', 'heed', 'Party', 'commands'] + + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + >>> references = [reference1, reference2, reference3] + >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS + 0.9444... + >>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS + 0.5714... + >>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS + 0.5882352941176471 + >>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS + 0.07692... + + + :param references: A list of reference translations. + :type references: list(list(str)) + :param hypothesis: A hypothesis translation. + :type hypothesis: list(str) + :param n: The ngram order. + :type n: int + :return: BLEU's modified precision for the nth order ngram. + :rtype: Fraction + """ + # Extracts all ngrams in hypothesis + # Set an empty Counter if hypothesis is empty. + counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter() + # Extract a union of references' counts. + # max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references]) + max_counts = {} + for reference in references: + reference_counts = ( + Counter(ngrams(reference, n)) if len(reference) >= n else Counter() + ) + for ngram in counts: + max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram]) + + # Assigns the intersection between hypothesis and references' counts. + clipped_counts = { + ngram: min(count, max_counts[ngram]) for ngram, count in counts.items() + } + + numerator = sum(clipped_counts.values()) + # Ensures that denominator is minimum 1 to avoid ZeroDivisionError. + # Usually this happens when the ngram order is > len(reference). 
+ denominator = max(1, sum(counts.values())) + + return Fraction(numerator, denominator, _normalize=False) + + +def closest_ref_length(references, hyp_len): + """ + This function finds the reference that is the closest length to the + hypothesis. The closest reference length is referred to as *r* variable + from the brevity penalty formula in Papineni et. al. (2002) + + :param references: A list of reference translations. + :type references: list(list(str)) + :param hyp_len: The length of the hypothesis. + :type hyp_len: int + :return: The length of the reference that's closest to the hypothesis. + :rtype: int + """ + ref_lens = (len(reference) for reference in references) + closest_ref_len = min( + ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len) + ) + return closest_ref_len + + +def brevity_penalty(closest_ref_len, hyp_len): + """ + Calculate brevity penalty. + + As the modified n-gram precision still has the problem from the short + length sentence, brevity penalty is used to modify the overall BLEU + score according to length. + + An example from the paper. There are three references with length 12, 15 + and 17. And a concise hypothesis of the length 12. The brevity penalty is 1. + + >>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12 + >>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15 + >>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17 + >>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12 + >>> references = [reference1, reference2, reference3] + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) + 1.0 + + In case a hypothesis translation is shorter than the references, penalty is + applied. + + >>> references = [['a'] * 28, ['a'] * 28] + >>> hypothesis = ['a'] * 12 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) + 0.2635971381157267 + + The length of the closest reference is used to compute the penalty. If the + length of a hypothesis is 12, and the reference lengths are 13 and 2, the + penalty is applied because the hypothesis length (12) is less then the + closest reference length (13). + + >>> references = [['a'] * 13, ['a'] * 2] + >>> hypothesis = ['a'] * 12 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS + 0.9200... + + The brevity penalty doesn't depend on reference order. More importantly, + when two reference sentences are at the same distance, the shortest + reference sentence length is used. + + >>> references = [['a'] * 13, ['a'] * 11] + >>> hypothesis = ['a'] * 12 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> bp1 = brevity_penalty(closest_ref_len, hyp_len) + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(reversed(references), hyp_len) + >>> bp2 = brevity_penalty(closest_ref_len, hyp_len) + >>> bp1 == bp2 == 1 + True + + A test example from mteval-v13a.pl (starting from the line 705): + + >>> references = [['a'] * 11, ['a'] * 8] + >>> hypothesis = ['a'] * 7 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS + 0.8668... 
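+
+    Here the closest reference length is 8 and the hypothesis length is 7,
+    so the penalty is exp(1 - 8/7) = exp(-1/7), i.e. the 0.8668... shown above.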
+ + >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7] + >>> hypothesis = ['a'] * 7 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) + 1.0 + + :param hyp_len: The length of the hypothesis for a single sentence OR the + sum of all the hypotheses' lengths for a corpus + :type hyp_len: int + :param closest_ref_len: The length of the closest reference for a single + hypothesis OR the sum of all the closest references for every hypotheses. + :type closest_ref_len: int + :return: BLEU's brevity penalty. + :rtype: float + """ + if hyp_len > closest_ref_len: + return 1 + # If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0 + elif hyp_len == 0: + return 0 + else: + return math.exp(1 - closest_ref_len / hyp_len) + + +class SmoothingFunction: + """ + This is an implementation of the smoothing techniques + for segment-level BLEU scores that was presented in + Boxing Chen and Collin Cherry (2014) A Systematic Comparison of + Smoothing Techniques for Sentence-Level BLEU. In WMT14. + http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf + """ + + def __init__(self, epsilon=0.1, alpha=5, k=5): + """ + This will initialize the parameters required for the various smoothing + techniques, the default values are set to the numbers used in the + experiments from Chen and Cherry (2014). + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', + ... 'that', 'the', 'military', 'always', 'obeys', 'the', + ... 'commands', 'of', 'the', 'party'] + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', + ... 'that', 'the', 'military', 'will', 'forever', 'heed', + ... 'Party', 'commands'] + + >>> chencherry = SmoothingFunction() + >>> print(sentence_bleu([reference1], hypothesis1)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method0)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method1)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method2)) # doctest: +ELLIPSIS + 0.4452... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method3)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method4)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method5)) # doctest: +ELLIPSIS + 0.4905... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method6)) # doctest: +ELLIPSIS + 0.4135... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method7)) # doctest: +ELLIPSIS + 0.4905... + + :param epsilon: the epsilon value use in method 1 + :type epsilon: float + :param alpha: the alpha value use in method 6 + :type alpha: int + :param k: the k value use in method 4 + :type k: int + """ + self.epsilon = epsilon + self.alpha = alpha + self.k = k + + def method0(self, p_n, *args, **kwargs): + """ + No smoothing. 
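+        If an n-gram order has a zero match count, a warning is issued and
+        that precision value is replaced by ``sys.float_info.min`` so that
+        the geometric mean in log space stays defined and the overall BLEU
+        score evaluates to (effectively) 0.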
+ """ + p_n_new = [] + for i, p_i in enumerate(p_n): + if p_i.numerator != 0: + p_n_new.append(p_i) + else: + _msg = str( + "\nThe hypothesis contains 0 counts of {}-gram overlaps.\n" + "Therefore the BLEU score evaluates to 0, independently of\n" + "how many N-gram overlaps of lower order it contains.\n" + "Consider using lower n-gram order or use " + "SmoothingFunction()" + ).format(i + 1) + warnings.warn(_msg) + # When numerator==0 where denonminator==0 or !=0, the result + # for the precision score should be equal to 0 or undefined. + # Due to BLEU geometric mean computation in logarithm space, + # we we need to take the return sys.float_info.min such that + # math.log(sys.float_info.min) returns a 0 precision score. + p_n_new.append(sys.float_info.min) + return p_n_new + + def method1(self, p_n, *args, **kwargs): + """ + Smoothing method 1: Add *epsilon* counts to precision with 0 counts. + """ + return [ + (p_i.numerator + self.epsilon) / p_i.denominator + if p_i.numerator == 0 + else p_i + for p_i in p_n + ] + + def method2(self, p_n, *args, **kwargs): + """ + Smoothing method 2: Add 1 to both numerator and denominator from + Chin-Yew Lin and Franz Josef Och (2004) ORANGE: a Method for + Evaluating Automatic Evaluation Metrics for Machine Translation. + In COLING 2004. + """ + return [ + Fraction(p_n[i].numerator + 1, p_n[i].denominator + 1, _normalize=False) + if i != 0 + else p_n[0] + for i in range(len(p_n)) + ] + + def method3(self, p_n, *args, **kwargs): + """ + Smoothing method 3: NIST geometric sequence smoothing + The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each + precision score whose matching n-gram count is null. + k is 1 for the first 'n' value for which the n-gram match count is null/ + + For example, if the text contains: + + - one 2-gram match + - and (consequently) two 1-gram matches + + the n-gram count for each individual precision score would be: + + - n=1 => prec_count = 2 (two unigrams) + - n=2 => prec_count = 1 (one bigram) + - n=3 => prec_count = 1/2 (no trigram, taking 'smoothed' value of 1 / ( 2^k ), with k=1) + - n=4 => prec_count = 1/4 (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2) + """ + incvnt = 1 # From the mteval-v13a.pl, it's referred to as k. + for i, p_i in enumerate(p_n): + if p_i.numerator == 0: + p_n[i] = 1 / (2**incvnt * p_i.denominator) + incvnt += 1 + return p_n + + def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 4: + Shorter translations may have inflated precision values due to having + smaller denominators; therefore, we give them proportionally + smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry + suggests dividing by 1/ln(len(T)), where T is the length of the translation. + """ + incvnt = 1 + hyp_len = hyp_len if hyp_len else len(hypothesis) + for i, p_i in enumerate(p_n): + if p_i.numerator == 0 and hyp_len > 1: + # incvnt = i + 1 * self.k / math.log( + # hyp_len + # ) # Note that this K is different from the K from NIST. + # p_n[i] = incvnt / p_i.denominator\ + numerator = 1 / (2**incvnt * self.k / math.log(hyp_len)) + p_n[i] = numerator / p_i.denominator + incvnt += 1 + return p_n + + def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 5: + The matched counts for similar values of n should be similar. To a + calculate the n-gram matched count, it averages the n−1, n and n+1 gram + matched counts. 
+ """ + hyp_len = hyp_len if hyp_len else len(hypothesis) + m = {} + # Requires an precision value for an addition ngram order. + p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)] + m[-1] = p_n[0] + 1 + for i, p_i in enumerate(p_n): + p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3 + m[i] = p_n[i] + return p_n + + def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 6: + Interpolates the maximum likelihood estimate of the precision *p_n* with + a prior estimate *pi0*. The prior is estimated by assuming that the ratio + between pn and pn−1 will be the same as that between pn−1 and pn−2; from + Gao and He (2013) Training MRF-Based Phrase Translation Models using + Gradient Ascent. In NAACL. + """ + hyp_len = hyp_len if hyp_len else len(hypothesis) + # This smoothing only works when p_1 and p_2 is non-zero. + # Raise an error with an appropriate message when the input is too short + # to use this smoothing technique. + assert p_n[2], "This smoothing method requires non-zero precision for bigrams." + for i, p_i in enumerate(p_n): + if i in [0, 1]: # Skips the first 2 orders of ngrams. + continue + else: + pi0 = 0 if p_n[i - 2] == 0 else p_n[i - 1] ** 2 / p_n[i - 2] + # No. of ngrams in translation that matches the reference. + m = p_i.numerator + # No. of ngrams in translation. + l = sum(1 for _ in ngrams(hypothesis, i + 1)) + # Calculates the interpolated precision. + p_n[i] = (m + self.alpha * pi0) / (l + self.alpha) + return p_n + + def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 7: + Interpolates methods 4 and 5. + """ + hyp_len = hyp_len if hyp_len else len(hypothesis) + p_n = self.method4(p_n, references, hypothesis, hyp_len) + p_n = self.method5(p_n, references, hypothesis, hyp_len) + return p_n diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/chrf_score.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/chrf_score.py new file mode 100644 index 0000000000000000000000000000000000000000..d4b54f3a07166ba5179b2850cca82b21fe7c39f1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/chrf_score.py @@ -0,0 +1,222 @@ +# Natural Language Toolkit: ChrF score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Maja Popovic +# Contributors: Liling Tan, Aleš Tamchyna (Memsource) +# URL: +# For license information, see LICENSE.TXT + +""" ChrF score implementation """ +import re +from collections import Counter, defaultdict + +from nltk.util import ngrams + + +def sentence_chrf( + reference, hypothesis, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True +): + """ + Calculates the sentence level CHRF (Character n-gram F-score) described in + - Maja Popovic. 2015. CHRF: Character n-gram F-score for Automatic MT Evaluation. + In Proceedings of the 10th Workshop on Machine Translation. + https://www.statmt.org/wmt15/pdf/WMT49.pdf + - Maja Popovic. 2016. CHRF Deconstructed: β Parameters and n-gram Weights. + In Proceedings of the 1st Conference on Machine Translation. + https://www.statmt.org/wmt16/pdf/W16-2341.pdf + + This implementation of CHRF only supports a single reference at the moment. 
+ + For details not reported in the paper, consult Maja Popovic's original + implementation: https://github.com/m-popovic/chrF + + The code should output results equivalent to running CHRF++ with the + following options: -nw 0 -b 3 + + An example from the original BLEU paper + https://www.aclweb.org/anthology/P02-1040.pdf + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands').split() + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party').split() + >>> hyp2 = str('It is to insure the troops forever hearing the activity ' + ... 'guidebook that party direct').split() + >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS + 0.6349... + >>> sentence_chrf(ref1, hyp2) # doctest: +ELLIPSIS + 0.3330... + + The infamous "the the the ... " example + + >>> ref = 'the cat is on the mat'.split() + >>> hyp = 'the the the the the the the'.split() + >>> sentence_chrf(ref, hyp) # doctest: +ELLIPSIS + 0.1468... + + An example to show that this function allows users to use strings instead of + tokens, i.e. list(str) as inputs. + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands') + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party') + >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS + 0.6349... + >>> type(ref1) == type(hyp1) == str + True + >>> sentence_chrf(ref1.split(), hyp1.split()) # doctest: +ELLIPSIS + 0.6349... + + To skip the unigrams and only use 2- to 3-grams: + + >>> sentence_chrf(ref1, hyp1, min_len=2, max_len=3) # doctest: +ELLIPSIS + 0.6617... + + :param references: reference sentence + :type references: list(str) / str + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) / str + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :param beta: the parameter to assign more importance to recall over precision + :type beta: float + :param ignore_whitespace: ignore whitespace characters in scoring + :type ignore_whitespace: bool + :return: the sentence level CHRF score. + :rtype: float + """ + return corpus_chrf( + [reference], + [hypothesis], + min_len, + max_len, + beta=beta, + ignore_whitespace=ignore_whitespace, + ) + + +def _preprocess(sent, ignore_whitespace): + if type(sent) != str: + # turn list of tokens into a string + sent = " ".join(sent) + + if ignore_whitespace: + sent = re.sub(r"\s+", "", sent) + return sent + + +def chrf_precision_recall_fscore_support( + reference, hypothesis, n, beta=3.0, epsilon=1e-16 +): + """ + This function computes the precision, recall and fscore from the ngram + overlaps. It returns the `support` which is the true positive score. + + By underspecifying the input type, the function will be agnostic as to how + it computes the ngrams and simply take the whichever element in the list; + it could be either token or character. + + :param reference: The reference sentence. + :type reference: list + :param hypothesis: The hypothesis sentence. + :type hypothesis: list + :param n: Extract up to the n-th order ngrams + :type n: int + :param beta: The parameter to assign more importance to recall over precision. + :type beta: float + :param epsilon: The fallback value if the hypothesis or reference is empty. 
+ :type epsilon: float + :return: Returns the precision, recall and f-score and support (true positive). + :rtype: tuple(float) + """ + ref_ngrams = Counter(ngrams(reference, n)) + hyp_ngrams = Counter(ngrams(hypothesis, n)) + + # calculate the number of ngram matches + overlap_ngrams = ref_ngrams & hyp_ngrams + tp = sum(overlap_ngrams.values()) # True positives. + tpfp = sum(hyp_ngrams.values()) # True positives + False positives. + tpfn = sum(ref_ngrams.values()) # True positives + False negatives. + + try: + prec = tp / tpfp # precision + rec = tp / tpfn # recall + factor = beta**2 + fscore = (1 + factor) * (prec * rec) / (factor * prec + rec) + except ZeroDivisionError: + prec = rec = fscore = epsilon + return prec, rec, fscore, tp + + +def corpus_chrf( + references, hypotheses, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True +): + """ + Calculates the corpus level CHRF (Character n-gram F-score), it is the + macro-averaged value of the sentence/segment level CHRF score. + + This implementation of CHRF only supports a single reference at the moment. + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands').split() + >>> ref2 = str('It is the guiding principle which guarantees the military ' + ... 'forces always being under the command of the Party').split() + >>> + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party').split() + >>> hyp2 = str('It is to insure the troops forever hearing the activity ' + ... 'guidebook that party direct') + >>> corpus_chrf([ref1, ref2, ref1, ref2], [hyp1, hyp2, hyp2, hyp1]) # doctest: +ELLIPSIS + 0.3910... + + :param references: a corpus of list of reference sentences, w.r.t. hypotheses + :type references: list(list(str)) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :param beta: the parameter to assign more importance to recall over precision + :type beta: float + :param ignore_whitespace: ignore whitespace characters in scoring + :type ignore_whitespace: bool + :return: the sentence level CHRF score. + :rtype: float + """ + + assert len(references) == len( + hypotheses + ), "The number of hypotheses and their references should be the same" + num_sents = len(hypotheses) + + # Keep f-scores for each n-gram order separate + ngram_fscores = defaultdict(lambda: list()) + + # Iterate through each hypothesis and their corresponding references. + for reference, hypothesis in zip(references, hypotheses): + + # preprocess both reference and hypothesis + reference = _preprocess(reference, ignore_whitespace) + hypothesis = _preprocess(hypothesis, ignore_whitespace) + + # Calculate f-scores for each sentence and for each n-gram order + # separately. + for n in range(min_len, max_len + 1): + # Compute the precision, recall, fscore and support. 
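+            # With the default beta=3.0 this is the F-beta score
+            # (1 + beta**2) * prec * rec / (beta**2 * prec + rec),
+            # i.e. recall is weighted beta**2 = 9 times as much as precision.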
+ prec, rec, fscore, tp = chrf_precision_recall_fscore_support( + reference, hypothesis, n, beta=beta + ) + ngram_fscores[n].append(fscore) + + # how many n-gram sizes + num_ngram_sizes = len(ngram_fscores) + + # sum of f-scores over all sentences for each n-gram order + total_scores = [sum(fscores) for n, fscores in ngram_fscores.items()] + + # macro-average over n-gram orders and over all sentences + return (sum(total_scores) / num_ngram_sizes) / num_sents diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/gale_church.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/gale_church.py new file mode 100644 index 0000000000000000000000000000000000000000..d7c81940d9ac27c159b680d688343e67e9ef9c58 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/gale_church.py @@ -0,0 +1,263 @@ +# Natural Language Toolkit: Gale-Church Aligner +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Torsten Marek +# Contributor: Cassidy Laidlaw, Liling Tan +# URL: +# For license information, see LICENSE.TXT + +""" + +A port of the Gale-Church Aligner. + +Gale & Church (1993), A Program for Aligning Sentences in Bilingual Corpora. +https://aclweb.org/anthology/J93-1004.pdf + +""" + +import math + +try: + from norm import logsf as norm_logsf + from scipy.stats import norm +except ImportError: + + def erfcc(x): + """Complementary error function.""" + z = abs(x) + t = 1 / (1 + 0.5 * z) + r = t * math.exp( + -z * z + - 1.26551223 + + t + * ( + 1.00002368 + + t + * ( + 0.37409196 + + t + * ( + 0.09678418 + + t + * ( + -0.18628806 + + t + * ( + 0.27886807 + + t + * ( + -1.13520398 + + t + * (1.48851587 + t * (-0.82215223 + t * 0.17087277)) + ) + ) + ) + ) + ) + ) + ) + if x >= 0.0: + return r + else: + return 2.0 - r + + def norm_cdf(x): + """Return the area under the normal distribution from M{-∞..x}.""" + return 1 - 0.5 * erfcc(x / math.sqrt(2)) + + def norm_logsf(x): + try: + return math.log(1 - norm_cdf(x)) + except ValueError: + return float("-inf") + + +LOG2 = math.log(2) + + +class LanguageIndependent: + # These are the language-independent probabilities and parameters + # given in Gale & Church + + # for the computation, l_1 is always the language with less characters + PRIORS = { + (1, 0): 0.0099, + (0, 1): 0.0099, + (1, 1): 0.89, + (2, 1): 0.089, + (1, 2): 0.089, + (2, 2): 0.011, + } + + AVERAGE_CHARACTERS = 1 + VARIANCE_CHARACTERS = 6.8 + + +def trace(backlinks, source_sents_lens, target_sents_lens): + """ + Traverse the alignment cost from the tracebacks and retrieves + appropriate sentence pairs. 
+ + :param backlinks: A dictionary where the key is the alignment points and value is the cost (referencing the LanguageIndependent.PRIORS) + :type backlinks: dict + :param source_sents_lens: A list of target sentences' lengths + :type source_sents_lens: list(int) + :param target_sents_lens: A list of target sentences' lengths + :type target_sents_lens: list(int) + """ + links = [] + position = (len(source_sents_lens), len(target_sents_lens)) + while position != (0, 0) and all(p >= 0 for p in position): + try: + s, t = backlinks[position] + except TypeError: + position = (position[0] - 1, position[1] - 1) + continue + for i in range(s): + for j in range(t): + links.append((position[0] - i - 1, position[1] - j - 1)) + position = (position[0] - s, position[1] - t) + + return links[::-1] + + +def align_log_prob(i, j, source_sents, target_sents, alignment, params): + """Returns the log probability of the two sentences C{source_sents[i]}, C{target_sents[j]} + being aligned with a specific C{alignment}. + + @param i: The offset of the source sentence. + @param j: The offset of the target sentence. + @param source_sents: The list of source sentence lengths. + @param target_sents: The list of target sentence lengths. + @param alignment: The alignment type, a tuple of two integers. + @param params: The sentence alignment parameters. + + @returns: The log probability of a specific alignment between the two sentences, given the parameters. + """ + l_s = sum(source_sents[i - offset - 1] for offset in range(alignment[0])) + l_t = sum(target_sents[j - offset - 1] for offset in range(alignment[1])) + try: + # actually, the paper says l_s * params.VARIANCE_CHARACTERS, this is based on the C + # reference implementation. With l_s in the denominator, insertions are impossible. + m = (l_s + l_t / params.AVERAGE_CHARACTERS) / 2 + delta = (l_s * params.AVERAGE_CHARACTERS - l_t) / math.sqrt( + m * params.VARIANCE_CHARACTERS + ) + except ZeroDivisionError: + return float("-inf") + + return -(LOG2 + norm_logsf(abs(delta)) + math.log(params.PRIORS[alignment])) + + +def align_blocks(source_sents_lens, target_sents_lens, params=LanguageIndependent): + """Return the sentence alignment of two text blocks (usually paragraphs). + + >>> align_blocks([5,5,5], [7,7,7]) + [(0, 0), (1, 1), (2, 2)] + >>> align_blocks([10,5,5], [12,20]) + [(0, 0), (1, 1), (2, 1)] + >>> align_blocks([12,20], [10,5,5]) + [(0, 0), (1, 1), (1, 2)] + >>> align_blocks([10,2,10,10,2,10], [12,3,20,3,12]) + [(0, 0), (1, 1), (2, 2), (3, 2), (4, 3), (5, 4)] + + @param source_sents_lens: The list of source sentence lengths. + @param target_sents_lens: The list of target sentence lengths. + @param params: the sentence alignment parameters. + @return: The sentence alignments, a list of index pairs. 
+ """ + + alignment_types = list(params.PRIORS.keys()) + + # there are always three rows in the history (with the last of them being filled) + D = [[]] + + backlinks = {} + + for i in range(len(source_sents_lens) + 1): + for j in range(len(target_sents_lens) + 1): + min_dist = float("inf") + min_align = None + for a in alignment_types: + prev_i = -1 - a[0] + prev_j = j - a[1] + if prev_i < -len(D) or prev_j < 0: + continue + p = D[prev_i][prev_j] + align_log_prob( + i, j, source_sents_lens, target_sents_lens, a, params + ) + if p < min_dist: + min_dist = p + min_align = a + + if min_dist == float("inf"): + min_dist = 0 + + backlinks[(i, j)] = min_align + D[-1].append(min_dist) + + if len(D) > 2: + D.pop(0) + D.append([]) + + return trace(backlinks, source_sents_lens, target_sents_lens) + + +def align_texts(source_blocks, target_blocks, params=LanguageIndependent): + """Creates the sentence alignment of two texts. + + Texts can consist of several blocks. Block boundaries cannot be crossed by sentence + alignment links. + + Each block consists of a list that contains the lengths (in characters) of the sentences + in this block. + + @param source_blocks: The list of blocks in the source text. + @param target_blocks: The list of blocks in the target text. + @param params: the sentence alignment parameters. + + @returns: A list of sentence alignment lists + """ + if len(source_blocks) != len(target_blocks): + raise ValueError( + "Source and target texts do not have the same number of blocks." + ) + + return [ + align_blocks(source_block, target_block, params) + for source_block, target_block in zip(source_blocks, target_blocks) + ] + + +# File I/O functions; may belong in a corpus reader + + +def split_at(it, split_value): + """Splits an iterator C{it} at values of C{split_value}. + + Each instance of C{split_value} is swallowed. The iterator produces + subiterators which need to be consumed fully before the next subiterator + can be used. + """ + + def _chunk_iterator(first): + v = first + while v != split_value: + yield v + v = it.next() + + while True: + yield _chunk_iterator(it.next()) + + +def parse_token_stream(stream, soft_delimiter, hard_delimiter): + """Parses a stream of tokens and splits it into sentences (using C{soft_delimiter} tokens) + and blocks (using C{hard_delimiter} tokens) for use with the L{align_texts} function. + """ + return [ + [ + sum(len(token) for token in sentence_it) + for sentence_it in split_at(block_it, soft_delimiter) + ] + for block_it in split_at(stream, hard_delimiter) + ] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/gdfa.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/gdfa.py new file mode 100644 index 0000000000000000000000000000000000000000..57df0cea63b35bfbf83f9d330bf137563b332a33 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/gdfa.py @@ -0,0 +1,138 @@ +# Natural Language Toolkit: GDFA word alignment symmetrization +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Liling Tan +# URL: +# For license information, see LICENSE.TXT + +from collections import defaultdict + + +def grow_diag_final_and(srclen, trglen, e2f, f2e): + """ + This module symmetrisatizes the source-to-target and target-to-source + word alignment output and produces, aka. GDFA algorithm (Koehn, 2005). + + Step 1: Find the intersection of the bidirectional alignment. 
+ + Step 2: Search for additional neighbor alignment points to be added, given + these criteria: (i) neighbor alignments points are not in the + intersection and (ii) neighbor alignments are in the union. + + Step 3: Add all other alignment points that are not in the intersection, not in + the neighboring alignments that met the criteria but in the original + forward/backward alignment outputs. + + >>> forw = ('0-0 2-1 9-2 21-3 10-4 7-5 11-6 9-7 12-8 1-9 3-10 ' + ... '4-11 17-12 17-13 25-14 13-15 24-16 11-17 28-18') + >>> back = ('0-0 1-9 2-9 3-10 4-11 5-12 6-6 7-5 8-6 9-7 10-4 ' + ... '11-6 12-8 13-12 15-12 17-13 18-13 19-12 20-13 ' + ... '21-3 22-12 23-14 24-17 25-15 26-17 27-18 28-18') + >>> srctext = ("この よう な ハロー 白色 わい 星 の L 関数 " + ... "は L と 共 に 不連続 に 増加 する こと が " + ... "期待 さ れる こと を 示し た 。") + >>> trgtext = ("Therefore , we expect that the luminosity function " + ... "of such halo white dwarfs increases discontinuously " + ... "with the luminosity .") + >>> srclen = len(srctext.split()) + >>> trglen = len(trgtext.split()) + >>> + >>> gdfa = grow_diag_final_and(srclen, trglen, forw, back) + >>> gdfa == sorted(set([(28, 18), (6, 6), (24, 17), (2, 1), (15, 12), (13, 12), + ... (2, 9), (3, 10), (26, 17), (25, 15), (8, 6), (9, 7), (20, + ... 13), (18, 13), (0, 0), (10, 4), (13, 15), (23, 14), (7, 5), + ... (25, 14), (1, 9), (17, 13), (4, 11), (11, 17), (9, 2), (22, + ... 12), (27, 18), (24, 16), (21, 3), (19, 12), (17, 12), (5, + ... 12), (11, 6), (12, 8)])) + True + + References: + Koehn, P., A. Axelrod, A. Birch, C. Callison, M. Osborne, and D. Talbot. + 2005. Edinburgh System Description for the 2005 IWSLT Speech + Translation Evaluation. In MT Eval Workshop. + + :type srclen: int + :param srclen: the number of tokens in the source language + :type trglen: int + :param trglen: the number of tokens in the target language + :type e2f: str + :param e2f: the forward word alignment outputs from source-to-target + language (in pharaoh output format) + :type f2e: str + :param f2e: the backward word alignment outputs from target-to-source + language (in pharaoh output format) + :rtype: set(tuple(int)) + :return: the symmetrized alignment points from the GDFA algorithm + """ + + # Converts pharaoh text format into list of tuples. + e2f = [tuple(map(int, a.split("-"))) for a in e2f.split()] + f2e = [tuple(map(int, a.split("-"))) for a in f2e.split()] + + neighbors = [(-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)] + alignment = set(e2f).intersection(set(f2e)) # Find the intersection. + union = set(e2f).union(set(f2e)) + + # *aligned* is used to check if neighbors are aligned in grow_diag() + aligned = defaultdict(set) + for i, j in alignment: + aligned["e"].add(i) + aligned["f"].add(j) + + def grow_diag(): + """ + Search for the neighbor points and them to the intersected alignment + points if criteria are met. + """ + prev_len = len(alignment) - 1 + # iterate until no new points added + while prev_len < len(alignment): + no_new_points = True + # for english word e = 0 ... en + for e in range(srclen): + # for foreign word f = 0 ... 
fn + for f in range(trglen): + # if ( e aligned with f) + if (e, f) in alignment: + # for each neighboring point (e-new, f-new) + for neighbor in neighbors: + neighbor = tuple(i + j for i, j in zip((e, f), neighbor)) + e_new, f_new = neighbor + # if ( ( e-new not aligned and f-new not aligned) + # and (e-new, f-new in union(e2f, f2e) ) + if ( + e_new not in aligned and f_new not in aligned + ) and neighbor in union: + alignment.add(neighbor) + aligned["e"].add(e_new) + aligned["f"].add(f_new) + prev_len += 1 + no_new_points = False + # iterate until no new points added + if no_new_points: + break + + def final_and(a): + """ + Adds remaining points that are not in the intersection, not in the + neighboring alignments but in the original *e2f* and *f2e* alignments + """ + # for english word e = 0 ... en + for e_new in range(srclen): + # for foreign word f = 0 ... fn + for f_new in range(trglen): + # if ( ( e-new not aligned and f-new not aligned) + # and (e-new, f-new in union(e2f, f2e) ) + if ( + e_new not in aligned + and f_new not in aligned + and (e_new, f_new) in union + ): + alignment.add((e_new, f_new)) + aligned["e"].add(e_new) + aligned["f"].add(f_new) + + grow_diag() + final_and(e2f) + final_and(f2e) + return sorted(alignment) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/gleu_score.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/gleu_score.py new file mode 100644 index 0000000000000000000000000000000000000000..81932a73fb5bdd34e539dfd9d1b46f179fc26558 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/gleu_score.py @@ -0,0 +1,190 @@ +# Natural Language Toolkit: GLEU Score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: +# Contributors: Mike Schuster, Michael Wayne Goodman, Liling Tan +# URL: +# For license information, see LICENSE.TXT + +""" GLEU score implementation. """ + +from collections import Counter + +from nltk.util import everygrams, ngrams + + +def sentence_gleu(references, hypothesis, min_len=1, max_len=4): + """ + Calculates the sentence level GLEU (Google-BLEU) score described in + + Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, + Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, + Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser, + Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, + George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, + Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, + Jeffrey Dean. (2016) Google’s Neural Machine Translation System: + Bridging the Gap between Human and Machine Translation. + eprint arXiv:1609.08144. https://arxiv.org/pdf/1609.08144v2.pdf + Retrieved on 27 Oct 2016. + + From Wu et al. (2016): + "The BLEU score has some undesirable properties when used for single + sentences, as it was designed to be a corpus measure. We therefore + use a slightly different score for our RL experiments which we call + the 'GLEU score'. For the GLEU score, we record all sub-sequences of + 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then + compute a recall, which is the ratio of the number of matching n-grams + to the number of total n-grams in the target (ground truth) sequence, + and a precision, which is the ratio of the number of matching n-grams + to the number of total n-grams in the generated output sequence. Then + GLEU score is simply the minimum of recall and precision. 
This GLEU + score's range is always between 0 (no matches) and 1 (all match) and + it is symmetrical when switching output and target. According to + our experiments, GLEU score correlates quite well with the BLEU + metric on a corpus level but does not have its drawbacks for our per + sentence reward objective." + + Note: The initial implementation only allowed a single reference, but now + a list of references is required (which is consistent with + bleu_score.sentence_bleu()). + + The infamous "the the the ... " example + + >>> ref = 'the cat is on the mat'.split() + >>> hyp = 'the the the the the the the'.split() + >>> sentence_gleu([ref], hyp) # doctest: +ELLIPSIS + 0.0909... + + An example to evaluate normal machine translation outputs + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands').split() + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party').split() + >>> hyp2 = str('It is to insure the troops forever hearing the activity ' + ... 'guidebook that party direct').split() + >>> sentence_gleu([ref1], hyp1) # doctest: +ELLIPSIS + 0.4393... + >>> sentence_gleu([ref1], hyp2) # doctest: +ELLIPSIS + 0.1206... + + :param references: a list of reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :return: the sentence level GLEU score. + :rtype: float + """ + return corpus_gleu([references], [hypothesis], min_len=min_len, max_len=max_len) + + +def corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4): + """ + Calculate a single corpus-level GLEU score (aka. system-level GLEU) for all + the hypotheses and their respective references. + + Instead of averaging the sentence level GLEU scores (i.e. macro-average + precision), Wu et al. (2016) sum up the matching tokens and the max of + hypothesis and reference tokens for each sentence, then compute using the + aggregate values. + + From Mike Schuster (via email): + "For the corpus, we just add up the two statistics n_match and + n_all = max(n_all_output, n_all_target) for all sentences, then + calculate gleu_score = n_match / n_all, so it is not just a mean of + the sentence gleu scores (in our case, longer sentences count more, + which I think makes sense as they are more difficult to translate)." + + >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', + ... 'interested', 'in', 'world', 'history'] + >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', + ... 
'because', 'he', 'read', 'the', 'book'] + + >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] + >>> hypotheses = [hyp1, hyp2] + >>> corpus_gleu(list_of_references, hypotheses) # doctest: +ELLIPSIS + 0.5673... + + The example below show that corpus_gleu() is different from averaging + sentence_gleu() for hypotheses + + >>> score1 = sentence_gleu([ref1a], hyp1) + >>> score2 = sentence_gleu([ref2a], hyp2) + >>> (score1 + score2) / 2 # doctest: +ELLIPSIS + 0.6144... + + :param list_of_references: a list of reference sentences, w.r.t. hypotheses + :type list_of_references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :return: The corpus-level GLEU score. + :rtype: float + """ + # sanity check + assert len(list_of_references) == len( + hypotheses + ), "The number of hypotheses and their reference(s) should be the same" + + # sum matches and max-token-lengths over all sentences + corpus_n_match = 0 + corpus_n_all = 0 + + for references, hypothesis in zip(list_of_references, hypotheses): + hyp_ngrams = Counter(everygrams(hypothesis, min_len, max_len)) + tpfp = sum(hyp_ngrams.values()) # True positives + False positives. + + hyp_counts = [] + for reference in references: + ref_ngrams = Counter(everygrams(reference, min_len, max_len)) + tpfn = sum(ref_ngrams.values()) # True positives + False negatives. + + overlap_ngrams = ref_ngrams & hyp_ngrams + tp = sum(overlap_ngrams.values()) # True positives. + + # While GLEU is defined as the minimum of precision and + # recall, we can reduce the number of division operations by one by + # instead finding the maximum of the denominators for the precision + # and recall formulae, since the numerators are the same: + # precision = tp / tpfp + # recall = tp / tpfn + # gleu_score = min(precision, recall) == tp / max(tpfp, tpfn) + n_all = max(tpfp, tpfn) + + if n_all > 0: + hyp_counts.append((tp, n_all)) + + # use the reference yielding the highest score + if hyp_counts: + n_match, n_all = max(hyp_counts, key=lambda hc: hc[0] / hc[1]) + corpus_n_match += n_match + corpus_n_all += n_all + + # corner case: empty corpus or empty references---don't divide by zero! + if corpus_n_all == 0: + gleu_score = 0.0 + else: + gleu_score = corpus_n_match / corpus_n_all + + return gleu_score diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm1.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm1.py new file mode 100644 index 0000000000000000000000000000000000000000..badb896968633d0db99f9b8fb2a7679b65d9a534 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm1.py @@ -0,0 +1,251 @@ +# Natural Language Toolkit: IBM Model 1 +# +# Copyright (C) 2001-2013 NLTK Project +# Author: Chin Yee Lee +# Hengfeng Li +# Ruxin Hou +# Calvin Tanujaya Lim +# Based on earlier version by: +# Will Zhang +# Guan Gui +# URL: +# For license information, see LICENSE.TXT + +""" +Lexical translation model that ignores word order. + +In IBM Model 1, word order is ignored for simplicity. As long as the +word alignments are equivalent, it doesn't matter where the word occurs +in the source or target sentence. 
Thus, the following three alignments +are equally likely:: + + Source: je mange du jambon + Target: i eat some ham + Alignment: (0,0) (1,1) (2,2) (3,3) + + Source: je mange du jambon + Target: some ham eat i + Alignment: (0,2) (1,3) (2,1) (3,1) + + Source: du jambon je mange + Target: eat i some ham + Alignment: (0,3) (1,2) (2,0) (3,1) + +Note that an alignment is represented here as +(word_index_in_target, word_index_in_source). + +The EM algorithm used in Model 1 is: + +:E step: In the training data, count how many times a source language + word is translated into a target language word, weighted by + the prior probability of the translation. + +:M step: Estimate the new probability of translation based on the + counts from the Expectation step. + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:s: A word in the source language +:t: A word in the target language + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. +""" + +import warnings +from collections import defaultdict + +from nltk.translate import AlignedSent, Alignment, IBMModel +from nltk.translate.ibm_model import Counts + + +class IBMModel1(IBMModel): + """ + Lexical translation model that ignores word order + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + + >>> ibm1 = IBMModel1(bitext, 5) + + >>> print(round(ibm1.translation_table['buch']['book'], 3)) + 0.889 + >>> print(round(ibm1.translation_table['das']['book'], 3)) + 0.062 + >>> print(round(ibm1.translation_table['buch'][None], 3)) + 0.113 + >>> print(round(ibm1.translation_table['ja'][None], 3)) + 0.073 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)]) + + """ + + def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. + If specified, the following entry must be present: + ``translation_table``. + See ``IBMModel`` for the type and purpose of this table. 
+ :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + + if probability_tables is None: + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + self.align_all(sentence_aligned_corpus) + + def set_uniform_probabilities(self, sentence_aligned_corpus): + initial_prob = 1 / len(self.trg_vocab) + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "Target language vocabulary is too large (" + + str(len(self.trg_vocab)) + + " words). " + "Results may be less accurate." + ) + + for t in self.trg_vocab: + self.translation_table[t] = defaultdict(lambda: initial_prob) + + def train(self, parallel_corpus): + counts = Counts() + for aligned_sentence in parallel_corpus: + trg_sentence = aligned_sentence.words + src_sentence = [None] + aligned_sentence.mots + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_all_alignments(src_sentence, trg_sentence) + + # E step (b): Collect counts + for t in trg_sentence: + for s in src_sentence: + count = self.prob_alignment_point(s, t) + normalized_count = count / total_count[t] + counts.t_given_s[t][s] += normalized_count + counts.any_t_given_s[s] += normalized_count + + # M step: Update probabilities with maximum likelihood estimate + self.maximize_lexical_translation_probabilities(counts) + + def prob_all_alignments(self, src_sentence, trg_sentence): + """ + Computes the probability of all possible word alignments, + expressed as a marginal distribution over target words t + + Each entry in the return value represents the contribution to + the total alignment probability by the target word t. + + To obtain probability(alignment | src_sentence, trg_sentence), + simply sum the entries in the return value. + + :return: Probability of t for all s in ``src_sentence`` + :rtype: dict(str): float + """ + alignment_prob_for_t = defaultdict(lambda: 0.0) + for t in trg_sentence: + for s in src_sentence: + alignment_prob_for_t[t] += self.prob_alignment_point(s, t) + return alignment_prob_for_t + + def prob_alignment_point(self, s, t): + """ + Probability that word ``t`` in the target sentence is aligned to + word ``s`` in the source sentence + """ + return self.translation_table[t][s] + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + prob = 1.0 + + for j, i in enumerate(alignment_info.alignment): + if j == 0: + continue # skip the dummy zeroeth element + trg_word = alignment_info.trg_sentence[j] + src_word = alignment_info.src_sentence[i] + prob *= self.translation_table[trg_word][src_word] + + return max(prob, IBMModel.MIN_PROB) + + def align_all(self, parallel_corpus): + for sentence_pair in parallel_corpus: + self.align(sentence_pair) + + def align(self, sentence_pair): + """ + Determines the best word alignment for one sentence pair from + the corpus that the model was trained on. + + The best alignment will be set in ``sentence_pair`` when the + method returns. In contrast with the internal implementation of + IBM models, the word indices in the ``Alignment`` are zero- + indexed, not one-indexed. 
+ + :param sentence_pair: A sentence in the source language and its + counterpart sentence in the target language + :type sentence_pair: AlignedSent + """ + best_alignment = [] + + for j, trg_word in enumerate(sentence_pair.words): + # Initialize trg_word to align with the NULL token + best_prob = max(self.translation_table[trg_word][None], IBMModel.MIN_PROB) + best_alignment_point = None + for i, src_word in enumerate(sentence_pair.mots): + align_prob = self.translation_table[trg_word][src_word] + if align_prob >= best_prob: # prefer newer word in case of tie + best_prob = align_prob + best_alignment_point = i + + best_alignment.append((j, best_alignment_point)) + + sentence_pair.alignment = Alignment(best_alignment) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm4.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm4.py new file mode 100644 index 0000000000000000000000000000000000000000..c7686939ac5027d6e16147cc82611cd4519ea51e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm4.py @@ -0,0 +1,490 @@ +# Natural Language Toolkit: IBM Model 4 +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Translation model that reorders output words based on their type and +distance from other related words in the output sentence. + +IBM Model 4 improves the distortion model of Model 3, motivated by the +observation that certain words tend to be re-ordered in a predictable +way relative to one another. For example, in English +usually has its order flipped as in French. + +Model 4 requires words in the source and target vocabularies to be +categorized into classes. This can be linguistically driven, like parts +of speech (adjective, nouns, prepositions, etc). Word classes can also +be obtained by statistical methods. The original IBM Model 4 uses an +information theoretic approach to group words into 50 classes for each +vocabulary. + +Terminology +----------- + +:Cept: + A source word with non-zero fertility i.e. aligned to one or more + target words. +:Tablet: + The set of target word(s) aligned to a cept. +:Head of cept: + The first word of the tablet of that cept. +:Center of cept: + The average position of the words in that cept's tablet. If the + value is not an integer, the ceiling is taken. + For example, for a tablet with words in positions 2, 5, 6 in the + target sentence, the center of the corresponding cept is + ceil((2 + 5 + 6) / 3) = 5 +:Displacement: + For a head word, defined as (position of head word - position of + previous cept's center). Can be positive or negative. + For a non-head word, defined as (position of non-head word - + position of previous word in the same tablet). Always positive, + because successive words in a tablet are assumed to appear to the + right of the previous word. + +In contrast to Model 3 which reorders words in a tablet independently of +other words, Model 4 distinguishes between three cases. + +1. Words generated by NULL are distributed uniformly. +2. For a head word t, its position is modeled by the probability + d_head(displacement | word_class_s(s),word_class_t(t)), + where s is the previous cept, and word_class_s and word_class_t maps + s and t to a source and target language word class respectively. +3. 
For a non-head word t, its position is modeled by the probability + d_non_head(displacement | word_class_t(t)) + +The EM algorithm used in Model 4 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) for a particular word class, count how many times a head + word is located at a particular displacement from the + previous cept's center + - (c) for a particular word class, count how many times a + non-head word is located at a particular displacement from + the previous target word + - (d) count how many times a source word is aligned to phi number + of target words + - (e) count how many times NULL is aligned to a target word + +:M step: Estimate new probabilities based on the counts from the E step + +Like Model 3, there are too many possible alignments to consider. Thus, +a hill climbing approach is used to sample good candidates. + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language +:phi: Fertility, the number of target words produced by a source word +:p1: Probability that a target word produced by a source word is + accompanied by another target word that is aligned to NULL +:p0: 1 - p1 +:dj: Displacement, Δj + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. 
+""" + +import warnings +from collections import defaultdict +from math import factorial + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel3 +from nltk.translate.ibm_model import Counts, longest_target_sentence_length + + +class IBMModel4(IBMModel): + """ + Translation model that reorders output words based on their type and + their distance from other related words in the output sentence + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book'])) + >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize'])) + >>> src_classes = {'the': 0, 'a': 0, 'small': 1, 'big': 1, 'house': 2, 'book': 2, 'is': 3, 'was': 3, 'i': 4, 'summarize': 5 } + >>> trg_classes = {'das': 0, 'ein': 0, 'haus': 1, 'buch': 1, 'klein': 2, 'groß': 2, 'ist': 3, 'war': 3, 'ja': 4, 'ich': 5, 'fasse': 6, 'zusammen': 6 } + + >>> ibm4 = IBMModel4(bitext, 5, src_classes, trg_classes) + + >>> print(round(ibm4.translation_table['buch']['book'], 3)) + 1.0 + >>> print(round(ibm4.translation_table['das']['book'], 3)) + 0.0 + >>> print(round(ibm4.translation_table['ja'][None], 3)) + 1.0 + + >>> print(round(ibm4.head_distortion_table[1][0][1], 3)) + 1.0 + >>> print(round(ibm4.head_distortion_table[2][0][1], 3)) + 0.0 + >>> print(round(ibm4.non_head_distortion_table[3][6], 3)) + 0.5 + + >>> print(round(ibm4.fertility_table[2]['summarize'], 3)) + 1.0 + >>> print(round(ibm4.fertility_table[1]['book'], 3)) + 1.0 + + >>> print(round(ibm4.p1, 3)) + 0.033 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)]) + + """ + + def __init__( + self, + sentence_aligned_corpus, + iterations, + source_word_classes, + target_word_classes, + probability_tables=None, + ): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model, distortion models, a fertility model, and a + model for generating NULL-aligned words. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param source_word_classes: Lookup table that maps a source word + to its word class, the latter represented by an integer id + :type source_word_classes: dict[str]: int + + :param target_word_classes: Lookup table that maps a target word + to its word class, the latter represented by an integer id + :type target_word_classes: dict[str]: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. 
+ If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``, + ``fertility_table``, ``p1``, ``head_distortion_table``, + ``non_head_distortion_table``. See ``IBMModel`` and + ``IBMModel4`` for the type and purpose of these tables. + :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + self.reset_probabilities() + self.src_classes = source_word_classes + self.trg_classes = target_word_classes + + if probability_tables is None: + # Get probabilities from IBM model 3 + ibm3 = IBMModel3(sentence_aligned_corpus, iterations) + self.translation_table = ibm3.translation_table + self.alignment_table = ibm3.alignment_table + self.fertility_table = ibm3.fertility_table + self.p1 = ibm3.p1 + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + self.fertility_table = probability_tables["fertility_table"] + self.p1 = probability_tables["p1"] + self.head_distortion_table = probability_tables["head_distortion_table"] + self.non_head_distortion_table = probability_tables[ + "non_head_distortion_table" + ] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + def reset_probabilities(self): + super().reset_probabilities() + self.head_distortion_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + """ + dict[int][int][int]: float. Probability(displacement of head + word | word class of previous cept,target word class). + Values accessed as ``distortion_table[dj][src_class][trg_class]``. + """ + + self.non_head_distortion_table = defaultdict( + lambda: defaultdict(lambda: self.MIN_PROB) + ) + """ + dict[int][int]: float. Probability(displacement of non-head + word | target word class). + Values accessed as ``distortion_table[dj][trg_class]``. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + """ + Set distortion probabilities uniformly to + 1 / cardinality of displacement values + """ + max_m = longest_target_sentence_length(sentence_aligned_corpus) + + # The maximum displacement is m-1, when a word is in the last + # position m of the target sentence and the previously placed + # word is in the first position. + # Conversely, the minimum displacement is -(m-1). + # Thus, the displacement range is (m-1) - (-(m-1)). Note that + # displacement cannot be zero and is not included in the range. + if max_m <= 1: + initial_prob = IBMModel.MIN_PROB + else: + initial_prob = 1 / (2 * (max_m - 1)) + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "A target sentence is too long (" + + str(max_m) + + " words). Results may be less accurate." 
+ ) + + for dj in range(1, max_m): + self.head_distortion_table[dj] = defaultdict( + lambda: defaultdict(lambda: initial_prob) + ) + self.head_distortion_table[-dj] = defaultdict( + lambda: defaultdict(lambda: initial_prob) + ) + self.non_head_distortion_table[dj] = defaultdict(lambda: initial_prob) + self.non_head_distortion_table[-dj] = defaultdict(lambda: initial_prob) + + def train(self, parallel_corpus): + counts = Model4Counts() + for aligned_sentence in parallel_corpus: + m = len(aligned_sentence.words) + + # Sample the alignment space + sampled_alignments, best_alignment = self.sample(aligned_sentence) + # Record the most probable alignment + aligned_sentence.alignment = Alignment( + best_alignment.zero_indexed_alignment() + ) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_of_alignments(sampled_alignments) + + # E step (b): Collect counts + for alignment_info in sampled_alignments: + count = self.prob_t_a_given_s(alignment_info) + normalized_count = count / total_count + + for j in range(1, m + 1): + counts.update_lexical_translation( + normalized_count, alignment_info, j + ) + counts.update_distortion( + normalized_count, + alignment_info, + j, + self.src_classes, + self.trg_classes, + ) + + counts.update_null_generation(normalized_count, alignment_info) + counts.update_fertility(normalized_count, alignment_info) + + # M step: Update probabilities with maximum likelihood estimates + # If any probability is less than MIN_PROB, clamp it to MIN_PROB + existing_alignment_table = self.alignment_table + self.reset_probabilities() + self.alignment_table = existing_alignment_table # don't retrain + + self.maximize_lexical_translation_probabilities(counts) + self.maximize_distortion_probabilities(counts) + self.maximize_fertility_probabilities(counts) + self.maximize_null_generation_probabilities(counts) + + def maximize_distortion_probabilities(self, counts): + head_d_table = self.head_distortion_table + for dj, src_classes in counts.head_distortion.items(): + for s_cls, trg_classes in src_classes.items(): + for t_cls in trg_classes: + estimate = ( + counts.head_distortion[dj][s_cls][t_cls] + / counts.head_distortion_for_any_dj[s_cls][t_cls] + ) + head_d_table[dj][s_cls][t_cls] = max(estimate, IBMModel.MIN_PROB) + + non_head_d_table = self.non_head_distortion_table + for dj, trg_classes in counts.non_head_distortion.items(): + for t_cls in trg_classes: + estimate = ( + counts.non_head_distortion[dj][t_cls] + / counts.non_head_distortion_for_any_dj[t_cls] + ) + non_head_d_table[dj][t_cls] = max(estimate, IBMModel.MIN_PROB) + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + return IBMModel4.model4_prob_t_a_given_s(alignment_info, self) + + @staticmethod # exposed for Model 5 to use + def model4_prob_t_a_given_s(alignment_info, ibm_model): + probability = 1.0 + MIN_PROB = IBMModel.MIN_PROB + + def null_generation_term(): + # Binomial distribution: B(m - null_fertility, p1) + value = 1.0 + p1 = ibm_model.p1 + p0 = 1 - p1 + null_fertility = alignment_info.fertility_of_i(0) + m = len(alignment_info.trg_sentence) - 1 + value *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) + if value < MIN_PROB: + return MIN_PROB + + # Combination: (m - null_fertility) choose null_fertility + for i in range(1, null_fertility + 1): + value *= (m - null_fertility - i + 1) / i + return value + + def fertility_term(): + value = 1.0 + src_sentence = 
alignment_info.src_sentence + for i in range(1, len(src_sentence)): + fertility = alignment_info.fertility_of_i(i) + value *= ( + factorial(fertility) + * ibm_model.fertility_table[fertility][src_sentence[i]] + ) + if value < MIN_PROB: + return MIN_PROB + return value + + def lexical_translation_term(j): + t = alignment_info.trg_sentence[j] + i = alignment_info.alignment[j] + s = alignment_info.src_sentence[i] + return ibm_model.translation_table[t][s] + + def distortion_term(j): + t = alignment_info.trg_sentence[j] + i = alignment_info.alignment[j] + if i == 0: + # case 1: t is aligned to NULL + return 1.0 + if alignment_info.is_head_word(j): + # case 2: t is the first word of a tablet + previous_cept = alignment_info.previous_cept(j) + src_class = None + if previous_cept is not None: + previous_s = alignment_info.src_sentence[previous_cept] + src_class = ibm_model.src_classes[previous_s] + trg_class = ibm_model.trg_classes[t] + dj = j - alignment_info.center_of_cept(previous_cept) + return ibm_model.head_distortion_table[dj][src_class][trg_class] + + # case 3: t is a subsequent word of a tablet + previous_position = alignment_info.previous_in_tablet(j) + trg_class = ibm_model.trg_classes[t] + dj = j - previous_position + return ibm_model.non_head_distortion_table[dj][trg_class] + + # end nested functions + + # Abort computation whenever probability falls below MIN_PROB at + # any point, since MIN_PROB can be considered as zero + probability *= null_generation_term() + if probability < MIN_PROB: + return MIN_PROB + + probability *= fertility_term() + if probability < MIN_PROB: + return MIN_PROB + + for j in range(1, len(alignment_info.trg_sentence)): + probability *= lexical_translation_term(j) + if probability < MIN_PROB: + return MIN_PROB + + probability *= distortion_term(j) + if probability < MIN_PROB: + return MIN_PROB + + return probability + + +class Model4Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for distortion. 
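+
+    Head-word distortion counts are accessed as
+    ``head_distortion[dj][src_class][trg_class]``, with marginal totals in
+    ``head_distortion_for_any_dj[src_class][trg_class]``; non-head counts
+    use ``non_head_distortion[dj][trg_class]`` and
+    ``non_head_distortion_for_any_dj[trg_class]``.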
+ """ + + def __init__(self): + super().__init__() + self.head_distortion = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + self.head_distortion_for_any_dj = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.non_head_distortion = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.non_head_distortion_for_any_dj = defaultdict(lambda: 0.0) + + def update_distortion(self, count, alignment_info, j, src_classes, trg_classes): + i = alignment_info.alignment[j] + t = alignment_info.trg_sentence[j] + if i == 0: + # case 1: t is aligned to NULL + pass + elif alignment_info.is_head_word(j): + # case 2: t is the first word of a tablet + previous_cept = alignment_info.previous_cept(j) + if previous_cept is not None: + previous_src_word = alignment_info.src_sentence[previous_cept] + src_class = src_classes[previous_src_word] + else: + src_class = None + trg_class = trg_classes[t] + dj = j - alignment_info.center_of_cept(previous_cept) + self.head_distortion[dj][src_class][trg_class] += count + self.head_distortion_for_any_dj[src_class][trg_class] += count + else: + # case 3: t is a subsequent word of a tablet + previous_j = alignment_info.previous_in_tablet(j) + trg_class = trg_classes[t] + dj = j - previous_j + self.non_head_distortion[dj][trg_class] += count + self.non_head_distortion_for_any_dj[trg_class] += count diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm_model.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ed9a49408638605f8d4d627883d51e04816877d7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm_model.py @@ -0,0 +1,549 @@ +# Natural Language Toolkit: IBM Model Core +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Common methods and classes for all IBM models. See ``IBMModel1``, +``IBMModel2``, ``IBMModel3``, ``IBMModel4``, and ``IBMModel5`` +for specific implementations. + +The IBM models are a series of generative models that learn lexical +translation probabilities, p(target language word|source language word), +given a sentence-aligned parallel corpus. + +The models increase in sophistication from model 1 to 5. Typically, the +output of lower models is used to seed the higher models. All models +use the Expectation-Maximization (EM) algorithm to learn various +probability tables. + +Words in a sentence are one-indexed. The first word of a sentence has +position 1, not 0. Index 0 is reserved in the source sentence for the +NULL token. The concept of position does not apply to NULL, but it is +indexed at 0 by convention. + +Each target word is aligned to exactly one source word or the NULL +token. + +References: +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. 
+""" + +from bisect import insort_left +from collections import defaultdict +from copy import deepcopy +from math import ceil + + +def longest_target_sentence_length(sentence_aligned_corpus): + """ + :param sentence_aligned_corpus: Parallel corpus under consideration + :type sentence_aligned_corpus: list(AlignedSent) + :return: Number of words in the longest target language sentence + of ``sentence_aligned_corpus`` + """ + max_m = 0 + for aligned_sentence in sentence_aligned_corpus: + m = len(aligned_sentence.words) + max_m = max(m, max_m) + return max_m + + +class IBMModel: + """ + Abstract base class for all IBM models + """ + + # Avoid division by zero and precision errors by imposing a minimum + # value for probabilities. Note that this approach is theoretically + # incorrect, since it may create probabilities that sum to more + # than 1. In practice, the contribution of probabilities with MIN_PROB + # is tiny enough that the value of MIN_PROB can be treated as zero. + MIN_PROB = 1.0e-12 # GIZA++ is more liberal and uses 1.0e-7 + + def __init__(self, sentence_aligned_corpus): + self.init_vocab(sentence_aligned_corpus) + self.reset_probabilities() + + def reset_probabilities(self): + self.translation_table = defaultdict( + lambda: defaultdict(lambda: IBMModel.MIN_PROB) + ) + """ + dict[str][str]: float. Probability(target word | source word). + Values accessed as ``translation_table[target_word][source_word]``. + """ + + self.alignment_table = defaultdict( + lambda: defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: IBMModel.MIN_PROB)) + ) + ) + """ + dict[int][int][int][int]: float. Probability(i | j,l,m). + Values accessed as ``alignment_table[i][j][l][m]``. + Used in model 2 and hill climbing in models 3 and above + """ + + self.fertility_table = defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + """ + dict[int][str]: float. Probability(fertility | source word). + Values accessed as ``fertility_table[fertility][source_word]``. + Used in model 3 and higher. + """ + + self.p1 = 0.5 + """ + Probability that a generated word requires another target word + that is aligned to NULL. + Used in model 3 and higher. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + """ + Initialize probability tables to a uniform distribution + + Derived classes should implement this accordingly. + """ + pass + + def init_vocab(self, sentence_aligned_corpus): + src_vocab = set() + trg_vocab = set() + for aligned_sentence in sentence_aligned_corpus: + trg_vocab.update(aligned_sentence.words) + src_vocab.update(aligned_sentence.mots) + # Add the NULL token + src_vocab.add(None) + + self.src_vocab = src_vocab + """ + set(str): All source language words used in training + """ + + self.trg_vocab = trg_vocab + """ + set(str): All target language words used in training + """ + + def sample(self, sentence_pair): + """ + Sample the most probable alignments from the entire alignment + space + + First, determine the best alignment according to IBM Model 2. + With this initial alignment, use hill climbing to determine the + best alignment according to a higher IBM Model. Add this + alignment and its neighbors to the sample set. Repeat this + process with other initial alignments obtained by pegging an + alignment point. + + Hill climbing may be stuck in a local maxima, hence the pegging + and trying out of different alignments. 
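+
+    In outline, the code below first hill climbs from the best IBM Model 2
+    alignment and adds that alignment's neighbors to the sample; it then
+    repeats the same climb once for every pegged pair (j, i), keeping track
+    of the highest-scoring alignment seen.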
+ + :param sentence_pair: Source and target language sentence pair + to generate a sample of alignments from + :type sentence_pair: AlignedSent + + :return: A set of best alignments represented by their ``AlignmentInfo`` + and the best alignment of the set for convenience + :rtype: set(AlignmentInfo), AlignmentInfo + """ + sampled_alignments = set() + l = len(sentence_pair.mots) + m = len(sentence_pair.words) + + # Start from the best model 2 alignment + initial_alignment = self.best_model2_alignment(sentence_pair) + potential_alignment = self.hillclimb(initial_alignment) + sampled_alignments.update(self.neighboring(potential_alignment)) + best_alignment = potential_alignment + + # Start from other model 2 alignments, + # with the constraint that j is aligned (pegged) to i + for j in range(1, m + 1): + for i in range(0, l + 1): + initial_alignment = self.best_model2_alignment(sentence_pair, j, i) + potential_alignment = self.hillclimb(initial_alignment, j) + neighbors = self.neighboring(potential_alignment, j) + sampled_alignments.update(neighbors) + if potential_alignment.score > best_alignment.score: + best_alignment = potential_alignment + + return sampled_alignments, best_alignment + + def best_model2_alignment(self, sentence_pair, j_pegged=None, i_pegged=0): + """ + Finds the best alignment according to IBM Model 2 + + Used as a starting point for hill climbing in Models 3 and + above, because it is easier to compute than the best alignments + in higher models + + :param sentence_pair: Source and target language sentence pair + to be word-aligned + :type sentence_pair: AlignedSent + + :param j_pegged: If specified, the alignment point of j_pegged + will be fixed to i_pegged + :type j_pegged: int + + :param i_pegged: Alignment point to j_pegged + :type i_pegged: int + """ + src_sentence = [None] + sentence_pair.mots + trg_sentence = ["UNUSED"] + sentence_pair.words # 1-indexed + + l = len(src_sentence) - 1 # exclude NULL + m = len(trg_sentence) - 1 + + alignment = [0] * (m + 1) # init all alignments to NULL + cepts = [[] for i in range(l + 1)] # init all cepts to empty list + + for j in range(1, m + 1): + if j == j_pegged: + # use the pegged alignment instead of searching for best one + best_i = i_pegged + else: + best_i = 0 + max_alignment_prob = IBMModel.MIN_PROB + t = trg_sentence[j] + + for i in range(0, l + 1): + s = src_sentence[i] + alignment_prob = ( + self.translation_table[t][s] * self.alignment_table[i][j][l][m] + ) + + if alignment_prob >= max_alignment_prob: + max_alignment_prob = alignment_prob + best_i = i + + alignment[j] = best_i + cepts[best_i].append(j) + + return AlignmentInfo( + tuple(alignment), tuple(src_sentence), tuple(trg_sentence), cepts + ) + + def hillclimb(self, alignment_info, j_pegged=None): + """ + Starting from the alignment in ``alignment_info``, look at + neighboring alignments iteratively for the best one + + There is no guarantee that the best alignment in the alignment + space will be found, because the algorithm might be stuck in a + local maximum. 
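+
+    Each iteration scores every neighboring alignment with
+    ``prob_t_a_given_s`` and moves to the best one; the search stops as
+    soon as no neighbor improves on the current alignment.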
+ + :param j_pegged: If specified, the search will be constrained to + alignments where ``j_pegged`` remains unchanged + :type j_pegged: int + + :return: The best alignment found from hill climbing + :rtype: AlignmentInfo + """ + alignment = alignment_info # alias with shorter name + max_probability = self.prob_t_a_given_s(alignment) + + while True: + old_alignment = alignment + for neighbor_alignment in self.neighboring(alignment, j_pegged): + neighbor_probability = self.prob_t_a_given_s(neighbor_alignment) + + if neighbor_probability > max_probability: + alignment = neighbor_alignment + max_probability = neighbor_probability + + if alignment == old_alignment: + # Until there are no better alignments + break + + alignment.score = max_probability + return alignment + + def neighboring(self, alignment_info, j_pegged=None): + """ + Determine the neighbors of ``alignment_info``, obtained by + moving or swapping one alignment point + + :param j_pegged: If specified, neighbors that have a different + alignment point from j_pegged will not be considered + :type j_pegged: int + + :return: A set neighboring alignments represented by their + ``AlignmentInfo`` + :rtype: set(AlignmentInfo) + """ + neighbors = set() + + l = len(alignment_info.src_sentence) - 1 # exclude NULL + m = len(alignment_info.trg_sentence) - 1 + original_alignment = alignment_info.alignment + original_cepts = alignment_info.cepts + + for j in range(1, m + 1): + if j != j_pegged: + # Add alignments that differ by one alignment point + for i in range(0, l + 1): + new_alignment = list(original_alignment) + new_cepts = deepcopy(original_cepts) + old_i = original_alignment[j] + + # update alignment + new_alignment[j] = i + + # update cepts + insort_left(new_cepts[i], j) + new_cepts[old_i].remove(j) + + new_alignment_info = AlignmentInfo( + tuple(new_alignment), + alignment_info.src_sentence, + alignment_info.trg_sentence, + new_cepts, + ) + neighbors.add(new_alignment_info) + + for j in range(1, m + 1): + if j != j_pegged: + # Add alignments that have two alignment points swapped + for other_j in range(1, m + 1): + if other_j != j_pegged and other_j != j: + new_alignment = list(original_alignment) + new_cepts = deepcopy(original_cepts) + other_i = original_alignment[other_j] + i = original_alignment[j] + + # update alignments + new_alignment[j] = other_i + new_alignment[other_j] = i + + # update cepts + new_cepts[other_i].remove(other_j) + insort_left(new_cepts[other_i], j) + new_cepts[i].remove(j) + insort_left(new_cepts[i], other_j) + + new_alignment_info = AlignmentInfo( + tuple(new_alignment), + alignment_info.src_sentence, + alignment_info.trg_sentence, + new_cepts, + ) + neighbors.add(new_alignment_info) + + return neighbors + + def maximize_lexical_translation_probabilities(self, counts): + for t, src_words in counts.t_given_s.items(): + for s in src_words: + estimate = counts.t_given_s[t][s] / counts.any_t_given_s[s] + self.translation_table[t][s] = max(estimate, IBMModel.MIN_PROB) + + def maximize_fertility_probabilities(self, counts): + for phi, src_words in counts.fertility.items(): + for s in src_words: + estimate = counts.fertility[phi][s] / counts.fertility_for_any_phi[s] + self.fertility_table[phi][s] = max(estimate, IBMModel.MIN_PROB) + + def maximize_null_generation_probabilities(self, counts): + p1_estimate = counts.p1 / (counts.p1 + counts.p0) + p1_estimate = max(p1_estimate, IBMModel.MIN_PROB) + # Clip p1 if it is too large, because p0 = 1 - p1 should not be + # smaller than MIN_PROB + self.p1 = min(p1_estimate, 
1 - IBMModel.MIN_PROB) + + def prob_of_alignments(self, alignments): + probability = 0 + for alignment_info in alignments: + probability += self.prob_t_a_given_s(alignment_info) + return probability + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + + All required information is assumed to be in ``alignment_info`` + and self. + + Derived classes should override this method + """ + return 0.0 + + +class AlignmentInfo: + """ + Helper data object for training IBM Models 3 and up + + Read-only. For a source sentence and its counterpart in the target + language, this class holds information about the sentence pair's + alignment, cepts, and fertility. + + Warning: Alignments are one-indexed here, in contrast to + nltk.translate.Alignment and AlignedSent, which are zero-indexed + This class is not meant to be used outside of IBM models. + """ + + def __init__(self, alignment, src_sentence, trg_sentence, cepts): + if not isinstance(alignment, tuple): + raise TypeError( + "The alignment must be a tuple because it is used " + "to uniquely identify AlignmentInfo objects." + ) + + self.alignment = alignment + """ + tuple(int): Alignment function. ``alignment[j]`` is the position + in the source sentence that is aligned to the position j in the + target sentence. + """ + + self.src_sentence = src_sentence + """ + tuple(str): Source sentence referred to by this object. + Should include NULL token (None) in index 0. + """ + + self.trg_sentence = trg_sentence + """ + tuple(str): Target sentence referred to by this object. + Should have a dummy element in index 0 so that the first word + starts from index 1. + """ + + self.cepts = cepts + """ + list(list(int)): The positions of the target words, in + ascending order, aligned to a source word position. For example, + cepts[4] = (2, 3, 7) means that words in positions 2, 3 and 7 + of the target sentence are aligned to the word in position 4 of + the source sentence + """ + + self.score = None + """ + float: Optional. 
Probability of alignment, as defined by the + IBM model that assesses this alignment + """ + + def fertility_of_i(self, i): + """ + Fertility of word in position ``i`` of the source sentence + """ + return len(self.cepts[i]) + + def is_head_word(self, j): + """ + :return: Whether the word in position ``j`` of the target + sentence is a head word + """ + i = self.alignment[j] + return self.cepts[i][0] == j + + def center_of_cept(self, i): + """ + :return: The ceiling of the average positions of the words in + the tablet of cept ``i``, or 0 if ``i`` is None + """ + if i is None: + return 0 + + average_position = sum(self.cepts[i]) / len(self.cepts[i]) + return int(ceil(average_position)) + + def previous_cept(self, j): + """ + :return: The previous cept of ``j``, or None if ``j`` belongs to + the first cept + """ + i = self.alignment[j] + if i == 0: + raise ValueError( + "Words aligned to NULL cannot have a previous " + "cept because NULL has no position" + ) + previous_cept = i - 1 + while previous_cept > 0 and self.fertility_of_i(previous_cept) == 0: + previous_cept -= 1 + + if previous_cept <= 0: + previous_cept = None + return previous_cept + + def previous_in_tablet(self, j): + """ + :return: The position of the previous word that is in the same + tablet as ``j``, or None if ``j`` is the first word of the + tablet + """ + i = self.alignment[j] + tablet_position = self.cepts[i].index(j) + if tablet_position == 0: + return None + return self.cepts[i][tablet_position - 1] + + def zero_indexed_alignment(self): + """ + :return: Zero-indexed alignment, suitable for use in external + ``nltk.translate`` modules like ``nltk.translate.Alignment`` + :rtype: list(tuple) + """ + zero_indexed_alignment = [] + for j in range(1, len(self.trg_sentence)): + i = self.alignment[j] - 1 + if i < 0: + i = None # alignment to NULL token + zero_indexed_alignment.append((j - 1, i)) + return zero_indexed_alignment + + def __eq__(self, other): + return self.alignment == other.alignment + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.alignment) + + +class Counts: + """ + Data object to store counts of various parameters during training + """ + + def __init__(self): + self.t_given_s = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.any_t_given_s = defaultdict(lambda: 0.0) + self.p0 = 0.0 + self.p1 = 0.0 + self.fertility = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.fertility_for_any_phi = defaultdict(lambda: 0.0) + + def update_lexical_translation(self, count, alignment_info, j): + i = alignment_info.alignment[j] + t = alignment_info.trg_sentence[j] + s = alignment_info.src_sentence[i] + self.t_given_s[t][s] += count + self.any_t_given_s[s] += count + + def update_null_generation(self, count, alignment_info): + m = len(alignment_info.trg_sentence) - 1 + fertility_of_null = alignment_info.fertility_of_i(0) + self.p1 += fertility_of_null * count + self.p0 += (m - 2 * fertility_of_null) * count + + def update_fertility(self, count, alignment_info): + for i in range(0, len(alignment_info.src_sentence)): + s = alignment_info.src_sentence[i] + phi = alignment_info.fertility_of_i(i) + self.fertility[phi][s] += count + self.fertility_for_any_phi[s] += count diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/metrics.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..88444087f65395428c87a6c5d805c682958b6e55 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/nltk/translate/metrics.py @@ -0,0 +1,41 @@ +# Natural Language Toolkit: Translation metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Will Zhang +# Guan Gui +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + + +def alignment_error_rate(reference, hypothesis, possible=None): + """ + Return the Alignment Error Rate (AER) of an alignment + with respect to a "gold standard" reference alignment. + Return an error rate between 0.0 (perfect alignment) and 1.0 (no + alignment). + + >>> from nltk.translate import Alignment + >>> ref = Alignment([(0, 0), (1, 1), (2, 2)]) + >>> test = Alignment([(0, 0), (1, 2), (2, 1)]) + >>> alignment_error_rate(ref, test) # doctest: +ELLIPSIS + 0.6666666666666667 + + :type reference: Alignment + :param reference: A gold standard alignment (sure alignments) + :type hypothesis: Alignment + :param hypothesis: A hypothesis alignment (aka. candidate alignments) + :type possible: Alignment or None + :param possible: A gold standard reference of possible alignments + (defaults to *reference* if None) + :rtype: float or None + """ + + if possible is None: + possible = reference + else: + assert reference.issubset(possible) # sanity check + + return 1.0 - (len(hypothesis & reference) + len(hypothesis & possible)) / float( + len(hypothesis) + len(reference) + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/phrase_based.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/phrase_based.py new file mode 100644 index 0000000000000000000000000000000000000000..3fd85109ad26055023c502d6bd233a220d28e7e4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/phrase_based.py @@ -0,0 +1,193 @@ +# Natural Language Toolkit: Phrase Extraction Algorithm +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Liling Tan, Fredrik Hedman, Petra Barancikova +# URL: +# For license information, see LICENSE.TXT + + +def extract( + f_start, + f_end, + e_start, + e_end, + alignment, + f_aligned, + srctext, + trgtext, + srclen, + trglen, + max_phrase_length, +): + """ + This function checks for alignment point consistency and extracts + phrases using the chunk of consistent phrases. + + A phrase pair (e, f ) is consistent with an alignment A if and only if: + + (i) No English words in the phrase pair are aligned to words outside it. + + ∀e i ∈ e, (e i , f j ) ∈ A ⇒ f j ∈ f + + (ii) No Foreign words in the phrase pair are aligned to words outside it. + + ∀f j ∈ f , (e i , f j ) ∈ A ⇒ e i ∈ e + + (iii) The phrase pair contains at least one alignment point. + + ∃e i ∈ e ̄ , f j ∈ f ̄ s.t. (e i , f j ) ∈ A + + :type f_start: int + :param f_start: Starting index of the possible foreign language phrases + :type f_end: int + :param f_end: End index of the possible foreign language phrases + :type e_start: int + :param e_start: Starting index of the possible source language phrases + :type e_end: int + :param e_end: End index of the possible source language phrases + :type srctext: list + :param srctext: The source language tokens, a list of string. + :type trgtext: list + :param trgtext: The target language tokens, a list of string. + :type srclen: int + :param srclen: The number of tokens in the source language tokens. + :type trglen: int + :param trglen: The number of tokens in the target language tokens. + """ + + if f_end < 0: # 0-based indexing. + return {} + # Check if alignment points are consistent. 
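+    # The pair is rejected if any alignment point falls inside the foreign
+    # span [f_start, f_end] while its source index lies outside [e_start, e_end].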
+ for e, f in alignment: + if (f_start <= f <= f_end) and (e < e_start or e > e_end): + return {} + + # Add phrase pairs (incl. additional unaligned f) + phrases = set() + fs = f_start + while True: + fe = min(f_end, f_start + max_phrase_length - 1) + while True: + # add phrase pair ([e_start, e_end], [fs, fe]) to set E + # Need to +1 in range to include the end-point. + src_phrase = " ".join(srctext[e_start : e_end + 1]) + trg_phrase = " ".join(trgtext[fs : fe + 1]) + # Include more data for later ordering. + phrases.add(((e_start, e_end + 1), (fs, fe + 1), src_phrase, trg_phrase)) + fe += 1 + if fe in f_aligned or fe >= trglen: + break + fs -= 1 + if fs in f_aligned or fs < 0: + break + return phrases + + +def phrase_extraction(srctext, trgtext, alignment, max_phrase_length=0): + """ + Phrase extraction algorithm extracts all consistent phrase pairs from + a word-aligned sentence pair. + + The idea is to loop over all possible source language (e) phrases and find + the minimal foreign phrase (f) that matches each of them. Matching is done + by identifying all alignment points for the source phrase and finding the + shortest foreign phrase that includes all the foreign counterparts for the + source words. + + In short, a phrase alignment has to + (a) contain all alignment points for all covered words + (b) contain at least one alignment point + + >>> srctext = "michael assumes that he will stay in the house" + >>> trgtext = "michael geht davon aus , dass er im haus bleibt" + >>> alignment = [(0,0), (1,1), (1,2), (1,3), (2,5), (3,6), (4,9), + ... (5,9), (6,7), (7,7), (8,8)] + >>> phrases = phrase_extraction(srctext, trgtext, alignment) + >>> for i in sorted(phrases): + ... print(i) + ... + ((0, 1), (0, 1), 'michael', 'michael') + ((0, 2), (0, 4), 'michael assumes', 'michael geht davon aus') + ((0, 2), (0, 5), 'michael assumes', 'michael geht davon aus ,') + ((0, 3), (0, 6), 'michael assumes that', 'michael geht davon aus , dass') + ((0, 4), (0, 7), 'michael assumes that he', 'michael geht davon aus , dass er') + ((0, 9), (0, 10), 'michael assumes that he will stay in the house', 'michael geht davon aus , dass er im haus bleibt') + ((1, 2), (1, 4), 'assumes', 'geht davon aus') + ((1, 2), (1, 5), 'assumes', 'geht davon aus ,') + ((1, 3), (1, 6), 'assumes that', 'geht davon aus , dass') + ((1, 4), (1, 7), 'assumes that he', 'geht davon aus , dass er') + ((1, 9), (1, 10), 'assumes that he will stay in the house', 'geht davon aus , dass er im haus bleibt') + ((2, 3), (4, 6), 'that', ', dass') + ((2, 3), (5, 6), 'that', 'dass') + ((2, 4), (4, 7), 'that he', ', dass er') + ((2, 4), (5, 7), 'that he', 'dass er') + ((2, 9), (4, 10), 'that he will stay in the house', ', dass er im haus bleibt') + ((2, 9), (5, 10), 'that he will stay in the house', 'dass er im haus bleibt') + ((3, 4), (6, 7), 'he', 'er') + ((3, 9), (6, 10), 'he will stay in the house', 'er im haus bleibt') + ((4, 6), (9, 10), 'will stay', 'bleibt') + ((4, 9), (7, 10), 'will stay in the house', 'im haus bleibt') + ((6, 8), (7, 8), 'in the', 'im') + ((6, 9), (7, 9), 'in the house', 'im haus') + ((8, 9), (8, 9), 'house', 'haus') + + :type srctext: str + :param srctext: The sentence string from the source language. + :type trgtext: str + :param trgtext: The sentence string from the target language. + :type alignment: list(tuple) + :param alignment: The word alignment outputs as list of tuples, where + the first elements of tuples are the source words' indices and + second elements are the target words' indices. 
This is also the output
+        format of nltk.translate.ibm1.
+    :rtype: set(tuple)
+    :return: A set of tuples, one per extracted phrase pair. Each tuple is
+        made up of (i) its source location, (ii) its target location,
+        (iii) the source phrase and (iv) the target phrase. Together they
+        represent all the possible phrases extracted from the word alignments.
+    :type max_phrase_length: int
+    :param max_phrase_length: maximum phrase length; if 0 or not specified,
+        it defaults to the length of the longer sentence (srctext or trgtext).
+    """
+
+    srctext = srctext.split()  # e
+    trgtext = trgtext.split()  # f
+    srclen = len(srctext)  # len(e)
+    trglen = len(trgtext)  # len(f)
+    # Keeps an index of which target (foreign) words are aligned.
+    f_aligned = [j for _, j in alignment]
+    max_phrase_length = max_phrase_length or max(srclen, trglen)
+
+    # set of phrase pairs BP
+    bp = set()
+
+    for e_start in range(srclen):
+        max_idx = min(srclen, e_start + max_phrase_length)
+        for e_end in range(e_start, max_idx):
+            # // find the minimally matching foreign phrase
+            # (f start , f end ) = ( length(f), 0 )
+            # f_start ∈ [0, len(f) - 1]; f_end ∈ [0, len(f) - 1]
+            f_start, f_end = trglen - 1, -1  # 0-based indexing
+
+            for e, f in alignment:
+                if e_start <= e <= e_end:
+                    f_start = min(f, f_start)
+                    f_end = max(f, f_end)
+            # add extract (f start , f end , e start , e end ) to set BP
+            phrases = extract(
+                f_start,
+                f_end,
+                e_start,
+                e_end,
+                alignment,
+                f_aligned,
+                srctext,
+                trgtext,
+                srclen,
+                trglen,
+                max_phrase_length,
+            )
+            if phrases:
+                bp.update(phrases)
+    return bp
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/ribes_score.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ribes_score.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5d0bb5f14590082fb74e4a2c3613a40b6e168f1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ribes_score.py
@@ -0,0 +1,330 @@
+# Natural Language Toolkit: RIBES Score
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Contributors: Katsuhito Sudoh, Liling Tan, Kasramvd, J.F.Sebastian
+#               Mark Byers, ekhumoro, P. Ortiz
+# URL:
+# For license information, see LICENSE.TXT
+""" RIBES score implementation """
+
+import math
+from itertools import islice
+
+from nltk.util import choose, ngrams
+
+
+def sentence_ribes(references, hypothesis, alpha=0.25, beta=0.10):
+    """
+    The RIBES (Rank-based Intuitive Bilingual Evaluation Score) from
+    Hideki Isozaki, Tsutomu Hirao, Kevin Duh, Katsuhito Sudoh and
+    Hajime Tsukada. 2010. "Automatic Evaluation of Translation Quality for
+    Distant Language Pairs". In Proceedings of EMNLP.
+    https://www.aclweb.org/anthology/D/D10/D10-1092.pdf
+
+    The generic RIBES score used in shared tasks, e.g. the Workshop on
+    Asian Translation (WAT), uses the following calculation:
+
+        RIBES = kendall_tau * (p1**alpha) * (bp**beta)
+
+    Please note that this re-implementation differs from the official
+    RIBES implementation and, though it emulates the results described
+    in the original paper, there are further optimizations implemented
+    in the official RIBES script.
+
+    Users are encouraged to use the official RIBES script instead of this
+    implementation when evaluating their machine translation system. Refer
+    to https://www.kecl.ntt.co.jp/icl/lirg/ribes/ for the official script.
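+
+    For each reference, the score computed in the loop below combines the
+    rank correlation with two penalty terms, mirroring the code in this
+    function:
+
+        worder = word_rank_alignment(reference, hypothesis)
+        nkt = kendall_tau(worder)
+        bp = min(1.0, math.exp(1.0 - len(reference) / len(hypothesis)))
+        p1 = len(worder) / len(hypothesis)
+        RIBES = nkt * (p1**alpha) * (bp**beta)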
+ + :param references: a list of reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param alpha: hyperparameter used as a prior for the unigram precision. + :type alpha: float + :param beta: hyperparameter used as a prior for the brevity penalty. + :type beta: float + :return: The best ribes score from one of the references. + :rtype: float + """ + best_ribes = -1.0 + # Calculates RIBES for each reference and returns the best score. + for reference in references: + # Collects the *worder* from the ranked correlation alignments. + worder = word_rank_alignment(reference, hypothesis) + nkt = kendall_tau(worder) + + # Calculates the brevity penalty + bp = min(1.0, math.exp(1.0 - len(reference) / len(hypothesis))) + + # Calculates the unigram precision, *p1* + p1 = len(worder) / len(hypothesis) + + _ribes = nkt * (p1**alpha) * (bp**beta) + + if _ribes > best_ribes: # Keeps the best score. + best_ribes = _ribes + + return best_ribes + + +def corpus_ribes(list_of_references, hypotheses, alpha=0.25, beta=0.10): + """ + This function "calculates RIBES for a system output (hypothesis) with + multiple references, and returns "best" score among multi-references and + individual scores. The scores are corpus-wise, i.e., averaged by the number + of sentences." (c.f. RIBES version 1.03.1 code). + + Different from BLEU's micro-average precision, RIBES calculates the + macro-average precision by averaging the best RIBES score for each pair of + hypothesis and its corresponding references + + >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', + ... 'interested', 'in', 'world', 'history'] + >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', + ... 'because', 'he', 'read', 'the', 'book'] + + >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] + >>> hypotheses = [hyp1, hyp2] + >>> round(corpus_ribes(list_of_references, hypotheses),4) + 0.3597 + + :param references: a corpus of lists of reference sentences, w.r.t. hypotheses + :type references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param alpha: hyperparameter used as a prior for the unigram precision. + :type alpha: float + :param beta: hyperparameter used as a prior for the brevity penalty. + :type beta: float + :return: The best ribes score from one of the references. + :rtype: float + """ + corpus_best_ribes = 0.0 + # Iterate through each hypothesis and their corresponding references. 
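+    # Macro-average: sum the best per-sentence RIBES for each pair, then
+    # divide by the number of hypotheses.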
+ for references, hypothesis in zip(list_of_references, hypotheses): + corpus_best_ribes += sentence_ribes(references, hypothesis, alpha, beta) + return corpus_best_ribes / len(hypotheses) + + +def position_of_ngram(ngram, sentence): + """ + This function returns the position of the first instance of the ngram + appearing in a sentence. + + Note that one could also use string as follows but the code is a little + convoluted with type casting back and forth: + + char_pos = ' '.join(sent)[:' '.join(sent).index(' '.join(ngram))] + word_pos = char_pos.count(' ') + + Another way to conceive this is: + + return next(i for i, ng in enumerate(ngrams(sentence, len(ngram))) + if ng == ngram) + + :param ngram: The ngram that needs to be searched + :type ngram: tuple + :param sentence: The list of tokens to search from. + :type sentence: list(str) + """ + # Iterates through the ngrams in sentence. + for i, sublist in enumerate(ngrams(sentence, len(ngram))): + # Returns the index of the word when ngram matches. + if ngram == sublist: + return i + + +def word_rank_alignment(reference, hypothesis, character_based=False): + """ + This is the word rank alignment algorithm described in the paper to produce + the *worder* list, i.e. a list of word indices of the hypothesis word orders + w.r.t. the list of reference words. + + Below is (H0, R0) example from the Isozaki et al. 2010 paper, + note the examples are indexed from 1 but the results here are indexed from 0: + + >>> ref = str('he was interested in world history because he ' + ... 'read the book').split() + >>> hyp = str('he read the book because he was interested in world ' + ... 'history').split() + >>> word_rank_alignment(ref, hyp) + [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] + + The (H1, R1) example from the paper, note the 0th index: + + >>> ref = 'John hit Bob yesterday'.split() + >>> hyp = 'Bob hit John yesterday'.split() + >>> word_rank_alignment(ref, hyp) + [2, 1, 0, 3] + + Here is the (H2, R2) example from the paper, note the 0th index here too: + + >>> ref = 'the boy read the book'.split() + >>> hyp = 'the book was read by the boy'.split() + >>> word_rank_alignment(ref, hyp) + [3, 4, 2, 0, 1] + + :param reference: a reference sentence + :type reference: list(str) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + """ + worder = [] + hyp_len = len(hypothesis) + # Stores a list of possible ngrams from the reference sentence. + # This is used for matching context window later in the algorithm. + ref_ngrams = [] + hyp_ngrams = [] + for n in range(1, len(reference) + 1): + for ng in ngrams(reference, n): + ref_ngrams.append(ng) + for ng in ngrams(hypothesis, n): + hyp_ngrams.append(ng) + for i, h_word in enumerate(hypothesis): + # If word is not in the reference, continue. + if h_word not in reference: + continue + # If we can determine one-to-one word correspondence for unigrams that + # only appear once in both the reference and hypothesis. + elif hypothesis.count(h_word) == reference.count(h_word) == 1: + worder.append(reference.index(h_word)) + else: + max_window_size = max(i, hyp_len - i + 1) + for window in range(1, max_window_size): + if i + window < hyp_len: # If searching the right context is possible. + # Retrieve the right context window. + right_context_ngram = tuple(islice(hypothesis, i, i + window + 1)) + num_times_in_ref = ref_ngrams.count(right_context_ngram) + num_times_in_hyp = hyp_ngrams.count(right_context_ngram) + # If ngram appears only once in both ref and hyp. 
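+                    # A context ngram that occurs exactly once in both sentences
+                    # pins this hypothesis word to an unambiguous reference position.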
+                    if num_times_in_ref == num_times_in_hyp == 1:
+                        # Find the position of ngram that matched the reference.
+                        pos = position_of_ngram(right_context_ngram, reference)
+                        worder.append(pos)  # Add the positions of the ngram.
+                        break
+                if window <= i:  # If searching the left context is possible.
+                    # Retrieve the left context window.
+                    left_context_ngram = tuple(islice(hypothesis, i - window, i + 1))
+                    num_times_in_ref = ref_ngrams.count(left_context_ngram)
+                    num_times_in_hyp = hyp_ngrams.count(left_context_ngram)
+                    if num_times_in_ref == num_times_in_hyp == 1:
+                        # Find the position of ngram that matched the reference.
+                        pos = position_of_ngram(left_context_ngram, reference)
+                        # Add the positions of the ngram.
+                        worder.append(pos + len(left_context_ngram) - 1)
+                        break
+    return worder
+
+
+def find_increasing_sequences(worder):
+    """
+    Given the *worder* list, this function groups monotonic +1 sequences.
+
+    >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5]
+    >>> list(find_increasing_sequences(worder))
+    [(7, 8, 9, 10), (0, 1, 2, 3, 4, 5)]
+
+    :param worder: The worder list output from word_rank_alignment
+    :type worder: list(int)
+    """
+    items = iter(worder)
+    a, b = None, next(items, None)
+    result = [b]
+    while b is not None:
+        a, b = b, next(items, None)
+        if b is not None and a + 1 == b:
+            result.append(b)
+        else:
+            if len(result) > 1:
+                yield tuple(result)
+            result = [b]
+
+
+def kendall_tau(worder, normalize=True):
+    """
+    Calculates the Kendall's Tau correlation coefficient given the *worder*
+    list of word alignments from word_rank_alignment(), using the formula:
+
+        tau = 2 * num_increasing_pairs / num_possible_pairs - 1
+
+    Note that the number of increasing pairs can be discontinuous in the
+    *worder* list and each increasing sequence can be tabulated as
+    choose(len(seq), 2) increasing pairs, e.g.
+
+    >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5]
+    >>> number_possible_pairs = choose(len(worder), 2)
+    >>> round(kendall_tau(worder, normalize=False),3)
+    -0.236
+    >>> round(kendall_tau(worder),3)
+    0.382
+
+    :param worder: The worder list output from word_rank_alignment
+    :type worder: list(int)
+    :param normalize: Flag to indicate normalization to between 0.0 and 1.0.
+    :type normalize: boolean
+    :return: The Kendall's Tau correlation coefficient.
+    :rtype: float
+    """
+    worder_len = len(worder)
+    # With worder_len < 2, `choose(worder_len, 2)` will be 0.
+    # As we divide by this, it will give a ZeroDivisionError.
+    # To avoid this, we can just return the lowest possible score.
+    if worder_len < 2:
+        tau = -1
+    else:
+        # Extract the groups of increasing/monotonic sequences.
+        increasing_sequences = find_increasing_sequences(worder)
+        # Calculate no. of increasing_pairs in *worder* list.
+        num_increasing_pairs = sum(choose(len(seq), 2) for seq in increasing_sequences)
+        # Calculate no. of possible pairs.
+        num_possible_pairs = choose(worder_len, 2)
+        # Kendall's Tau computation.
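+        # Rescale the fraction of increasing pairs from [0, 1] to [-1, +1].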
+        tau = 2 * num_increasing_pairs / num_possible_pairs - 1
+    if normalize:  # If normalized, the tau output falls between 0.0 and 1.0
+        return (tau + 1) / 2
+    else:  # Otherwise, the tau output falls between -1.0 and +1.0
+        return tau
+
+
+def spearman_rho(worder, normalize=True):
+    """
+    Calculates the Spearman's Rho correlation coefficient given the *worder*
+    list of word alignments from word_rank_alignment(), using the formula:
+
+        rho = 1 - sum(d**2) / choose(len(worder)+1, 3)
+
+    where d is the difference between each rank in the *worder* list and the
+    corresponding original word index from the reference sentence.
+
+    Using the (H0, R0) and (H5, R5) example from the paper:
+
+    >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5]
+    >>> round(spearman_rho(worder, normalize=False), 3)
+    -0.591
+    >>> round(spearman_rho(worder), 3)
+    0.205
+
+    :param worder: The worder list output from word_rank_alignment
+    :type worder: list(int)
+    """
+    worder_len = len(worder)
+    sum_d_square = sum((wi - i) ** 2 for wi, i in zip(worder, range(worder_len)))
+    rho = 1 - sum_d_square / choose(worder_len + 1, 3)
+
+    if normalize:  # If normalized, the rho output falls between 0.0 and 1.0
+        return (rho + 1) / 2
+    else:  # Otherwise, the rho output falls between -1.0 and +1.0
+        return rho
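+
+
+if __name__ == "__main__":
+    # Quick illustrative sketch (not part of the doctests above): inspect the
+    # individual RIBES components for the (H0, R0) pair used in the docstrings,
+    # then combine them the same way sentence_ribes() does.
+    ref = "he was interested in world history because he read the book".split()
+    hyp = "he read the book because he was interested in world history".split()
+    worder = word_rank_alignment(ref, hyp)  # -> [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5]
+    nkt = kendall_tau(worder)  # normalized rank correlation
+    p1 = len(worder) / len(hyp)  # unigram precision
+    bp = min(1.0, math.exp(1.0 - len(ref) / len(hyp)))  # brevity penalty
+    print("RIBES =", nkt * (p1**0.25) * (bp**0.10))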