diff --git a/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..9ea18b4f23714bbfa1f9845e04fffb15646783ff --- /dev/null +++ b/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f38b925a7af0322ad68abc7db2ac055147a75f029045fe6c2876445f2535178 +size 33555627 diff --git a/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..bbf9f52a529710d0d7e19b7ecba9903c146a8c8f --- /dev/null +++ b/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91e9274825045ac22d62b393697bacccac6f6165f7c016168f9b976333c96ad0 +size 33555533 diff --git a/ckpts/universal/global_step40/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..ba4722ac2c35f0f56bf4d4353e1e4e6d66c09c74 --- /dev/null +++ b/ckpts/universal/global_step40/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4daa274fcea6f52ae6438d775e833cebb40dd2139690bb2745dd8e698c71dadb +size 33555627 diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/__init__.py b/venv/lib/python3.10/site-packages/nltk/cluster/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9df093cb0a7964ea43df052ac42fb46b6fbadee0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/cluster/__init__.py @@ -0,0 +1,92 @@ +# Natural Language Toolkit: Clusterers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +""" +This module contains a number of basic clustering algorithms. Clustering +describes the task of discovering groups of similar items with a large +collection. It is also describe as unsupervised machine learning, as the data +from which it learns is unannotated with class information, as is the case for +supervised learning. Annotated data is difficult and expensive to obtain in +the quantities required for the majority of supervised learning algorithms. +This problem, the knowledge acquisition bottleneck, is common to most natural +language processing tasks, thus fueling the need for quality unsupervised +approaches. + +This module contains a k-means clusterer, E-M clusterer and a group average +agglomerative clusterer (GAAC). All these clusterers involve finding good +cluster groupings for a set of vectors in multi-dimensional space. + +The K-means clusterer starts with k arbitrary chosen means then allocates each +vector to the cluster with the closest mean. It then recalculates the means of +each cluster as the centroid of the vectors in the cluster. This process +repeats until the cluster memberships stabilise. This is a hill-climbing +algorithm which may converge to a local maximum. Hence the clustering is +often repeated with random initial means and the most commonly occurring +output means are chosen. + +The GAAC clusterer starts with each of the *N* vectors as singleton clusters. 
+It then iteratively merges pairs of clusters which have the closest centroids. +This continues until there is only one cluster. The order of merges gives rise +to a dendrogram - a tree with the earlier merges lower than later merges. The +membership of a given number of clusters *c*, *1 <= c <= N*, can be found by +cutting the dendrogram at depth *c*. + +The Gaussian EM clusterer models the vectors as being produced by a mixture +of k Gaussian sources. The parameters of these sources (prior probability, +mean and covariance matrix) are then found to maximise the likelihood of the +given data. This is done with the expectation maximisation algorithm. It +starts with k arbitrarily chosen means, priors and covariance matrices. It +then calculates the membership probabilities for each vector in each of the +clusters - this is the 'E' step. The cluster parameters are then updated in +the 'M' step using the maximum likelihood estimate from the cluster membership +probabilities. This process continues until the likelihood of the data does +not significantly increase. + +They all extend the ClusterI interface which defines common operations +available with each clusterer. These operations include: + +- cluster: clusters a sequence of vectors +- classify: assign a vector to a cluster +- classification_probdist: give the probability distribution over cluster memberships + +The current existing classifiers also extend cluster.VectorSpace, an +abstract class which allows for singular value decomposition (SVD) and vector +normalisation. SVD is used to reduce the dimensionality of the vector space in +such a manner as to preserve as much of the variation as possible, by +reparameterising the axes in order of variability and discarding all bar the +first d dimensions. Normalisation ensures that vectors fall in the unit +hypersphere. + +Usage example (see also demo()):: + + from nltk import cluster + from nltk.cluster import euclidean_distance + from numpy import array + + vectors = [array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0]]] + + # initialise the clusterer (will also assign the vectors to clusters) + clusterer = cluster.KMeansClusterer(2, euclidean_distance) + clusterer.cluster(vectors, True) + + # classify a new vector + print(clusterer.classify(array([3, 3]))) + +Note that the vectors must use numpy array-like +objects. nltk_contrib.unimelb.tacohn.SparseArrays may be used for +efficiency when required. 
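The EM and GAAC clusterers follow the same calling pattern. A minimal
sketch, mirroring ``demo()`` in ``em.py`` and ``gaac.py`` (the vectors and
initial means below are illustrative toy values only)::

    from nltk.cluster import EMClusterer, GAAClusterer
    from numpy import array

    vectors = [array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]

    # EM: supply one initial mean per desired cluster
    em = EMClusterer([[4, 2], [4, 2.01]], bias=0.1)
    print(em.cluster(vectors, True))

    # GAAC: choose how many clusters to cut the dendrogram into
    gaac = GAAClusterer(2)
    print(gaac.cluster(vectors, True))
    print(gaac.classify(array([3, 3])))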
+""" + +from nltk.cluster.em import EMClusterer +from nltk.cluster.gaac import GAAClusterer +from nltk.cluster.kmeans import KMeansClusterer +from nltk.cluster.util import ( + Dendrogram, + VectorSpaceClusterer, + cosine_distance, + euclidean_distance, +) diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93177f1514cf0711f50b92b227419b6a3d3c0970 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fb3ec6bd3bac415b7702c39e5fc2e42b9e3f20b Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b780a9cf827d09aae76c8847271f22d1ce8dba80 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/em.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..276b04fc7d09e2c3d0646dcd002b3a3b77fa9090 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/gaac.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf4b5523933db456f250e20277a62a7ecd70304d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/kmeans.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02210f292a6503fadde4f345fc2cf02b26ebeee9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/cluster/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/api.py b/venv/lib/python3.10/site-packages/nltk/cluster/api.py new file mode 100644 index 0000000000000000000000000000000000000000..8da588408f83894b512166334197ec43b6899631 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/cluster/api.py @@ -0,0 +1,74 @@ +# Natural Language Toolkit: Clusterer Interfaces +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Porting: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod + +from nltk.probability import DictionaryProbDist + + +class ClusterI(metaclass=ABCMeta): + """ + Interface covering basic clustering functionality. + """ + + @abstractmethod + def cluster(self, vectors, assign_clusters=False): + """ + Assigns the vectors to clusters, learning the clustering parameters + from the data. 
Returns a cluster identifier for each vector. + """ + + @abstractmethod + def classify(self, token): + """ + Classifies the token into a cluster, setting the token's CLUSTER + parameter to that cluster identifier. + """ + + def likelihood(self, vector, label): + """ + Returns the likelihood (a float) of the token having the + corresponding cluster. + """ + if self.classify(vector) == label: + return 1.0 + else: + return 0.0 + + def classification_probdist(self, vector): + """ + Classifies the token into a cluster, returning + a probability distribution over the cluster identifiers. + """ + likelihoods = {} + sum = 0.0 + for cluster in self.cluster_names(): + likelihoods[cluster] = self.likelihood(vector, cluster) + sum += likelihoods[cluster] + for cluster in self.cluster_names(): + likelihoods[cluster] /= sum + return DictionaryProbDist(likelihoods) + + @abstractmethod + def num_clusters(self): + """ + Returns the number of clusters. + """ + + def cluster_names(self): + """ + Returns the names of the clusters. + :rtype: list + """ + return list(range(self.num_clusters())) + + def cluster_name(self, index): + """ + Returns the names of the cluster at index. + """ + return index diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/em.py b/venv/lib/python3.10/site-packages/nltk/cluster/em.py new file mode 100644 index 0000000000000000000000000000000000000000..cb46fe35700afed79b728336bd1f07c33ed50dcb --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/cluster/em.py @@ -0,0 +1,219 @@ +# Natural Language Toolkit: Expectation Maximization Clusterer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +try: + import numpy +except ImportError: + pass + +from nltk.cluster.util import VectorSpaceClusterer + + +class EMClusterer(VectorSpaceClusterer): + """ + The Gaussian EM clusterer models the vectors as being produced by + a mixture of k Gaussian sources. The parameters of these sources + (prior probability, mean and covariance matrix) are then found to + maximise the likelihood of the given data. This is done with the + expectation maximisation algorithm. It starts with k arbitrarily + chosen means, priors and covariance matrices. It then calculates + the membership probabilities for each vector in each of the + clusters; this is the 'E' step. The cluster parameters are then + updated in the 'M' step using the maximum likelihood estimate from + the cluster membership probabilities. This process continues until + the likelihood of the data does not significantly increase. + """ + + def __init__( + self, + initial_means, + priors=None, + covariance_matrices=None, + conv_threshold=1e-6, + bias=0.1, + normalise=False, + svd_dimensions=None, + ): + """ + Creates an EM clusterer with the given starting parameters, + convergence threshold and vector mangling parameters. 
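        For example, a construction sketch with explicit priors and
        per-cluster covariance matrices (all values are illustrative
        placeholders, not recommended settings)::

            import numpy
            from nltk.cluster import EMClusterer

            means = [[4, 2], [4, 2.01]]
            priors = numpy.array([0.5, 0.5])
            covariances = [numpy.identity(2) for _ in range(2)]
            clusterer = EMClusterer(
                means,
                priors=priors,
                covariance_matrices=covariances,
                conv_threshold=1e-6,
                bias=0.1,
            )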
+ + :param initial_means: the means of the gaussian cluster centers + :type initial_means: [seq of] numpy array or seq of SparseArray + :param priors: the prior probability for each cluster + :type priors: numpy array or seq of float + :param covariance_matrices: the covariance matrix for each cluster + :type covariance_matrices: [seq of] numpy array + :param conv_threshold: maximum change in likelihood before deemed + convergent + :type conv_threshold: int or float + :param bias: variance bias used to ensure non-singular covariance + matrices + :type bias: float + :param normalise: should vectors be normalised to length 1 + :type normalise: boolean + :param svd_dimensions: number of dimensions to use in reducing vector + dimensionsionality with SVD + :type svd_dimensions: int + """ + VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) + self._means = numpy.array(initial_means, numpy.float64) + self._num_clusters = len(initial_means) + self._conv_threshold = conv_threshold + self._covariance_matrices = covariance_matrices + self._priors = priors + self._bias = bias + + def num_clusters(self): + return self._num_clusters + + def cluster_vectorspace(self, vectors, trace=False): + assert len(vectors) > 0 + + # set the parameters to initial values + dimensions = len(vectors[0]) + means = self._means + priors = self._priors + if not priors: + priors = self._priors = ( + numpy.ones(self._num_clusters, numpy.float64) / self._num_clusters + ) + covariances = self._covariance_matrices + if not covariances: + covariances = self._covariance_matrices = [ + numpy.identity(dimensions, numpy.float64) + for i in range(self._num_clusters) + ] + + # do the E and M steps until the likelihood plateaus + lastl = self._loglikelihood(vectors, priors, means, covariances) + converged = False + + while not converged: + if trace: + print("iteration; loglikelihood", lastl) + # E-step, calculate hidden variables, h[i,j] + h = numpy.zeros((len(vectors), self._num_clusters), numpy.float64) + for i in range(len(vectors)): + for j in range(self._num_clusters): + h[i, j] = priors[j] * self._gaussian( + means[j], covariances[j], vectors[i] + ) + h[i, :] /= sum(h[i, :]) + + # M-step, update parameters - cvm, p, mean + for j in range(self._num_clusters): + covariance_before = covariances[j] + new_covariance = numpy.zeros((dimensions, dimensions), numpy.float64) + new_mean = numpy.zeros(dimensions, numpy.float64) + sum_hj = 0.0 + for i in range(len(vectors)): + delta = vectors[i] - means[j] + new_covariance += h[i, j] * numpy.multiply.outer(delta, delta) + sum_hj += h[i, j] + new_mean += h[i, j] * vectors[i] + covariances[j] = new_covariance / sum_hj + means[j] = new_mean / sum_hj + priors[j] = sum_hj / len(vectors) + + # bias term to stop covariance matrix being singular + covariances[j] += self._bias * numpy.identity(dimensions, numpy.float64) + + # calculate likelihood - FIXME: may be broken + l = self._loglikelihood(vectors, priors, means, covariances) + + # check for convergence + if abs(lastl - l) < self._conv_threshold: + converged = True + lastl = l + + def classify_vectorspace(self, vector): + best = None + for j in range(self._num_clusters): + p = self._priors[j] * self._gaussian( + self._means[j], self._covariance_matrices[j], vector + ) + if not best or p > best[0]: + best = (p, j) + return best[1] + + def likelihood_vectorspace(self, vector, cluster): + cid = self.cluster_names().index(cluster) + return self._priors[cluster] * self._gaussian( + self._means[cluster], self._covariance_matrices[cluster], 
vector + ) + + def _gaussian(self, mean, cvm, x): + m = len(mean) + assert cvm.shape == (m, m), "bad sized covariance matrix, %s" % str(cvm.shape) + try: + det = numpy.linalg.det(cvm) + inv = numpy.linalg.inv(cvm) + a = det**-0.5 * (2 * numpy.pi) ** (-m / 2.0) + dx = x - mean + print(dx, inv) + b = -0.5 * numpy.dot(numpy.dot(dx, inv), dx) + return a * numpy.exp(b) + except OverflowError: + # happens when the exponent is negative infinity - i.e. b = 0 + # i.e. the inverse of cvm is huge (cvm is almost zero) + return 0 + + def _loglikelihood(self, vectors, priors, means, covariances): + llh = 0.0 + for vector in vectors: + p = 0 + for j in range(len(priors)): + p += priors[j] * self._gaussian(means[j], covariances[j], vector) + llh += numpy.log(p) + return llh + + def __repr__(self): + return "" % list(self._means) + + +def demo(): + """ + Non-interactive demonstration of the clusterers with simple 2-D data. + """ + + from nltk import cluster + + # example from figure 14.10, page 519, Manning and Schutze + + vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]] + means = [[4, 2], [4, 2.01]] + + clusterer = cluster.EMClusterer(means, bias=0.1) + clusters = clusterer.cluster(vectors, True, trace=True) + + print("Clustered:", vectors) + print("As: ", clusters) + print() + + for c in range(2): + print("Cluster:", c) + print("Prior: ", clusterer._priors[c]) + print("Mean: ", clusterer._means[c]) + print("Covar: ", clusterer._covariance_matrices[c]) + print() + + # classify a new vector + vector = numpy.array([2, 2]) + print("classify(%s):" % vector, end=" ") + print(clusterer.classify(vector)) + + # show the classification probabilities + vector = numpy.array([2, 2]) + print("classification_probdist(%s):" % vector) + pdist = clusterer.classification_probdist(vector) + for sample in pdist.samples(): + print(f"{sample} => {pdist.prob(sample) * 100:.0f}%") + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/gaac.py b/venv/lib/python3.10/site-packages/nltk/cluster/gaac.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb9e2c51141ba915bf4defe2d8cdeadaa14e6b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/cluster/gaac.py @@ -0,0 +1,170 @@ +# Natural Language Toolkit: Group Average Agglomerative Clusterer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +try: + import numpy +except ImportError: + pass + +from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance + + +class GAAClusterer(VectorSpaceClusterer): + """ + The Group Average Agglomerative starts with each of the N vectors as singleton + clusters. It then iteratively merges pairs of clusters which have the + closest centroids. This continues until there is only one cluster. The + order of merges gives rise to a dendrogram: a tree with the earlier merges + lower than later merges. The membership of a given number of clusters c, 1 + <= c <= N, can be found by cutting the dendrogram at depth c. + + This clusterer uses the cosine similarity metric only, which allows for + efficient speed-up in the clustering process. 
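    A brief sketch of building the full dendrogram and then re-cutting it.
    This assumes ``update_clusters`` may be called directly, as
    ``cluster_vectorspace`` below does; the vectors are toy values taken
    from ``demo()``::

        import numpy
        from nltk.cluster import GAAClusterer

        vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]

        clusterer = GAAClusterer()     # default num_clusters=1: merge all the way down
        clusterer.cluster(vectors)
        clusterer.dendrogram().show()  # inspect the merge order
        clusterer.update_clusters(3)   # re-cut the same dendrogram into 3 clusters
        print(clusterer.classify(numpy.array([3, 3])))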
+ """ + + def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None): + VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) + self._num_clusters = num_clusters + self._dendrogram = None + self._groups_values = None + + def cluster(self, vectors, assign_clusters=False, trace=False): + # stores the merge order + self._dendrogram = Dendrogram( + [numpy.array(vector, numpy.float64) for vector in vectors] + ) + return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace) + + def cluster_vectorspace(self, vectors, trace=False): + # variables describing the initial situation + N = len(vectors) + cluster_len = [1] * N + cluster_count = N + index_map = numpy.arange(N) + + # construct the similarity matrix + dims = (N, N) + dist = numpy.ones(dims, dtype=float) * numpy.inf + for i in range(N): + for j in range(i + 1, N): + dist[i, j] = cosine_distance(vectors[i], vectors[j]) + + while cluster_count > max(self._num_clusters, 1): + i, j = numpy.unravel_index(dist.argmin(), dims) + if trace: + print("merging %d and %d" % (i, j)) + + # update similarities for merging i and j + self._merge_similarities(dist, cluster_len, i, j) + + # remove j + dist[:, j] = numpy.inf + dist[j, :] = numpy.inf + + # merge the clusters + cluster_len[i] = cluster_len[i] + cluster_len[j] + self._dendrogram.merge(index_map[i], index_map[j]) + cluster_count -= 1 + + # update the index map to reflect the indexes if we + # had removed j + index_map[j + 1 :] -= 1 + index_map[j] = N + + self.update_clusters(self._num_clusters) + + def _merge_similarities(self, dist, cluster_len, i, j): + # the new cluster i merged from i and j adopts the average of + # i and j's similarity to each other cluster, weighted by the + # number of points in the clusters i and j + i_weight = cluster_len[i] + j_weight = cluster_len[j] + weight_sum = i_weight + j_weight + + # update for x 0 + if self._should_normalise: + centroid = self._normalise(cluster[0]) + else: + centroid = numpy.array(cluster[0]) + for vector in cluster[1:]: + if self._should_normalise: + centroid += self._normalise(vector) + else: + centroid += vector + centroid /= len(cluster) + self._centroids.append(centroid) + self._num_clusters = len(self._centroids) + + def classify_vectorspace(self, vector): + best = None + for i in range(self._num_clusters): + centroid = self._centroids[i] + dist = cosine_distance(vector, centroid) + if not best or dist < best[0]: + best = (dist, i) + return best[1] + + def dendrogram(self): + """ + :return: The dendrogram representing the current clustering + :rtype: Dendrogram + """ + return self._dendrogram + + def num_clusters(self): + return self._num_clusters + + def __repr__(self): + return "" % self._num_clusters + + +def demo(): + """ + Non-interactive demonstration of the clusterers with simple 2-D data. 
+ """ + + from nltk.cluster import GAAClusterer + + # use a set of tokens with 2D indices + vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]] + + # test the GAAC clusterer with 4 clusters + clusterer = GAAClusterer(4) + clusters = clusterer.cluster(vectors, True) + + print("Clusterer:", clusterer) + print("Clustered:", vectors) + print("As:", clusters) + print() + + # show the dendrogram + clusterer.dendrogram().show() + + # classify a new vector + vector = numpy.array([3, 3]) + print("classify(%s):" % vector, end=" ") + print(clusterer.classify(vector)) + print() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/kmeans.py b/venv/lib/python3.10/site-packages/nltk/cluster/kmeans.py new file mode 100644 index 0000000000000000000000000000000000000000..6b0d02f7dc0178f5bb1406d7a71a07ae46acaa93 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/cluster/kmeans.py @@ -0,0 +1,231 @@ +# Natural Language Toolkit: K-Means Clusterer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +import copy +import random +import sys + +try: + import numpy +except ImportError: + pass + + +from nltk.cluster.util import VectorSpaceClusterer + + +class KMeansClusterer(VectorSpaceClusterer): + """ + The K-means clusterer starts with k arbitrary chosen means then allocates + each vector to the cluster with the closest mean. It then recalculates the + means of each cluster as the centroid of the vectors in the cluster. This + process repeats until the cluster memberships stabilise. This is a + hill-climbing algorithm which may converge to a local maximum. Hence the + clustering is often repeated with random initial means and the most + commonly occurring output means are chosen. 
+ """ + + def __init__( + self, + num_means, + distance, + repeats=1, + conv_test=1e-6, + initial_means=None, + normalise=False, + svd_dimensions=None, + rng=None, + avoid_empty_clusters=False, + ): + + """ + :param num_means: the number of means to use (may use fewer) + :type num_means: int + :param distance: measure of distance between two vectors + :type distance: function taking two vectors and returning a float + :param repeats: number of randomised clustering trials to use + :type repeats: int + :param conv_test: maximum variation in mean differences before + deemed convergent + :type conv_test: number + :param initial_means: set of k initial means + :type initial_means: sequence of vectors + :param normalise: should vectors be normalised to length 1 + :type normalise: boolean + :param svd_dimensions: number of dimensions to use in reducing vector + dimensionsionality with SVD + :type svd_dimensions: int + :param rng: random number generator (or None) + :type rng: Random + :param avoid_empty_clusters: include current centroid in computation + of next one; avoids undefined behavior + when clusters become empty + :type avoid_empty_clusters: boolean + """ + VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) + self._num_means = num_means + self._distance = distance + self._max_difference = conv_test + assert not initial_means or len(initial_means) == num_means + self._means = initial_means + assert repeats >= 1 + assert not (initial_means and repeats > 1) + self._repeats = repeats + self._rng = rng if rng else random.Random() + self._avoid_empty_clusters = avoid_empty_clusters + + def cluster_vectorspace(self, vectors, trace=False): + if self._means and self._repeats > 1: + print("Warning: means will be discarded for subsequent trials") + + meanss = [] + for trial in range(self._repeats): + if trace: + print("k-means trial", trial) + if not self._means or trial > 1: + self._means = self._rng.sample(list(vectors), self._num_means) + self._cluster_vectorspace(vectors, trace) + meanss.append(self._means) + + if len(meanss) > 1: + # sort the means first (so that different cluster numbering won't + # effect the distance comparison) + for means in meanss: + means.sort(key=sum) + + # find the set of means that's minimally different from the others + min_difference = min_means = None + for i in range(len(meanss)): + d = 0 + for j in range(len(meanss)): + if i != j: + d += self._sum_distances(meanss[i], meanss[j]) + if min_difference is None or d < min_difference: + min_difference, min_means = d, meanss[i] + + # use the best means + self._means = min_means + + def _cluster_vectorspace(self, vectors, trace=False): + if self._num_means < len(vectors): + # perform k-means clustering + converged = False + while not converged: + # assign the tokens to clusters based on minimum distance to + # the cluster means + clusters = [[] for m in range(self._num_means)] + for vector in vectors: + index = self.classify_vectorspace(vector) + clusters[index].append(vector) + + if trace: + print("iteration") + # for i in range(self._num_means): + # print ' mean', i, 'allocated', len(clusters[i]), 'vectors' + + # recalculate cluster means by computing the centroid of each cluster + new_means = list(map(self._centroid, clusters, self._means)) + + # measure the degree of change from the previous step for convergence + difference = self._sum_distances(self._means, new_means) + if difference < self._max_difference: + converged = True + + # remember the new means + self._means = new_means + + def 
classify_vectorspace(self, vector): + # finds the closest cluster centroid + # returns that cluster's index + best_distance = best_index = None + for index in range(len(self._means)): + mean = self._means[index] + dist = self._distance(vector, mean) + if best_distance is None or dist < best_distance: + best_index, best_distance = index, dist + return best_index + + def num_clusters(self): + if self._means: + return len(self._means) + else: + return self._num_means + + def means(self): + """ + The means used for clustering. + """ + return self._means + + def _sum_distances(self, vectors1, vectors2): + difference = 0.0 + for u, v in zip(vectors1, vectors2): + difference += self._distance(u, v) + return difference + + def _centroid(self, cluster, mean): + if self._avoid_empty_clusters: + centroid = copy.copy(mean) + for vector in cluster: + centroid += vector + return centroid / (1 + len(cluster)) + else: + if not len(cluster): + sys.stderr.write("Error: no centroid defined for empty cluster.\n") + sys.stderr.write( + "Try setting argument 'avoid_empty_clusters' to True\n" + ) + assert False + centroid = copy.copy(cluster[0]) + for vector in cluster[1:]: + centroid += vector + return centroid / len(cluster) + + def __repr__(self): + return "" % (self._means, self._repeats) + + +################################################################################# + + +def demo(): + # example from figure 14.9, page 517, Manning and Schutze + + from nltk.cluster import KMeansClusterer, euclidean_distance + + vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]] + means = [[4, 3], [5, 5]] + + clusterer = KMeansClusterer(2, euclidean_distance, initial_means=means) + clusters = clusterer.cluster(vectors, True, trace=True) + + print("Clustered:", vectors) + print("As:", clusters) + print("Means:", clusterer.means()) + print() + + vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]] + + # test k-means using the euclidean distance metric, 2 means and repeat + # clustering 10 times with random seeds + + clusterer = KMeansClusterer(2, euclidean_distance, repeats=10) + clusters = clusterer.cluster(vectors, True) + print("Clustered:", vectors) + print("As:", clusters) + print("Means:", clusterer.means()) + print() + + # classify a new vector + vector = numpy.array([3, 3]) + print("classify(%s):" % vector, end=" ") + print(clusterer.classify(vector)) + print() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/parse/corenlp.py b/venv/lib/python3.10/site-packages/nltk/parse/corenlp.py new file mode 100644 index 0000000000000000000000000000000000000000..5c3146d1a086d4e49a0eaae585e09cab4a267834 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/corenlp.py @@ -0,0 +1,800 @@ +# Natural Language Toolkit: Interface to the CoreNLP REST API. 
+# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dmitrijs Milajevs +# +# URL: +# For license information, see LICENSE.TXT + +import json +import os # required for doctests +import re +import socket +import time +from typing import List, Tuple + +from nltk.internals import _java_options, config_java, find_jar_iter, java +from nltk.parse.api import ParserI +from nltk.parse.dependencygraph import DependencyGraph +from nltk.tag.api import TaggerI +from nltk.tokenize.api import TokenizerI +from nltk.tree import Tree + +_stanford_url = "https://stanfordnlp.github.io/CoreNLP/" + + +class CoreNLPServerError(EnvironmentError): + """Exceptions associated with the Core NLP server.""" + + +def try_port(port=0): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.bind(("", port)) + + p = sock.getsockname()[1] + sock.close() + + return p + + +class CoreNLPServer: + + _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)\.(\d+)\.(\d+)-models\.jar" + _JAR = r"stanford-corenlp-(\d+)\.(\d+)\.(\d+)\.jar" + + def __init__( + self, + path_to_jar=None, + path_to_models_jar=None, + verbose=False, + java_options=None, + corenlp_options=None, + port=None, + ): + + if corenlp_options is None: + corenlp_options = ["-preload", "tokenize,ssplit,pos,lemma,parse,depparse"] + + jars = list( + find_jar_iter( + self._JAR, + path_to_jar, + env_vars=("CORENLP",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ) + ) + + # find the most recent code and model jar + stanford_jar = max(jars, key=lambda model_name: re.match(self._JAR, model_name)) + + if port is None: + try: + port = try_port(9000) + except OSError: + port = try_port() + corenlp_options.extend(["-port", str(port)]) + else: + try_port(port) + corenlp_options.extend(["-port", str(port)]) + + self.url = f"http://localhost:{port}" + + model_jar = max( + find_jar_iter( + self._MODEL_JAR_PATTERN, + path_to_models_jar, + env_vars=("CORENLP_MODELS",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ), + key=lambda model_name: re.match(self._MODEL_JAR_PATTERN, model_name), + ) + + self.verbose = verbose + + self._classpath = stanford_jar, model_jar + + self.corenlp_options = corenlp_options + self.java_options = java_options or ["-mx2g"] + + def start(self, stdout="devnull", stderr="devnull"): + """Starts the CoreNLP server + + :param stdout, stderr: Specifies where CoreNLP output is redirected. Valid values are 'devnull', 'stdout', 'pipe' + """ + import requests + + cmd = ["edu.stanford.nlp.pipeline.StanfordCoreNLPServer"] + + if self.corenlp_options: + cmd.extend(self.corenlp_options) + + # Configure java. + default_options = " ".join(_java_options) + config_java(options=self.java_options, verbose=self.verbose) + + try: + self.popen = java( + cmd, + classpath=self._classpath, + blocking=False, + stdout=stdout, + stderr=stderr, + ) + finally: + # Return java configurations to their default values. + config_java(options=default_options, verbose=self.verbose) + + # Check that the server is istill running. + returncode = self.popen.poll() + if returncode is not None: + _, stderrdata = self.popen.communicate() + raise CoreNLPServerError( + returncode, + "Could not start the server. 
" + "The error was: {}".format(stderrdata.decode("ascii")), + ) + + for i in range(30): + try: + response = requests.get(requests.compat.urljoin(self.url, "live")) + except requests.exceptions.ConnectionError: + time.sleep(1) + else: + if response.ok: + break + else: + raise CoreNLPServerError("Could not connect to the server.") + + for i in range(60): + try: + response = requests.get(requests.compat.urljoin(self.url, "ready")) + except requests.exceptions.ConnectionError: + time.sleep(1) + else: + if response.ok: + break + else: + raise CoreNLPServerError("The server is not ready.") + + def stop(self): + self.popen.terminate() + self.popen.wait() + + def __enter__(self): + self.start() + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stop() + return False + + +class GenericCoreNLPParser(ParserI, TokenizerI, TaggerI): + """Interface to the CoreNLP Parser.""" + + def __init__( + self, + url="http://localhost:9000", + encoding="utf8", + tagtype=None, + strict_json=True, + ): + import requests + + self.url = url + self.encoding = encoding + + if tagtype not in ["pos", "ner", None]: + raise ValueError("tagtype must be either 'pos', 'ner' or None") + + self.tagtype = tagtype + self.strict_json = strict_json + + self.session = requests.Session() + + def parse_sents(self, sentences, *args, **kwargs): + """Parse multiple sentences. + + Takes multiple sentences as a list where each sentence is a list of + words. Each sentence will be automatically tagged with this + CoreNLPParser instance's tagger. + + If a whitespace exists inside a token, then the token will be treated as + several tokens. + + :param sentences: Input sentences to parse + :type sentences: list(list(str)) + :rtype: iter(iter(Tree)) + """ + # Converting list(list(str)) -> list(str) + sentences = (" ".join(words) for words in sentences) + return self.raw_parse_sents(sentences, *args, **kwargs) + + def raw_parse(self, sentence, properties=None, *args, **kwargs): + """Parse a sentence. + + Takes a sentence as a string; before parsing, it will be automatically + tokenized and tagged by the CoreNLP Parser. + + :param sentence: Input sentence to parse + :type sentence: str + :rtype: iter(Tree) + """ + default_properties = {"tokenize.whitespace": "false"} + default_properties.update(properties or {}) + + return next( + self.raw_parse_sents( + [sentence], properties=default_properties, *args, **kwargs + ) + ) + + def api_call(self, data, properties=None, timeout=60): + default_properties = { + "outputFormat": "json", + "annotators": "tokenize,pos,lemma,ssplit,{parser_annotator}".format( + parser_annotator=self.parser_annotator + ), + } + + default_properties.update(properties or {}) + + response = self.session.post( + self.url, + params={"properties": json.dumps(default_properties)}, + data=data.encode(self.encoding), + headers={"Content-Type": f"text/plain; charset={self.encoding}"}, + timeout=timeout, + ) + + response.raise_for_status() + + return response.json(strict=self.strict_json) + + def raw_parse_sents( + self, sentences, verbose=False, properties=None, *args, **kwargs + ): + """Parse multiple sentences. + + Takes multiple sentences as a list of strings. Each sentence will be + automatically tokenized and tagged. + + :param sentences: Input sentences to parse. + :type sentences: list(str) + :rtype: iter(iter(Tree)) + + """ + default_properties = { + # Only splits on '\n', never inside the sentence. 
+ "ssplit.eolonly": "true" + } + + default_properties.update(properties or {}) + + """ + for sentence in sentences: + parsed_data = self.api_call(sentence, properties=default_properties) + + assert len(parsed_data['sentences']) == 1 + + for parse in parsed_data['sentences']: + tree = self.make_tree(parse) + yield iter([tree]) + """ + parsed_data = self.api_call("\n".join(sentences), properties=default_properties) + for parsed_sent in parsed_data["sentences"]: + tree = self.make_tree(parsed_sent) + yield iter([tree]) + + def parse_text(self, text, *args, **kwargs): + """Parse a piece of text. + + The text might contain several sentences which will be split by CoreNLP. + + :param str text: text to be split. + :returns: an iterable of syntactic structures. # TODO: should it be an iterable of iterables? + + """ + parsed_data = self.api_call(text, *args, **kwargs) + + for parse in parsed_data["sentences"]: + yield self.make_tree(parse) + + def tokenize(self, text, properties=None): + """Tokenize a string of text. + + Skip these tests if CoreNLP is likely not ready. + >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The CoreNLP server can be started using the following notation, although + we recommend the `with CoreNLPServer() as server:` context manager notation + to ensure that the server is always stopped. + >>> server = CoreNLPServer() + >>> server.start() + >>> parser = CoreNLPParser(url=server.url) + + >>> text = 'Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\nThanks.' + >>> list(parser.tokenize(text)) + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + + >>> s = "The colour of the wall is blue." + >>> list( + ... parser.tokenize( + ... 'The colour of the wall is blue.', + ... properties={'tokenize.options': 'americanize=true'}, + ... ) + ... ) + ['The', 'colour', 'of', 'the', 'wall', 'is', 'blue', '.'] + >>> server.stop() + + """ + default_properties = {"annotators": "tokenize,ssplit"} + + default_properties.update(properties or {}) + + result = self.api_call(text, properties=default_properties) + + for sentence in result["sentences"]: + for token in sentence["tokens"]: + yield token["originalText"] or token["word"] + + def tag_sents(self, sentences): + """ + Tag multiple sentences. + + Takes multiple sentences as a list where each sentence is a list of + tokens. + + :param sentences: Input sentences to tag + :type sentences: list(list(str)) + :rtype: list(list(tuple(str, str)) + """ + # Converting list(list(str)) -> list(str) + sentences = (" ".join(words) for words in sentences) + return [sentences[0] for sentences in self.raw_tag_sents(sentences)] + + def tag(self, sentence: str) -> List[Tuple[str, str]]: + """ + Tag a list of tokens. + + :rtype: list(tuple(str, str)) + + Skip these tests if CoreNLP is likely not ready. + >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The CoreNLP server can be started using the following notation, although + we recommend the `with CoreNLPServer() as server:` context manager notation + to ensure that the server is always stopped. 
+ >>> server = CoreNLPServer() + >>> server.start() + >>> parser = CoreNLPParser(url=server.url, tagtype='ner') + >>> tokens = 'Rami Eid is studying at Stony Brook University in NY'.split() + >>> parser.tag(tokens) # doctest: +NORMALIZE_WHITESPACE + [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'), ('at', 'O'), ('Stony', 'ORGANIZATION'), + ('Brook', 'ORGANIZATION'), ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'STATE_OR_PROVINCE')] + + >>> parser = CoreNLPParser(url=server.url, tagtype='pos') + >>> tokens = "What is the airspeed of an unladen swallow ?".split() + >>> parser.tag(tokens) # doctest: +NORMALIZE_WHITESPACE + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), + ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), + ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')] + >>> server.stop() + """ + return self.tag_sents([sentence])[0] + + def raw_tag_sents(self, sentences): + """ + Tag multiple sentences. + + Takes multiple sentences as a list where each sentence is a string. + + :param sentences: Input sentences to tag + :type sentences: list(str) + :rtype: list(list(list(tuple(str, str))) + """ + default_properties = { + "ssplit.isOneSentence": "true", + "annotators": "tokenize,ssplit,", + } + + # Supports only 'pos' or 'ner' tags. + assert self.tagtype in ["pos", "ner"] + default_properties["annotators"] += self.tagtype + for sentence in sentences: + tagged_data = self.api_call(sentence, properties=default_properties) + yield [ + [ + (token["word"], token[self.tagtype]) + for token in tagged_sentence["tokens"] + ] + for tagged_sentence in tagged_data["sentences"] + ] + + +class CoreNLPParser(GenericCoreNLPParser): + """ + Skip these tests if CoreNLP is likely not ready. + >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The recommended usage of `CoreNLPParser` is using the context manager notation: + >>> with CoreNLPServer() as server: + ... parser = CoreNLPParser(url=server.url) + ... next( + ... parser.raw_parse('The quick brown fox jumps over the lazy dog.') + ... ).pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______________|__________________________ + | VP | + | _________|___ | + | | PP | + | | ________|___ | + NP | | NP | + ____|__________ | | _______|____ | + DT JJ JJ NN VBZ IN DT JJ NN . + | | | | | | | | | | + The quick brown fox jumps over the lazy dog . + + Alternatively, the server can be started using the following notation. + Note that `CoreNLPServer` does not need to be used if the CoreNLP server is started + outside of Python. + >>> server = CoreNLPServer() + >>> server.start() + >>> parser = CoreNLPParser(url=server.url) + + >>> (parse_fox, ), (parse_wolf, ) = parser.raw_parse_sents( + ... [ + ... 'The quick brown fox jumps over the lazy dog.', + ... 'The quick grey wolf jumps over the lazy fox.', + ... ] + ... ) + + >>> parse_fox.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______________|__________________________ + | VP | + | _________|___ | + | | PP | + | | ________|___ | + NP | | NP | + ____|__________ | | _______|____ | + DT JJ JJ NN VBZ IN DT JJ NN . + | | | | | | | | | | + The quick brown fox jumps over the lazy dog . + + >>> parse_wolf.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______________|__________________________ + | VP | + | _________|___ | + | | PP | + | | ________|___ | + NP | | NP | + ____|_________ | | _______|____ | + DT JJ JJ NN VBZ IN DT JJ NN . 
+ | | | | | | | | | | + The quick grey wolf jumps over the lazy fox . + + >>> (parse_dog, ), (parse_friends, ) = parser.parse_sents( + ... [ + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ] + ... ) + + >>> parse_dog.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______|____ + | VP + | ________|___ + NP | NP + | | ___|___ + PRP VBP DT NN + | | | | + I 'm a dog + + >>> parse_friends.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + ____|___________ + | VP + | ___________|_____________ + | | NP + | | _______|________________________ + | | NP | | | + | | _____|_______ | | | + NP | NP | | NP | + | | ______|_________ | | ___|____ | + DT VBZ PRP$ NNS POS NN -LRB- DT NN -RRB- + | | | | | | | | | | + This is my friends ' cat -LRB- the tabby -RRB- + + >>> parse_john, parse_mary, = parser.parse_text( + ... 'John loves Mary. Mary walks.' + ... ) + + >>> parse_john.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _____|_____________ + | VP | + | ____|___ | + NP | NP | + | | | | + NNP VBZ NNP . + | | | | + John loves Mary . + + >>> parse_mary.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _____|____ + NP VP | + | | | + NNP VBZ . + | | | + Mary walks . + + Special cases + + >>> next( + ... parser.raw_parse( + ... 'NASIRIYA, Iraq—Iraqi doctors who treated former prisoner of war ' + ... 'Jessica Lynch have angrily dismissed claims made in her biography ' + ... 'that she was raped by her Iraqi captors.' + ... ) + ... ).height() + 14 + + >>> next( + ... parser.raw_parse( + ... "The broader Standard & Poor's 500 Index <.SPX> was 0.46 points lower, or " + ... '0.05 percent, at 997.02.' + ... ) + ... ).height() + 11 + + >>> server.stop() + """ + + _OUTPUT_FORMAT = "penn" + parser_annotator = "parse" + + def make_tree(self, result): + return Tree.fromstring(result["parse"]) + + +class CoreNLPDependencyParser(GenericCoreNLPParser): + """Dependency parser. + + Skip these tests if CoreNLP is likely not ready. + >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The recommended usage of `CoreNLPParser` is using the context manager notation: + >>> with CoreNLPServer() as server: + ... dep_parser = CoreNLPDependencyParser(url=server.url) + ... parse, = dep_parser.raw_parse( + ... 'The quick brown fox jumps over the lazy dog.' + ... ) + ... print(parse.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + The DT 4 det + quick JJ 4 amod + brown JJ 4 amod + fox NN 5 nsubj + jumps VBZ 0 ROOT + over IN 9 case + the DT 9 det + lazy JJ 9 amod + dog NN 5 obl + . . 5 punct + + Alternatively, the server can be started using the following notation. + Note that `CoreNLPServer` does not need to be used if the CoreNLP server is started + outside of Python. + >>> server = CoreNLPServer() + >>> server.start() + >>> dep_parser = CoreNLPDependencyParser(url=server.url) + >>> parse, = dep_parser.raw_parse('The quick brown fox jumps over the lazy dog.') + >>> print(parse.tree()) # doctest: +NORMALIZE_WHITESPACE + (jumps (fox The quick brown) (dog over the lazy) .) + + >>> for governor, dep, dependent in parse.triples(): + ... 
print(governor, dep, dependent) # doctest: +NORMALIZE_WHITESPACE + ('jumps', 'VBZ') nsubj ('fox', 'NN') + ('fox', 'NN') det ('The', 'DT') + ('fox', 'NN') amod ('quick', 'JJ') + ('fox', 'NN') amod ('brown', 'JJ') + ('jumps', 'VBZ') obl ('dog', 'NN') + ('dog', 'NN') case ('over', 'IN') + ('dog', 'NN') det ('the', 'DT') + ('dog', 'NN') amod ('lazy', 'JJ') + ('jumps', 'VBZ') punct ('.', '.') + + >>> (parse_fox, ), (parse_dog, ) = dep_parser.raw_parse_sents( + ... [ + ... 'The quick brown fox jumps over the lazy dog.', + ... 'The quick grey wolf jumps over the lazy fox.', + ... ] + ... ) + >>> print(parse_fox.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + The DT 4 det + quick JJ 4 amod + brown JJ 4 amod + fox NN 5 nsubj + jumps VBZ 0 ROOT + over IN 9 case + the DT 9 det + lazy JJ 9 amod + dog NN 5 obl + . . 5 punct + + >>> print(parse_dog.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + The DT 4 det + quick JJ 4 amod + grey JJ 4 amod + wolf NN 5 nsubj + jumps VBZ 0 ROOT + over IN 9 case + the DT 9 det + lazy JJ 9 amod + fox NN 5 obl + . . 5 punct + + >>> (parse_dog, ), (parse_friends, ) = dep_parser.parse_sents( + ... [ + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ] + ... ) + >>> print(parse_dog.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + I PRP 4 nsubj + 'm VBP 4 cop + a DT 4 det + dog NN 0 ROOT + + >>> print(parse_friends.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + This DT 6 nsubj + is VBZ 6 cop + my PRP$ 4 nmod:poss + friends NNS 6 nmod:poss + ' POS 4 case + cat NN 0 ROOT + ( -LRB- 9 punct + the DT 9 det + tabby NN 6 dep + ) -RRB- 9 punct + + >>> parse_john, parse_mary, = dep_parser.parse_text( + ... 'John loves Mary. Mary walks.' + ... ) + + >>> print(parse_john.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + John NNP 2 nsubj + loves VBZ 0 ROOT + Mary NNP 2 obj + . . 2 punct + + >>> print(parse_mary.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + Mary NNP 2 nsubj + walks VBZ 0 ROOT + . . 2 punct + + Special cases + + Non-breaking space inside of a token. + + >>> len( + ... next( + ... dep_parser.raw_parse( + ... 'Anhalt said children typically treat a 20-ounce soda bottle as one ' + ... 'serving, while it actually contains 2 1/2 servings.' + ... ) + ... ).nodes + ... ) + 23 + + Phone numbers. + + >>> len( + ... next( + ... dep_parser.raw_parse('This is not going to crash: 01 111 555.') + ... ).nodes + ... ) + 10 + + >>> print( + ... next( + ... dep_parser.raw_parse('The underscore _ should not simply disappear.') + ... ).to_conll(4) + ... ) # doctest: +NORMALIZE_WHITESPACE + The DT 2 det + underscore NN 7 nsubj + _ NFP 7 punct + should MD 7 aux + not RB 7 advmod + simply RB 7 advmod + disappear VB 0 ROOT + . . 7 punct + + >>> print( + ... next( + ... dep_parser.raw_parse( + ... 'for all of its insights into the dream world of teen life , and its electronic expression through ' + ... 'cyber culture , the film gives no quarter to anyone seeking to pull a cohesive story out of its 2 ' + ... '1/2-hour running time .' + ... ) + ... ).to_conll(4) + ... ) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS + for IN 2 case + all DT 24 obl + of IN 5 case + its PRP$ 5 nmod:poss + insights NNS 2 nmod + into IN 9 case + the DT 9 det + dream NN 9 compound + world NN 5 nmod + of IN 12 case + teen NN 12 compound + ... + + >>> server.stop() + """ + + _OUTPUT_FORMAT = "conll2007" + parser_annotator = "depparse" + + def make_tree(self, result): + + return DependencyGraph( + ( + " ".join(n_items[1:]) # NLTK expects an iterable of strings... 
+ for n_items in sorted(transform(result)) + ), + cell_separator=" ", # To make sure that a non-breaking space is kept inside of a token. + ) + + +def transform(sentence): + for dependency in sentence["basicDependencies"]: + + dependent_index = dependency["dependent"] + token = sentence["tokens"][dependent_index - 1] + + # Return values that we don't know as '_'. Also, consider tag and ctag + # to be equal. + yield ( + dependent_index, + "_", + token["word"], + token["lemma"], + token["pos"], + token["pos"], + "_", + str(dependency["governor"]), + dependency["dep"], + "_", + "_", + ) diff --git a/venv/lib/python3.10/site-packages/nltk/parse/earleychart.py b/venv/lib/python3.10/site-packages/nltk/parse/earleychart.py new file mode 100644 index 0000000000000000000000000000000000000000..1054e114c8e3177754ed895b67ac2b2f4d39cc21 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/earleychart.py @@ -0,0 +1,552 @@ +# Natural Language Toolkit: An Incremental Earley Chart Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Ljunglöf +# Rob Speer +# Edward Loper +# Steven Bird +# Jean Mark Gawron +# URL: +# For license information, see LICENSE.TXT + +""" +Data classes and parser implementations for *incremental* chart +parsers, which use dynamic programming to efficiently parse a text. +A "chart parser" derives parse trees for a text by iteratively adding +\"edges\" to a \"chart\". Each "edge" represents a hypothesis about the tree +structure for a subsequence of the text. The "chart" is a +\"blackboard\" for composing and combining these hypotheses. + +A parser is "incremental", if it guarantees that for all i, j where i < j, +all edges ending at i are built before any edges ending at j. +This is appealing for, say, speech recognizer hypothesis filtering. + +The main parser class is ``EarleyChartParser``, which is a top-down +algorithm, originally formulated by Jay Earley (1970). +""" + +from time import perf_counter + +from nltk.parse.chart import ( + BottomUpPredictCombineRule, + BottomUpPredictRule, + CachedTopDownPredictRule, + Chart, + ChartParser, + EdgeI, + EmptyPredictRule, + FilteredBottomUpPredictCombineRule, + FilteredSingleEdgeFundamentalRule, + LeafEdge, + LeafInitRule, + SingleEdgeFundamentalRule, + TopDownInitRule, +) +from nltk.parse.featurechart import ( + FeatureBottomUpPredictCombineRule, + FeatureBottomUpPredictRule, + FeatureChart, + FeatureChartParser, + FeatureEmptyPredictRule, + FeatureSingleEdgeFundamentalRule, + FeatureTopDownInitRule, + FeatureTopDownPredictRule, +) + +# //////////////////////////////////////////////////////////// +# Incremental Chart +# //////////////////////////////////////////////////////////// + + +class IncrementalChart(Chart): + def initialize(self): + # A sequence of edge lists contained in this chart. + self._edgelists = tuple([] for x in self._positions()) + + # The set of child pointer lists associated with each edge. + self._edge_to_cpls = {} + + # Indexes mapping attribute values to lists of edges + # (used by select()). + self._indexes = {} + + def edges(self): + return list(self.iteredges()) + + def iteredges(self): + return (edge for edgelist in self._edgelists for edge in edgelist) + + def select(self, end, **restrictions): + edgelist = self._edgelists[end] + + # If there are no restrictions, then return all edges. + if restrictions == {}: + return iter(edgelist) + + # Find the index corresponding to the given restrictions. 
+ restr_keys = sorted(restrictions.keys()) + restr_keys = tuple(restr_keys) + + # If it doesn't exist, then create it. + if restr_keys not in self._indexes: + self._add_index(restr_keys) + + vals = tuple(restrictions[key] for key in restr_keys) + return iter(self._indexes[restr_keys][end].get(vals, [])) + + def _add_index(self, restr_keys): + # Make sure it's a valid index. + for key in restr_keys: + if not hasattr(EdgeI, key): + raise ValueError("Bad restriction: %s" % key) + + # Create the index. + index = self._indexes[restr_keys] = tuple({} for x in self._positions()) + + # Add all existing edges to the index. + for end, edgelist in enumerate(self._edgelists): + this_index = index[end] + for edge in edgelist: + vals = tuple(getattr(edge, key)() for key in restr_keys) + this_index.setdefault(vals, []).append(edge) + + def _register_with_indexes(self, edge): + end = edge.end() + for (restr_keys, index) in self._indexes.items(): + vals = tuple(getattr(edge, key)() for key in restr_keys) + index[end].setdefault(vals, []).append(edge) + + def _append_edge(self, edge): + self._edgelists[edge.end()].append(edge) + + def _positions(self): + return range(self.num_leaves() + 1) + + +class FeatureIncrementalChart(IncrementalChart, FeatureChart): + def select(self, end, **restrictions): + edgelist = self._edgelists[end] + + # If there are no restrictions, then return all edges. + if restrictions == {}: + return iter(edgelist) + + # Find the index corresponding to the given restrictions. + restr_keys = sorted(restrictions.keys()) + restr_keys = tuple(restr_keys) + + # If it doesn't exist, then create it. + if restr_keys not in self._indexes: + self._add_index(restr_keys) + + vals = tuple( + self._get_type_if_possible(restrictions[key]) for key in restr_keys + ) + return iter(self._indexes[restr_keys][end].get(vals, [])) + + def _add_index(self, restr_keys): + # Make sure it's a valid index. + for key in restr_keys: + if not hasattr(EdgeI, key): + raise ValueError("Bad restriction: %s" % key) + + # Create the index. + index = self._indexes[restr_keys] = tuple({} for x in self._positions()) + + # Add all existing edges to the index. + for end, edgelist in enumerate(self._edgelists): + this_index = index[end] + for edge in edgelist: + vals = tuple( + self._get_type_if_possible(getattr(edge, key)()) + for key in restr_keys + ) + this_index.setdefault(vals, []).append(edge) + + def _register_with_indexes(self, edge): + end = edge.end() + for (restr_keys, index) in self._indexes.items(): + vals = tuple( + self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys + ) + index[end].setdefault(vals, []).append(edge) + + +# //////////////////////////////////////////////////////////// +# Incremental CFG Rules +# //////////////////////////////////////////////////////////// + + +class CompleteFundamentalRule(SingleEdgeFundamentalRule): + def _apply_incomplete(self, chart, grammar, left_edge): + end = left_edge.end() + # When the chart is incremental, we only have to look for + # empty complete edges here. 
+ for right_edge in chart.select( + start=end, end=end, is_complete=True, lhs=left_edge.nextsym() + ): + new_edge = left_edge.move_dot_forward(right_edge.end()) + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + +class CompleterRule(CompleteFundamentalRule): + _fundamental_rule = CompleteFundamentalRule() + + def apply(self, chart, grammar, edge): + if not isinstance(edge, LeafEdge): + yield from self._fundamental_rule.apply(chart, grammar, edge) + + +class ScannerRule(CompleteFundamentalRule): + _fundamental_rule = CompleteFundamentalRule() + + def apply(self, chart, grammar, edge): + if isinstance(edge, LeafEdge): + yield from self._fundamental_rule.apply(chart, grammar, edge) + + +class PredictorRule(CachedTopDownPredictRule): + pass + + +class FilteredCompleteFundamentalRule(FilteredSingleEdgeFundamentalRule): + def apply(self, chart, grammar, edge): + # Since the Filtered rule only works for grammars without empty productions, + # we only have to bother with complete edges here. + if edge.is_complete(): + yield from self._apply_complete(chart, grammar, edge) + + +# //////////////////////////////////////////////////////////// +# Incremental FCFG Rules +# //////////////////////////////////////////////////////////// + + +class FeatureCompleteFundamentalRule(FeatureSingleEdgeFundamentalRule): + def _apply_incomplete(self, chart, grammar, left_edge): + fr = self._fundamental_rule + end = left_edge.end() + # When the chart is incremental, we only have to look for + # empty complete edges here. + for right_edge in chart.select( + start=end, end=end, is_complete=True, lhs=left_edge.nextsym() + ): + yield from fr.apply(chart, grammar, left_edge, right_edge) + + +class FeatureCompleterRule(CompleterRule): + _fundamental_rule = FeatureCompleteFundamentalRule() + + +class FeatureScannerRule(ScannerRule): + _fundamental_rule = FeatureCompleteFundamentalRule() + + +class FeaturePredictorRule(FeatureTopDownPredictRule): + pass + + +# //////////////////////////////////////////////////////////// +# Incremental CFG Chart Parsers +# //////////////////////////////////////////////////////////// + +EARLEY_STRATEGY = [ + LeafInitRule(), + TopDownInitRule(), + CompleterRule(), + ScannerRule(), + PredictorRule(), +] +TD_INCREMENTAL_STRATEGY = [ + LeafInitRule(), + TopDownInitRule(), + CachedTopDownPredictRule(), + CompleteFundamentalRule(), +] +BU_INCREMENTAL_STRATEGY = [ + LeafInitRule(), + EmptyPredictRule(), + BottomUpPredictRule(), + CompleteFundamentalRule(), +] +BU_LC_INCREMENTAL_STRATEGY = [ + LeafInitRule(), + EmptyPredictRule(), + BottomUpPredictCombineRule(), + CompleteFundamentalRule(), +] + +LC_INCREMENTAL_STRATEGY = [ + LeafInitRule(), + FilteredBottomUpPredictCombineRule(), + FilteredCompleteFundamentalRule(), +] + + +class IncrementalChartParser(ChartParser): + """ + An *incremental* chart parser implementing Jay Earley's + parsing algorithm: + + | For each index end in [0, 1, ..., N]: + | For each edge such that edge.end = end: + | If edge is incomplete and edge.next is not a part of speech: + | Apply PredictorRule to edge + | If edge is incomplete and edge.next is a part of speech: + | Apply ScannerRule to edge + | If edge is complete: + | Apply CompleterRule to edge + | Return any complete parses in the chart + """ + + def __init__( + self, + grammar, + strategy=BU_LC_INCREMENTAL_STRATEGY, + trace=0, + trace_chart_width=50, + chart_class=IncrementalChart, + ): + """ + Create a new Earley chart parser, that uses ``grammar`` to + parse texts. 
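        For instance, with a small illustrative grammar (the grammar and
        sentence below are only for demonstration)::

            from nltk.grammar import CFG

            grammar = CFG.fromstring('''
                S -> NP VP
                NP -> Det N
                VP -> V NP
                Det -> 'the' | 'a'
                N -> 'dog' | 'man'
                V -> 'saw'
            ''')
            parser = EarleyChartParser(grammar)   # subclass defined below
            chart = parser.chart_parse('the dog saw a man'.split())
            trees = list(chart.parses(grammar.start()))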
+ + :type grammar: CFG + :param grammar: The grammar used to parse texts. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + :type trace_chart_width: int + :param trace_chart_width: The default total width reserved for + the chart in trace output. The remainder of each line will + be used to display edges. + :param chart_class: The class that should be used to create + the charts used by this parser. + """ + self._grammar = grammar + self._trace = trace + self._trace_chart_width = trace_chart_width + self._chart_class = chart_class + + self._axioms = [] + self._inference_rules = [] + for rule in strategy: + if rule.NUM_EDGES == 0: + self._axioms.append(rule) + elif rule.NUM_EDGES == 1: + self._inference_rules.append(rule) + else: + raise ValueError( + "Incremental inference rules must have " "NUM_EDGES == 0 or 1" + ) + + def chart_parse(self, tokens, trace=None): + if trace is None: + trace = self._trace + trace_new_edges = self._trace_new_edges + + tokens = list(tokens) + self._grammar.check_coverage(tokens) + chart = self._chart_class(tokens) + grammar = self._grammar + + # Width, for printing trace edges. + trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1) + if trace: + print(chart.pretty_format_leaves(trace_edge_width)) + + for axiom in self._axioms: + new_edges = list(axiom.apply(chart, grammar)) + trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width) + + inference_rules = self._inference_rules + for end in range(chart.num_leaves() + 1): + if trace > 1: + print("\n* Processing queue:", end, "\n") + agenda = list(chart.select(end=end)) + while agenda: + edge = agenda.pop() + for rule in inference_rules: + new_edges = list(rule.apply(chart, grammar, edge)) + trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) + for new_edge in new_edges: + if new_edge.end() == end: + agenda.append(new_edge) + + return chart + + +class EarleyChartParser(IncrementalChartParser): + def __init__(self, grammar, **parser_args): + IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args) + + +class IncrementalTopDownChartParser(IncrementalChartParser): + def __init__(self, grammar, **parser_args): + IncrementalChartParser.__init__( + self, grammar, TD_INCREMENTAL_STRATEGY, **parser_args + ) + + +class IncrementalBottomUpChartParser(IncrementalChartParser): + def __init__(self, grammar, **parser_args): + IncrementalChartParser.__init__( + self, grammar, BU_INCREMENTAL_STRATEGY, **parser_args + ) + + +class IncrementalBottomUpLeftCornerChartParser(IncrementalChartParser): + def __init__(self, grammar, **parser_args): + IncrementalChartParser.__init__( + self, grammar, BU_LC_INCREMENTAL_STRATEGY, **parser_args + ) + + +class IncrementalLeftCornerChartParser(IncrementalChartParser): + def __init__(self, grammar, **parser_args): + if not grammar.is_nonempty(): + raise ValueError( + "IncrementalLeftCornerParser only works for grammars " + "without empty productions." 
+ ) + IncrementalChartParser.__init__( + self, grammar, LC_INCREMENTAL_STRATEGY, **parser_args + ) + + +# //////////////////////////////////////////////////////////// +# Incremental FCFG Chart Parsers +# //////////////////////////////////////////////////////////// + +EARLEY_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureTopDownInitRule(), + FeatureCompleterRule(), + FeatureScannerRule(), + FeaturePredictorRule(), +] +TD_INCREMENTAL_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureTopDownInitRule(), + FeatureTopDownPredictRule(), + FeatureCompleteFundamentalRule(), +] +BU_INCREMENTAL_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureEmptyPredictRule(), + FeatureBottomUpPredictRule(), + FeatureCompleteFundamentalRule(), +] +BU_LC_INCREMENTAL_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureEmptyPredictRule(), + FeatureBottomUpPredictCombineRule(), + FeatureCompleteFundamentalRule(), +] + + +class FeatureIncrementalChartParser(IncrementalChartParser, FeatureChartParser): + def __init__( + self, + grammar, + strategy=BU_LC_INCREMENTAL_FEATURE_STRATEGY, + trace_chart_width=20, + chart_class=FeatureIncrementalChart, + **parser_args + ): + IncrementalChartParser.__init__( + self, + grammar, + strategy=strategy, + trace_chart_width=trace_chart_width, + chart_class=chart_class, + **parser_args + ) + + +class FeatureEarleyChartParser(FeatureIncrementalChartParser): + def __init__(self, grammar, **parser_args): + FeatureIncrementalChartParser.__init__( + self, grammar, EARLEY_FEATURE_STRATEGY, **parser_args + ) + + +class FeatureIncrementalTopDownChartParser(FeatureIncrementalChartParser): + def __init__(self, grammar, **parser_args): + FeatureIncrementalChartParser.__init__( + self, grammar, TD_INCREMENTAL_FEATURE_STRATEGY, **parser_args + ) + + +class FeatureIncrementalBottomUpChartParser(FeatureIncrementalChartParser): + def __init__(self, grammar, **parser_args): + FeatureIncrementalChartParser.__init__( + self, grammar, BU_INCREMENTAL_FEATURE_STRATEGY, **parser_args + ) + + +class FeatureIncrementalBottomUpLeftCornerChartParser(FeatureIncrementalChartParser): + def __init__(self, grammar, **parser_args): + FeatureIncrementalChartParser.__init__( + self, grammar, BU_LC_INCREMENTAL_FEATURE_STRATEGY, **parser_args + ) + + +# //////////////////////////////////////////////////////////// +# Demonstration +# //////////////////////////////////////////////////////////// + + +def demo( + print_times=True, + print_grammar=False, + print_trees=True, + trace=2, + sent="I saw John with a dog with my cookie", + numparses=5, +): + """ + A demonstration of the Earley parsers. + """ + import sys + import time + + from nltk.parse.chart import demo_grammar + + # The grammar for ChartParser and SteppingChartParser: + grammar = demo_grammar() + if print_grammar: + print("* Grammar") + print(grammar) + + # Tokenize the sample sentence. + print("* Sentence:") + print(sent) + tokens = sent.split() + print(tokens) + print() + + # Do the parsing. + earley = EarleyChartParser(grammar, trace=trace) + t = perf_counter() + chart = earley.chart_parse(tokens) + parses = list(chart.parses(grammar.start())) + t = perf_counter() - t + + # Print results. 
+ if numparses: + assert len(parses) == numparses, "Not all parses found" + if print_trees: + for tree in parses: + print(tree) + else: + print("Nr trees:", len(parses)) + if print_times: + print("Time:", t) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/parse/evaluate.py b/venv/lib/python3.10/site-packages/nltk/parse/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..07ab1c9832b42be2e655663cacf87d84db5ea3a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/evaluate.py @@ -0,0 +1,129 @@ +# Natural Language Toolkit: evaluation of dependency parser +# +# Author: Long Duong +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import unicodedata + + +class DependencyEvaluator: + """ + Class for measuring labelled and unlabelled attachment score for + dependency parsing. Note that the evaluation ignores punctuation. + + >>> from nltk.parse import DependencyGraph, DependencyEvaluator + + >>> gold_sent = DependencyGraph(\""" + ... Pierre NNP 2 NMOD + ... Vinken NNP 8 SUB + ... , , 2 P + ... 61 CD 5 NMOD + ... years NNS 6 AMOD + ... old JJ 2 NMOD + ... , , 2 P + ... will MD 0 ROOT + ... join VB 8 VC + ... the DT 11 NMOD + ... board NN 9 OBJ + ... as IN 9 VMOD + ... a DT 15 NMOD + ... nonexecutive JJ 15 NMOD + ... director NN 12 PMOD + ... Nov. NNP 9 VMOD + ... 29 CD 16 NMOD + ... . . 9 VMOD + ... \""") + + >>> parsed_sent = DependencyGraph(\""" + ... Pierre NNP 8 NMOD + ... Vinken NNP 1 SUB + ... , , 3 P + ... 61 CD 6 NMOD + ... years NNS 6 AMOD + ... old JJ 2 NMOD + ... , , 3 AMOD + ... will MD 0 ROOT + ... join VB 8 VC + ... the DT 11 AMOD + ... board NN 9 OBJECT + ... as IN 9 NMOD + ... a DT 15 NMOD + ... nonexecutive JJ 15 NMOD + ... director NN 12 PMOD + ... Nov. NNP 9 VMOD + ... 29 CD 16 NMOD + ... . . 9 VMOD + ... \""") + + >>> de = DependencyEvaluator([parsed_sent],[gold_sent]) + >>> las, uas = de.eval() + >>> las + 0.6 + >>> uas + 0.8 + >>> abs(uas - 0.8) < 0.00001 + True + """ + + def __init__(self, parsed_sents, gold_sents): + """ + :param parsed_sents: the list of parsed_sents as the output of parser + :type parsed_sents: list(DependencyGraph) + """ + self._parsed_sents = parsed_sents + self._gold_sents = gold_sents + + def _remove_punct(self, inStr): + """ + Function to remove punctuation from Unicode string. + :param input: the input string + :return: Unicode string after remove all punctuation + """ + punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"} + return "".join(x for x in inStr if unicodedata.category(x) not in punc_cat) + + def eval(self): + """ + Return the Labeled Attachment Score (LAS) and Unlabeled Attachment Score (UAS) + + :return : tuple(float,float) + """ + if len(self._parsed_sents) != len(self._gold_sents): + raise ValueError( + " Number of parsed sentence is different with number of gold sentence." 
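        # In the counting loop below, UAS credits a token whose predicted head
        # matches the gold head, while LAS additionally requires the dependency
        # relation to match; tokens consisting only of punctuation are skipped.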
+ ) + + corr = 0 + corrL = 0 + total = 0 + + for i in range(len(self._parsed_sents)): + parsed_sent_nodes = self._parsed_sents[i].nodes + gold_sent_nodes = self._gold_sents[i].nodes + + if len(parsed_sent_nodes) != len(gold_sent_nodes): + raise ValueError("Sentences must have equal length.") + + for parsed_node_address, parsed_node in parsed_sent_nodes.items(): + gold_node = gold_sent_nodes[parsed_node_address] + + if parsed_node["word"] is None: + continue + if parsed_node["word"] != gold_node["word"]: + raise ValueError("Sentence sequence is not matched.") + + # Ignore if word is punctuation by default + # if (parsed_sent[j]["word"] in string.punctuation): + if self._remove_punct(parsed_node["word"]) == "": + continue + + total += 1 + if parsed_node["head"] == gold_node["head"]: + corr += 1 + if parsed_node["rel"] == gold_node["rel"]: + corrL += 1 + + return corrL / total, corr / total diff --git a/venv/lib/python3.10/site-packages/nltk/parse/featurechart.py b/venv/lib/python3.10/site-packages/nltk/parse/featurechart.py new file mode 100644 index 0000000000000000000000000000000000000000..0a981001e4f9ad301d4c564ac45c6a0bdcbd310e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/featurechart.py @@ -0,0 +1,674 @@ +# Natural Language Toolkit: Chart Parser for Feature-Based Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Rob Speer +# Peter Ljunglöf +# URL: +# For license information, see LICENSE.TXT + +""" +Extension of chart parsing implementation to handle grammars with +feature structures as nodes. +""" +from time import perf_counter + +from nltk.featstruct import TYPE, FeatStruct, find_variables, unify +from nltk.grammar import ( + CFG, + FeatStructNonterminal, + Nonterminal, + Production, + is_nonterminal, + is_terminal, +) +from nltk.parse.chart import ( + BottomUpPredictCombineRule, + BottomUpPredictRule, + CachedTopDownPredictRule, + Chart, + ChartParser, + EdgeI, + EmptyPredictRule, + FundamentalRule, + LeafInitRule, + SingleEdgeFundamentalRule, + TopDownInitRule, + TreeEdge, +) +from nltk.sem import logic +from nltk.tree import Tree + +# //////////////////////////////////////////////////////////// +# Tree Edge +# //////////////////////////////////////////////////////////// + + +class FeatureTreeEdge(TreeEdge): + """ + A specialized tree edge that allows shared variable bindings + between nonterminals on the left-hand side and right-hand side. + + Each ``FeatureTreeEdge`` contains a set of ``bindings``, i.e., a + dictionary mapping from variables to values. If the edge is not + complete, then these bindings are simply stored. However, if the + edge is complete, then the constructor applies these bindings to + every nonterminal in the edge whose symbol implements the + interface ``SubstituteBindingsI``. + """ + + def __init__(self, span, lhs, rhs, dot=0, bindings=None): + """ + Construct a new edge. If the edge is incomplete (i.e., if + ``dot alpha \* B1 beta][i:j]`` + - ``[B2 -> gamma \*][j:k]`` + + licenses the edge: + + - ``[A -> alpha B3 \* beta][i:j]`` + + assuming that B1 and B2 can be unified to generate B3. + """ + + def apply(self, chart, grammar, left_edge, right_edge): + # Make sure the rule is applicable. 
+ if not ( + left_edge.end() == right_edge.start() + and left_edge.is_incomplete() + and right_edge.is_complete() + and isinstance(left_edge, FeatureTreeEdge) + ): + return + found = right_edge.lhs() + nextsym = left_edge.nextsym() + if isinstance(right_edge, FeatureTreeEdge): + if not is_nonterminal(nextsym): + return + if left_edge.nextsym()[TYPE] != right_edge.lhs()[TYPE]: + return + # Create a copy of the bindings. + bindings = left_edge.bindings() + # We rename vars here, because we don't want variables + # from the two different productions to match. + found = found.rename_variables(used_vars=left_edge.variables()) + # Unify B1 (left_edge.nextsym) with B2 (right_edge.lhs) to + # generate B3 (result). + result = unify(nextsym, found, bindings, rename_vars=False) + if result is None: + return + else: + if nextsym != found: + return + # Create a copy of the bindings. + bindings = left_edge.bindings() + + # Construct the new edge. + new_edge = left_edge.move_dot_forward(right_edge.end(), bindings) + + # Add it to the chart, with appropriate child pointers. + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + +class FeatureSingleEdgeFundamentalRule(SingleEdgeFundamentalRule): + """ + A specialized version of the completer / single edge fundamental rule + that operates on nonterminals whose symbols are ``FeatStructNonterminal``. + Rather than simply comparing the nonterminals for equality, they are + unified. + """ + + _fundamental_rule = FeatureFundamentalRule() + + def _apply_complete(self, chart, grammar, right_edge): + fr = self._fundamental_rule + for left_edge in chart.select( + end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs() + ): + yield from fr.apply(chart, grammar, left_edge, right_edge) + + def _apply_incomplete(self, chart, grammar, left_edge): + fr = self._fundamental_rule + for right_edge in chart.select( + start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym() + ): + yield from fr.apply(chart, grammar, left_edge, right_edge) + + +# //////////////////////////////////////////////////////////// +# Top-Down Prediction +# //////////////////////////////////////////////////////////// + + +class FeatureTopDownInitRule(TopDownInitRule): + def apply(self, chart, grammar): + for prod in grammar.productions(lhs=grammar.start()): + new_edge = FeatureTreeEdge.from_production(prod, 0) + if chart.insert(new_edge, ()): + yield new_edge + + +class FeatureTopDownPredictRule(CachedTopDownPredictRule): + r""" + A specialized version of the (cached) top down predict rule that operates + on nonterminals whose symbols are ``FeatStructNonterminal``. Rather + than simply comparing the nonterminals for equality, they are + unified. + + The top down expand rule states that: + + - ``[A -> alpha \* B1 beta][i:j]`` + + licenses the edge: + + - ``[B2 -> \* gamma][j:j]`` + + for each grammar production ``B2 -> gamma``, assuming that B1 + and B2 can be unified. + """ + + def apply(self, chart, grammar, edge): + if edge.is_complete(): + return + nextsym, index = edge.nextsym(), edge.end() + if not is_nonterminal(nextsym): + return + + # If we've already applied this rule to an edge with the same + # next & end, and the chart & grammar have not changed, then + # just return (no new edges to add). 
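        # The cache key pairs the nonterminal specialised with this edge's
        # bindings and the end position; the stored (chart, grammar) pair lets
        # stale entries from a previous parse be detected and ignored.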
+ nextsym_with_bindings = edge.next_with_bindings() + done = self._done.get((nextsym_with_bindings, index), (None, None)) + if done[0] is chart and done[1] is grammar: + return + + for prod in grammar.productions(lhs=nextsym): + # If the left corner in the predicted production is + # leaf, it must match with the input. + if prod.rhs(): + first = prod.rhs()[0] + if is_terminal(first): + if index >= chart.num_leaves(): + continue + if first != chart.leaf(index): + continue + + # We rename vars here, because we don't want variables + # from the two different productions to match. + if unify(prod.lhs(), nextsym_with_bindings, rename_vars=True): + new_edge = FeatureTreeEdge.from_production(prod, edge.end()) + if chart.insert(new_edge, ()): + yield new_edge + + # Record the fact that we've applied this rule. + self._done[nextsym_with_bindings, index] = (chart, grammar) + + +# //////////////////////////////////////////////////////////// +# Bottom-Up Prediction +# //////////////////////////////////////////////////////////// + + +class FeatureBottomUpPredictRule(BottomUpPredictRule): + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + for prod in grammar.productions(rhs=edge.lhs()): + if isinstance(edge, FeatureTreeEdge): + _next = prod.rhs()[0] + if not is_nonterminal(_next): + continue + + new_edge = FeatureTreeEdge.from_production(prod, edge.start()) + if chart.insert(new_edge, ()): + yield new_edge + + +class FeatureBottomUpPredictCombineRule(BottomUpPredictCombineRule): + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + found = edge.lhs() + for prod in grammar.productions(rhs=found): + bindings = {} + if isinstance(edge, FeatureTreeEdge): + _next = prod.rhs()[0] + if not is_nonterminal(_next): + continue + + # We rename vars here, because we don't want variables + # from the two different productions to match. 
+ used_vars = find_variables( + (prod.lhs(),) + prod.rhs(), fs_class=FeatStruct + ) + found = found.rename_variables(used_vars=used_vars) + + result = unify(_next, found, bindings, rename_vars=False) + if result is None: + continue + + new_edge = FeatureTreeEdge.from_production( + prod, edge.start() + ).move_dot_forward(edge.end(), bindings) + if chart.insert(new_edge, (edge,)): + yield new_edge + + +class FeatureEmptyPredictRule(EmptyPredictRule): + def apply(self, chart, grammar): + for prod in grammar.productions(empty=True): + for index in range(chart.num_leaves() + 1): + new_edge = FeatureTreeEdge.from_production(prod, index) + if chart.insert(new_edge, ()): + yield new_edge + + +# //////////////////////////////////////////////////////////// +# Feature Chart Parser +# //////////////////////////////////////////////////////////// + +TD_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureTopDownInitRule(), + FeatureTopDownPredictRule(), + FeatureSingleEdgeFundamentalRule(), +] +BU_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureEmptyPredictRule(), + FeatureBottomUpPredictRule(), + FeatureSingleEdgeFundamentalRule(), +] +BU_LC_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureEmptyPredictRule(), + FeatureBottomUpPredictCombineRule(), + FeatureSingleEdgeFundamentalRule(), +] + + +class FeatureChartParser(ChartParser): + def __init__( + self, + grammar, + strategy=BU_LC_FEATURE_STRATEGY, + trace_chart_width=20, + chart_class=FeatureChart, + **parser_args, + ): + ChartParser.__init__( + self, + grammar, + strategy=strategy, + trace_chart_width=trace_chart_width, + chart_class=chart_class, + **parser_args, + ) + + +class FeatureTopDownChartParser(FeatureChartParser): + def __init__(self, grammar, **parser_args): + FeatureChartParser.__init__(self, grammar, TD_FEATURE_STRATEGY, **parser_args) + + +class FeatureBottomUpChartParser(FeatureChartParser): + def __init__(self, grammar, **parser_args): + FeatureChartParser.__init__(self, grammar, BU_FEATURE_STRATEGY, **parser_args) + + +class FeatureBottomUpLeftCornerChartParser(FeatureChartParser): + def __init__(self, grammar, **parser_args): + FeatureChartParser.__init__( + self, grammar, BU_LC_FEATURE_STRATEGY, **parser_args + ) + + +# //////////////////////////////////////////////////////////// +# Instantiate Variable Chart +# //////////////////////////////////////////////////////////// + + +class InstantiateVarsChart(FeatureChart): + """ + A specialized chart that 'instantiates' variables whose names + start with '@', by replacing them with unique new variables. + In particular, whenever a complete edge is added to the chart, any + variables in the edge's ``lhs`` whose names start with '@' will be + replaced by unique new ``Variable``. + """ + + def __init__(self, tokens): + FeatureChart.__init__(self, tokens) + + def initialize(self): + self._instantiated = set() + FeatureChart.initialize(self) + + def insert(self, edge, child_pointer_list): + if edge in self._instantiated: + return False + self.instantiate_edge(edge) + return FeatureChart.insert(self, edge, child_pointer_list) + + def instantiate_edge(self, edge): + """ + If the edge is a ``FeatureTreeEdge``, and it is complete, + then instantiate all variables whose names start with '@', + by replacing them with unique new variables. + + Note that instantiation is done in-place, since the + parsing algorithms might already hold a reference to + the edge for future use. + """ + # If the edge is a leaf, or is not complete, or is + # already in the chart, then just return it as-is. 
+ if not isinstance(edge, FeatureTreeEdge): + return + if not edge.is_complete(): + return + if edge in self._edge_to_cpls: + return + + # Get a list of variables that need to be instantiated. + # If there are none, then return as-is. + inst_vars = self.inst_vars(edge) + if not inst_vars: + return + + # Instantiate the edge! + self._instantiated.add(edge) + edge._lhs = edge.lhs().substitute_bindings(inst_vars) + + def inst_vars(self, edge): + return { + var: logic.unique_variable() + for var in edge.lhs().variables() + if var.name.startswith("@") + } + + +# //////////////////////////////////////////////////////////// +# Demo +# //////////////////////////////////////////////////////////// + + +def demo_grammar(): + from nltk.grammar import FeatureGrammar + + return FeatureGrammar.fromstring( + """ +S -> NP VP +PP -> Prep NP +NP -> NP PP +VP -> VP PP +VP -> Verb NP +VP -> Verb +NP -> Det[pl=?x] Noun[pl=?x] +NP -> "John" +NP -> "I" +Det -> "the" +Det -> "my" +Det[-pl] -> "a" +Noun[-pl] -> "dog" +Noun[-pl] -> "cookie" +Verb -> "ate" +Verb -> "saw" +Prep -> "with" +Prep -> "under" +""" + ) + + +def demo( + print_times=True, + print_grammar=True, + print_trees=True, + print_sentence=True, + trace=1, + parser=FeatureChartParser, + sent="I saw John with a dog with my cookie", +): + import sys + import time + + print() + grammar = demo_grammar() + if print_grammar: + print(grammar) + print() + print("*", parser.__name__) + if print_sentence: + print("Sentence:", sent) + tokens = sent.split() + t = perf_counter() + cp = parser(grammar, trace=trace) + chart = cp.chart_parse(tokens) + trees = list(chart.parses(grammar.start())) + if print_times: + print("Time: %s" % (perf_counter() - t)) + if print_trees: + for tree in trees: + print(tree) + else: + print("Nr trees:", len(trees)) + + +def run_profile(): + import profile + + profile.run("for i in range(1): demo()", "/tmp/profile.out") + import pstats + + p = pstats.Stats("/tmp/profile.out") + p.strip_dirs().sort_stats("time", "cum").print_stats(60) + p.strip_dirs().sort_stats("cum", "time").print_stats(60) + + +if __name__ == "__main__": + from nltk.data import load + + demo() + print() + grammar = load("grammars/book_grammars/feat0.fcfg") + cp = FeatureChartParser(grammar, trace=2) + sent = "Kim likes children" + tokens = sent.split() + trees = cp.parse(tokens) + for tree in trees: + print(tree) diff --git a/venv/lib/python3.10/site-packages/nltk/parse/generate.py b/venv/lib/python3.10/site-packages/nltk/parse/generate.py new file mode 100644 index 0000000000000000000000000000000000000000..fb2f4e9fa03ee09d5de2c25bf15d728033b577e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/generate.py @@ -0,0 +1,85 @@ +# Natural Language Toolkit: Generating from a CFG +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Peter Ljunglöf +# URL: +# For license information, see LICENSE.TXT +# + +import itertools +import sys + +from nltk.grammar import Nonterminal + + +def generate(grammar, start=None, depth=None, n=None): + """ + Generates an iterator of all sentences from a CFG. + + :param grammar: The Grammar used to generate sentences. + :param start: The Nonterminal from which to start generate sentences. + :param depth: The maximal depth of the generated tree. + :param n: The maximum number of sentences to return. + :return: An iterator of lists of terminal tokens. 
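    For instance, with a small illustrative grammar::

        from nltk.grammar import CFG

        grammar = CFG.fromstring('''
            S -> NP VP
            NP -> Det N
            VP -> V NP
            Det -> 'the'
            N -> 'dog' | 'cat'
            V -> 'chased'
        ''')
        for sentence in generate(grammar, n=4):
            print(' '.join(sentence))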
+ """ + if not start: + start = grammar.start() + if depth is None: + depth = sys.maxsize + + iter = _generate_all(grammar, [start], depth) + + if n: + iter = itertools.islice(iter, n) + + return iter + + +def _generate_all(grammar, items, depth): + if items: + try: + for frag1 in _generate_one(grammar, items[0], depth): + for frag2 in _generate_all(grammar, items[1:], depth): + yield frag1 + frag2 + except RecursionError as error: + # Helpful error message while still showing the recursion stack. + raise RuntimeError( + "The grammar has rule(s) that yield infinite recursion!" + ) from error + else: + yield [] + + +def _generate_one(grammar, item, depth): + if depth > 0: + if isinstance(item, Nonterminal): + for prod in grammar.productions(lhs=item): + yield from _generate_all(grammar, prod.rhs(), depth - 1) + else: + yield [item] + + +demo_grammar = """ + S -> NP VP + NP -> Det N + PP -> P NP + VP -> 'slept' | 'saw' NP | 'walked' PP + Det -> 'the' | 'a' + N -> 'man' | 'park' | 'dog' + P -> 'in' | 'with' +""" + + +def demo(N=23): + from nltk.grammar import CFG + + print("Generating the first %d sentences for demo grammar:" % (N,)) + print(demo_grammar) + grammar = CFG.fromstring(demo_grammar) + for n, sent in enumerate(generate(grammar, n=N), 1): + print("%3d. %s" % (n, " ".join(sent))) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/parse/malt.py b/venv/lib/python3.10/site-packages/nltk/parse/malt.py new file mode 100644 index 0000000000000000000000000000000000000000..229e8242719dc4645763706b58363b546bc7e6ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/malt.py @@ -0,0 +1,393 @@ +# Natural Language Toolkit: Interface to MaltParser +# +# Author: Dan Garrette +# Contributor: Liling Tan, Mustufain, osamamukhtar11 +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import inspect +import os +import subprocess +import sys +import tempfile + +from nltk.data import ZipFilePathPointer +from nltk.internals import find_dir, find_file, find_jars_within_path +from nltk.parse.api import ParserI +from nltk.parse.dependencygraph import DependencyGraph +from nltk.parse.util import taggedsents_to_conll + + +def malt_regex_tagger(): + from nltk.tag import RegexpTagger + + _tagger = RegexpTagger( + [ + (r"\.$", "."), + (r"\,$", ","), + (r"\?$", "?"), # fullstop, comma, Qmark + (r"\($", "("), + (r"\)$", ")"), # round brackets + (r"\[$", "["), + (r"\]$", "]"), # square brackets + (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers + (r"(The|the|A|a|An|an)$", "DT"), # articles + (r"(He|he|She|she|It|it|I|me|Me|You|you)$", "PRP"), # pronouns + (r"(His|his|Her|her|Its|its)$", "PRP$"), # possessive + (r"(my|Your|your|Yours|yours)$", "PRP$"), # possessive + (r"(on|On|in|In|at|At|since|Since)$", "IN"), # time prepopsitions + (r"(for|For|ago|Ago|before|Before)$", "IN"), # time prepopsitions + (r"(till|Till|until|Until)$", "IN"), # time prepopsitions + (r"(by|By|beside|Beside)$", "IN"), # space prepopsitions + (r"(under|Under|below|Below)$", "IN"), # space prepopsitions + (r"(over|Over|above|Above)$", "IN"), # space prepopsitions + (r"(across|Across|through|Through)$", "IN"), # space prepopsitions + (r"(into|Into|towards|Towards)$", "IN"), # space prepopsitions + (r"(onto|Onto|from|From)$", "IN"), # space prepopsitions + (r".*able$", "JJ"), # adjectives + (r".*ness$", "NN"), # nouns formed from adjectives + (r".*ly$", "RB"), # adverbs + (r".*s$", "NNS"), # plural nouns + (r".*ing$", "VBG"), # gerunds + 
(r".*ed$", "VBD"), # past tense verbs + (r".*", "NN"), # nouns (default) + ] + ) + return _tagger.tag + + +def find_maltparser(parser_dirname): + """ + A module to find MaltParser .jar file and its dependencies. + """ + if os.path.exists(parser_dirname): # If a full path is given. + _malt_dir = parser_dirname + else: # Try to find path to maltparser directory in environment variables. + _malt_dir = find_dir(parser_dirname, env_vars=("MALT_PARSER",)) + # Checks that that the found directory contains all the necessary .jar + malt_dependencies = ["", "", ""] + _malt_jars = set(find_jars_within_path(_malt_dir)) + _jars = {os.path.split(jar)[1] for jar in _malt_jars} + malt_dependencies = {"log4j.jar", "libsvm.jar", "liblinear-1.8.jar"} + + assert malt_dependencies.issubset(_jars) + assert any( + filter(lambda i: i.startswith("maltparser-") and i.endswith(".jar"), _jars) + ) + return list(_malt_jars) + + +def find_malt_model(model_filename): + """ + A module to find pre-trained MaltParser model. + """ + if model_filename is None: + return "malt_temp.mco" + elif os.path.exists(model_filename): # If a full path is given. + return model_filename + else: # Try to find path to malt model in environment variables. + return find_file(model_filename, env_vars=("MALT_MODEL",), verbose=False) + + +class MaltParser(ParserI): + """ + A class for dependency parsing with MaltParser. The input is the paths to: + - (optionally) a maltparser directory + - (optionally) the path to a pre-trained MaltParser .mco model file + - (optionally) the tagger to use for POS tagging before parsing + - (optionally) additional Java arguments + + Example: + >>> from nltk.parse import malt + >>> # With MALT_PARSER and MALT_MODEL environment set. + >>> mp = malt.MaltParser(model_filename='engmalt.linear-1.7.mco') # doctest: +SKIP + >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP + (shot I (elephant an) (in (pajamas my)) .) + >>> # Without MALT_PARSER and MALT_MODEL environment. + >>> mp = malt.MaltParser('/home/user/maltparser-1.9.2/', '/home/user/engmalt.linear-1.7.mco') # doctest: +SKIP + >>> mp.parse_one('I shot an elephant in my pajamas .'.split()).tree() # doctest: +SKIP + (shot I (elephant an) (in (pajamas my)) .) + """ + + def __init__( + self, + parser_dirname="", + model_filename=None, + tagger=None, + additional_java_args=None, + ): + """ + An interface for parsing with the Malt Parser. + + :param parser_dirname: The path to the maltparser directory that + contains the maltparser-1.x.jar + :type parser_dirname: str + :param model_filename: The name of the pre-trained model with .mco file + extension. If provided, training will not be required. + (see http://www.maltparser.org/mco/mco.html and + see http://www.patful.com/chalk/node/185) + :type model_filename: str + :param tagger: The tagger used to POS tag the raw string before + formatting to CONLL format. It should behave like `nltk.pos_tag` + :type tagger: function + :param additional_java_args: This is the additional Java arguments that + one can use when calling Maltparser, usually this is the heapsize + limits, e.g. `additional_java_args=['-Xmx1024m']` + (see https://goo.gl/mpDBvQ) + :type additional_java_args: list + """ + + # Find all the necessary jar files for MaltParser. + self.malt_jars = find_maltparser(parser_dirname) + # Initialize additional java arguments. + self.additional_java_args = ( + additional_java_args if additional_java_args is not None else [] + ) + # Initialize model. 
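        # find_malt_model() falls back to the placeholder name 'malt_temp.mco'
        # when no model_filename is given, so self._trained starts out False
        # until train() or train_from_file() has been run.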
+ self.model = find_malt_model(model_filename) + self._trained = self.model != "malt_temp.mco" + # Set the working_dir parameters i.e. `-w` from MaltParser's option. + self.working_dir = tempfile.gettempdir() + # Initialize POS tagger. + self.tagger = tagger if tagger is not None else malt_regex_tagger() + + def parse_tagged_sents(self, sentences, verbose=False, top_relation_label="null"): + """ + Use MaltParser to parse multiple POS tagged sentences. Takes multiple + sentences where each sentence is a list of (word, tag) tuples. + The sentences must have already been tokenized and tagged. + + :param sentences: Input sentences to parse + :type sentence: list(list(tuple(str, str))) + :return: iter(iter(``DependencyGraph``)) the dependency graph + representation of each sentence + """ + if not self._trained: + raise Exception("Parser has not been trained. Call train() first.") + + with tempfile.NamedTemporaryFile( + prefix="malt_input.conll.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + with tempfile.NamedTemporaryFile( + prefix="malt_output.conll.", + dir=self.working_dir, + mode="w", + delete=False, + ) as output_file: + # Convert list of sentences to CONLL format. + for line in taggedsents_to_conll(sentences): + input_file.write(str(line)) + input_file.close() + + # Generate command to run maltparser. + cmd = self.generate_malt_command( + input_file.name, output_file.name, mode="parse" + ) + + # This is a maltparser quirk, it needs to be run + # where the model file is. otherwise it goes into an awkward + # missing .jars or strange -w working_dir problem. + _current_path = os.getcwd() # Remembers the current path. + try: # Change to modelfile path + os.chdir(os.path.split(self.model)[0]) + except: + pass + ret = self._execute(cmd, verbose) # Run command. + os.chdir(_current_path) # Change back to current path. + + if ret != 0: + raise Exception( + "MaltParser parsing (%s) failed with exit " + "code %d" % (" ".join(cmd), ret) + ) + + # Must return iter(iter(Tree)) + with open(output_file.name) as infile: + for tree_str in infile.read().split("\n\n"): + yield ( + iter( + [ + DependencyGraph( + tree_str, top_relation_label=top_relation_label + ) + ] + ) + ) + + os.remove(input_file.name) + os.remove(output_file.name) + + def parse_sents(self, sentences, verbose=False, top_relation_label="null"): + """ + Use MaltParser to parse multiple sentences. + Takes a list of sentences, where each sentence is a list of words. + Each sentence will be automatically tagged with this + MaltParser instance's tagger. + + :param sentences: Input sentences to parse + :type sentence: list(list(str)) + :return: iter(DependencyGraph) + """ + tagged_sentences = (self.tagger(sentence) for sentence in sentences) + return self.parse_tagged_sents( + tagged_sentences, verbose, top_relation_label=top_relation_label + ) + + def generate_malt_command(self, inputfilename, outputfilename=None, mode=None): + """ + This function generates the maltparser command use at the terminal. 
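        On a POSIX system a parse run produces a command of roughly the
        following shape (the jar, model and file names here are only
        illustrative)::

            java -Xmx1024m -cp maltparser-1.9.2.jar:liblinear-1.8.jar:libsvm.jar:log4j.jar \
                org.maltparser.Malt -c engmalt.linear-1.7.mco \
                -i /tmp/malt_input.conll -o /tmp/malt_output.conll -m parse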
+ + :param inputfilename: path to the input file + :type inputfilename: str + :param outputfilename: path to the output file + :type outputfilename: str + """ + + cmd = ["java"] + cmd += self.additional_java_args # Adds additional java arguments + # Joins classpaths with ";" if on Windows and on Linux/Mac use ":" + classpaths_separator = ";" if sys.platform.startswith("win") else ":" + cmd += [ + "-cp", + classpaths_separator.join(self.malt_jars), + ] # Adds classpaths for jars + cmd += ["org.maltparser.Malt"] # Adds the main function. + + # Adds the model file. + if os.path.exists(self.model): # when parsing + cmd += ["-c", os.path.split(self.model)[-1]] + else: # when learning + cmd += ["-c", self.model] + + cmd += ["-i", inputfilename] + if mode == "parse": + cmd += ["-o", outputfilename] + cmd += ["-m", mode] # mode use to generate parses. + return cmd + + @staticmethod + def _execute(cmd, verbose=False): + output = None if verbose else subprocess.PIPE + p = subprocess.Popen(cmd, stdout=output, stderr=output) + return p.wait() + + def train(self, depgraphs, verbose=False): + """ + Train MaltParser from a list of ``DependencyGraph`` objects + + :param depgraphs: list of ``DependencyGraph`` objects for training input data + :type depgraphs: DependencyGraph + """ + + # Write the conll_str to malt_train.conll file in /tmp/ + with tempfile.NamedTemporaryFile( + prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + input_str = "\n".join(dg.to_conll(10) for dg in depgraphs) + input_file.write(str(input_str)) + # Trains the model with the malt_train.conll + self.train_from_file(input_file.name, verbose=verbose) + # Removes the malt_train.conll once training finishes. + os.remove(input_file.name) + + def train_from_file(self, conll_file, verbose=False): + """ + Train MaltParser from a file + :param conll_file: str for the filename of the training input data + :type conll_file: str + """ + + # If conll_file is a ZipFilePathPointer, + # then we need to do some extra massaging + if isinstance(conll_file, ZipFilePathPointer): + with tempfile.NamedTemporaryFile( + prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + with conll_file.open() as conll_input_file: + conll_str = conll_input_file.read() + input_file.write(str(conll_str)) + return self.train_from_file(input_file.name, verbose=verbose) + + # Generate command to run maltparser. + cmd = self.generate_malt_command(conll_file, mode="learn") + ret = self._execute(cmd, verbose) + if ret != 0: + raise Exception( + "MaltParser training (%s) failed with exit " + "code %d" % (" ".join(cmd), ret) + ) + self._trained = True + + +if __name__ == "__main__": + """ + A demonstration function to show how NLTK users can use the malt parser API. + + >>> from nltk import pos_tag + >>> assert 'MALT_PARSER' in os.environ, str( + ... "Please set MALT_PARSER in your global environment, e.g.:\n" + ... "$ export MALT_PARSER='/home/user/maltparser-1.9.2/'") + >>> + >>> assert 'MALT_MODEL' in os.environ, str( + ... "Please set MALT_MODEL in your global environment, e.g.:\n" + ... "$ export MALT_MODEL='/home/user/engmalt.linear-1.7.mco'") + >>> + >>> _dg1_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n" + ... "2 sees _ VB _ _ 0 ROOT _ _\n" + ... "3 a _ DT _ _ 4 SPEC _ _\n" + ... "4 dog _ NN _ _ 2 OBJ _ _\n" + ... "5 . _ . _ _ 2 PUNCT _ _\n") + >>> + >>> + >>> _dg2_str = str("1 John _ NNP _ _ 2 SUBJ _ _\n" + ... "2 walks _ VB _ _ 0 ROOT _ _\n" + ... "3 . _ . 
_ _ 2 PUNCT _ _\n") + >>> dg1 = DependencyGraph(_dg1_str) + >>> dg2 = DependencyGraph(_dg2_str) + >>> # Initialize a MaltParser object + >>> mp = MaltParser() + >>> + >>> # Trains a model. + >>> mp.train([dg1,dg2], verbose=False) + >>> sent1 = ['John','sees','Mary', '.'] + >>> sent2 = ['John', 'walks', 'a', 'dog', '.'] + >>> + >>> # Parse a single sentence. + >>> parsed_sent1 = mp.parse_one(sent1) + >>> parsed_sent2 = mp.parse_one(sent2) + >>> print(parsed_sent1.tree()) + (sees John Mary .) + >>> print(parsed_sent2.tree()) + (walks John (dog a) .) + >>> + >>> # Parsing multiple sentences. + >>> sentences = [sent1,sent2] + >>> parsed_sents = mp.parse_sents(sentences) + >>> print(next(next(parsed_sents)).tree()) + (sees John Mary .) + >>> print(next(next(parsed_sents)).tree()) + (walks John (dog a) .) + >>> + >>> # Initialize a MaltParser object with an English pre-trained model. + >>> parser_dirname = 'maltparser-1.9.2' + >>> model_name = 'engmalt.linear-1.7.mco' + >>> mp = MaltParser(parser_dirname=parser_dirname, model_filename=model_name, tagger=pos_tag) + >>> sent1 = 'I shot an elephant in my pajamas .'.split() + >>> sent2 = 'Time flies like banana .'.split() + >>> # Parse a single sentence. + >>> print(mp.parse_one(sent1).tree()) + (shot I (elephant an) (in (pajamas my)) .) + # Parsing multiple sentences + >>> sentences = [sent1,sent2] + >>> parsed_sents = mp.parse_sents(sentences) + >>> print(next(next(parsed_sents)).tree()) + (shot I (elephant an) (in (pajamas my)) .) + >>> print(next(next(parsed_sents)).tree()) + (flies Time (like banana) .) + """ + + import doctest + + doctest.testmod() diff --git a/venv/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py b/venv/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py new file mode 100644 index 0000000000000000000000000000000000000000..b96f996cf63b4d3e093994d6319c8fb9fb91569a --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py @@ -0,0 +1,772 @@ +# Natural Language Toolkit: Dependency Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jason Narad +# +# URL: +# For license information, see LICENSE.TXT +# + +import logging +import math + +from nltk.parse.dependencygraph import DependencyGraph + +logger = logging.getLogger(__name__) + +################################################################# +# DependencyScorerI - Interface for Graph-Edge Weight Calculation +################################################################# + + +class DependencyScorerI: + """ + A scorer for calculated the weights on the edges of a weighted + dependency graph. This is used by a + ``ProbabilisticNonprojectiveParser`` to initialize the edge + weights of a ``DependencyGraph``. While typically this would be done + by training a binary classifier, any class that can return a + multidimensional list representation of the edge weights can + implement this interface. As such, it has no necessary + fields. + """ + + def __init__(self): + if self.__class__ == DependencyScorerI: + raise TypeError("DependencyScorerI is an abstract interface") + + def train(self, graphs): + """ + :type graphs: list(DependencyGraph) + :param graphs: A list of dependency graphs to train the scorer. + Typically the edges present in the graphs can be used as + positive training examples, and the edges not present as negative + examples. 
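        A subclass only has to provide ``train`` and ``score``. A purely
        illustrative uniform scorer (compare ``DemoScorer`` below) could look
        like::

            class UniformScorer(DependencyScorerI):
                def train(self, graphs):
                    pass  # nothing to learn

                def score(self, graph):
                    # give every potential head -> dependent arc the same weight
                    n = len(graph.nodes)
                    return [[[1] for _ in range(n)] for _ in range(n)]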
+ """ + raise NotImplementedError() + + def score(self, graph): + """ + :type graph: DependencyGraph + :param graph: A dependency graph whose set of edges need to be + scored. + :rtype: A three-dimensional list of numbers. + :return: The score is returned in a multidimensional(3) list, such + that the outer-dimension refers to the head, and the + inner-dimension refers to the dependencies. For instance, + scores[0][1] would reference the list of scores corresponding to + arcs from node 0 to node 1. The node's 'address' field can be used + to determine its number identification. + + For further illustration, a score list corresponding to Fig.2 of + Keith Hall's 'K-best Spanning Tree Parsing' paper:: + + scores = [[[], [5], [1], [1]], + [[], [], [11], [4]], + [[], [10], [], [5]], + [[], [8], [8], []]] + + When used in conjunction with a MaxEntClassifier, each score would + correspond to the confidence of a particular edge being classified + with the positive training examples. + """ + raise NotImplementedError() + + +################################################################# +# NaiveBayesDependencyScorer +################################################################# + + +class NaiveBayesDependencyScorer(DependencyScorerI): + """ + A dependency scorer built around a MaxEnt classifier. In this + particular class that classifier is a ``NaiveBayesClassifier``. + It uses head-word, head-tag, child-word, and child-tag features + for classification. + + >>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2 + + >>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry] + >>> npp = ProbabilisticNonprojectiveParser() + >>> npp.train(graphs, NaiveBayesDependencyScorer()) + >>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc']) + >>> len(list(parses)) + 1 + + """ + + def __init__(self): + pass # Do nothing without throwing error + + def train(self, graphs): + """ + Trains a ``NaiveBayesClassifier`` using the edges present in + graphs list as positive examples, the edges not present as + negative examples. Uses a feature vector of head-word, + head-tag, child-word, and child-tag. + + :type graphs: list(DependencyGraph) + :param graphs: A list of dependency graphs to train the scorer. + """ + + from nltk.classify import NaiveBayesClassifier + + # Create training labeled training examples + labeled_examples = [] + for graph in graphs: + for head_node in graph.nodes.values(): + for child_index, child_node in graph.nodes.items(): + if child_index in head_node["deps"]: + label = "T" + else: + label = "F" + labeled_examples.append( + ( + dict( + a=head_node["word"], + b=head_node["tag"], + c=child_node["word"], + d=child_node["tag"], + ), + label, + ) + ) + + self.classifier = NaiveBayesClassifier.train(labeled_examples) + + def score(self, graph): + """ + Converts the graph into a feature-based representation of + each edge, and then assigns a score to each based on the + confidence of the classifier in assigning it to the + positive label. Scores are returned in a multidimensional list. + + :type graph: DependencyGraph + :param graph: A dependency graph to score. + :rtype: 3 dimensional list + :return: Edge scores for the graph parameter. 
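        The returned matrix has one row and one column per node of the graph;
        each cell is a one-element list holding the smoothed log probability
        that the corresponding head -> dependent arc is a true dependency.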
+ """ + # Convert graph to feature representation + edges = [] + for head_node in graph.nodes.values(): + for child_node in graph.nodes.values(): + edges.append( + dict( + a=head_node["word"], + b=head_node["tag"], + c=child_node["word"], + d=child_node["tag"], + ) + ) + + # Score edges + edge_scores = [] + row = [] + count = 0 + for pdist in self.classifier.prob_classify_many(edges): + logger.debug("%.4f %.4f", pdist.prob("T"), pdist.prob("F")) + # smoothing in case the probability = 0 + row.append([math.log(pdist.prob("T") + 0.00000000001)]) + count += 1 + if count == len(graph.nodes): + edge_scores.append(row) + row = [] + count = 0 + return edge_scores + + +################################################################# +# A Scorer for Demo Purposes +################################################################# +# A short class necessary to show parsing example from paper +class DemoScorer(DependencyScorerI): + def train(self, graphs): + print("Training...") + + def score(self, graph): + # scores for Keith Hall 'K-best Spanning Tree Parsing' paper + return [ + [[], [5], [1], [1]], + [[], [], [11], [4]], + [[], [10], [], [5]], + [[], [8], [8], []], + ] + + +################################################################# +# Non-Projective Probabilistic Parsing +################################################################# + + +class ProbabilisticNonprojectiveParser: + """A probabilistic non-projective dependency parser. + + Nonprojective dependencies allows for "crossing branches" in the parse tree + which is necessary for representing particular linguistic phenomena, or even + typical parses in some languages. This parser follows the MST parsing + algorithm, outlined in McDonald(2005), which likens the search for the best + non-projective parse to finding the maximum spanning tree in a weighted + directed graph. + + >>> class Scorer(DependencyScorerI): + ... def train(self, graphs): + ... pass + ... + ... def score(self, graph): + ... return [ + ... [[], [5], [1], [1]], + ... [[], [], [11], [4]], + ... [[], [10], [], [5]], + ... [[], [8], [8], []], + ... ] + + + >>> npp = ProbabilisticNonprojectiveParser() + >>> npp.train([], Scorer()) + + >>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None]) + >>> len(list(parses)) + 1 + + Rule based example + + >>> from nltk.grammar import DependencyGrammar + + >>> grammar = DependencyGrammar.fromstring(''' + ... 'taught' -> 'play' | 'man' + ... 'man' -> 'the' | 'in' + ... 'in' -> 'corner' + ... 'corner' -> 'the' + ... 'play' -> 'golf' | 'dachshund' | 'to' + ... 'dachshund' -> 'his' + ... ''') + + >>> ndp = NonprojectiveDependencyParser(grammar) + >>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf']) + >>> len(list(parses)) + 4 + + """ + + def __init__(self): + """ + Creates a new non-projective parser. + """ + logging.debug("initializing prob. nonprojective...") + + def train(self, graphs, dependency_scorer): + """ + Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects, + and establishes this as the parser's scorer. This is used to + initialize the scores on a ``DependencyGraph`` during the parsing + procedure. + + :type graphs: list(DependencyGraph) + :param graphs: A list of dependency graphs to train the scorer. + :type dependency_scorer: DependencyScorerI + :param dependency_scorer: A scorer which implements the + ``DependencyScorerI`` interface. 
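        For example, mirroring the ``NaiveBayesDependencyScorer`` doctest
        above::

            # graphs: a list of gold DependencyGraph objects,
            # e.g. built from conll_data2
            npp = ProbabilisticNonprojectiveParser()
            npp.train(graphs, NaiveBayesDependencyScorer())
            parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'],
                               ['N', 'V', 'Pron', 'Adj', 'N', 'Punc'])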
+ """ + self._scorer = dependency_scorer + self._scorer.train(graphs) + + def initialize_edge_scores(self, graph): + """ + Assigns a score to every edge in the ``DependencyGraph`` graph. + These scores are generated via the parser's scorer which + was assigned during the training process. + + :type graph: DependencyGraph + :param graph: A dependency graph to assign scores to. + """ + self.scores = self._scorer.score(graph) + + def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph): + """ + Takes a list of nodes that have been identified to belong to a cycle, + and collapses them into on larger node. The arcs of all nodes in + the graph must be updated to account for this. + + :type new_node: Node. + :param new_node: A Node (Dictionary) to collapse the cycle nodes into. + :type cycle_path: A list of integers. + :param cycle_path: A list of node addresses, each of which is in the cycle. + :type g_graph, b_graph, c_graph: DependencyGraph + :param g_graph, b_graph, c_graph: Graphs which need to be updated. + """ + logger.debug("Collapsing nodes...") + # Collapse all cycle nodes into v_n+1 in G_Graph + for cycle_node_index in cycle_path: + g_graph.remove_by_address(cycle_node_index) + g_graph.add_node(new_node) + g_graph.redirect_arcs(cycle_path, new_node["address"]) + + def update_edge_scores(self, new_node, cycle_path): + """ + Updates the edge scores to reflect a collapse operation into + new_node. + + :type new_node: A Node. + :param new_node: The node which cycle nodes are collapsed into. + :type cycle_path: A list of integers. + :param cycle_path: A list of node addresses that belong to the cycle. + """ + logger.debug("cycle %s", cycle_path) + + cycle_path = self.compute_original_indexes(cycle_path) + + logger.debug("old cycle %s", cycle_path) + logger.debug("Prior to update: %s", self.scores) + + for i, row in enumerate(self.scores): + for j, column in enumerate(self.scores[i]): + logger.debug(self.scores[i][j]) + if j in cycle_path and i not in cycle_path and self.scores[i][j]: + subtract_val = self.compute_max_subtract_score(j, cycle_path) + + logger.debug("%s - %s", self.scores[i][j], subtract_val) + + new_vals = [] + for cur_val in self.scores[i][j]: + new_vals.append(cur_val - subtract_val) + + self.scores[i][j] = new_vals + + for i, row in enumerate(self.scores): + for j, cell in enumerate(self.scores[i]): + if i in cycle_path and j in cycle_path: + self.scores[i][j] = [] + + logger.debug("After update: %s", self.scores) + + def compute_original_indexes(self, new_indexes): + """ + As nodes are collapsed into others, they are replaced + by the new node in the graph, but it's still necessary + to keep track of what these original nodes were. This + takes a list of node addresses and replaces any collapsed + node addresses with their original addresses. + + :type new_indexes: A list of integers. + :param new_indexes: A list of node addresses to check for + subsumed nodes. + """ + swapped = True + while swapped: + originals = [] + swapped = False + for new_index in new_indexes: + if new_index in self.inner_nodes: + for old_val in self.inner_nodes[new_index]: + if old_val not in originals: + originals.append(old_val) + swapped = True + else: + originals.append(new_index) + new_indexes = originals + return new_indexes + + def compute_max_subtract_score(self, column_index, cycle_indexes): + """ + When updating scores the score of the highest-weighted incoming + arc is subtracted upon collapse. This returns the correct + amount to subtract from that edge. 
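        For example, if the arcs reaching this node from inside the cycle score
        8 and 10, the value returned is 10, and ``update_edge_scores`` then
        lowers every arc entering the node from outside the cycle by 10.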
+ + :type column_index: integer. + :param column_index: A index representing the column of incoming arcs + to a particular node being updated + :type cycle_indexes: A list of integers. + :param cycle_indexes: Only arcs from cycle nodes are considered. This + is a list of such nodes addresses. + """ + max_score = -100000 + for row_index in cycle_indexes: + for subtract_val in self.scores[row_index][column_index]: + if subtract_val > max_score: + max_score = subtract_val + return max_score + + def best_incoming_arc(self, node_index): + """ + Returns the source of the best incoming arc to the + node with address: node_index + + :type node_index: integer. + :param node_index: The address of the 'destination' node, + the node that is arced to. + """ + originals = self.compute_original_indexes([node_index]) + logger.debug("originals: %s", originals) + + max_arc = None + max_score = None + for row_index in range(len(self.scores)): + for col_index in range(len(self.scores[row_index])): + if col_index in originals and ( + max_score is None or self.scores[row_index][col_index] > max_score + ): + max_score = self.scores[row_index][col_index] + max_arc = row_index + logger.debug("%s, %s", row_index, col_index) + + logger.debug(max_score) + + for key in self.inner_nodes: + replaced_nodes = self.inner_nodes[key] + if max_arc in replaced_nodes: + return key + + return max_arc + + def original_best_arc(self, node_index): + originals = self.compute_original_indexes([node_index]) + max_arc = None + max_score = None + max_orig = None + for row_index in range(len(self.scores)): + for col_index in range(len(self.scores[row_index])): + if col_index in originals and ( + max_score is None or self.scores[row_index][col_index] > max_score + ): + max_score = self.scores[row_index][col_index] + max_arc = row_index + max_orig = col_index + return [max_arc, max_orig] + + def parse(self, tokens, tags): + """ + Parses a list of tokens in accordance to the MST parsing algorithm + for non-projective dependency parses. Assumes that the tokens to + be parsed have already been tagged and those tags are provided. Various + scoring methods can be used by implementing the ``DependencyScorerI`` + interface and passing it to the training algorithm. + + :type tokens: list(str) + :param tokens: A list of words or punctuation to be parsed. + :type tags: list(str) + :param tags: A list of tags corresponding by index to the words in the tokens list. + :return: An iterator of non-projective parses. 
+ :rtype: iter(DependencyGraph) + """ + self.inner_nodes = {} + + # Initialize g_graph + g_graph = DependencyGraph() + for index, token in enumerate(tokens): + g_graph.nodes[index + 1].update( + {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} + ) + + # Fully connect non-root nodes in g_graph + g_graph.connect_graph() + original_graph = DependencyGraph() + for index, token in enumerate(tokens): + original_graph.nodes[index + 1].update( + {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} + ) + + b_graph = DependencyGraph() + c_graph = DependencyGraph() + + for index, token in enumerate(tokens): + c_graph.nodes[index + 1].update( + {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} + ) + + # Assign initial scores to g_graph edges + self.initialize_edge_scores(g_graph) + logger.debug(self.scores) + # Initialize a list of unvisited vertices (by node address) + unvisited_vertices = [vertex["address"] for vertex in c_graph.nodes.values()] + # Iterate over unvisited vertices + nr_vertices = len(tokens) + betas = {} + while unvisited_vertices: + # Mark current node as visited + current_vertex = unvisited_vertices.pop(0) + logger.debug("current_vertex: %s", current_vertex) + # Get corresponding node n_i to vertex v_i + current_node = g_graph.get_by_address(current_vertex) + logger.debug("current_node: %s", current_node) + # Get best in-edge node b for current node + best_in_edge = self.best_incoming_arc(current_vertex) + betas[current_vertex] = self.original_best_arc(current_vertex) + logger.debug("best in arc: %s --> %s", best_in_edge, current_vertex) + # b_graph = Union(b_graph, b) + for new_vertex in [current_vertex, best_in_edge]: + b_graph.nodes[new_vertex].update( + {"word": "TEMP", "rel": "NTOP", "address": new_vertex} + ) + b_graph.add_arc(best_in_edge, current_vertex) + # Beta(current node) = b - stored for parse recovery + # If b_graph contains a cycle, collapse it + cycle_path = b_graph.contains_cycle() + if cycle_path: + # Create a new node v_n+1 with address = len(nodes) + 1 + new_node = {"word": "NONE", "rel": "NTOP", "address": nr_vertices + 1} + # c_graph = Union(c_graph, v_n+1) + c_graph.add_node(new_node) + # Collapse all nodes in cycle C into v_n+1 + self.update_edge_scores(new_node, cycle_path) + self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph) + for cycle_index in cycle_path: + c_graph.add_arc(new_node["address"], cycle_index) + # self.replaced_by[cycle_index] = new_node['address'] + + self.inner_nodes[new_node["address"]] = cycle_path + + # Add v_n+1 to list of unvisited vertices + unvisited_vertices.insert(0, nr_vertices + 1) + + # increment # of nodes counter + nr_vertices += 1 + + # Remove cycle nodes from b_graph; B = B - cycle c + for cycle_node_address in cycle_path: + b_graph.remove_by_address(cycle_node_address) + + logger.debug("g_graph: %s", g_graph) + logger.debug("b_graph: %s", b_graph) + logger.debug("c_graph: %s", c_graph) + logger.debug("Betas: %s", betas) + logger.debug("replaced nodes %s", self.inner_nodes) + + # Recover parse tree + logger.debug("Final scores: %s", self.scores) + + logger.debug("Recovering parse...") + for i in range(len(tokens) + 1, nr_vertices + 1): + betas[betas[i][1]] = betas[i] + + logger.debug("Betas: %s", betas) + for node in original_graph.nodes.values(): + # TODO: It's dangerous to assume that deps it a dictionary + # because it's a default dictionary. 
Ideally, here we should not + # be concerned how dependencies are stored inside of a dependency + # graph. + node["deps"] = {} + for i in range(1, len(tokens) + 1): + original_graph.add_arc(betas[i][0], betas[i][1]) + + logger.debug("Done.") + yield original_graph + + +################################################################# +# Rule-based Non-Projective Parser +################################################################# + + +class NonprojectiveDependencyParser: + """ + A non-projective, rule-based, dependency parser. This parser + will return the set of all possible non-projective parses based on + the word-to-word relations defined in the parser's dependency + grammar, and will allow the branches of the parse tree to cross + in order to capture a variety of linguistic phenomena that a + projective parser will not. + """ + + def __init__(self, dependency_grammar): + """ + Creates a new ``NonprojectiveDependencyParser``. + + :param dependency_grammar: a grammar of word-to-word relations. + :type dependency_grammar: DependencyGrammar + """ + self._grammar = dependency_grammar + + def parse(self, tokens): + """ + Parses the input tokens with respect to the parser's grammar. Parsing + is accomplished by representing the search-space of possible parses as + a fully-connected directed graph. Arcs that would lead to ungrammatical + parses are removed and a lattice is constructed of length n, where n is + the number of input tokens, to represent all possible grammatical + traversals. All possible paths through the lattice are then enumerated + to produce the set of non-projective parses. + + param tokens: A list of tokens to parse. + type tokens: list(str) + return: An iterator of non-projective parses. + rtype: iter(DependencyGraph) + """ + # Create graph representation of tokens + self._graph = DependencyGraph() + + for index, token in enumerate(tokens): + self._graph.nodes[index] = { + "word": token, + "deps": [], + "rel": "NTOP", + "address": index, + } + + for head_node in self._graph.nodes.values(): + deps = [] + for dep_node in self._graph.nodes.values(): + if ( + self._grammar.contains(head_node["word"], dep_node["word"]) + and head_node["word"] != dep_node["word"] + ): + deps.append(dep_node["address"]) + head_node["deps"] = deps + + # Create lattice of possible heads + roots = [] + possible_heads = [] + for i, word in enumerate(tokens): + heads = [] + for j, head in enumerate(tokens): + if (i != j) and self._grammar.contains(head, word): + heads.append(j) + if len(heads) == 0: + roots.append(i) + possible_heads.append(heads) + + # Set roots to attempt + if len(roots) < 2: + if len(roots) == 0: + for i in range(len(tokens)): + roots.append(i) + + # Traverse lattice + analyses = [] + for _ in roots: + stack = [] + analysis = [[] for i in range(len(possible_heads))] + i = 0 + forward = True + while i >= 0: + if forward: + if len(possible_heads[i]) == 1: + analysis[i] = possible_heads[i][0] + elif len(possible_heads[i]) == 0: + analysis[i] = -1 + else: + head = possible_heads[i].pop() + analysis[i] = head + stack.append([i, head]) + if not forward: + index_on_stack = False + for stack_item in stack: + if stack_item[0] == i: + index_on_stack = True + orig_length = len(possible_heads[i]) + + if index_on_stack and orig_length == 0: + for j in range(len(stack) - 1, -1, -1): + stack_item = stack[j] + if stack_item[0] == i: + possible_heads[i].append(stack.pop(j)[1]) + + elif index_on_stack and orig_length > 0: + head = possible_heads[i].pop() + analysis[i] = head + stack.append([i, 
head]) + forward = True + + if i + 1 == len(possible_heads): + analyses.append(analysis[:]) + forward = False + if forward: + i += 1 + else: + i -= 1 + + # Filter parses + # ensure 1 root, every thing has 1 head + for analysis in analyses: + if analysis.count(-1) > 1: + # there are several root elements! + continue + + graph = DependencyGraph() + graph.root = graph.nodes[analysis.index(-1) + 1] + + for address, (token, head_index) in enumerate( + zip(tokens, analysis), start=1 + ): + head_address = head_index + 1 + + node = graph.nodes[address] + node.update({"word": token, "address": address}) + + if head_address == 0: + rel = "ROOT" + else: + rel = "" + graph.nodes[head_index + 1]["deps"][rel].append(address) + + # TODO: check for cycles + yield graph + + +################################################################# +# Demos +################################################################# + + +def demo(): + # hall_demo() + nonprojective_conll_parse_demo() + rule_based_demo() + + +def hall_demo(): + npp = ProbabilisticNonprojectiveParser() + npp.train([], DemoScorer()) + for parse_graph in npp.parse(["v1", "v2", "v3"], [None, None, None]): + print(parse_graph) + + +def nonprojective_conll_parse_demo(): + from nltk.parse.dependencygraph import conll_data2 + + graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] + npp = ProbabilisticNonprojectiveParser() + npp.train(graphs, NaiveBayesDependencyScorer()) + for parse_graph in npp.parse( + ["Cathy", "zag", "hen", "zwaaien", "."], ["N", "V", "Pron", "Adj", "N", "Punc"] + ): + print(parse_graph) + + +def rule_based_demo(): + from nltk.grammar import DependencyGrammar + + grammar = DependencyGrammar.fromstring( + """ + 'taught' -> 'play' | 'man' + 'man' -> 'the' | 'in' + 'in' -> 'corner' + 'corner' -> 'the' + 'play' -> 'golf' | 'dachshund' | 'to' + 'dachshund' -> 'his' + """ + ) + print(grammar) + ndp = NonprojectiveDependencyParser(grammar) + graphs = ndp.parse( + [ + "the", + "man", + "in", + "the", + "corner", + "taught", + "his", + "dachshund", + "to", + "play", + "golf", + ] + ) + print("Graphs:") + for graph in graphs: + print(graph) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/parse/stanford.py b/venv/lib/python3.10/site-packages/nltk/parse/stanford.py new file mode 100644 index 0000000000000000000000000000000000000000..c5ed0f9eab042dfdb7d91679ac5502f495a328ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/stanford.py @@ -0,0 +1,470 @@ +# Natural Language Toolkit: Interface to the Stanford Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Xu +# +# URL: +# For license information, see LICENSE.TXT + +import os +import tempfile +import warnings +from subprocess import PIPE + +from nltk.internals import ( + _java_options, + config_java, + find_jar_iter, + find_jars_within_path, + java, +) +from nltk.parse.api import ParserI +from nltk.parse.dependencygraph import DependencyGraph +from nltk.tree import Tree + +_stanford_url = "https://nlp.stanford.edu/software/lex-parser.shtml" + + +class GenericStanfordParser(ParserI): + """Interface to the Stanford Parser""" + + _MODEL_JAR_PATTERN = r"stanford-parser-(\d+)(\.(\d+))+-models\.jar" + _JAR = r"stanford-parser\.jar" + _MAIN_CLASS = "edu.stanford.nlp.parser.lexparser.LexicalizedParser" + + _USE_STDIN = False + _DOUBLE_SPACED_OUTPUT = False + + def __init__( + self, + path_to_jar=None, + path_to_models_jar=None, + model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz", + 
encoding="utf8", + verbose=False, + java_options="-mx4g", + corenlp_options="", + ): + + # find the most recent code and model jar + stanford_jar = max( + find_jar_iter( + self._JAR, + path_to_jar, + env_vars=("STANFORD_PARSER", "STANFORD_CORENLP"), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ), + key=lambda model_path: os.path.dirname(model_path), + ) + + model_jar = max( + find_jar_iter( + self._MODEL_JAR_PATTERN, + path_to_models_jar, + env_vars=("STANFORD_MODELS", "STANFORD_CORENLP"), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ), + key=lambda model_path: os.path.dirname(model_path), + ) + + # self._classpath = (stanford_jar, model_jar) + + # Adding logging jar files to classpath + stanford_dir = os.path.split(stanford_jar)[0] + self._classpath = tuple([model_jar] + find_jars_within_path(stanford_dir)) + + self.model_path = model_path + self._encoding = encoding + self.corenlp_options = corenlp_options + self.java_options = java_options + + def _parse_trees_output(self, output_): + res = [] + cur_lines = [] + cur_trees = [] + blank = False + for line in output_.splitlines(False): + if line == "": + if blank: + res.append(iter(cur_trees)) + cur_trees = [] + blank = False + elif self._DOUBLE_SPACED_OUTPUT: + cur_trees.append(self._make_tree("\n".join(cur_lines))) + cur_lines = [] + blank = True + else: + res.append(iter([self._make_tree("\n".join(cur_lines))])) + cur_lines = [] + else: + cur_lines.append(line) + blank = False + return iter(res) + + def parse_sents(self, sentences, verbose=False): + """ + Use StanfordParser to parse multiple sentences. Takes multiple sentences as a + list where each sentence is a list of words. + Each sentence will be automatically tagged with this StanfordParser instance's + tagger. + If whitespaces exists inside a token, then the token will be treated as + separate tokens. + + :param sentences: Input sentences to parse + :type sentences: list(list(str)) + :rtype: iter(iter(Tree)) + """ + cmd = [ + self._MAIN_CLASS, + "-model", + self.model_path, + "-sentences", + "newline", + "-outputFormat", + self._OUTPUT_FORMAT, + "-tokenized", + "-escaper", + "edu.stanford.nlp.process.PTBEscapingProcessor", + ] + return self._parse_trees_output( + self._execute( + cmd, "\n".join(" ".join(sentence) for sentence in sentences), verbose + ) + ) + + def raw_parse(self, sentence, verbose=False): + """ + Use StanfordParser to parse a sentence. Takes a sentence as a string; + before parsing, it will be automatically tokenized and tagged by + the Stanford Parser. + + :param sentence: Input sentence to parse + :type sentence: str + :rtype: iter(Tree) + """ + return next(self.raw_parse_sents([sentence], verbose)) + + def raw_parse_sents(self, sentences, verbose=False): + """ + Use StanfordParser to parse multiple sentences. Takes multiple sentences as a + list of strings. + Each sentence will be automatically tokenized and tagged by the Stanford Parser. + + :param sentences: Input sentences to parse + :type sentences: list(str) + :rtype: iter(iter(Tree)) + """ + cmd = [ + self._MAIN_CLASS, + "-model", + self.model_path, + "-sentences", + "newline", + "-outputFormat", + self._OUTPUT_FORMAT, + ] + return self._parse_trees_output( + self._execute(cmd, "\n".join(sentences), verbose) + ) + + def tagged_parse(self, sentence, verbose=False): + """ + Use StanfordParser to parse a sentence. Takes a sentence as a list of + (word, tag) tuples; the sentence must have already been tokenized and + tagged. 
+ + :param sentence: Input sentence to parse + :type sentence: list(tuple(str, str)) + :rtype: iter(Tree) + """ + return next(self.tagged_parse_sents([sentence], verbose)) + + def tagged_parse_sents(self, sentences, verbose=False): + """ + Use StanfordParser to parse multiple sentences. Takes multiple sentences + where each sentence is a list of (word, tag) tuples. + The sentences must have already been tokenized and tagged. + + :param sentences: Input sentences to parse + :type sentences: list(list(tuple(str, str))) + :rtype: iter(iter(Tree)) + """ + tag_separator = "/" + cmd = [ + self._MAIN_CLASS, + "-model", + self.model_path, + "-sentences", + "newline", + "-outputFormat", + self._OUTPUT_FORMAT, + "-tokenized", + "-tagSeparator", + tag_separator, + "-tokenizerFactory", + "edu.stanford.nlp.process.WhitespaceTokenizer", + "-tokenizerMethod", + "newCoreLabelTokenizerFactory", + ] + # We don't need to escape slashes as "splitting is done on the last instance of the character in the token" + return self._parse_trees_output( + self._execute( + cmd, + "\n".join( + " ".join(tag_separator.join(tagged) for tagged in sentence) + for sentence in sentences + ), + verbose, + ) + ) + + def _execute(self, cmd, input_, verbose=False): + encoding = self._encoding + cmd.extend(["-encoding", encoding]) + if self.corenlp_options: + cmd.extend(self.corenlp_options.split()) + + default_options = " ".join(_java_options) + + # Configure java. + config_java(options=self.java_options, verbose=verbose) + + # Windows is incompatible with NamedTemporaryFile() without passing in delete=False. + with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file: + # Write the actual sentences to the temporary input file + if isinstance(input_, str) and encoding: + input_ = input_.encode(encoding) + input_file.write(input_) + input_file.flush() + + # Run the tagger and get the output. + if self._USE_STDIN: + input_file.seek(0) + stdout, stderr = java( + cmd, + classpath=self._classpath, + stdin=input_file, + stdout=PIPE, + stderr=PIPE, + ) + else: + cmd.append(input_file.name) + stdout, stderr = java( + cmd, classpath=self._classpath, stdout=PIPE, stderr=PIPE + ) + + stdout = stdout.replace(b"\xc2\xa0", b" ") + stdout = stdout.replace(b"\x00\xa0", b" ") + stdout = stdout.decode(encoding) + + os.unlink(input_file.name) + + # Return java configurations to their default values. + config_java(options=default_options, verbose=False) + + return stdout + + +class StanfordParser(GenericStanfordParser): + """ + >>> parser=StanfordParser( + ... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz" + ... ) # doctest: +SKIP + + >>> list(parser.raw_parse("the quick brown fox jumps over the lazy dog")) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']), + Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), + Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])])] + + >>> sum([list(dep_graphs) for dep_graphs in parser.raw_parse_sents(( + ... "the quick brown fox jumps over the lazy dog", + ... "the quick grey wolf jumps over the lazy fox" + ... 
))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('NP', [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['brown']), + Tree('NN', ['fox'])]), Tree('NP', [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), + Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])])])]), Tree('ROOT', [Tree('NP', + [Tree('NP', [Tree('DT', ['the']), Tree('JJ', ['quick']), Tree('JJ', ['grey']), Tree('NN', ['wolf'])]), Tree('NP', + [Tree('NP', [Tree('NNS', ['jumps'])]), Tree('PP', [Tree('IN', ['over']), Tree('NP', [Tree('DT', ['the']), + Tree('JJ', ['lazy']), Tree('NN', ['fox'])])])])])])] + + >>> sum([list(dep_graphs) for dep_graphs in parser.parse_sents(( + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('S', [Tree('NP', [Tree('PRP', ['I'])]), Tree('VP', [Tree('VBP', ["'m"]), + Tree('NP', [Tree('DT', ['a']), Tree('NN', ['dog'])])])])]), Tree('ROOT', [Tree('S', [Tree('NP', + [Tree('DT', ['This'])]), Tree('VP', [Tree('VBZ', ['is']), Tree('NP', [Tree('NP', [Tree('NP', [Tree('PRP$', ['my']), + Tree('NNS', ['friends']), Tree('POS', ["'"])]), Tree('NN', ['cat'])]), Tree('PRN', [Tree('-LRB-', [Tree('', []), + Tree('NP', [Tree('DT', ['the']), Tree('NN', ['tabby'])]), Tree('-RRB-', [])])])])])])])] + + >>> sum([list(dep_graphs) for dep_graphs in parser.tagged_parse_sents(( + ... ( + ... ("The", "DT"), + ... ("quick", "JJ"), + ... ("brown", "JJ"), + ... ("fox", "NN"), + ... ("jumped", "VBD"), + ... ("over", "IN"), + ... ("the", "DT"), + ... ("lazy", "JJ"), + ... ("dog", "NN"), + ... (".", "."), + ... ), + ... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('ROOT', [Tree('S', [Tree('NP', [Tree('DT', ['The']), Tree('JJ', ['quick']), Tree('JJ', ['brown']), + Tree('NN', ['fox'])]), Tree('VP', [Tree('VBD', ['jumped']), Tree('PP', [Tree('IN', ['over']), Tree('NP', + [Tree('DT', ['the']), Tree('JJ', ['lazy']), Tree('NN', ['dog'])])])]), Tree('.', ['.'])])])] + """ + + _OUTPUT_FORMAT = "penn" + + def __init__(self, *args, **kwargs): + warnings.warn( + "The StanfordParser will be deprecated\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__(*args, **kwargs) + + def _make_tree(self, result): + return Tree.fromstring(result) + + +class StanfordDependencyParser(GenericStanfordParser): + + """ + >>> dep_parser=StanfordDependencyParser( + ... model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz" + ... ) # doctest: +SKIP + + >>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])])] + + >>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')), + ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')), + ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')), + ((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents(( + ... "The quick brown fox jumps over the lazy dog.", + ... 
"The quick grey wolf jumps over the lazy fox." + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy'])]), + Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']), Tree('fox', ['over', 'the', 'lazy'])])] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents(( + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends', ['my', "'"]), Tree('tabby', ['the'])])] + + >>> sum([[list(parse.triples()) for parse in dep_graphs] for dep_graphs in dep_parser.tagged_parse_sents(( + ... ( + ... ("The", "DT"), + ... ("quick", "JJ"), + ... ("brown", "JJ"), + ... ("fox", "NN"), + ... ("jumped", "VBD"), + ... ("over", "IN"), + ... ("the", "DT"), + ... ("lazy", "JJ"), + ... ("dog", "NN"), + ... (".", "."), + ... ), + ... ))],[]) # doctest: +NORMALIZE_WHITESPACE +SKIP + [[((u'jumped', u'VBD'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', (u'The', u'DT')), + ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), u'amod', (u'brown', u'JJ')), + ((u'jumped', u'VBD'), u'nmod', (u'dog', u'NN')), ((u'dog', u'NN'), u'case', (u'over', u'IN')), + ((u'dog', u'NN'), u'det', (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ'))]] + + """ + + _OUTPUT_FORMAT = "conll2007" + + def __init__(self, *args, **kwargs): + warnings.warn( + "The StanfordDependencyParser will be deprecated\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__(*args, **kwargs) + + def _make_tree(self, result): + return DependencyGraph(result, top_relation_label="root") + + +class StanfordNeuralDependencyParser(GenericStanfordParser): + """ + >>> from nltk.parse.stanford import StanfordNeuralDependencyParser # doctest: +SKIP + >>> dep_parser=StanfordNeuralDependencyParser(java_options='-mx4g')# doctest: +SKIP + + >>> [parse.tree() for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', 'the', 'lazy']), '.'])] + + >>> [list(parse.triples()) for parse in dep_parser.raw_parse("The quick brown fox jumps over the lazy dog.")] # doctest: +NORMALIZE_WHITESPACE +SKIP + [[((u'jumps', u'VBZ'), u'nsubj', (u'fox', u'NN')), ((u'fox', u'NN'), u'det', + (u'The', u'DT')), ((u'fox', u'NN'), u'amod', (u'quick', u'JJ')), ((u'fox', u'NN'), + u'amod', (u'brown', u'JJ')), ((u'jumps', u'VBZ'), u'nmod', (u'dog', u'NN')), + ((u'dog', u'NN'), u'case', (u'over', u'IN')), ((u'dog', u'NN'), u'det', + (u'the', u'DT')), ((u'dog', u'NN'), u'amod', (u'lazy', u'JJ')), ((u'jumps', u'VBZ'), + u'punct', (u'.', u'.'))]] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.raw_parse_sents(( + ... "The quick brown fox jumps over the lazy dog.", + ... "The quick grey wolf jumps over the lazy fox." + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('jumps', [Tree('fox', ['The', 'quick', 'brown']), Tree('dog', ['over', + 'the', 'lazy']), '.']), Tree('jumps', [Tree('wolf', ['The', 'quick', 'grey']), + Tree('fox', ['over', 'the', 'lazy']), '.'])] + + >>> sum([[parse.tree() for parse in dep_graphs] for dep_graphs in dep_parser.parse_sents(( + ... "I 'm a dog".split(), + ... 
"This is my friends ' cat ( the tabby )".split(), + ... ))], []) # doctest: +NORMALIZE_WHITESPACE +SKIP + [Tree('dog', ['I', "'m", 'a']), Tree('cat', ['This', 'is', Tree('friends', + ['my', "'"]), Tree('tabby', ['-LRB-', 'the', '-RRB-'])])] + """ + + _OUTPUT_FORMAT = "conll" + _MAIN_CLASS = "edu.stanford.nlp.pipeline.StanfordCoreNLP" + _JAR = r"stanford-corenlp-(\d+)(\.(\d+))+\.jar" + _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)(\.(\d+))+-models\.jar" + _USE_STDIN = True + _DOUBLE_SPACED_OUTPUT = True + + def __init__(self, *args, **kwargs): + warnings.warn( + "The StanfordNeuralDependencyParser will be deprecated\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.", + DeprecationWarning, + stacklevel=2, + ) + + super().__init__(*args, **kwargs) + self.corenlp_options += "-annotators tokenize,ssplit,pos,depparse" + + def tagged_parse_sents(self, sentences, verbose=False): + """ + Currently unimplemented because the neural dependency parser (and + the StanfordCoreNLP pipeline class) doesn't support passing in pre- + tagged tokens. + """ + raise NotImplementedError( + "tagged_parse[_sents] is not supported by " + "StanfordNeuralDependencyParser; use " + "parse[_sents] or raw_parse[_sents] instead." + ) + + def _make_tree(self, result): + return DependencyGraph(result, top_relation_label="ROOT") diff --git a/venv/lib/python3.10/site-packages/nltk/parse/transitionparser.py b/venv/lib/python3.10/site-packages/nltk/parse/transitionparser.py new file mode 100644 index 0000000000000000000000000000000000000000..476d70260a09c92196ea1cce749fc6774e75d822 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/transitionparser.py @@ -0,0 +1,794 @@ +# Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers +# +# Author: Long Duong +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import pickle +import tempfile +from copy import deepcopy +from operator import itemgetter +from os import remove + +try: + from numpy import array + from scipy import sparse + from sklearn import svm + from sklearn.datasets import load_svmlight_file +except ImportError: + pass + +from nltk.parse import DependencyEvaluator, DependencyGraph, ParserI + + +class Configuration: + """ + Class for holding configuration which is the partial analysis of the input sentence. + The transition based parser aims at finding set of operators that transfer the initial + configuration to the terminal configuration. + + The configuration includes: + - Stack: for storing partially proceeded words + - Buffer: for storing remaining input words + - Set of arcs: for storing partially built dependency tree + + This class also provides a method to represent a configuration as list of features. + """ + + def __init__(self, dep_graph): + """ + :param dep_graph: the representation of an input in the form of dependency graph. + :type dep_graph: DependencyGraph where the dependencies are not specified. 
+ """ + # dep_graph.nodes contain list of token for a sentence + self.stack = [0] # The root element + self.buffer = list(range(1, len(dep_graph.nodes))) # The rest is in the buffer + self.arcs = [] # empty set of arc + self._tokens = dep_graph.nodes + self._max_address = len(self.buffer) + + def __str__(self): + return ( + "Stack : " + + str(self.stack) + + " Buffer : " + + str(self.buffer) + + " Arcs : " + + str(self.arcs) + ) + + def _check_informative(self, feat, flag=False): + """ + Check whether a feature is informative + The flag control whether "_" is informative or not + """ + if feat is None: + return False + if feat == "": + return False + if flag is False: + if feat == "_": + return False + return True + + def extract_features(self): + """ + Extract the set of features for the current configuration. Implement standard features as describe in + Table 3.2 (page 31) in Dependency Parsing book by Sandra Kubler, Ryan McDonal, Joakim Nivre. + Please note that these features are very basic. + :return: list(str) + """ + result = [] + # Todo : can come up with more complicated features set for better + # performance. + if len(self.stack) > 0: + # Stack 0 + stack_idx0 = self.stack[len(self.stack) - 1] + token = self._tokens[stack_idx0] + if self._check_informative(token["word"], True): + result.append("STK_0_FORM_" + token["word"]) + if "lemma" in token and self._check_informative(token["lemma"]): + result.append("STK_0_LEMMA_" + token["lemma"]) + if self._check_informative(token["tag"]): + result.append("STK_0_POS_" + token["tag"]) + if "feats" in token and self._check_informative(token["feats"]): + feats = token["feats"].split("|") + for feat in feats: + result.append("STK_0_FEATS_" + feat) + # Stack 1 + if len(self.stack) > 1: + stack_idx1 = self.stack[len(self.stack) - 2] + token = self._tokens[stack_idx1] + if self._check_informative(token["tag"]): + result.append("STK_1_POS_" + token["tag"]) + + # Left most, right most dependency of stack[0] + left_most = 1000000 + right_most = -1 + dep_left_most = "" + dep_right_most = "" + for (wi, r, wj) in self.arcs: + if wi == stack_idx0: + if (wj > wi) and (wj > right_most): + right_most = wj + dep_right_most = r + if (wj < wi) and (wj < left_most): + left_most = wj + dep_left_most = r + if self._check_informative(dep_left_most): + result.append("STK_0_LDEP_" + dep_left_most) + if self._check_informative(dep_right_most): + result.append("STK_0_RDEP_" + dep_right_most) + + # Check Buffered 0 + if len(self.buffer) > 0: + # Buffer 0 + buffer_idx0 = self.buffer[0] + token = self._tokens[buffer_idx0] + if self._check_informative(token["word"], True): + result.append("BUF_0_FORM_" + token["word"]) + if "lemma" in token and self._check_informative(token["lemma"]): + result.append("BUF_0_LEMMA_" + token["lemma"]) + if self._check_informative(token["tag"]): + result.append("BUF_0_POS_" + token["tag"]) + if "feats" in token and self._check_informative(token["feats"]): + feats = token["feats"].split("|") + for feat in feats: + result.append("BUF_0_FEATS_" + feat) + # Buffer 1 + if len(self.buffer) > 1: + buffer_idx1 = self.buffer[1] + token = self._tokens[buffer_idx1] + if self._check_informative(token["word"], True): + result.append("BUF_1_FORM_" + token["word"]) + if self._check_informative(token["tag"]): + result.append("BUF_1_POS_" + token["tag"]) + if len(self.buffer) > 2: + buffer_idx2 = self.buffer[2] + token = self._tokens[buffer_idx2] + if self._check_informative(token["tag"]): + result.append("BUF_2_POS_" + token["tag"]) + if len(self.buffer) 
> 3: + buffer_idx3 = self.buffer[3] + token = self._tokens[buffer_idx3] + if self._check_informative(token["tag"]): + result.append("BUF_3_POS_" + token["tag"]) + # Left most, right most dependency of stack[0] + left_most = 1000000 + right_most = -1 + dep_left_most = "" + dep_right_most = "" + for (wi, r, wj) in self.arcs: + if wi == buffer_idx0: + if (wj > wi) and (wj > right_most): + right_most = wj + dep_right_most = r + if (wj < wi) and (wj < left_most): + left_most = wj + dep_left_most = r + if self._check_informative(dep_left_most): + result.append("BUF_0_LDEP_" + dep_left_most) + if self._check_informative(dep_right_most): + result.append("BUF_0_RDEP_" + dep_right_most) + + return result + + +class Transition: + """ + This class defines a set of transition which is applied to a configuration to get another configuration + Note that for different parsing algorithm, the transition is different. + """ + + # Define set of transitions + LEFT_ARC = "LEFTARC" + RIGHT_ARC = "RIGHTARC" + SHIFT = "SHIFT" + REDUCE = "REDUCE" + + def __init__(self, alg_option): + """ + :param alg_option: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm + :type alg_option: str + """ + self._algo = alg_option + if alg_option not in [ + TransitionParser.ARC_STANDARD, + TransitionParser.ARC_EAGER, + ]: + raise ValueError( + " Currently we only support %s and %s " + % (TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER) + ) + + def left_arc(self, conf, relation): + """ + Note that the algorithm for left-arc is quite similar except for precondition for both arc-standard and arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0): + return -1 + if conf.buffer[0] == 0: + # here is the Root element + return -1 + + idx_wi = conf.stack[len(conf.stack) - 1] + + flag = True + if self._algo == TransitionParser.ARC_EAGER: + for (idx_parent, r, idx_child) in conf.arcs: + if idx_child == idx_wi: + flag = False + + if flag: + conf.stack.pop() + idx_wj = conf.buffer[0] + conf.arcs.append((idx_wj, relation, idx_wi)) + else: + return -1 + + def right_arc(self, conf, relation): + """ + Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0): + return -1 + if self._algo == TransitionParser.ARC_STANDARD: + idx_wi = conf.stack.pop() + idx_wj = conf.buffer[0] + conf.buffer[0] = idx_wi + conf.arcs.append((idx_wi, relation, idx_wj)) + else: # arc-eager + idx_wi = conf.stack[len(conf.stack) - 1] + idx_wj = conf.buffer.pop(0) + conf.stack.append(idx_wj) + conf.arcs.append((idx_wi, relation, idx_wj)) + + def reduce(self, conf): + """ + Note that the algorithm for reduce is only available for arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + + if self._algo != TransitionParser.ARC_EAGER: + return -1 + if len(conf.stack) <= 0: + return -1 + + idx_wi = conf.stack[len(conf.stack) - 1] + flag = False + for (idx_parent, r, idx_child) in conf.arcs: + if idx_child == idx_wi: + flag = True + if flag: + conf.stack.pop() # reduce it + else: + return -1 + + def shift(self, conf): + """ + Note that the algorithm for shift is the SAME for arc-standard 
and arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + if len(conf.buffer) <= 0: + return -1 + idx_wi = conf.buffer.pop(0) + conf.stack.append(idx_wi) + + +class TransitionParser(ParserI): + + """ + Class for transition based parser. Implement 2 algorithms which are "arc-standard" and "arc-eager" + """ + + ARC_STANDARD = "arc-standard" + ARC_EAGER = "arc-eager" + + def __init__(self, algorithm): + """ + :param algorithm: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm + :type algorithm: str + """ + if not (algorithm in [self.ARC_STANDARD, self.ARC_EAGER]): + raise ValueError( + " Currently we only support %s and %s " + % (self.ARC_STANDARD, self.ARC_EAGER) + ) + self._algorithm = algorithm + + self._dictionary = {} + self._transition = {} + self._match_transition = {} + + def _get_dep_relation(self, idx_parent, idx_child, depgraph): + p_node = depgraph.nodes[idx_parent] + c_node = depgraph.nodes[idx_child] + + if c_node["word"] is None: + return None # Root word + + if c_node["head"] == p_node["address"]: + return c_node["rel"] + else: + return None + + def _convert_to_binary_features(self, features): + """ + :param features: list of feature string which is needed to convert to binary features + :type features: list(str) + :return : string of binary features in libsvm format which is 'featureID:value' pairs + """ + unsorted_result = [] + for feature in features: + self._dictionary.setdefault(feature, len(self._dictionary)) + unsorted_result.append(self._dictionary[feature]) + + # Default value of each feature is 1.0 + return " ".join( + str(featureID) + ":1.0" for featureID in sorted(unsorted_result) + ) + + def _is_projective(self, depgraph): + arc_list = [] + for key in depgraph.nodes: + node = depgraph.nodes[key] + + if "head" in node: + childIdx = node["address"] + parentIdx = node["head"] + if parentIdx is not None: + arc_list.append((parentIdx, childIdx)) + + for (parentIdx, childIdx) in arc_list: + # Ensure that childIdx < parentIdx + if childIdx > parentIdx: + temp = childIdx + childIdx = parentIdx + parentIdx = temp + for k in range(childIdx + 1, parentIdx): + for m in range(len(depgraph.nodes)): + if (m < childIdx) or (m > parentIdx): + if (k, m) in arc_list: + return False + if (m, k) in arc_list: + return False + return True + + def _write_to_file(self, key, binary_features, input_file): + """ + write the binary features to input file and update the transition dictionary + """ + self._transition.setdefault(key, len(self._transition) + 1) + self._match_transition[self._transition[key]] = key + + input_str = str(self._transition[key]) + " " + binary_features + "\n" + input_file.write(input_str.encode("utf-8")) + + def _create_training_examples_arc_std(self, depgraphs, input_file): + """ + Create the training example in the libsvm format and write it to the input_file. + Reference : Page 32, Chapter 3. 
Dependency Parsing by Sandra Kubler, Ryan McDonal and Joakim Nivre (2009) + """ + operation = Transition(self.ARC_STANDARD) + count_proj = 0 + training_seq = [] + + for depgraph in depgraphs: + if not self._is_projective(depgraph): + continue + + count_proj += 1 + conf = Configuration(depgraph) + while len(conf.buffer) > 0: + b0 = conf.buffer[0] + features = conf.extract_features() + binary_features = self._convert_to_binary_features(features) + + if len(conf.stack) > 0: + s0 = conf.stack[len(conf.stack) - 1] + # Left-arc operation + rel = self._get_dep_relation(b0, s0, depgraph) + if rel is not None: + key = Transition.LEFT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.left_arc(conf, rel) + training_seq.append(key) + continue + + # Right-arc operation + rel = self._get_dep_relation(s0, b0, depgraph) + if rel is not None: + precondition = True + # Get the max-index of buffer + maxID = conf._max_address + + for w in range(maxID + 1): + if w != b0: + relw = self._get_dep_relation(b0, w, depgraph) + if relw is not None: + if (b0, relw, w) not in conf.arcs: + precondition = False + + if precondition: + key = Transition.RIGHT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.right_arc(conf, rel) + training_seq.append(key) + continue + + # Shift operation as the default + key = Transition.SHIFT + self._write_to_file(key, binary_features, input_file) + operation.shift(conf) + training_seq.append(key) + + print(" Number of training examples : " + str(len(depgraphs))) + print(" Number of valid (projective) examples : " + str(count_proj)) + return training_seq + + def _create_training_examples_arc_eager(self, depgraphs, input_file): + """ + Create the training example in the libsvm format and write it to the input_file. 
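+        The oracle below tries, in order, a LEFTARC, a RIGHTARC, a REDUCE
+        (taken when the gold graph links the word at the front of the buffer,
+        as head or dependent, to a node left of the current stack top), and
+        falls back to SHIFT.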
+ Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Joav Goldberg and Joakim Nivre + """ + operation = Transition(self.ARC_EAGER) + countProj = 0 + training_seq = [] + + for depgraph in depgraphs: + if not self._is_projective(depgraph): + continue + + countProj += 1 + conf = Configuration(depgraph) + while len(conf.buffer) > 0: + b0 = conf.buffer[0] + features = conf.extract_features() + binary_features = self._convert_to_binary_features(features) + + if len(conf.stack) > 0: + s0 = conf.stack[len(conf.stack) - 1] + # Left-arc operation + rel = self._get_dep_relation(b0, s0, depgraph) + if rel is not None: + key = Transition.LEFT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.left_arc(conf, rel) + training_seq.append(key) + continue + + # Right-arc operation + rel = self._get_dep_relation(s0, b0, depgraph) + if rel is not None: + key = Transition.RIGHT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.right_arc(conf, rel) + training_seq.append(key) + continue + + # reduce operation + flag = False + for k in range(s0): + if self._get_dep_relation(k, b0, depgraph) is not None: + flag = True + if self._get_dep_relation(b0, k, depgraph) is not None: + flag = True + if flag: + key = Transition.REDUCE + self._write_to_file(key, binary_features, input_file) + operation.reduce(conf) + training_seq.append(key) + continue + + # Shift operation as the default + key = Transition.SHIFT + self._write_to_file(key, binary_features, input_file) + operation.shift(conf) + training_seq.append(key) + + print(" Number of training examples : " + str(len(depgraphs))) + print(" Number of valid (projective) examples : " + str(countProj)) + return training_seq + + def train(self, depgraphs, modelfile, verbose=True): + """ + :param depgraphs : list of DependencyGraph as the training data + :type depgraphs : DependencyGraph + :param modelfile : file name to save the trained model + :type modelfile : str + """ + + try: + input_file = tempfile.NamedTemporaryFile( + prefix="transition_parse.train", dir=tempfile.gettempdir(), delete=False + ) + + if self._algorithm == self.ARC_STANDARD: + self._create_training_examples_arc_std(depgraphs, input_file) + else: + self._create_training_examples_arc_eager(depgraphs, input_file) + + input_file.close() + # Using the temporary file to train the libsvm classifier + x_train, y_train = load_svmlight_file(input_file.name) + # The parameter is set according to the paper: + # Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre + # Todo : because of probability = True => very slow due to + # cross-validation. 
Need to improve the speed here + model = svm.SVC( + kernel="poly", + degree=2, + coef0=0, + gamma=0.2, + C=0.5, + verbose=verbose, + probability=True, + ) + + model.fit(x_train, y_train) + # Save the model to file name (as pickle) + pickle.dump(model, open(modelfile, "wb")) + finally: + remove(input_file.name) + + def parse(self, depgraphs, modelFile): + """ + :param depgraphs: the list of test sentence, each sentence is represented as a dependency graph where the 'head' information is dummy + :type depgraphs: list(DependencyGraph) + :param modelfile: the model file + :type modelfile: str + :return: list (DependencyGraph) with the 'head' and 'rel' information + """ + result = [] + # First load the model + model = pickle.load(open(modelFile, "rb")) + operation = Transition(self._algorithm) + + for depgraph in depgraphs: + conf = Configuration(depgraph) + while len(conf.buffer) > 0: + features = conf.extract_features() + col = [] + row = [] + data = [] + for feature in features: + if feature in self._dictionary: + col.append(self._dictionary[feature]) + row.append(0) + data.append(1.0) + np_col = array(sorted(col)) # NB : index must be sorted + np_row = array(row) + np_data = array(data) + + x_test = sparse.csr_matrix( + (np_data, (np_row, np_col)), shape=(1, len(self._dictionary)) + ) + + # It's best to use decision function as follow BUT it's not supported yet for sparse SVM + # Using decision function to build the votes array + # dec_func = model.decision_function(x_test)[0] + # votes = {} + # k = 0 + # for i in range(len(model.classes_)): + # for j in range(i+1, len(model.classes_)): + # #if dec_func[k] > 0: + # votes.setdefault(i,0) + # votes[i] +=1 + # else: + # votes.setdefault(j,0) + # votes[j] +=1 + # k +=1 + # Sort votes according to the values + # sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True) + + # We will use predict_proba instead of decision_function + prob_dict = {} + pred_prob = model.predict_proba(x_test)[0] + for i in range(len(pred_prob)): + prob_dict[i] = pred_prob[i] + sorted_Prob = sorted(prob_dict.items(), key=itemgetter(1), reverse=True) + + # Note that SHIFT is always a valid operation + for (y_pred_idx, confidence) in sorted_Prob: + # y_pred = model.predict(x_test)[0] + # From the prediction match to the operation + y_pred = model.classes_[y_pred_idx] + + if y_pred in self._match_transition: + strTransition = self._match_transition[y_pred] + baseTransition = strTransition.split(":")[0] + + if baseTransition == Transition.LEFT_ARC: + if ( + operation.left_arc(conf, strTransition.split(":")[1]) + != -1 + ): + break + elif baseTransition == Transition.RIGHT_ARC: + if ( + operation.right_arc(conf, strTransition.split(":")[1]) + != -1 + ): + break + elif baseTransition == Transition.REDUCE: + if operation.reduce(conf) != -1: + break + elif baseTransition == Transition.SHIFT: + if operation.shift(conf) != -1: + break + else: + raise ValueError( + "The predicted transition is not recognized, expected errors" + ) + + # Finish with operations build the dependency graph from Conf.arcs + + new_depgraph = deepcopy(depgraph) + for key in new_depgraph.nodes: + node = new_depgraph.nodes[key] + node["rel"] = "" + # With the default, all the token depend on the Root + node["head"] = 0 + for (head, rel, child) in conf.arcs: + c_node = new_depgraph.nodes[child] + c_node["head"] = head + c_node["rel"] = rel + result.append(new_depgraph) + + return result + + +def demo(): + """ + >>> from nltk.parse import DependencyGraph, DependencyEvaluator + >>> from 
nltk.parse.transitionparser import TransitionParser, Configuration, Transition + >>> gold_sent = DependencyGraph(\""" + ... Economic JJ 2 ATT + ... news NN 3 SBJ + ... has VBD 0 ROOT + ... little JJ 5 ATT + ... effect NN 3 OBJ + ... on IN 5 ATT + ... financial JJ 8 ATT + ... markets NNS 6 PC + ... . . 3 PU + ... \""") + + >>> conf = Configuration(gold_sent) + + ###################### Check the Initial Feature ######################## + + >>> print(', '.join(conf.extract_features())) + STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ + + ###################### Check The Transition ####################### + Check the Initialized Configuration + >>> print(conf) + Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : [] + + A. Do some transition checks for ARC-STANDARD + + >>> operation = Transition('arc-standard') + >>> operation.shift(conf) + >>> operation.left_arc(conf, "ATT") + >>> operation.shift(conf) + >>> operation.left_arc(conf,"SBJ") + >>> operation.shift(conf) + >>> operation.shift(conf) + >>> operation.left_arc(conf, "ATT") + >>> operation.shift(conf) + >>> operation.shift(conf) + >>> operation.shift(conf) + >>> operation.left_arc(conf, "ATT") + + Middle Configuration and Features Check + >>> print(conf) + Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)] + + >>> print(', '.join(conf.extract_features())) + STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT + + >>> operation.right_arc(conf, "PC") + >>> operation.right_arc(conf, "ATT") + >>> operation.right_arc(conf, "OBJ") + >>> operation.shift(conf) + >>> operation.right_arc(conf, "PU") + >>> operation.right_arc(conf, "ROOT") + >>> operation.shift(conf) + + Terminated Configuration Check + >>> print(conf) + Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)] + + + B. Do some transition checks for ARC-EAGER + + >>> conf = Configuration(gold_sent) + >>> operation = Transition('arc-eager') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'ATT') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'SBJ') + >>> operation.right_arc(conf,'ROOT') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'ATT') + >>> operation.right_arc(conf,'OBJ') + >>> operation.right_arc(conf,'ATT') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'ATT') + >>> operation.right_arc(conf,'PC') + >>> operation.reduce(conf) + >>> operation.reduce(conf) + >>> operation.reduce(conf) + >>> operation.right_arc(conf,'PU') + >>> print(conf) + Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)] + + ###################### Check The Training Function ####################### + + A. 
Check the ARC-STANDARD training + >>> import tempfile + >>> import os + >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False) + + >>> parser_std = TransitionParser('arc-standard') + >>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file))) + Number of training examples : 1 + Number of valid (projective) examples : 1 + SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT + + >>> parser_std.train([gold_sent],'temp.arcstd.model', verbose=False) + Number of training examples : 1 + Number of valid (projective) examples : 1 + >>> input_file.close() + >>> remove(input_file.name) + + B. Check the ARC-EAGER training + + >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False) + >>> parser_eager = TransitionParser('arc-eager') + >>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file))) + Number of training examples : 1 + Number of valid (projective) examples : 1 + SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU + + >>> parser_eager.train([gold_sent],'temp.arceager.model', verbose=False) + Number of training examples : 1 + Number of valid (projective) examples : 1 + + >>> input_file.close() + >>> remove(input_file.name) + + ###################### Check The Parsing Function ######################## + + A. Check the ARC-STANDARD parser + + >>> result = parser_std.parse([gold_sent], 'temp.arcstd.model') + >>> de = DependencyEvaluator(result, [gold_sent]) + >>> de.eval() >= (0, 0) + True + + B. Check the ARC-EAGER parser + >>> result = parser_eager.parse([gold_sent], 'temp.arceager.model') + >>> de = DependencyEvaluator(result, [gold_sent]) + >>> de.eval() >= (0, 0) + True + + Remove test temporary files + >>> remove('temp.arceager.model') + >>> remove('temp.arcstd.model') + + Note that result is very poor because of only one training example. + """ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/viterbi.py b/venv/lib/python3.10/site-packages/nltk/parse/viterbi.py new file mode 100644 index 0000000000000000000000000000000000000000..8a3e9de30432a65828463e32e6ea7bff27b7c5ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/viterbi.py @@ -0,0 +1,453 @@ +# Natural Language Toolkit: Viterbi Probabilistic Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from functools import reduce + +from nltk.parse.api import ParserI +from nltk.tree import ProbabilisticTree, Tree + +##////////////////////////////////////////////////////// +## Viterbi PCFG Parser +##////////////////////////////////////////////////////// + + +class ViterbiParser(ParserI): + """ + A bottom-up ``PCFG`` parser that uses dynamic programming to find + the single most likely parse for a text. The ``ViterbiParser`` parser + parses texts by filling in a "most likely constituent table". + This table records the most probable tree representation for any + given span and node value. In particular, it has an entry for + every start index, end index, and node value, recording the most + likely subtree that spans from the start index to the end index, + and has the given node value. 
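+
+    A minimal usage sketch with a tiny illustrative grammar (see also
+    ``demo()`` below)::
+
+        from nltk.grammar import PCFG
+        from nltk.parse import ViterbiParser
+
+        grammar = PCFG.fromstring('''
+            S -> NP V [1.0]
+            NP -> 'John' [1.0]
+            V -> 'sleeps' [1.0]
+        ''')
+        parser = ViterbiParser(grammar)
+        for tree in parser.parse(['John', 'sleeps']):
+            # prints the single most likely parse with its probability
+            print(tree)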
+ + The ``ViterbiParser`` parser fills in this table incrementally. It starts + by filling in all entries for constituents that span one element + of text (i.e., entries where the end index is one greater than the + start index). After it has filled in all table entries for + constituents that span one element of text, it fills in the + entries for constitutants that span two elements of text. It + continues filling in the entries for constituents spanning larger + and larger portions of the text, until the entire table has been + filled. Finally, it returns the table entry for a constituent + spanning the entire text, whose node value is the grammar's start + symbol. + + In order to find the most likely constituent with a given span and + node value, the ``ViterbiParser`` parser considers all productions that + could produce that node value. For each production, it finds all + children that collectively cover the span and have the node values + specified by the production's right hand side. If the probability + of the tree formed by applying the production to the children is + greater than the probability of the current entry in the table, + then the table is updated with this new tree. + + A pseudo-code description of the algorithm used by + ``ViterbiParser`` is: + + | Create an empty most likely constituent table, *MLC*. + | For width in 1...len(text): + | For start in 1...len(text)-width: + | For prod in grammar.productions: + | For each sequence of subtrees [t[1], t[2], ..., t[n]] in MLC, + | where t[i].label()==prod.rhs[i], + | and the sequence covers [start:start+width]: + | old_p = MLC[start, start+width, prod.lhs] + | new_p = P(t[1])P(t[1])...P(t[n])P(prod) + | if new_p > old_p: + | new_tree = Tree(prod.lhs, t[1], t[2], ..., t[n]) + | MLC[start, start+width, prod.lhs] = new_tree + | Return MLC[0, len(text), start_symbol] + + :type _grammar: PCFG + :ivar _grammar: The grammar used to parse sentences. + :type _trace: int + :ivar _trace: The level of tracing output that should be generated + when parsing a text. + """ + + def __init__(self, grammar, trace=0): + """ + Create a new ``ViterbiParser`` parser, that uses ``grammar`` to + parse texts. + + :type grammar: PCFG + :param grammar: The grammar used to parse texts. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + """ + self._grammar = grammar + self._trace = trace + + def grammar(self): + return self._grammar + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + self._trace = trace + + def parse(self, tokens): + # Inherit docs from ParserI + + tokens = list(tokens) + self._grammar.check_coverage(tokens) + + # The most likely constituent table. This table specifies the + # most likely constituent for a given span and type. + # Constituents can be either Trees or tokens. For Trees, + # the "type" is the Nonterminal for the tree's root node + # value. For Tokens, the "type" is the token's type. + # The table is stored as a dictionary, since it is sparse. + constituents = {} + + # Initialize the constituents dictionary with the words from + # the text. 
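+        # Each leaf token is stored under the key (index, index + 1, token),
+        # so terminal symbols on a production's right-hand side can be looked
+        # up directly by _match_rhs.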
+ if self._trace: + print("Inserting tokens into the most likely" + " constituents table...") + for index in range(len(tokens)): + token = tokens[index] + constituents[index, index + 1, token] = token + if self._trace > 1: + self._trace_lexical_insertion(token, index, len(tokens)) + + # Consider each span of length 1, 2, ..., n; and add any trees + # that might cover that span to the constituents dictionary. + for length in range(1, len(tokens) + 1): + if self._trace: + print( + "Finding the most likely constituents" + + " spanning %d text elements..." % length + ) + for start in range(len(tokens) - length + 1): + span = (start, start + length) + self._add_constituents_spanning(span, constituents, tokens) + + # Return the tree that spans the entire text & have the right cat + tree = constituents.get((0, len(tokens), self._grammar.start())) + if tree is not None: + yield tree + + def _add_constituents_spanning(self, span, constituents, tokens): + """ + Find any constituents that might cover ``span``, and add them + to the most likely constituents table. + + :rtype: None + :type span: tuple(int, int) + :param span: The section of the text for which we are + trying to find possible constituents. The span is + specified as a pair of integers, where the first integer + is the index of the first token that should be included in + the constituent; and the second integer is the index of + the first token that should not be included in the + constituent. I.e., the constituent should cover + ``text[span[0]:span[1]]``, where ``text`` is the text + that we are parsing. + + :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) + :param constituents: The most likely constituents table. This + table records the most probable tree representation for + any given span and node value. In particular, + ``constituents(s,e,nv)`` is the most likely + ``ProbabilisticTree`` that covers ``text[s:e]`` + and has a node value ``nv.symbol()``, where ``text`` + is the text that we are parsing. When + ``_add_constituents_spanning`` is called, ``constituents`` + should contain all possible constituents that are shorter + than ``span``. + + :type tokens: list of tokens + :param tokens: The text we are parsing. This is only used for + trace output. + """ + # Since some of the grammar productions may be unary, we need to + # repeatedly try all of the productions until none of them add any + # new constituents. + changed = True + while changed: + changed = False + + # Find all ways instantiations of the grammar productions that + # cover the span. + instantiations = self._find_instantiations(span, constituents) + + # For each production instantiation, add a new + # ProbabilisticTree whose probability is the product + # of the childrens' probabilities and the production's + # probability. + for (production, children) in instantiations: + subtrees = [c for c in children if isinstance(c, Tree)] + p = reduce(lambda pr, t: pr * t.prob(), subtrees, production.prob()) + node = production.lhs().symbol() + tree = ProbabilisticTree(node, children, prob=p) + + # If it's new a constituent, then add it to the + # constituents dictionary. 
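+                # (The stored entry is replaced only if nothing is recorded
+                # yet for this span and nonterminal, or if the new tree has a
+                # higher probability.)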
+ c = constituents.get((span[0], span[1], production.lhs())) + if self._trace > 1: + if c is None or c != tree: + if c is None or c.prob() < tree.prob(): + print(" Insert:", end=" ") + else: + print(" Discard:", end=" ") + self._trace_production(production, p, span, len(tokens)) + if c is None or c.prob() < tree.prob(): + constituents[span[0], span[1], production.lhs()] = tree + changed = True + + def _find_instantiations(self, span, constituents): + """ + :return: a list of the production instantiations that cover a + given span of the text. A "production instantiation" is + a tuple containing a production and a list of children, + where the production's right hand side matches the list of + children; and the children cover ``span``. :rtype: list + of ``pair`` of ``Production``, (list of + (``ProbabilisticTree`` or token. + + :type span: tuple(int, int) + :param span: The section of the text for which we are + trying to find production instantiations. The span is + specified as a pair of integers, where the first integer + is the index of the first token that should be covered by + the production instantiation; and the second integer is + the index of the first token that should not be covered by + the production instantiation. + :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) + :param constituents: The most likely constituents table. This + table records the most probable tree representation for + any given span and node value. See the module + documentation for more information. + """ + rv = [] + for production in self._grammar.productions(): + childlists = self._match_rhs(production.rhs(), span, constituents) + + for childlist in childlists: + rv.append((production, childlist)) + return rv + + def _match_rhs(self, rhs, span, constituents): + """ + :return: a set of all the lists of children that cover ``span`` + and that match ``rhs``. + :rtype: list(list(ProbabilisticTree or token) + + :type rhs: list(Nonterminal or any) + :param rhs: The list specifying what kinds of children need to + cover ``span``. Each nonterminal in ``rhs`` specifies + that the corresponding child should be a tree whose node + value is that nonterminal's symbol. Each terminal in ``rhs`` + specifies that the corresponding child should be a token + whose type is that terminal. + :type span: tuple(int, int) + :param span: The section of the text for which we are + trying to find child lists. The span is specified as a + pair of integers, where the first integer is the index of + the first token that should be covered by the child list; + and the second integer is the index of the first token + that should not be covered by the child list. + :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) + :param constituents: The most likely constituents table. This + table records the most probable tree representation for + any given span and node value. See the module + documentation for more information. 
+ """ + (start, end) = span + + # Base case + if start >= end and rhs == (): + return [[]] + if start >= end or rhs == (): + return [] + + # Find everything that matches the 1st symbol of the RHS + childlists = [] + for split in range(start, end + 1): + l = constituents.get((start, split, rhs[0])) + if l is not None: + rights = self._match_rhs(rhs[1:], (split, end), constituents) + childlists += [[l] + r for r in rights] + + return childlists + + def _trace_production(self, production, p, span, width): + """ + Print trace output indicating that a given production has been + applied at a given location. + + :param production: The production that has been applied + :type production: Production + :param p: The probability of the tree produced by the production. + :type p: float + :param span: The span of the production + :type span: tuple + :rtype: None + """ + + str = "|" + "." * span[0] + str += "=" * (span[1] - span[0]) + str += "." * (width - span[1]) + "| " + str += "%s" % production + if self._trace > 2: + str = f"{str:<40} {p:12.10f} " + + print(str) + + def _trace_lexical_insertion(self, token, index, width): + str = " Insert: |" + "." * index + "=" + "." * (width - index - 1) + "| " + str += f"{token}" + print(str) + + def __repr__(self): + return "" % self._grammar + + +##////////////////////////////////////////////////////// +## Test Code +##////////////////////////////////////////////////////// + + +def demo(): + """ + A demonstration of the probabilistic parsers. The user is + prompted to select which demo to run, and how many parses should + be found; and then each parser is run on the same demo, and a + summary of the results are displayed. + """ + import sys + import time + + from nltk import tokenize + from nltk.grammar import PCFG + from nltk.parse import ViterbiParser + + toy_pcfg1 = PCFG.fromstring( + """ + S -> NP VP [1.0] + NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] + Det -> 'the' [0.8] | 'my' [0.2] + N -> 'man' [0.5] | 'telescope' [0.5] + VP -> VP PP [0.1] | V NP [0.7] | V [0.2] + V -> 'ate' [0.35] | 'saw' [0.65] + PP -> P NP [1.0] + P -> 'with' [0.61] | 'under' [0.39] + """ + ) + + toy_pcfg2 = PCFG.fromstring( + """ + S -> NP VP [1.0] + VP -> V NP [.59] + VP -> V [.40] + VP -> VP PP [.01] + NP -> Det N [.41] + NP -> Name [.28] + NP -> NP PP [.31] + PP -> P NP [1.0] + V -> 'saw' [.21] + V -> 'ate' [.51] + V -> 'ran' [.28] + N -> 'boy' [.11] + N -> 'cookie' [.12] + N -> 'table' [.13] + N -> 'telescope' [.14] + N -> 'hill' [.5] + Name -> 'Jack' [.52] + Name -> 'Bob' [.48] + P -> 'with' [.61] + P -> 'under' [.39] + Det -> 'the' [.41] + Det -> 'a' [.31] + Det -> 'my' [.28] + """ + ) + + # Define two demos. Each demo has a sentence and a grammar. + demos = [ + ("I saw the man with my telescope", toy_pcfg1), + ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2), + ] + + # Ask the user which demo they want to use. + print() + for i in range(len(demos)): + print(f"{i + 1:>3}: {demos[i][0]}") + print(" %r" % demos[i][1]) + print() + print("Which demo (%d-%d)? " % (1, len(demos)), end=" ") + try: + snum = int(sys.stdin.readline().strip()) - 1 + sent, grammar = demos[snum] + except: + print("Bad sentence number") + return + + # Tokenize the sentence. 
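The recursion in ``_match_rhs`` defined earlier in this file can be illustrated in isolation: for the first right-hand-side symbol it tries every split point of the span, then recurses on the remainder. A stripped-down sketch over a hand-filled table, with plain strings standing in for constituents::

    def match_rhs(rhs, span, table):
        start, end = span
        if not rhs:                      # an empty RHS matches only an empty span
            return [[]] if start == end else []
        childlists = []
        for split in range(start, end + 1):
            child = table.get((start, split, rhs[0]))
            if child is not None:
                for rest in match_rhs(rhs[1:], (split, end), table):
                    childlists.append([child] + rest)
        return childlists

    table = {(0, 1, "NP"): "NP[0:1]", (1, 3, "VP"): "VP[1:3]"}
    print(match_rhs(("NP", "VP"), (0, 3), table))   # [['NP[0:1]', 'VP[1:3]']]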
+ tokens = sent.split() + + parser = ViterbiParser(grammar) + all_parses = {} + + print(f"\nsent: {sent}\nparser: {parser}\ngrammar: {grammar}") + parser.trace(3) + t = time.time() + parses = parser.parse_all(tokens) + time = time.time() - t + average = ( + reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0 + ) + num_parses = len(parses) + for p in parses: + all_parses[p.freeze()] = 1 + + # Print some summary statistics + print() + print("Time (secs) # Parses Average P(parse)") + print("-----------------------------------------") + print("%11.4f%11d%19.14f" % (time, num_parses, average)) + parses = all_parses.keys() + if parses: + p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) + else: + p = 0 + print("------------------------------------------") + print("%11s%11d%19.14f" % ("n/a", len(parses), p)) + + # Ask the user if we should draw the parses. + print() + print("Draw parses (y/n)? ", end=" ") + if sys.stdin.readline().strip().lower().startswith("y"): + from nltk.draw.tree import draw_trees + + print(" please wait...") + draw_trees(*parses) + + # Ask the user if we should print the parses. + print() + print("Print parses (y/n)? ", end=" ") + if sys.stdin.readline().strip().lower().startswith("y"): + for parse in parses: + print(parse) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__init__.py b/venv/lib/python3.10/site-packages/nltk/sem/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5bbb3f032bef5ce79ab7232566bc73aa17ff661b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/__init__.py @@ -0,0 +1,75 @@ +# Natural Language Toolkit: Semantic Interpretation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Semantic Interpretation Package + +This package contains classes for representing semantic structure in +formulas of first-order logic and for evaluating such formulas in +set-theoretic models. + + >>> from nltk.sem import logic + >>> logic._counter._value = 0 + +The package has two main components: + + - ``logic`` provides support for analyzing expressions of First + Order Logic (FOL). + - ``evaluate`` allows users to recursively determine truth in a + model for formulas of FOL. + +A model consists of a domain of discourse and a valuation function, +which assigns values to non-logical constants. We assume that entities +in the domain are represented as strings such as ``'b1'``, ``'g1'``, +etc. A ``Valuation`` is initialized with a list of (symbol, value) +pairs, where values are entities, sets of entities or sets of tuples +of entities. +The domain of discourse can be inferred from the valuation, and model +is then created with domain and valuation as parameters. + + >>> from nltk.sem import Valuation, Model + >>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'), + ... ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])), + ... ('dog', set(['d1'])), + ... 
('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))] + >>> val = Valuation(v) + >>> dom = val.domain + >>> m = Model(dom, val) +""" + +from nltk.sem.boxer import Boxer +from nltk.sem.drt import DRS, DrtExpression +from nltk.sem.evaluate import ( + Assignment, + Model, + Undefined, + Valuation, + arity, + is_rel, + read_valuation, + set2rel, +) +from nltk.sem.lfg import FStructure +from nltk.sem.logic import ( + ApplicationExpression, + Expression, + LogicalExpressionException, + Variable, + binding_ops, + boolean_ops, + equality_preds, + read_logic, +) +from nltk.sem.relextract import clause, extract_rels, rtuple +from nltk.sem.skolemize import skolemize +from nltk.sem.util import evaluate_sents, interpret_sents, parse_sents, root_semrep + +# from nltk.sem.glue import Glue +# from nltk.sem.hole import HoleSemantics +# from nltk.sem.cooper_storage import CooperStore + +# don't import chat80 as its names are too generic diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bceeaad467e9261d36f6df637f00fc4a12734349 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/boxer.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/boxer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23699a66c7b3f160af71ea577aa19e111f5ad6bf Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/boxer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/chat80.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/chat80.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac7fe5d107ed0b24ef02b96dcf56d7088f7e2a93 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/chat80.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/cooper_storage.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/cooper_storage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33d9ed57d6df92bed2d439fd3a71ff301b38b536 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/cooper_storage.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/drt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/drt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2236c7361fed8357e7a5bf0fc6bdea418e90bb8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/drt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcf525b7cb181089f3e2306bca8dacaeaed3147e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/drt_glue_demo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/evaluate.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/evaluate.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..99b33ab0c317f4c7171b2eb56f47daec394c9f99 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/evaluate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/glue.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/glue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64b0e4312b6b64e97e0e5bc759a6bc366997ba38 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/glue.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/hole.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/hole.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f810ace4cb15dbffa165b08663ab3ad9b9ebe8ce Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/hole.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/lfg.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/lfg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc60e03aa1e82cd229abe91c98395c89361d4a88 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/lfg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/linearlogic.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/linearlogic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce18f9aac6d972bde6b2a79a70faf8b7d15c0275 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/linearlogic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/logic.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/logic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b380a667e9d5d2dbb267c0d99444955c0579fd7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/logic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/relextract.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/relextract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b382969e28e34ba75d7d3b766254d447d7f4224 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/relextract.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/skolemize.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/skolemize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae29487182dc61b230f21eb4ccac886db4aba109 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/skolemize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aae140ce87fe9fe0f5f4813c777ce96bedf6467c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sem/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sem/boxer.py b/venv/lib/python3.10/site-packages/nltk/sem/boxer.py new file mode 100644 index 
0000000000000000000000000000000000000000..d0acd4a607e3bf3481b3f896e8103a9069870c56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/boxer.py @@ -0,0 +1,1605 @@ +# Natural Language Toolkit: Interface to Boxer +# +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +An interface to Boxer. + +This interface relies on the latest version of the development (subversion) version of +C&C and Boxer. + +Usage +===== + +Set the environment variable CANDC to the bin directory of your CandC installation. +The models directory should be in the CandC root directory. +For example:: + + /path/to/candc/ + bin/ + candc + boxer + models/ + boxer/ +""" + +import operator +import os +import re +import subprocess +import tempfile +from functools import reduce +from optparse import OptionParser + +from nltk.internals import find_binary +from nltk.sem.drt import ( + DRS, + DrtApplicationExpression, + DrtEqualityExpression, + DrtNegatedExpression, + DrtOrExpression, + DrtParser, + DrtProposition, + DrtTokens, + DrtVariableExpression, +) +from nltk.sem.logic import ( + ExpectedMoreTokensException, + LogicalExpressionException, + UnexpectedTokenException, + Variable, +) + + +class Boxer: + """ + This class is an interface to Johan Bos's program Boxer, a wide-coverage + semantic parser that produces Discourse Representation Structures (DRSs). + """ + + def __init__( + self, + boxer_drs_interpreter=None, + elimeq=False, + bin_dir=None, + verbose=False, + resolve=True, + ): + """ + :param boxer_drs_interpreter: A class that converts from the + ``AbstractBoxerDrs`` object hierarchy to a different object. The + default is ``NltkDrtBoxerDrsInterpreter``, which converts to the NLTK + DRT hierarchy. + :param elimeq: When set to true, Boxer removes all equalities from the + DRSs and discourse referents standing in the equality relation are + unified, but only if this can be done in a meaning-preserving manner. + :param resolve: When set to true, Boxer will resolve all anaphoric DRSs and perform merge-reduction. + Resolution follows Van der Sandt's theory of binding and accommodation. + """ + if boxer_drs_interpreter is None: + boxer_drs_interpreter = NltkDrtBoxerDrsInterpreter() + self._boxer_drs_interpreter = boxer_drs_interpreter + + self._resolve = resolve + self._elimeq = elimeq + + self.set_bin_dir(bin_dir, verbose) + + def set_bin_dir(self, bin_dir, verbose=False): + self._candc_bin = self._find_binary("candc", bin_dir, verbose) + self._candc_models_path = os.path.normpath( + os.path.join(self._candc_bin[:-5], "../models") + ) + self._boxer_bin = self._find_binary("boxer", bin_dir, verbose) + + def interpret(self, input, discourse_id=None, question=False, verbose=False): + """ + Use Boxer to give a first order representation. + + :param input: str Input sentence to parse + :param occur_index: bool Should predicates be occurrence indexed? + :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate. + :return: ``drt.DrtExpression`` + """ + discourse_ids = [discourse_id] if discourse_id is not None else None + (d,) = self.interpret_multi_sents([[input]], discourse_ids, question, verbose) + if not d: + raise Exception(f'Unable to interpret: "{input}"') + return d + + def interpret_multi(self, input, discourse_id=None, question=False, verbose=False): + """ + Use Boxer to give a first order representation. 
+ + :param input: list of str Input sentences to parse as a single discourse + :param occur_index: bool Should predicates be occurrence indexed? + :param discourse_id: str An identifier to be inserted to each occurrence-indexed predicate. + :return: ``drt.DrtExpression`` + """ + discourse_ids = [discourse_id] if discourse_id is not None else None + (d,) = self.interpret_multi_sents([input], discourse_ids, question, verbose) + if not d: + raise Exception(f'Unable to interpret: "{input}"') + return d + + def interpret_sents( + self, inputs, discourse_ids=None, question=False, verbose=False + ): + """ + Use Boxer to give a first order representation. + + :param inputs: list of str Input sentences to parse as individual discourses + :param occur_index: bool Should predicates be occurrence indexed? + :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate. + :return: list of ``drt.DrtExpression`` + """ + return self.interpret_multi_sents( + [[input] for input in inputs], discourse_ids, question, verbose + ) + + def interpret_multi_sents( + self, inputs, discourse_ids=None, question=False, verbose=False + ): + """ + Use Boxer to give a first order representation. + + :param inputs: list of list of str Input discourses to parse + :param occur_index: bool Should predicates be occurrence indexed? + :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate. + :return: ``drt.DrtExpression`` + """ + if discourse_ids is not None: + assert len(inputs) == len(discourse_ids) + assert reduce(operator.and_, (id is not None for id in discourse_ids)) + use_disc_id = True + else: + discourse_ids = list(map(str, range(len(inputs)))) + use_disc_id = False + + candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose) + boxer_out = self._call_boxer(candc_out, verbose=verbose) + + # if 'ERROR: input file contains no ccg/2 terms.' in boxer_out: + # raise UnparseableInputException('Could not parse with candc: "%s"' % input_str) + + drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id) + return [drs_dict.get(id, None) for id in discourse_ids] + + def _call_candc(self, inputs, discourse_ids, question, verbose=False): + """ + Call the ``candc`` binary with the given input. + + :param inputs: list of list of str Input discourses to parse + :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate. + :param filename: str A filename for the output file + :return: stdout + """ + args = [ + "--models", + os.path.join(self._candc_models_path, ["boxer", "questions"][question]), + "--candc-printer", + "boxer", + ] + return self._call( + "\n".join( + sum( + ([f"'{id}'"] + d for d, id in zip(inputs, discourse_ids)), + [], + ) + ), + self._candc_bin, + args, + verbose, + ) + + def _call_boxer(self, candc_out, verbose=False): + """ + Call the ``boxer`` binary with the given input. 
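The four ``interpret*`` entry points differ only in how the input is grouped into discourses. A usage sketch follows; it assumes a working C&C/Boxer installation with the ``CANDC`` environment variable set, so it is not runnable out of the box, and the sentences are illustrative::

    from nltk.sem.boxer import Boxer

    boxer = Boxer()

    # One sentence, one discourse.
    drs = boxer.interpret("John saw a dog.")

    # Each string is its own one-sentence discourse; returns a list of DRSs.
    drss = boxer.interpret_sents(["A dog barks.", "A cat sleeps."])

    # One multi-sentence discourse, so anaphora can be resolved across
    # sentence boundaries; returns a single DRS.
    drs = boxer.interpret_multi(["A dog barks.", "It runs."])

    # Several multi-sentence discourses at once, with explicit ids.
    drss = boxer.interpret_multi_sents(
        [["A dog barks.", "It runs."], ["A cat sleeps."]],
        discourse_ids=["d0", "d1"],
    )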
+ + :param candc_out: str output from C&C parser + :return: stdout + """ + f = None + try: + fd, temp_filename = tempfile.mkstemp( + prefix="boxer-", suffix=".in", text=True + ) + f = os.fdopen(fd, "w") + f.write(candc_out.decode("utf-8")) + finally: + if f: + f.close() + + args = [ + "--box", + "false", + "--semantics", + "drs", + #'--flat', 'false', # removed from boxer + "--resolve", + ["false", "true"][self._resolve], + "--elimeq", + ["false", "true"][self._elimeq], + "--format", + "prolog", + "--instantiate", + "true", + "--input", + temp_filename, + ] + stdout = self._call(None, self._boxer_bin, args, verbose) + os.remove(temp_filename) + return stdout + + def _find_binary(self, name, bin_dir, verbose=False): + return find_binary( + name, + path_to_bin=bin_dir, + env_vars=["CANDC"], + url="http://svn.ask.it.usyd.edu.au/trac/candc/", + binary_names=[name, name + ".exe"], + verbose=verbose, + ) + + def _call(self, input_str, binary, args=[], verbose=False): + """ + Call the binary with the given input. + + :param input_str: A string whose contents are used as stdin. + :param binary: The location of the binary to call + :param args: A list of command-line arguments. + :return: stdout + """ + if verbose: + print("Calling:", binary) + print("Args:", args) + print("Input:", input_str) + print("Command:", binary + " " + " ".join(args)) + + # Call via a subprocess + if input_str is None: + cmd = [binary] + args + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + else: + cmd = 'echo "{}" | {} {}'.format(input_str, binary, " ".join(args)) + p = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True + ) + stdout, stderr = p.communicate() + + if verbose: + print("Return code:", p.returncode) + if stdout: + print("stdout:\n", stdout, "\n") + if stderr: + print("stderr:\n", stderr, "\n") + if p.returncode != 0: + raise Exception( + "ERROR CALLING: {} {}\nReturncode: {}\n{}".format( + binary, " ".join(args), p.returncode, stderr + ) + ) + + return stdout + + def _parse_to_drs_dict(self, boxer_out, use_disc_id): + lines = boxer_out.decode("utf-8").split("\n") + drs_dict = {} + i = 0 + while i < len(lines): + line = lines[i] + if line.startswith("id("): + comma_idx = line.index(",") + discourse_id = line[3:comma_idx] + if discourse_id[0] == "'" and discourse_id[-1] == "'": + discourse_id = discourse_id[1:-1] + drs_id = line[comma_idx + 1 : line.index(")")] + i += 1 + line = lines[i] + assert line.startswith(f"sem({drs_id},") + if line[-4:] == "').'": + line = line[:-4] + ")." 
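The ``_call`` helper above shells out and captures stdout/stderr. The same pattern can be written without going through the shell by feeding stdin directly, which is what this self-contained sketch does; ``cat`` is only a stand-in for the ``candc``/``boxer`` binaries, and the snippet assumes a Unix-like system::

    import subprocess

    def call(input_str, binary, args=()):
        """Run ``binary`` with ``args``, feeding ``input_str`` on stdin."""
        p = subprocess.Popen(
            [binary, *args],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, stderr = p.communicate(input_str.encode("utf-8"))
        if p.returncode != 0:
            raise RuntimeError(
                f"{binary} exited with {p.returncode}: {stderr.decode('utf-8')}"
            )
        return stdout

    print(call("hello boxer\n", "cat").decode("utf-8"))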
+ assert line.endswith(")."), f"can't parse line: {line}" + + search_start = len(f"sem({drs_id},[") + brace_count = 1 + drs_start = -1 + for j, c in enumerate(line[search_start:]): + if c == "[": + brace_count += 1 + if c == "]": + brace_count -= 1 + if brace_count == 0: + drs_start = search_start + j + 1 + if line[drs_start : drs_start + 3] == "','": + drs_start = drs_start + 3 + else: + drs_start = drs_start + 1 + break + assert drs_start > -1 + + drs_input = line[drs_start:-2].strip() + parsed = self._parse_drs(drs_input, discourse_id, use_disc_id) + drs_dict[discourse_id] = self._boxer_drs_interpreter.interpret(parsed) + i += 1 + return drs_dict + + def _parse_drs(self, drs_string, discourse_id, use_disc_id): + return BoxerOutputDrsParser([None, discourse_id][use_disc_id]).parse(drs_string) + + +class BoxerOutputDrsParser(DrtParser): + def __init__(self, discourse_id=None): + """ + This class is used to parse the Prolog DRS output from Boxer into a + hierarchy of python objects. + """ + DrtParser.__init__(self) + self.discourse_id = discourse_id + self.sentence_id_offset = None + self.quote_chars = [("'", "'", "\\", False)] + + def parse(self, data, signature=None): + return DrtParser.parse(self, data, signature) + + def get_all_symbols(self): + return ["(", ")", ",", "[", "]", ":"] + + def handle(self, tok, context): + return self.handle_drs(tok) + + def attempt_adjuncts(self, expression, context): + return expression + + def parse_condition(self, indices): + """ + Parse a DRS condition + + :return: list of ``DrtExpression`` + """ + tok = self.token() + accum = self.handle_condition(tok, indices) + if accum is None: + raise UnexpectedTokenException(tok) + return accum + + def handle_drs(self, tok): + if tok == "drs": + return self.parse_drs() + elif tok in ["merge", "smerge"]: + return self._handle_binary_expression(self._make_merge_expression)(None, []) + elif tok in ["alfa"]: + return self._handle_alfa(self._make_merge_expression)(None, []) + + def handle_condition(self, tok, indices): + """ + Handle a DRS condition + + :param indices: list of int + :return: list of ``DrtExpression`` + """ + if tok == "not": + return [self._handle_not()] + + if tok == "or": + conds = [self._handle_binary_expression(self._make_or_expression)] + elif tok == "imp": + conds = [self._handle_binary_expression(self._make_imp_expression)] + elif tok == "eq": + conds = [self._handle_eq()] + elif tok == "prop": + conds = [self._handle_prop()] + + elif tok == "pred": + conds = [self._handle_pred()] + elif tok == "named": + conds = [self._handle_named()] + elif tok == "rel": + conds = [self._handle_rel()] + elif tok == "timex": + conds = self._handle_timex() + elif tok == "card": + conds = [self._handle_card()] + + elif tok == "whq": + conds = [self._handle_whq()] + elif tok == "duplex": + conds = [self._handle_duplex()] + + else: + conds = [] + + return sum( + ( + [cond(sent_index, word_indices) for cond in conds] + for sent_index, word_indices in self._sent_and_word_indices(indices) + ), + [], + ) + + def _handle_not(self): + self.assertToken(self.token(), "(") + drs = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return BoxerNot(drs) + + def _handle_pred(self): + # pred(_G3943, dog, n, 0) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + name = self.token() + self.assertToken(self.token(), ",") + pos = self.token() + self.assertToken(self.token(), ",") + sense = int(self.token()) + self.assertToken(self.token(), ")") 
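As the next few lines show, ``_handle_pred`` does not build a ``BoxerPred`` directly: it returns a closure over the parsed arguments, and ``handle_condition`` later calls that closure once per (sentence index, word indices) pair recovered from the Prolog index list. A toy version of the pattern, with a tuple standing in for the real condition object::

    def handle_pred(variable, name, pos, sense):
        # Parsing happens here; construction is deferred to the closure.
        def build(sent_index, word_indices):
            return ("pred", sent_index, word_indices, variable, name, pos, sense)
        return build

    cond = handle_pred("x0", "dog", "n", 0)
    print(cond(0, [1]))   # ('pred', 0, [1], 'x0', 'dog', 'n', 0)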
+ + def _handle_pred_f(sent_index, word_indices): + return BoxerPred( + self.discourse_id, sent_index, word_indices, variable, name, pos, sense + ) + + return _handle_pred_f + + def _handle_duplex(self): + # duplex(whq, drs(...), var, drs(...)) + self.assertToken(self.token(), "(") + # self.assertToken(self.token(), '[') + ans_types = [] + # while self.token(0) != ']': + # cat = self.token() + # self.assertToken(self.token(), ':') + # if cat == 'des': + # ans_types.append(self.token()) + # elif cat == 'num': + # ans_types.append('number') + # typ = self.token() + # if typ == 'cou': + # ans_types.append('count') + # else: + # ans_types.append(typ) + # else: + # ans_types.append(self.token()) + # self.token() #swallow the ']' + + self.assertToken(self.token(), "whq") + self.assertToken(self.token(), ",") + d1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + ref = self.parse_variable() + self.assertToken(self.token(), ",") + d2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerWhq( + self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2 + ) + + def _handle_named(self): + # named(x0, john, per, 0) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + name = self.token() + self.assertToken(self.token(), ",") + type = self.token() + self.assertToken(self.token(), ",") + sense = self.token() # as per boxer rev 2554 + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerNamed( + self.discourse_id, sent_index, word_indices, variable, name, type, sense + ) + + def _handle_rel(self): + # rel(_G3993, _G3943, agent, 0) + self.assertToken(self.token(), "(") + var1 = self.parse_variable() + self.assertToken(self.token(), ",") + var2 = self.parse_variable() + self.assertToken(self.token(), ",") + rel = self.token() + self.assertToken(self.token(), ",") + sense = int(self.token()) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerRel( + self.discourse_id, sent_index, word_indices, var1, var2, rel, sense + ) + + def _handle_timex(self): + # timex(_G18322, date([]: (+), []:'XXXX', [1004]:'04', []:'XX')) + self.assertToken(self.token(), "(") + arg = self.parse_variable() + self.assertToken(self.token(), ",") + new_conds = self._handle_time_expression(arg) + self.assertToken(self.token(), ")") + return new_conds + + def _handle_time_expression(self, arg): + # date([]: (+), []:'XXXX', [1004]:'04', []:'XX') + tok = self.token() + self.assertToken(self.token(), "(") + if tok == "date": + conds = self._handle_date(arg) + elif tok == "time": + conds = self._handle_time(arg) + else: + return None + self.assertToken(self.token(), ")") + return [ + lambda sent_index, word_indices: BoxerPred( + self.discourse_id, sent_index, word_indices, arg, tok, "n", 0 + ) + ] + [lambda sent_index, word_indices: cond for cond in conds] + + def _handle_date(self, arg): + # []: (+), []:'XXXX', [1004]:'04', []:'XX' + conds = [] + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + self.assertToken(self.token(), "(") + pol = self.token() + self.assertToken(self.token(), ")") + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_pol_{pol}", + "a", + 0, + ) + ) + self.assertToken(self.token(), ",") + + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + year = self.token() + if 
year != "XXXX": + year = year.replace(":", "_") + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_year_{year}", + "a", + 0, + ) + ) + self.assertToken(self.token(), ",") + + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + month = self.token() + if month != "XX": + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_month_{month}", + "a", + 0, + ) + ) + self.assertToken(self.token(), ",") + + ((sent_index, word_indices),) = self._sent_and_word_indices( + self._parse_index_list() + ) + day = self.token() + if day != "XX": + conds.append( + BoxerPred( + self.discourse_id, + sent_index, + word_indices, + arg, + f"date_day_{day}", + "a", + 0, + ) + ) + + return conds + + def _handle_time(self, arg): + # time([1018]:'18', []:'XX', []:'XX') + conds = [] + self._parse_index_list() + hour = self.token() + if hour != "XX": + conds.append(self._make_atom("r_hour_2", arg, hour)) + self.assertToken(self.token(), ",") + + self._parse_index_list() + min = self.token() + if min != "XX": + conds.append(self._make_atom("r_min_2", arg, min)) + self.assertToken(self.token(), ",") + + self._parse_index_list() + sec = self.token() + if sec != "XX": + conds.append(self._make_atom("r_sec_2", arg, sec)) + + return conds + + def _handle_card(self): + # card(_G18535, 28, ge) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + value = self.token() + self.assertToken(self.token(), ",") + type = self.token() + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerCard( + self.discourse_id, sent_index, word_indices, variable, value, type + ) + + def _handle_prop(self): + # prop(_G15949, drs(...)) + self.assertToken(self.token(), "(") + variable = self.parse_variable() + self.assertToken(self.token(), ",") + drs = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerProp( + self.discourse_id, sent_index, word_indices, variable, drs + ) + + def _parse_index_list(self): + # [1001,1002]: + indices = [] + self.assertToken(self.token(), "[") + while self.token(0) != "]": + indices.append(self.parse_index()) + if self.token(0) == ",": + self.token() # swallow ',' + self.token() # swallow ']' + self.assertToken(self.token(), ":") + return indices + + def parse_drs(self): + # drs([[1001]:_G3943], + # [[1002]:pred(_G3943, dog, n, 0)] + # ) + self.assertToken(self.token(), "(") + self.assertToken(self.token(), "[") + refs = set() + while self.token(0) != "]": + indices = self._parse_index_list() + refs.add(self.parse_variable()) + if self.token(0) == ",": + self.token() # swallow ',' + self.token() # swallow ']' + self.assertToken(self.token(), ",") + self.assertToken(self.token(), "[") + conds = [] + while self.token(0) != "]": + indices = self._parse_index_list() + conds.extend(self.parse_condition(indices)) + if self.token(0) == ",": + self.token() # swallow ',' + self.token() # swallow ']' + self.assertToken(self.token(), ")") + return BoxerDrs(list(refs), conds) + + def _handle_binary_expression(self, make_callback): + self.assertToken(self.token(), "(") + drs1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + drs2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: make_callback( + sent_index, word_indices, drs1, drs2 + ) + + def _handle_alfa(self, 
make_callback): + self.assertToken(self.token(), "(") + type = self.token() + self.assertToken(self.token(), ",") + drs1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + drs2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: make_callback( + sent_index, word_indices, drs1, drs2 + ) + + def _handle_eq(self): + self.assertToken(self.token(), "(") + var1 = self.parse_variable() + self.assertToken(self.token(), ",") + var2 = self.parse_variable() + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerEq( + self.discourse_id, sent_index, word_indices, var1, var2 + ) + + def _handle_whq(self): + self.assertToken(self.token(), "(") + self.assertToken(self.token(), "[") + ans_types = [] + while self.token(0) != "]": + cat = self.token() + self.assertToken(self.token(), ":") + if cat == "des": + ans_types.append(self.token()) + elif cat == "num": + ans_types.append("number") + typ = self.token() + if typ == "cou": + ans_types.append("count") + else: + ans_types.append(typ) + else: + ans_types.append(self.token()) + self.token() # swallow the ']' + + self.assertToken(self.token(), ",") + d1 = self.process_next_expression(None) + self.assertToken(self.token(), ",") + ref = self.parse_variable() + self.assertToken(self.token(), ",") + d2 = self.process_next_expression(None) + self.assertToken(self.token(), ")") + return lambda sent_index, word_indices: BoxerWhq( + self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2 + ) + + def _make_merge_expression(self, sent_index, word_indices, drs1, drs2): + return BoxerDrs(drs1.refs + drs2.refs, drs1.conds + drs2.conds) + + def _make_or_expression(self, sent_index, word_indices, drs1, drs2): + return BoxerOr(self.discourse_id, sent_index, word_indices, drs1, drs2) + + def _make_imp_expression(self, sent_index, word_indices, drs1, drs2): + return BoxerDrs(drs1.refs, drs1.conds, drs2) + + def parse_variable(self): + var = self.token() + assert re.match(r"^[exps]\d+$", var), var + return var + + def parse_index(self): + return int(self.token()) + + def _sent_and_word_indices(self, indices): + """ + :return: list of (sent_index, word_indices) tuples + """ + sent_indices = {(i / 1000) - 1 for i in indices if i >= 0} + if sent_indices: + pairs = [] + for sent_index in sent_indices: + word_indices = [ + (i % 1000) - 1 for i in indices if sent_index == (i / 1000) - 1 + ] + pairs.append((sent_index, word_indices)) + return pairs + else: + word_indices = [(i % 1000) - 1 for i in indices] + return [(None, word_indices)] + + +class BoxerDrsParser(DrtParser): + """ + Reparse the str form of subclasses of ``AbstractBoxerDrs`` + """ + + def __init__(self, discourse_id=None): + DrtParser.__init__(self) + self.discourse_id = discourse_id + + def get_all_symbols(self): + return [ + DrtTokens.OPEN, + DrtTokens.CLOSE, + DrtTokens.COMMA, + DrtTokens.OPEN_BRACKET, + DrtTokens.CLOSE_BRACKET, + ] + + def attempt_adjuncts(self, expression, context): + return expression + + def handle(self, tok, context): + try: + # if tok == 'drs': + # self.assertNextToken(DrtTokens.OPEN) + # label = int(self.token()) + # self.assertNextToken(DrtTokens.COMMA) + # refs = list(map(int, self.handle_refs())) + # self.assertNextToken(DrtTokens.COMMA) + # conds = self.handle_conds(None) + # self.assertNextToken(DrtTokens.CLOSE) + # return BoxerDrs(label, refs, conds) + if tok == "pred": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if 
self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + variable = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + name = self.token() + self.assertNextToken(DrtTokens.COMMA) + pos = self.token() + self.assertNextToken(DrtTokens.COMMA) + sense = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerPred(disc_id, sent_id, word_ids, variable, name, pos, sense) + elif tok == "named": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + word_ids = map(int, self.handle_refs()) + self.assertNextToken(DrtTokens.COMMA) + variable = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + name = self.token() + self.assertNextToken(DrtTokens.COMMA) + type = self.token() + self.assertNextToken(DrtTokens.COMMA) + sense = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerNamed( + disc_id, sent_id, word_ids, variable, name, type, sense + ) + elif tok == "rel": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + var1 = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + var2 = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + rel = self.token() + self.assertNextToken(DrtTokens.COMMA) + sense = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerRel(disc_id, sent_id, word_ids, var1, var2, rel, sense) + elif tok == "prop": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + variable = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + drs = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerProp(disc_id, sent_id, word_ids, variable, drs) + elif tok == "not": + self.assertNextToken(DrtTokens.OPEN) + drs = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerNot(drs) + elif tok == "imp": + self.assertNextToken(DrtTokens.OPEN) + drs1 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.COMMA) + drs2 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerDrs(drs1.refs, drs1.conds, drs2) + elif tok == "or": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = map(int, self.handle_refs()) + self.assertNextToken(DrtTokens.COMMA) + drs1 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.COMMA) + drs2 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerOr(disc_id, sent_id, word_ids, drs1, drs2) + elif tok 
== "eq": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + var1 = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + var2 = int(self.token()) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerEq(disc_id, sent_id, word_ids, var1, var2) + elif tok == "card": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = map(int, self.handle_refs()) + self.assertNextToken(DrtTokens.COMMA) + var = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + value = self.token() + self.assertNextToken(DrtTokens.COMMA) + type = self.token() + self.assertNextToken(DrtTokens.CLOSE) + return BoxerCard(disc_id, sent_id, word_ids, var, value, type) + elif tok == "whq": + self.assertNextToken(DrtTokens.OPEN) + disc_id = ( + self.discourse_id if self.discourse_id is not None else self.token() + ) + self.assertNextToken(DrtTokens.COMMA) + sent_id = self.nullableIntToken() + self.assertNextToken(DrtTokens.COMMA) + word_ids = list(map(int, self.handle_refs())) + self.assertNextToken(DrtTokens.COMMA) + ans_types = self.handle_refs() + self.assertNextToken(DrtTokens.COMMA) + drs1 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.COMMA) + var = int(self.token()) + self.assertNextToken(DrtTokens.COMMA) + drs2 = self.process_next_expression(None) + self.assertNextToken(DrtTokens.CLOSE) + return BoxerWhq(disc_id, sent_id, word_ids, ans_types, drs1, var, drs2) + except Exception as e: + raise LogicalExpressionException(self._currentIndex, str(e)) from e + assert False, repr(tok) + + def nullableIntToken(self): + t = self.token() + return int(t) if t != "None" else None + + def get_next_token_variable(self, description): + try: + return self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException(e.index, "Variable expected.") from e + + +class AbstractBoxerDrs: + def variables(self): + """ + :return: (set, set, set) + """ + variables, events, propositions = self._variables() + return (variables - (events | propositions), events, propositions - events) + + def variable_types(self): + vartypes = {} + for t, vars in zip(("z", "e", "p"), self.variables()): + for v in vars: + vartypes[v] = t + return vartypes + + def _variables(self): + """ + :return: (set, set, set) + """ + return (set(), set(), set()) + + def atoms(self): + return set() + + def clean(self): + return self + + def _clean_name(self, name): + return name.replace("-", "_").replace("'", "_") + + def renumber_sentences(self, f): + return self + + def __hash__(self): + return hash(f"{self}") + + +class BoxerDrs(AbstractBoxerDrs): + def __init__(self, refs, conds, consequent=None): + AbstractBoxerDrs.__init__(self) + self.refs = refs + self.conds = conds + self.consequent = consequent + + def _variables(self): + variables = (set(), set(), set()) + for cond in self.conds: + for s, v in zip(variables, cond._variables()): + s.update(v) + if self.consequent is not None: + for s, v in zip(variables, self.consequent._variables()): + s.update(v) + return variables + + def atoms(self): + atoms = reduce(operator.or_, (cond.atoms() for 
cond in self.conds), set()) + if self.consequent is not None: + atoms.update(self.consequent.atoms()) + return atoms + + def clean(self): + consequent = self.consequent.clean() if self.consequent else None + return BoxerDrs(self.refs, [c.clean() for c in self.conds], consequent) + + def renumber_sentences(self, f): + consequent = self.consequent.renumber_sentences(f) if self.consequent else None + return BoxerDrs( + self.refs, [c.renumber_sentences(f) for c in self.conds], consequent + ) + + def __repr__(self): + s = "drs([{}], [{}])".format( + ", ".join("%s" % r for r in self.refs), + ", ".join("%s" % c for c in self.conds), + ) + if self.consequent is not None: + s = f"imp({s}, {self.consequent})" + return s + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.refs == other.refs + and len(self.conds) == len(other.conds) + and reduce( + operator.and_, (c1 == c2 for c1, c2 in zip(self.conds, other.conds)) + ) + and self.consequent == other.consequent + ) + + def __ne__(self, other): + return not self == other + + __hash__ = AbstractBoxerDrs.__hash__ + + +class BoxerNot(AbstractBoxerDrs): + def __init__(self, drs): + AbstractBoxerDrs.__init__(self) + self.drs = drs + + def _variables(self): + return self.drs._variables() + + def atoms(self): + return self.drs.atoms() + + def clean(self): + return BoxerNot(self.drs.clean()) + + def renumber_sentences(self, f): + return BoxerNot(self.drs.renumber_sentences(f)) + + def __repr__(self): + return "not(%s)" % (self.drs) + + def __eq__(self, other): + return self.__class__ == other.__class__ and self.drs == other.drs + + def __ne__(self, other): + return not self == other + + __hash__ = AbstractBoxerDrs.__hash__ + + +class BoxerIndexed(AbstractBoxerDrs): + def __init__(self, discourse_id, sent_index, word_indices): + AbstractBoxerDrs.__init__(self) + self.discourse_id = discourse_id + self.sent_index = sent_index + self.word_indices = word_indices + + def atoms(self): + return {self} + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.discourse_id == other.discourse_id + and self.sent_index == other.sent_index + and self.word_indices == other.word_indices + and reduce(operator.and_, (s == o for s, o in zip(self, other))) + ) + + def __ne__(self, other): + return not self == other + + __hash__ = AbstractBoxerDrs.__hash__ + + def __repr__(self): + s = "{}({}, {}, [{}]".format( + self._pred(), + self.discourse_id, + self.sent_index, + ", ".join("%s" % wi for wi in self.word_indices), + ) + for v in self: + s += ", %s" % v + return s + ")" + + +class BoxerPred(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, name, pos, sense): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.name = name + self.pos = pos + self.sense = sense + + def _variables(self): + return ({self.var}, set(), set()) + + def change_var(self, var): + return BoxerPred( + self.discourse_id, + self.sent_index, + self.word_indices, + var, + self.name, + self.pos, + self.sense, + ) + + def clean(self): + return BoxerPred( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var, + self._clean_name(self.name), + self.pos, + self.sense, + ) + + def renumber_sentences(self, f): + new_sent_index = f(self.sent_index) + return BoxerPred( + self.discourse_id, + new_sent_index, + self.word_indices, + self.var, + self.name, + self.pos, + self.sense, + ) + + def __iter__(self): + return iter((self.var, self.name, self.pos, self.sense)) + + 
def _pred(self): + return "pred" + + +class BoxerNamed(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, name, type, sense): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.name = name + self.type = type + self.sense = sense + + def _variables(self): + return ({self.var}, set(), set()) + + def change_var(self, var): + return BoxerNamed( + self.discourse_id, + self.sent_index, + self.word_indices, + var, + self.name, + self.type, + self.sense, + ) + + def clean(self): + return BoxerNamed( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var, + self._clean_name(self.name), + self.type, + self.sense, + ) + + def renumber_sentences(self, f): + return BoxerNamed( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var, + self.name, + self.type, + self.sense, + ) + + def __iter__(self): + return iter((self.var, self.name, self.type, self.sense)) + + def _pred(self): + return "named" + + +class BoxerRel(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var1, var2, rel, sense): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var1 = var1 + self.var2 = var2 + self.rel = rel + self.sense = sense + + def _variables(self): + return ({self.var1, self.var2}, set(), set()) + + def clean(self): + return BoxerRel( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var1, + self.var2, + self._clean_name(self.rel), + self.sense, + ) + + def renumber_sentences(self, f): + return BoxerRel( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var1, + self.var2, + self.rel, + self.sense, + ) + + def __iter__(self): + return iter((self.var1, self.var2, self.rel, self.sense)) + + def _pred(self): + return "rel" + + +class BoxerProp(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, drs): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.drs = drs + + def _variables(self): + return tuple( + map(operator.or_, (set(), set(), {self.var}), self.drs._variables()) + ) + + def referenced_labels(self): + return {self.drs} + + def atoms(self): + return self.drs.atoms() + + def clean(self): + return BoxerProp( + self.discourse_id, + self.sent_index, + self.word_indices, + self.var, + self.drs.clean(), + ) + + def renumber_sentences(self, f): + return BoxerProp( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var, + self.drs.renumber_sentences(f), + ) + + def __iter__(self): + return iter((self.var, self.drs)) + + def _pred(self): + return "prop" + + +class BoxerEq(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var1, var2): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var1 = var1 + self.var2 = var2 + + def _variables(self): + return ({self.var1, self.var2}, set(), set()) + + def atoms(self): + return set() + + def renumber_sentences(self, f): + return BoxerEq( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var1, + self.var2, + ) + + def __iter__(self): + return iter((self.var1, self.var2)) + + def _pred(self): + return "eq" + + +class BoxerCard(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, var, value, type): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.var = var + self.value = value + self.type = type + + def _variables(self): + return ({self.var}, set(), set()) + + def 
renumber_sentences(self, f): + return BoxerCard( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.var, + self.value, + self.type, + ) + + def __iter__(self): + return iter((self.var, self.value, self.type)) + + def _pred(self): + return "card" + + +class BoxerOr(BoxerIndexed): + def __init__(self, discourse_id, sent_index, word_indices, drs1, drs2): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.drs1 = drs1 + self.drs2 = drs2 + + def _variables(self): + return tuple(map(operator.or_, self.drs1._variables(), self.drs2._variables())) + + def atoms(self): + return self.drs1.atoms() | self.drs2.atoms() + + def clean(self): + return BoxerOr( + self.discourse_id, + self.sent_index, + self.word_indices, + self.drs1.clean(), + self.drs2.clean(), + ) + + def renumber_sentences(self, f): + return BoxerOr( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.drs1, + self.drs2, + ) + + def __iter__(self): + return iter((self.drs1, self.drs2)) + + def _pred(self): + return "or" + + +class BoxerWhq(BoxerIndexed): + def __init__( + self, discourse_id, sent_index, word_indices, ans_types, drs1, variable, drs2 + ): + BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) + self.ans_types = ans_types + self.drs1 = drs1 + self.variable = variable + self.drs2 = drs2 + + def _variables(self): + return tuple( + map( + operator.or_, + ({self.variable}, set(), set()), + self.drs1._variables(), + self.drs2._variables(), + ) + ) + + def atoms(self): + return self.drs1.atoms() | self.drs2.atoms() + + def clean(self): + return BoxerWhq( + self.discourse_id, + self.sent_index, + self.word_indices, + self.ans_types, + self.drs1.clean(), + self.variable, + self.drs2.clean(), + ) + + def renumber_sentences(self, f): + return BoxerWhq( + self.discourse_id, + f(self.sent_index), + self.word_indices, + self.ans_types, + self.drs1, + self.variable, + self.drs2, + ) + + def __iter__(self): + return iter( + ("[" + ",".join(self.ans_types) + "]", self.drs1, self.variable, self.drs2) + ) + + def _pred(self): + return "whq" + + +class PassthroughBoxerDrsInterpreter: + def interpret(self, ex): + return ex + + +class NltkDrtBoxerDrsInterpreter: + def __init__(self, occur_index=False): + self._occur_index = occur_index + + def interpret(self, ex): + """ + :param ex: ``AbstractBoxerDrs`` + :return: ``DrtExpression`` + """ + if isinstance(ex, BoxerDrs): + drs = DRS( + [Variable(r) for r in ex.refs], list(map(self.interpret, ex.conds)) + ) + if ex.consequent is not None: + drs.consequent = self.interpret(ex.consequent) + return drs + elif isinstance(ex, BoxerNot): + return DrtNegatedExpression(self.interpret(ex.drs)) + elif isinstance(ex, BoxerPred): + pred = self._add_occur_indexing(f"{ex.pos}_{ex.name}", ex) + return self._make_atom(pred, ex.var) + elif isinstance(ex, BoxerNamed): + pred = self._add_occur_indexing(f"ne_{ex.type}_{ex.name}", ex) + return self._make_atom(pred, ex.var) + elif isinstance(ex, BoxerRel): + pred = self._add_occur_indexing("%s" % (ex.rel), ex) + return self._make_atom(pred, ex.var1, ex.var2) + elif isinstance(ex, BoxerProp): + return DrtProposition(Variable(ex.var), self.interpret(ex.drs)) + elif isinstance(ex, BoxerEq): + return DrtEqualityExpression( + DrtVariableExpression(Variable(ex.var1)), + DrtVariableExpression(Variable(ex.var2)), + ) + elif isinstance(ex, BoxerCard): + pred = self._add_occur_indexing(f"card_{ex.type}_{ex.value}", ex) + return self._make_atom(pred, ex.var) + elif isinstance(ex, BoxerOr): + return 
DrtOrExpression(self.interpret(ex.drs1), self.interpret(ex.drs2)) + elif isinstance(ex, BoxerWhq): + drs1 = self.interpret(ex.drs1) + drs2 = self.interpret(ex.drs2) + return DRS(drs1.refs + drs2.refs, drs1.conds + drs2.conds) + assert False, f"{ex.__class__.__name__}: {ex}" + + def _make_atom(self, pred, *args): + accum = DrtVariableExpression(Variable(pred)) + for arg in args: + accum = DrtApplicationExpression( + accum, DrtVariableExpression(Variable(arg)) + ) + return accum + + def _add_occur_indexing(self, base, ex): + if self._occur_index and ex.sent_index is not None: + if ex.discourse_id: + base += "_%s" % ex.discourse_id + base += "_s%s" % ex.sent_index + base += "_w%s" % sorted(ex.word_indices)[0] + return base + + +class UnparseableInputException(Exception): + pass + + +if __name__ == "__main__": + opts = OptionParser("usage: %prog TEXT [options]") + opts.add_option( + "--verbose", + "-v", + help="display verbose logs", + action="store_true", + default=False, + dest="verbose", + ) + opts.add_option( + "--fol", "-f", help="output FOL", action="store_true", default=False, dest="fol" + ) + opts.add_option( + "--question", + "-q", + help="input is a question", + action="store_true", + default=False, + dest="question", + ) + opts.add_option( + "--occur", + "-o", + help="occurrence index", + action="store_true", + default=False, + dest="occur_index", + ) + (options, args) = opts.parse_args() + + if len(args) != 1: + opts.error("incorrect number of arguments") + + interpreter = NltkDrtBoxerDrsInterpreter(occur_index=options.occur_index) + drs = Boxer(interpreter).interpret_multi( + args[0].split(r"\n"), question=options.question, verbose=options.verbose + ) + if drs is None: + print(None) + else: + drs = drs.simplify().eliminate_equality() + if options.fol: + print(drs.fol().normalize()) + else: + drs.pretty_print() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/chat80.py b/venv/lib/python3.10/site-packages/nltk/sem/chat80.py new file mode 100644 index 0000000000000000000000000000000000000000..3d1e77a49f19b2e4414f66741570cdb033ec7ca6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/chat80.py @@ -0,0 +1,857 @@ +# Natural Language Toolkit: Chat-80 KB Reader +# See https://www.w3.org/TR/swbp-skos-core-guide/ +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein , +# URL: +# For license information, see LICENSE.TXT + +r""" +Overview +======== + +Chat-80 was a natural language system which allowed the user to +interrogate a Prolog knowledge base in the domain of world +geography. It was developed in the early '80s by Warren and Pereira; see +``https://www.aclweb.org/anthology/J82-3002.pdf`` for a description and +``http://www.cis.upenn.edu/~pereira/oldies.html`` for the source +files. + +This module contains functions to extract data from the Chat-80 +relation files ('the world database'), and convert then into a format +that can be incorporated in the FOL models of +``nltk.sem.evaluate``. The code assumes that the Prolog +input files are available in the NLTK corpora directory. + +The Chat-80 World Database consists of the following files:: + + world0.pl + rivers.pl + cities.pl + countries.pl + contain.pl + borders.pl + +This module uses a slightly modified version of ``world0.pl``, in which +a set of Prolog rules have been omitted. The modified file is named +``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since +it uses a list rather than a string in the second field. 
+ +Reading Chat-80 Files +===================== + +Chat-80 relations are like tables in a relational database. The +relation acts as the name of the table; the first argument acts as the +'primary key'; and subsequent arguments are further fields in the +table. In general, the name of the table provides a label for a unary +predicate whose extension is all the primary keys. For example, +relations in ``cities.pl`` are of the following form:: + + 'city(athens,greece,1368).' + +Here, ``'athens'`` is the key, and will be mapped to a member of the +unary predicate *city*. + +The fields in the table are mapped to binary predicates. The first +argument of the predicate is the primary key, while the second +argument is the data in the relevant field. Thus, in the above +example, the third field is mapped to the binary predicate +*population_of*, whose extension is a set of pairs such as +``'(athens, 1368)'``. + +An exception to this general framework is required by the relations in +the files ``borders.pl`` and ``contains.pl``. These contain facts of the +following form:: + + 'borders(albania,greece).' + + 'contains0(africa,central_africa).' + +We do not want to form a unary concept out the element in +the first field of these records, and we want the label of the binary +relation just to be ``'border'``/``'contain'`` respectively. + +In order to drive the extraction process, we use 'relation metadata bundles' +which are Python dictionaries such as the following:: + + city = {'label': 'city', + 'closures': [], + 'schema': ['city', 'country', 'population'], + 'filename': 'cities.pl'} + +According to this, the file ``city['filename']`` contains a list of +relational tuples (or more accurately, the corresponding strings in +Prolog form) whose predicate symbol is ``city['label']`` and whose +relational schema is ``city['schema']``. The notion of a ``closure`` is +discussed in the next section. + +Concepts +======== +In order to encapsulate the results of the extraction, a class of +``Concept`` objects is introduced. A ``Concept`` object has a number of +attributes, in particular a ``prefLabel`` and ``extension``, which make +it easier to inspect the output of the extraction. In addition, the +``extension`` can be further processed: in the case of the ``'border'`` +relation, we check that the relation is symmetric, and in the case +of the ``'contain'`` relation, we carry out the transitive +closure. The closure properties associated with a concept is +indicated in the relation metadata, as indicated earlier. + +The ``extension`` of a ``Concept`` object is then incorporated into a +``Valuation`` object. + +Persistence +=========== +The functions ``val_dump`` and ``val_load`` are provided to allow a +valuation to be stored in a persistent database and re-loaded, rather +than having to be re-computed each time. + +Individuals and Lexical Items +============================= +As well as deriving relations from the Chat-80 data, we also create a +set of individual constants, one for each entity in the domain. The +individual constants are string-identical to the entities. For +example, given a data item such as ``'zloty'``, we add to the valuation +a pair ``('zloty', 'zloty')``. In order to parse English sentences that +refer to these entities, we also create a lexical item such as the +following for each individual constant:: + + PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty' + +The set of rules is written to the file ``chat_pnames.cfg`` in the +current directory. 
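+
+Usage sketch
+============
+A minimal sketch of how the interface functions defined below can be
+combined (it assumes the ``chat80`` corpus data is installed, e.g. via
+``nltk.download('chat80')``; see also ``main()``)::
+
+    from nltk.sem import chat80
+
+    # build Concept objects for two of the Chat-80 relations
+    cs = chat80.concepts(['city', 'country'])
+
+    # fold the concepts into a single Valuation; read=True also adds an
+    # individual constant for every entity in the domain
+    val = chat80.make_valuation(cs, read=True)
+    print(sorted(val.domain)[:5])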
+ +""" + +import os +import re +import shelve +import sys + +import nltk.data + +########################################################################### +# Chat-80 relation metadata bundles needed to build the valuation +########################################################################### + +borders = { + "rel_name": "borders", + "closures": ["symmetric"], + "schema": ["region", "border"], + "filename": "borders.pl", +} + +contains = { + "rel_name": "contains0", + "closures": ["transitive"], + "schema": ["region", "contain"], + "filename": "contain.pl", +} + +city = { + "rel_name": "city", + "closures": [], + "schema": ["city", "country", "population"], + "filename": "cities.pl", +} + +country = { + "rel_name": "country", + "closures": [], + "schema": [ + "country", + "region", + "latitude", + "longitude", + "area", + "population", + "capital", + "currency", + ], + "filename": "countries.pl", +} + +circle_of_lat = { + "rel_name": "circle_of_latitude", + "closures": [], + "schema": ["circle_of_latitude", "degrees"], + "filename": "world1.pl", +} + +circle_of_long = { + "rel_name": "circle_of_longitude", + "closures": [], + "schema": ["circle_of_longitude", "degrees"], + "filename": "world1.pl", +} + +continent = { + "rel_name": "continent", + "closures": [], + "schema": ["continent"], + "filename": "world1.pl", +} + +region = { + "rel_name": "in_continent", + "closures": [], + "schema": ["region", "continent"], + "filename": "world1.pl", +} + +ocean = { + "rel_name": "ocean", + "closures": [], + "schema": ["ocean"], + "filename": "world1.pl", +} + +sea = {"rel_name": "sea", "closures": [], "schema": ["sea"], "filename": "world1.pl"} + + +items = [ + "borders", + "contains", + "city", + "country", + "circle_of_lat", + "circle_of_long", + "continent", + "region", + "ocean", + "sea", +] +items = tuple(sorted(items)) + +item_metadata = { + "borders": borders, + "contains": contains, + "city": city, + "country": country, + "circle_of_lat": circle_of_lat, + "circle_of_long": circle_of_long, + "continent": continent, + "region": region, + "ocean": ocean, + "sea": sea, +} + +rels = item_metadata.values() + +not_unary = ["borders.pl", "contain.pl"] + +########################################################################### + + +class Concept: + """ + A Concept class, loosely based on SKOS + (https://www.w3.org/TR/swbp-skos-core-guide/). 
+ """ + + def __init__(self, prefLabel, arity, altLabels=[], closures=[], extension=set()): + """ + :param prefLabel: the preferred label for the concept + :type prefLabel: str + :param arity: the arity of the concept + :type arity: int + :param altLabels: other (related) labels + :type altLabels: list + :param closures: closure properties of the extension + (list items can be ``symmetric``, ``reflexive``, ``transitive``) + :type closures: list + :param extension: the extensional value of the concept + :type extension: set + """ + self.prefLabel = prefLabel + self.arity = arity + self.altLabels = altLabels + self.closures = closures + # keep _extension internally as a set + self._extension = extension + # public access is via a list (for slicing) + self.extension = sorted(list(extension)) + + def __str__(self): + # _extension = '' + # for element in sorted(self.extension): + # if isinstance(element, tuple): + # element = '(%s, %s)' % (element) + # _extension += element + ', ' + # _extension = _extension[:-1] + + return "Label = '{}'\nArity = {}\nExtension = {}".format( + self.prefLabel, + self.arity, + self.extension, + ) + + def __repr__(self): + return "Concept('%s')" % self.prefLabel + + def augment(self, data): + """ + Add more data to the ``Concept``'s extension set. + + :param data: a new semantic value + :type data: string or pair of strings + :rtype: set + + """ + self._extension.add(data) + self.extension = sorted(list(self._extension)) + return self._extension + + def _make_graph(self, s): + """ + Convert a set of pairs into an adjacency linked list encoding of a graph. + """ + g = {} + for (x, y) in s: + if x in g: + g[x].append(y) + else: + g[x] = [y] + return g + + def _transclose(self, g): + """ + Compute the transitive closure of a graph represented as a linked list. + """ + for x in g: + for adjacent in g[x]: + # check that adjacent is a key + if adjacent in g: + for y in g[adjacent]: + if y not in g[x]: + g[x].append(y) + return g + + def _make_pairs(self, g): + """ + Convert an adjacency linked list back into a set of pairs. + """ + pairs = [] + for node in g: + for adjacent in g[node]: + pairs.append((node, adjacent)) + return set(pairs) + + def close(self): + """ + Close a binary relation in the ``Concept``'s extension set. + + :return: a new extension for the ``Concept`` in which the + relation is closed under a given property + """ + from nltk.sem import is_rel + + assert is_rel(self._extension) + if "symmetric" in self.closures: + pairs = [] + for (x, y) in self._extension: + pairs.append((y, x)) + sym = set(pairs) + self._extension = self._extension.union(sym) + if "transitive" in self.closures: + all = self._make_graph(self._extension) + closed = self._transclose(all) + trans = self._make_pairs(closed) + self._extension = self._extension.union(trans) + self.extension = sorted(list(self._extension)) + + +def clause2concepts(filename, rel_name, schema, closures=[]): + """ + Convert a file of Prolog clauses into a list of ``Concept`` objects. 
+ + :param filename: filename containing the relations + :type filename: str + :param rel_name: name of the relation + :type rel_name: str + :param schema: the schema used in a set of relational tuples + :type schema: list + :param closures: closure properties for the extension of the concept + :type closures: list + :return: a list of ``Concept`` objects + :rtype: list + """ + concepts = [] + # position of the subject of a binary relation + subj = 0 + # label of the 'primary key' + pkey = schema[0] + # fields other than the primary key + fields = schema[1:] + + # convert a file into a list of lists + records = _str2records(filename, rel_name) + + # add a unary concept corresponding to the set of entities + # in the primary key position + # relations in 'not_unary' are more like ordinary binary relations + if not filename in not_unary: + concepts.append(unary_concept(pkey, subj, records)) + + # add a binary concept for each non-key field + for field in fields: + obj = schema.index(field) + concepts.append(binary_concept(field, closures, subj, obj, records)) + + return concepts + + +def cities2table(filename, rel_name, dbname, verbose=False, setup=False): + """ + Convert a file of Prolog clauses into a database table. + + This is not generic, since it doesn't allow arbitrary + schemas to be set as a parameter. + + Intended usage:: + + cities2table('cities.pl', 'city', 'city.db', verbose=True, setup=True) + + :param filename: filename containing the relations + :type filename: str + :param rel_name: name of the relation + :type rel_name: str + :param dbname: filename of persistent store + :type schema: str + """ + import sqlite3 + + records = _str2records(filename, rel_name) + connection = sqlite3.connect(dbname) + cur = connection.cursor() + if setup: + cur.execute( + """CREATE TABLE city_table + (City text, Country text, Population int)""" + ) + + table_name = "city_table" + for t in records: + cur.execute("insert into %s values (?,?,?)" % table_name, t) + if verbose: + print("inserting values into %s: " % table_name, t) + connection.commit() + if verbose: + print("Committing update to %s" % dbname) + cur.close() + + +def sql_query(dbname, query): + """ + Execute an SQL query over a database. + :param dbname: filename of persistent store + :type schema: str + :param query: SQL query + :type rel_name: str + """ + import sqlite3 + + try: + path = nltk.data.find(dbname) + connection = sqlite3.connect(str(path)) + cur = connection.cursor() + return cur.execute(query) + except (ValueError, sqlite3.OperationalError): + import warnings + + warnings.warn( + "Make sure the database file %s is installed and uncompressed." % dbname + ) + raise + + +def _str2records(filename, rel): + """ + Read a file into memory and convert each relation clause into a list. + """ + recs = [] + contents = nltk.data.load("corpora/chat80/%s" % filename, format="text") + for line in contents.splitlines(): + if line.startswith(rel): + line = re.sub(rel + r"\(", "", line) + line = re.sub(r"\)\.$", "", line) + record = line.split(",") + recs.append(record) + return recs + + +def unary_concept(label, subj, records): + """ + Make a unary concept out of the primary key in a record. + + A record is a list of entities in some relation, such as + ``['france', 'paris']``, where ``'france'`` is acting as the primary + key. 
+ + :param label: the preferred label for the concept + :type label: string + :param subj: position in the record of the subject of the predicate + :type subj: int + :param records: a list of records + :type records: list of lists + :return: ``Concept`` of arity 1 + :rtype: Concept + """ + c = Concept(label, arity=1, extension=set()) + for record in records: + c.augment(record[subj]) + return c + + +def binary_concept(label, closures, subj, obj, records): + """ + Make a binary concept out of the primary key and another field in a record. + + A record is a list of entities in some relation, such as + ``['france', 'paris']``, where ``'france'`` is acting as the primary + key, and ``'paris'`` stands in the ``'capital_of'`` relation to + ``'france'``. + + More generally, given a record such as ``['a', 'b', 'c']``, where + label is bound to ``'B'``, and ``obj`` bound to 1, the derived + binary concept will have label ``'B_of'``, and its extension will + be a set of pairs such as ``('a', 'b')``. + + + :param label: the base part of the preferred label for the concept + :type label: str + :param closures: closure properties for the extension of the concept + :type closures: list + :param subj: position in the record of the subject of the predicate + :type subj: int + :param obj: position in the record of the object of the predicate + :type obj: int + :param records: a list of records + :type records: list of lists + :return: ``Concept`` of arity 2 + :rtype: Concept + """ + if not label == "border" and not label == "contain": + label = label + "_of" + c = Concept(label, arity=2, closures=closures, extension=set()) + for record in records: + c.augment((record[subj], record[obj])) + # close the concept's extension according to the properties in closures + c.close() + return c + + +def process_bundle(rels): + """ + Given a list of relation metadata bundles, make a corresponding + dictionary of concepts, indexed by the relation name. + + :param rels: bundle of metadata needed for constructing a concept + :type rels: list(dict) + :return: a dictionary of concepts, indexed by the relation name. + :rtype: dict(str): Concept + """ + concepts = {} + for rel in rels: + rel_name = rel["rel_name"] + closures = rel["closures"] + schema = rel["schema"] + filename = rel["filename"] + + concept_list = clause2concepts(filename, rel_name, schema, closures) + for c in concept_list: + label = c.prefLabel + if label in concepts: + for data in c.extension: + concepts[label].augment(data) + concepts[label].close() + else: + concepts[label] = c + return concepts + + +def make_valuation(concepts, read=False, lexicon=False): + """ + Convert a list of ``Concept`` objects into a list of (label, extension) pairs; + optionally create a ``Valuation`` object. + + :param concepts: concepts + :type concepts: list(Concept) + :param read: if ``True``, ``(symbol, set)`` pairs are read into a ``Valuation`` + :type read: bool + :rtype: list or Valuation + """ + vals = [] + + for c in concepts: + vals.append((c.prefLabel, c.extension)) + if lexicon: + read = True + if read: + from nltk.sem import Valuation + + val = Valuation({}) + val.update(vals) + # add labels for individuals + val = label_indivs(val, lexicon=lexicon) + return val + else: + return vals + + +def val_dump(rels, db): + """ + Make a ``Valuation`` from a list of relation metadata bundles and dump to + persistent database. + + :param rels: bundle of metadata needed for constructing a concept + :type rels: list of dict + :param db: name of file to which data is written. 
+ The suffix '.db' will be automatically appended. + :type db: str + """ + concepts = process_bundle(rels).values() + valuation = make_valuation(concepts, read=True) + db_out = shelve.open(db, "n") + + db_out.update(valuation) + + db_out.close() + + +def val_load(db): + """ + Load a ``Valuation`` from a persistent database. + + :param db: name of file from which data is read. + The suffix '.db' should be omitted from the name. + :type db: str + """ + dbname = db + ".db" + + if not os.access(dbname, os.R_OK): + sys.exit("Cannot read file: %s" % dbname) + else: + db_in = shelve.open(db) + from nltk.sem import Valuation + + val = Valuation(db_in) + # val.read(db_in.items()) + return val + + +# def alpha(str): +# """ +# Utility to filter out non-alphabetic constants. + +#:param str: candidate constant +#:type str: string +#:rtype: bool +# """ +# try: +# int(str) +# return False +# except ValueError: +## some unknown values in records are labeled '?' +# if not str == '?': +# return True + + +def label_indivs(valuation, lexicon=False): + """ + Assign individual constants to the individuals in the domain of a ``Valuation``. + + Given a valuation with an entry of the form ``{'rel': {'a': True}}``, + add a new entry ``{'a': 'a'}``. + + :type valuation: Valuation + :rtype: Valuation + """ + # collect all the individuals into a domain + domain = valuation.domain + # convert the domain into a sorted list of alphabetic terms + # use the same string as a label + pairs = [(e, e) for e in domain] + if lexicon: + lex = make_lex(domain) + with open("chat_pnames.cfg", "w") as outfile: + outfile.writelines(lex) + # read the pairs into the valuation + valuation.update(pairs) + return valuation + + +def make_lex(symbols): + """ + Create lexical CFG rules for each individual symbol. + + Given a valuation with an entry of the form ``{'zloty': 'zloty'}``, + create a lexical rule for the proper name 'Zloty'. + + :param symbols: a list of individual constants in the semantic representation + :type symbols: sequence -- set(str) + :rtype: list(str) + """ + lex = [] + header = """ +################################################################## +# Lexical rules automatically generated by running 'chat80.py -x'. +################################################################## + +""" + lex.append(header) + template = r"PropN[num=sg, sem=<\P.(P %s)>] -> '%s'\n" + + for s in symbols: + parts = s.split("_") + caps = [p.capitalize() for p in parts] + pname = "_".join(caps) + rule = template % (s, pname) + lex.append(rule) + return lex + + +########################################################################### +# Interface function to emulate other corpus readers +########################################################################### + + +def concepts(items=items): + """ + Build a list of concepts corresponding to the relation names in ``items``. + + :param items: names of the Chat-80 relations to extract + :type items: list(str) + :return: the ``Concept`` objects which are extracted from the relations + :rtype: list(Concept) + """ + if isinstance(items, str): + items = (items,) + + rels = [item_metadata[r] for r in items] + + concept_map = process_bundle(rels) + return concept_map.values() + + +########################################################################### + + +def main(): + import sys + from optparse import OptionParser + + description = """ +Extract data from the Chat-80 Prolog files and convert them into a +Valuation object for use in the NLTK semantics package. 
+ """ + + opts = OptionParser(description=description) + opts.set_defaults(verbose=True, lex=False, vocab=False) + opts.add_option( + "-s", "--store", dest="outdb", help="store a valuation in DB", metavar="DB" + ) + opts.add_option( + "-l", + "--load", + dest="indb", + help="load a stored valuation from DB", + metavar="DB", + ) + opts.add_option( + "-c", + "--concepts", + action="store_true", + help="print concepts instead of a valuation", + ) + opts.add_option( + "-r", + "--relation", + dest="label", + help="print concept with label REL (check possible labels with '-v' option)", + metavar="REL", + ) + opts.add_option( + "-q", + "--quiet", + action="store_false", + dest="verbose", + help="don't print out progress info", + ) + opts.add_option( + "-x", + "--lex", + action="store_true", + dest="lex", + help="write a file of lexical entries for country names, then exit", + ) + opts.add_option( + "-v", + "--vocab", + action="store_true", + dest="vocab", + help="print out the vocabulary of concept labels and their arity, then exit", + ) + + (options, args) = opts.parse_args() + if options.outdb and options.indb: + opts.error("Options --store and --load are mutually exclusive") + + if options.outdb: + # write the valuation to a persistent database + if options.verbose: + outdb = options.outdb + ".db" + print("Dumping a valuation to %s" % outdb) + val_dump(rels, options.outdb) + sys.exit(0) + else: + # try to read in a valuation from a database + if options.indb is not None: + dbname = options.indb + ".db" + if not os.access(dbname, os.R_OK): + sys.exit("Cannot read file: %s" % dbname) + else: + valuation = val_load(options.indb) + # we need to create the valuation from scratch + else: + # build some concepts + concept_map = process_bundle(rels) + concepts = concept_map.values() + # just print out the vocabulary + if options.vocab: + items = sorted((c.arity, c.prefLabel) for c in concepts) + for (arity, label) in items: + print(label, arity) + sys.exit(0) + # show all the concepts + if options.concepts: + for c in concepts: + print(c) + print() + if options.label: + print(concept_map[options.label]) + sys.exit(0) + else: + # turn the concepts into a Valuation + if options.lex: + if options.verbose: + print("Writing out lexical rules") + make_valuation(concepts, lexicon=True) + else: + valuation = make_valuation(concepts, read=True) + print(valuation) + + +def sql_demo(): + """ + Print out every row from the 'city.db' database. + """ + print() + print("Using SQL to extract rows from 'city.db' RDB.") + for row in sql_query("corpora/city_database/city.db", "SELECT * FROM city_table"): + print(row) + + +if __name__ == "__main__": + main() + sql_demo() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/cooper_storage.py b/venv/lib/python3.10/site-packages/nltk/sem/cooper_storage.py new file mode 100644 index 0000000000000000000000000000000000000000..a41502187ed1dfbfae5bc21bdf7c29624cab1e0f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/cooper_storage.py @@ -0,0 +1,124 @@ +# Natural Language Toolkit: Cooper storage for Quantifier Ambiguity +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +from nltk.parse import load_parser +from nltk.parse.featurechart import InstantiateVarsChart +from nltk.sem.logic import ApplicationExpression, LambdaExpression, Variable + + +class CooperStore: + """ + A container for handling quantifier ambiguity via Cooper storage. 
+ """ + + def __init__(self, featstruct): + """ + :param featstruct: The value of the ``sem`` node in a tree from + ``parse_with_bindops()`` + :type featstruct: FeatStruct (with features ``core`` and ``store``) + + """ + self.featstruct = featstruct + self.readings = [] + try: + self.core = featstruct["CORE"] + self.store = featstruct["STORE"] + except KeyError: + print("%s is not a Cooper storage structure" % featstruct) + + def _permute(self, lst): + """ + :return: An iterator over the permutations of the input list + :type lst: list + :rtype: iter + """ + remove = lambda lst0, index: lst0[:index] + lst0[index + 1 :] + if lst: + for index, x in enumerate(lst): + for y in self._permute(remove(lst, index)): + yield (x,) + y + else: + yield () + + def s_retrieve(self, trace=False): + r""" + Carry out S-Retrieval of binding operators in store. If hack=True, + serialize the bindop and core as strings and reparse. Ugh. + + Each permutation of the store (i.e. list of binding operators) is + taken to be a possible scoping of quantifiers. We iterate through the + binding operators in each permutation, and successively apply them to + the current term, starting with the core semantic representation, + working from the inside out. + + Binding operators are of the form:: + + bo(\P.all x.(man(x) -> P(x)),z1) + """ + for perm, store_perm in enumerate(self._permute(self.store)): + if trace: + print("Permutation %s" % (perm + 1)) + term = self.core + for bindop in store_perm: + # we just want the arguments that are wrapped by the 'bo' predicate + quant, varex = tuple(bindop.args) + # use var to make an abstraction over the current term and then + # apply the quantifier to it + term = ApplicationExpression( + quant, LambdaExpression(varex.variable, term) + ) + if trace: + print(" ", term) + term = term.simplify() + self.readings.append(term) + + +def parse_with_bindops(sentence, grammar=None, trace=0): + """ + Use a grammar with Binding Operators to parse a sentence. + """ + if not grammar: + grammar = "grammars/book_grammars/storage.fcfg" + parser = load_parser(grammar, trace=trace, chart_class=InstantiateVarsChart) + # Parse the sentence. 
+ tokens = sentence.split() + return list(parser.parse(tokens)) + + +def demo(): + from nltk.sem import cooper_storage as cs + + sentence = "every girl chases a dog" + # sentence = "a man gives a bone to every dog" + print() + print("Analysis of sentence '%s'" % sentence) + print("=" * 50) + trees = cs.parse_with_bindops(sentence, trace=0) + for tree in trees: + semrep = cs.CooperStore(tree.label()["SEM"]) + print() + print("Binding operators:") + print("-" * 15) + for s in semrep.store: + print(s) + print() + print("Core:") + print("-" * 15) + print(semrep.core) + print() + print("S-Retrieval:") + print("-" * 15) + semrep.s_retrieve(trace=True) + print("Readings:") + print("-" * 15) + + for i, reading in enumerate(semrep.readings): + print(f"{i + 1}: {reading}") + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/drt.py b/venv/lib/python3.10/site-packages/nltk/sem/drt.py new file mode 100644 index 0000000000000000000000000000000000000000..53441d6617310683bab97bb7abd84f656ebc28af --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/drt.py @@ -0,0 +1,1460 @@ +# Natural Language Toolkit: Discourse Representation Theory (DRT) +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import operator +from functools import reduce +from itertools import chain + +from nltk.sem.logic import ( + APP, + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + BinaryExpression, + BooleanExpression, + ConstantExpression, + EqualityExpression, + EventVariableExpression, + ExistsExpression, + Expression, + FunctionVariableExpression, + ImpExpression, + IndividualVariableExpression, + LambdaExpression, + LogicParser, + NegatedExpression, + OrExpression, + Tokens, + Variable, + is_eventvar, + is_funcvar, + is_indvar, + unique_variable, +) + +# Import Tkinter-based modules if they are available +try: + from tkinter import Canvas, Tk + from tkinter.font import Font + + from nltk.util import in_idle + +except ImportError: + # No need to print a warning here, nltk.draw has already printed one. 
+ pass + + +class DrtTokens(Tokens): + DRS = "DRS" + DRS_CONC = "+" + PRONOUN = "PRO" + OPEN_BRACKET = "[" + CLOSE_BRACKET = "]" + COLON = ":" + + PUNCT = [DRS_CONC, OPEN_BRACKET, CLOSE_BRACKET, COLON] + + SYMBOLS = Tokens.SYMBOLS + PUNCT + + TOKENS = Tokens.TOKENS + [DRS] + PUNCT + + +class DrtParser(LogicParser): + """A lambda calculus expression parser.""" + + def __init__(self): + LogicParser.__init__(self) + + self.operator_precedence = dict( + [(x, 1) for x in DrtTokens.LAMBDA_LIST] + + [(x, 2) for x in DrtTokens.NOT_LIST] + + [(APP, 3)] + + [(x, 4) for x in DrtTokens.EQ_LIST + Tokens.NEQ_LIST] + + [(DrtTokens.COLON, 5)] + + [(DrtTokens.DRS_CONC, 6)] + + [(x, 7) for x in DrtTokens.OR_LIST] + + [(x, 8) for x in DrtTokens.IMP_LIST] + + [(None, 9)] + ) + + def get_all_symbols(self): + """This method exists to be overridden""" + return DrtTokens.SYMBOLS + + def isvariable(self, tok): + return tok not in DrtTokens.TOKENS + + def handle(self, tok, context): + """This method is intended to be overridden for logics that + use different operators or expressions""" + if tok in DrtTokens.NOT_LIST: + return self.handle_negation(tok, context) + + elif tok in DrtTokens.LAMBDA_LIST: + return self.handle_lambda(tok, context) + + elif tok == DrtTokens.OPEN: + if self.inRange(0) and self.token(0) == DrtTokens.OPEN_BRACKET: + return self.handle_DRS(tok, context) + else: + return self.handle_open(tok, context) + + elif tok.upper() == DrtTokens.DRS: + self.assertNextToken(DrtTokens.OPEN) + return self.handle_DRS(tok, context) + + elif self.isvariable(tok): + if self.inRange(0) and self.token(0) == DrtTokens.COLON: + return self.handle_prop(tok, context) + else: + return self.handle_variable(tok, context) + + def make_NegatedExpression(self, expression): + return DrtNegatedExpression(expression) + + def handle_DRS(self, tok, context): + # a DRS + refs = self.handle_refs() + if ( + self.inRange(0) and self.token(0) == DrtTokens.COMMA + ): # if there is a comma (it's optional) + self.token() # swallow the comma + conds = self.handle_conds(context) + self.assertNextToken(DrtTokens.CLOSE) + return DRS(refs, conds, None) + + def handle_refs(self): + self.assertNextToken(DrtTokens.OPEN_BRACKET) + refs = [] + while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET: + # Support expressions like: DRS([x y],C) == DRS([x,y],C) + if refs and self.token(0) == DrtTokens.COMMA: + self.token() # swallow the comma + refs.append(self.get_next_token_variable("quantified")) + self.assertNextToken(DrtTokens.CLOSE_BRACKET) + return refs + + def handle_conds(self, context): + self.assertNextToken(DrtTokens.OPEN_BRACKET) + conds = [] + while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET: + # Support expressions like: DRS([x y],C) == DRS([x, y],C) + if conds and self.token(0) == DrtTokens.COMMA: + self.token() # swallow the comma + conds.append(self.process_next_expression(context)) + self.assertNextToken(DrtTokens.CLOSE_BRACKET) + return conds + + def handle_prop(self, tok, context): + variable = self.make_VariableExpression(tok) + self.assertNextToken(":") + drs = self.process_next_expression(DrtTokens.COLON) + return DrtProposition(variable, drs) + + def make_EqualityExpression(self, first, second): + """This method serves as a hook for other logic parsers that + have different equality expression classes""" + return DrtEqualityExpression(first, second) + + def get_BooleanExpression_factory(self, tok): + """This method serves as a hook for other logic parsers that + have different boolean operators""" 
+ if tok == DrtTokens.DRS_CONC: + return lambda first, second: DrtConcatenation(first, second, None) + elif tok in DrtTokens.OR_LIST: + return DrtOrExpression + elif tok in DrtTokens.IMP_LIST: + + def make_imp_expression(first, second): + if isinstance(first, DRS): + return DRS(first.refs, first.conds, second) + if isinstance(first, DrtConcatenation): + return DrtConcatenation(first.first, first.second, second) + raise Exception("Antecedent of implication must be a DRS") + + return make_imp_expression + else: + return None + + def make_BooleanExpression(self, factory, first, second): + return factory(first, second) + + def make_ApplicationExpression(self, function, argument): + return DrtApplicationExpression(function, argument) + + def make_VariableExpression(self, name): + return DrtVariableExpression(Variable(name)) + + def make_LambdaExpression(self, variables, term): + return DrtLambdaExpression(variables, term) + + +class DrtExpression: + """ + This is the base abstract DRT Expression from which every DRT + Expression extends. + """ + + _drt_parser = DrtParser() + + @classmethod + def fromstring(cls, s): + return cls._drt_parser.parse(s) + + def applyto(self, other): + return DrtApplicationExpression(self, other) + + def __neg__(self): + return DrtNegatedExpression(self) + + def __and__(self, other): + return NotImplemented + + def __or__(self, other): + assert isinstance(other, DrtExpression) + return DrtOrExpression(self, other) + + def __gt__(self, other): + assert isinstance(other, DrtExpression) + if isinstance(self, DRS): + return DRS(self.refs, self.conds, other) + if isinstance(self, DrtConcatenation): + return DrtConcatenation(self.first, self.second, other) + raise Exception("Antecedent of implication must be a DRS") + + def equiv(self, other, prover=None): + """ + Check for logical equivalence. + Pass the expression (self <-> other) to the theorem prover. + If the prover says it is valid, then the self and other are equal. + + :param other: an ``DrtExpression`` to check equality against + :param prover: a ``nltk.inference.api.Prover`` + """ + assert isinstance(other, DrtExpression) + + f1 = self.simplify().fol() + f2 = other.simplify().fol() + return f1.equiv(f2, prover) + + @property + def type(self): + raise AttributeError( + "'%s' object has no attribute 'type'" % self.__class__.__name__ + ) + + def typecheck(self, signature=None): + raise NotImplementedError() + + def __add__(self, other): + return DrtConcatenation(self, other, None) + + def get_refs(self, recursive=False): + """ + Return the set of discourse referents in this DRS. + :param recursive: bool Also find discourse referents in subterms? 
+ :return: list of ``Variable`` objects + """ + raise NotImplementedError() + + def is_pronoun_function(self): + """Is self of the form "PRO(x)"?""" + return ( + isinstance(self, DrtApplicationExpression) + and isinstance(self.function, DrtAbstractVariableExpression) + and self.function.variable.name == DrtTokens.PRONOUN + and isinstance(self.argument, DrtIndividualVariableExpression) + ) + + def make_EqualityExpression(self, first, second): + return DrtEqualityExpression(first, second) + + def make_VariableExpression(self, variable): + return DrtVariableExpression(variable) + + def resolve_anaphora(self): + return resolve_anaphora(self) + + def eliminate_equality(self): + return self.visit_structured(lambda e: e.eliminate_equality(), self.__class__) + + def pretty_format(self): + """ + Draw the DRS + :return: the pretty print string + """ + return "\n".join(self._pretty()) + + def pretty_print(self): + print(self.pretty_format()) + + def draw(self): + DrsDrawer(self).draw() + + +class DRS(DrtExpression, Expression): + """A Discourse Representation Structure.""" + + def __init__(self, refs, conds, consequent=None): + """ + :param refs: list of ``DrtIndividualVariableExpression`` for the + discourse referents + :param conds: list of ``Expression`` for the conditions + """ + self.refs = refs + self.conds = conds + self.consequent = consequent + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """Replace all instances of variable v with expression E in self, + where v is free in self.""" + if variable in self.refs: + # if a bound variable is the thing being replaced + if not replace_bound: + return self + else: + i = self.refs.index(variable) + if self.consequent: + consequent = self.consequent.replace( + variable, expression, True, alpha_convert + ) + else: + consequent = None + return DRS( + self.refs[:i] + [expression.variable] + self.refs[i + 1 :], + [ + cond.replace(variable, expression, True, alpha_convert) + for cond in self.conds + ], + consequent, + ) + else: + if alpha_convert: + # any bound variable that appears in the expression must + # be alpha converted to avoid a conflict + for ref in set(self.refs) & expression.free(): + newvar = unique_variable(ref) + newvarex = DrtVariableExpression(newvar) + i = self.refs.index(ref) + if self.consequent: + consequent = self.consequent.replace( + ref, newvarex, True, alpha_convert + ) + else: + consequent = None + self = DRS( + self.refs[:i] + [newvar] + self.refs[i + 1 :], + [ + cond.replace(ref, newvarex, True, alpha_convert) + for cond in self.conds + ], + consequent, + ) + + # replace in the conditions + if self.consequent: + consequent = self.consequent.replace( + variable, expression, replace_bound, alpha_convert + ) + else: + consequent = None + return DRS( + self.refs, + [ + cond.replace(variable, expression, replace_bound, alpha_convert) + for cond in self.conds + ], + consequent, + ) + + def free(self): + """:see: Expression.free()""" + conds_free = reduce(operator.or_, [c.free() for c in self.conds], set()) + if self.consequent: + conds_free.update(self.consequent.free()) + return conds_free - set(self.refs) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + if recursive: + conds_refs = self.refs + list( + chain.from_iterable(c.get_refs(True) for c in self.conds) + ) + if self.consequent: + conds_refs.extend(self.consequent.get_refs(True)) + return conds_refs + else: + return self.refs + + def visit(self, function, combinator): + """:see: 
Expression.visit()""" + parts = list(map(function, self.conds)) + if self.consequent: + parts.append(function(self.consequent)) + return combinator(parts) + + def visit_structured(self, function, combinator): + """:see: Expression.visit_structured()""" + consequent = function(self.consequent) if self.consequent else None + return combinator(self.refs, list(map(function, self.conds)), consequent) + + def eliminate_equality(self): + drs = self + i = 0 + while i < len(drs.conds): + cond = drs.conds[i] + if ( + isinstance(cond, EqualityExpression) + and isinstance(cond.first, AbstractVariableExpression) + and isinstance(cond.second, AbstractVariableExpression) + ): + drs = DRS( + list(set(drs.refs) - {cond.second.variable}), + drs.conds[:i] + drs.conds[i + 1 :], + drs.consequent, + ) + if cond.second.variable != cond.first.variable: + drs = drs.replace(cond.second.variable, cond.first, False, False) + i = 0 + i -= 1 + i += 1 + + conds = [] + for cond in drs.conds: + new_cond = cond.eliminate_equality() + new_cond_simp = new_cond.simplify() + if ( + not isinstance(new_cond_simp, DRS) + or new_cond_simp.refs + or new_cond_simp.conds + or new_cond_simp.consequent + ): + conds.append(new_cond) + + consequent = drs.consequent.eliminate_equality() if drs.consequent else None + return DRS(drs.refs, conds, consequent) + + def fol(self): + if self.consequent: + accum = None + if self.conds: + accum = reduce(AndExpression, [c.fol() for c in self.conds]) + + if accum: + accum = ImpExpression(accum, self.consequent.fol()) + else: + accum = self.consequent.fol() + + for ref in self.refs[::-1]: + accum = AllExpression(ref, accum) + + return accum + + else: + if not self.conds: + raise Exception("Cannot convert DRS with no conditions to FOL.") + accum = reduce(AndExpression, [c.fol() for c in self.conds]) + for ref in map(Variable, self._order_ref_strings(self.refs)[::-1]): + accum = ExistsExpression(ref, accum) + return accum + + def _pretty(self): + refs_line = " ".join(self._order_ref_strings(self.refs)) + + cond_lines = [ + cond + for cond_line in [ + filter(lambda s: s.strip(), cond._pretty()) for cond in self.conds + ] + for cond in cond_line + ] + length = max([len(refs_line)] + list(map(len, cond_lines))) + drs = ( + [ + " _" + "_" * length + "_ ", + "| " + refs_line.ljust(length) + " |", + "|-" + "-" * length + "-|", + ] + + ["| " + line.ljust(length) + " |" for line in cond_lines] + + ["|_" + "_" * length + "_|"] + ) + if self.consequent: + return DrtBinaryExpression._assemble_pretty( + drs, DrtTokens.IMP, self.consequent._pretty() + ) + return drs + + def _order_ref_strings(self, refs): + strings = ["%s" % ref for ref in refs] + ind_vars = [] + func_vars = [] + event_vars = [] + other_vars = [] + for s in strings: + if is_indvar(s): + ind_vars.append(s) + elif is_funcvar(s): + func_vars.append(s) + elif is_eventvar(s): + event_vars.append(s) + else: + other_vars.append(s) + return ( + sorted(other_vars) + + sorted(event_vars, key=lambda v: int([v[2:], -1][len(v[2:]) == 0])) + + sorted(func_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0]))) + + sorted(ind_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0]))) + ) + + def __eq__(self, other): + r"""Defines equality modulo alphabetic variance. 
+ If we are comparing \x.M and \y.N, then check equality of M and N[x/y].""" + if isinstance(other, DRS): + if len(self.refs) == len(other.refs): + converted_other = other + for (r1, r2) in zip(self.refs, converted_other.refs): + varex = self.make_VariableExpression(r1) + converted_other = converted_other.replace(r2, varex, True) + if self.consequent == converted_other.consequent and len( + self.conds + ) == len(converted_other.conds): + for c1, c2 in zip(self.conds, converted_other.conds): + if not (c1 == c2): + return False + return True + return False + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + drs = "([{}],[{}])".format( + ",".join(self._order_ref_strings(self.refs)), + ", ".join("%s" % cond for cond in self.conds), + ) # map(str, self.conds))) + if self.consequent: + return ( + DrtTokens.OPEN + + drs + + " " + + DrtTokens.IMP + + " " + + "%s" % self.consequent + + DrtTokens.CLOSE + ) + return drs + + +def DrtVariableExpression(variable): + """ + This is a factory method that instantiates and returns a subtype of + ``DrtAbstractVariableExpression`` appropriate for the given variable. + """ + if is_indvar(variable.name): + return DrtIndividualVariableExpression(variable) + elif is_funcvar(variable.name): + return DrtFunctionVariableExpression(variable) + elif is_eventvar(variable.name): + return DrtEventVariableExpression(variable) + else: + return DrtConstantExpression(variable) + + +class DrtAbstractVariableExpression(DrtExpression, AbstractVariableExpression): + def fol(self): + return self + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return [] + + def _pretty(self): + s = "%s" % self + blank = " " * len(s) + return [blank, blank, s, blank] + + def eliminate_equality(self): + return self + + +class DrtIndividualVariableExpression( + DrtAbstractVariableExpression, IndividualVariableExpression +): + pass + + +class DrtFunctionVariableExpression( + DrtAbstractVariableExpression, FunctionVariableExpression +): + pass + + +class DrtEventVariableExpression( + DrtIndividualVariableExpression, EventVariableExpression +): + pass + + +class DrtConstantExpression(DrtAbstractVariableExpression, ConstantExpression): + pass + + +class DrtProposition(DrtExpression, Expression): + def __init__(self, variable, drs): + self.variable = variable + self.drs = drs + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + if self.variable == variable: + assert isinstance( + expression, DrtAbstractVariableExpression + ), "Can only replace a proposition label with a variable" + return DrtProposition( + expression.variable, + self.drs.replace(variable, expression, replace_bound, alpha_convert), + ) + else: + return DrtProposition( + self.variable, + self.drs.replace(variable, expression, replace_bound, alpha_convert), + ) + + def eliminate_equality(self): + return DrtProposition(self.variable, self.drs.eliminate_equality()) + + def get_refs(self, recursive=False): + return self.drs.get_refs(True) if recursive else [] + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.variable == other.variable + and self.drs == other.drs + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def fol(self): + return self.drs.fol() + + def _pretty(self): + drs_s = self.drs._pretty() + blank = " " * len("%s" % self.variable) + return ( + [blank + " " + line for line in drs_s[:1]] + + ["%s" % self.variable + 
":" + line for line in drs_s[1:2]] + + [blank + " " + line for line in drs_s[2:]] + ) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.drs)]) + + def visit_structured(self, function, combinator): + """:see: Expression.visit_structured()""" + return combinator(self.variable, function(self.drs)) + + def __str__(self): + return f"prop({self.variable}, {self.drs})" + + +class DrtNegatedExpression(DrtExpression, NegatedExpression): + def fol(self): + return NegatedExpression(self.term.fol()) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return self.term.get_refs(recursive) + + def _pretty(self): + term_lines = self.term._pretty() + return ( + [" " + line for line in term_lines[:2]] + + ["__ " + line for line in term_lines[2:3]] + + [" | " + line for line in term_lines[3:4]] + + [" " + line for line in term_lines[4:]] + ) + + +class DrtLambdaExpression(DrtExpression, LambdaExpression): + def alpha_convert(self, newvar): + """Rename all occurrences of the variable introduced by this variable + binder in the expression to ``newvar``. + :param newvar: ``Variable``, for the new variable + """ + return self.__class__( + newvar, + self.term.replace(self.variable, DrtVariableExpression(newvar), True), + ) + + def fol(self): + return LambdaExpression(self.variable, self.term.fol()) + + def _pretty(self): + variables = [self.variable] + term = self.term + while term.__class__ == self.__class__: + variables.append(term.variable) + term = term.term + var_string = " ".join("%s" % v for v in variables) + DrtTokens.DOT + term_lines = term._pretty() + blank = " " * len(var_string) + return ( + [" " + blank + line for line in term_lines[:1]] + + [r" \ " + blank + line for line in term_lines[1:2]] + + [r" /\ " + var_string + line for line in term_lines[2:3]] + + [" " + blank + line for line in term_lines[3:]] + ) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return ( + [self.variable] + self.term.get_refs(True) if recursive else [self.variable] + ) + + +class DrtBinaryExpression(DrtExpression, BinaryExpression): + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return ( + self.first.get_refs(True) + self.second.get_refs(True) if recursive else [] + ) + + def _pretty(self): + return DrtBinaryExpression._assemble_pretty( + self._pretty_subex(self.first), + self.getOp(), + self._pretty_subex(self.second), + ) + + @staticmethod + def _assemble_pretty(first_lines, op, second_lines): + max_lines = max(len(first_lines), len(second_lines)) + first_lines = _pad_vertically(first_lines, max_lines) + second_lines = _pad_vertically(second_lines, max_lines) + blank = " " * len(op) + first_second_lines = list(zip(first_lines, second_lines)) + return ( + [ + " " + first_line + " " + blank + " " + second_line + " " + for first_line, second_line in first_second_lines[:2] + ] + + [ + "(" + first_line + " " + op + " " + second_line + ")" + for first_line, second_line in first_second_lines[2:3] + ] + + [ + " " + first_line + " " + blank + " " + second_line + " " + for first_line, second_line in first_second_lines[3:] + ] + ) + + def _pretty_subex(self, subex): + return subex._pretty() + + +class DrtBooleanExpression(DrtBinaryExpression, BooleanExpression): + pass + + +class DrtOrExpression(DrtBooleanExpression, OrExpression): + def fol(self): + return OrExpression(self.first.fol(), self.second.fol()) + + def _pretty_subex(self, subex): + if 
isinstance(subex, DrtOrExpression): + return [line[1:-1] for line in subex._pretty()] + return DrtBooleanExpression._pretty_subex(self, subex) + + +class DrtEqualityExpression(DrtBinaryExpression, EqualityExpression): + def fol(self): + return EqualityExpression(self.first.fol(), self.second.fol()) + + +class DrtConcatenation(DrtBooleanExpression): + """DRS of the form '(DRS + DRS)'""" + + def __init__(self, first, second, consequent=None): + DrtBooleanExpression.__init__(self, first, second) + self.consequent = consequent + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """Replace all instances of variable v with expression E in self, + where v is free in self.""" + first = self.first + second = self.second + consequent = self.consequent + + # If variable is bound + if variable in self.get_refs(): + if replace_bound: + first = first.replace( + variable, expression, replace_bound, alpha_convert + ) + second = second.replace( + variable, expression, replace_bound, alpha_convert + ) + if consequent: + consequent = consequent.replace( + variable, expression, replace_bound, alpha_convert + ) + else: + if alpha_convert: + # alpha convert every ref that is free in 'expression' + for ref in set(self.get_refs(True)) & expression.free(): + v = DrtVariableExpression(unique_variable(ref)) + first = first.replace(ref, v, True, alpha_convert) + second = second.replace(ref, v, True, alpha_convert) + if consequent: + consequent = consequent.replace(ref, v, True, alpha_convert) + + first = first.replace(variable, expression, replace_bound, alpha_convert) + second = second.replace(variable, expression, replace_bound, alpha_convert) + if consequent: + consequent = consequent.replace( + variable, expression, replace_bound, alpha_convert + ) + + return self.__class__(first, second, consequent) + + def eliminate_equality(self): + # TODO: at some point. for now, simplify. + drs = self.simplify() + assert not isinstance(drs, DrtConcatenation) + return drs.eliminate_equality() + + def simplify(self): + first = self.first.simplify() + second = self.second.simplify() + consequent = self.consequent.simplify() if self.consequent else None + + if isinstance(first, DRS) and isinstance(second, DRS): + # For any ref that is in both 'first' and 'second' + for ref in set(first.get_refs(True)) & set(second.get_refs(True)): + # alpha convert the ref in 'second' to prevent collision + newvar = DrtVariableExpression(unique_variable(ref)) + second = second.replace(ref, newvar, True) + + return DRS(first.refs + second.refs, first.conds + second.conds, consequent) + else: + return self.__class__(first, second, consequent) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + refs = self.first.get_refs(recursive) + self.second.get_refs(recursive) + if self.consequent and recursive: + refs.extend(self.consequent.get_refs(True)) + return refs + + def getOp(self): + return DrtTokens.DRS_CONC + + def __eq__(self, other): + r"""Defines equality modulo alphabetic variance. 
+ If we are comparing \x.M and \y.N, then check equality of M and N[x/y].""" + if isinstance(other, DrtConcatenation): + self_refs = self.get_refs() + other_refs = other.get_refs() + if len(self_refs) == len(other_refs): + converted_other = other + for (r1, r2) in zip(self_refs, other_refs): + varex = self.make_VariableExpression(r1) + converted_other = converted_other.replace(r2, varex, True) + return ( + self.first == converted_other.first + and self.second == converted_other.second + and self.consequent == converted_other.consequent + ) + return False + + def __ne__(self, other): + return not self == other + + __hash__ = DrtBooleanExpression.__hash__ + + def fol(self): + e = AndExpression(self.first.fol(), self.second.fol()) + if self.consequent: + e = ImpExpression(e, self.consequent.fol()) + return e + + def _pretty(self): + drs = DrtBinaryExpression._assemble_pretty( + self._pretty_subex(self.first), + self.getOp(), + self._pretty_subex(self.second), + ) + if self.consequent: + drs = DrtBinaryExpression._assemble_pretty( + drs, DrtTokens.IMP, self.consequent._pretty() + ) + return drs + + def _pretty_subex(self, subex): + if isinstance(subex, DrtConcatenation): + return [line[1:-1] for line in subex._pretty()] + return DrtBooleanExpression._pretty_subex(self, subex) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + if self.consequent: + return combinator( + [function(self.first), function(self.second), function(self.consequent)] + ) + else: + return combinator([function(self.first), function(self.second)]) + + def __str__(self): + first = self._str_subex(self.first) + second = self._str_subex(self.second) + drs = Tokens.OPEN + first + " " + self.getOp() + " " + second + Tokens.CLOSE + if self.consequent: + return ( + DrtTokens.OPEN + + drs + + " " + + DrtTokens.IMP + + " " + + "%s" % self.consequent + + DrtTokens.CLOSE + ) + return drs + + def _str_subex(self, subex): + s = "%s" % subex + if isinstance(subex, DrtConcatenation) and subex.consequent is None: + return s[1:-1] + return s + + +class DrtApplicationExpression(DrtExpression, ApplicationExpression): + def fol(self): + return ApplicationExpression(self.function.fol(), self.argument.fol()) + + def get_refs(self, recursive=False): + """:see: AbstractExpression.get_refs()""" + return ( + self.function.get_refs(True) + self.argument.get_refs(True) + if recursive + else [] + ) + + def _pretty(self): + function, args = self.uncurry() + function_lines = function._pretty() + args_lines = [arg._pretty() for arg in args] + max_lines = max(map(len, [function_lines] + args_lines)) + function_lines = _pad_vertically(function_lines, max_lines) + args_lines = [_pad_vertically(arg_lines, max_lines) for arg_lines in args_lines] + func_args_lines = list(zip(function_lines, list(zip(*args_lines)))) + return ( + [ + func_line + " " + " ".join(args_line) + " " + for func_line, args_line in func_args_lines[:2] + ] + + [ + func_line + "(" + ",".join(args_line) + ")" + for func_line, args_line in func_args_lines[2:3] + ] + + [ + func_line + " " + " ".join(args_line) + " " + for func_line, args_line in func_args_lines[3:] + ] + ) + + +def _pad_vertically(lines, max_lines): + pad_line = [" " * len(lines[0])] + return lines + pad_line * (max_lines - len(lines)) + + +class PossibleAntecedents(list, DrtExpression, Expression): + def free(self): + """Set of free variables.""" + return set(self) + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """Replace all instances of variable v with 
expression E in self,
+        where v is free in self."""
+        result = PossibleAntecedents()
+        for item in self:
+            # build the new list rather than mutating self
+            if item == variable:
+                result.append(expression)
+            else:
+                result.append(item)
+        return result
+
+    def _pretty(self):
+        s = "%s" % self
+        blank = " " * len(s)
+        return [blank, blank, s]
+
+    def __str__(self):
+        return "[" + ",".join("%s" % it for it in self) + "]"
+
+
+class AnaphoraResolutionException(Exception):
+    pass
+
+
+def resolve_anaphora(expression, trail=[]):
+    if isinstance(expression, ApplicationExpression):
+        if expression.is_pronoun_function():
+            possible_antecedents = PossibleAntecedents()
+            for ancestor in trail:
+                for ref in ancestor.get_refs():
+                    refex = expression.make_VariableExpression(ref)
+
+                    # ==========================================================
+                    # Don't allow resolution to itself or other types
+                    # ==========================================================
+                    if refex.__class__ == expression.argument.__class__ and not (
+                        refex == expression.argument
+                    ):
+                        possible_antecedents.append(refex)
+
+            if len(possible_antecedents) == 1:
+                resolution = possible_antecedents[0]
+            else:
+                resolution = possible_antecedents
+            return expression.make_EqualityExpression(expression.argument, resolution)
+        else:
+            r_function = resolve_anaphora(expression.function, trail + [expression])
+            r_argument = resolve_anaphora(expression.argument, trail + [expression])
+            return expression.__class__(r_function, r_argument)
+
+    elif isinstance(expression, DRS):
+        r_conds = []
+        for cond in expression.conds:
+            r_cond = resolve_anaphora(cond, trail + [expression])
+
+            # if the condition is of the form '(x = [])' then raise exception
+            if isinstance(r_cond, EqualityExpression):
+                if isinstance(r_cond.first, PossibleAntecedents):
+                    # Reverse the order so that the variable is on the left
+                    temp = r_cond.first
+                    r_cond.first = r_cond.second
+                    r_cond.second = temp
+                if isinstance(r_cond.second, PossibleAntecedents):
+                    if not r_cond.second:
+                        raise AnaphoraResolutionException(
+                            "Variable '%s' does not "
+                            "resolve to anything."
% r_cond.first + ) + + r_conds.append(r_cond) + if expression.consequent: + consequent = resolve_anaphora(expression.consequent, trail + [expression]) + else: + consequent = None + return expression.__class__(expression.refs, r_conds, consequent) + + elif isinstance(expression, AbstractVariableExpression): + return expression + + elif isinstance(expression, NegatedExpression): + return expression.__class__( + resolve_anaphora(expression.term, trail + [expression]) + ) + + elif isinstance(expression, DrtConcatenation): + if expression.consequent: + consequent = resolve_anaphora(expression.consequent, trail + [expression]) + else: + consequent = None + return expression.__class__( + resolve_anaphora(expression.first, trail + [expression]), + resolve_anaphora(expression.second, trail + [expression]), + consequent, + ) + + elif isinstance(expression, BinaryExpression): + return expression.__class__( + resolve_anaphora(expression.first, trail + [expression]), + resolve_anaphora(expression.second, trail + [expression]), + ) + + elif isinstance(expression, LambdaExpression): + return expression.__class__( + expression.variable, resolve_anaphora(expression.term, trail + [expression]) + ) + + +class DrsDrawer: + BUFFER = 3 # Space between elements + TOPSPACE = 10 # Space above whole DRS + OUTERSPACE = 6 # Space to the left, right, and bottom of the while DRS + + def __init__(self, drs, size_canvas=True, canvas=None): + """ + :param drs: ``DrtExpression``, The DRS to be drawn + :param size_canvas: bool, True if the canvas size should be the exact size of the DRS + :param canvas: ``Canvas`` The canvas on which to draw the DRS. If none is given, create a new canvas. + """ + master = None + if not canvas: + master = Tk() + master.title("DRT") + + font = Font(family="helvetica", size=12) + + if size_canvas: + canvas = Canvas(master, width=0, height=0) + canvas.font = font + self.canvas = canvas + (right, bottom) = self._visit(drs, self.OUTERSPACE, self.TOPSPACE) + + width = max(right + self.OUTERSPACE, 100) + height = bottom + self.OUTERSPACE + canvas = Canvas(master, width=width, height=height) # , bg='white') + else: + canvas = Canvas(master, width=300, height=300) + + canvas.pack() + canvas.font = font + + self.canvas = canvas + self.drs = drs + self.master = master + + def _get_text_height(self): + """Get the height of a line of text""" + return self.canvas.font.metrics("linespace") + + def draw(self, x=OUTERSPACE, y=TOPSPACE): + """Draw the DRS""" + self._handle(self.drs, self._draw_command, x, y) + + if self.master and not in_idle(): + self.master.mainloop() + else: + return self._visit(self.drs, x, y) + + def _visit(self, expression, x, y): + """ + Return the bottom-rightmost point without actually drawing the item + + :param expression: the item to visit + :param x: the top of the current drawing area + :param y: the left side of the current drawing area + :return: the bottom-rightmost point + """ + return self._handle(expression, self._visit_command, x, y) + + def _draw_command(self, item, x, y): + """ + Draw the given item at the given location + + :param item: the item to draw + :param x: the top of the current drawing area + :param y: the left side of the current drawing area + :return: the bottom-rightmost point + """ + if isinstance(item, str): + self.canvas.create_text(x, y, anchor="nw", font=self.canvas.font, text=item) + elif isinstance(item, tuple): + # item is the lower-right of a box + (right, bottom) = item + self.canvas.create_rectangle(x, y, right, bottom) + horiz_line_y = ( + y + 
self._get_text_height() + (self.BUFFER * 2) + ) # the line separating refs from conds + self.canvas.create_line(x, horiz_line_y, right, horiz_line_y) + + return self._visit_command(item, x, y) + + def _visit_command(self, item, x, y): + """ + Return the bottom-rightmost point without actually drawing the item + + :param item: the item to visit + :param x: the top of the current drawing area + :param y: the left side of the current drawing area + :return: the bottom-rightmost point + """ + if isinstance(item, str): + return (x + self.canvas.font.measure(item), y + self._get_text_height()) + elif isinstance(item, tuple): + return item + + def _handle(self, expression, command, x=0, y=0): + """ + :param expression: the expression to handle + :param command: the function to apply, either _draw_command or _visit_command + :param x: the top of the current drawing area + :param y: the left side of the current drawing area + :return: the bottom-rightmost point + """ + if command == self._visit_command: + # if we don't need to draw the item, then we can use the cached values + try: + # attempt to retrieve cached values + right = expression._drawing_width + x + bottom = expression._drawing_height + y + return (right, bottom) + except AttributeError: + # the values have not been cached yet, so compute them + pass + + if isinstance(expression, DrtAbstractVariableExpression): + factory = self._handle_VariableExpression + elif isinstance(expression, DRS): + factory = self._handle_DRS + elif isinstance(expression, DrtNegatedExpression): + factory = self._handle_NegatedExpression + elif isinstance(expression, DrtLambdaExpression): + factory = self._handle_LambdaExpression + elif isinstance(expression, BinaryExpression): + factory = self._handle_BinaryExpression + elif isinstance(expression, DrtApplicationExpression): + factory = self._handle_ApplicationExpression + elif isinstance(expression, PossibleAntecedents): + factory = self._handle_VariableExpression + elif isinstance(expression, DrtProposition): + factory = self._handle_DrtProposition + else: + raise Exception(expression.__class__.__name__) + + (right, bottom) = factory(expression, command, x, y) + + # cache the values + expression._drawing_width = right - x + expression._drawing_height = bottom - y + + return (right, bottom) + + def _handle_VariableExpression(self, expression, command, x, y): + return command("%s" % expression, x, y) + + def _handle_NegatedExpression(self, expression, command, x, y): + # Find the width of the negation symbol + right = self._visit_command(DrtTokens.NOT, x, y)[0] + + # Handle term + (right, bottom) = self._handle(expression.term, command, right, y) + + # Handle variables now that we know the y-coordinate + command( + DrtTokens.NOT, + x, + self._get_centered_top(y, bottom - y, self._get_text_height()), + ) + + return (right, bottom) + + def _handle_DRS(self, expression, command, x, y): + left = x + self.BUFFER # indent the left side + bottom = y + self.BUFFER # indent the top + + # Handle Discourse Referents + if expression.refs: + refs = " ".join("%s" % r for r in expression.refs) + else: + refs = " " + (max_right, bottom) = command(refs, left, bottom) + bottom += self.BUFFER * 2 + + # Handle Conditions + if expression.conds: + for cond in expression.conds: + (right, bottom) = self._handle(cond, command, left, bottom) + max_right = max(max_right, right) + bottom += self.BUFFER + else: + bottom += self._get_text_height() + self.BUFFER + + # Handle Box + max_right += self.BUFFER + return command((max_right, bottom), 
x, y) + + def _handle_ApplicationExpression(self, expression, command, x, y): + function, args = expression.uncurry() + if not isinstance(function, DrtAbstractVariableExpression): + # It's not a predicate expression ("P(x,y)"), so leave arguments curried + function = expression.function + args = [expression.argument] + + # Get the max bottom of any element on the line + function_bottom = self._visit(function, x, y)[1] + max_bottom = max( + [function_bottom] + [self._visit(arg, x, y)[1] for arg in args] + ) + + line_height = max_bottom - y + + # Handle 'function' + function_drawing_top = self._get_centered_top( + y, line_height, function._drawing_height + ) + right = self._handle(function, command, x, function_drawing_top)[0] + + # Handle open paren + centred_string_top = self._get_centered_top( + y, line_height, self._get_text_height() + ) + right = command(DrtTokens.OPEN, right, centred_string_top)[0] + + # Handle each arg + for (i, arg) in enumerate(args): + arg_drawing_top = self._get_centered_top( + y, line_height, arg._drawing_height + ) + right = self._handle(arg, command, right, arg_drawing_top)[0] + + if i + 1 < len(args): + # since it's not the last arg, add a comma + right = command(DrtTokens.COMMA + " ", right, centred_string_top)[0] + + # Handle close paren + right = command(DrtTokens.CLOSE, right, centred_string_top)[0] + + return (right, max_bottom) + + def _handle_LambdaExpression(self, expression, command, x, y): + # Find the width of the lambda symbol and abstracted variables + variables = DrtTokens.LAMBDA + "%s" % expression.variable + DrtTokens.DOT + right = self._visit_command(variables, x, y)[0] + + # Handle term + (right, bottom) = self._handle(expression.term, command, right, y) + + # Handle variables now that we know the y-coordinate + command( + variables, x, self._get_centered_top(y, bottom - y, self._get_text_height()) + ) + + return (right, bottom) + + def _handle_BinaryExpression(self, expression, command, x, y): + # Get the full height of the line, based on the operands + first_height = self._visit(expression.first, 0, 0)[1] + second_height = self._visit(expression.second, 0, 0)[1] + line_height = max(first_height, second_height) + + # Handle open paren + centred_string_top = self._get_centered_top( + y, line_height, self._get_text_height() + ) + right = command(DrtTokens.OPEN, x, centred_string_top)[0] + + # Handle the first operand + first_height = expression.first._drawing_height + (right, first_bottom) = self._handle( + expression.first, + command, + right, + self._get_centered_top(y, line_height, first_height), + ) + + # Handle the operator + right = command(" %s " % expression.getOp(), right, centred_string_top)[0] + + # Handle the second operand + second_height = expression.second._drawing_height + (right, second_bottom) = self._handle( + expression.second, + command, + right, + self._get_centered_top(y, line_height, second_height), + ) + + # Handle close paren + right = command(DrtTokens.CLOSE, right, centred_string_top)[0] + + return (right, max(first_bottom, second_bottom)) + + def _handle_DrtProposition(self, expression, command, x, y): + # Find the width of the negation symbol + right = command(expression.variable, x, y)[0] + + # Handle term + (right, bottom) = self._handle(expression.term, command, right, y) + + return (right, bottom) + + def _get_centered_top(self, top, full_height, item_height): + """Get the y-coordinate of the point that a figure should start at if + its height is 'item_height' and it needs to be centered in an area that + 
starts at 'top' and is 'full_height' tall.""" + return top + (full_height - item_height) / 2 + + +def demo(): + print("=" * 20 + "TEST PARSE" + "=" * 20) + dexpr = DrtExpression.fromstring + print(dexpr(r"([x,y],[sees(x,y)])")) + print(dexpr(r"([x],[man(x), walks(x)])")) + print(dexpr(r"\x.\y.([],[sees(x,y)])")) + print(dexpr(r"\x.([],[walks(x)])(john)")) + print(dexpr(r"(([x],[walks(x)]) + ([y],[runs(y)]))")) + print(dexpr(r"(([],[walks(x)]) -> ([],[runs(x)]))")) + print(dexpr(r"([x],[PRO(x), sees(John,x)])")) + print(dexpr(r"([x],[man(x), -([],[walks(x)])])")) + print(dexpr(r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])")) + + print("=" * 20 + "Test fol()" + "=" * 20) + print(dexpr(r"([x,y],[sees(x,y)])").fol()) + + print("=" * 20 + "Test alpha conversion and lambda expression equality" + "=" * 20) + e1 = dexpr(r"\x.([],[P(x)])") + print(e1) + e2 = e1.alpha_convert(Variable("z")) + print(e2) + print(e1 == e2) + + print("=" * 20 + "Test resolve_anaphora()" + "=" * 20) + print(resolve_anaphora(dexpr(r"([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])"))) + print( + resolve_anaphora(dexpr(r"([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])")) + ) + print(resolve_anaphora(dexpr(r"(([x,y],[]) + ([],[PRO(x)]))"))) + + print("=" * 20 + "Test pretty_print()" + "=" * 20) + dexpr(r"([],[])").pretty_print() + dexpr( + r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])" + ).pretty_print() + dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pretty_print() + dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pretty_print() + dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pretty_print() + + +def test_draw(): + try: + from tkinter import Tk + except ImportError as e: + raise ValueError("tkinter is required, but it's not available.") + + expressions = [ + r"x", + r"([],[])", + r"([x],[])", + r"([x],[man(x)])", + r"([x,y],[sees(x,y)])", + r"([x],[man(x), walks(x)])", + r"\x.([],[man(x), walks(x)])", + r"\x y.([],[sees(x,y)])", + r"([],[(([],[walks(x)]) + ([],[runs(x)]))])", + r"([x],[man(x), -([],[walks(x)])])", + r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])", + ] + + for e in expressions: + d = DrtExpression.fromstring(e) + d.draw() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py b/venv/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..fe27c9fc66f92600ebdcb13eb622d3d07db36985 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/drt_glue_demo.py @@ -0,0 +1,553 @@ +# Natural Language Toolkit: GUI Demo for Glue Semantics with Discourse +# Representation Theory (DRT) as meaning language +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +try: + from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk + from tkinter.font import Font + + from nltk.draw.util import CanvasFrame, ShowText + +except ImportError: + """Ignore ImportError because tkinter might not be available.""" + +from nltk.parse import MaltParser +from nltk.sem.drt import DrsDrawer, DrtVariableExpression +from nltk.sem.glue import DrtGlue +from nltk.sem.logic import Variable +from nltk.tag import RegexpTagger +from nltk.util import in_idle + + +class DrtGlueDemo: + def __init__(self, examples): + # Set up the main window. + self._top = Tk() + self._top.title("DRT Glue Demo") + + # Set up key bindings. 
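+        # (n or Space moves forward and p or BackSpace moves back through the
+        # readings/examples, and q exits -- see _init_bindings and the menu
+        # accelerators defined below.)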
+ self._init_bindings() + + # Initialize the fonts.self._error = None + self._init_fonts(self._top) + + self._examples = examples + self._readingCache = [None for example in examples] + + # The user can hide the grammar. + self._show_grammar = IntVar(self._top) + self._show_grammar.set(1) + + # Set the data to None + self._curExample = -1 + self._readings = [] + self._drs = None + self._drsWidget = None + self._error = None + + self._init_glue() + + # Create the basic frames. + self._init_menubar(self._top) + self._init_buttons(self._top) + self._init_exampleListbox(self._top) + self._init_readingListbox(self._top) + self._init_canvas(self._top) + + # Resize callback + self._canvas.bind("", self._configure) + + ######################################### + ## Initialization Helpers + ######################################### + + def _init_glue(self): + tagger = RegexpTagger( + [ + ("^(David|Mary|John)$", "NNP"), + ( + "^(walks|sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$", + "VB", + ), + ("^(go|order|vanish|find|approach)$", "VB"), + ("^(a)$", "ex_quant"), + ("^(every)$", "univ_quant"), + ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"), + ("^(big|gray|former)$", "JJ"), + ("^(him|himself)$", "PRP"), + ] + ) + + depparser = MaltParser(tagger=tagger) + self._glue = DrtGlue(depparser=depparser, remove_duplicates=False) + + def _init_fonts(self, root): + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(root) + self._size.set(self._sysfont.cget("size")) + + self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) + self._font = Font(family="helvetica", size=self._size.get()) + if self._size.get() < 0: + big = self._size.get() - 2 + else: + big = self._size.get() + 2 + self._bigfont = Font(family="helvetica", weight="bold", size=big) + + def _init_exampleListbox(self, parent): + self._exampleFrame = listframe = Frame(parent) + self._exampleFrame.pack(fill="both", side="left", padx=2) + self._exampleList_label = Label( + self._exampleFrame, font=self._boldfont, text="Examples" + ) + self._exampleList_label.pack() + self._exampleList = Listbox( + self._exampleFrame, + selectmode="single", + relief="groove", + background="white", + foreground="#909090", + font=self._font, + selectforeground="#004040", + selectbackground="#c0f0c0", + ) + + self._exampleList.pack(side="right", fill="both", expand=1) + + for example in self._examples: + self._exampleList.insert("end", (" %s" % example)) + self._exampleList.config(height=min(len(self._examples), 25), width=40) + + # Add a scrollbar if there are more than 25 examples. + if len(self._examples) > 25: + listscroll = Scrollbar(self._exampleFrame, orient="vertical") + self._exampleList.config(yscrollcommand=listscroll.set) + listscroll.config(command=self._exampleList.yview) + listscroll.pack(side="left", fill="y") + + # If they select a example, apply it. 
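+        # (The selection callback, _exampleList_select, hands the chosen index
+        # to _exampleList_store_selection, which parses the example with the
+        # glue parser and caches the resulting readings in _readingCache.)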
+ self._exampleList.bind("<>", self._exampleList_select) + + def _init_readingListbox(self, parent): + self._readingFrame = listframe = Frame(parent) + self._readingFrame.pack(fill="both", side="left", padx=2) + self._readingList_label = Label( + self._readingFrame, font=self._boldfont, text="Readings" + ) + self._readingList_label.pack() + self._readingList = Listbox( + self._readingFrame, + selectmode="single", + relief="groove", + background="white", + foreground="#909090", + font=self._font, + selectforeground="#004040", + selectbackground="#c0f0c0", + ) + + self._readingList.pack(side="right", fill="both", expand=1) + + # Add a scrollbar if there are more than 25 examples. + listscroll = Scrollbar(self._readingFrame, orient="vertical") + self._readingList.config(yscrollcommand=listscroll.set) + listscroll.config(command=self._readingList.yview) + listscroll.pack(side="right", fill="y") + + self._populate_readingListbox() + + def _populate_readingListbox(self): + # Populate the listbox with integers + self._readingList.delete(0, "end") + for i in range(len(self._readings)): + self._readingList.insert("end", (" %s" % (i + 1))) + self._readingList.config(height=min(len(self._readings), 25), width=5) + + # If they select a example, apply it. + self._readingList.bind("<>", self._readingList_select) + + def _init_bindings(self): + # Key bindings are a good thing. + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("n", self.next) + self._top.bind("", self.next) + self._top.bind("p", self.prev) + self._top.bind("", self.prev) + + def _init_buttons(self, parent): + # Set up the frames. + self._buttonframe = buttonframe = Frame(parent) + buttonframe.pack(fill="none", side="bottom", padx=3, pady=2) + Button( + buttonframe, + text="Prev", + background="#90c0d0", + foreground="black", + command=self.prev, + ).pack(side="left") + Button( + buttonframe, + text="Next", + background="#90c0d0", + foreground="black", + command=self.next, + ).pack(side="left") + + def _configure(self, event): + self._autostep = 0 + (x1, y1, x2, y2) = self._cframe.scrollregion() + y2 = event.height - 6 + self._canvas["scrollregion"] = "%d %d %d %d" % (x1, y1, x2, y2) + self._redraw() + + def _init_canvas(self, parent): + self._cframe = CanvasFrame( + parent, + background="white", + # width=525, height=250, + closeenough=10, + border=2, + relief="sunken", + ) + self._cframe.pack(expand=1, fill="both", side="top", pady=2) + canvas = self._canvas = self._cframe.canvas() + + # Initially, there's no tree or text + self._tree = None + self._textwidgets = [] + self._textline = None + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + actionmenu = Menu(menubar, tearoff=0) + actionmenu.add_command( + label="Next", underline=0, command=self.next, accelerator="n, Space" + ) + actionmenu.add_command( + label="Previous", underline=0, command=self.prev, accelerator="p, Backspace" + ) + menubar.add_cascade(label="Action", underline=0, menu=actionmenu) + + optionmenu = Menu(menubar, tearoff=0) + optionmenu.add_checkbutton( + label="Remove Duplicates", + underline=0, + variable=self._glue.remove_duplicates, + command=self._toggle_remove_duplicates, + accelerator="r", + ) + menubar.add_cascade(label="Options", underline=0, menu=optionmenu) + + viewmenu = 
Menu(menubar, tearoff=0) + viewmenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=18, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + ######################################### + ## Main draw procedure + ######################################### + + def _redraw(self): + canvas = self._canvas + + # Delete the old DRS, widgets, etc. + if self._drsWidget is not None: + self._drsWidget.clear() + + if self._drs: + self._drsWidget = DrsWidget(self._canvas, self._drs) + self._drsWidget.draw() + + if self._error: + self._drsWidget = DrsWidget(self._canvas, self._error) + self._drsWidget.draw() + + ######################################### + ## Button Callbacks + ######################################### + + def destroy(self, *e): + self._autostep = 0 + if self._top is None: + return + self._top.destroy() + self._top = None + + def prev(self, *e): + selection = self._readingList.curselection() + readingListSize = self._readingList.size() + + # there are readings + if readingListSize > 0: + # if one reading is currently selected + if len(selection) == 1: + index = int(selection[0]) + + # if it's on (or before) the first item + if index <= 0: + self._select_previous_example() + else: + self._readingList_store_selection(index - 1) + + else: + # select its first reading + self._readingList_store_selection(readingListSize - 1) + + else: + self._select_previous_example() + + def _select_previous_example(self): + # if the current example is not the first example + if self._curExample > 0: + self._exampleList_store_selection(self._curExample - 1) + else: + # go to the last example + self._exampleList_store_selection(len(self._examples) - 1) + + def next(self, *e): + selection = self._readingList.curselection() + readingListSize = self._readingList.size() + + # if there are readings + if readingListSize > 0: + # if one reading is currently selected + if len(selection) == 1: + index = int(selection[0]) + + # if it's on (or past) the last item + if index >= (readingListSize - 1): + self._select_next_example() + else: + self._readingList_store_selection(index + 1) + + else: + # select its first reading + self._readingList_store_selection(0) + + else: + self._select_next_example() + + def _select_next_example(self): + # if the current example is not the last example + if self._curExample < len(self._examples) - 1: + self._exampleList_store_selection(self._curExample + 1) + else: + # go to the first example + self._exampleList_store_selection(0) + + def about(self, *e): + ABOUT = ( + "NLTK Discourse Representation Theory (DRT) Glue Semantics Demo\n" + + "Written by Daniel H. 
Garrette" + ) + TITLE = "About: NLTK DRT Glue Demo" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self._top, TITLE, ABOUT) + + def postscript(self, *e): + self._autostep = 0 + self._cframe.print_to_file() + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._boldfont.configure(size=-(abs(size))) + self._sysfont.configure(size=-(abs(size))) + self._bigfont.configure(size=-(abs(size + 2))) + self._redraw() + + def _toggle_remove_duplicates(self): + self._glue.remove_duplicates = not self._glue.remove_duplicates + + self._exampleList.selection_clear(0, "end") + self._readings = [] + self._populate_readingListbox() + self._readingCache = [None for ex in self._examples] + self._curExample = -1 + self._error = None + + self._drs = None + self._redraw() + + def _exampleList_select(self, event): + selection = self._exampleList.curselection() + if len(selection) != 1: + return + self._exampleList_store_selection(int(selection[0])) + + def _exampleList_store_selection(self, index): + self._curExample = index + example = self._examples[index] + + self._exampleList.selection_clear(0, "end") + if example: + cache = self._readingCache[index] + if cache: + if isinstance(cache, list): + self._readings = cache + self._error = None + else: + self._readings = [] + self._error = cache + else: + try: + self._readings = self._glue.parse_to_meaning(example) + self._error = None + self._readingCache[index] = self._readings + except Exception as e: + self._readings = [] + self._error = DrtVariableExpression(Variable("Error: " + str(e))) + self._readingCache[index] = self._error + + # add a star to the end of the example + self._exampleList.delete(index) + self._exampleList.insert(index, (" %s *" % example)) + self._exampleList.config( + height=min(len(self._examples), 25), width=40 + ) + + self._populate_readingListbox() + + self._exampleList.selection_set(index) + + self._drs = None + self._redraw() + + def _readingList_select(self, event): + selection = self._readingList.curselection() + if len(selection) != 1: + return + self._readingList_store_selection(int(selection[0])) + + def _readingList_store_selection(self, index): + reading = self._readings[index] + + self._readingList.selection_clear(0, "end") + if reading: + self._readingList.selection_set(index) + + self._drs = reading.simplify().normalize().resolve_anaphora() + + self._redraw() + + +class DrsWidget: + def __init__(self, canvas, drs, **attribs): + self._drs = drs + self._canvas = canvas + canvas.font = Font( + font=canvas.itemcget(canvas.create_text(0, 0, text=""), "font") + ) + canvas._BUFFER = 3 + self.bbox = (0, 0, 0, 0) + + def draw(self): + (right, bottom) = DrsDrawer(self._drs, canvas=self._canvas).draw() + self.bbox = (0, 0, right + 1, bottom + 1) + + def clear(self): + self._canvas.create_rectangle(self.bbox, fill="white", width="0") + + +def demo(): + examples = [ + "John walks", + "David sees Mary", + "David eats a sandwich", + "every man chases a dog", + # 'every man believes a dog yawns', + # 'John gives David a sandwich', + "John chases himself", + # 
'John persuades David to order a pizza', + # 'John tries to go', + # 'John tries to find a unicorn', + # 'John seems to vanish', + # 'a unicorn seems to approach', + # 'every big cat leaves', + # 'every gray cat leaves', + # 'every big gray cat leaves', + # 'a former senator leaves', + # 'John likes a cat', + # 'John likes every cat', + # 'he walks', + # 'John walks and he leaves' + ] + DrtGlueDemo(examples).mainloop() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/evaluate.py b/venv/lib/python3.10/site-packages/nltk/sem/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..bbff44f70b34ce0ac4de038b83a95a325d44abaf --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/evaluate.py @@ -0,0 +1,829 @@ +# Natural Language Toolkit: Models for first-order languages with lambda +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein , +# URL: +# For license information, see LICENSE.TXT + +# TODO: +# - fix tracing +# - fix iterator-based approach to existentials + +""" +This module provides data structures for representing first-order +models. +""" + +import inspect +import re +import sys +import textwrap +from pprint import pformat + +from nltk.decorators import decorator # this used in code that is commented out +from nltk.sem.logic import ( + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + EqualityExpression, + ExistsExpression, + Expression, + IffExpression, + ImpExpression, + IndividualVariableExpression, + IotaExpression, + LambdaExpression, + NegatedExpression, + OrExpression, + Variable, + is_indvar, +) + + +class Error(Exception): + pass + + +class Undefined(Error): + pass + + +def trace(f, *args, **kw): + argspec = inspect.getfullargspec(f) + d = dict(zip(argspec[0], args)) + if d.pop("trace", None): + print() + for item in d.items(): + print("%s => %s" % item) + return f(*args, **kw) + + +def is_rel(s): + """ + Check whether a set represents a relation (of any arity). + + :param s: a set containing tuples of str elements + :type s: set + :rtype: bool + """ + # we have the empty relation, i.e. set() + if len(s) == 0: + return True + # all the elements are tuples of the same length + elif all(isinstance(el, tuple) for el in s) and len(max(s)) == len(min(s)): + return True + else: + raise ValueError("Set %r contains sequences of different lengths" % s) + + +def set2rel(s): + """ + Convert a set containing individuals (strings or numbers) into a set of + unary tuples. Any tuples of strings already in the set are passed through + unchanged. + + For example: + - set(['a', 'b']) => set([('a',), ('b',)]) + - set([3, 27]) => set([('3',), ('27',)]) + + :type s: set + :rtype: set of tuple of str + """ + new = set() + for elem in s: + if isinstance(elem, str): + new.add((elem,)) + elif isinstance(elem, int): + new.add(str(elem)) + else: + new.add(elem) + return new + + +def arity(rel): + """ + Check the arity of a relation. + :type rel: set of tuples + :rtype: int of tuple of str + """ + if len(rel) == 0: + return 0 + return len(list(rel)[0]) + + +class Valuation(dict): + """ + A dictionary which represents a model-theoretic Valuation of non-logical constants. + Keys are strings representing the constants to be interpreted, and values correspond + to individuals (represented as strings) and n-ary relations (represented as sets of tuples + of strings). 
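+
+    For example, an illustrative sketch (the entries mirror those used by the
+    ``folmodel`` demo at the end of this module):
+
+    >>> from nltk.sem.evaluate import Valuation
+    >>> val = Valuation([('adam', 'b1'), ('girl', {'g1', 'g2'}), ('love', {('b1', 'g1')})])
+    >>> val['adam']
+    'b1'
+    >>> sorted(val['girl'])
+    [('g1',), ('g2',)]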
+ + An instance of ``Valuation`` will raise a KeyError exception (i.e., + just behave like a standard dictionary) if indexed with an expression that + is not in its list of symbols. + """ + + def __init__(self, xs): + """ + :param xs: a list of (symbol, value) pairs. + """ + super().__init__() + for (sym, val) in xs: + if isinstance(val, str) or isinstance(val, bool): + self[sym] = val + elif isinstance(val, set): + self[sym] = set2rel(val) + else: + msg = textwrap.fill( + "Error in initializing Valuation. " + "Unrecognized value for symbol '%s':\n%s" % (sym, val), + width=66, + ) + + raise ValueError(msg) + + def __getitem__(self, key): + if key in self: + return dict.__getitem__(self, key) + else: + raise Undefined("Unknown expression: '%s'" % key) + + def __str__(self): + return pformat(self) + + @property + def domain(self): + """Set-theoretic domain of the value-space of a Valuation.""" + dom = [] + for val in self.values(): + if isinstance(val, str): + dom.append(val) + elif not isinstance(val, bool): + dom.extend( + [elem for tuple_ in val for elem in tuple_ if elem is not None] + ) + return set(dom) + + @property + def symbols(self): + """The non-logical constants which the Valuation recognizes.""" + return sorted(self.keys()) + + @classmethod + def fromstring(cls, s): + return read_valuation(s) + + +########################################## +# REs used by the _read_valuation function +########################################## +_VAL_SPLIT_RE = re.compile(r"\s*=+>\s*") +_ELEMENT_SPLIT_RE = re.compile(r"\s*,\s*") +_TUPLES_RE = re.compile( + r"""\s* + (\([^)]+\)) # tuple-expression + \s*""", + re.VERBOSE, +) + + +def _read_valuation_line(s): + """ + Read a line in a valuation file. + + Lines are expected to be of the form:: + + noosa => n + girl => {g1, g2} + chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)} + + :param s: input line + :type s: str + :return: a pair (symbol, value) + :rtype: tuple + """ + pieces = _VAL_SPLIT_RE.split(s) + symbol = pieces[0] + value = pieces[1] + # check whether the value is meant to be a set + if value.startswith("{"): + value = value[1:-1] + tuple_strings = _TUPLES_RE.findall(value) + # are the set elements tuples? + if tuple_strings: + set_elements = [] + for ts in tuple_strings: + ts = ts[1:-1] + element = tuple(_ELEMENT_SPLIT_RE.split(ts)) + set_elements.append(element) + else: + set_elements = _ELEMENT_SPLIT_RE.split(value) + value = set(set_elements) + return symbol, value + + +def read_valuation(s, encoding=None): + """ + Convert a valuation string into a valuation. + + :param s: a valuation string + :type s: str + :param encoding: the encoding of the input string, if it is binary + :type encoding: str + :return: a ``nltk.sem`` valuation + :rtype: Valuation + """ + if encoding is not None: + s = s.decode(encoding) + statements = [] + for linenum, line in enumerate(s.splitlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + statements.append(_read_valuation_line(line)) + except ValueError as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + return Valuation(statements) + + +class Assignment(dict): + r""" + A dictionary which represents an assignment of values to variables. + + An assignment can only assign values from its domain. 
+ + If an unknown expression *a* is passed to a model *M*\ 's + interpretation function *i*, *i* will first check whether *M*\ 's + valuation assigns an interpretation to *a* as a constant, and if + this fails, *i* will delegate the interpretation of *a* to + *g*. *g* only assigns values to individual variables (i.e., + members of the class ``IndividualVariableExpression`` in the ``logic`` + module. If a variable is not assigned a value by *g*, it will raise + an ``Undefined`` exception. + + A variable *Assignment* is a mapping from individual variables to + entities in the domain. Individual variables are usually indicated + with the letters ``'x'``, ``'y'``, ``'w'`` and ``'z'``, optionally + followed by an integer (e.g., ``'x0'``, ``'y332'``). Assignments are + created using the ``Assignment`` constructor, which also takes the + domain as a parameter. + + >>> from nltk.sem.evaluate import Assignment + >>> dom = set(['u1', 'u2', 'u3', 'u4']) + >>> g3 = Assignment(dom, [('x', 'u1'), ('y', 'u2')]) + >>> g3 == {'x': 'u1', 'y': 'u2'} + True + + There is also a ``print`` format for assignments which uses a notation + closer to that in logic textbooks: + + >>> print(g3) + g[u1/x][u2/y] + + It is also possible to update an assignment using the ``add`` method: + + >>> dom = set(['u1', 'u2', 'u3', 'u4']) + >>> g4 = Assignment(dom) + >>> g4.add('x', 'u1') + {'x': 'u1'} + + With no arguments, ``purge()`` is equivalent to ``clear()`` on a dictionary: + + >>> g4.purge() + >>> g4 + {} + + :param domain: the domain of discourse + :type domain: set + :param assign: a list of (varname, value) associations + :type assign: list + """ + + def __init__(self, domain, assign=None): + super().__init__() + self.domain = domain + if assign: + for (var, val) in assign: + assert val in self.domain, "'{}' is not in the domain: {}".format( + val, + self.domain, + ) + assert is_indvar(var), ( + "Wrong format for an Individual Variable: '%s'" % var + ) + self[var] = val + self.variant = None + self._addvariant() + + def __getitem__(self, key): + if key in self: + return dict.__getitem__(self, key) + else: + raise Undefined("Not recognized as a variable: '%s'" % key) + + def copy(self): + new = Assignment(self.domain) + new.update(self) + return new + + def purge(self, var=None): + """ + Remove one or all keys (i.e. logic variables) from an + assignment, and update ``self.variant``. + + :param var: a Variable acting as a key for the assignment. + """ + if var: + del self[var] + else: + self.clear() + self._addvariant() + return None + + def __str__(self): + """ + Pretty printing for assignments. {'x', 'u'} appears as 'g[u/x]' + """ + gstring = "g" + # Deterministic output for unit testing. + variant = sorted(self.variant) + for (val, var) in variant: + gstring += f"[{val}/{var}]" + return gstring + + def _addvariant(self): + """ + Create a more pretty-printable version of the assignment. + """ + list_ = [] + for item in self.items(): + pair = (item[1], item[0]) + list_.append(pair) + self.variant = list_ + return None + + def add(self, var, val): + """ + Add a new variable-value pair to the assignment, and update + ``self.variant``. + + """ + assert val in self.domain, f"{val} is not in the domain {self.domain}" + assert is_indvar(var), "Wrong format for an Individual Variable: '%s'" % var + self[var] = val + self._addvariant() + return self + + +class Model: + """ + A first order model is a domain *D* of discourse and a valuation *V*. 
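+
+    A minimal usage sketch (the entities follow the ``folmodel`` demo below;
+    ``Valuation`` and ``Assignment`` are the classes defined earlier in this
+    module):
+
+    >>> from nltk.sem.evaluate import Assignment, Model, Valuation
+    >>> v = Valuation([('adam', 'b1'), ('betty', 'g1'), ('love', {('b1', 'g1')})])
+    >>> m = Model(v.domain, v)
+    >>> m.evaluate('love(adam, betty)', Assignment(v.domain))
+    True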
+ + A domain *D* is a set, and a valuation *V* is a map that associates + expressions with values in the model. + The domain of *V* should be a subset of *D*. + + Construct a new ``Model``. + + :type domain: set + :param domain: A set of entities representing the domain of discourse of the model. + :type valuation: Valuation + :param valuation: the valuation of the model. + :param prop: If this is set, then we are building a propositional\ + model and don't require the domain of *V* to be subset of *D*. + """ + + def __init__(self, domain, valuation): + assert isinstance(domain, set) + self.domain = domain + self.valuation = valuation + if not domain.issuperset(valuation.domain): + raise Error( + "The valuation domain, %s, must be a subset of the model's domain, %s" + % (valuation.domain, domain) + ) + + def __repr__(self): + return f"({self.domain!r}, {self.valuation!r})" + + def __str__(self): + return f"Domain = {self.domain},\nValuation = \n{self.valuation}" + + def evaluate(self, expr, g, trace=None): + """ + Read input expressions, and provide a handler for ``satisfy`` + that blocks further propagation of the ``Undefined`` error. + :param expr: An ``Expression`` of ``logic``. + :type g: Assignment + :param g: an assignment to individual variables. + :rtype: bool or 'Undefined' + """ + try: + parsed = Expression.fromstring(expr) + value = self.satisfy(parsed, g, trace=trace) + if trace: + print() + print(f"'{expr}' evaluates to {value} under M, {g}") + return value + except Undefined: + if trace: + print() + print(f"'{expr}' is undefined under M, {g}") + return "Undefined" + + def satisfy(self, parsed, g, trace=None): + """ + Recursive interpretation function for a formula of first-order logic. + + Raises an ``Undefined`` error when ``parsed`` is an atomic string + but is not a symbol or an individual variable. + + :return: Returns a truth value or ``Undefined`` if ``parsed`` is\ + complex, and calls the interpretation function ``i`` if ``parsed``\ + is atomic. + + :param parsed: An expression of ``logic``. + :type g: Assignment + :param g: an assignment to individual variables. 
+ """ + + if isinstance(parsed, ApplicationExpression): + function, arguments = parsed.uncurry() + if isinstance(function, AbstractVariableExpression): + # It's a predicate expression ("P(x,y)"), so used uncurried arguments + funval = self.satisfy(function, g) + argvals = tuple(self.satisfy(arg, g) for arg in arguments) + return argvals in funval + else: + # It must be a lambda expression, so use curried form + funval = self.satisfy(parsed.function, g) + argval = self.satisfy(parsed.argument, g) + return funval[argval] + elif isinstance(parsed, NegatedExpression): + return not self.satisfy(parsed.term, g) + elif isinstance(parsed, AndExpression): + return self.satisfy(parsed.first, g) and self.satisfy(parsed.second, g) + elif isinstance(parsed, OrExpression): + return self.satisfy(parsed.first, g) or self.satisfy(parsed.second, g) + elif isinstance(parsed, ImpExpression): + return (not self.satisfy(parsed.first, g)) or self.satisfy(parsed.second, g) + elif isinstance(parsed, IffExpression): + return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g) + elif isinstance(parsed, EqualityExpression): + return self.satisfy(parsed.first, g) == self.satisfy(parsed.second, g) + elif isinstance(parsed, AllExpression): + new_g = g.copy() + for u in self.domain: + new_g.add(parsed.variable.name, u) + if not self.satisfy(parsed.term, new_g): + return False + return True + elif isinstance(parsed, ExistsExpression): + new_g = g.copy() + for u in self.domain: + new_g.add(parsed.variable.name, u) + if self.satisfy(parsed.term, new_g): + return True + return False + elif isinstance(parsed, IotaExpression): + new_g = g.copy() + for u in self.domain: + new_g.add(parsed.variable.name, u) + if self.satisfy(parsed.term, new_g): + return True + return False + elif isinstance(parsed, LambdaExpression): + cf = {} + var = parsed.variable.name + for u in self.domain: + val = self.satisfy(parsed.term, g.add(var, u)) + # NB the dict would be a lot smaller if we do this: + # if val: cf[u] = val + # But then need to deal with cases where f(a) should yield + # a function rather than just False. + cf[u] = val + return cf + else: + return self.i(parsed, g, trace) + + # @decorator(trace_eval) + def i(self, parsed, g, trace=False): + """ + An interpretation function. + + Assuming that ``parsed`` is atomic: + + - if ``parsed`` is a non-logical constant, calls the valuation *V* + - else if ``parsed`` is an individual variable, calls assignment *g* + - else returns ``Undefined``. + + :param parsed: an ``Expression`` of ``logic``. + :type g: Assignment + :param g: an assignment to individual variables. + :return: a semantic value + """ + # If parsed is a propositional letter 'p', 'q', etc, it could be in valuation.symbols + # and also be an IndividualVariableExpression. We want to catch this first case. + # So there is a procedural consequence to the ordering of clauses here: + if parsed.variable.name in self.valuation.symbols: + return self.valuation[parsed.variable.name] + elif isinstance(parsed, IndividualVariableExpression): + return g[parsed.variable.name] + + else: + raise Undefined("Can't find a value for %s" % parsed) + + def satisfiers(self, parsed, varex, g, trace=None, nesting=0): + """ + Generate the entities from the model's domain that satisfy an open formula. + + :param parsed: an open formula + :type parsed: Expression + :param varex: the relevant free individual variable in ``parsed``. 
+ :type varex: VariableExpression or str + :param g: a variable assignment + :type g: Assignment + :return: a set of the entities that satisfy ``parsed``. + """ + + spacer = " " + indent = spacer + (spacer * nesting) + candidates = [] + + if isinstance(varex, str): + var = Variable(varex) + else: + var = varex + + if var in parsed.free(): + if trace: + print() + print( + (spacer * nesting) + + f"Open formula is '{parsed}' with assignment {g}" + ) + for u in self.domain: + new_g = g.copy() + new_g.add(var.name, u) + if trace and trace > 1: + lowtrace = trace - 1 + else: + lowtrace = 0 + value = self.satisfy(parsed, new_g, lowtrace) + + if trace: + print(indent + "(trying assignment %s)" % new_g) + + # parsed == False under g[u/var]? + if value == False: + if trace: + print(indent + f"value of '{parsed}' under {new_g} is False") + + # so g[u/var] is a satisfying assignment + else: + candidates.append(u) + if trace: + print(indent + f"value of '{parsed}' under {new_g} is {value}") + + result = {c for c in candidates} + # var isn't free in parsed + else: + raise Undefined(f"{var.name} is not free in {parsed}") + + return result + + +# ////////////////////////////////////////////////////////////////////// +# Demo.. +# ////////////////////////////////////////////////////////////////////// +# number of spacer chars +mult = 30 + +# Demo 1: Propositional Logic +################# +def propdemo(trace=None): + """Example of a propositional model.""" + + global val1, dom1, m1, g1 + val1 = Valuation([("P", True), ("Q", True), ("R", False)]) + dom1 = set() + m1 = Model(dom1, val1) + g1 = Assignment(dom1) + + print() + print("*" * mult) + print("Propositional Formulas Demo") + print("*" * mult) + print("(Propositional constants treated as nullary predicates)") + print() + print("Model m1:\n", m1) + print("*" * mult) + sentences = [ + "(P & Q)", + "(P & R)", + "- P", + "- R", + "- - P", + "- (P & R)", + "(P | R)", + "(R | P)", + "(R | R)", + "(- P | R)", + "(P | - P)", + "(P -> Q)", + "(P -> R)", + "(R -> P)", + "(P <-> P)", + "(R <-> R)", + "(P <-> R)", + ] + + for sent in sentences: + if trace: + print() + m1.evaluate(sent, g1, trace) + else: + print(f"The value of '{sent}' is: {m1.evaluate(sent, g1)}") + + +# Demo 2: FOL Model +############# + + +def folmodel(quiet=False, trace=None): + """Example of a first-order model.""" + + global val2, v2, dom2, m2, g2 + + v2 = [ + ("adam", "b1"), + ("betty", "g1"), + ("fido", "d1"), + ("girl", {"g1", "g2"}), + ("boy", {"b1", "b2"}), + ("dog", {"d1"}), + ("love", {("b1", "g1"), ("b2", "g2"), ("g1", "b1"), ("g2", "b1")}), + ] + val2 = Valuation(v2) + dom2 = val2.domain + m2 = Model(dom2, val2) + g2 = Assignment(dom2, [("x", "b1"), ("y", "g2")]) + + if not quiet: + print() + print("*" * mult) + print("Models Demo") + print("*" * mult) + print("Model m2:\n", "-" * 14, "\n", m2) + print("Variable assignment = ", g2) + + exprs = ["adam", "boy", "love", "walks", "x", "y", "z"] + parsed_exprs = [Expression.fromstring(e) for e in exprs] + + print() + for parsed in parsed_exprs: + try: + print( + "The interpretation of '%s' in m2 is %s" + % (parsed, m2.i(parsed, g2)) + ) + except Undefined: + print("The interpretation of '%s' in m2 is Undefined" % parsed) + + applications = [ + ("boy", ("adam")), + ("walks", ("adam",)), + ("love", ("adam", "y")), + ("love", ("y", "adam")), + ] + + for (fun, args) in applications: + try: + funval = m2.i(Expression.fromstring(fun), g2) + argsval = tuple(m2.i(Expression.fromstring(arg), g2) for arg in args) + print(f"{fun}({args}) evaluates to 
{argsval in funval}") + except Undefined: + print(f"{fun}({args}) evaluates to Undefined") + + +# Demo 3: FOL +######### + + +def foldemo(trace=None): + """ + Interpretation of closed expressions in a first-order model. + """ + folmodel(quiet=True) + + print() + print("*" * mult) + print("FOL Formulas Demo") + print("*" * mult) + + formulas = [ + "love (adam, betty)", + "(adam = mia)", + "\\x. (boy(x) | girl(x))", + "\\x. boy(x)(adam)", + "\\x y. love(x, y)", + "\\x y. love(x, y)(adam)(betty)", + "\\x y. love(x, y)(adam, betty)", + "\\x y. (boy(x) & love(x, y))", + "\\x. exists y. (boy(x) & love(x, y))", + "exists z1. boy(z1)", + "exists x. (boy(x) & -(x = adam))", + "exists x. (boy(x) & all y. love(y, x))", + "all x. (boy(x) | girl(x))", + "all x. (girl(x) -> exists y. boy(y) & love(x, y))", # Every girl loves exists boy. + "exists x. (boy(x) & all y. (girl(y) -> love(y, x)))", # There is exists boy that every girl loves. + "exists x. (boy(x) & all y. (girl(y) -> love(x, y)))", # exists boy loves every girl. + "all x. (dog(x) -> - girl(x))", + "exists x. exists y. (love(x, y) & love(x, y))", + ] + + for fmla in formulas: + g2.purge() + if trace: + m2.evaluate(fmla, g2, trace) + else: + print(f"The value of '{fmla}' is: {m2.evaluate(fmla, g2)}") + + +# Demo 3: Satisfaction +############# + + +def satdemo(trace=None): + """Satisfiers of an open formula in a first order model.""" + + print() + print("*" * mult) + print("Satisfiers Demo") + print("*" * mult) + + folmodel(quiet=True) + + formulas = [ + "boy(x)", + "(x = x)", + "(boy(x) | girl(x))", + "(boy(x) & girl(x))", + "love(adam, x)", + "love(x, adam)", + "-(x = adam)", + "exists z22. love(x, z22)", + "exists y. love(y, x)", + "all y. (girl(y) -> love(x, y))", + "all y. (girl(y) -> love(y, x))", + "all y. (girl(y) -> (boy(x) & love(y, x)))", + "(boy(x) & all y. (girl(y) -> love(x, y)))", + "(boy(x) & all y. (girl(y) -> love(y, x)))", + "(boy(x) & exists y. (girl(y) & love(y, x)))", + "(girl(x) -> dog(x))", + "all y. (dog(y) -> (x = y))", + "exists y. love(y, x)", + "exists y. (love(adam, y) & love(y, x))", + ] + + if trace: + print(m2) + + for fmla in formulas: + print(fmla) + Expression.fromstring(fmla) + + parsed = [Expression.fromstring(fmla) for fmla in formulas] + + for p in parsed: + g2.purge() + print( + "The satisfiers of '{}' are: {}".format(p, m2.satisfiers(p, "x", g2, trace)) + ) + + +def demo(num=0, trace=None): + """ + Run exists demos. 
+ + - num = 1: propositional logic demo + - num = 2: first order model demo (only if trace is set) + - num = 3: first order sentences demo + - num = 4: satisfaction of open formulas demo + - any other value: run all the demos + + :param trace: trace = 1, or trace = 2 for more verbose tracing + """ + demos = {1: propdemo, 2: folmodel, 3: foldemo, 4: satdemo} + + try: + demos[num](trace=trace) + except KeyError: + for num in demos: + demos[num](trace=trace) + + +if __name__ == "__main__": + demo(2, trace=0) diff --git a/venv/lib/python3.10/site-packages/nltk/sem/glue.py b/venv/lib/python3.10/site-packages/nltk/sem/glue.py new file mode 100644 index 0000000000000000000000000000000000000000..1098c83bec71cee14b6c06e93ba3f15366c0ada2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/glue.py @@ -0,0 +1,835 @@ +# Natural Language Toolkit: Glue Semantics +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import os +from itertools import chain + +import nltk +from nltk.internals import Counter +from nltk.sem import drt, linearlogic +from nltk.sem.logic import ( + AbstractVariableExpression, + Expression, + LambdaExpression, + Variable, + VariableExpression, +) +from nltk.tag import BigramTagger, RegexpTagger, TrigramTagger, UnigramTagger + +SPEC_SEMTYPES = { + "a": "ex_quant", + "an": "ex_quant", + "every": "univ_quant", + "the": "def_art", + "no": "no_quant", + "default": "ex_quant", +} + +OPTIONAL_RELATIONSHIPS = ["nmod", "vmod", "punct"] + + +class GlueFormula: + def __init__(self, meaning, glue, indices=None): + if not indices: + indices = set() + + if isinstance(meaning, str): + self.meaning = Expression.fromstring(meaning) + elif isinstance(meaning, Expression): + self.meaning = meaning + else: + raise RuntimeError( + "Meaning term neither string or expression: %s, %s" + % (meaning, meaning.__class__) + ) + + if isinstance(glue, str): + self.glue = linearlogic.LinearLogicParser().parse(glue) + elif isinstance(glue, linearlogic.Expression): + self.glue = glue + else: + raise RuntimeError( + "Glue term neither string or expression: %s, %s" + % (glue, glue.__class__) + ) + + self.indices = indices + + def applyto(self, arg): + """self = (\\x.(walk x), (subj -o f)) + arg = (john , subj) + returns ((walk john), f) + """ + if self.indices & arg.indices: # if the sets are NOT disjoint + raise linearlogic.LinearLogicApplicationException( + f"'{self}' applied to '{arg}'. Indices are not disjoint." 
+ ) + else: # if the sets ARE disjoint + return_indices = self.indices | arg.indices + + try: + return_glue = linearlogic.ApplicationExpression( + self.glue, arg.glue, arg.indices + ) + except linearlogic.LinearLogicApplicationException as e: + raise linearlogic.LinearLogicApplicationException( + f"'{self.simplify()}' applied to '{arg.simplify()}'" + ) from e + + arg_meaning_abstracted = arg.meaning + if return_indices: + for dep in self.glue.simplify().antecedent.dependencies[ + ::-1 + ]: # if self.glue is (A -o B), dep is in A.dependencies + arg_meaning_abstracted = self.make_LambdaExpression( + Variable("v%s" % dep), arg_meaning_abstracted + ) + return_meaning = self.meaning.applyto(arg_meaning_abstracted) + + return self.__class__(return_meaning, return_glue, return_indices) + + def make_VariableExpression(self, name): + return VariableExpression(name) + + def make_LambdaExpression(self, variable, term): + return LambdaExpression(variable, term) + + def lambda_abstract(self, other): + assert isinstance(other, GlueFormula) + assert isinstance(other.meaning, AbstractVariableExpression) + return self.__class__( + self.make_LambdaExpression(other.meaning.variable, self.meaning), + linearlogic.ImpExpression(other.glue, self.glue), + ) + + def compile(self, counter=None): + """From Iddo Lev's PhD Dissertation p108-109""" + if not counter: + counter = Counter() + (compiled_glue, new_forms) = self.glue.simplify().compile_pos( + counter, self.__class__ + ) + return new_forms + [ + self.__class__(self.meaning, compiled_glue, {counter.get()}) + ] + + def simplify(self): + return self.__class__( + self.meaning.simplify(), self.glue.simplify(), self.indices + ) + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.meaning == other.meaning + and self.glue == other.glue + ) + + def __ne__(self, other): + return not self == other + + # sorting for use in doctests which must be deterministic + def __lt__(self, other): + return str(self) < str(other) + + def __str__(self): + assert isinstance(self.indices, set) + accum = f"{self.meaning} : {self.glue}" + if self.indices: + accum += ( + " : {" + ", ".join(str(index) for index in sorted(self.indices)) + "}" + ) + return accum + + def __repr__(self): + return "%s" % self + + +class GlueDict(dict): + def __init__(self, filename, encoding=None): + self.filename = filename + self.file_encoding = encoding + self.read_file() + + def read_file(self, empty_first=True): + if empty_first: + self.clear() + + try: + contents = nltk.data.load( + self.filename, format="text", encoding=self.file_encoding + ) + # TODO: the above can't handle zip files, but this should anyway be fixed in nltk.data.load() + except LookupError as e: + try: + contents = nltk.data.load( + "file:" + self.filename, format="text", encoding=self.file_encoding + ) + except LookupError: + raise e + lines = contents.splitlines() + + for line in lines: # example: 'n : (\\x.( x), (v-or))' + # lambdacalc -^ linear logic -^ + line = line.strip() # remove trailing newline + if not len(line): + continue # skip empty lines + if line[0] == "#": + continue # skip commented out lines + + parts = line.split( + " : ", 2 + ) # ['verb', '(\\x.( x), ( subj -o f ))', '[subj]'] + + glue_formulas = [] + paren_count = 0 + tuple_start = 0 + tuple_comma = 0 + + relationships = None + + if len(parts) > 1: + for (i, c) in enumerate(parts[1]): + if c == "(": + if paren_count == 0: # if it's the first '(' of a tuple + tuple_start = i + 1 # then save the index + paren_count += 1 + elif c == 
")": + paren_count -= 1 + if paren_count == 0: # if it's the last ')' of a tuple + meaning_term = parts[1][ + tuple_start:tuple_comma + ] # '\\x.( x)' + glue_term = parts[1][tuple_comma + 1 : i] # '(v-r)' + glue_formulas.append( + [meaning_term, glue_term] + ) # add the GlueFormula to the list + elif c == ",": + if ( + paren_count == 1 + ): # if it's a comma separating the parts of the tuple + tuple_comma = i # then save the index + elif c == "#": # skip comments at the ends of lines + if ( + paren_count != 0 + ): # if the line hasn't parsed correctly so far + raise RuntimeError( + "Formula syntax is incorrect for entry " + line + ) + break # break to the next line + + if len(parts) > 2: # if there is a relationship entry at the end + rel_start = parts[2].index("[") + 1 + rel_end = parts[2].index("]") + if rel_start == rel_end: + relationships = frozenset() + else: + relationships = frozenset( + r.strip() for r in parts[2][rel_start:rel_end].split(",") + ) + + try: + start_inheritance = parts[0].index("(") + end_inheritance = parts[0].index(")") + sem = parts[0][:start_inheritance].strip() + supertype = parts[0][start_inheritance + 1 : end_inheritance] + except: + sem = parts[0].strip() + supertype = None + + if sem not in self: + self[sem] = {} + + if ( + relationships is None + ): # if not specified for a specific relationship set + # add all relationship entries for parents + if supertype: + for rels in self[supertype]: + if rels not in self[sem]: + self[sem][rels] = [] + glue = self[supertype][rels] + self[sem][rels].extend(glue) + self[sem][rels].extend( + glue_formulas + ) # add the glue formulas to every rel entry + else: + if None not in self[sem]: + self[sem][None] = [] + self[sem][None].extend( + glue_formulas + ) # add the glue formulas to every rel entry + else: + if relationships not in self[sem]: + self[sem][relationships] = [] + if supertype: + self[sem][relationships].extend(self[supertype][relationships]) + self[sem][relationships].extend( + glue_formulas + ) # add the glue entry to the dictionary + + def __str__(self): + accum = "" + for pos in self: + str_pos = "%s" % pos + for relset in self[pos]: + i = 1 + for gf in self[pos][relset]: + if i == 1: + accum += str_pos + ": " + else: + accum += " " * (len(str_pos) + 2) + accum += "%s" % gf + if relset and i == len(self[pos][relset]): + accum += " : %s" % relset + accum += "\n" + i += 1 + return accum + + def to_glueformula_list(self, depgraph, node=None, counter=None, verbose=False): + if node is None: + # TODO: should it be depgraph.root? Is this code tested? 
+ top = depgraph.nodes[0] + depList = list(chain.from_iterable(top["deps"].values())) + root = depgraph.nodes[depList[0]] + + return self.to_glueformula_list(depgraph, root, Counter(), verbose) + + glueformulas = self.lookup(node, depgraph, counter) + for dep_idx in chain.from_iterable(node["deps"].values()): + dep = depgraph.nodes[dep_idx] + glueformulas.extend( + self.to_glueformula_list(depgraph, dep, counter, verbose) + ) + return glueformulas + + def lookup(self, node, depgraph, counter): + semtype_names = self.get_semtypes(node) + + semtype = None + for name in semtype_names: + if name in self: + semtype = self[name] + break + if semtype is None: + # raise KeyError, "There is no GlueDict entry for sem type '%s' (for '%s')" % (sem, word) + return [] + + self.add_missing_dependencies(node, depgraph) + + lookup = self._lookup_semtype_option(semtype, node, depgraph) + + if not len(lookup): + raise KeyError( + "There is no GlueDict entry for sem type of '%s' " + "with tag '%s', and rel '%s'" % (node["word"], node["tag"], node["rel"]) + ) + + return self.get_glueformulas_from_semtype_entry( + lookup, node["word"], node, depgraph, counter + ) + + def add_missing_dependencies(self, node, depgraph): + rel = node["rel"].lower() + + if rel == "main": + headnode = depgraph.nodes[node["head"]] + subj = self.lookup_unique("subj", headnode, depgraph) + relation = subj["rel"] + node["deps"].setdefault(relation, []) + node["deps"][relation].append(subj["address"]) + # node['deps'].append(subj['address']) + + def _lookup_semtype_option(self, semtype, node, depgraph): + relationships = frozenset( + depgraph.nodes[dep]["rel"].lower() + for dep in chain.from_iterable(node["deps"].values()) + if depgraph.nodes[dep]["rel"].lower() not in OPTIONAL_RELATIONSHIPS + ) + + try: + lookup = semtype[relationships] + except KeyError: + # An exact match is not found, so find the best match where + # 'best' is defined as the glue entry whose relationship set has the + # most relations of any possible relationship set that is a subset + # of the actual depgraph + best_match = frozenset() + for relset_option in set(semtype) - {None}: + if ( + len(relset_option) > len(best_match) + and relset_option < relationships + ): + best_match = relset_option + if not best_match: + if None in semtype: + best_match = None + else: + return None + lookup = semtype[best_match] + + return lookup + + def get_semtypes(self, node): + """ + Based on the node, return a list of plausible semtypes in order of + plausibility. 
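+
+        For example, a 'spec' node for the word 'every' yields ['univ_quant'],
+        while an 'nmod' or 'vmod' node yields its POS tag followed by its
+        relation name.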
+ """ + rel = node["rel"].lower() + word = node["word"].lower() + + if rel == "spec": + if word in SPEC_SEMTYPES: + return [SPEC_SEMTYPES[word]] + else: + return [SPEC_SEMTYPES["default"]] + elif rel in ["nmod", "vmod"]: + return [node["tag"], rel] + else: + return [node["tag"]] + + def get_glueformulas_from_semtype_entry( + self, lookup, word, node, depgraph, counter + ): + glueformulas = [] + + glueFormulaFactory = self.get_GlueFormula_factory() + for meaning, glue in lookup: + gf = glueFormulaFactory(self.get_meaning_formula(meaning, word), glue) + if not len(glueformulas): + gf.word = word + else: + gf.word = f"{word}{len(glueformulas) + 1}" + + gf.glue = self.initialize_labels(gf.glue, node, depgraph, counter.get()) + + glueformulas.append(gf) + return glueformulas + + def get_meaning_formula(self, generic, word): + """ + :param generic: A meaning formula string containing the + parameter "" + :param word: The actual word to be replace "" + """ + word = word.replace(".", "") + return generic.replace("", word) + + def initialize_labels(self, expr, node, depgraph, unique_index): + if isinstance(expr, linearlogic.AtomicExpression): + name = self.find_label_name(expr.name.lower(), node, depgraph, unique_index) + if name[0].isupper(): + return linearlogic.VariableExpression(name) + else: + return linearlogic.ConstantExpression(name) + else: + return linearlogic.ImpExpression( + self.initialize_labels(expr.antecedent, node, depgraph, unique_index), + self.initialize_labels(expr.consequent, node, depgraph, unique_index), + ) + + def find_label_name(self, name, node, depgraph, unique_index): + try: + dot = name.index(".") + + before_dot = name[:dot] + after_dot = name[dot + 1 :] + if before_dot == "super": + return self.find_label_name( + after_dot, depgraph.nodes[node["head"]], depgraph, unique_index + ) + else: + return self.find_label_name( + after_dot, + self.lookup_unique(before_dot, node, depgraph), + depgraph, + unique_index, + ) + except ValueError: + lbl = self.get_label(node) + if name == "f": + return lbl + elif name == "v": + return "%sv" % lbl + elif name == "r": + return "%sr" % lbl + elif name == "super": + return self.get_label(depgraph.nodes[node["head"]]) + elif name == "var": + return f"{lbl.upper()}{unique_index}" + elif name == "a": + return self.get_label(self.lookup_unique("conja", node, depgraph)) + elif name == "b": + return self.get_label(self.lookup_unique("conjb", node, depgraph)) + else: + return self.get_label(self.lookup_unique(name, node, depgraph)) + + def get_label(self, node): + """ + Pick an alphabetic character as identifier for an entity in the model. + + :param value: where to index into the list of characters + :type value: int + """ + value = node["address"] + + letter = [ + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "a", + "b", + "c", + "d", + "e", + ][value - 1] + num = int(value) // 26 + if num > 0: + return letter + str(num) + else: + return letter + + def lookup_unique(self, rel, node, depgraph): + """ + Lookup 'key'. There should be exactly one item in the associated relation. 
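+        A KeyError is raised if the node has no dependent with relation 'rel',
+        or if it has more than one.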
+ """ + deps = [ + depgraph.nodes[dep] + for dep in chain.from_iterable(node["deps"].values()) + if depgraph.nodes[dep]["rel"].lower() == rel.lower() + ] + + if len(deps) == 0: + raise KeyError( + "'{}' doesn't contain a feature '{}'".format(node["word"], rel) + ) + elif len(deps) > 1: + raise KeyError( + "'{}' should only have one feature '{}'".format(node["word"], rel) + ) + else: + return deps[0] + + def get_GlueFormula_factory(self): + return GlueFormula + + +class Glue: + def __init__( + self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False + ): + self.verbose = verbose + self.remove_duplicates = remove_duplicates + self.depparser = depparser + + from nltk import Prover9 + + self.prover = Prover9() + + if semtype_file: + self.semtype_file = semtype_file + else: + self.semtype_file = os.path.join( + "grammars", "sample_grammars", "glue.semtype" + ) + + def train_depparser(self, depgraphs=None): + if depgraphs: + self.depparser.train(depgraphs) + else: + self.depparser.train_from_file( + nltk.data.find( + os.path.join("grammars", "sample_grammars", "glue_train.conll") + ) + ) + + def parse_to_meaning(self, sentence): + readings = [] + for agenda in self.parse_to_compiled(sentence): + readings.extend(self.get_readings(agenda)) + return readings + + def get_readings(self, agenda): + readings = [] + agenda_length = len(agenda) + atomics = dict() + nonatomics = dict() + while agenda: # is not empty + cur = agenda.pop() + glue_simp = cur.glue.simplify() + if isinstance( + glue_simp, linearlogic.ImpExpression + ): # if cur.glue is non-atomic + for key in atomics: + try: + if isinstance(cur.glue, linearlogic.ApplicationExpression): + bindings = cur.glue.bindings + else: + bindings = linearlogic.BindingDict() + glue_simp.antecedent.unify(key, bindings) + for atomic in atomics[key]: + if not ( + cur.indices & atomic.indices + ): # if the sets of indices are disjoint + try: + agenda.append(cur.applyto(atomic)) + except linearlogic.LinearLogicApplicationException: + pass + except linearlogic.UnificationException: + pass + try: + nonatomics[glue_simp.antecedent].append(cur) + except KeyError: + nonatomics[glue_simp.antecedent] = [cur] + + else: # else cur.glue is atomic + for key in nonatomics: + for nonatomic in nonatomics[key]: + try: + if isinstance( + nonatomic.glue, linearlogic.ApplicationExpression + ): + bindings = nonatomic.glue.bindings + else: + bindings = linearlogic.BindingDict() + glue_simp.unify(key, bindings) + if not ( + cur.indices & nonatomic.indices + ): # if the sets of indices are disjoint + try: + agenda.append(nonatomic.applyto(cur)) + except linearlogic.LinearLogicApplicationException: + pass + except linearlogic.UnificationException: + pass + try: + atomics[glue_simp].append(cur) + except KeyError: + atomics[glue_simp] = [cur] + + for entry in atomics: + for gf in atomics[entry]: + if len(gf.indices) == agenda_length: + self._add_to_reading_list(gf, readings) + for entry in nonatomics: + for gf in nonatomics[entry]: + if len(gf.indices) == agenda_length: + self._add_to_reading_list(gf, readings) + return readings + + def _add_to_reading_list(self, glueformula, reading_list): + add_reading = True + if self.remove_duplicates: + for reading in reading_list: + try: + if reading.equiv(glueformula.meaning, self.prover): + add_reading = False + break + except Exception as e: + # if there is an exception, the syntax of the formula + # may not be understandable by the prover, so don't + # throw out the reading. 
+ print("Error when checking logical equality of statements", e) + + if add_reading: + reading_list.append(glueformula.meaning) + + def parse_to_compiled(self, sentence): + gfls = [self.depgraph_to_glue(dg) for dg in self.dep_parse(sentence)] + return [self.gfl_to_compiled(gfl) for gfl in gfls] + + def dep_parse(self, sentence): + """ + Return a dependency graph for the sentence. + + :param sentence: the sentence to be parsed + :type sentence: list(str) + :rtype: DependencyGraph + """ + + # Lazy-initialize the depparser + if self.depparser is None: + from nltk.parse import MaltParser + + self.depparser = MaltParser(tagger=self.get_pos_tagger()) + if not self.depparser._trained: + self.train_depparser() + return self.depparser.parse(sentence, verbose=self.verbose) + + def depgraph_to_glue(self, depgraph): + return self.get_glue_dict().to_glueformula_list(depgraph) + + def get_glue_dict(self): + return GlueDict(self.semtype_file) + + def gfl_to_compiled(self, gfl): + index_counter = Counter() + return_list = [] + for gf in gfl: + return_list.extend(gf.compile(index_counter)) + + if self.verbose: + print("Compiled Glue Premises:") + for cgf in return_list: + print(cgf) + + return return_list + + def get_pos_tagger(self): + from nltk.corpus import brown + + regexp_tagger = RegexpTagger( + [ + (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers + (r"(The|the|A|a|An|an)$", "AT"), # articles + (r".*able$", "JJ"), # adjectives + (r".*ness$", "NN"), # nouns formed from adjectives + (r".*ly$", "RB"), # adverbs + (r".*s$", "NNS"), # plural nouns + (r".*ing$", "VBG"), # gerunds + (r".*ed$", "VBD"), # past tense verbs + (r".*", "NN"), # nouns (default) + ] + ) + brown_train = brown.tagged_sents(categories="news") + unigram_tagger = UnigramTagger(brown_train, backoff=regexp_tagger) + bigram_tagger = BigramTagger(brown_train, backoff=unigram_tagger) + trigram_tagger = TrigramTagger(brown_train, backoff=bigram_tagger) + + # Override particular words + main_tagger = RegexpTagger( + [(r"(A|a|An|an)$", "ex_quant"), (r"(Every|every|All|all)$", "univ_quant")], + backoff=trigram_tagger, + ) + + return main_tagger + + +class DrtGlueFormula(GlueFormula): + def __init__(self, meaning, glue, indices=None): + if not indices: + indices = set() + + if isinstance(meaning, str): + self.meaning = drt.DrtExpression.fromstring(meaning) + elif isinstance(meaning, drt.DrtExpression): + self.meaning = meaning + else: + raise RuntimeError( + "Meaning term neither string or expression: %s, %s" + % (meaning, meaning.__class__) + ) + + if isinstance(glue, str): + self.glue = linearlogic.LinearLogicParser().parse(glue) + elif isinstance(glue, linearlogic.Expression): + self.glue = glue + else: + raise RuntimeError( + "Glue term neither string or expression: %s, %s" + % (glue, glue.__class__) + ) + + self.indices = indices + + def make_VariableExpression(self, name): + return drt.DrtVariableExpression(name) + + def make_LambdaExpression(self, variable, term): + return drt.DrtLambdaExpression(variable, term) + + +class DrtGlueDict(GlueDict): + def get_GlueFormula_factory(self): + return DrtGlueFormula + + +class DrtGlue(Glue): + def __init__( + self, semtype_file=None, remove_duplicates=False, depparser=None, verbose=False + ): + if not semtype_file: + semtype_file = os.path.join( + "grammars", "sample_grammars", "drt_glue.semtype" + ) + Glue.__init__(self, semtype_file, remove_duplicates, depparser, verbose) + + def get_glue_dict(self): + return DrtGlueDict(self.semtype_file) + + +def demo(show_example=-1): + from nltk.parse 
import MaltParser + + examples = [ + "David sees Mary", + "David eats a sandwich", + "every man chases a dog", + "every man believes a dog sleeps", + "John gives David a sandwich", + "John chases himself", + ] + # 'John persuades David to order a pizza', + # 'John tries to go', + # 'John tries to find a unicorn', + # 'John seems to vanish', + # 'a unicorn seems to approach', + # 'every big cat leaves', + # 'every gray cat leaves', + # 'every big gray cat leaves', + # 'a former senator leaves', + + print("============== DEMO ==============") + + tagger = RegexpTagger( + [ + ("^(David|Mary|John)$", "NNP"), + ( + "^(sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$", + "VB", + ), + ("^(go|order|vanish|find|approach)$", "VB"), + ("^(a)$", "ex_quant"), + ("^(every)$", "univ_quant"), + ("^(sandwich|man|dog|pizza|unicorn|cat|senator)$", "NN"), + ("^(big|gray|former)$", "JJ"), + ("^(him|himself)$", "PRP"), + ] + ) + + depparser = MaltParser(tagger=tagger) + glue = Glue(depparser=depparser, verbose=False) + + for (i, sentence) in enumerate(examples): + if i == show_example or show_example == -1: + print(f"[[[Example {i}]]] {sentence}") + for reading in glue.parse_to_meaning(sentence.split()): + print(reading.simplify()) + print("") + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/hole.py b/venv/lib/python3.10/site-packages/nltk/sem/hole.py new file mode 100644 index 0000000000000000000000000000000000000000..4570cb02a3bf183a73a1f9b5e78b8f0e1dac430f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/hole.py @@ -0,0 +1,395 @@ +# Natural Language Toolkit: Logic +# +# Author: Peter Wang +# Updated by: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +An implementation of the Hole Semantics model, following Blackburn and Bos, +Representation and Inference for Natural Language (CSLI, 2005). + +The semantic representations are built by the grammar hole.fcfg. +This module contains driver code to read in sentences and parse them +according to a hole semantics grammar. + +After parsing, the semantic representation is in the form of an underspecified +representation that is not easy to read. We use a "plugging" algorithm to +convert that representation into first-order logic formulas. +""" + +from functools import reduce + +from nltk.parse import load_parser +from nltk.sem.logic import ( + AllExpression, + AndExpression, + ApplicationExpression, + ExistsExpression, + IffExpression, + ImpExpression, + LambdaExpression, + NegatedExpression, + OrExpression, +) +from nltk.sem.skolemize import skolemize + +# Note that in this code there may be multiple types of trees being referred to: +# +# 1. parse trees +# 2. the underspecified representation +# 3. first-order logic formula trees +# 4. the search space when plugging (search tree) +# + + +class Constants: + ALL = "ALL" + EXISTS = "EXISTS" + NOT = "NOT" + AND = "AND" + OR = "OR" + IMP = "IMP" + IFF = "IFF" + PRED = "PRED" + LEQ = "LEQ" + HOLE = "HOLE" + LABEL = "LABEL" + + MAP = { + ALL: lambda v, e: AllExpression(v.variable, e), + EXISTS: lambda v, e: ExistsExpression(v.variable, e), + NOT: NegatedExpression, + AND: AndExpression, + OR: OrExpression, + IMP: ImpExpression, + IFF: IffExpression, + PRED: ApplicationExpression, + } + + +class HoleSemantics: + """ + This class holds the broken-down components of a hole semantics, i.e. 
it + extracts the holes, labels, logic formula fragments and constraints out of + a big conjunction of such as produced by the hole semantics grammar. It + then provides some operations on the semantics dealing with holes, labels + and finding legal ways to plug holes with labels. + """ + + def __init__(self, usr): + """ + Constructor. `usr' is a ``sem.Expression`` representing an + Underspecified Representation Structure (USR). A USR has the following + special predicates: + ALL(l,v,n), + EXISTS(l,v,n), + AND(l,n,n), + OR(l,n,n), + IMP(l,n,n), + IFF(l,n,n), + PRED(l,v,n,v[,v]*) where the brackets and star indicate zero or more repetitions, + LEQ(n,n), + HOLE(n), + LABEL(n) + where l is the label of the node described by the predicate, n is either + a label or a hole, and v is a variable. + """ + self.holes = set() + self.labels = set() + self.fragments = {} # mapping of label -> formula fragment + self.constraints = set() # set of Constraints + self._break_down(usr) + self.top_most_labels = self._find_top_most_labels() + self.top_hole = self._find_top_hole() + + def is_node(self, x): + """ + Return true if x is a node (label or hole) in this semantic + representation. + """ + return x in (self.labels | self.holes) + + def _break_down(self, usr): + """ + Extract holes, labels, formula fragments and constraints from the hole + semantics underspecified representation (USR). + """ + if isinstance(usr, AndExpression): + self._break_down(usr.first) + self._break_down(usr.second) + elif isinstance(usr, ApplicationExpression): + func, args = usr.uncurry() + if func.variable.name == Constants.LEQ: + self.constraints.add(Constraint(args[0], args[1])) + elif func.variable.name == Constants.HOLE: + self.holes.add(args[0]) + elif func.variable.name == Constants.LABEL: + self.labels.add(args[0]) + else: + label = args[0] + assert label not in self.fragments + self.fragments[label] = (func, args[1:]) + else: + raise ValueError(usr.label()) + + def _find_top_nodes(self, node_list): + top_nodes = node_list.copy() + for f in self.fragments.values(): + # the label is the first argument of the predicate + args = f[1] + for arg in args: + if arg in node_list: + top_nodes.discard(arg) + return top_nodes + + def _find_top_most_labels(self): + """ + Return the set of labels which are not referenced directly as part of + another formula fragment. These will be the top-most labels for the + subtree that they are part of. + """ + return self._find_top_nodes(self.labels) + + def _find_top_hole(self): + """ + Return the hole that will be the top of the formula tree. + """ + top_holes = self._find_top_nodes(self.holes) + assert len(top_holes) == 1 # it must be unique + return top_holes.pop() + + def pluggings(self): + """ + Calculate and return all the legal pluggings (mappings of labels to + holes) of this semantics given the constraints. + """ + record = [] + self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {}, record) + return record + + def _plug_nodes(self, queue, potential_labels, plug_acc, record): + """ + Plug the nodes in `queue' with the labels in `potential_labels'. + + Each element of `queue' is a tuple of the node to plug and the list of + ancestor holes from the root of the graph to that node. + + `potential_labels' is a set of the labels which are still available for + plugging. + + `plug_acc' is the incomplete mapping of holes to labels made on the + current branch of the search tree so far. + + `record' is a list of all the complete pluggings that we have found in + total so far. 
It is the only parameter that is destructively updated. + """ + if queue != []: + (node, ancestors) = queue[0] + if node in self.holes: + # The node is a hole, try to plug it. + self._plug_hole( + node, ancestors, queue[1:], potential_labels, plug_acc, record + ) + else: + assert node in self.labels + # The node is a label. Replace it in the queue by the holes and + # labels in the formula fragment named by that label. + args = self.fragments[node][1] + head = [(a, ancestors) for a in args if self.is_node(a)] + self._plug_nodes(head + queue[1:], potential_labels, plug_acc, record) + else: + raise Exception("queue empty") + + def _plug_hole(self, hole, ancestors0, queue, potential_labels0, plug_acc0, record): + """ + Try all possible ways of plugging a single hole. + See _plug_nodes for the meanings of the parameters. + """ + # Add the current hole we're trying to plug into the list of ancestors. + assert hole not in ancestors0 + ancestors = [hole] + ancestors0 + + # Try each potential label in this hole in turn. + for l in potential_labels0: + # Is the label valid in this hole? + if self._violates_constraints(l, ancestors): + continue + + plug_acc = plug_acc0.copy() + plug_acc[hole] = l + potential_labels = potential_labels0.copy() + potential_labels.remove(l) + + if len(potential_labels) == 0: + # No more potential labels. That must mean all the holes have + # been filled so we have found a legal plugging so remember it. + # + # Note that the queue might not be empty because there might + # be labels on there that point to formula fragments with + # no holes in them. _sanity_check_plugging will make sure + # all holes are filled. + self._sanity_check_plugging(plug_acc, self.top_hole, []) + record.append(plug_acc) + else: + # Recursively try to fill in the rest of the holes in the + # queue. The label we just plugged into the hole could have + # holes of its own so at the end of the queue. Putting it on + # the end of the queue gives us a breadth-first search, so that + # all the holes at level i of the formula tree are filled + # before filling level i+1. + # A depth-first search would work as well since the trees must + # be finite but the bookkeeping would be harder. + self._plug_nodes( + queue + [(l, ancestors)], potential_labels, plug_acc, record + ) + + def _violates_constraints(self, label, ancestors): + """ + Return True if the `label' cannot be placed underneath the holes given + by the set `ancestors' because it would violate the constraints imposed + on it. + """ + for c in self.constraints: + if c.lhs == label: + if c.rhs not in ancestors: + return True + return False + + def _sanity_check_plugging(self, plugging, node, ancestors): + """ + Make sure that a given plugging is legal. We recursively go through + each node and make sure that no constraints are violated. + We also check that all holes have been filled. + """ + if node in self.holes: + ancestors = [node] + ancestors + label = plugging[node] + else: + label = node + assert label in self.labels + for c in self.constraints: + if c.lhs == label: + assert c.rhs in ancestors + args = self.fragments[label][1] + for arg in args: + if self.is_node(arg): + self._sanity_check_plugging(plugging, arg, [label] + ancestors) + + def formula_tree(self, plugging): + """ + Return the first-order logic formula tree for this underspecified + representation using the plugging given. 
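+
+        The plugging should be one of the mappings returned by ``pluggings()``;
+        each hole is replaced by the formula fragment named by the label it is
+        plugged with.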
+ """ + return self._formula_tree(plugging, self.top_hole) + + def _formula_tree(self, plugging, node): + if node in plugging: + return self._formula_tree(plugging, plugging[node]) + elif node in self.fragments: + pred, args = self.fragments[node] + children = [self._formula_tree(plugging, arg) for arg in args] + return reduce(Constants.MAP[pred.variable.name], children) + else: + return node + + +class Constraint: + """ + This class represents a constraint of the form (L =< N), + where L is a label and N is a node (a label or a hole). + """ + + def __init__(self, lhs, rhs): + self.lhs = lhs + self.rhs = rhs + + def __eq__(self, other): + if self.__class__ == other.__class__: + return self.lhs == other.lhs and self.rhs == other.rhs + else: + return False + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash(repr(self)) + + def __repr__(self): + return f"({self.lhs} < {self.rhs})" + + +def hole_readings(sentence, grammar_filename=None, verbose=False): + if not grammar_filename: + grammar_filename = "grammars/sample_grammars/hole.fcfg" + + if verbose: + print("Reading grammar file", grammar_filename) + + parser = load_parser(grammar_filename) + + # Parse the sentence. + tokens = sentence.split() + trees = list(parser.parse(tokens)) + if verbose: + print("Got %d different parses" % len(trees)) + + all_readings = [] + for tree in trees: + # Get the semantic feature from the top of the parse tree. + sem = tree.label()["SEM"].simplify() + + # Print the raw semantic representation. + if verbose: + print("Raw: ", sem) + + # Skolemize away all quantifiers. All variables become unique. + while isinstance(sem, LambdaExpression): + sem = sem.term + skolemized = skolemize(sem) + + if verbose: + print("Skolemized:", skolemized) + + # Break the hole semantics representation down into its components + # i.e. holes, labels, formula fragments and constraints. + hole_sem = HoleSemantics(skolemized) + + # Maybe show the details of the semantic representation. + if verbose: + print("Holes: ", hole_sem.holes) + print("Labels: ", hole_sem.labels) + print("Constraints: ", hole_sem.constraints) + print("Top hole: ", hole_sem.top_hole) + print("Top labels: ", hole_sem.top_most_labels) + print("Fragments:") + for l, f in hole_sem.fragments.items(): + print(f"\t{l}: {f}") + + # Find all the possible ways to plug the formulas together. + pluggings = hole_sem.pluggings() + + # Build FOL formula trees using the pluggings. + readings = list(map(hole_sem.formula_tree, pluggings)) + + # Print out the formulas in a textual format. + if verbose: + for i, r in enumerate(readings): + print() + print("%d. 
%s" % (i, r)) + print() + + all_readings.extend(readings) + + return all_readings + + +if __name__ == "__main__": + for r in hole_readings("a dog barks"): + print(r) + print() + for r in hole_readings("every girl chases a dog"): + print(r) diff --git a/venv/lib/python3.10/site-packages/nltk/sem/lfg.py b/venv/lib/python3.10/site-packages/nltk/sem/lfg.py new file mode 100644 index 0000000000000000000000000000000000000000..13473b0087940c9b42cc4c36d5f442bb0f78eafe --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/lfg.py @@ -0,0 +1,261 @@ +# Natural Language Toolkit: Lexical Functional Grammar +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from itertools import chain + +from nltk.internals import Counter + + +class FStructure(dict): + def safeappend(self, key, item): + """ + Append 'item' to the list at 'key'. If no list exists for 'key', then + construct one. + """ + if key not in self: + self[key] = [] + self[key].append(item) + + def __setitem__(self, key, value): + dict.__setitem__(self, key.lower(), value) + + def __getitem__(self, key): + return dict.__getitem__(self, key.lower()) + + def __contains__(self, key): + return dict.__contains__(self, key.lower()) + + def to_glueformula_list(self, glue_dict): + depgraph = self.to_depgraph() + return glue_dict.to_glueformula_list(depgraph) + + def to_depgraph(self, rel=None): + from nltk.parse.dependencygraph import DependencyGraph + + depgraph = DependencyGraph() + nodes = depgraph.nodes + + self._to_depgraph(nodes, 0, "ROOT") + + # Add all the dependencies for all the nodes + for address, node in nodes.items(): + for n2 in (n for n in nodes.values() if n["rel"] != "TOP"): + if n2["head"] == address: + relation = n2["rel"] + node["deps"].setdefault(relation, []) + node["deps"][relation].append(n2["address"]) + + depgraph.root = nodes[1] + + return depgraph + + def _to_depgraph(self, nodes, head, rel): + index = len(nodes) + + nodes[index].update( + { + "address": index, + "word": self.pred[0], + "tag": self.pred[1], + "head": head, + "rel": rel, + } + ) + + for feature in sorted(self): + for item in sorted(self[feature]): + if isinstance(item, FStructure): + item._to_depgraph(nodes, index, feature) + elif isinstance(item, tuple): + new_index = len(nodes) + nodes[new_index].update( + { + "address": new_index, + "word": item[0], + "tag": item[1], + "head": index, + "rel": feature, + } + ) + elif isinstance(item, list): + for n in item: + n._to_depgraph(nodes, index, feature) + else: + raise Exception( + "feature %s is not an FStruct, a list, or a tuple" % feature + ) + + @staticmethod + def read_depgraph(depgraph): + return FStructure._read_depgraph(depgraph.root, depgraph) + + @staticmethod + def _read_depgraph(node, depgraph, label_counter=None, parent=None): + if not label_counter: + label_counter = Counter() + + if node["rel"].lower() in ["spec", "punct"]: + # the value of a 'spec' entry is a word, not an FStructure + return (node["word"], node["tag"]) + + else: + fstruct = FStructure() + fstruct.pred = None + fstruct.label = FStructure._make_label(label_counter.get()) + + fstruct.parent = parent + + word, tag = node["word"], node["tag"] + if tag[:2] == "VB": + if tag[2:3] == "D": + fstruct.safeappend("tense", ("PAST", "tense")) + fstruct.pred = (word, tag[:2]) + + if not fstruct.pred: + fstruct.pred = (word, tag) + + children = [ + depgraph.nodes[idx] + for idx in chain.from_iterable(node["deps"].values()) + ] + for child in children: + 
fstruct.safeappend( + child["rel"], + FStructure._read_depgraph(child, depgraph, label_counter, fstruct), + ) + + return fstruct + + @staticmethod + def _make_label(value): + """ + Pick an alphabetic character as identifier for an entity in the model. + + :param value: where to index into the list of characters + :type value: int + """ + letter = [ + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "a", + "b", + "c", + "d", + "e", + ][value - 1] + num = int(value) // 26 + if num > 0: + return letter + str(num) + else: + return letter + + def __repr__(self): + return self.__str__().replace("\n", "") + + def __str__(self): + return self.pretty_format() + + def pretty_format(self, indent=3): + try: + accum = "%s:[" % self.label + except NameError: + accum = "[" + try: + accum += "pred '%s'" % (self.pred[0]) + except NameError: + pass + + for feature in sorted(self): + for item in self[feature]: + if isinstance(item, FStructure): + next_indent = indent + len(feature) + 3 + len(self.label) + accum += "\n{}{} {}".format( + " " * (indent), + feature, + item.pretty_format(next_indent), + ) + elif isinstance(item, tuple): + accum += "\n{}{} '{}'".format(" " * (indent), feature, item[0]) + elif isinstance(item, list): + accum += "\n{}{} {{{}}}".format( + " " * (indent), + feature, + ("\n%s" % (" " * (indent + len(feature) + 2))).join(item), + ) + else: # ERROR + raise Exception( + "feature %s is not an FStruct, a list, or a tuple" % feature + ) + return accum + "]" + + +def demo_read_depgraph(): + from nltk.parse.dependencygraph import DependencyGraph + + dg1 = DependencyGraph( + """\ +Esso NNP 2 SUB +said VBD 0 ROOT +the DT 5 NMOD +Whiting NNP 5 NMOD +field NN 6 SUB +started VBD 2 VMOD +production NN 6 OBJ +Tuesday NNP 6 VMOD +""" + ) + dg2 = DependencyGraph( + """\ +John NNP 2 SUB +sees VBP 0 ROOT +Mary NNP 2 OBJ +""" + ) + dg3 = DependencyGraph( + """\ +a DT 2 SPEC +man NN 3 SUBJ +walks VB 0 ROOT +""" + ) + dg4 = DependencyGraph( + """\ +every DT 2 SPEC +girl NN 3 SUBJ +chases VB 0 ROOT +a DT 5 SPEC +dog NN 3 OBJ +""" + ) + + depgraphs = [dg1, dg2, dg3, dg4] + for dg in depgraphs: + print(FStructure.read_depgraph(dg)) + + +if __name__ == "__main__": + demo_read_depgraph() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/linearlogic.py b/venv/lib/python3.10/site-packages/nltk/sem/linearlogic.py new file mode 100644 index 0000000000000000000000000000000000000000..474f835e2f1bbe19fe2486e259bea2d08fa473b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/linearlogic.py @@ -0,0 +1,482 @@ +# Natural Language Toolkit: Linear Logic +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from nltk.internals import Counter +from nltk.sem.logic import APP, LogicParser + +_counter = Counter() + + +class Tokens: + # Punctuation + OPEN = "(" + CLOSE = ")" + + # Operations + IMP = "-o" + + PUNCT = [OPEN, CLOSE] + TOKENS = PUNCT + [IMP] + + +class LinearLogicParser(LogicParser): + """A linear logic expression parser.""" + + def __init__(self): + LogicParser.__init__(self) + + self.operator_precedence = {APP: 1, Tokens.IMP: 2, None: 3} + self.right_associated_operations += [Tokens.IMP] + + def get_all_symbols(self): + return Tokens.TOKENS + + def handle(self, tok, context): + if tok not in Tokens.TOKENS: + return self.handle_variable(tok, context) + elif tok == Tokens.OPEN: + return self.handle_open(tok, context) + + def 
get_BooleanExpression_factory(self, tok): + if tok == Tokens.IMP: + return ImpExpression + else: + return None + + def make_BooleanExpression(self, factory, first, second): + return factory(first, second) + + def attempt_ApplicationExpression(self, expression, context): + """Attempt to make an application expression. If the next tokens + are an argument in parens, then the argument expression is a + function being applied to the arguments. Otherwise, return the + argument expression.""" + if self.has_priority(APP, context): + if self.inRange(0) and self.token(0) == Tokens.OPEN: + self.token() # swallow then open paren + argument = self.process_next_expression(APP) + self.assertNextToken(Tokens.CLOSE) + expression = ApplicationExpression(expression, argument, None) + return expression + + def make_VariableExpression(self, name): + if name[0].isupper(): + return VariableExpression(name) + else: + return ConstantExpression(name) + + +class Expression: + + _linear_logic_parser = LinearLogicParser() + + @classmethod + def fromstring(cls, s): + return cls._linear_logic_parser.parse(s) + + def applyto(self, other, other_indices=None): + return ApplicationExpression(self, other, other_indices) + + def __call__(self, other): + return self.applyto(other) + + def __repr__(self): + return f"<{self.__class__.__name__} {self}>" + + +class AtomicExpression(Expression): + def __init__(self, name, dependencies=None): + """ + :param name: str for the constant name + :param dependencies: list of int for the indices on which this atom is dependent + """ + assert isinstance(name, str) + self.name = name + + if not dependencies: + dependencies = [] + self.dependencies = dependencies + + def simplify(self, bindings=None): + """ + If 'self' is bound by 'bindings', return the atomic to which it is bound. + Otherwise, return self. + + :param bindings: ``BindingDict`` A dictionary of bindings used to simplify + :return: ``AtomicExpression`` + """ + if bindings and self in bindings: + return bindings[self] + else: + return self + + def compile_pos(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas + """ + self.dependencies = [] + return (self, []) + + def compile_neg(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas + """ + self.dependencies = [] + return (self, []) + + def initialize_labels(self, fstruct): + self.name = fstruct.initialize_label(self.name.lower()) + + def __eq__(self, other): + return self.__class__ == other.__class__ and self.name == other.name + + def __ne__(self, other): + return not self == other + + def __str__(self): + accum = self.name + if self.dependencies: + accum += "%s" % self.dependencies + return accum + + def __hash__(self): + return hash(self.name) + + +class ConstantExpression(AtomicExpression): + def unify(self, other, bindings): + """ + If 'other' is a constant, then it must be equal to 'self'. If 'other' is a variable, + then it must not be bound to anything other than 'self'. 
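+
+        For example, unifying the constant ``g`` with the variable ``H`` adds
+        the binding ``H: g``, while unifying ``g`` with the constant ``f``
+        raises a ``UnificationException``.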
+ + :param other: ``Expression`` + :param bindings: ``BindingDict`` A dictionary of all current bindings + :return: ``BindingDict`` A new combined dictionary of of 'bindings' and any new binding + :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings' + """ + assert isinstance(other, Expression) + if isinstance(other, VariableExpression): + try: + return bindings + BindingDict([(other, self)]) + except VariableBindingException: + pass + elif self == other: + return bindings + raise UnificationException(self, other, bindings) + + +class VariableExpression(AtomicExpression): + def unify(self, other, bindings): + """ + 'self' must not be bound to anything other than 'other'. + + :param other: ``Expression`` + :param bindings: ``BindingDict`` A dictionary of all current bindings + :return: ``BindingDict`` A new combined dictionary of of 'bindings' and the new binding + :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings' + """ + assert isinstance(other, Expression) + try: + if self == other: + return bindings + else: + return bindings + BindingDict([(self, other)]) + except VariableBindingException as e: + raise UnificationException(self, other, bindings) from e + + +class ImpExpression(Expression): + def __init__(self, antecedent, consequent): + """ + :param antecedent: ``Expression`` for the antecedent + :param consequent: ``Expression`` for the consequent + """ + assert isinstance(antecedent, Expression) + assert isinstance(consequent, Expression) + self.antecedent = antecedent + self.consequent = consequent + + def simplify(self, bindings=None): + return self.__class__( + self.antecedent.simplify(bindings), self.consequent.simplify(bindings) + ) + + def unify(self, other, bindings): + """ + Both the antecedent and consequent of 'self' and 'other' must unify. 
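+
+        For example, ``(g -o f)`` unifies with ``(G -o f)`` under the binding
+        ``G: g``.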
+ + :param other: ``ImpExpression`` + :param bindings: ``BindingDict`` A dictionary of all current bindings + :return: ``BindingDict`` A new combined dictionary of of 'bindings' and any new bindings + :raise UnificationException: If 'self' and 'other' cannot be unified in the context of 'bindings' + """ + assert isinstance(other, ImpExpression) + try: + return ( + bindings + + self.antecedent.unify(other.antecedent, bindings) + + self.consequent.unify(other.consequent, bindings) + ) + except VariableBindingException as e: + raise UnificationException(self, other, bindings) from e + + def compile_pos(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,set) for the compiled linear logic and any newly created glue formulas + """ + (a, a_new) = self.antecedent.compile_neg(index_counter, glueFormulaFactory) + (c, c_new) = self.consequent.compile_pos(index_counter, glueFormulaFactory) + return (ImpExpression(a, c), a_new + c_new) + + def compile_neg(self, index_counter, glueFormulaFactory): + """ + From Iddo Lev's PhD Dissertation p108-109 + + :param index_counter: ``Counter`` for unique indices + :param glueFormulaFactory: ``GlueFormula`` for creating new glue formulas + :return: (``Expression``,list of ``GlueFormula``) for the compiled linear logic and any newly created glue formulas + """ + (a, a_new) = self.antecedent.compile_pos(index_counter, glueFormulaFactory) + (c, c_new) = self.consequent.compile_neg(index_counter, glueFormulaFactory) + fresh_index = index_counter.get() + c.dependencies.append(fresh_index) + new_v = glueFormulaFactory("v%s" % fresh_index, a, {fresh_index}) + return (c, a_new + c_new + [new_v]) + + def initialize_labels(self, fstruct): + self.antecedent.initialize_labels(fstruct) + self.consequent.initialize_labels(fstruct) + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.antecedent == other.antecedent + and self.consequent == other.consequent + ) + + def __ne__(self, other): + return not self == other + + def __str__(self): + return "{}{} {} {}{}".format( + Tokens.OPEN, + self.antecedent, + Tokens.IMP, + self.consequent, + Tokens.CLOSE, + ) + + def __hash__(self): + return hash(f"{hash(self.antecedent)}{Tokens.IMP}{hash(self.consequent)}") + + +class ApplicationExpression(Expression): + def __init__(self, function, argument, argument_indices=None): + """ + :param function: ``Expression`` for the function + :param argument: ``Expression`` for the argument + :param argument_indices: set for the indices of the glue formula from which the argument came + :raise LinearLogicApplicationException: If 'function' cannot be applied to 'argument' given 'argument_indices'. + """ + function_simp = function.simplify() + argument_simp = argument.simplify() + + assert isinstance(function_simp, ImpExpression) + assert isinstance(argument_simp, Expression) + + bindings = BindingDict() + + try: + if isinstance(function, ApplicationExpression): + bindings += function.bindings + if isinstance(argument, ApplicationExpression): + bindings += argument.bindings + bindings += function_simp.antecedent.unify(argument_simp, bindings) + except UnificationException as e: + raise LinearLogicApplicationException( + f"Cannot apply {function_simp} to {argument_simp}. 
{e}" + ) from e + + # If you are running it on complied premises, more conditions apply + if argument_indices: + # A.dependencies of (A -o (B -o C)) must be a proper subset of argument_indices + if not set(function_simp.antecedent.dependencies) < argument_indices: + raise LinearLogicApplicationException( + "Dependencies unfulfilled when attempting to apply Linear Logic formula %s to %s" + % (function_simp, argument_simp) + ) + if set(function_simp.antecedent.dependencies) == argument_indices: + raise LinearLogicApplicationException( + "Dependencies not a proper subset of indices when attempting to apply Linear Logic formula %s to %s" + % (function_simp, argument_simp) + ) + + self.function = function + self.argument = argument + self.bindings = bindings + + def simplify(self, bindings=None): + """ + Since function is an implication, return its consequent. There should be + no need to check that the application is valid since the checking is done + by the constructor. + + :param bindings: ``BindingDict`` A dictionary of bindings used to simplify + :return: ``Expression`` + """ + if not bindings: + bindings = self.bindings + + return self.function.simplify(bindings).consequent + + def __eq__(self, other): + return ( + self.__class__ == other.__class__ + and self.function == other.function + and self.argument == other.argument + ) + + def __ne__(self, other): + return not self == other + + def __str__(self): + return "%s" % self.function + Tokens.OPEN + "%s" % self.argument + Tokens.CLOSE + + def __hash__(self): + return hash(f"{hash(self.antecedent)}{Tokens.OPEN}{hash(self.consequent)}") + + +class BindingDict: + def __init__(self, bindings=None): + """ + :param bindings: + list [(``VariableExpression``, ``AtomicExpression``)] to initialize the dictionary + dict {``VariableExpression``: ``AtomicExpression``} to initialize the dictionary + """ + self.d = {} + + if isinstance(bindings, dict): + bindings = bindings.items() + + if bindings: + for (v, b) in bindings: + self[v] = b + + def __setitem__(self, variable, binding): + """ + A binding is consistent with the dict if its variable is not already bound, OR if its + variable is already bound to its argument. 
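+
+        For example, once ``G`` is bound to ``g``, binding ``G`` to ``g`` again
+        changes nothing, but binding ``G`` to ``f`` raises a
+        ``VariableBindingException``.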
+ + :param variable: ``VariableExpression`` The variable bind + :param binding: ``Expression`` The expression to which 'variable' should be bound + :raise VariableBindingException: If the variable cannot be bound in this dictionary + """ + assert isinstance(variable, VariableExpression) + assert isinstance(binding, Expression) + + assert variable != binding + + existing = self.d.get(variable, None) + + if not existing or binding == existing: + self.d[variable] = binding + else: + raise VariableBindingException( + "Variable %s already bound to another value" % (variable) + ) + + def __getitem__(self, variable): + """ + Return the expression to which 'variable' is bound + """ + assert isinstance(variable, VariableExpression) + + intermediate = self.d[variable] + while intermediate: + try: + intermediate = self.d[intermediate] + except KeyError: + return intermediate + + def __contains__(self, item): + return item in self.d + + def __add__(self, other): + """ + :param other: ``BindingDict`` The dict with which to combine self + :return: ``BindingDict`` A new dict containing all the elements of both parameters + :raise VariableBindingException: If the parameter dictionaries are not consistent with each other + """ + try: + combined = BindingDict() + for v in self.d: + combined[v] = self.d[v] + for v in other.d: + combined[v] = other.d[v] + return combined + except VariableBindingException as e: + raise VariableBindingException( + "Attempting to add two contradicting" + " VariableBindingsLists: %s, %s" % (self, other) + ) from e + + def __ne__(self, other): + return not self == other + + def __eq__(self, other): + if not isinstance(other, BindingDict): + raise TypeError + return self.d == other.d + + def __str__(self): + return "{" + ", ".join(f"{v}: {self.d[v]}" for v in sorted(self.d.keys())) + "}" + + def __repr__(self): + return "BindingDict: %s" % self + + +class VariableBindingException(Exception): + pass + + +class UnificationException(Exception): + def __init__(self, a, b, bindings): + Exception.__init__(self, f"Cannot unify {a} with {b} given {bindings}") + + +class LinearLogicApplicationException(Exception): + pass + + +def demo(): + lexpr = Expression.fromstring + + print(lexpr(r"f")) + print(lexpr(r"(g -o f)")) + print(lexpr(r"((g -o G) -o G)")) + print(lexpr(r"g -o h -o f")) + print(lexpr(r"(g -o f)(g)").simplify()) + print(lexpr(r"(H -o f)(g)").simplify()) + print(lexpr(r"((g -o G) -o G)((g -o f))").simplify()) + print(lexpr(r"(H -o H)((g -o f))").simplify()) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/logic.py b/venv/lib/python3.10/site-packages/nltk/sem/logic.py new file mode 100644 index 0000000000000000000000000000000000000000..aed3a118760b0a9111fc0445df870231f943e1e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/logic.py @@ -0,0 +1,2065 @@ +# Natural Language Toolkit: Logic +# +# Author: Dan Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +A version of first order predicate logic, built on +top of the typed lambda calculus. 
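+
+A minimal usage sketch (illustrative)::
+
+    from nltk.sem.logic import LogicParser
+
+    parser = LogicParser()
+    expression = parser.parse('all x.(man(x) -> mortal(x))')
+    print(expression)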
+""" + +import operator +import re +from collections import defaultdict +from functools import reduce, total_ordering + +from nltk.internals import Counter +from nltk.util import Trie + +APP = "APP" + +_counter = Counter() + + +class Tokens: + LAMBDA = "\\" + LAMBDA_LIST = ["\\"] + + # Quantifiers + EXISTS = "exists" + EXISTS_LIST = ["some", "exists", "exist"] + ALL = "all" + ALL_LIST = ["all", "forall"] + IOTA = "iota" + IOTA_LIST = ["iota"] + + # Punctuation + DOT = "." + OPEN = "(" + CLOSE = ")" + COMMA = "," + + # Operations + NOT = "-" + NOT_LIST = ["not", "-", "!"] + AND = "&" + AND_LIST = ["and", "&", "^"] + OR = "|" + OR_LIST = ["or", "|"] + IMP = "->" + IMP_LIST = ["implies", "->", "=>"] + IFF = "<->" + IFF_LIST = ["iff", "<->", "<=>"] + EQ = "=" + EQ_LIST = ["=", "=="] + NEQ = "!=" + NEQ_LIST = ["!="] + + # Collections of tokens + BINOPS = AND_LIST + OR_LIST + IMP_LIST + IFF_LIST + QUANTS = EXISTS_LIST + ALL_LIST + IOTA_LIST + PUNCT = [DOT, OPEN, CLOSE, COMMA] + + TOKENS = BINOPS + EQ_LIST + NEQ_LIST + QUANTS + LAMBDA_LIST + PUNCT + NOT_LIST + + # Special + SYMBOLS = [x for x in TOKENS if re.match(r"^[-\\.(),!&^|>=<]*$", x)] + + +def boolean_ops(): + """ + Boolean operators + """ + names = ["negation", "conjunction", "disjunction", "implication", "equivalence"] + for pair in zip(names, [Tokens.NOT, Tokens.AND, Tokens.OR, Tokens.IMP, Tokens.IFF]): + print("%-15s\t%s" % pair) + + +def equality_preds(): + """ + Equality predicates + """ + names = ["equality", "inequality"] + for pair in zip(names, [Tokens.EQ, Tokens.NEQ]): + print("%-15s\t%s" % pair) + + +def binding_ops(): + """ + Binding operators + """ + names = ["existential", "universal", "lambda"] + for pair in zip(names, [Tokens.EXISTS, Tokens.ALL, Tokens.LAMBDA, Tokens.IOTA]): + print("%-15s\t%s" % pair) + + +class LogicParser: + """A lambda calculus expression parser.""" + + def __init__(self, type_check=False): + """ + :param type_check: should type checking be performed + to their types? + :type type_check: bool + """ + assert isinstance(type_check, bool) + + self._currentIndex = 0 + self._buffer = [] + self.type_check = type_check + + """A list of tuples of quote characters. The 4-tuple is comprised + of the start character, the end character, the escape character, and + a boolean indicating whether the quotes should be included in the + result. Quotes are used to signify that a token should be treated as + atomic, ignoring any special characters within the token. The escape + character allows the quote end character to be used within the quote. + If True, the boolean indicates that the final token should contain the + quote and escape characters. + This method exists to be overridden""" + self.quote_chars = [] + + self.operator_precedence = dict( + [(x, 1) for x in Tokens.LAMBDA_LIST] + + [(x, 2) for x in Tokens.NOT_LIST] + + [(APP, 3)] + + [(x, 4) for x in Tokens.EQ_LIST + Tokens.NEQ_LIST] + + [(x, 5) for x in Tokens.QUANTS] + + [(x, 6) for x in Tokens.AND_LIST] + + [(x, 7) for x in Tokens.OR_LIST] + + [(x, 8) for x in Tokens.IMP_LIST] + + [(x, 9) for x in Tokens.IFF_LIST] + + [(None, 10)] + ) + self.right_associated_operations = [APP] + + def parse(self, data, signature=None): + """ + Parse the expression. 
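+
+        For example, ``parse('exists x.(dog(x) & barks(x))')`` returns an
+        ``ExistsExpression`` whose term is an ``AndExpression``.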
+ + :param data: str for the input to be parsed + :param signature: ``dict`` that maps variable names to type + strings + :returns: a parsed Expression + """ + data = data.rstrip() + + self._currentIndex = 0 + self._buffer, mapping = self.process(data) + + try: + result = self.process_next_expression(None) + if self.inRange(0): + raise UnexpectedTokenException(self._currentIndex + 1, self.token(0)) + except LogicalExpressionException as e: + msg = "{}\n{}\n{}^".format(e, data, " " * mapping[e.index - 1]) + raise LogicalExpressionException(None, msg) from e + + if self.type_check: + result.typecheck(signature) + + return result + + def process(self, data): + """Split the data into tokens""" + out = [] + mapping = {} + tokenTrie = Trie(self.get_all_symbols()) + token = "" + data_idx = 0 + token_start_idx = data_idx + while data_idx < len(data): + cur_data_idx = data_idx + quoted_token, data_idx = self.process_quoted_token(data_idx, data) + if quoted_token: + if not token: + token_start_idx = cur_data_idx + token += quoted_token + continue + + st = tokenTrie + c = data[data_idx] + symbol = "" + while c in st: + symbol += c + st = st[c] + if len(data) - data_idx > len(symbol): + c = data[data_idx + len(symbol)] + else: + break + if Trie.LEAF in st: + # token is a complete symbol + if token: + mapping[len(out)] = token_start_idx + out.append(token) + token = "" + mapping[len(out)] = data_idx + out.append(symbol) + data_idx += len(symbol) + else: + if data[data_idx] in " \t\n": # any whitespace + if token: + mapping[len(out)] = token_start_idx + out.append(token) + token = "" + else: + if not token: + token_start_idx = data_idx + token += data[data_idx] + data_idx += 1 + if token: + mapping[len(out)] = token_start_idx + out.append(token) + mapping[len(out)] = len(data) + mapping[len(out) + 1] = len(data) + 1 + return out, mapping + + def process_quoted_token(self, data_idx, data): + token = "" + c = data[data_idx] + i = data_idx + for start, end, escape, incl_quotes in self.quote_chars: + if c == start: + if incl_quotes: + token += c + i += 1 + while data[i] != end: + if data[i] == escape: + if incl_quotes: + token += data[i] + i += 1 + if len(data) == i: # if there are no more chars + raise LogicalExpressionException( + None, + "End of input reached. " + "Escape character [%s] found at end." % escape, + ) + token += data[i] + else: + token += data[i] + i += 1 + if len(data) == i: + raise LogicalExpressionException( + None, "End of input reached. " "Expected: [%s]" % end + ) + if incl_quotes: + token += data[i] + i += 1 + if not token: + raise LogicalExpressionException(None, "Empty quoted token found") + break + return token, i + + def get_all_symbols(self): + """This method exists to be overridden""" + return Tokens.SYMBOLS + + def inRange(self, location): + """Return TRUE if the given location is within the buffer""" + return self._currentIndex + location < len(self._buffer) + + def token(self, location=None): + """Get the next waiting token. 
If a location is given, then + return the token at currentIndex+location without advancing + currentIndex; setting it gives lookahead/lookback capability.""" + try: + if location is None: + tok = self._buffer[self._currentIndex] + self._currentIndex += 1 + else: + tok = self._buffer[self._currentIndex + location] + return tok + except IndexError as e: + raise ExpectedMoreTokensException(self._currentIndex + 1) from e + + def isvariable(self, tok): + return tok not in Tokens.TOKENS + + def process_next_expression(self, context): + """Parse the next complete expression from the stream and return it.""" + try: + tok = self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException( + self._currentIndex + 1, message="Expression expected." + ) from e + + accum = self.handle(tok, context) + + if not accum: + raise UnexpectedTokenException( + self._currentIndex, tok, message="Expression expected." + ) + + return self.attempt_adjuncts(accum, context) + + def handle(self, tok, context): + """This method is intended to be overridden for logics that + use different operators or expressions""" + if self.isvariable(tok): + return self.handle_variable(tok, context) + + elif tok in Tokens.NOT_LIST: + return self.handle_negation(tok, context) + + elif tok in Tokens.LAMBDA_LIST: + return self.handle_lambda(tok, context) + + elif tok in Tokens.QUANTS: + return self.handle_quant(tok, context) + + elif tok == Tokens.OPEN: + return self.handle_open(tok, context) + + def attempt_adjuncts(self, expression, context): + cur_idx = None + while cur_idx != self._currentIndex: # while adjuncts are added + cur_idx = self._currentIndex + expression = self.attempt_EqualityExpression(expression, context) + expression = self.attempt_ApplicationExpression(expression, context) + expression = self.attempt_BooleanExpression(expression, context) + return expression + + def handle_negation(self, tok, context): + return self.make_NegatedExpression(self.process_next_expression(Tokens.NOT)) + + def make_NegatedExpression(self, expression): + return NegatedExpression(expression) + + def handle_variable(self, tok, context): + # It's either: 1) a predicate expression: sees(x,y) + # 2) an application expression: P(x) + # 3) a solo variable: john OR x + accum = self.make_VariableExpression(tok) + if self.inRange(0) and self.token(0) == Tokens.OPEN: + # The predicate has arguments + if not isinstance(accum, FunctionVariableExpression) and not isinstance( + accum, ConstantExpression + ): + raise LogicalExpressionException( + self._currentIndex, + "'%s' is an illegal predicate name. " + "Individual variables may not be used as " + "predicates." % tok, + ) + self.token() # swallow the Open Paren + + # curry the arguments + accum = self.make_ApplicationExpression( + accum, self.process_next_expression(APP) + ) + while self.inRange(0) and self.token(0) == Tokens.COMMA: + self.token() # swallow the comma + accum = self.make_ApplicationExpression( + accum, self.process_next_expression(APP) + ) + self.assertNextToken(Tokens.CLOSE) + return accum + + def get_next_token_variable(self, description): + try: + tok = self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException(e.index, "Variable expected.") from e + if isinstance(self.make_VariableExpression(tok), ConstantExpression): + raise LogicalExpressionException( + self._currentIndex, + "'%s' is an illegal variable name. " + "Constants may not be %s." 
% (tok, description), + ) + return Variable(tok) + + def handle_lambda(self, tok, context): + # Expression is a lambda expression + if not self.inRange(0): + raise ExpectedMoreTokensException( + self._currentIndex + 2, + message="Variable and Expression expected following lambda operator.", + ) + vars = [self.get_next_token_variable("abstracted")] + while True: + if not self.inRange(0) or ( + self.token(0) == Tokens.DOT and not self.inRange(1) + ): + raise ExpectedMoreTokensException( + self._currentIndex + 2, message="Expression expected." + ) + if not self.isvariable(self.token(0)): + break + # Support expressions like: \x y.M == \x.\y.M + vars.append(self.get_next_token_variable("abstracted")) + if self.inRange(0) and self.token(0) == Tokens.DOT: + self.token() # swallow the dot + + accum = self.process_next_expression(tok) + while vars: + accum = self.make_LambdaExpression(vars.pop(), accum) + return accum + + def handle_quant(self, tok, context): + # Expression is a quantified expression: some x.M + factory = self.get_QuantifiedExpression_factory(tok) + + if not self.inRange(0): + raise ExpectedMoreTokensException( + self._currentIndex + 2, + message="Variable and Expression expected following quantifier '%s'." + % tok, + ) + vars = [self.get_next_token_variable("quantified")] + while True: + if not self.inRange(0) or ( + self.token(0) == Tokens.DOT and not self.inRange(1) + ): + raise ExpectedMoreTokensException( + self._currentIndex + 2, message="Expression expected." + ) + if not self.isvariable(self.token(0)): + break + # Support expressions like: some x y.M == some x.some y.M + vars.append(self.get_next_token_variable("quantified")) + if self.inRange(0) and self.token(0) == Tokens.DOT: + self.token() # swallow the dot + + accum = self.process_next_expression(tok) + while vars: + accum = self.make_QuanifiedExpression(factory, vars.pop(), accum) + return accum + + def get_QuantifiedExpression_factory(self, tok): + """This method serves as a hook for other logic parsers that + have different quantifiers""" + if tok in Tokens.EXISTS_LIST: + return ExistsExpression + elif tok in Tokens.ALL_LIST: + return AllExpression + elif tok in Tokens.IOTA_LIST: + return IotaExpression + else: + self.assertToken(tok, Tokens.QUANTS) + + def make_QuanifiedExpression(self, factory, variable, term): + return factory(variable, term) + + def handle_open(self, tok, context): + # Expression is in parens + accum = self.process_next_expression(None) + self.assertNextToken(Tokens.CLOSE) + return accum + + def attempt_EqualityExpression(self, expression, context): + """Attempt to make an equality expression. If the next token is an + equality operator, then an EqualityExpression will be returned. + Otherwise, the parameter will be returned.""" + if self.inRange(0): + tok = self.token(0) + if tok in Tokens.EQ_LIST + Tokens.NEQ_LIST and self.has_priority( + tok, context + ): + self.token() # swallow the "=" or "!=" + expression = self.make_EqualityExpression( + expression, self.process_next_expression(tok) + ) + if tok in Tokens.NEQ_LIST: + expression = self.make_NegatedExpression(expression) + return expression + + def make_EqualityExpression(self, first, second): + """This method serves as a hook for other logic parsers that + have different equality expression classes""" + return EqualityExpression(first, second) + + def attempt_BooleanExpression(self, expression, context): + """Attempt to make a boolean expression. If the next token is a boolean + operator, then a BooleanExpression will be returned. 
Otherwise, the + parameter will be returned.""" + while self.inRange(0): + tok = self.token(0) + factory = self.get_BooleanExpression_factory(tok) + if factory and self.has_priority(tok, context): + self.token() # swallow the operator + expression = self.make_BooleanExpression( + factory, expression, self.process_next_expression(tok) + ) + else: + break + return expression + + def get_BooleanExpression_factory(self, tok): + """This method serves as a hook for other logic parsers that + have different boolean operators""" + if tok in Tokens.AND_LIST: + return AndExpression + elif tok in Tokens.OR_LIST: + return OrExpression + elif tok in Tokens.IMP_LIST: + return ImpExpression + elif tok in Tokens.IFF_LIST: + return IffExpression + else: + return None + + def make_BooleanExpression(self, factory, first, second): + return factory(first, second) + + def attempt_ApplicationExpression(self, expression, context): + """Attempt to make an application expression. The next tokens are + a list of arguments in parens, then the argument expression is a + function being applied to the arguments. Otherwise, return the + argument expression.""" + if self.has_priority(APP, context): + if self.inRange(0) and self.token(0) == Tokens.OPEN: + if ( + not isinstance(expression, LambdaExpression) + and not isinstance(expression, ApplicationExpression) + and not isinstance(expression, FunctionVariableExpression) + and not isinstance(expression, ConstantExpression) + ): + raise LogicalExpressionException( + self._currentIndex, + ("The function '%s" % expression) + + "' is not a Lambda Expression, an " + "Application Expression, or a " + "functional predicate, so it may " + "not take arguments.", + ) + self.token() # swallow then open paren + # curry the arguments + accum = self.make_ApplicationExpression( + expression, self.process_next_expression(APP) + ) + while self.inRange(0) and self.token(0) == Tokens.COMMA: + self.token() # swallow the comma + accum = self.make_ApplicationExpression( + accum, self.process_next_expression(APP) + ) + self.assertNextToken(Tokens.CLOSE) + return accum + return expression + + def make_ApplicationExpression(self, function, argument): + return ApplicationExpression(function, argument) + + def make_VariableExpression(self, name): + return VariableExpression(Variable(name)) + + def make_LambdaExpression(self, variable, term): + return LambdaExpression(variable, term) + + def has_priority(self, operation, context): + return self.operator_precedence[operation] < self.operator_precedence[ + context + ] or ( + operation in self.right_associated_operations + and self.operator_precedence[operation] == self.operator_precedence[context] + ) + + def assertNextToken(self, expected): + try: + tok = self.token() + except ExpectedMoreTokensException as e: + raise ExpectedMoreTokensException( + e.index, message="Expected token '%s'." 
% expected + ) from e + + if isinstance(expected, list): + if tok not in expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + else: + if tok != expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + + def assertToken(self, tok, expected): + if isinstance(expected, list): + if tok not in expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + else: + if tok != expected: + raise UnexpectedTokenException(self._currentIndex, tok, expected) + + def __repr__(self): + if self.inRange(0): + msg = "Next token: " + self.token(0) + else: + msg = "No more tokens" + return "<" + self.__class__.__name__ + ": " + msg + ">" + + +def read_logic(s, logic_parser=None, encoding=None): + """ + Convert a file of First Order Formulas into a list of {Expression}s. + + :param s: the contents of the file + :type s: str + :param logic_parser: The parser to be used to parse the logical expression + :type logic_parser: LogicParser + :param encoding: the encoding of the input string, if it is binary + :type encoding: str + :return: a list of parsed formulas. + :rtype: list(Expression) + """ + if encoding is not None: + s = s.decode(encoding) + if logic_parser is None: + logic_parser = LogicParser() + + statements = [] + for linenum, line in enumerate(s.splitlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + statements.append(logic_parser.parse(line)) + except LogicalExpressionException as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + return statements + + +@total_ordering +class Variable: + def __init__(self, name): + """ + :param name: the name of the variable + """ + assert isinstance(name, str), "%s is not a string" % name + self.name = name + + def __eq__(self, other): + return isinstance(other, Variable) and self.name == other.name + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, Variable): + raise TypeError + return self.name < other.name + + def substitute_bindings(self, bindings): + return bindings.get(self, self) + + def __hash__(self): + return hash(self.name) + + def __str__(self): + return self.name + + def __repr__(self): + return "Variable('%s')" % self.name + + +def unique_variable(pattern=None, ignore=None): + """ + Return a new, unique variable. + + :param pattern: ``Variable`` that is being replaced. The new variable must + be the same type. + :param term: a set of ``Variable`` objects that should not be returned from + this function. 
+ :rtype: Variable + """ + if pattern is not None: + if is_indvar(pattern.name): + prefix = "z" + elif is_funcvar(pattern.name): + prefix = "F" + elif is_eventvar(pattern.name): + prefix = "e0" + else: + assert False, "Cannot generate a unique constant" + else: + prefix = "z" + + v = Variable(f"{prefix}{_counter.get()}") + while ignore is not None and v in ignore: + v = Variable(f"{prefix}{_counter.get()}") + return v + + +def skolem_function(univ_scope=None): + """ + Return a skolem function over the variables in univ_scope + param univ_scope + """ + skolem = VariableExpression(Variable("F%s" % _counter.get())) + if univ_scope: + for v in list(univ_scope): + skolem = skolem(VariableExpression(v)) + return skolem + + +class Type: + def __repr__(self): + return "%s" % self + + def __hash__(self): + return hash("%s" % self) + + @classmethod + def fromstring(cls, s): + return read_type(s) + + +class ComplexType(Type): + def __init__(self, first, second): + assert isinstance(first, Type), "%s is not a Type" % first + assert isinstance(second, Type), "%s is not a Type" % second + self.first = first + self.second = second + + def __eq__(self, other): + return ( + isinstance(other, ComplexType) + and self.first == other.first + and self.second == other.second + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Type.__hash__ + + def matches(self, other): + if isinstance(other, ComplexType): + return self.first.matches(other.first) and self.second.matches(other.second) + else: + return self == ANY_TYPE + + def resolve(self, other): + if other == ANY_TYPE: + return self + elif isinstance(other, ComplexType): + f = self.first.resolve(other.first) + s = self.second.resolve(other.second) + if f and s: + return ComplexType(f, s) + else: + return None + elif self == ANY_TYPE: + return other + else: + return None + + def __str__(self): + if self == ANY_TYPE: + return "%s" % ANY_TYPE + else: + return f"<{self.first},{self.second}>" + + def str(self): + if self == ANY_TYPE: + return ANY_TYPE.str() + else: + return f"({self.first.str()} -> {self.second.str()})" + + +class BasicType(Type): + def __eq__(self, other): + return isinstance(other, BasicType) and ("%s" % self) == ("%s" % other) + + def __ne__(self, other): + return not self == other + + __hash__ = Type.__hash__ + + def matches(self, other): + return other == ANY_TYPE or self == other + + def resolve(self, other): + if self.matches(other): + return self + else: + return None + + +class EntityType(BasicType): + def __str__(self): + return "e" + + def str(self): + return "IND" + + +class TruthValueType(BasicType): + def __str__(self): + return "t" + + def str(self): + return "BOOL" + + +class EventType(BasicType): + def __str__(self): + return "v" + + def str(self): + return "EVENT" + + +class AnyType(BasicType, ComplexType): + def __init__(self): + pass + + @property + def first(self): + return self + + @property + def second(self): + return self + + def __eq__(self, other): + return isinstance(other, AnyType) or other.__eq__(self) + + def __ne__(self, other): + return not self == other + + __hash__ = Type.__hash__ + + def matches(self, other): + return True + + def resolve(self, other): + return other + + def __str__(self): + return "?" 
+ + def str(self): + return "ANY" + + +TRUTH_TYPE = TruthValueType() +ENTITY_TYPE = EntityType() +EVENT_TYPE = EventType() +ANY_TYPE = AnyType() + + +def read_type(type_string): + assert isinstance(type_string, str) + type_string = type_string.replace(" ", "") # remove spaces + + if type_string[0] == "<": + assert type_string[-1] == ">" + paren_count = 0 + for i, char in enumerate(type_string): + if char == "<": + paren_count += 1 + elif char == ">": + paren_count -= 1 + assert paren_count > 0 + elif char == ",": + if paren_count == 1: + break + return ComplexType( + read_type(type_string[1:i]), read_type(type_string[i + 1 : -1]) + ) + elif type_string[0] == "%s" % ENTITY_TYPE: + return ENTITY_TYPE + elif type_string[0] == "%s" % TRUTH_TYPE: + return TRUTH_TYPE + elif type_string[0] == "%s" % ANY_TYPE: + return ANY_TYPE + else: + raise LogicalExpressionException( + None, "Unexpected character: '%s'." % type_string[0] + ) + + +class TypeException(Exception): + def __init__(self, msg): + super().__init__(msg) + + +class InconsistentTypeHierarchyException(TypeException): + def __init__(self, variable, expression=None): + if expression: + msg = ( + "The variable '%s' was found in multiple places with different" + " types in '%s'." % (variable, expression) + ) + else: + msg = ( + "The variable '%s' was found in multiple places with different" + " types." % (variable) + ) + super().__init__(msg) + + +class TypeResolutionException(TypeException): + def __init__(self, expression, other_type): + super().__init__( + "The type of '%s', '%s', cannot be resolved with type '%s'" + % (expression, expression.type, other_type) + ) + + +class IllegalTypeException(TypeException): + def __init__(self, expression, other_type, allowed_type): + super().__init__( + "Cannot set type of %s '%s' to '%s'; must match type '%s'." + % (expression.__class__.__name__, expression, other_type, allowed_type) + ) + + +def typecheck(expressions, signature=None): + """ + Ensure correct typing across a collection of ``Expression`` objects. + :param expressions: a collection of expressions + :param signature: dict that maps variable names to types (or string + representations of types) + """ + # typecheck and create master signature + for expression in expressions: + signature = expression.typecheck(signature) + # apply master signature to all expressions + for expression in expressions[:-1]: + expression.typecheck(signature) + return signature + + +class SubstituteBindingsI: + """ + An interface for classes that can perform substitutions for + variables. + """ + + def substitute_bindings(self, bindings): + """ + :return: The object that is obtained by replacing + each variable bound by ``bindings`` with its values. + Aliases are already resolved. (maybe?) + :rtype: (any) + """ + raise NotImplementedError() + + def variables(self): + """ + :return: A list of all variables in this object. 
+ """ + raise NotImplementedError() + + +class Expression(SubstituteBindingsI): + """This is the base abstract object for all logical expressions""" + + _logic_parser = LogicParser() + _type_checking_logic_parser = LogicParser(type_check=True) + + @classmethod + def fromstring(cls, s, type_check=False, signature=None): + if type_check: + return cls._type_checking_logic_parser.parse(s, signature) + else: + return cls._logic_parser.parse(s, signature) + + def __call__(self, other, *additional): + accum = self.applyto(other) + for a in additional: + accum = accum(a) + return accum + + def applyto(self, other): + assert isinstance(other, Expression), "%s is not an Expression" % other + return ApplicationExpression(self, other) + + def __neg__(self): + return NegatedExpression(self) + + def negate(self): + """If this is a negated expression, remove the negation. + Otherwise add a negation.""" + return -self + + def __and__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return AndExpression(self, other) + + def __or__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return OrExpression(self, other) + + def __gt__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return ImpExpression(self, other) + + def __lt__(self, other): + if not isinstance(other, Expression): + raise TypeError("%s is not an Expression" % other) + return IffExpression(self, other) + + def __eq__(self, other): + return NotImplemented + + def __ne__(self, other): + return not self == other + + def equiv(self, other, prover=None): + """ + Check for logical equivalence. + Pass the expression (self <-> other) to the theorem prover. + If the prover says it is valid, then the self and other are equal. + + :param other: an ``Expression`` to check equality against + :param prover: a ``nltk.inference.api.Prover`` + """ + assert isinstance(other, Expression), "%s is not an Expression" % other + + if prover is None: + from nltk.inference import Prover9 + + prover = Prover9() + bicond = IffExpression(self.simplify(), other.simplify()) + return prover.prove(bicond) + + def __hash__(self): + return hash(repr(self)) + + def substitute_bindings(self, bindings): + expr = self + for var in expr.variables(): + if var in bindings: + val = bindings[var] + if isinstance(val, Variable): + val = self.make_VariableExpression(val) + elif not isinstance(val, Expression): + raise ValueError( + "Can not substitute a non-expression " + "value into an expression: %r" % (val,) + ) + # Substitute bindings in the target value. + val = val.substitute_bindings(bindings) + # Replace var w/ the target value. + expr = expr.replace(var, val) + return expr.simplify() + + def typecheck(self, signature=None): + """ + Infer and check types. Raise exceptions if necessary. + + :param signature: dict that maps variable names to types (or string + representations of types) + :return: the signature, plus any additional type mappings + """ + sig = defaultdict(list) + if signature: + for key in signature: + val = signature[key] + varEx = VariableExpression(Variable(key)) + if isinstance(val, Type): + varEx.type = val + else: + varEx.type = read_type(val) + sig[key].append(varEx) + + self._set_type(signature=sig) + + return {key: sig[key][0].type for key in sig} + + def findtype(self, variable): + """ + Find the type of the given variable as it is used in this expression. 
+ For example, finding the type of "P" in "P(x) & Q(x,y)" yields "" + + :param variable: Variable + """ + raise NotImplementedError() + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """ + Set the type of this expression to be the given type. Raise type + exceptions where applicable. + + :param other_type: Type + :param signature: dict(str -> list(AbstractVariableExpression)) + """ + raise NotImplementedError() + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """ + Replace every instance of 'variable' with 'expression' + :param variable: ``Variable`` The variable to replace + :param expression: ``Expression`` The expression with which to replace it + :param replace_bound: bool Should bound variables be replaced? + :param alpha_convert: bool Alpha convert automatically to avoid name clashes? + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + assert isinstance(expression, Expression), ( + "%s is not an Expression" % expression + ) + + return self.visit_structured( + lambda e: e.replace(variable, expression, replace_bound, alpha_convert), + self.__class__, + ) + + def normalize(self, newvars=None): + """Rename auto-generated unique variables""" + + def get_indiv_vars(e): + if isinstance(e, IndividualVariableExpression): + return {e} + elif isinstance(e, AbstractVariableExpression): + return set() + else: + return e.visit( + get_indiv_vars, lambda parts: reduce(operator.or_, parts, set()) + ) + + result = self + for i, e in enumerate(sorted(get_indiv_vars(self), key=lambda e: e.variable)): + if isinstance(e, EventVariableExpression): + newVar = e.__class__(Variable("e0%s" % (i + 1))) + elif isinstance(e, IndividualVariableExpression): + newVar = e.__class__(Variable("z%s" % (i + 1))) + else: + newVar = e + result = result.replace(e.variable, newVar, True) + return result + + def visit(self, function, combinator): + """ + Recursively visit subexpressions. Apply 'function' to each + subexpression and pass the result of each function application + to the 'combinator' for aggregation: + + return combinator(map(function, self.subexpressions)) + + Bound variables are neither applied upon by the function nor given to + the combinator. + :param function: ``Function`` to call on each subexpression + :param combinator: ``Function,R>`` to combine the results of the + function calls + :return: result of combination ``R`` + """ + raise NotImplementedError() + + def visit_structured(self, function, combinator): + """ + Recursively visit subexpressions. Apply 'function' to each + subexpression and pass the result of each function application + to the 'combinator' for aggregation. The combinator must have + the same signature as the constructor. The function is not + applied to bound variables, but they are passed to the + combinator. + :param function: ``Function`` to call on each subexpression + :param combinator: ``Function`` with the same signature as the + constructor, to combine the results of the function calls + :return: result of combination + """ + return self.visit(function, lambda parts: combinator(*parts)) + + def __repr__(self): + return f"<{self.__class__.__name__} {self}>" + + def __str__(self): + return self.str() + + def variables(self): + """ + Return a set of all the variables for binding substitution. + The variables returned include all free (non-bound) individual + variables and any variable starting with '?' or '@'. 
+ :return: set of ``Variable`` objects + """ + return self.free() | { + p for p in self.predicates() | self.constants() if re.match("^[?@]", p.name) + } + + def free(self): + """ + Return a set of all the free (non-bound) variables. This includes + both individual and predicate variables, but not constants. + :return: set of ``Variable`` objects + """ + return self.visit( + lambda e: e.free(), lambda parts: reduce(operator.or_, parts, set()) + ) + + def constants(self): + """ + Return a set of individual constants (non-predicates). + :return: set of ``Variable`` objects + """ + return self.visit( + lambda e: e.constants(), lambda parts: reduce(operator.or_, parts, set()) + ) + + def predicates(self): + """ + Return a set of predicates (constants, not variables). + :return: set of ``Variable`` objects + """ + return self.visit( + lambda e: e.predicates(), lambda parts: reduce(operator.or_, parts, set()) + ) + + def simplify(self): + """ + :return: beta-converted version of this expression + """ + return self.visit_structured(lambda e: e.simplify(), self.__class__) + + def make_VariableExpression(self, variable): + return VariableExpression(variable) + + +class ApplicationExpression(Expression): + r""" + This class is used to represent two related types of logical expressions. + + The first is a Predicate Expression, such as "P(x,y)". A predicate + expression is comprised of a ``FunctionVariableExpression`` or + ``ConstantExpression`` as the predicate and a list of Expressions as the + arguments. + + The second is a an application of one expression to another, such as + "(\x.dog(x))(fido)". + + The reason Predicate Expressions are treated as Application Expressions is + that the Variable Expression predicate of the expression may be replaced + with another Expression, such as a LambdaExpression, which would mean that + the Predicate should be thought of as being applied to the arguments. + + The logical expression reader will always curry arguments in a application expression. + So, "\x y.see(x,y)(john,mary)" will be represented internally as + "((\x y.(see(x))(y))(john))(mary)". This simplifies the internals since + there will always be exactly one argument in an application. + + The str() method will usually print the curried forms of application + expressions. The one exception is when the the application expression is + really a predicate expression (ie, underlying function is an + ``AbstractVariableExpression``). This means that the example from above + will be returned as "(\x y.see(x,y)(john))(mary)". 
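+
+    A short usage sketch of the currying behaviour described above; the
+    results shown in the comments are indicative (see also demo())::
+
+        from nltk.sem.logic import Expression
+
+        e = Expression.fromstring(r'\x y.see(x,y)(john,mary)')
+        e.simplify()          # see(john,mary)
+        e.simplify().pred     # see
+        e.simplify().args     # [john, mary]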
+ """ + + def __init__(self, function, argument): + """ + :param function: ``Expression``, for the function expression + :param argument: ``Expression``, for the argument + """ + assert isinstance(function, Expression), "%s is not an Expression" % function + assert isinstance(argument, Expression), "%s is not an Expression" % argument + self.function = function + self.argument = argument + + def simplify(self): + function = self.function.simplify() + argument = self.argument.simplify() + if isinstance(function, LambdaExpression): + return function.term.replace(function.variable, argument).simplify() + else: + return self.__class__(function, argument) + + @property + def type(self): + if isinstance(self.function.type, ComplexType): + return self.function.type.second + else: + return ANY_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + self.argument._set_type(ANY_TYPE, signature) + try: + self.function._set_type( + ComplexType(self.argument.type, other_type), signature + ) + except TypeResolutionException as e: + raise TypeException( + "The function '%s' is of type '%s' and cannot be applied " + "to '%s' of type '%s'. Its argument must match type '%s'." + % ( + self.function, + self.function.type, + self.argument, + self.argument.type, + self.function.type.first, + ) + ) from e + + def findtype(self, variable): + """:see Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if self.is_atom(): + function, args = self.uncurry() + else: + # It's not a predicate expression ("P(x,y)"), so leave args curried + function = self.function + args = [self.argument] + + found = [arg.findtype(variable) for arg in [function] + args] + + unique = [] + for f in found: + if f != ANY_TYPE: + if unique: + for u in unique: + if f.matches(u): + break + else: + unique.append(f) + + if len(unique) == 1: + return list(unique)[0] + else: + return ANY_TYPE + + def constants(self): + """:see: Expression.constants()""" + if isinstance(self.function, AbstractVariableExpression): + function_constants = set() + else: + function_constants = self.function.constants() + return function_constants | self.argument.constants() + + def predicates(self): + """:see: Expression.predicates()""" + if isinstance(self.function, ConstantExpression): + function_preds = {self.function.variable} + else: + function_preds = self.function.predicates() + return function_preds | self.argument.predicates() + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.function), function(self.argument)]) + + def __eq__(self, other): + return ( + isinstance(other, ApplicationExpression) + and self.function == other.function + and self.argument == other.argument + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + # uncurry the arguments and find the base function + if self.is_atom(): + function, args = self.uncurry() + arg_str = ",".join("%s" % arg for arg in args) + else: + # Leave arguments curried + function = self.function + arg_str = "%s" % self.argument + + function_str = "%s" % function + parenthesize_function = False + if isinstance(function, LambdaExpression): + if isinstance(function.term, ApplicationExpression): + if not isinstance(function.term.function, AbstractVariableExpression): + parenthesize_function = True + elif not 
isinstance(function.term, BooleanExpression): + parenthesize_function = True + elif isinstance(function, ApplicationExpression): + parenthesize_function = True + + if parenthesize_function: + function_str = Tokens.OPEN + function_str + Tokens.CLOSE + + return function_str + Tokens.OPEN + arg_str + Tokens.CLOSE + + def uncurry(self): + """ + Uncurry this application expression + + return: A tuple (base-function, arg-list) + """ + function = self.function + args = [self.argument] + while isinstance(function, ApplicationExpression): + # (\x.\y.sees(x,y)(john))(mary) + args.insert(0, function.argument) + function = function.function + return (function, args) + + @property + def pred(self): + """ + Return uncurried base-function. + If this is an atom, then the result will be a variable expression. + Otherwise, it will be a lambda expression. + """ + return self.uncurry()[0] + + @property + def args(self): + """ + Return uncurried arg-list + """ + return self.uncurry()[1] + + def is_atom(self): + """ + Is this expression an atom (as opposed to a lambda expression applied + to a term)? + """ + return isinstance(self.pred, AbstractVariableExpression) + + +@total_ordering +class AbstractVariableExpression(Expression): + """This class represents a variable to be used as a predicate or entity""" + + def __init__(self, variable): + """ + :param variable: ``Variable``, for the variable + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + self.variable = variable + + def simplify(self): + return self + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """:see: Expression.replace()""" + assert isinstance(variable, Variable), "%s is not an Variable" % variable + assert isinstance(expression, Expression), ( + "%s is not an Expression" % expression + ) + if self.variable == variable: + return expression + else: + return self + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + resolution = other_type + for varEx in signature[self.variable.name]: + resolution = varEx.type.resolve(resolution) + if not resolution: + raise InconsistentTypeHierarchyException(self) + + signature[self.variable.name].append(self) + for varEx in signature[self.variable.name]: + varEx.type = resolution + + def findtype(self, variable): + """:see Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if self.variable == variable: + return self.type + else: + return ANY_TYPE + + def predicates(self): + """:see: Expression.predicates()""" + return set() + + def __eq__(self, other): + """Allow equality between instances of ``AbstractVariableExpression`` + subtypes.""" + return ( + isinstance(other, AbstractVariableExpression) + and self.variable == other.variable + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, AbstractVariableExpression): + raise TypeError + return self.variable < other.variable + + __hash__ = Expression.__hash__ + + def __str__(self): + return "%s" % self.variable + + +class IndividualVariableExpression(AbstractVariableExpression): + """This class represents variables that take the form of a single lowercase + character (other than 'e') followed by zero or more digits.""" + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, 
Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(ENTITY_TYPE): + raise IllegalTypeException(self, other_type, ENTITY_TYPE) + + signature[self.variable.name].append(self) + + def _get_type(self): + return ENTITY_TYPE + + type = property(_get_type, _set_type) + + def free(self): + """:see: Expression.free()""" + return {self.variable} + + def constants(self): + """:see: Expression.constants()""" + return set() + + +class FunctionVariableExpression(AbstractVariableExpression): + """This class represents variables that take the form of a single uppercase + character followed by zero or more digits.""" + + type = ANY_TYPE + + def free(self): + """:see: Expression.free()""" + return {self.variable} + + def constants(self): + """:see: Expression.constants()""" + return set() + + +class EventVariableExpression(IndividualVariableExpression): + """This class represents variables that take the form of a single lowercase + 'e' character followed by zero or more digits.""" + + type = EVENT_TYPE + + +class ConstantExpression(AbstractVariableExpression): + """This class represents variables that do not take the form of a single + character followed by zero or more digits.""" + + type = ENTITY_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if other_type == ANY_TYPE: + # entity type by default, for individuals + resolution = ENTITY_TYPE + else: + resolution = other_type + if self.type != ENTITY_TYPE: + resolution = resolution.resolve(self.type) + + for varEx in signature[self.variable.name]: + resolution = varEx.type.resolve(resolution) + if not resolution: + raise InconsistentTypeHierarchyException(self) + + signature[self.variable.name].append(self) + for varEx in signature[self.variable.name]: + varEx.type = resolution + + def free(self): + """:see: Expression.free()""" + return set() + + def constants(self): + """:see: Expression.constants()""" + return {self.variable} + + +def VariableExpression(variable): + """ + This is a factory method that instantiates and returns a subtype of + ``AbstractVariableExpression`` appropriate for the given variable. + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if is_indvar(variable.name): + return IndividualVariableExpression(variable) + elif is_funcvar(variable.name): + return FunctionVariableExpression(variable) + elif is_eventvar(variable.name): + return EventVariableExpression(variable) + else: + return ConstantExpression(variable) + + +class VariableBinderExpression(Expression): + """This an abstract class for any Expression that binds a variable in an + Expression. 
This includes LambdaExpressions and Quantified Expressions""" + + def __init__(self, variable, term): + """ + :param variable: ``Variable``, for the variable + :param term: ``Expression``, for the term + """ + assert isinstance(variable, Variable), "%s is not a Variable" % variable + assert isinstance(term, Expression), "%s is not an Expression" % term + self.variable = variable + self.term = term + + def replace(self, variable, expression, replace_bound=False, alpha_convert=True): + """:see: Expression.replace()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + assert isinstance(expression, Expression), ( + "%s is not an Expression" % expression + ) + # if the bound variable is the thing being replaced + if self.variable == variable: + if replace_bound: + assert isinstance(expression, AbstractVariableExpression), ( + "%s is not a AbstractVariableExpression" % expression + ) + return self.__class__( + expression.variable, + self.term.replace(variable, expression, True, alpha_convert), + ) + else: + return self + else: + # if the bound variable appears in the expression, then it must + # be alpha converted to avoid a conflict + if alpha_convert and self.variable in expression.free(): + self = self.alpha_convert(unique_variable(pattern=self.variable)) + + # replace in the term + return self.__class__( + self.variable, + self.term.replace(variable, expression, replace_bound, alpha_convert), + ) + + def alpha_convert(self, newvar): + """Rename all occurrences of the variable introduced by this variable + binder in the expression to ``newvar``. + :param newvar: ``Variable``, for the new variable + """ + assert isinstance(newvar, Variable), "%s is not a Variable" % newvar + return self.__class__( + newvar, self.term.replace(self.variable, VariableExpression(newvar), True) + ) + + def free(self): + """:see: Expression.free()""" + return self.term.free() - {self.variable} + + def findtype(self, variable): + """:see Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + if variable == self.variable: + return ANY_TYPE + else: + return self.term.findtype(variable) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.term)]) + + def visit_structured(self, function, combinator): + """:see: Expression.visit_structured()""" + return combinator(self.variable, function(self.term)) + + def __eq__(self, other): + r"""Defines equality modulo alphabetic variance. If we are comparing + \x.M and \y.N, then check equality of M and N[x/y].""" + if isinstance(self, other.__class__) or isinstance(other, self.__class__): + if self.variable == other.variable: + return self.term == other.term + else: + # Comparing \x.M and \y.N. Relabel y in N with x and continue. 
+ varex = VariableExpression(self.variable) + return self.term == other.term.replace(other.variable, varex) + else: + return False + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + +class LambdaExpression(VariableBinderExpression): + @property + def type(self): + return ComplexType(self.term.findtype(self.variable), self.term.type) + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + self.term._set_type(other_type.second, signature) + if not self.type.resolve(other_type): + raise TypeResolutionException(self, other_type) + + def __str__(self): + variables = [self.variable] + term = self.term + while term.__class__ == self.__class__: + variables.append(term.variable) + term = term.term + return ( + Tokens.LAMBDA + + " ".join("%s" % v for v in variables) + + Tokens.DOT + + "%s" % term + ) + + +class QuantifiedExpression(VariableBinderExpression): + @property + def type(self): + return TRUTH_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.term._set_type(TRUTH_TYPE, signature) + + def __str__(self): + variables = [self.variable] + term = self.term + while term.__class__ == self.__class__: + variables.append(term.variable) + term = term.term + return ( + self.getQuantifier() + + " " + + " ".join("%s" % v for v in variables) + + Tokens.DOT + + "%s" % term + ) + + +class ExistsExpression(QuantifiedExpression): + def getQuantifier(self): + return Tokens.EXISTS + + +class AllExpression(QuantifiedExpression): + def getQuantifier(self): + return Tokens.ALL + + +class IotaExpression(QuantifiedExpression): + def getQuantifier(self): + return Tokens.IOTA + + +class NegatedExpression(Expression): + def __init__(self, term): + assert isinstance(term, Expression), "%s is not an Expression" % term + self.term = term + + @property + def type(self): + return TRUTH_TYPE + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.term._set_type(TRUTH_TYPE, signature) + + def findtype(self, variable): + assert isinstance(variable, Variable), "%s is not a Variable" % variable + return self.term.findtype(variable) + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.term)]) + + def negate(self): + """:see: Expression.negate()""" + return self.term + + def __eq__(self, other): + return isinstance(other, NegatedExpression) and self.term == other.term + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + return Tokens.NOT + "%s" % self.term + + +class BinaryExpression(Expression): + def __init__(self, first, second): + assert isinstance(first, Expression), "%s is not an Expression" % first + assert isinstance(second, Expression), "%s is not an Expression" % second + self.first = first + self.second = second + + @property + def type(self): + return TRUTH_TYPE + + def findtype(self, variable): + """:see 
Expression.findtype()""" + assert isinstance(variable, Variable), "%s is not a Variable" % variable + f = self.first.findtype(variable) + s = self.second.findtype(variable) + if f == s or s == ANY_TYPE: + return f + elif f == ANY_TYPE: + return s + else: + return ANY_TYPE + + def visit(self, function, combinator): + """:see: Expression.visit()""" + return combinator([function(self.first), function(self.second)]) + + def __eq__(self, other): + return ( + (isinstance(self, other.__class__) or isinstance(other, self.__class__)) + and self.first == other.first + and self.second == other.second + ) + + def __ne__(self, other): + return not self == other + + __hash__ = Expression.__hash__ + + def __str__(self): + first = self._str_subex(self.first) + second = self._str_subex(self.second) + return Tokens.OPEN + first + " " + self.getOp() + " " + second + Tokens.CLOSE + + def _str_subex(self, subex): + return "%s" % subex + + +class BooleanExpression(BinaryExpression): + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.first._set_type(TRUTH_TYPE, signature) + self.second._set_type(TRUTH_TYPE, signature) + + +class AndExpression(BooleanExpression): + """This class represents conjunctions""" + + def getOp(self): + return Tokens.AND + + def _str_subex(self, subex): + s = "%s" % subex + if isinstance(subex, AndExpression): + return s[1:-1] + return s + + +class OrExpression(BooleanExpression): + """This class represents disjunctions""" + + def getOp(self): + return Tokens.OR + + def _str_subex(self, subex): + s = "%s" % subex + if isinstance(subex, OrExpression): + return s[1:-1] + return s + + +class ImpExpression(BooleanExpression): + """This class represents implications""" + + def getOp(self): + return Tokens.IMP + + +class IffExpression(BooleanExpression): + """This class represents biconditionals""" + + def getOp(self): + return Tokens.IFF + + +class EqualityExpression(BinaryExpression): + """This class represents equality expressions like "(x = y)".""" + + def _set_type(self, other_type=ANY_TYPE, signature=None): + """:see Expression._set_type()""" + assert isinstance(other_type, Type) + + if signature is None: + signature = defaultdict(list) + + if not other_type.matches(TRUTH_TYPE): + raise IllegalTypeException(self, other_type, TRUTH_TYPE) + self.first._set_type(ENTITY_TYPE, signature) + self.second._set_type(ENTITY_TYPE, signature) + + def getOp(self): + return Tokens.EQ + + +### Utilities + + +class LogicalExpressionException(Exception): + def __init__(self, index, message): + self.index = index + Exception.__init__(self, message) + + +class UnexpectedTokenException(LogicalExpressionException): + def __init__(self, index, unexpected=None, expected=None, message=None): + if unexpected and expected: + msg = "Unexpected token: '%s'. " "Expected token '%s'." % ( + unexpected, + expected, + ) + elif unexpected: + msg = "Unexpected token: '%s'." % unexpected + if message: + msg += " " + message + else: + msg = "Expected token '%s'." % expected + LogicalExpressionException.__init__(self, index, msg) + + +class ExpectedMoreTokensException(LogicalExpressionException): + def __init__(self, index, message=None): + if not message: + message = "More tokens expected." + LogicalExpressionException.__init__( + self, index, "End of input found. 
" + message + ) + + +def is_indvar(expr): + """ + An individual variable must be a single lowercase character other than 'e', + followed by zero or more digits. + + :param expr: str + :return: bool True if expr is of the correct form + """ + assert isinstance(expr, str), "%s is not a string" % expr + return re.match(r"^[a-df-z]\d*$", expr) is not None + + +def is_funcvar(expr): + """ + A function variable must be a single uppercase character followed by + zero or more digits. + + :param expr: str + :return: bool True if expr is of the correct form + """ + assert isinstance(expr, str), "%s is not a string" % expr + return re.match(r"^[A-Z]\d*$", expr) is not None + + +def is_eventvar(expr): + """ + An event variable must be a single lowercase 'e' character followed by + zero or more digits. + + :param expr: str + :return: bool True if expr is of the correct form + """ + assert isinstance(expr, str), "%s is not a string" % expr + return re.match(r"^e\d*$", expr) is not None + + +def demo(): + lexpr = Expression.fromstring + print("=" * 20 + "Test reader" + "=" * 20) + print(lexpr(r"john")) + print(lexpr(r"man(x)")) + print(lexpr(r"-man(x)")) + print(lexpr(r"(man(x) & tall(x) & walks(x))")) + print(lexpr(r"exists x.(man(x) & tall(x) & walks(x))")) + print(lexpr(r"\x.man(x)")) + print(lexpr(r"\x.man(x)(john)")) + print(lexpr(r"\x y.sees(x,y)")) + print(lexpr(r"\x y.sees(x,y)(a,b)")) + print(lexpr(r"(\x.exists y.walks(x,y))(x)")) + print(lexpr(r"exists x.x = y")) + print(lexpr(r"exists x.(x = y)")) + print(lexpr("P(x) & x=y & P(y)")) + print(lexpr(r"\P Q.exists x.(P(x) & Q(x))")) + print(lexpr(r"man(x) <-> tall(x)")) + + print("=" * 20 + "Test simplify" + "=" * 20) + print(lexpr(r"\x.\y.sees(x,y)(john)(mary)").simplify()) + print(lexpr(r"\x.\y.sees(x,y)(john, mary)").simplify()) + print(lexpr(r"all x.(man(x) & (\x.exists y.walks(x,y))(x))").simplify()) + print(lexpr(r"(\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x))(\x.bark(x))").simplify()) + + print("=" * 20 + "Test alpha conversion and binder expression equality" + "=" * 20) + e1 = lexpr("exists x.P(x)") + print(e1) + e2 = e1.alpha_convert(Variable("z")) + print(e2) + print(e1 == e2) + + +def demo_errors(): + print("=" * 20 + "Test reader errors" + "=" * 20) + demoException("(P(x) & Q(x)") + demoException("((P(x) &) & Q(x))") + demoException("P(x) -> ") + demoException("P(x") + demoException("P(x,") + demoException("P(x,)") + demoException("exists") + demoException("exists x.") + demoException("\\") + demoException("\\ x y.") + demoException("P(x)Q(x)") + demoException("(P(x)Q(x)") + demoException("exists x -> y") + + +def demoException(s): + try: + Expression.fromstring(s) + except LogicalExpressionException as e: + print(f"{e.__class__.__name__}: {e}") + + +def printtype(ex): + print(f"{ex.str()} : {ex.type}") + + +if __name__ == "__main__": + demo() +# demo_errors() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/relextract.py b/venv/lib/python3.10/site-packages/nltk/sem/relextract.py new file mode 100644 index 0000000000000000000000000000000000000000..fcf755a3c4ab91678ae2965b96e79235a7c59120 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/relextract.py @@ -0,0 +1,539 @@ +# Natural Language Toolkit: Relation Extraction +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +Code for extracting relational triples from the ieer and conll2002 corpora. + +Relations are stored internally as dictionaries ('reldicts'). 
+ +The two serialization outputs are "rtuple" and "clause". + +- An rtuple is a tuple of the form ``(subj, filler, obj)``, + where ``subj`` and ``obj`` are pairs of Named Entity mentions, and ``filler`` is the string of words + occurring between ``sub`` and ``obj`` (with no intervening NEs). Strings are printed via ``repr()`` to + circumvent locale variations in rendering utf-8 encoded strings. +- A clause is an atom of the form ``relsym(subjsym, objsym)``, + where the relation, subject and object have been canonicalized to single strings. +""" + +# todo: get a more general solution to canonicalized symbols for clauses -- maybe use xmlcharrefs? + +import html +import re +from collections import defaultdict + +# Dictionary that associates corpora with NE classes +NE_CLASSES = { + "ieer": [ + "LOCATION", + "ORGANIZATION", + "PERSON", + "DURATION", + "DATE", + "CARDINAL", + "PERCENT", + "MONEY", + "MEASURE", + ], + "conll2002": ["LOC", "PER", "ORG"], + "ace": [ + "LOCATION", + "ORGANIZATION", + "PERSON", + "DURATION", + "DATE", + "CARDINAL", + "PERCENT", + "MONEY", + "MEASURE", + "FACILITY", + "GPE", + ], +} + +# Allow abbreviated class labels +short2long = dict(LOC="LOCATION", ORG="ORGANIZATION", PER="PERSON") +long2short = dict(LOCATION="LOC", ORGANIZATION="ORG", PERSON="PER") + + +def _expand(type): + """ + Expand an NE class name. + :type type: str + :rtype: str + """ + try: + return short2long[type] + except KeyError: + return type + + +def class_abbrev(type): + """ + Abbreviate an NE class name. + :type type: str + :rtype: str + """ + try: + return long2short[type] + except KeyError: + return type + + +def _join(lst, sep=" ", untag=False): + """ + Join a list into a string, turning tags tuples into tag strings or just words. + :param untag: if ``True``, omit the tag from tagged input strings. + :type lst: list + :rtype: str + """ + try: + return sep.join(lst) + except TypeError: + if untag: + return sep.join(tup[0] for tup in lst) + from nltk.tag import tuple2str + + return sep.join(tuple2str(tup) for tup in lst) + + +def descape_entity(m, defs=html.entities.entitydefs): + """ + Translate one entity to its ISO Latin value. + Inspired by example from effbot.org + + + """ + try: + return defs[m.group(1)] + + except KeyError: + return m.group(0) # use as is + + +def list2sym(lst): + """ + Convert a list of strings into a canonical symbol. + :type lst: list + :return: a Unicode string without whitespace + :rtype: unicode + """ + sym = _join(lst, "_", untag=True) + sym = sym.lower() + ENT = re.compile(r"&(\w+?);") + sym = ENT.sub(descape_entity, sym) + sym = sym.replace(".", "") + return sym + + +def tree2semi_rel(tree): + """ + Group a chunk structure into a list of 'semi-relations' of the form (list(str), ``Tree``). + + In order to facilitate the construction of (``Tree``, string, ``Tree``) triples, this + identifies pairs whose first member is a list (possibly empty) of terminal + strings, and whose second member is a ``Tree`` of the form (NE_label, terminals). 
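+
+    For example, with a small hand-built chunk tree (the layout shown in
+    the comment is indicative)::
+
+        from nltk.tree import Tree
+
+        t = Tree('S', [('About', 'IN'), ('500', 'CD'), ('people', 'NNS'),
+                       Tree('ORGANIZATION', [('Apple', 'NNP')])])
+        tree2semi_rel(t)
+        # [[[('About', 'IN'), ('500', 'CD'), ('people', 'NNS')],
+        #   Tree('ORGANIZATION', [('Apple', 'NNP')])]]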
+ + :param tree: a chunk tree + :return: a list of pairs (list(str), ``Tree``) + :rtype: list of tuple + """ + + from nltk.tree import Tree + + semi_rels = [] + semi_rel = [[], None] + + for dtr in tree: + if not isinstance(dtr, Tree): + semi_rel[0].append(dtr) + else: + # dtr is a Tree + semi_rel[1] = dtr + semi_rels.append(semi_rel) + semi_rel = [[], None] + return semi_rels + + +def semi_rel2reldict(pairs, window=5, trace=False): + """ + Converts the pairs generated by ``tree2semi_rel`` into a 'reldict': a dictionary which + stores information about the subject and object NEs plus the filler between them. + Additionally, a left and right context of length =< window are captured (within + a given input sentence). + + :param pairs: a pair of list(str) and ``Tree``, as generated by + :param window: a threshold for the number of items to include in the left and right context + :type window: int + :return: 'relation' dictionaries whose keys are 'lcon', 'subjclass', 'subjtext', 'subjsym', 'filler', objclass', objtext', 'objsym' and 'rcon' + :rtype: list(defaultdict) + """ + result = [] + while len(pairs) > 2: + reldict = defaultdict(str) + reldict["lcon"] = _join(pairs[0][0][-window:]) + reldict["subjclass"] = pairs[0][1].label() + reldict["subjtext"] = _join(pairs[0][1].leaves()) + reldict["subjsym"] = list2sym(pairs[0][1].leaves()) + reldict["filler"] = _join(pairs[1][0]) + reldict["untagged_filler"] = _join(pairs[1][0], untag=True) + reldict["objclass"] = pairs[1][1].label() + reldict["objtext"] = _join(pairs[1][1].leaves()) + reldict["objsym"] = list2sym(pairs[1][1].leaves()) + reldict["rcon"] = _join(pairs[2][0][:window]) + if trace: + print( + "(%s(%s, %s)" + % ( + reldict["untagged_filler"], + reldict["subjclass"], + reldict["objclass"], + ) + ) + result.append(reldict) + pairs = pairs[1:] + return result + + +def extract_rels(subjclass, objclass, doc, corpus="ace", pattern=None, window=10): + """ + Filter the output of ``semi_rel2reldict`` according to specified NE classes and a filler pattern. + + The parameters ``subjclass`` and ``objclass`` can be used to restrict the + Named Entities to particular types (any of 'LOCATION', 'ORGANIZATION', + 'PERSON', 'DURATION', 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'). + + :param subjclass: the class of the subject Named Entity. + :type subjclass: str + :param objclass: the class of the object Named Entity. + :type objclass: str + :param doc: input document + :type doc: ieer document or a list of chunk trees + :param corpus: name of the corpus to take as input; possible values are + 'ieer' and 'conll2002' + :type corpus: str + :param pattern: a regular expression for filtering the fillers of + retrieved triples. 
+ :type pattern: SRE_Pattern + :param window: filters out fillers which exceed this threshold + :type window: int + :return: see ``mk_reldicts`` + :rtype: list(defaultdict) + """ + + if subjclass and subjclass not in NE_CLASSES[corpus]: + if _expand(subjclass) in NE_CLASSES[corpus]: + subjclass = _expand(subjclass) + else: + raise ValueError( + "your value for the subject type has not been recognized: %s" + % subjclass + ) + if objclass and objclass not in NE_CLASSES[corpus]: + if _expand(objclass) in NE_CLASSES[corpus]: + objclass = _expand(objclass) + else: + raise ValueError( + "your value for the object type has not been recognized: %s" % objclass + ) + + if corpus == "ace" or corpus == "conll2002": + pairs = tree2semi_rel(doc) + elif corpus == "ieer": + pairs = tree2semi_rel(doc.text) + tree2semi_rel(doc.headline) + else: + raise ValueError("corpus type not recognized") + + reldicts = semi_rel2reldict(pairs) + + relfilter = lambda x: ( + x["subjclass"] == subjclass + and len(x["filler"].split()) <= window + and pattern.match(x["filler"]) + and x["objclass"] == objclass + ) + + return list(filter(relfilter, reldicts)) + + +def rtuple(reldict, lcon=False, rcon=False): + """ + Pretty print the reldict as an rtuple. + :param reldict: a relation dictionary + :type reldict: defaultdict + """ + items = [ + class_abbrev(reldict["subjclass"]), + reldict["subjtext"], + reldict["filler"], + class_abbrev(reldict["objclass"]), + reldict["objtext"], + ] + format = "[%s: %r] %r [%s: %r]" + if lcon: + items = [reldict["lcon"]] + items + format = "...%r)" + format + if rcon: + items.append(reldict["rcon"]) + format = format + "(%r..." + printargs = tuple(items) + return format % printargs + + +def clause(reldict, relsym): + """ + Print the relation in clausal form. + :param reldict: a relation dictionary + :type reldict: defaultdict + :param relsym: a label for the relation + :type relsym: str + """ + items = (relsym, reldict["subjsym"], reldict["objsym"]) + return "%s(%r, %r)" % items + + +####################################################### +# Demos of relation extraction with regular expressions +####################################################### + +############################################ +# Example of in(ORG, LOC) +############################################ +def in_demo(trace=0, sql=True): + """ + Select pairs of organizations and locations whose mentions occur with an + intervening occurrence of the preposition "in". + + If the sql parameter is set to True, then the entity pairs are loaded into + an in-memory database, and subsequently pulled out using an SQL "SELECT" + query. 
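+
+    The in-memory table created below has the layout
+    ``Locations(OrgName, LocationName, DocID)``, so the final step is
+    essentially ``select OrgName from Locations where LocationName =
+    'Atlanta'``.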
+ """ + from nltk.corpus import ieer + + if sql: + try: + import sqlite3 + + connection = sqlite3.connect(":memory:") + cur = connection.cursor() + cur.execute( + """create table Locations + (OrgName text, LocationName text, DocID text)""" + ) + except ImportError: + import warnings + + warnings.warn("Cannot import sqlite; sql flag will be ignored.") + + IN = re.compile(r".*\bin\b(?!\b.+ing)") + + print() + print("IEER: in(ORG, LOC) -- just the clauses:") + print("=" * 45) + + for file in ieer.fileids(): + for doc in ieer.parsed_docs(file): + if trace: + print(doc.docno) + print("=" * 15) + for rel in extract_rels("ORG", "LOC", doc, corpus="ieer", pattern=IN): + print(clause(rel, relsym="IN")) + if sql: + try: + rtuple = (rel["subjtext"], rel["objtext"], doc.docno) + cur.execute( + """insert into Locations + values (?, ?, ?)""", + rtuple, + ) + connection.commit() + except NameError: + pass + + if sql: + try: + cur.execute( + """select OrgName from Locations + where LocationName = 'Atlanta'""" + ) + print() + print("Extract data from SQL table: ORGs in Atlanta") + print("-" * 15) + for row in cur: + print(row) + except NameError: + pass + + +############################################ +# Example of has_role(PER, LOC) +############################################ + + +def roles_demo(trace=0): + from nltk.corpus import ieer + + roles = r""" + (.*( # assorted roles + analyst| + chair(wo)?man| + commissioner| + counsel| + director| + economist| + editor| + executive| + foreman| + governor| + head| + lawyer| + leader| + librarian).*)| + manager| + partner| + president| + producer| + professor| + researcher| + spokes(wo)?man| + writer| + ,\sof\sthe?\s* # "X, of (the) Y" + """ + ROLES = re.compile(roles, re.VERBOSE) + + print() + print("IEER: has_role(PER, ORG) -- raw rtuples:") + print("=" * 45) + + for file in ieer.fileids(): + for doc in ieer.parsed_docs(file): + lcon = rcon = False + if trace: + print(doc.docno) + print("=" * 15) + lcon = rcon = True + for rel in extract_rels("PER", "ORG", doc, corpus="ieer", pattern=ROLES): + print(rtuple(rel, lcon=lcon, rcon=rcon)) + + +############################################## +### Show what's in the IEER Headlines +############################################## + + +def ieer_headlines(): + + from nltk.corpus import ieer + from nltk.tree import Tree + + print("IEER: First 20 Headlines") + print("=" * 45) + + trees = [ + (doc.docno, doc.headline) + for file in ieer.fileids() + for doc in ieer.parsed_docs(file) + ] + for tree in trees[:20]: + print() + print("%s:\n%s" % tree) + + +############################################# +## Dutch CONLL2002: take_on_role(PER, ORG +############################################# + + +def conllned(trace=1): + """ + Find the copula+'van' relation ('of') in the Dutch tagged training corpus + from CoNLL 2002. 
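+
+    The filler pattern below matches a present or past form of *zijn* ('be')
+    or *worden* ('become') followed, possibly at a distance, by *van* ('of');
+    with the default ``trace=1`` the matching rtuples are printed together
+    with their left and right contexts.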
+ """ + + from nltk.corpus import conll2002 + + vnv = """ + ( + is/V| # 3rd sing present and + was/V| # past forms of the verb zijn ('be') + werd/V| # and also present + wordt/V # past of worden ('become) + ) + .* # followed by anything + van/Prep # followed by van ('of') + """ + VAN = re.compile(vnv, re.VERBOSE) + + print() + print("Dutch CoNLL2002: van(PER, ORG) -- raw rtuples with context:") + print("=" * 45) + + for doc in conll2002.chunked_sents("ned.train"): + lcon = rcon = False + if trace: + lcon = rcon = True + for rel in extract_rels( + "PER", "ORG", doc, corpus="conll2002", pattern=VAN, window=10 + ): + print(rtuple(rel, lcon=lcon, rcon=rcon)) + + +############################################# +## Spanish CONLL2002: (PER, ORG) +############################################# + + +def conllesp(): + from nltk.corpus import conll2002 + + de = """ + .* + ( + de/SP| + del/SP + ) + """ + DE = re.compile(de, re.VERBOSE) + + print() + print("Spanish CoNLL2002: de(ORG, LOC) -- just the first 10 clauses:") + print("=" * 45) + rels = [ + rel + for doc in conll2002.chunked_sents("esp.train") + for rel in extract_rels("ORG", "LOC", doc, corpus="conll2002", pattern=DE) + ] + for r in rels[:10]: + print(clause(r, relsym="DE")) + print() + + +def ne_chunked(): + print() + print("1500 Sentences from Penn Treebank, as processed by NLTK NE Chunker") + print("=" * 45) + ROLE = re.compile( + r".*(chairman|president|trader|scientist|economist|analyst|partner).*" + ) + rels = [] + for i, sent in enumerate(nltk.corpus.treebank.tagged_sents()[:1500]): + sent = nltk.ne_chunk(sent) + rels = extract_rels("PER", "ORG", sent, corpus="ace", pattern=ROLE, window=7) + for rel in rels: + print(f"{i:<5}{rtuple(rel)}") + + +if __name__ == "__main__": + import nltk + from nltk.sem import relextract + + in_demo(trace=0) + roles_demo(trace=0) + conllned() + conllesp() + ieer_headlines() + ne_chunked() diff --git a/venv/lib/python3.10/site-packages/nltk/sem/skolemize.py b/venv/lib/python3.10/site-packages/nltk/sem/skolemize.py new file mode 100644 index 0000000000000000000000000000000000000000..6f98437cee85ecf4a023a71a3f4518e25893ef8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/skolemize.py @@ -0,0 +1,148 @@ +# Natural Language Toolkit: Semantic Interpretation +# +# Author: Ewan Klein +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from nltk.sem.logic import ( + AllExpression, + AndExpression, + ApplicationExpression, + EqualityExpression, + ExistsExpression, + IffExpression, + ImpExpression, + NegatedExpression, + OrExpression, + VariableExpression, + skolem_function, + unique_variable, +) + + +def skolemize(expression, univ_scope=None, used_variables=None): + """ + Skolemize the expression and convert to conjunctive normal form (CNF) + """ + if univ_scope is None: + univ_scope = set() + if used_variables is None: + used_variables = set() + + if isinstance(expression, AllExpression): + term = skolemize( + expression.term, + univ_scope | {expression.variable}, + used_variables | {expression.variable}, + ) + return term.replace( + expression.variable, + VariableExpression(unique_variable(ignore=used_variables)), + ) + elif isinstance(expression, AndExpression): + return skolemize(expression.first, univ_scope, used_variables) & skolemize( + expression.second, univ_scope, used_variables + ) + elif isinstance(expression, OrExpression): + return to_cnf( + skolemize(expression.first, univ_scope, used_variables), + skolemize(expression.second, univ_scope, 
used_variables), + ) + elif isinstance(expression, ImpExpression): + return to_cnf( + skolemize(-expression.first, univ_scope, used_variables), + skolemize(expression.second, univ_scope, used_variables), + ) + elif isinstance(expression, IffExpression): + return to_cnf( + skolemize(-expression.first, univ_scope, used_variables), + skolemize(expression.second, univ_scope, used_variables), + ) & to_cnf( + skolemize(expression.first, univ_scope, used_variables), + skolemize(-expression.second, univ_scope, used_variables), + ) + elif isinstance(expression, EqualityExpression): + return expression + elif isinstance(expression, NegatedExpression): + negated = expression.term + if isinstance(negated, AllExpression): + term = skolemize( + -negated.term, univ_scope, used_variables | {negated.variable} + ) + if univ_scope: + return term.replace(negated.variable, skolem_function(univ_scope)) + else: + skolem_constant = VariableExpression( + unique_variable(ignore=used_variables) + ) + return term.replace(negated.variable, skolem_constant) + elif isinstance(negated, AndExpression): + return to_cnf( + skolemize(-negated.first, univ_scope, used_variables), + skolemize(-negated.second, univ_scope, used_variables), + ) + elif isinstance(negated, OrExpression): + return skolemize(-negated.first, univ_scope, used_variables) & skolemize( + -negated.second, univ_scope, used_variables + ) + elif isinstance(negated, ImpExpression): + return skolemize(negated.first, univ_scope, used_variables) & skolemize( + -negated.second, univ_scope, used_variables + ) + elif isinstance(negated, IffExpression): + return to_cnf( + skolemize(-negated.first, univ_scope, used_variables), + skolemize(-negated.second, univ_scope, used_variables), + ) & to_cnf( + skolemize(negated.first, univ_scope, used_variables), + skolemize(negated.second, univ_scope, used_variables), + ) + elif isinstance(negated, EqualityExpression): + return expression + elif isinstance(negated, NegatedExpression): + return skolemize(negated.term, univ_scope, used_variables) + elif isinstance(negated, ExistsExpression): + term = skolemize( + -negated.term, + univ_scope | {negated.variable}, + used_variables | {negated.variable}, + ) + return term.replace( + negated.variable, + VariableExpression(unique_variable(ignore=used_variables)), + ) + elif isinstance(negated, ApplicationExpression): + return expression + else: + raise Exception("'%s' cannot be skolemized" % expression) + elif isinstance(expression, ExistsExpression): + term = skolemize( + expression.term, univ_scope, used_variables | {expression.variable} + ) + if univ_scope: + return term.replace(expression.variable, skolem_function(univ_scope)) + else: + skolem_constant = VariableExpression(unique_variable(ignore=used_variables)) + return term.replace(expression.variable, skolem_constant) + elif isinstance(expression, ApplicationExpression): + return expression + else: + raise Exception("'%s' cannot be skolemized" % expression) + + +def to_cnf(first, second): + """ + Convert this split disjunction to conjunctive normal form (CNF) + """ + if isinstance(first, AndExpression): + r_first = to_cnf(first.first, second) + r_second = to_cnf(first.second, second) + return r_first & r_second + elif isinstance(second, AndExpression): + r_first = to_cnf(first, second.first) + r_second = to_cnf(first, second.second) + return r_first & r_second + else: + return first | second diff --git a/venv/lib/python3.10/site-packages/nltk/sem/util.py b/venv/lib/python3.10/site-packages/nltk/sem/util.py new file mode 100644 
index 0000000000000000000000000000000000000000..8d119db424331b9b9873733a0acc6e9b3754a5cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sem/util.py @@ -0,0 +1,309 @@ +# Natural Language Toolkit: Semantic Interpretation +# +# Author: Ewan Klein +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +Utility functions for batch-processing sentences: parsing and +extraction of the semantic representation of the root node of the the +syntax tree, followed by evaluation of the semantic representation in +a first-order model. +""" + +import codecs + +from nltk.sem import evaluate + +############################################################## +## Utility functions for connecting parse output to semantics +############################################################## + + +def parse_sents(inputs, grammar, trace=0): + """ + Convert input sentences into syntactic trees. + + :param inputs: sentences to be parsed + :type inputs: list(str) + :param grammar: ``FeatureGrammar`` or name of feature-based grammar + :type grammar: nltk.grammar.FeatureGrammar + :rtype: list(nltk.tree.Tree) or dict(list(str)): list(Tree) + :return: a mapping from input sentences to a list of ``Tree`` instances. + """ + # put imports here to avoid circult dependencies + from nltk.grammar import FeatureGrammar + from nltk.parse import FeatureChartParser, load_parser + + if isinstance(grammar, FeatureGrammar): + cp = FeatureChartParser(grammar) + else: + cp = load_parser(grammar, trace=trace) + parses = [] + for sent in inputs: + tokens = sent.split() # use a tokenizer? + syntrees = list(cp.parse(tokens)) + parses.append(syntrees) + return parses + + +def root_semrep(syntree, semkey="SEM"): + """ + Find the semantic representation at the root of a tree. + + :param syntree: a parse ``Tree`` + :param semkey: the feature label to use for the root semantics in the tree + :return: the semantic representation at the root of a ``Tree`` + :rtype: sem.Expression + """ + from nltk.grammar import FeatStructNonterminal + + node = syntree.label() + assert isinstance(node, FeatStructNonterminal) + try: + return node[semkey] + except KeyError: + print(node, end=" ") + print("has no specification for the feature %s" % semkey) + raise + + +def interpret_sents(inputs, grammar, semkey="SEM", trace=0): + """ + Add the semantic representation to each syntactic parse tree + of each input sentence. + + :param inputs: a list of sentences + :type inputs: list(str) + :param grammar: ``FeatureGrammar`` or name of feature-based grammar + :type grammar: nltk.grammar.FeatureGrammar + :return: a mapping from sentences to lists of pairs (parse-tree, semantic-representations) + :rtype: list(list(tuple(nltk.tree.Tree, nltk.sem.logic.ConstantExpression))) + """ + return [ + [(syn, root_semrep(syn, semkey)) for syn in syntrees] + for syntrees in parse_sents(inputs, grammar, trace=trace) + ] + + +def evaluate_sents(inputs, grammar, model, assignment, trace=0): + """ + Add the truth-in-a-model value to each semantic representation + for each syntactic parse of each input sentences. 
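
    A typical call, mirroring ``demo()`` below (illustrative sketch: it assumes
    ``demo_model0()`` has been run first so that the example model ``m0`` and
    assignment ``g0`` exist)::

        demo_model0()
        results = evaluate_sents(
            ["John sees Mary"], "grammars/sample_grammars/sem2.fcfg", m0, g0
        )
        for (syntree, semrep, value) in results[0]:
            print(semrep, value)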
+ + :param inputs: a list of sentences + :type inputs: list(str) + :param grammar: ``FeatureGrammar`` or name of feature-based grammar + :type grammar: nltk.grammar.FeatureGrammar + :return: a mapping from sentences to lists of triples (parse-tree, semantic-representations, evaluation-in-model) + :rtype: list(list(tuple(nltk.tree.Tree, nltk.sem.logic.ConstantExpression, bool or dict(str): bool))) + """ + return [ + [ + (syn, sem, model.evaluate("%s" % sem, assignment, trace=trace)) + for (syn, sem) in interpretations + ] + for interpretations in interpret_sents(inputs, grammar) + ] + + +def demo_model0(): + global m0, g0 + # Initialize a valuation of non-logical constants.""" + v = [ + ("john", "b1"), + ("mary", "g1"), + ("suzie", "g2"), + ("fido", "d1"), + ("tess", "d2"), + ("noosa", "n"), + ("girl", {"g1", "g2"}), + ("boy", {"b1", "b2"}), + ("dog", {"d1", "d2"}), + ("bark", {"d1", "d2"}), + ("walk", {"b1", "g2", "d1"}), + ("chase", {("b1", "g1"), ("b2", "g1"), ("g1", "d1"), ("g2", "d2")}), + ( + "see", + {("b1", "g1"), ("b2", "d2"), ("g1", "b1"), ("d2", "b1"), ("g2", "n")}, + ), + ("in", {("b1", "n"), ("b2", "n"), ("d2", "n")}), + ("with", {("b1", "g1"), ("g1", "b1"), ("d1", "b1"), ("b1", "d1")}), + ] + # Read in the data from ``v`` + val = evaluate.Valuation(v) + # Bind ``dom`` to the ``domain`` property of ``val`` + dom = val.domain + # Initialize a model with parameters ``dom`` and ``val``. + m0 = evaluate.Model(dom, val) + # Initialize a variable assignment with parameter ``dom`` + g0 = evaluate.Assignment(dom) + + +def read_sents(filename, encoding="utf8"): + with codecs.open(filename, "r", encoding) as fp: + sents = [l.rstrip() for l in fp] + + # get rid of blank lines + sents = [l for l in sents if len(l) > 0] + sents = [l for l in sents if not l[0] == "#"] + return sents + + +def demo_legacy_grammar(): + """ + Check that interpret_sents() is compatible with legacy grammars that use + a lowercase 'sem' feature. + + Define 'test.fcfg' to be the following + + """ + from nltk.grammar import FeatureGrammar + + g = FeatureGrammar.fromstring( + """ + % start S + S[sem=] -> 'hello' + """ + ) + print("Reading grammar: %s" % g) + print("*" * 20) + for reading in interpret_sents(["hello"], g, semkey="sem"): + syn, sem = reading[0] + print() + print("output: ", sem) + + +def demo(): + import sys + from optparse import OptionParser + + description = """ + Parse and evaluate some sentences. 
+ """ + + opts = OptionParser(description=description) + + opts.set_defaults( + evaluate=True, + beta=True, + syntrace=0, + semtrace=0, + demo="default", + grammar="", + sentences="", + ) + + opts.add_option( + "-d", + "--demo", + dest="demo", + help="choose demo D; omit this for the default demo, or specify 'chat80'", + metavar="D", + ) + opts.add_option( + "-g", "--gram", dest="grammar", help="read in grammar G", metavar="G" + ) + opts.add_option( + "-m", + "--model", + dest="model", + help="import model M (omit '.py' suffix)", + metavar="M", + ) + opts.add_option( + "-s", + "--sentences", + dest="sentences", + help="read in a file of test sentences S", + metavar="S", + ) + opts.add_option( + "-e", + "--no-eval", + action="store_false", + dest="evaluate", + help="just do a syntactic analysis", + ) + opts.add_option( + "-b", + "--no-beta-reduction", + action="store_false", + dest="beta", + help="don't carry out beta-reduction", + ) + opts.add_option( + "-t", + "--syntrace", + action="count", + dest="syntrace", + help="set syntactic tracing on; requires '-e' option", + ) + opts.add_option( + "-T", + "--semtrace", + action="count", + dest="semtrace", + help="set semantic tracing on", + ) + + (options, args) = opts.parse_args() + + SPACER = "-" * 30 + + demo_model0() + + sents = [ + "Fido sees a boy with Mary", + "John sees Mary", + "every girl chases a dog", + "every boy chases a girl", + "John walks with a girl in Noosa", + "who walks", + ] + + gramfile = "grammars/sample_grammars/sem2.fcfg" + + if options.sentences: + sentsfile = options.sentences + if options.grammar: + gramfile = options.grammar + if options.model: + exec("import %s as model" % options.model) + + if sents is None: + sents = read_sents(sentsfile) + + # Set model and assignment + model = m0 + g = g0 + + if options.evaluate: + evaluations = evaluate_sents(sents, gramfile, model, g, trace=options.semtrace) + else: + semreps = interpret_sents(sents, gramfile, trace=options.syntrace) + + for i, sent in enumerate(sents): + n = 1 + print("\nSentence: %s" % sent) + print(SPACER) + if options.evaluate: + + for (syntree, semrep, value) in evaluations[i]: + if isinstance(value, dict): + value = set(value.keys()) + print("%d: %s" % (n, semrep)) + print(value) + n += 1 + else: + + for (syntree, semrep) in semreps[i]: + print("%d: %s" % (n, semrep)) + n += 1 + + +if __name__ == "__main__": + demo() + demo_legacy_grammar() diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__init__.py b/venv/lib/python3.10/site-packages/nltk/translate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0059c1e19003bc946f699ca5895f9932ed4ec341 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/__init__.py @@ -0,0 +1,32 @@ +# Natural Language Toolkit: Machine Translation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird , Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Experimental features for machine translation. +These interfaces are prone to change. 
+ +isort:skip_file +""" + +from nltk.translate.api import AlignedSent, Alignment, PhraseTable +from nltk.translate.ibm_model import IBMModel +from nltk.translate.ibm1 import IBMModel1 +from nltk.translate.ibm2 import IBMModel2 +from nltk.translate.ibm3 import IBMModel3 +from nltk.translate.ibm4 import IBMModel4 +from nltk.translate.ibm5 import IBMModel5 +from nltk.translate.bleu_score import sentence_bleu as bleu +from nltk.translate.ribes_score import sentence_ribes as ribes +from nltk.translate.meteor_score import meteor_score as meteor +from nltk.translate.metrics import alignment_error_rate +from nltk.translate.stack_decoder import StackDecoder +from nltk.translate.nist_score import sentence_nist as nist +from nltk.translate.chrf_score import sentence_chrf as chrf +from nltk.translate.gale_church import trace +from nltk.translate.gdfa import grow_diag_final_and +from nltk.translate.gleu_score import sentence_gleu as gleu +from nltk.translate.phrase_based import extract diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..377b250d196e146bc09fefaa4cb506ea09d89881 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gdfa.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gdfa.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e89391ee7d1ad0e7e70c3c810d700260a2f7e62 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gdfa.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bc4e34168f17a245f373924771fab7c718cd857 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm1.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/metrics.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb7178f378dd67e1dba4f846d1d9528d39f61b23 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/metrics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/phrase_based.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/phrase_based.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67debd17ecd6ec97b84bfce317a709dce93d688a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/phrase_based.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/api.py b/venv/lib/python3.10/site-packages/nltk/translate/api.py new file mode 100644 index 0000000000000000000000000000000000000000..cf00f2b52f00cd7bf6df82d9b8d4557bb0592079 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/api.py @@ -0,0 +1,334 @@ +# Natural Language Toolkit: API for alignment and translation objects +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Will Zhang +# Guan Gui +# Steven Bird +# Tah 
Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +import subprocess +from collections import namedtuple + + +class AlignedSent: + """ + Return an aligned sentence object, which encapsulates two sentences + along with an ``Alignment`` between them. + + Typically used in machine translation to represent a sentence and + its translation. + + >>> from nltk.translate import AlignedSent, Alignment + >>> algnsent = AlignedSent(['klein', 'ist', 'das', 'Haus'], + ... ['the', 'house', 'is', 'small'], Alignment.fromstring('0-3 1-2 2-0 3-1')) + >>> algnsent.words + ['klein', 'ist', 'das', 'Haus'] + >>> algnsent.mots + ['the', 'house', 'is', 'small'] + >>> algnsent.alignment + Alignment([(0, 3), (1, 2), (2, 0), (3, 1)]) + >>> from nltk.corpus import comtrans + >>> print(comtrans.aligned_sents()[54]) + 'So why should EU arm...'> + >>> print(comtrans.aligned_sents()[54].alignment) + 0-0 0-1 1-0 2-2 3-4 3-5 4-7 5-8 6-3 7-9 8-9 9-10 9-11 10-12 11-6 12-6 13-13 + + :param words: Words in the target language sentence + :type words: list(str) + :param mots: Words in the source language sentence + :type mots: list(str) + :param alignment: Word-level alignments between ``words`` and ``mots``. + Each alignment is represented as a 2-tuple (words_index, mots_index). + :type alignment: Alignment + """ + + def __init__(self, words, mots, alignment=None): + self._words = words + self._mots = mots + if alignment is None: + self.alignment = Alignment([]) + else: + assert type(alignment) is Alignment + self.alignment = alignment + + @property + def words(self): + return self._words + + @property + def mots(self): + return self._mots + + def _get_alignment(self): + return self._alignment + + def _set_alignment(self, alignment): + _check_alignment(len(self.words), len(self.mots), alignment) + self._alignment = alignment + + alignment = property(_get_alignment, _set_alignment) + + def __repr__(self): + """ + Return a string representation for this ``AlignedSent``. + + :rtype: str + """ + words = "[%s]" % (", ".join("'%s'" % w for w in self._words)) + mots = "[%s]" % (", ".join("'%s'" % w for w in self._mots)) + + return f"AlignedSent({words}, {mots}, {self._alignment!r})" + + def _to_dot(self): + """ + Dot representation of the aligned sentence + """ + s = "graph align {\n" + s += "node[shape=plaintext]\n" + + # Declare node + for w in self._words: + s += f'"{w}_source" [label="{w}"] \n' + + for w in self._mots: + s += f'"{w}_target" [label="{w}"] \n' + + # Alignment + for u, v in self._alignment: + s += f'"{self._words[u]}_source" -- "{self._mots[v]}_target" \n' + + # Connect the source words + for i in range(len(self._words) - 1): + s += '"{}_source" -- "{}_source" [style=invis]\n'.format( + self._words[i], + self._words[i + 1], + ) + + # Connect the target words + for i in range(len(self._mots) - 1): + s += '"{}_target" -- "{}_target" [style=invis]\n'.format( + self._mots[i], + self._mots[i + 1], + ) + + # Put it in the same rank + s += "{rank = same; %s}\n" % (" ".join('"%s_source"' % w for w in self._words)) + s += "{rank = same; %s}\n" % (" ".join('"%s_target"' % w for w in self._mots)) + + s += "}" + + return s + + def _repr_svg_(self): + """ + Ipython magic : show SVG representation of this ``AlignedSent``. 
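
        Rendering relies on the external Graphviz ``dot`` binary being available
        on the system path; if it cannot be launched, the method raises an
        exception (see the subprocess call below).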
+ """ + dot_string = self._to_dot().encode("utf8") + output_format = "svg" + try: + process = subprocess.Popen( + ["dot", "-T%s" % output_format], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + except OSError as e: + raise Exception("Cannot find the dot binary from Graphviz package") from e + out, err = process.communicate(dot_string) + + return out.decode("utf8") + + def __str__(self): + """ + Return a human-readable string representation for this ``AlignedSent``. + + :rtype: str + """ + source = " ".join(self._words)[:20] + "..." + target = " ".join(self._mots)[:20] + "..." + return f" '{target}'>" + + def invert(self): + """ + Return the aligned sentence pair, reversing the directionality + + :rtype: AlignedSent + """ + return AlignedSent(self._mots, self._words, self._alignment.invert()) + + +class Alignment(frozenset): + """ + A storage class for representing alignment between two sequences, s1, s2. + In general, an alignment is a set of tuples of the form (i, j, ...) + representing an alignment between the i-th element of s1 and the + j-th element of s2. Tuples are extensible (they might contain + additional data, such as a boolean to indicate sure vs possible alignments). + + >>> from nltk.translate import Alignment + >>> a = Alignment([(0, 0), (0, 1), (1, 2), (2, 2)]) + >>> a.invert() + Alignment([(0, 0), (1, 0), (2, 1), (2, 2)]) + >>> print(a.invert()) + 0-0 1-0 2-1 2-2 + >>> a[0] + [(0, 1), (0, 0)] + >>> a.invert()[2] + [(2, 1), (2, 2)] + >>> b = Alignment([(0, 0), (0, 1)]) + >>> b.issubset(a) + True + >>> c = Alignment.fromstring('0-0 0-1') + >>> b == c + True + """ + + def __new__(cls, pairs): + self = frozenset.__new__(cls, pairs) + self._len = max(p[0] for p in self) if self != frozenset([]) else 0 + self._index = None + return self + + @classmethod + def fromstring(cls, s): + """ + Read a giza-formatted string and return an Alignment object. + + >>> Alignment.fromstring('0-0 2-1 9-2 21-3 10-4 7-5') + Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10, 4), (21, 3)]) + + :type s: str + :param s: the positional alignments in giza format + :rtype: Alignment + :return: An Alignment object corresponding to the string representation ``s``. + """ + + return Alignment([_giza2pair(a) for a in s.split()]) + + def __getitem__(self, key): + """ + Look up the alignments that map from a given index or slice. + """ + if not self._index: + self._build_index() + return self._index.__getitem__(key) + + def invert(self): + """ + Return an Alignment object, being the inverted mapping. + """ + return Alignment(((p[1], p[0]) + p[2:]) for p in self) + + def range(self, positions=None): + """ + Work out the range of the mapping from the given positions. + If no positions are specified, compute the range of the entire mapping. + """ + image = set() + if not self._index: + self._build_index() + if not positions: + positions = list(range(len(self._index))) + for p in positions: + image.update(f for _, f in self._index[p]) + return sorted(image) + + def __repr__(self): + """ + Produce a Giza-formatted string representing the alignment. + """ + return "Alignment(%r)" % sorted(self) + + def __str__(self): + """ + Produce a Giza-formatted string representing the alignment. + """ + return " ".join("%d-%d" % p[:2] for p in sorted(self)) + + def _build_index(self): + """ + Build a list self._index such that self._index[i] is a list + of the alignments originating from word i. 
+ """ + self._index = [[] for _ in range(self._len + 1)] + for p in self: + self._index[p[0]].append(p) + + +def _giza2pair(pair_string): + i, j = pair_string.split("-") + return int(i), int(j) + + +def _naacl2pair(pair_string): + i, j, p = pair_string.split("-") + return int(i), int(j) + + +def _check_alignment(num_words, num_mots, alignment): + """ + Check whether the alignments are legal. + + :param num_words: the number of source language words + :type num_words: int + :param num_mots: the number of target language words + :type num_mots: int + :param alignment: alignment to be checked + :type alignment: Alignment + :raise IndexError: if alignment falls outside the sentence + """ + + assert type(alignment) is Alignment + + if not all(0 <= pair[0] < num_words for pair in alignment): + raise IndexError("Alignment is outside boundary of words") + if not all(pair[1] is None or 0 <= pair[1] < num_mots for pair in alignment): + raise IndexError("Alignment is outside boundary of mots") + + +PhraseTableEntry = namedtuple("PhraseTableEntry", ["trg_phrase", "log_prob"]) + + +class PhraseTable: + """ + In-memory store of translations for a given phrase, and the log + probability of the those translations + """ + + def __init__(self): + self.src_phrases = dict() + + def translations_for(self, src_phrase): + """ + Get the translations for a source language phrase + + :param src_phrase: Source language phrase of interest + :type src_phrase: tuple(str) + + :return: A list of target language phrases that are translations + of ``src_phrase``, ordered in decreasing order of + likelihood. Each list element is a tuple of the target + phrase and its log probability. + :rtype: list(PhraseTableEntry) + """ + return self.src_phrases[src_phrase] + + def add(self, src_phrase, trg_phrase, log_prob): + """ + :type src_phrase: tuple(str) + :type trg_phrase: tuple(str) + + :param log_prob: Log probability that given ``src_phrase``, + ``trg_phrase`` is its translation + :type log_prob: float + """ + entry = PhraseTableEntry(trg_phrase=trg_phrase, log_prob=log_prob) + if src_phrase not in self.src_phrases: + self.src_phrases[src_phrase] = [] + self.src_phrases[src_phrase].append(entry) + self.src_phrases[src_phrase].sort(key=lambda e: e.log_prob, reverse=True) + + def __contains__(self, src_phrase): + return src_phrase in self.src_phrases diff --git a/venv/lib/python3.10/site-packages/nltk/translate/bleu_score.py b/venv/lib/python3.10/site-packages/nltk/translate/bleu_score.py new file mode 100644 index 0000000000000000000000000000000000000000..1b2cc949db964b029f4e7324cbbc7236d3ff9248 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/bleu_score.py @@ -0,0 +1,685 @@ +# Natural Language Toolkit: BLEU Score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim +# Contributors: Björn Mattsson, Dmitrijs Milajevs, Liling Tan +# URL: +# For license information, see LICENSE.TXT + +"""BLEU score implementation.""" + +import math +import sys +import warnings +from collections import Counter +from fractions import Fraction + +from nltk.util import ngrams + + +def sentence_bleu( + references, + hypothesis, + weights=(0.25, 0.25, 0.25, 0.25), + smoothing_function=None, + auto_reweigh=False, +): + """ + Calculate BLEU score (Bilingual Evaluation Understudy) from + Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. + "BLEU: a method for automatic evaluation of machine translation." + In Proceedings of ACL. 
https://www.aclweb.org/anthology/P02-1040.pdf + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', + ... 'forever', 'hearing', 'the', 'activity', 'guidebook', + ... 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS + 0.5045... + + If there is no ngrams overlap for any order of n-grams, BLEU returns the + value 0. This is because the precision for the order of n-grams without + overlap is 0, and the geometric mean in the final BLEU score computation + multiplies the 0 with the precision of other n-grams. This results in 0 + (independently of the precision of the other n-gram orders). The following + example has zero 3-gram and 4-gram overlaps: + + >>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS + 0.0 + + To avoid this harsh behaviour when no ngram overlaps are found a smoothing + function can be used. + + >>> chencherry = SmoothingFunction() + >>> sentence_bleu([reference1, reference2, reference3], hypothesis2, + ... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS + 0.0370... + + The default BLEU calculates a score for up to 4-grams using uniform + weights (this is called BLEU-4). To evaluate your translations with + higher/lower order ngrams, use customized weights. E.g. when accounting + for up to 5-grams with uniform weights (this is called BLEU-5) use: + + >>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.) + >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS + 0.3920... + + Multiple BLEU scores can be computed at once, by supplying a list of weights. + E.g. for computing BLEU-2, BLEU-3 *and* BLEU-4 in one computation, use: + >>> weights = [ + ... (1./2., 1./2.), + ... (1./3., 1./3., 1./3.), + ... (1./4., 1./4., 1./4., 1./4.) + ... ] + >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS + [0.7453..., 0.6240..., 0.5045...] + + :param references: reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights) + :type weights: tuple(float) / list(tuple(float)) + :param smoothing_function: + :type smoothing_function: SmoothingFunction + :param auto_reweigh: Option to re-normalize the weights uniformly. + :type auto_reweigh: bool + :return: The sentence-level BLEU score. Returns a list if multiple weights were supplied. 
+ :rtype: float / list(float) + """ + return corpus_bleu( + [references], [hypothesis], weights, smoothing_function, auto_reweigh + ) + + +def corpus_bleu( + list_of_references, + hypotheses, + weights=(0.25, 0.25, 0.25, 0.25), + smoothing_function=None, + auto_reweigh=False, +): + """ + Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all + the hypotheses and their respective references. + + Instead of averaging the sentence level BLEU scores (i.e. macro-average + precision), the original BLEU metric (Papineni et al. 2002) accounts for + the micro-average precision (i.e. summing the numerators and denominators + for each hypothesis-reference(s) pairs before the division). + + >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', + ... 'interested', 'in', 'world', 'history'] + >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', + ... 'because', 'he', 'read', 'the', 'book'] + + >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] + >>> hypotheses = [hyp1, hyp2] + >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS + 0.5920... + + The example below show that corpus_bleu() is different from averaging + sentence_bleu() for hypotheses + + >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1) + >>> score2 = sentence_bleu([ref2a], hyp2) + >>> (score1 + score2) / 2 # doctest: +ELLIPSIS + 0.6223... + + Custom weights may be supplied to fine-tune the BLEU score further. + A tuple of float weights for unigrams, bigrams, trigrams and so on can be given. + >>> weights = (0.1, 0.3, 0.5, 0.1) + >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS + 0.5818... + + This particular weight gave extra value to trigrams. + Furthermore, multiple weights can be given, resulting in multiple BLEU scores. + >>> weights = [ + ... (0.5, 0.5), + ... (0.333, 0.333, 0.334), + ... (0.25, 0.25, 0.25, 0.25), + ... (0.2, 0.2, 0.2, 0.2, 0.2) + ... ] + >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS + [0.8242..., 0.7067..., 0.5920..., 0.4719...] + + :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses + :type list_of_references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights) + :type weights: tuple(float) / list(tuple(float)) + :param smoothing_function: + :type smoothing_function: SmoothingFunction + :param auto_reweigh: Option to re-normalize the weights uniformly. + :type auto_reweigh: bool + :return: The corpus-level BLEU score. + :rtype: float + """ + # Before proceeding to compute BLEU, perform sanity checks. + + p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches. 
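    # Annotation (not part of the original module): p_numerators (above) and
    # p_denominators (below) accumulate the clipped-match numerators and the
    # hypothesis-ngram denominators over the whole corpus, so the modified
    # precisions are micro-averaged (summed before division) rather than
    # averaged per sentence.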
+ p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref. + hyp_lengths, ref_lengths = 0, 0 + + assert len(list_of_references) == len(hypotheses), ( + "The number of hypotheses and their reference(s) should be the " "same " + ) + + try: + weights[0][0] + except TypeError: + weights = [weights] + max_weight_length = max(len(weight) for weight in weights) + + # Iterate through each hypothesis and their corresponding references. + for references, hypothesis in zip(list_of_references, hypotheses): + # For each order of ngram, calculate the numerator and + # denominator for the corpus-level modified precision. + for i in range(1, max_weight_length + 1): + p_i = modified_precision(references, hypothesis, i) + p_numerators[i] += p_i.numerator + p_denominators[i] += p_i.denominator + + # Calculate the hypothesis length and the closest reference length. + # Adds them to the corpus-level hypothesis and reference counts. + hyp_len = len(hypothesis) + hyp_lengths += hyp_len + ref_lengths += closest_ref_length(references, hyp_len) + + # Calculate corpus-level brevity penalty. + bp = brevity_penalty(ref_lengths, hyp_lengths) + + # Collects the various precision values for the different ngram orders. + p_n = [ + Fraction(p_numerators[i], p_denominators[i], _normalize=False) + for i in range(1, max_weight_length + 1) + ] + + # Returns 0 if there's no matching n-grams + # We only need to check for p_numerators[1] == 0, since if there's + # no unigrams, there won't be any higher order ngrams. + if p_numerators[1] == 0: + return 0 if len(weights) == 1 else [0] * len(weights) + + # If there's no smoothing, set use method0 from SmoothinFunction class. + if not smoothing_function: + smoothing_function = SmoothingFunction().method0 + # Smoothen the modified precision. + # Note: smoothing_function() may convert values into floats; + # it tries to retain the Fraction object as much as the + # smoothing method allows. + p_n = smoothing_function( + p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths + ) + + bleu_scores = [] + for weight in weights: + # Uniformly re-weighting based on maximum hypothesis lengths if largest + # order of n-grams < 4 and weights is set at default. + if auto_reweigh: + if hyp_lengths < 4 and weight == (0.25, 0.25, 0.25, 0.25): + weight = (1 / hyp_lengths,) * hyp_lengths + + s = (w_i * math.log(p_i) for w_i, p_i in zip(weight, p_n) if p_i > 0) + s = bp * math.exp(math.fsum(s)) + bleu_scores.append(s) + return bleu_scores[0] if len(weights) == 1 else bleu_scores + + +def modified_precision(references, hypothesis, n): + """ + Calculate modified ngram precision. + + The normal precision method may lead to some wrong translations with + high-precision, e.g., the translation, in which a word of reference + repeats several times, has very high precision. + + This function only returns the Fraction object that contains the numerator + and denominator necessary to calculate the corpus-level precision. + To calculate the modified precision for a single pair of hypothesis and + references, cast the Fraction object into a float. + + The famous "the the the ... " example shows that you can get BLEU precision + by duplicating high frequency words. + + >>> reference1 = 'the cat is on the mat'.split() + >>> reference2 = 'there is a cat on the mat'.split() + >>> hypothesis1 = 'the the the the the the the'.split() + >>> references = [reference1, reference2] + >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS + 0.2857... 
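
    Here 'the' occurs seven times in the hypothesis but at most twice in any
    single reference, so the clipped count is 2 and the modified unigram
    precision is 2/7 = 0.2857...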
+ + In the modified n-gram precision, a reference word will be considered + exhausted after a matching hypothesis word is identified, e.g. + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', + ... 'forever', 'heed', 'Party', 'commands'] + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + >>> hypothesis = 'of the'.split() + >>> references = [reference1, reference2, reference3] + >>> float(modified_precision(references, hypothesis, n=1)) + 1.0 + >>> float(modified_precision(references, hypothesis, n=2)) + 1.0 + + An example of a normal machine translation hypothesis: + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', + ... 'forever', 'hearing', 'the', 'activity', 'guidebook', + ... 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', + ... 'forever', 'heed', 'Party', 'commands'] + + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + >>> references = [reference1, reference2, reference3] + >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS + 0.9444... + >>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS + 0.5714... + >>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS + 0.5882352941176471 + >>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS + 0.07692... + + + :param references: A list of reference translations. + :type references: list(list(str)) + :param hypothesis: A hypothesis translation. + :type hypothesis: list(str) + :param n: The ngram order. + :type n: int + :return: BLEU's modified precision for the nth order ngram. + :rtype: Fraction + """ + # Extracts all ngrams in hypothesis + # Set an empty Counter if hypothesis is empty. + counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter() + # Extract a union of references' counts. + # max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references]) + max_counts = {} + for reference in references: + reference_counts = ( + Counter(ngrams(reference, n)) if len(reference) >= n else Counter() + ) + for ngram in counts: + max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram]) + + # Assigns the intersection between hypothesis and references' counts. + clipped_counts = { + ngram: min(count, max_counts[ngram]) for ngram, count in counts.items() + } + + numerator = sum(clipped_counts.values()) + # Ensures that denominator is minimum 1 to avoid ZeroDivisionError. + # Usually this happens when the ngram order is > len(reference). 
+ denominator = max(1, sum(counts.values())) + + return Fraction(numerator, denominator, _normalize=False) + + +def closest_ref_length(references, hyp_len): + """ + This function finds the reference that is the closest length to the + hypothesis. The closest reference length is referred to as *r* variable + from the brevity penalty formula in Papineni et. al. (2002) + + :param references: A list of reference translations. + :type references: list(list(str)) + :param hyp_len: The length of the hypothesis. + :type hyp_len: int + :return: The length of the reference that's closest to the hypothesis. + :rtype: int + """ + ref_lens = (len(reference) for reference in references) + closest_ref_len = min( + ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len) + ) + return closest_ref_len + + +def brevity_penalty(closest_ref_len, hyp_len): + """ + Calculate brevity penalty. + + As the modified n-gram precision still has the problem from the short + length sentence, brevity penalty is used to modify the overall BLEU + score according to length. + + An example from the paper. There are three references with length 12, 15 + and 17. And a concise hypothesis of the length 12. The brevity penalty is 1. + + >>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12 + >>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15 + >>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17 + >>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12 + >>> references = [reference1, reference2, reference3] + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) + 1.0 + + In case a hypothesis translation is shorter than the references, penalty is + applied. + + >>> references = [['a'] * 28, ['a'] * 28] + >>> hypothesis = ['a'] * 12 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) + 0.2635971381157267 + + The length of the closest reference is used to compute the penalty. If the + length of a hypothesis is 12, and the reference lengths are 13 and 2, the + penalty is applied because the hypothesis length (12) is less then the + closest reference length (13). + + >>> references = [['a'] * 13, ['a'] * 2] + >>> hypothesis = ['a'] * 12 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS + 0.9200... + + The brevity penalty doesn't depend on reference order. More importantly, + when two reference sentences are at the same distance, the shortest + reference sentence length is used. + + >>> references = [['a'] * 13, ['a'] * 11] + >>> hypothesis = ['a'] * 12 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> bp1 = brevity_penalty(closest_ref_len, hyp_len) + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(reversed(references), hyp_len) + >>> bp2 = brevity_penalty(closest_ref_len, hyp_len) + >>> bp1 == bp2 == 1 + True + + A test example from mteval-v13a.pl (starting from the line 705): + + >>> references = [['a'] * 11, ['a'] * 8] + >>> hypothesis = ['a'] * 7 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS + 0.8668... 
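
    Here the hypothesis length is 7 and the closest reference length is 8, so
    the penalty is exp(1 - 8/7), i.e. 0.8668...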
+ + >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7] + >>> hypothesis = ['a'] * 7 + >>> hyp_len = len(hypothesis) + >>> closest_ref_len = closest_ref_length(references, hyp_len) + >>> brevity_penalty(closest_ref_len, hyp_len) + 1.0 + + :param hyp_len: The length of the hypothesis for a single sentence OR the + sum of all the hypotheses' lengths for a corpus + :type hyp_len: int + :param closest_ref_len: The length of the closest reference for a single + hypothesis OR the sum of all the closest references for every hypotheses. + :type closest_ref_len: int + :return: BLEU's brevity penalty. + :rtype: float + """ + if hyp_len > closest_ref_len: + return 1 + # If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0 + elif hyp_len == 0: + return 0 + else: + return math.exp(1 - closest_ref_len / hyp_len) + + +class SmoothingFunction: + """ + This is an implementation of the smoothing techniques + for segment-level BLEU scores that was presented in + Boxing Chen and Collin Cherry (2014) A Systematic Comparison of + Smoothing Techniques for Sentence-Level BLEU. In WMT14. + http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf + """ + + def __init__(self, epsilon=0.1, alpha=5, k=5): + """ + This will initialize the parameters required for the various smoothing + techniques, the default values are set to the numbers used in the + experiments from Chen and Cherry (2014). + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', + ... 'that', 'the', 'military', 'always', 'obeys', 'the', + ... 'commands', 'of', 'the', 'party'] + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', + ... 'that', 'the', 'military', 'will', 'forever', 'heed', + ... 'Party', 'commands'] + + >>> chencherry = SmoothingFunction() + >>> print(sentence_bleu([reference1], hypothesis1)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method0)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method1)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method2)) # doctest: +ELLIPSIS + 0.4452... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method3)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method4)) # doctest: +ELLIPSIS + 0.4118... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method5)) # doctest: +ELLIPSIS + 0.4905... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method6)) # doctest: +ELLIPSIS + 0.4135... + >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method7)) # doctest: +ELLIPSIS + 0.4905... + + :param epsilon: the epsilon value use in method 1 + :type epsilon: float + :param alpha: the alpha value use in method 6 + :type alpha: int + :param k: the k value use in method 4 + :type k: int + """ + self.epsilon = epsilon + self.alpha = alpha + self.k = k + + def method0(self, p_n, *args, **kwargs): + """ + No smoothing. 
+ """ + p_n_new = [] + for i, p_i in enumerate(p_n): + if p_i.numerator != 0: + p_n_new.append(p_i) + else: + _msg = str( + "\nThe hypothesis contains 0 counts of {}-gram overlaps.\n" + "Therefore the BLEU score evaluates to 0, independently of\n" + "how many N-gram overlaps of lower order it contains.\n" + "Consider using lower n-gram order or use " + "SmoothingFunction()" + ).format(i + 1) + warnings.warn(_msg) + # When numerator==0 where denonminator==0 or !=0, the result + # for the precision score should be equal to 0 or undefined. + # Due to BLEU geometric mean computation in logarithm space, + # we we need to take the return sys.float_info.min such that + # math.log(sys.float_info.min) returns a 0 precision score. + p_n_new.append(sys.float_info.min) + return p_n_new + + def method1(self, p_n, *args, **kwargs): + """ + Smoothing method 1: Add *epsilon* counts to precision with 0 counts. + """ + return [ + (p_i.numerator + self.epsilon) / p_i.denominator + if p_i.numerator == 0 + else p_i + for p_i in p_n + ] + + def method2(self, p_n, *args, **kwargs): + """ + Smoothing method 2: Add 1 to both numerator and denominator from + Chin-Yew Lin and Franz Josef Och (2004) ORANGE: a Method for + Evaluating Automatic Evaluation Metrics for Machine Translation. + In COLING 2004. + """ + return [ + Fraction(p_n[i].numerator + 1, p_n[i].denominator + 1, _normalize=False) + if i != 0 + else p_n[0] + for i in range(len(p_n)) + ] + + def method3(self, p_n, *args, **kwargs): + """ + Smoothing method 3: NIST geometric sequence smoothing + The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each + precision score whose matching n-gram count is null. + k is 1 for the first 'n' value for which the n-gram match count is null/ + + For example, if the text contains: + + - one 2-gram match + - and (consequently) two 1-gram matches + + the n-gram count for each individual precision score would be: + + - n=1 => prec_count = 2 (two unigrams) + - n=2 => prec_count = 1 (one bigram) + - n=3 => prec_count = 1/2 (no trigram, taking 'smoothed' value of 1 / ( 2^k ), with k=1) + - n=4 => prec_count = 1/4 (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2) + """ + incvnt = 1 # From the mteval-v13a.pl, it's referred to as k. + for i, p_i in enumerate(p_n): + if p_i.numerator == 0: + p_n[i] = 1 / (2**incvnt * p_i.denominator) + incvnt += 1 + return p_n + + def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 4: + Shorter translations may have inflated precision values due to having + smaller denominators; therefore, we give them proportionally + smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry + suggests dividing by 1/ln(len(T)), where T is the length of the translation. + """ + incvnt = 1 + hyp_len = hyp_len if hyp_len else len(hypothesis) + for i, p_i in enumerate(p_n): + if p_i.numerator == 0 and hyp_len > 1: + # incvnt = i + 1 * self.k / math.log( + # hyp_len + # ) # Note that this K is different from the K from NIST. + # p_n[i] = incvnt / p_i.denominator\ + numerator = 1 / (2**incvnt * self.k / math.log(hyp_len)) + p_n[i] = numerator / p_i.denominator + incvnt += 1 + return p_n + + def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 5: + The matched counts for similar values of n should be similar. To a + calculate the n-gram matched count, it averages the n−1, n and n+1 gram + matched counts. 
+ """ + hyp_len = hyp_len if hyp_len else len(hypothesis) + m = {} + # Requires an precision value for an addition ngram order. + p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)] + m[-1] = p_n[0] + 1 + for i, p_i in enumerate(p_n): + p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3 + m[i] = p_n[i] + return p_n + + def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 6: + Interpolates the maximum likelihood estimate of the precision *p_n* with + a prior estimate *pi0*. The prior is estimated by assuming that the ratio + between pn and pn−1 will be the same as that between pn−1 and pn−2; from + Gao and He (2013) Training MRF-Based Phrase Translation Models using + Gradient Ascent. In NAACL. + """ + hyp_len = hyp_len if hyp_len else len(hypothesis) + # This smoothing only works when p_1 and p_2 is non-zero. + # Raise an error with an appropriate message when the input is too short + # to use this smoothing technique. + assert p_n[2], "This smoothing method requires non-zero precision for bigrams." + for i, p_i in enumerate(p_n): + if i in [0, 1]: # Skips the first 2 orders of ngrams. + continue + else: + pi0 = 0 if p_n[i - 2] == 0 else p_n[i - 1] ** 2 / p_n[i - 2] + # No. of ngrams in translation that matches the reference. + m = p_i.numerator + # No. of ngrams in translation. + l = sum(1 for _ in ngrams(hypothesis, i + 1)) + # Calculates the interpolated precision. + p_n[i] = (m + self.alpha * pi0) / (l + self.alpha) + return p_n + + def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs): + """ + Smoothing method 7: + Interpolates methods 4 and 5. + """ + hyp_len = hyp_len if hyp_len else len(hypothesis) + p_n = self.method4(p_n, references, hypothesis, hyp_len) + p_n = self.method5(p_n, references, hypothesis, hyp_len) + return p_n diff --git a/venv/lib/python3.10/site-packages/nltk/translate/chrf_score.py b/venv/lib/python3.10/site-packages/nltk/translate/chrf_score.py new file mode 100644 index 0000000000000000000000000000000000000000..d4b54f3a07166ba5179b2850cca82b21fe7c39f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/chrf_score.py @@ -0,0 +1,222 @@ +# Natural Language Toolkit: ChrF score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Maja Popovic +# Contributors: Liling Tan, Aleš Tamchyna (Memsource) +# URL: +# For license information, see LICENSE.TXT + +""" ChrF score implementation """ +import re +from collections import Counter, defaultdict + +from nltk.util import ngrams + + +def sentence_chrf( + reference, hypothesis, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True +): + """ + Calculates the sentence level CHRF (Character n-gram F-score) described in + - Maja Popovic. 2015. CHRF: Character n-gram F-score for Automatic MT Evaluation. + In Proceedings of the 10th Workshop on Machine Translation. + https://www.statmt.org/wmt15/pdf/WMT49.pdf + - Maja Popovic. 2016. CHRF Deconstructed: β Parameters and n-gram Weights. + In Proceedings of the 1st Conference on Machine Translation. + https://www.statmt.org/wmt16/pdf/W16-2341.pdf + + This implementation of CHRF only supports a single reference at the moment. 
+ + For details not reported in the paper, consult Maja Popovic's original + implementation: https://github.com/m-popovic/chrF + + The code should output results equivalent to running CHRF++ with the + following options: -nw 0 -b 3 + + An example from the original BLEU paper + https://www.aclweb.org/anthology/P02-1040.pdf + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands').split() + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party').split() + >>> hyp2 = str('It is to insure the troops forever hearing the activity ' + ... 'guidebook that party direct').split() + >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS + 0.6349... + >>> sentence_chrf(ref1, hyp2) # doctest: +ELLIPSIS + 0.3330... + + The infamous "the the the ... " example + + >>> ref = 'the cat is on the mat'.split() + >>> hyp = 'the the the the the the the'.split() + >>> sentence_chrf(ref, hyp) # doctest: +ELLIPSIS + 0.1468... + + An example to show that this function allows users to use strings instead of + tokens, i.e. list(str) as inputs. + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands') + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party') + >>> sentence_chrf(ref1, hyp1) # doctest: +ELLIPSIS + 0.6349... + >>> type(ref1) == type(hyp1) == str + True + >>> sentence_chrf(ref1.split(), hyp1.split()) # doctest: +ELLIPSIS + 0.6349... + + To skip the unigrams and only use 2- to 3-grams: + + >>> sentence_chrf(ref1, hyp1, min_len=2, max_len=3) # doctest: +ELLIPSIS + 0.6617... + + :param references: reference sentence + :type references: list(str) / str + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) / str + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :param beta: the parameter to assign more importance to recall over precision + :type beta: float + :param ignore_whitespace: ignore whitespace characters in scoring + :type ignore_whitespace: bool + :return: the sentence level CHRF score. + :rtype: float + """ + return corpus_chrf( + [reference], + [hypothesis], + min_len, + max_len, + beta=beta, + ignore_whitespace=ignore_whitespace, + ) + + +def _preprocess(sent, ignore_whitespace): + if type(sent) != str: + # turn list of tokens into a string + sent = " ".join(sent) + + if ignore_whitespace: + sent = re.sub(r"\s+", "", sent) + return sent + + +def chrf_precision_recall_fscore_support( + reference, hypothesis, n, beta=3.0, epsilon=1e-16 +): + """ + This function computes the precision, recall and fscore from the ngram + overlaps. It returns the `support` which is the true positive score. + + By underspecifying the input type, the function will be agnostic as to how + it computes the ngrams and simply take the whichever element in the list; + it could be either token or character. + + :param reference: The reference sentence. + :type reference: list + :param hypothesis: The hypothesis sentence. + :type hypothesis: list + :param n: Extract up to the n-th order ngrams + :type n: int + :param beta: The parameter to assign more importance to recall over precision. + :type beta: float + :param epsilon: The fallback value if the hypothesis or reference is empty. 
+ :type epsilon: float + :return: Returns the precision, recall and f-score and support (true positive). + :rtype: tuple(float) + """ + ref_ngrams = Counter(ngrams(reference, n)) + hyp_ngrams = Counter(ngrams(hypothesis, n)) + + # calculate the number of ngram matches + overlap_ngrams = ref_ngrams & hyp_ngrams + tp = sum(overlap_ngrams.values()) # True positives. + tpfp = sum(hyp_ngrams.values()) # True positives + False positives. + tpfn = sum(ref_ngrams.values()) # True positives + False negatives. + + try: + prec = tp / tpfp # precision + rec = tp / tpfn # recall + factor = beta**2 + fscore = (1 + factor) * (prec * rec) / (factor * prec + rec) + except ZeroDivisionError: + prec = rec = fscore = epsilon + return prec, rec, fscore, tp + + +def corpus_chrf( + references, hypotheses, min_len=1, max_len=6, beta=3.0, ignore_whitespace=True +): + """ + Calculates the corpus level CHRF (Character n-gram F-score), it is the + macro-averaged value of the sentence/segment level CHRF score. + + This implementation of CHRF only supports a single reference at the moment. + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands').split() + >>> ref2 = str('It is the guiding principle which guarantees the military ' + ... 'forces always being under the command of the Party').split() + >>> + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party').split() + >>> hyp2 = str('It is to insure the troops forever hearing the activity ' + ... 'guidebook that party direct') + >>> corpus_chrf([ref1, ref2, ref1, ref2], [hyp1, hyp2, hyp2, hyp1]) # doctest: +ELLIPSIS + 0.3910... + + :param references: a corpus of list of reference sentences, w.r.t. hypotheses + :type references: list(list(str)) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :param beta: the parameter to assign more importance to recall over precision + :type beta: float + :param ignore_whitespace: ignore whitespace characters in scoring + :type ignore_whitespace: bool + :return: the sentence level CHRF score. + :rtype: float + """ + + assert len(references) == len( + hypotheses + ), "The number of hypotheses and their references should be the same" + num_sents = len(hypotheses) + + # Keep f-scores for each n-gram order separate + ngram_fscores = defaultdict(lambda: list()) + + # Iterate through each hypothesis and their corresponding references. + for reference, hypothesis in zip(references, hypotheses): + + # preprocess both reference and hypothesis + reference = _preprocess(reference, ignore_whitespace) + hypothesis = _preprocess(hypothesis, ignore_whitespace) + + # Calculate f-scores for each sentence and for each n-gram order + # separately. + for n in range(min_len, max_len + 1): + # Compute the precision, recall, fscore and support. 
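+            # The f-score for this n-gram order is appended to
+            # ngram_fscores[n] and macro-averaged further below over all
+            # n-gram orders and all sentences.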
+ prec, rec, fscore, tp = chrf_precision_recall_fscore_support( + reference, hypothesis, n, beta=beta + ) + ngram_fscores[n].append(fscore) + + # how many n-gram sizes + num_ngram_sizes = len(ngram_fscores) + + # sum of f-scores over all sentences for each n-gram order + total_scores = [sum(fscores) for n, fscores in ngram_fscores.items()] + + # macro-average over n-gram orders and over all sentences + return (sum(total_scores) / num_ngram_sizes) / num_sents diff --git a/venv/lib/python3.10/site-packages/nltk/translate/gale_church.py b/venv/lib/python3.10/site-packages/nltk/translate/gale_church.py new file mode 100644 index 0000000000000000000000000000000000000000..d7c81940d9ac27c159b680d688343e67e9ef9c58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/gale_church.py @@ -0,0 +1,263 @@ +# Natural Language Toolkit: Gale-Church Aligner +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Torsten Marek +# Contributor: Cassidy Laidlaw, Liling Tan +# URL: +# For license information, see LICENSE.TXT + +""" + +A port of the Gale-Church Aligner. + +Gale & Church (1993), A Program for Aligning Sentences in Bilingual Corpora. +https://aclweb.org/anthology/J93-1004.pdf + +""" + +import math + +try: + from norm import logsf as norm_logsf + from scipy.stats import norm +except ImportError: + + def erfcc(x): + """Complementary error function.""" + z = abs(x) + t = 1 / (1 + 0.5 * z) + r = t * math.exp( + -z * z + - 1.26551223 + + t + * ( + 1.00002368 + + t + * ( + 0.37409196 + + t + * ( + 0.09678418 + + t + * ( + -0.18628806 + + t + * ( + 0.27886807 + + t + * ( + -1.13520398 + + t + * (1.48851587 + t * (-0.82215223 + t * 0.17087277)) + ) + ) + ) + ) + ) + ) + ) + if x >= 0.0: + return r + else: + return 2.0 - r + + def norm_cdf(x): + """Return the area under the normal distribution from M{-∞..x}.""" + return 1 - 0.5 * erfcc(x / math.sqrt(2)) + + def norm_logsf(x): + try: + return math.log(1 - norm_cdf(x)) + except ValueError: + return float("-inf") + + +LOG2 = math.log(2) + + +class LanguageIndependent: + # These are the language-independent probabilities and parameters + # given in Gale & Church + + # for the computation, l_1 is always the language with less characters + PRIORS = { + (1, 0): 0.0099, + (0, 1): 0.0099, + (1, 1): 0.89, + (2, 1): 0.089, + (1, 2): 0.089, + (2, 2): 0.011, + } + + AVERAGE_CHARACTERS = 1 + VARIANCE_CHARACTERS = 6.8 + + +def trace(backlinks, source_sents_lens, target_sents_lens): + """ + Traverse the alignment cost from the tracebacks and retrieves + appropriate sentence pairs. 
+ + :param backlinks: A dictionary where the key is the alignment points and value is the cost (referencing the LanguageIndependent.PRIORS) + :type backlinks: dict + :param source_sents_lens: A list of target sentences' lengths + :type source_sents_lens: list(int) + :param target_sents_lens: A list of target sentences' lengths + :type target_sents_lens: list(int) + """ + links = [] + position = (len(source_sents_lens), len(target_sents_lens)) + while position != (0, 0) and all(p >= 0 for p in position): + try: + s, t = backlinks[position] + except TypeError: + position = (position[0] - 1, position[1] - 1) + continue + for i in range(s): + for j in range(t): + links.append((position[0] - i - 1, position[1] - j - 1)) + position = (position[0] - s, position[1] - t) + + return links[::-1] + + +def align_log_prob(i, j, source_sents, target_sents, alignment, params): + """Returns the log probability of the two sentences C{source_sents[i]}, C{target_sents[j]} + being aligned with a specific C{alignment}. + + @param i: The offset of the source sentence. + @param j: The offset of the target sentence. + @param source_sents: The list of source sentence lengths. + @param target_sents: The list of target sentence lengths. + @param alignment: The alignment type, a tuple of two integers. + @param params: The sentence alignment parameters. + + @returns: The log probability of a specific alignment between the two sentences, given the parameters. + """ + l_s = sum(source_sents[i - offset - 1] for offset in range(alignment[0])) + l_t = sum(target_sents[j - offset - 1] for offset in range(alignment[1])) + try: + # actually, the paper says l_s * params.VARIANCE_CHARACTERS, this is based on the C + # reference implementation. With l_s in the denominator, insertions are impossible. + m = (l_s + l_t / params.AVERAGE_CHARACTERS) / 2 + delta = (l_s * params.AVERAGE_CHARACTERS - l_t) / math.sqrt( + m * params.VARIANCE_CHARACTERS + ) + except ZeroDivisionError: + return float("-inf") + + return -(LOG2 + norm_logsf(abs(delta)) + math.log(params.PRIORS[alignment])) + + +def align_blocks(source_sents_lens, target_sents_lens, params=LanguageIndependent): + """Return the sentence alignment of two text blocks (usually paragraphs). + + >>> align_blocks([5,5,5], [7,7,7]) + [(0, 0), (1, 1), (2, 2)] + >>> align_blocks([10,5,5], [12,20]) + [(0, 0), (1, 1), (2, 1)] + >>> align_blocks([12,20], [10,5,5]) + [(0, 0), (1, 1), (1, 2)] + >>> align_blocks([10,2,10,10,2,10], [12,3,20,3,12]) + [(0, 0), (1, 1), (2, 2), (3, 2), (4, 3), (5, 4)] + + @param source_sents_lens: The list of source sentence lengths. + @param target_sents_lens: The list of target sentence lengths. + @param params: the sentence alignment parameters. + @return: The sentence alignments, a list of index pairs. 
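+
+    As a rough sketch (the helper name C{_toy_delta} is hypothetical, not
+    part of NLTK), the length statistic that drives these alignments, using
+    the language-independent defaults, is::
+
+        import math
+
+        def _toy_delta(l_s, l_t, avg_chars=1, var_chars=6.8):
+            mean = (l_s + l_t / avg_chars) / 2
+            return (l_s * avg_chars - l_t) / math.sqrt(mean * var_chars)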
+ """ + + alignment_types = list(params.PRIORS.keys()) + + # there are always three rows in the history (with the last of them being filled) + D = [[]] + + backlinks = {} + + for i in range(len(source_sents_lens) + 1): + for j in range(len(target_sents_lens) + 1): + min_dist = float("inf") + min_align = None + for a in alignment_types: + prev_i = -1 - a[0] + prev_j = j - a[1] + if prev_i < -len(D) or prev_j < 0: + continue + p = D[prev_i][prev_j] + align_log_prob( + i, j, source_sents_lens, target_sents_lens, a, params + ) + if p < min_dist: + min_dist = p + min_align = a + + if min_dist == float("inf"): + min_dist = 0 + + backlinks[(i, j)] = min_align + D[-1].append(min_dist) + + if len(D) > 2: + D.pop(0) + D.append([]) + + return trace(backlinks, source_sents_lens, target_sents_lens) + + +def align_texts(source_blocks, target_blocks, params=LanguageIndependent): + """Creates the sentence alignment of two texts. + + Texts can consist of several blocks. Block boundaries cannot be crossed by sentence + alignment links. + + Each block consists of a list that contains the lengths (in characters) of the sentences + in this block. + + @param source_blocks: The list of blocks in the source text. + @param target_blocks: The list of blocks in the target text. + @param params: the sentence alignment parameters. + + @returns: A list of sentence alignment lists + """ + if len(source_blocks) != len(target_blocks): + raise ValueError( + "Source and target texts do not have the same number of blocks." + ) + + return [ + align_blocks(source_block, target_block, params) + for source_block, target_block in zip(source_blocks, target_blocks) + ] + + +# File I/O functions; may belong in a corpus reader + + +def split_at(it, split_value): + """Splits an iterator C{it} at values of C{split_value}. + + Each instance of C{split_value} is swallowed. The iterator produces + subiterators which need to be consumed fully before the next subiterator + can be used. + """ + + def _chunk_iterator(first): + v = first + while v != split_value: + yield v + v = it.next() + + while True: + yield _chunk_iterator(it.next()) + + +def parse_token_stream(stream, soft_delimiter, hard_delimiter): + """Parses a stream of tokens and splits it into sentences (using C{soft_delimiter} tokens) + and blocks (using C{hard_delimiter} tokens) for use with the L{align_texts} function. + """ + return [ + [ + sum(len(token) for token in sentence_it) + for sentence_it in split_at(block_it, soft_delimiter) + ] + for block_it in split_at(stream, hard_delimiter) + ] diff --git a/venv/lib/python3.10/site-packages/nltk/translate/gdfa.py b/venv/lib/python3.10/site-packages/nltk/translate/gdfa.py new file mode 100644 index 0000000000000000000000000000000000000000..57df0cea63b35bfbf83f9d330bf137563b332a33 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/gdfa.py @@ -0,0 +1,138 @@ +# Natural Language Toolkit: GDFA word alignment symmetrization +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Liling Tan +# URL: +# For license information, see LICENSE.TXT + +from collections import defaultdict + + +def grow_diag_final_and(srclen, trglen, e2f, f2e): + """ + This module symmetrisatizes the source-to-target and target-to-source + word alignment output and produces, aka. GDFA algorithm (Koehn, 2005). + + Step 1: Find the intersection of the bidirectional alignment. 
+ + Step 2: Search for additional neighbor alignment points to be added, given + these criteria: (i) neighbor alignments points are not in the + intersection and (ii) neighbor alignments are in the union. + + Step 3: Add all other alignment points that are not in the intersection, not in + the neighboring alignments that met the criteria but in the original + forward/backward alignment outputs. + + >>> forw = ('0-0 2-1 9-2 21-3 10-4 7-5 11-6 9-7 12-8 1-9 3-10 ' + ... '4-11 17-12 17-13 25-14 13-15 24-16 11-17 28-18') + >>> back = ('0-0 1-9 2-9 3-10 4-11 5-12 6-6 7-5 8-6 9-7 10-4 ' + ... '11-6 12-8 13-12 15-12 17-13 18-13 19-12 20-13 ' + ... '21-3 22-12 23-14 24-17 25-15 26-17 27-18 28-18') + >>> srctext = ("この よう な ハロー 白色 わい 星 の L 関数 " + ... "は L と 共 に 不連続 に 増加 する こと が " + ... "期待 さ れる こと を 示し た 。") + >>> trgtext = ("Therefore , we expect that the luminosity function " + ... "of such halo white dwarfs increases discontinuously " + ... "with the luminosity .") + >>> srclen = len(srctext.split()) + >>> trglen = len(trgtext.split()) + >>> + >>> gdfa = grow_diag_final_and(srclen, trglen, forw, back) + >>> gdfa == sorted(set([(28, 18), (6, 6), (24, 17), (2, 1), (15, 12), (13, 12), + ... (2, 9), (3, 10), (26, 17), (25, 15), (8, 6), (9, 7), (20, + ... 13), (18, 13), (0, 0), (10, 4), (13, 15), (23, 14), (7, 5), + ... (25, 14), (1, 9), (17, 13), (4, 11), (11, 17), (9, 2), (22, + ... 12), (27, 18), (24, 16), (21, 3), (19, 12), (17, 12), (5, + ... 12), (11, 6), (12, 8)])) + True + + References: + Koehn, P., A. Axelrod, A. Birch, C. Callison, M. Osborne, and D. Talbot. + 2005. Edinburgh System Description for the 2005 IWSLT Speech + Translation Evaluation. In MT Eval Workshop. + + :type srclen: int + :param srclen: the number of tokens in the source language + :type trglen: int + :param trglen: the number of tokens in the target language + :type e2f: str + :param e2f: the forward word alignment outputs from source-to-target + language (in pharaoh output format) + :type f2e: str + :param f2e: the backward word alignment outputs from target-to-source + language (in pharaoh output format) + :rtype: set(tuple(int)) + :return: the symmetrized alignment points from the GDFA algorithm + """ + + # Converts pharaoh text format into list of tuples. + e2f = [tuple(map(int, a.split("-"))) for a in e2f.split()] + f2e = [tuple(map(int, a.split("-"))) for a in f2e.split()] + + neighbors = [(-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)] + alignment = set(e2f).intersection(set(f2e)) # Find the intersection. + union = set(e2f).union(set(f2e)) + + # *aligned* is used to check if neighbors are aligned in grow_diag() + aligned = defaultdict(set) + for i, j in alignment: + aligned["e"].add(i) + aligned["f"].add(j) + + def grow_diag(): + """ + Search for the neighbor points and them to the intersected alignment + points if criteria are met. + """ + prev_len = len(alignment) - 1 + # iterate until no new points added + while prev_len < len(alignment): + no_new_points = True + # for english word e = 0 ... en + for e in range(srclen): + # for foreign word f = 0 ... 
fn + for f in range(trglen): + # if ( e aligned with f) + if (e, f) in alignment: + # for each neighboring point (e-new, f-new) + for neighbor in neighbors: + neighbor = tuple(i + j for i, j in zip((e, f), neighbor)) + e_new, f_new = neighbor + # if ( ( e-new not aligned and f-new not aligned) + # and (e-new, f-new in union(e2f, f2e) ) + if ( + e_new not in aligned and f_new not in aligned + ) and neighbor in union: + alignment.add(neighbor) + aligned["e"].add(e_new) + aligned["f"].add(f_new) + prev_len += 1 + no_new_points = False + # iterate until no new points added + if no_new_points: + break + + def final_and(a): + """ + Adds remaining points that are not in the intersection, not in the + neighboring alignments but in the original *e2f* and *f2e* alignments + """ + # for english word e = 0 ... en + for e_new in range(srclen): + # for foreign word f = 0 ... fn + for f_new in range(trglen): + # if ( ( e-new not aligned and f-new not aligned) + # and (e-new, f-new in union(e2f, f2e) ) + if ( + e_new not in aligned + and f_new not in aligned + and (e_new, f_new) in union + ): + alignment.add((e_new, f_new)) + aligned["e"].add(e_new) + aligned["f"].add(f_new) + + grow_diag() + final_and(e2f) + final_and(f2e) + return sorted(alignment) diff --git a/venv/lib/python3.10/site-packages/nltk/translate/gleu_score.py b/venv/lib/python3.10/site-packages/nltk/translate/gleu_score.py new file mode 100644 index 0000000000000000000000000000000000000000..81932a73fb5bdd34e539dfd9d1b46f179fc26558 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/gleu_score.py @@ -0,0 +1,190 @@ +# Natural Language Toolkit: GLEU Score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: +# Contributors: Mike Schuster, Michael Wayne Goodman, Liling Tan +# URL: +# For license information, see LICENSE.TXT + +""" GLEU score implementation. """ + +from collections import Counter + +from nltk.util import everygrams, ngrams + + +def sentence_gleu(references, hypothesis, min_len=1, max_len=4): + """ + Calculates the sentence level GLEU (Google-BLEU) score described in + + Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V. Le, Mohammad Norouzi, + Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, + Jeff Klingner, Apurva Shah, Melvin Johnson, Xiaobing Liu, Lukasz Kaiser, + Stephan Gouws, Yoshikiyo Kato, Taku Kudo, Hideto Kazawa, Keith Stevens, + George Kurian, Nishant Patil, Wei Wang, Cliff Young, Jason Smith, + Jason Riesa, Alex Rudnick, Oriol Vinyals, Greg Corrado, Macduff Hughes, + Jeffrey Dean. (2016) Google’s Neural Machine Translation System: + Bridging the Gap between Human and Machine Translation. + eprint arXiv:1609.08144. https://arxiv.org/pdf/1609.08144v2.pdf + Retrieved on 27 Oct 2016. + + From Wu et al. (2016): + "The BLEU score has some undesirable properties when used for single + sentences, as it was designed to be a corpus measure. We therefore + use a slightly different score for our RL experiments which we call + the 'GLEU score'. For the GLEU score, we record all sub-sequences of + 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then + compute a recall, which is the ratio of the number of matching n-grams + to the number of total n-grams in the target (ground truth) sequence, + and a precision, which is the ratio of the number of matching n-grams + to the number of total n-grams in the generated output sequence. Then + GLEU score is simply the minimum of recall and precision. 
This GLEU + score's range is always between 0 (no matches) and 1 (all match) and + it is symmetrical when switching output and target. According to + our experiments, GLEU score correlates quite well with the BLEU + metric on a corpus level but does not have its drawbacks for our per + sentence reward objective." + + Note: The initial implementation only allowed a single reference, but now + a list of references is required (which is consistent with + bleu_score.sentence_bleu()). + + The infamous "the the the ... " example + + >>> ref = 'the cat is on the mat'.split() + >>> hyp = 'the the the the the the the'.split() + >>> sentence_gleu([ref], hyp) # doctest: +ELLIPSIS + 0.0909... + + An example to evaluate normal machine translation outputs + + >>> ref1 = str('It is a guide to action that ensures that the military ' + ... 'will forever heed Party commands').split() + >>> hyp1 = str('It is a guide to action which ensures that the military ' + ... 'always obeys the commands of the party').split() + >>> hyp2 = str('It is to insure the troops forever hearing the activity ' + ... 'guidebook that party direct').split() + >>> sentence_gleu([ref1], hyp1) # doctest: +ELLIPSIS + 0.4393... + >>> sentence_gleu([ref1], hyp2) # doctest: +ELLIPSIS + 0.1206... + + :param references: a list of reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :return: the sentence level GLEU score. + :rtype: float + """ + return corpus_gleu([references], [hypothesis], min_len=min_len, max_len=max_len) + + +def corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4): + """ + Calculate a single corpus-level GLEU score (aka. system-level GLEU) for all + the hypotheses and their respective references. + + Instead of averaging the sentence level GLEU scores (i.e. macro-average + precision), Wu et al. (2016) sum up the matching tokens and the max of + hypothesis and reference tokens for each sentence, then compute using the + aggregate values. + + From Mike Schuster (via email): + "For the corpus, we just add up the two statistics n_match and + n_all = max(n_all_output, n_all_target) for all sentences, then + calculate gleu_score = n_match / n_all, so it is not just a mean of + the sentence gleu scores (in our case, longer sentences count more, + which I think makes sense as they are more difficult to translate)." + + >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', + ... 'interested', 'in', 'world', 'history'] + >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', + ... 
'because', 'he', 'read', 'the', 'book'] + + >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] + >>> hypotheses = [hyp1, hyp2] + >>> corpus_gleu(list_of_references, hypotheses) # doctest: +ELLIPSIS + 0.5673... + + The example below show that corpus_gleu() is different from averaging + sentence_gleu() for hypotheses + + >>> score1 = sentence_gleu([ref1a], hyp1) + >>> score2 = sentence_gleu([ref2a], hyp2) + >>> (score1 + score2) / 2 # doctest: +ELLIPSIS + 0.6144... + + :param list_of_references: a list of reference sentences, w.r.t. hypotheses + :type list_of_references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param min_len: The minimum order of n-gram this function should extract. + :type min_len: int + :param max_len: The maximum order of n-gram this function should extract. + :type max_len: int + :return: The corpus-level GLEU score. + :rtype: float + """ + # sanity check + assert len(list_of_references) == len( + hypotheses + ), "The number of hypotheses and their reference(s) should be the same" + + # sum matches and max-token-lengths over all sentences + corpus_n_match = 0 + corpus_n_all = 0 + + for references, hypothesis in zip(list_of_references, hypotheses): + hyp_ngrams = Counter(everygrams(hypothesis, min_len, max_len)) + tpfp = sum(hyp_ngrams.values()) # True positives + False positives. + + hyp_counts = [] + for reference in references: + ref_ngrams = Counter(everygrams(reference, min_len, max_len)) + tpfn = sum(ref_ngrams.values()) # True positives + False negatives. + + overlap_ngrams = ref_ngrams & hyp_ngrams + tp = sum(overlap_ngrams.values()) # True positives. + + # While GLEU is defined as the minimum of precision and + # recall, we can reduce the number of division operations by one by + # instead finding the maximum of the denominators for the precision + # and recall formulae, since the numerators are the same: + # precision = tp / tpfp + # recall = tp / tpfn + # gleu_score = min(precision, recall) == tp / max(tpfp, tpfn) + n_all = max(tpfp, tpfn) + + if n_all > 0: + hyp_counts.append((tp, n_all)) + + # use the reference yielding the highest score + if hyp_counts: + n_match, n_all = max(hyp_counts, key=lambda hc: hc[0] / hc[1]) + corpus_n_match += n_match + corpus_n_all += n_all + + # corner case: empty corpus or empty references---don't divide by zero! + if corpus_n_all == 0: + gleu_score = 0.0 + else: + gleu_score = corpus_n_match / corpus_n_all + + return gleu_score diff --git a/venv/lib/python3.10/site-packages/nltk/translate/ibm1.py b/venv/lib/python3.10/site-packages/nltk/translate/ibm1.py new file mode 100644 index 0000000000000000000000000000000000000000..badb896968633d0db99f9b8fb2a7679b65d9a534 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/ibm1.py @@ -0,0 +1,251 @@ +# Natural Language Toolkit: IBM Model 1 +# +# Copyright (C) 2001-2013 NLTK Project +# Author: Chin Yee Lee +# Hengfeng Li +# Ruxin Hou +# Calvin Tanujaya Lim +# Based on earlier version by: +# Will Zhang +# Guan Gui +# URL: +# For license information, see LICENSE.TXT + +""" +Lexical translation model that ignores word order. + +In IBM Model 1, word order is ignored for simplicity. As long as the +word alignments are equivalent, it doesn't matter where the word occurs +in the source or target sentence. 
Thus, the following three alignments +are equally likely:: + + Source: je mange du jambon + Target: i eat some ham + Alignment: (0,0) (1,1) (2,2) (3,3) + + Source: je mange du jambon + Target: some ham eat i + Alignment: (0,2) (1,3) (2,1) (3,1) + + Source: du jambon je mange + Target: eat i some ham + Alignment: (0,3) (1,2) (2,0) (3,1) + +Note that an alignment is represented here as +(word_index_in_target, word_index_in_source). + +The EM algorithm used in Model 1 is: + +:E step: In the training data, count how many times a source language + word is translated into a target language word, weighted by + the prior probability of the translation. + +:M step: Estimate the new probability of translation based on the + counts from the Expectation step. + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:s: A word in the source language +:t: A word in the target language + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. +""" + +import warnings +from collections import defaultdict + +from nltk.translate import AlignedSent, Alignment, IBMModel +from nltk.translate.ibm_model import Counts + + +class IBMModel1(IBMModel): + """ + Lexical translation model that ignores word order + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + + >>> ibm1 = IBMModel1(bitext, 5) + + >>> print(round(ibm1.translation_table['buch']['book'], 3)) + 0.889 + >>> print(round(ibm1.translation_table['das']['book'], 3)) + 0.062 + >>> print(round(ibm1.translation_table['buch'][None], 3)) + 0.113 + >>> print(round(ibm1.translation_table['ja'][None], 3)) + 0.073 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)]) + + """ + + def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. + If specified, the following entry must be present: + ``translation_table``. + See ``IBMModel`` for the type and purpose of this table. 
+ :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + + if probability_tables is None: + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + self.align_all(sentence_aligned_corpus) + + def set_uniform_probabilities(self, sentence_aligned_corpus): + initial_prob = 1 / len(self.trg_vocab) + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "Target language vocabulary is too large (" + + str(len(self.trg_vocab)) + + " words). " + "Results may be less accurate." + ) + + for t in self.trg_vocab: + self.translation_table[t] = defaultdict(lambda: initial_prob) + + def train(self, parallel_corpus): + counts = Counts() + for aligned_sentence in parallel_corpus: + trg_sentence = aligned_sentence.words + src_sentence = [None] + aligned_sentence.mots + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_all_alignments(src_sentence, trg_sentence) + + # E step (b): Collect counts + for t in trg_sentence: + for s in src_sentence: + count = self.prob_alignment_point(s, t) + normalized_count = count / total_count[t] + counts.t_given_s[t][s] += normalized_count + counts.any_t_given_s[s] += normalized_count + + # M step: Update probabilities with maximum likelihood estimate + self.maximize_lexical_translation_probabilities(counts) + + def prob_all_alignments(self, src_sentence, trg_sentence): + """ + Computes the probability of all possible word alignments, + expressed as a marginal distribution over target words t + + Each entry in the return value represents the contribution to + the total alignment probability by the target word t. + + To obtain probability(alignment | src_sentence, trg_sentence), + simply sum the entries in the return value. + + :return: Probability of t for all s in ``src_sentence`` + :rtype: dict(str): float + """ + alignment_prob_for_t = defaultdict(lambda: 0.0) + for t in trg_sentence: + for s in src_sentence: + alignment_prob_for_t[t] += self.prob_alignment_point(s, t) + return alignment_prob_for_t + + def prob_alignment_point(self, s, t): + """ + Probability that word ``t`` in the target sentence is aligned to + word ``s`` in the source sentence + """ + return self.translation_table[t][s] + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + prob = 1.0 + + for j, i in enumerate(alignment_info.alignment): + if j == 0: + continue # skip the dummy zeroeth element + trg_word = alignment_info.trg_sentence[j] + src_word = alignment_info.src_sentence[i] + prob *= self.translation_table[trg_word][src_word] + + return max(prob, IBMModel.MIN_PROB) + + def align_all(self, parallel_corpus): + for sentence_pair in parallel_corpus: + self.align(sentence_pair) + + def align(self, sentence_pair): + """ + Determines the best word alignment for one sentence pair from + the corpus that the model was trained on. + + The best alignment will be set in ``sentence_pair`` when the + method returns. In contrast with the internal implementation of + IBM models, the word indices in the ``Alignment`` are zero- + indexed, not one-indexed. 
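+
+        Ties between source words are broken in favour of the later source
+        word (note the ``>=`` comparison in the body below); a target word
+        stays aligned to NULL (``None``) only when every source word scores
+        below the NULL translation probability (floored at
+        ``IBMModel.MIN_PROB``).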
+ + :param sentence_pair: A sentence in the source language and its + counterpart sentence in the target language + :type sentence_pair: AlignedSent + """ + best_alignment = [] + + for j, trg_word in enumerate(sentence_pair.words): + # Initialize trg_word to align with the NULL token + best_prob = max(self.translation_table[trg_word][None], IBMModel.MIN_PROB) + best_alignment_point = None + for i, src_word in enumerate(sentence_pair.mots): + align_prob = self.translation_table[trg_word][src_word] + if align_prob >= best_prob: # prefer newer word in case of tie + best_prob = align_prob + best_alignment_point = i + + best_alignment.append((j, best_alignment_point)) + + sentence_pair.alignment = Alignment(best_alignment) diff --git a/venv/lib/python3.10/site-packages/nltk/translate/ibm2.py b/venv/lib/python3.10/site-packages/nltk/translate/ibm2.py new file mode 100644 index 0000000000000000000000000000000000000000..0b3ff375f045f4a809778ea8d3221e6b62e5e2ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/ibm2.py @@ -0,0 +1,319 @@ +# Natural Language Toolkit: IBM Model 2 +# +# Copyright (C) 2001-2013 NLTK Project +# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim +# URL: +# For license information, see LICENSE.TXT + +""" +Lexical translation model that considers word order. + +IBM Model 2 improves on Model 1 by accounting for word order. +An alignment probability is introduced, a(i | j,l,m), which predicts +a source word position, given its aligned target word's position. + +The EM algorithm used in Model 2 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) count how many times a particular position in the source + sentence is aligned to a particular position in the target + sentence + +:M step: Estimate new probabilities based on the counts from the E step + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. 
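+
+As a rough sketch (the helper name ``_toy_model2_score`` is hypothetical, not
+part of NLTK), Model 2 scores a target sentence t_1..t_m with alignment
+a_1..a_m against a source sentence s_0..s_l (s_0 = NULL) as a product of
+lexical and alignment factors::
+
+    def _toy_model2_score(trg, src, alignment, t_table, a_table):
+        # trg and src are 1-indexed with dummy 0th elements, as in the model
+        l, m = len(src) - 1, len(trg) - 1
+        prob = 1.0
+        for j in range(1, m + 1):
+            i = alignment[j]
+            prob *= t_table[trg[j]][src[i]] * a_table[i][j][l][m]
+        return prob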
+""" + +import warnings +from collections import defaultdict + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel1 +from nltk.translate.ibm_model import Counts + + +class IBMModel2(IBMModel): + """ + Lexical translation model that considers word order + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + + >>> ibm2 = IBMModel2(bitext, 5) + + >>> print(round(ibm2.translation_table['buch']['book'], 3)) + 1.0 + >>> print(round(ibm2.translation_table['das']['book'], 3)) + 0.0 + >>> print(round(ibm2.translation_table['buch'][None], 3)) + 0.0 + >>> print(round(ibm2.translation_table['ja'][None], 3)) + 0.0 + + >>> print(round(ibm2.alignment_table[1][1][2][2], 3)) + 0.939 + >>> print(round(ibm2.alignment_table[1][2][2][2], 3)) + 0.0 + >>> print(round(ibm2.alignment_table[2][2][4][5], 3)) + 1.0 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)]) + + """ + + def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model and an alignment model. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. + If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``. + See ``IBMModel`` for the type and purpose of these tables. 
+ :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + + if probability_tables is None: + # Get translation probabilities from IBM Model 1 + # Run more iterations of training for Model 1, since it is + # faster than Model 2 + ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations) + self.translation_table = ibm1.translation_table + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + self.align_all(sentence_aligned_corpus) + + def set_uniform_probabilities(self, sentence_aligned_corpus): + # a(i | j,l,m) = 1 / (l+1) for all i, j, l, m + l_m_combinations = set() + for aligned_sentence in sentence_aligned_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + if (l, m) not in l_m_combinations: + l_m_combinations.add((l, m)) + initial_prob = 1 / (l + 1) + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "A source sentence is too long (" + + str(l) + + " words). Results may be less accurate." + ) + + for i in range(0, l + 1): + for j in range(1, m + 1): + self.alignment_table[i][j][l][m] = initial_prob + + def train(self, parallel_corpus): + counts = Model2Counts() + for aligned_sentence in parallel_corpus: + src_sentence = [None] + aligned_sentence.mots + trg_sentence = ["UNUSED"] + aligned_sentence.words # 1-indexed + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_all_alignments(src_sentence, trg_sentence) + + # E step (b): Collect counts + for j in range(1, m + 1): + t = trg_sentence[j] + for i in range(0, l + 1): + s = src_sentence[i] + count = self.prob_alignment_point(i, j, src_sentence, trg_sentence) + normalized_count = count / total_count[t] + + counts.update_lexical_translation(normalized_count, s, t) + counts.update_alignment(normalized_count, i, j, l, m) + + # M step: Update probabilities with maximum likelihood estimates + self.maximize_lexical_translation_probabilities(counts) + self.maximize_alignment_probabilities(counts) + + def maximize_alignment_probabilities(self, counts): + MIN_PROB = IBMModel.MIN_PROB + for i, j_s in counts.alignment.items(): + for j, src_sentence_lengths in j_s.items(): + for l, trg_sentence_lengths in src_sentence_lengths.items(): + for m in trg_sentence_lengths: + estimate = ( + counts.alignment[i][j][l][m] + / counts.alignment_for_any_i[j][l][m] + ) + self.alignment_table[i][j][l][m] = max(estimate, MIN_PROB) + + def prob_all_alignments(self, src_sentence, trg_sentence): + """ + Computes the probability of all possible word alignments, + expressed as a marginal distribution over target words t + + Each entry in the return value represents the contribution to + the total alignment probability by the target word t. + + To obtain probability(alignment | src_sentence, trg_sentence), + simply sum the entries in the return value. 
+ + :return: Probability of t for all s in ``src_sentence`` + :rtype: dict(str): float + """ + alignment_prob_for_t = defaultdict(lambda: 0.0) + for j in range(1, len(trg_sentence)): + t = trg_sentence[j] + for i in range(0, len(src_sentence)): + alignment_prob_for_t[t] += self.prob_alignment_point( + i, j, src_sentence, trg_sentence + ) + return alignment_prob_for_t + + def prob_alignment_point(self, i, j, src_sentence, trg_sentence): + """ + Probability that position j in ``trg_sentence`` is aligned to + position i in the ``src_sentence`` + """ + l = len(src_sentence) - 1 + m = len(trg_sentence) - 1 + s = src_sentence[i] + t = trg_sentence[j] + return self.translation_table[t][s] * self.alignment_table[i][j][l][m] + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + prob = 1.0 + l = len(alignment_info.src_sentence) - 1 + m = len(alignment_info.trg_sentence) - 1 + + for j, i in enumerate(alignment_info.alignment): + if j == 0: + continue # skip the dummy zeroeth element + trg_word = alignment_info.trg_sentence[j] + src_word = alignment_info.src_sentence[i] + prob *= ( + self.translation_table[trg_word][src_word] + * self.alignment_table[i][j][l][m] + ) + + return max(prob, IBMModel.MIN_PROB) + + def align_all(self, parallel_corpus): + for sentence_pair in parallel_corpus: + self.align(sentence_pair) + + def align(self, sentence_pair): + """ + Determines the best word alignment for one sentence pair from + the corpus that the model was trained on. + + The best alignment will be set in ``sentence_pair`` when the + method returns. In contrast with the internal implementation of + IBM models, the word indices in the ``Alignment`` are zero- + indexed, not one-indexed. + + :param sentence_pair: A sentence in the source language and its + counterpart sentence in the target language + :type sentence_pair: AlignedSent + """ + best_alignment = [] + + l = len(sentence_pair.mots) + m = len(sentence_pair.words) + + for j, trg_word in enumerate(sentence_pair.words): + # Initialize trg_word to align with the NULL token + best_prob = ( + self.translation_table[trg_word][None] + * self.alignment_table[0][j + 1][l][m] + ) + best_prob = max(best_prob, IBMModel.MIN_PROB) + best_alignment_point = None + for i, src_word in enumerate(sentence_pair.mots): + align_prob = ( + self.translation_table[trg_word][src_word] + * self.alignment_table[i + 1][j + 1][l][m] + ) + if align_prob >= best_prob: + best_prob = align_prob + best_alignment_point = i + + best_alignment.append((j, best_alignment_point)) + + sentence_pair.alignment = Alignment(best_alignment) + + +class Model2Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for alignment. 
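+
+    Alignment counts are stored in nested dictionaries indexed as
+    ``alignment[i][j][l][m]``, mirroring the a(i | j,l,m) parameterisation.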
+ """ + + def __init__(self): + super().__init__() + self.alignment = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0))) + ) + self.alignment_for_any_i = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + + def update_lexical_translation(self, count, s, t): + self.t_given_s[t][s] += count + self.any_t_given_s[s] += count + + def update_alignment(self, count, i, j, l, m): + self.alignment[i][j][l][m] += count + self.alignment_for_any_i[j][l][m] += count diff --git a/venv/lib/python3.10/site-packages/nltk/translate/ibm3.py b/venv/lib/python3.10/site-packages/nltk/translate/ibm3.py new file mode 100644 index 0000000000000000000000000000000000000000..f295dee0b563bbcb9a5b9557c8d1602942a75bc3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/ibm3.py @@ -0,0 +1,346 @@ +# Natural Language Toolkit: IBM Model 3 +# +# Copyright (C) 2001-2013 NLTK Project +# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim +# URL: +# For license information, see LICENSE.TXT + +""" +Translation model that considers how a word can be aligned to +multiple words in another language. + +IBM Model 3 improves on Model 2 by directly modeling the phenomenon +where a word in one language may be translated into zero or more words +in another. This is expressed by the fertility probability, +n(phi | source word). + +If a source word translates into more than one word, it is possible to +generate sentences that have the same alignment in multiple ways. This +is modeled by a distortion step. The distortion probability, d(j|i,l,m), +predicts a target word position, given its aligned source word's +position. The distortion probability replaces the alignment probability +of Model 2. + +The fertility probability is not applicable for NULL. Target words that +align to NULL are assumed to be distributed uniformly in the target +sentence. The existence of these words is modeled by p1, the probability +that a target word produced by a real source word requires another +target word that is produced by NULL. + +The EM algorithm used in Model 3 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) count how many times a particular position in the target + sentence is aligned to a particular position in the source + sentence + - (c) count how many times a source word is aligned to phi number + of target words + - (d) count how many times NULL is aligned to a target word + +:M step: Estimate new probabilities based on the counts from the E step + +Because there are too many possible alignments, only the most probable +ones are considered. First, the best alignment is determined using prior +probabilities. Then, a hill climbing approach is used to find other good +candidates. 
+ +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language +:phi: Fertility, the number of target words produced by a source word +:p1: Probability that a target word produced by a source word is + accompanied by another target word that is aligned to NULL +:p0: 1 - p1 + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. +""" + +import warnings +from collections import defaultdict +from math import factorial + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel2 +from nltk.translate.ibm_model import Counts + + +class IBMModel3(IBMModel): + """ + Translation model that considers how a word can be aligned to + multiple words in another language + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book'])) + >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize'])) + + >>> ibm3 = IBMModel3(bitext, 5) + + >>> print(round(ibm3.translation_table['buch']['book'], 3)) + 1.0 + >>> print(round(ibm3.translation_table['das']['book'], 3)) + 0.0 + >>> print(round(ibm3.translation_table['ja'][None], 3)) + 1.0 + + >>> print(round(ibm3.distortion_table[1][1][2][2], 3)) + 1.0 + >>> print(round(ibm3.distortion_table[1][2][2][2], 3)) + 0.0 + >>> print(round(ibm3.distortion_table[2][2][4][5], 3)) + 0.75 + + >>> print(round(ibm3.fertility_table[2]['summarize'], 3)) + 1.0 + >>> print(round(ibm3.fertility_table[1]['book'], 3)) + 1.0 + + >>> print(round(ibm3.p1, 3)) + 0.054 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)]) + + """ + + def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model, a distortion model, a fertility model, and a + model for generating NULL-aligned words. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param probability_tables: Optional. 
Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. + If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``, + ``fertility_table``, ``p1``, ``distortion_table``. + See ``IBMModel`` for the type and purpose of these tables. + :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + self.reset_probabilities() + + if probability_tables is None: + # Get translation and alignment probabilities from IBM Model 2 + ibm2 = IBMModel2(sentence_aligned_corpus, iterations) + self.translation_table = ibm2.translation_table + self.alignment_table = ibm2.alignment_table + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + self.fertility_table = probability_tables["fertility_table"] + self.p1 = probability_tables["p1"] + self.distortion_table = probability_tables["distortion_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + def reset_probabilities(self): + super().reset_probabilities() + self.distortion_table = defaultdict( + lambda: defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + ) + """ + dict[int][int][int][int]: float. Probability(j | i,l,m). + Values accessed as ``distortion_table[j][i][l][m]``. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + # d(j | i,l,m) = 1 / m for all i, j, l, m + l_m_combinations = set() + for aligned_sentence in sentence_aligned_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + if (l, m) not in l_m_combinations: + l_m_combinations.add((l, m)) + initial_prob = 1 / m + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "A target sentence is too long (" + + str(m) + + " words). Results may be less accurate." 
+ ) + for j in range(1, m + 1): + for i in range(0, l + 1): + self.distortion_table[j][i][l][m] = initial_prob + + # simple initialization, taken from GIZA++ + self.fertility_table[0] = defaultdict(lambda: 0.2) + self.fertility_table[1] = defaultdict(lambda: 0.65) + self.fertility_table[2] = defaultdict(lambda: 0.1) + self.fertility_table[3] = defaultdict(lambda: 0.04) + MAX_FERTILITY = 10 + initial_fert_prob = 0.01 / (MAX_FERTILITY - 4) + for phi in range(4, MAX_FERTILITY): + self.fertility_table[phi] = defaultdict(lambda: initial_fert_prob) + + self.p1 = 0.5 + + def train(self, parallel_corpus): + counts = Model3Counts() + for aligned_sentence in parallel_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + + # Sample the alignment space + sampled_alignments, best_alignment = self.sample(aligned_sentence) + # Record the most probable alignment + aligned_sentence.alignment = Alignment( + best_alignment.zero_indexed_alignment() + ) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_of_alignments(sampled_alignments) + + # E step (b): Collect counts + for alignment_info in sampled_alignments: + count = self.prob_t_a_given_s(alignment_info) + normalized_count = count / total_count + + for j in range(1, m + 1): + counts.update_lexical_translation( + normalized_count, alignment_info, j + ) + counts.update_distortion(normalized_count, alignment_info, j, l, m) + + counts.update_null_generation(normalized_count, alignment_info) + counts.update_fertility(normalized_count, alignment_info) + + # M step: Update probabilities with maximum likelihood estimates + # If any probability is less than MIN_PROB, clamp it to MIN_PROB + existing_alignment_table = self.alignment_table + self.reset_probabilities() + self.alignment_table = existing_alignment_table # don't retrain + + self.maximize_lexical_translation_probabilities(counts) + self.maximize_distortion_probabilities(counts) + self.maximize_fertility_probabilities(counts) + self.maximize_null_generation_probabilities(counts) + + def maximize_distortion_probabilities(self, counts): + MIN_PROB = IBMModel.MIN_PROB + for j, i_s in counts.distortion.items(): + for i, src_sentence_lengths in i_s.items(): + for l, trg_sentence_lengths in src_sentence_lengths.items(): + for m in trg_sentence_lengths: + estimate = ( + counts.distortion[j][i][l][m] + / counts.distortion_for_any_j[i][l][m] + ) + self.distortion_table[j][i][l][m] = max(estimate, MIN_PROB) + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + src_sentence = alignment_info.src_sentence + trg_sentence = alignment_info.trg_sentence + l = len(src_sentence) - 1 # exclude NULL + m = len(trg_sentence) - 1 + p1 = self.p1 + p0 = 1 - p1 + + probability = 1.0 + MIN_PROB = IBMModel.MIN_PROB + + # Combine NULL insertion probability + null_fertility = alignment_info.fertility_of_i(0) + probability *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) + if probability < MIN_PROB: + return MIN_PROB + + # Compute combination (m - null_fertility) choose null_fertility + for i in range(1, null_fertility + 1): + probability *= (m - null_fertility - i + 1) / i + if probability < MIN_PROB: + return MIN_PROB + + # Combine fertility probabilities + for i in range(1, l + 1): + fertility = alignment_info.fertility_of_i(i) + probability *= ( + factorial(fertility) * self.fertility_table[fertility][src_sentence[i]] + ) + if probability < MIN_PROB: + return MIN_PROB + + # 
Combine lexical and distortion probabilities + for j in range(1, m + 1): + t = trg_sentence[j] + i = alignment_info.alignment[j] + s = src_sentence[i] + + probability *= ( + self.translation_table[t][s] * self.distortion_table[j][i][l][m] + ) + if probability < MIN_PROB: + return MIN_PROB + + return probability + + +class Model3Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for distortion. + """ + + def __init__(self): + super().__init__() + self.distortion = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0))) + ) + self.distortion_for_any_j = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + + def update_distortion(self, count, alignment_info, j, l, m): + i = alignment_info.alignment[j] + self.distortion[j][i][l][m] += count + self.distortion_for_any_j[i][l][m] += count diff --git a/venv/lib/python3.10/site-packages/nltk/translate/ibm4.py b/venv/lib/python3.10/site-packages/nltk/translate/ibm4.py new file mode 100644 index 0000000000000000000000000000000000000000..c7686939ac5027d6e16147cc82611cd4519ea51e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/ibm4.py @@ -0,0 +1,490 @@ +# Natural Language Toolkit: IBM Model 4 +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Translation model that reorders output words based on their type and +distance from other related words in the output sentence. + +IBM Model 4 improves the distortion model of Model 3, motivated by the +observation that certain words tend to be re-ordered in a predictable +way relative to one another. For example, in English +usually has its order flipped as in French. + +Model 4 requires words in the source and target vocabularies to be +categorized into classes. This can be linguistically driven, like parts +of speech (adjective, nouns, prepositions, etc). Word classes can also +be obtained by statistical methods. The original IBM Model 4 uses an +information theoretic approach to group words into 50 classes for each +vocabulary. + +Terminology +----------- + +:Cept: + A source word with non-zero fertility i.e. aligned to one or more + target words. +:Tablet: + The set of target word(s) aligned to a cept. +:Head of cept: + The first word of the tablet of that cept. +:Center of cept: + The average position of the words in that cept's tablet. If the + value is not an integer, the ceiling is taken. + For example, for a tablet with words in positions 2, 5, 6 in the + target sentence, the center of the corresponding cept is + ceil((2 + 5 + 6) / 3) = 5 +:Displacement: + For a head word, defined as (position of head word - position of + previous cept's center). Can be positive or negative. + For a non-head word, defined as (position of non-head word - + position of previous word in the same tablet). Always positive, + because successive words in a tablet are assumed to appear to the + right of the previous word. + +In contrast to Model 3 which reorders words in a tablet independently of +other words, Model 4 distinguishes between three cases. + +1. Words generated by NULL are distributed uniformly. +2. For a head word t, its position is modeled by the probability + d_head(displacement | word_class_s(s),word_class_t(t)), + where s is the previous cept, and word_class_s and word_class_t maps + s and t to a source and target language word class respectively. +3. 
For a non-head word t, its position is modeled by the probability + d_non_head(displacement | word_class_t(t)) + +The EM algorithm used in Model 4 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) for a particular word class, count how many times a head + word is located at a particular displacement from the + previous cept's center + - (c) for a particular word class, count how many times a + non-head word is located at a particular displacement from + the previous target word + - (d) count how many times a source word is aligned to phi number + of target words + - (e) count how many times NULL is aligned to a target word + +:M step: Estimate new probabilities based on the counts from the E step + +Like Model 3, there are too many possible alignments to consider. Thus, +a hill climbing approach is used to sample good candidates. + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language +:phi: Fertility, the number of target words produced by a source word +:p1: Probability that a target word produced by a source word is + accompanied by another target word that is aligned to NULL +:p0: 1 - p1 +:dj: Displacement, Δj + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. 
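+
+Illustration
+------------
+
+A rough sketch of the head-word distortion term, with invented positions
+and word classes (``src_class`` and ``trg_class`` stand for the integer
+class ids of the previous cept's source word and of the head word)::
+
+    from math import ceil
+
+    tablet = [2, 5, 6]                        # target positions of the previous cept
+    center = ceil(sum(tablet) / len(tablet))  # 5
+    dj = 7 - center                           # head word placed at j = 7 gives dj = 2
+    # a trained model scores this placement as
+    # head_distortion_table[dj][src_class][trg_class]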
+""" + +import warnings +from collections import defaultdict +from math import factorial + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel3 +from nltk.translate.ibm_model import Counts, longest_target_sentence_length + + +class IBMModel4(IBMModel): + """ + Translation model that reorders output words based on their type and + their distance from other related words in the output sentence + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book'])) + >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize'])) + >>> src_classes = {'the': 0, 'a': 0, 'small': 1, 'big': 1, 'house': 2, 'book': 2, 'is': 3, 'was': 3, 'i': 4, 'summarize': 5 } + >>> trg_classes = {'das': 0, 'ein': 0, 'haus': 1, 'buch': 1, 'klein': 2, 'groß': 2, 'ist': 3, 'war': 3, 'ja': 4, 'ich': 5, 'fasse': 6, 'zusammen': 6 } + + >>> ibm4 = IBMModel4(bitext, 5, src_classes, trg_classes) + + >>> print(round(ibm4.translation_table['buch']['book'], 3)) + 1.0 + >>> print(round(ibm4.translation_table['das']['book'], 3)) + 0.0 + >>> print(round(ibm4.translation_table['ja'][None], 3)) + 1.0 + + >>> print(round(ibm4.head_distortion_table[1][0][1], 3)) + 1.0 + >>> print(round(ibm4.head_distortion_table[2][0][1], 3)) + 0.0 + >>> print(round(ibm4.non_head_distortion_table[3][6], 3)) + 0.5 + + >>> print(round(ibm4.fertility_table[2]['summarize'], 3)) + 1.0 + >>> print(round(ibm4.fertility_table[1]['book'], 3)) + 1.0 + + >>> print(round(ibm4.p1, 3)) + 0.033 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)]) + + """ + + def __init__( + self, + sentence_aligned_corpus, + iterations, + source_word_classes, + target_word_classes, + probability_tables=None, + ): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model, distortion models, a fertility model, and a + model for generating NULL-aligned words. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param source_word_classes: Lookup table that maps a source word + to its word class, the latter represented by an integer id + :type source_word_classes: dict[str]: int + + :param target_word_classes: Lookup table that maps a target word + to its word class, the latter represented by an integer id + :type target_word_classes: dict[str]: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. 
+ If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``, + ``fertility_table``, ``p1``, ``head_distortion_table``, + ``non_head_distortion_table``. See ``IBMModel`` and + ``IBMModel4`` for the type and purpose of these tables. + :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + self.reset_probabilities() + self.src_classes = source_word_classes + self.trg_classes = target_word_classes + + if probability_tables is None: + # Get probabilities from IBM model 3 + ibm3 = IBMModel3(sentence_aligned_corpus, iterations) + self.translation_table = ibm3.translation_table + self.alignment_table = ibm3.alignment_table + self.fertility_table = ibm3.fertility_table + self.p1 = ibm3.p1 + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + self.fertility_table = probability_tables["fertility_table"] + self.p1 = probability_tables["p1"] + self.head_distortion_table = probability_tables["head_distortion_table"] + self.non_head_distortion_table = probability_tables[ + "non_head_distortion_table" + ] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + def reset_probabilities(self): + super().reset_probabilities() + self.head_distortion_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + """ + dict[int][int][int]: float. Probability(displacement of head + word | word class of previous cept,target word class). + Values accessed as ``distortion_table[dj][src_class][trg_class]``. + """ + + self.non_head_distortion_table = defaultdict( + lambda: defaultdict(lambda: self.MIN_PROB) + ) + """ + dict[int][int]: float. Probability(displacement of non-head + word | target word class). + Values accessed as ``distortion_table[dj][trg_class]``. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + """ + Set distortion probabilities uniformly to + 1 / cardinality of displacement values + """ + max_m = longest_target_sentence_length(sentence_aligned_corpus) + + # The maximum displacement is m-1, when a word is in the last + # position m of the target sentence and the previously placed + # word is in the first position. + # Conversely, the minimum displacement is -(m-1). + # Thus, the displacement range is (m-1) - (-(m-1)). Note that + # displacement cannot be zero and is not included in the range. + if max_m <= 1: + initial_prob = IBMModel.MIN_PROB + else: + initial_prob = 1 / (2 * (max_m - 1)) + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "A target sentence is too long (" + + str(max_m) + + " words). Results may be less accurate." 
+ ) + + for dj in range(1, max_m): + self.head_distortion_table[dj] = defaultdict( + lambda: defaultdict(lambda: initial_prob) + ) + self.head_distortion_table[-dj] = defaultdict( + lambda: defaultdict(lambda: initial_prob) + ) + self.non_head_distortion_table[dj] = defaultdict(lambda: initial_prob) + self.non_head_distortion_table[-dj] = defaultdict(lambda: initial_prob) + + def train(self, parallel_corpus): + counts = Model4Counts() + for aligned_sentence in parallel_corpus: + m = len(aligned_sentence.words) + + # Sample the alignment space + sampled_alignments, best_alignment = self.sample(aligned_sentence) + # Record the most probable alignment + aligned_sentence.alignment = Alignment( + best_alignment.zero_indexed_alignment() + ) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_of_alignments(sampled_alignments) + + # E step (b): Collect counts + for alignment_info in sampled_alignments: + count = self.prob_t_a_given_s(alignment_info) + normalized_count = count / total_count + + for j in range(1, m + 1): + counts.update_lexical_translation( + normalized_count, alignment_info, j + ) + counts.update_distortion( + normalized_count, + alignment_info, + j, + self.src_classes, + self.trg_classes, + ) + + counts.update_null_generation(normalized_count, alignment_info) + counts.update_fertility(normalized_count, alignment_info) + + # M step: Update probabilities with maximum likelihood estimates + # If any probability is less than MIN_PROB, clamp it to MIN_PROB + existing_alignment_table = self.alignment_table + self.reset_probabilities() + self.alignment_table = existing_alignment_table # don't retrain + + self.maximize_lexical_translation_probabilities(counts) + self.maximize_distortion_probabilities(counts) + self.maximize_fertility_probabilities(counts) + self.maximize_null_generation_probabilities(counts) + + def maximize_distortion_probabilities(self, counts): + head_d_table = self.head_distortion_table + for dj, src_classes in counts.head_distortion.items(): + for s_cls, trg_classes in src_classes.items(): + for t_cls in trg_classes: + estimate = ( + counts.head_distortion[dj][s_cls][t_cls] + / counts.head_distortion_for_any_dj[s_cls][t_cls] + ) + head_d_table[dj][s_cls][t_cls] = max(estimate, IBMModel.MIN_PROB) + + non_head_d_table = self.non_head_distortion_table + for dj, trg_classes in counts.non_head_distortion.items(): + for t_cls in trg_classes: + estimate = ( + counts.non_head_distortion[dj][t_cls] + / counts.non_head_distortion_for_any_dj[t_cls] + ) + non_head_d_table[dj][t_cls] = max(estimate, IBMModel.MIN_PROB) + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + return IBMModel4.model4_prob_t_a_given_s(alignment_info, self) + + @staticmethod # exposed for Model 5 to use + def model4_prob_t_a_given_s(alignment_info, ibm_model): + probability = 1.0 + MIN_PROB = IBMModel.MIN_PROB + + def null_generation_term(): + # Binomial distribution: B(m - null_fertility, p1) + value = 1.0 + p1 = ibm_model.p1 + p0 = 1 - p1 + null_fertility = alignment_info.fertility_of_i(0) + m = len(alignment_info.trg_sentence) - 1 + value *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) + if value < MIN_PROB: + return MIN_PROB + + # Combination: (m - null_fertility) choose null_fertility + for i in range(1, null_fertility + 1): + value *= (m - null_fertility - i + 1) / i + return value + + def fertility_term(): + value = 1.0 + src_sentence = 
alignment_info.src_sentence + for i in range(1, len(src_sentence)): + fertility = alignment_info.fertility_of_i(i) + value *= ( + factorial(fertility) + * ibm_model.fertility_table[fertility][src_sentence[i]] + ) + if value < MIN_PROB: + return MIN_PROB + return value + + def lexical_translation_term(j): + t = alignment_info.trg_sentence[j] + i = alignment_info.alignment[j] + s = alignment_info.src_sentence[i] + return ibm_model.translation_table[t][s] + + def distortion_term(j): + t = alignment_info.trg_sentence[j] + i = alignment_info.alignment[j] + if i == 0: + # case 1: t is aligned to NULL + return 1.0 + if alignment_info.is_head_word(j): + # case 2: t is the first word of a tablet + previous_cept = alignment_info.previous_cept(j) + src_class = None + if previous_cept is not None: + previous_s = alignment_info.src_sentence[previous_cept] + src_class = ibm_model.src_classes[previous_s] + trg_class = ibm_model.trg_classes[t] + dj = j - alignment_info.center_of_cept(previous_cept) + return ibm_model.head_distortion_table[dj][src_class][trg_class] + + # case 3: t is a subsequent word of a tablet + previous_position = alignment_info.previous_in_tablet(j) + trg_class = ibm_model.trg_classes[t] + dj = j - previous_position + return ibm_model.non_head_distortion_table[dj][trg_class] + + # end nested functions + + # Abort computation whenever probability falls below MIN_PROB at + # any point, since MIN_PROB can be considered as zero + probability *= null_generation_term() + if probability < MIN_PROB: + return MIN_PROB + + probability *= fertility_term() + if probability < MIN_PROB: + return MIN_PROB + + for j in range(1, len(alignment_info.trg_sentence)): + probability *= lexical_translation_term(j) + if probability < MIN_PROB: + return MIN_PROB + + probability *= distortion_term(j) + if probability < MIN_PROB: + return MIN_PROB + + return probability + + +class Model4Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for distortion. 
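+
+    Head-word distortion counts are accessed as
+    ``head_distortion[dj][src_class][trg_class]`` and non-head-word
+    counts as ``non_head_distortion[dj][trg_class]``, mirroring the
+    layout of the probability tables they are used to estimate.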
+ """ + + def __init__(self): + super().__init__() + self.head_distortion = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + self.head_distortion_for_any_dj = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.non_head_distortion = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.non_head_distortion_for_any_dj = defaultdict(lambda: 0.0) + + def update_distortion(self, count, alignment_info, j, src_classes, trg_classes): + i = alignment_info.alignment[j] + t = alignment_info.trg_sentence[j] + if i == 0: + # case 1: t is aligned to NULL + pass + elif alignment_info.is_head_word(j): + # case 2: t is the first word of a tablet + previous_cept = alignment_info.previous_cept(j) + if previous_cept is not None: + previous_src_word = alignment_info.src_sentence[previous_cept] + src_class = src_classes[previous_src_word] + else: + src_class = None + trg_class = trg_classes[t] + dj = j - alignment_info.center_of_cept(previous_cept) + self.head_distortion[dj][src_class][trg_class] += count + self.head_distortion_for_any_dj[src_class][trg_class] += count + else: + # case 3: t is a subsequent word of a tablet + previous_j = alignment_info.previous_in_tablet(j) + trg_class = trg_classes[t] + dj = j - previous_j + self.non_head_distortion[dj][trg_class] += count + self.non_head_distortion_for_any_dj[trg_class] += count diff --git a/venv/lib/python3.10/site-packages/nltk/translate/ibm5.py b/venv/lib/python3.10/site-packages/nltk/translate/ibm5.py new file mode 100644 index 0000000000000000000000000000000000000000..98ed2ec0aec4535fd6b4e18abbf8ecd8f696a9e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/ibm5.py @@ -0,0 +1,663 @@ +# Natural Language Toolkit: IBM Model 5 +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Translation model that keeps track of vacant positions in the target +sentence to decide where to place translated words. + +Translation can be viewed as a process where each word in the source +sentence is stepped through sequentially, generating translated words +for each source word. The target sentence can be viewed as being made +up of ``m`` empty slots initially, which gradually fill up as generated +words are placed in them. + +Models 3 and 4 use distortion probabilities to decide how to place +translated words. For simplicity, these models ignore the history of +which slots have already been occupied with translated words. +Consider the placement of the last translated word: there is only one +empty slot left in the target sentence, so the distortion probability +should be 1.0 for that position and 0.0 everywhere else. However, the +distortion probabilities for Models 3 and 4 are set up such that all +positions are under consideration. + +IBM Model 5 fixes this deficiency by accounting for occupied slots +during translation. It introduces the vacancy function v(j), the number +of vacancies up to, and including, position j in the target sentence. + +Terminology +----------- + +:Maximum vacancy: + The number of valid slots that a word can be placed in. + This is not necessarily the same as the number of vacant slots. + For example, if a tablet contains more than one word, the head word + cannot be placed at the last vacant slot because there will be no + space for the other words in the tablet. The number of valid slots + has to take into account the length of the tablet. 
+ Non-head words cannot be placed before the head word, so vacancies + to the left of the head word are ignored. +:Vacancy difference: + For a head word: (v(j) - v(center of previous cept)) + Can be positive or negative. + For a non-head word: (v(j) - v(position of previously placed word)) + Always positive, because successive words in a tablet are assumed to + appear to the right of the previous word. + +Positioning of target words fall under three cases: + +1. Words generated by NULL are distributed uniformly +2. For a head word t, its position is modeled by the probability + v_head(dv | max_v,word_class_t(t)) +3. For a non-head word t, its position is modeled by the probability + v_non_head(dv | max_v,word_class_t(t)) + +dv and max_v are defined differently for head and non-head words. + +The EM algorithm used in Model 5 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) for a particular word class and maximum vacancy, count how + many times a head word and the previous cept's center have + a particular difference in number of vacancies + - (b) for a particular word class and maximum vacancy, count how + many times a non-head word and the previous target word + have a particular difference in number of vacancies + - (d) count how many times a source word is aligned to phi number + of target words + - (e) count how many times NULL is aligned to a target word + +:M step: Estimate new probabilities based on the counts from the E step + +Like Model 4, there are too many possible alignments to consider. Thus, +a hill climbing approach is used to sample good candidates. In addition, +pruning is used to weed out unlikely alignments based on Model 4 scores. + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language +:phi: Fertility, the number of target words produced by a source word +:p1: Probability that a target word produced by a source word is + accompanied by another target word that is aligned to NULL +:p0: 1 - p1 +:max_v: Maximum vacancy +:dv: Vacancy difference, Δv + +The definition of v_head here differs from GIZA++, section 4.7 of +[Brown et al., 1993], and [Koehn, 2010]. In the latter cases, v_head is +v_head(v(j) | v(center of previous cept),max_v,word_class(t)). + +Here, we follow appendix B of [Brown et al., 1993] and combine v(j) with +v(center of previous cept) to obtain dv: +v_head(v(j) - v(center of previous cept) | max_v,word_class(t)). + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. 
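+
+Illustration
+------------
+
+A rough sketch with an invented slot layout: suppose the target sentence
+has five slots and slot 2 is already occupied, so slots 1, 3, 4 and 5
+are vacant and v(4) = 3. If the previous cept's center is at position 1,
+placing a head word at position 4 gives a vacancy difference
+dv = v(4) - v(1) = 3 - 1 = 2. For a one-word tablet the maximum vacancy
+is max_v = 4 - 1 + 1 = 4, and a trained model scores the placement as::
+
+    head_vacancy_table[dv][max_v][trg_class]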
+""" + +import warnings +from collections import defaultdict +from math import factorial + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel4 +from nltk.translate.ibm_model import Counts, longest_target_sentence_length + + +class IBMModel5(IBMModel): + """ + Translation model that keeps track of vacant positions in the target + sentence to decide where to place translated words + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book'])) + >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize'])) + >>> src_classes = {'the': 0, 'a': 0, 'small': 1, 'big': 1, 'house': 2, 'book': 2, 'is': 3, 'was': 3, 'i': 4, 'summarize': 5 } + >>> trg_classes = {'das': 0, 'ein': 0, 'haus': 1, 'buch': 1, 'klein': 2, 'groß': 2, 'ist': 3, 'war': 3, 'ja': 4, 'ich': 5, 'fasse': 6, 'zusammen': 6 } + + >>> ibm5 = IBMModel5(bitext, 5, src_classes, trg_classes) + + >>> print(round(ibm5.head_vacancy_table[1][1][1], 3)) + 1.0 + >>> print(round(ibm5.head_vacancy_table[2][1][1], 3)) + 0.0 + >>> print(round(ibm5.non_head_vacancy_table[3][3][6], 3)) + 1.0 + + >>> print(round(ibm5.fertility_table[2]['summarize'], 3)) + 1.0 + >>> print(round(ibm5.fertility_table[1]['book'], 3)) + 1.0 + + >>> print(round(ibm5.p1, 3)) + 0.033 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)]) + + """ + + MIN_SCORE_FACTOR = 0.2 + """ + Alignments with scores below this factor are pruned during sampling + """ + + def __init__( + self, + sentence_aligned_corpus, + iterations, + source_word_classes, + target_word_classes, + probability_tables=None, + ): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model, vacancy models, a fertility model, and a + model for generating NULL-aligned words. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param source_word_classes: Lookup table that maps a source word + to its word class, the latter represented by an integer id + :type source_word_classes: dict[str]: int + + :param target_word_classes: Lookup table that maps a target word + to its word class, the latter represented by an integer id + :type target_word_classes: dict[str]: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. 
+ If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``, + ``fertility_table``, ``p1``, ``head_distortion_table``, + ``non_head_distortion_table``, ``head_vacancy_table``, + ``non_head_vacancy_table``. See ``IBMModel``, ``IBMModel4``, + and ``IBMModel5`` for the type and purpose of these tables. + :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + self.reset_probabilities() + self.src_classes = source_word_classes + self.trg_classes = target_word_classes + + if probability_tables is None: + # Get probabilities from IBM model 4 + ibm4 = IBMModel4( + sentence_aligned_corpus, + iterations, + source_word_classes, + target_word_classes, + ) + self.translation_table = ibm4.translation_table + self.alignment_table = ibm4.alignment_table + self.fertility_table = ibm4.fertility_table + self.p1 = ibm4.p1 + self.head_distortion_table = ibm4.head_distortion_table + self.non_head_distortion_table = ibm4.non_head_distortion_table + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + self.fertility_table = probability_tables["fertility_table"] + self.p1 = probability_tables["p1"] + self.head_distortion_table = probability_tables["head_distortion_table"] + self.non_head_distortion_table = probability_tables[ + "non_head_distortion_table" + ] + self.head_vacancy_table = probability_tables["head_vacancy_table"] + self.non_head_vacancy_table = probability_tables["non_head_vacancy_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + def reset_probabilities(self): + super().reset_probabilities() + self.head_vacancy_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + """ + dict[int][int][int]: float. Probability(vacancy difference | + number of remaining valid positions,target word class). + Values accessed as ``head_vacancy_table[dv][v_max][trg_class]``. + """ + + self.non_head_vacancy_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + """ + dict[int][int][int]: float. Probability(vacancy difference | + number of remaining valid positions,target word class). + Values accessed as ``non_head_vacancy_table[dv][v_max][trg_class]``. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + """ + Set vacancy probabilities uniformly to + 1 / cardinality of vacancy difference values + """ + max_m = longest_target_sentence_length(sentence_aligned_corpus) + + # The maximum vacancy difference occurs when a word is placed in + # the last available position m of the target sentence and the + # previous word position has no vacancies. + # The minimum is 1-max_v, when a word is placed in the first + # available position and the previous word is placed beyond the + # last available position. + # Thus, the number of possible vacancy difference values is + # (max_v) - (1-max_v) + 1 = 2 * max_v. + if max_m > 0 and (1 / (2 * max_m)) < IBMModel.MIN_PROB: + warnings.warn( + "A target sentence is too long (" + + str(max_m) + + " words). Results may be less accurate." 
+ ) + + for max_v in range(1, max_m + 1): + for dv in range(1, max_m + 1): + initial_prob = 1 / (2 * max_v) + self.head_vacancy_table[dv][max_v] = defaultdict(lambda: initial_prob) + self.head_vacancy_table[-(dv - 1)][max_v] = defaultdict( + lambda: initial_prob + ) + self.non_head_vacancy_table[dv][max_v] = defaultdict( + lambda: initial_prob + ) + self.non_head_vacancy_table[-(dv - 1)][max_v] = defaultdict( + lambda: initial_prob + ) + + def train(self, parallel_corpus): + counts = Model5Counts() + for aligned_sentence in parallel_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + + # Sample the alignment space + sampled_alignments, best_alignment = self.sample(aligned_sentence) + # Record the most probable alignment + aligned_sentence.alignment = Alignment( + best_alignment.zero_indexed_alignment() + ) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_of_alignments(sampled_alignments) + + # E step (b): Collect counts + for alignment_info in sampled_alignments: + count = self.prob_t_a_given_s(alignment_info) + normalized_count = count / total_count + + for j in range(1, m + 1): + counts.update_lexical_translation( + normalized_count, alignment_info, j + ) + + slots = Slots(m) + for i in range(1, l + 1): + counts.update_vacancy( + normalized_count, alignment_info, i, self.trg_classes, slots + ) + + counts.update_null_generation(normalized_count, alignment_info) + counts.update_fertility(normalized_count, alignment_info) + + # M step: Update probabilities with maximum likelihood estimates + # If any probability is less than MIN_PROB, clamp it to MIN_PROB + existing_alignment_table = self.alignment_table + self.reset_probabilities() + self.alignment_table = existing_alignment_table # don't retrain + + self.maximize_lexical_translation_probabilities(counts) + self.maximize_vacancy_probabilities(counts) + self.maximize_fertility_probabilities(counts) + self.maximize_null_generation_probabilities(counts) + + def sample(self, sentence_pair): + """ + Sample the most probable alignments from the entire alignment + space according to Model 4 + + Note that Model 4 scoring is used instead of Model 5 because the + latter is too expensive to compute. + + First, determine the best alignment according to IBM Model 2. + With this initial alignment, use hill climbing to determine the + best alignment according to a IBM Model 4. Add this + alignment and its neighbors to the sample set. Repeat this + process with other initial alignments obtained by pegging an + alignment point. Finally, prune alignments that have + substantially lower Model 4 scores than the best alignment. 
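+
+        Pruning keeps only those sampled alignments whose Model 4 score
+        exceeds ``MIN_SCORE_FACTOR`` times the score of the best
+        alignment found (see ``prune``).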
+ + :param sentence_pair: Source and target language sentence pair + to generate a sample of alignments from + :type sentence_pair: AlignedSent + + :return: A set of best alignments represented by their ``AlignmentInfo`` + and the best alignment of the set for convenience + :rtype: set(AlignmentInfo), AlignmentInfo + """ + sampled_alignments, best_alignment = super().sample(sentence_pair) + return self.prune(sampled_alignments), best_alignment + + def prune(self, alignment_infos): + """ + Removes alignments from ``alignment_infos`` that have + substantially lower Model 4 scores than the best alignment + + :return: Pruned alignments + :rtype: set(AlignmentInfo) + """ + alignments = [] + best_score = 0 + + for alignment_info in alignment_infos: + score = IBMModel4.model4_prob_t_a_given_s(alignment_info, self) + best_score = max(score, best_score) + alignments.append((alignment_info, score)) + + threshold = IBMModel5.MIN_SCORE_FACTOR * best_score + alignments = [a[0] for a in alignments if a[1] > threshold] + return set(alignments) + + def hillclimb(self, alignment_info, j_pegged=None): + """ + Starting from the alignment in ``alignment_info``, look at + neighboring alignments iteratively for the best one, according + to Model 4 + + Note that Model 4 scoring is used instead of Model 5 because the + latter is too expensive to compute. + + There is no guarantee that the best alignment in the alignment + space will be found, because the algorithm might be stuck in a + local maximum. + + :param j_pegged: If specified, the search will be constrained to + alignments where ``j_pegged`` remains unchanged + :type j_pegged: int + + :return: The best alignment found from hill climbing + :rtype: AlignmentInfo + """ + alignment = alignment_info # alias with shorter name + max_probability = IBMModel4.model4_prob_t_a_given_s(alignment, self) + + while True: + old_alignment = alignment + for neighbor_alignment in self.neighboring(alignment, j_pegged): + neighbor_probability = IBMModel4.model4_prob_t_a_given_s( + neighbor_alignment, self + ) + + if neighbor_probability > max_probability: + alignment = neighbor_alignment + max_probability = neighbor_probability + + if alignment == old_alignment: + # Until there are no better alignments + break + + alignment.score = max_probability + return alignment + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + probability = 1.0 + MIN_PROB = IBMModel.MIN_PROB + slots = Slots(len(alignment_info.trg_sentence) - 1) + + def null_generation_term(): + # Binomial distribution: B(m - null_fertility, p1) + value = 1.0 + p1 = self.p1 + p0 = 1 - p1 + null_fertility = alignment_info.fertility_of_i(0) + m = len(alignment_info.trg_sentence) - 1 + value *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) + if value < MIN_PROB: + return MIN_PROB + + # Combination: (m - null_fertility) choose null_fertility + for i in range(1, null_fertility + 1): + value *= (m - null_fertility - i + 1) / i + return value + + def fertility_term(): + value = 1.0 + src_sentence = alignment_info.src_sentence + for i in range(1, len(src_sentence)): + fertility = alignment_info.fertility_of_i(i) + value *= ( + factorial(fertility) + * self.fertility_table[fertility][src_sentence[i]] + ) + if value < MIN_PROB: + return MIN_PROB + return value + + def lexical_translation_term(j): + t = alignment_info.trg_sentence[j] + i = alignment_info.alignment[j] + s = alignment_info.src_sentence[i] + return 
self.translation_table[t][s] + + def vacancy_term(i): + value = 1.0 + tablet = alignment_info.cepts[i] + tablet_length = len(tablet) + total_vacancies = slots.vacancies_at(len(slots)) + + # case 1: NULL-aligned words + if tablet_length == 0: + return value + + # case 2: head word + j = tablet[0] + previous_cept = alignment_info.previous_cept(j) + previous_center = alignment_info.center_of_cept(previous_cept) + dv = slots.vacancies_at(j) - slots.vacancies_at(previous_center) + max_v = total_vacancies - tablet_length + 1 + trg_class = self.trg_classes[alignment_info.trg_sentence[j]] + value *= self.head_vacancy_table[dv][max_v][trg_class] + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + if value < MIN_PROB: + return MIN_PROB + + # case 3: non-head words + for k in range(1, tablet_length): + previous_position = tablet[k - 1] + previous_vacancies = slots.vacancies_at(previous_position) + j = tablet[k] + dv = slots.vacancies_at(j) - previous_vacancies + max_v = total_vacancies - tablet_length + k + 1 - previous_vacancies + trg_class = self.trg_classes[alignment_info.trg_sentence[j]] + value *= self.non_head_vacancy_table[dv][max_v][trg_class] + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + if value < MIN_PROB: + return MIN_PROB + + return value + + # end nested functions + + # Abort computation whenever probability falls below MIN_PROB at + # any point, since MIN_PROB can be considered as zero + probability *= null_generation_term() + if probability < MIN_PROB: + return MIN_PROB + + probability *= fertility_term() + if probability < MIN_PROB: + return MIN_PROB + + for j in range(1, len(alignment_info.trg_sentence)): + probability *= lexical_translation_term(j) + if probability < MIN_PROB: + return MIN_PROB + + for i in range(1, len(alignment_info.src_sentence)): + probability *= vacancy_term(i) + if probability < MIN_PROB: + return MIN_PROB + + return probability + + def maximize_vacancy_probabilities(self, counts): + MIN_PROB = IBMModel.MIN_PROB + head_vacancy_table = self.head_vacancy_table + for dv, max_vs in counts.head_vacancy.items(): + for max_v, trg_classes in max_vs.items(): + for t_cls in trg_classes: + estimate = ( + counts.head_vacancy[dv][max_v][t_cls] + / counts.head_vacancy_for_any_dv[max_v][t_cls] + ) + head_vacancy_table[dv][max_v][t_cls] = max(estimate, MIN_PROB) + + non_head_vacancy_table = self.non_head_vacancy_table + for dv, max_vs in counts.non_head_vacancy.items(): + for max_v, trg_classes in max_vs.items(): + for t_cls in trg_classes: + estimate = ( + counts.non_head_vacancy[dv][max_v][t_cls] + / counts.non_head_vacancy_for_any_dv[max_v][t_cls] + ) + non_head_vacancy_table[dv][max_v][t_cls] = max(estimate, MIN_PROB) + + +class Model5Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for vacancies. 
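+
+    Vacancy counts are accessed as
+    ``head_vacancy[dv][max_v][trg_class]`` and
+    ``non_head_vacancy[dv][max_v][trg_class]``, mirroring the layout of
+    the vacancy probability tables they are used to estimate.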
+ """ + + def __init__(self): + super().__init__() + self.head_vacancy = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + self.head_vacancy_for_any_dv = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.non_head_vacancy = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + self.non_head_vacancy_for_any_dv = defaultdict(lambda: defaultdict(lambda: 0.0)) + + def update_vacancy(self, count, alignment_info, i, trg_classes, slots): + """ + :param count: Value to add to the vacancy counts + :param alignment_info: Alignment under consideration + :param i: Source word position under consideration + :param trg_classes: Target word classes + :param slots: Vacancy states of the slots in the target sentence. + Output parameter that will be modified as new words are placed + in the target sentence. + """ + tablet = alignment_info.cepts[i] + tablet_length = len(tablet) + total_vacancies = slots.vacancies_at(len(slots)) + + # case 1: NULL aligned words + if tablet_length == 0: + return # ignore zero fertility words + + # case 2: head word + j = tablet[0] + previous_cept = alignment_info.previous_cept(j) + previous_center = alignment_info.center_of_cept(previous_cept) + dv = slots.vacancies_at(j) - slots.vacancies_at(previous_center) + max_v = total_vacancies - tablet_length + 1 + trg_class = trg_classes[alignment_info.trg_sentence[j]] + self.head_vacancy[dv][max_v][trg_class] += count + self.head_vacancy_for_any_dv[max_v][trg_class] += count + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + + # case 3: non-head words + for k in range(1, tablet_length): + previous_position = tablet[k - 1] + previous_vacancies = slots.vacancies_at(previous_position) + j = tablet[k] + dv = slots.vacancies_at(j) - previous_vacancies + max_v = total_vacancies - tablet_length + k + 1 - previous_vacancies + trg_class = trg_classes[alignment_info.trg_sentence[j]] + self.non_head_vacancy[dv][max_v][trg_class] += count + self.non_head_vacancy_for_any_dv[max_v][trg_class] += count + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + + +class Slots: + """ + Represents positions in a target sentence. Used to keep track of + which slot (position) is occupied. + """ + + def __init__(self, target_sentence_length): + self._slots = [False] * (target_sentence_length + 1) # 1-indexed + + def occupy(self, position): + """ + :return: Mark slot at ``position`` as occupied + """ + self._slots[position] = True + + def vacancies_at(self, position): + """ + :return: Number of vacant slots up to, and including, ``position`` + """ + vacancies = 0 + for k in range(1, position + 1): + if not self._slots[k]: + vacancies += 1 + return vacancies + + def __len__(self): + return len(self._slots) - 1 # exclude dummy zeroeth element diff --git a/venv/lib/python3.10/site-packages/nltk/translate/ibm_model.py b/venv/lib/python3.10/site-packages/nltk/translate/ibm_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ed9a49408638605f8d4d627883d51e04816877d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/ibm_model.py @@ -0,0 +1,549 @@ +# Natural Language Toolkit: IBM Model Core +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Common methods and classes for all IBM models. See ``IBMModel1``, +``IBMModel2``, ``IBMModel3``, ``IBMModel4``, and ``IBMModel5`` +for specific implementations. 
+ +The IBM models are a series of generative models that learn lexical +translation probabilities, p(target language word|source language word), +given a sentence-aligned parallel corpus. + +The models increase in sophistication from model 1 to 5. Typically, the +output of lower models is used to seed the higher models. All models +use the Expectation-Maximization (EM) algorithm to learn various +probability tables. + +Words in a sentence are one-indexed. The first word of a sentence has +position 1, not 0. Index 0 is reserved in the source sentence for the +NULL token. The concept of position does not apply to NULL, but it is +indexed at 0 by convention. + +Each target word is aligned to exactly one source word or the NULL +token. + +References: +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. +""" + +from bisect import insort_left +from collections import defaultdict +from copy import deepcopy +from math import ceil + + +def longest_target_sentence_length(sentence_aligned_corpus): + """ + :param sentence_aligned_corpus: Parallel corpus under consideration + :type sentence_aligned_corpus: list(AlignedSent) + :return: Number of words in the longest target language sentence + of ``sentence_aligned_corpus`` + """ + max_m = 0 + for aligned_sentence in sentence_aligned_corpus: + m = len(aligned_sentence.words) + max_m = max(m, max_m) + return max_m + + +class IBMModel: + """ + Abstract base class for all IBM models + """ + + # Avoid division by zero and precision errors by imposing a minimum + # value for probabilities. Note that this approach is theoretically + # incorrect, since it may create probabilities that sum to more + # than 1. In practice, the contribution of probabilities with MIN_PROB + # is tiny enough that the value of MIN_PROB can be treated as zero. + MIN_PROB = 1.0e-12 # GIZA++ is more liberal and uses 1.0e-7 + + def __init__(self, sentence_aligned_corpus): + self.init_vocab(sentence_aligned_corpus) + self.reset_probabilities() + + def reset_probabilities(self): + self.translation_table = defaultdict( + lambda: defaultdict(lambda: IBMModel.MIN_PROB) + ) + """ + dict[str][str]: float. Probability(target word | source word). + Values accessed as ``translation_table[target_word][source_word]``. + """ + + self.alignment_table = defaultdict( + lambda: defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: IBMModel.MIN_PROB)) + ) + ) + """ + dict[int][int][int][int]: float. Probability(i | j,l,m). + Values accessed as ``alignment_table[i][j][l][m]``. + Used in model 2 and hill climbing in models 3 and above + """ + + self.fertility_table = defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + """ + dict[int][str]: float. Probability(fertility | source word). + Values accessed as ``fertility_table[fertility][source_word]``. + Used in model 3 and higher. + """ + + self.p1 = 0.5 + """ + Probability that a generated word requires another target word + that is aligned to NULL. + Used in model 3 and higher. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + """ + Initialize probability tables to a uniform distribution + + Derived classes should implement this accordingly. 
+ """ + pass + + def init_vocab(self, sentence_aligned_corpus): + src_vocab = set() + trg_vocab = set() + for aligned_sentence in sentence_aligned_corpus: + trg_vocab.update(aligned_sentence.words) + src_vocab.update(aligned_sentence.mots) + # Add the NULL token + src_vocab.add(None) + + self.src_vocab = src_vocab + """ + set(str): All source language words used in training + """ + + self.trg_vocab = trg_vocab + """ + set(str): All target language words used in training + """ + + def sample(self, sentence_pair): + """ + Sample the most probable alignments from the entire alignment + space + + First, determine the best alignment according to IBM Model 2. + With this initial alignment, use hill climbing to determine the + best alignment according to a higher IBM Model. Add this + alignment and its neighbors to the sample set. Repeat this + process with other initial alignments obtained by pegging an + alignment point. + + Hill climbing may be stuck in a local maxima, hence the pegging + and trying out of different alignments. + + :param sentence_pair: Source and target language sentence pair + to generate a sample of alignments from + :type sentence_pair: AlignedSent + + :return: A set of best alignments represented by their ``AlignmentInfo`` + and the best alignment of the set for convenience + :rtype: set(AlignmentInfo), AlignmentInfo + """ + sampled_alignments = set() + l = len(sentence_pair.mots) + m = len(sentence_pair.words) + + # Start from the best model 2 alignment + initial_alignment = self.best_model2_alignment(sentence_pair) + potential_alignment = self.hillclimb(initial_alignment) + sampled_alignments.update(self.neighboring(potential_alignment)) + best_alignment = potential_alignment + + # Start from other model 2 alignments, + # with the constraint that j is aligned (pegged) to i + for j in range(1, m + 1): + for i in range(0, l + 1): + initial_alignment = self.best_model2_alignment(sentence_pair, j, i) + potential_alignment = self.hillclimb(initial_alignment, j) + neighbors = self.neighboring(potential_alignment, j) + sampled_alignments.update(neighbors) + if potential_alignment.score > best_alignment.score: + best_alignment = potential_alignment + + return sampled_alignments, best_alignment + + def best_model2_alignment(self, sentence_pair, j_pegged=None, i_pegged=0): + """ + Finds the best alignment according to IBM Model 2 + + Used as a starting point for hill climbing in Models 3 and + above, because it is easier to compute than the best alignments + in higher models + + :param sentence_pair: Source and target language sentence pair + to be word-aligned + :type sentence_pair: AlignedSent + + :param j_pegged: If specified, the alignment point of j_pegged + will be fixed to i_pegged + :type j_pegged: int + + :param i_pegged: Alignment point to j_pegged + :type i_pegged: int + """ + src_sentence = [None] + sentence_pair.mots + trg_sentence = ["UNUSED"] + sentence_pair.words # 1-indexed + + l = len(src_sentence) - 1 # exclude NULL + m = len(trg_sentence) - 1 + + alignment = [0] * (m + 1) # init all alignments to NULL + cepts = [[] for i in range(l + 1)] # init all cepts to empty list + + for j in range(1, m + 1): + if j == j_pegged: + # use the pegged alignment instead of searching for best one + best_i = i_pegged + else: + best_i = 0 + max_alignment_prob = IBMModel.MIN_PROB + t = trg_sentence[j] + + for i in range(0, l + 1): + s = src_sentence[i] + alignment_prob = ( + self.translation_table[t][s] * self.alignment_table[i][j][l][m] + ) + + if alignment_prob >= 
max_alignment_prob: + max_alignment_prob = alignment_prob + best_i = i + + alignment[j] = best_i + cepts[best_i].append(j) + + return AlignmentInfo( + tuple(alignment), tuple(src_sentence), tuple(trg_sentence), cepts + ) + + def hillclimb(self, alignment_info, j_pegged=None): + """ + Starting from the alignment in ``alignment_info``, look at + neighboring alignments iteratively for the best one + + There is no guarantee that the best alignment in the alignment + space will be found, because the algorithm might be stuck in a + local maximum. + + :param j_pegged: If specified, the search will be constrained to + alignments where ``j_pegged`` remains unchanged + :type j_pegged: int + + :return: The best alignment found from hill climbing + :rtype: AlignmentInfo + """ + alignment = alignment_info # alias with shorter name + max_probability = self.prob_t_a_given_s(alignment) + + while True: + old_alignment = alignment + for neighbor_alignment in self.neighboring(alignment, j_pegged): + neighbor_probability = self.prob_t_a_given_s(neighbor_alignment) + + if neighbor_probability > max_probability: + alignment = neighbor_alignment + max_probability = neighbor_probability + + if alignment == old_alignment: + # Until there are no better alignments + break + + alignment.score = max_probability + return alignment + + def neighboring(self, alignment_info, j_pegged=None): + """ + Determine the neighbors of ``alignment_info``, obtained by + moving or swapping one alignment point + + :param j_pegged: If specified, neighbors that have a different + alignment point from j_pegged will not be considered + :type j_pegged: int + + :return: A set neighboring alignments represented by their + ``AlignmentInfo`` + :rtype: set(AlignmentInfo) + """ + neighbors = set() + + l = len(alignment_info.src_sentence) - 1 # exclude NULL + m = len(alignment_info.trg_sentence) - 1 + original_alignment = alignment_info.alignment + original_cepts = alignment_info.cepts + + for j in range(1, m + 1): + if j != j_pegged: + # Add alignments that differ by one alignment point + for i in range(0, l + 1): + new_alignment = list(original_alignment) + new_cepts = deepcopy(original_cepts) + old_i = original_alignment[j] + + # update alignment + new_alignment[j] = i + + # update cepts + insort_left(new_cepts[i], j) + new_cepts[old_i].remove(j) + + new_alignment_info = AlignmentInfo( + tuple(new_alignment), + alignment_info.src_sentence, + alignment_info.trg_sentence, + new_cepts, + ) + neighbors.add(new_alignment_info) + + for j in range(1, m + 1): + if j != j_pegged: + # Add alignments that have two alignment points swapped + for other_j in range(1, m + 1): + if other_j != j_pegged and other_j != j: + new_alignment = list(original_alignment) + new_cepts = deepcopy(original_cepts) + other_i = original_alignment[other_j] + i = original_alignment[j] + + # update alignments + new_alignment[j] = other_i + new_alignment[other_j] = i + + # update cepts + new_cepts[other_i].remove(other_j) + insort_left(new_cepts[other_i], j) + new_cepts[i].remove(j) + insort_left(new_cepts[i], other_j) + + new_alignment_info = AlignmentInfo( + tuple(new_alignment), + alignment_info.src_sentence, + alignment_info.trg_sentence, + new_cepts, + ) + neighbors.add(new_alignment_info) + + return neighbors + + def maximize_lexical_translation_probabilities(self, counts): + for t, src_words in counts.t_given_s.items(): + for s in src_words: + estimate = counts.t_given_s[t][s] / counts.any_t_given_s[s] + self.translation_table[t][s] = max(estimate, IBMModel.MIN_PROB) + + 
def maximize_fertility_probabilities(self, counts): + for phi, src_words in counts.fertility.items(): + for s in src_words: + estimate = counts.fertility[phi][s] / counts.fertility_for_any_phi[s] + self.fertility_table[phi][s] = max(estimate, IBMModel.MIN_PROB) + + def maximize_null_generation_probabilities(self, counts): + p1_estimate = counts.p1 / (counts.p1 + counts.p0) + p1_estimate = max(p1_estimate, IBMModel.MIN_PROB) + # Clip p1 if it is too large, because p0 = 1 - p1 should not be + # smaller than MIN_PROB + self.p1 = min(p1_estimate, 1 - IBMModel.MIN_PROB) + + def prob_of_alignments(self, alignments): + probability = 0 + for alignment_info in alignments: + probability += self.prob_t_a_given_s(alignment_info) + return probability + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + + All required information is assumed to be in ``alignment_info`` + and self. + + Derived classes should override this method + """ + return 0.0 + + +class AlignmentInfo: + """ + Helper data object for training IBM Models 3 and up + + Read-only. For a source sentence and its counterpart in the target + language, this class holds information about the sentence pair's + alignment, cepts, and fertility. + + Warning: Alignments are one-indexed here, in contrast to + nltk.translate.Alignment and AlignedSent, which are zero-indexed + This class is not meant to be used outside of IBM models. + """ + + def __init__(self, alignment, src_sentence, trg_sentence, cepts): + if not isinstance(alignment, tuple): + raise TypeError( + "The alignment must be a tuple because it is used " + "to uniquely identify AlignmentInfo objects." + ) + + self.alignment = alignment + """ + tuple(int): Alignment function. ``alignment[j]`` is the position + in the source sentence that is aligned to the position j in the + target sentence. + """ + + self.src_sentence = src_sentence + """ + tuple(str): Source sentence referred to by this object. + Should include NULL token (None) in index 0. + """ + + self.trg_sentence = trg_sentence + """ + tuple(str): Target sentence referred to by this object. + Should have a dummy element in index 0 so that the first word + starts from index 1. + """ + + self.cepts = cepts + """ + list(list(int)): The positions of the target words, in + ascending order, aligned to a source word position. For example, + cepts[4] = (2, 3, 7) means that words in positions 2, 3 and 7 + of the target sentence are aligned to the word in position 4 of + the source sentence + """ + + self.score = None + """ + float: Optional. 
Probability of alignment, as defined by the + IBM model that assesses this alignment + """ + + def fertility_of_i(self, i): + """ + Fertility of word in position ``i`` of the source sentence + """ + return len(self.cepts[i]) + + def is_head_word(self, j): + """ + :return: Whether the word in position ``j`` of the target + sentence is a head word + """ + i = self.alignment[j] + return self.cepts[i][0] == j + + def center_of_cept(self, i): + """ + :return: The ceiling of the average positions of the words in + the tablet of cept ``i``, or 0 if ``i`` is None + """ + if i is None: + return 0 + + average_position = sum(self.cepts[i]) / len(self.cepts[i]) + return int(ceil(average_position)) + + def previous_cept(self, j): + """ + :return: The previous cept of ``j``, or None if ``j`` belongs to + the first cept + """ + i = self.alignment[j] + if i == 0: + raise ValueError( + "Words aligned to NULL cannot have a previous " + "cept because NULL has no position" + ) + previous_cept = i - 1 + while previous_cept > 0 and self.fertility_of_i(previous_cept) == 0: + previous_cept -= 1 + + if previous_cept <= 0: + previous_cept = None + return previous_cept + + def previous_in_tablet(self, j): + """ + :return: The position of the previous word that is in the same + tablet as ``j``, or None if ``j`` is the first word of the + tablet + """ + i = self.alignment[j] + tablet_position = self.cepts[i].index(j) + if tablet_position == 0: + return None + return self.cepts[i][tablet_position - 1] + + def zero_indexed_alignment(self): + """ + :return: Zero-indexed alignment, suitable for use in external + ``nltk.translate`` modules like ``nltk.translate.Alignment`` + :rtype: list(tuple) + """ + zero_indexed_alignment = [] + for j in range(1, len(self.trg_sentence)): + i = self.alignment[j] - 1 + if i < 0: + i = None # alignment to NULL token + zero_indexed_alignment.append((j - 1, i)) + return zero_indexed_alignment + + def __eq__(self, other): + return self.alignment == other.alignment + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self.alignment) + + +class Counts: + """ + Data object to store counts of various parameters during training + """ + + def __init__(self): + self.t_given_s = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.any_t_given_s = defaultdict(lambda: 0.0) + self.p0 = 0.0 + self.p1 = 0.0 + self.fertility = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.fertility_for_any_phi = defaultdict(lambda: 0.0) + + def update_lexical_translation(self, count, alignment_info, j): + i = alignment_info.alignment[j] + t = alignment_info.trg_sentence[j] + s = alignment_info.src_sentence[i] + self.t_given_s[t][s] += count + self.any_t_given_s[s] += count + + def update_null_generation(self, count, alignment_info): + m = len(alignment_info.trg_sentence) - 1 + fertility_of_null = alignment_info.fertility_of_i(0) + self.p1 += fertility_of_null * count + self.p0 += (m - 2 * fertility_of_null) * count + + def update_fertility(self, count, alignment_info): + for i in range(0, len(alignment_info.src_sentence)): + s = alignment_info.src_sentence[i] + phi = alignment_info.fertility_of_i(i) + self.fertility[phi][s] += count + self.fertility_for_any_phi[s] += count diff --git a/venv/lib/python3.10/site-packages/nltk/translate/meteor_score.py b/venv/lib/python3.10/site-packages/nltk/translate/meteor_score.py new file mode 100644 index 0000000000000000000000000000000000000000..847f2ad19205816f71caff5623b1d992ef2dbfda --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/nltk/translate/meteor_score.py @@ -0,0 +1,409 @@ +# Natural Language Toolkit: Machine Translation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Uday Krishna +# Contributor: Tom Aarsen +# URL: +# For license information, see LICENSE.TXT + + +from itertools import chain, product +from typing import Callable, Iterable, List, Tuple + +from nltk.corpus import WordNetCorpusReader, wordnet +from nltk.stem.api import StemmerI +from nltk.stem.porter import PorterStemmer + + +def _generate_enums( + hypothesis: Iterable[str], + reference: Iterable[str], + preprocess: Callable[[str], str] = str.lower, +) -> Tuple[List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Takes in pre-tokenized inputs for hypothesis and reference and returns + enumerated word lists for each of them + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :preprocess: preprocessing method (default str.lower) + :return: enumerated words list + """ + if isinstance(hypothesis, str): + raise TypeError( + f'"hypothesis" expects pre-tokenized hypothesis (Iterable[str]): {hypothesis}' + ) + + if isinstance(reference, str): + raise TypeError( + f'"reference" expects pre-tokenized reference (Iterable[str]): {reference}' + ) + + enum_hypothesis_list = list(enumerate(map(preprocess, hypothesis))) + enum_reference_list = list(enumerate(map(preprocess, reference))) + return enum_hypothesis_list, enum_reference_list + + +def exact_match( + hypothesis: Iterable[str], reference: Iterable[str] +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + matches exact words in hypothesis and reference + and returns a word mapping based on the enumerated + word id between hypothesis and reference + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _match_enums(enum_hypothesis_list, enum_reference_list) + + +def _match_enums( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + matches exact words in hypothesis and reference and returns + a word mapping between enum_hypothesis_list and enum_reference_list + based on the enumerated word id. 
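+
+    For example, with invented word lists::
+
+        _match_enums([(0, 'the'), (1, 'cat')], [(0, 'the'), (1, 'dog')])
+        # returns ([(0, 0)], [(1, 'cat')], [(1, 'dog')]), with the matched
+        # pair removed from both input lists in place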
+ + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + word_match = [] + for i in range(len(enum_hypothesis_list))[::-1]: + for j in range(len(enum_reference_list))[::-1]: + if enum_hypothesis_list[i][1] == enum_reference_list[j][1]: + word_match.append( + (enum_hypothesis_list[i][0], enum_reference_list[j][0]) + ) + enum_hypothesis_list.pop(i) + enum_reference_list.pop(j) + break + return word_match, enum_hypothesis_list, enum_reference_list + + +def _enum_stem_match( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], + stemmer: StemmerI = PorterStemmer(), +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Stems each word and matches them in hypothesis and reference + and returns a word mapping between enum_hypothesis_list and + enum_reference_list based on the enumerated word id. The function also + returns a enumerated list of unmatched words for hypothesis and reference. + + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + stemmed_enum_hypothesis_list = [ + (word_pair[0], stemmer.stem(word_pair[1])) for word_pair in enum_hypothesis_list + ] + + stemmed_enum_reference_list = [ + (word_pair[0], stemmer.stem(word_pair[1])) for word_pair in enum_reference_list + ] + + return _match_enums(stemmed_enum_hypothesis_list, stemmed_enum_reference_list) + + +def stem_match( + hypothesis: Iterable[str], + reference: Iterable[str], + stemmer: StemmerI = PorterStemmer(), +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Stems each word and matches them in hypothesis and reference + and returns a word mapping between hypothesis and reference + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _enum_stem_match(enum_hypothesis_list, enum_reference_list, stemmer=stemmer) + + +def _enum_wordnetsyn_match( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Matches each word in reference to a word in hypothesis + if any synonym of a hypothesis word is the exact match + to the reference word. 
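+
+    The synonym set for a hypothesis word is built roughly as follows
+    (multi-word lemmas containing ``_`` are skipped)::
+
+        synonyms = {lemma.name()
+                    for synset in wordnet.synsets(word)
+                    for lemma in synset.lemmas()
+                    if "_" not in lemma.name()} | {word}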
+ + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + """ + word_match = [] + for i in range(len(enum_hypothesis_list))[::-1]: + hypothesis_syns = set( + chain.from_iterable( + ( + lemma.name() + for lemma in synset.lemmas() + if lemma.name().find("_") < 0 + ) + for synset in wordnet.synsets(enum_hypothesis_list[i][1]) + ) + ).union({enum_hypothesis_list[i][1]}) + for j in range(len(enum_reference_list))[::-1]: + if enum_reference_list[j][1] in hypothesis_syns: + word_match.append( + (enum_hypothesis_list[i][0], enum_reference_list[j][0]) + ) + enum_hypothesis_list.pop(i) + enum_reference_list.pop(j) + break + return word_match, enum_hypothesis_list, enum_reference_list + + +def wordnetsyn_match( + hypothesis: Iterable[str], + reference: Iterable[str], + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Matches each word in reference to a word in hypothesis if any synonym + of a hypothesis word is the exact match to the reference word. + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :return: list of mapped tuples + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _enum_wordnetsyn_match( + enum_hypothesis_list, enum_reference_list, wordnet=wordnet + ) + + +def _enum_align_words( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Aligns/matches words in the hypothesis to reference by sequentially + applying exact match, stemmed match and wordnet based synonym match. + in case there are multiple matches the match which has the least number + of crossing is chosen. Takes enumerated list as input instead of + string input + + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :return: sorted list of matched tuples, unmatched hypothesis list, + unmatched reference list + """ + exact_matches, enum_hypothesis_list, enum_reference_list = _match_enums( + enum_hypothesis_list, enum_reference_list + ) + + stem_matches, enum_hypothesis_list, enum_reference_list = _enum_stem_match( + enum_hypothesis_list, enum_reference_list, stemmer=stemmer + ) + + wns_matches, enum_hypothesis_list, enum_reference_list = _enum_wordnetsyn_match( + enum_hypothesis_list, enum_reference_list, wordnet=wordnet + ) + + return ( + sorted( + exact_matches + stem_matches + wns_matches, key=lambda wordpair: wordpair[0] + ), + enum_hypothesis_list, + enum_reference_list, + ) + + +def align_words( + hypothesis: Iterable[str], + reference: Iterable[str], + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Aligns/matches words in the hypothesis to reference by sequentially + applying exact match, stemmed match and wordnet based synonym match. 
+ In case there are multiple matches the match which has the least number + of crossing is chosen. + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :return: sorted list of matched tuples, unmatched hypothesis list, unmatched reference list + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _enum_align_words( + enum_hypothesis_list, enum_reference_list, stemmer=stemmer, wordnet=wordnet + ) + + +def _count_chunks(matches: List[Tuple[int, int]]) -> int: + """ + Counts the fewest possible number of chunks such that matched unigrams + of each chunk are adjacent to each other. This is used to calculate the + fragmentation part of the metric. + + :param matches: list containing a mapping of matched words (output of align_words) + :return: Number of chunks a sentence is divided into post alignment + """ + i = 0 + chunks = 1 + while i < len(matches) - 1: + if (matches[i + 1][0] == matches[i][0] + 1) and ( + matches[i + 1][1] == matches[i][1] + 1 + ): + i += 1 + continue + i += 1 + chunks += 1 + return chunks + + +def single_meteor_score( + reference: Iterable[str], + hypothesis: Iterable[str], + preprocess: Callable[[str], str] = str.lower, + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, + alpha: float = 0.9, + beta: float = 3.0, + gamma: float = 0.5, +) -> float: + """ + Calculates METEOR score for single hypothesis and reference as per + "Meteor: An Automatic Metric for MT Evaluation with HighLevels of + Correlation with Human Judgments" by Alon Lavie and Abhaya Agarwal, + in Proceedings of ACL. + https://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf + + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', 'that', 'the', 'military', 'always', 'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', 'that', 'the', 'military', 'will', 'forever', 'heed', 'Party', 'commands'] + + + >>> round(single_meteor_score(reference1, hypothesis1),4) + 0.6944 + + If there is no words match during the alignment the method returns the + score as 0. We can safely return a zero instead of raising a + division by zero error as no match usually implies a bad translation. + + >>> round(single_meteor_score(['this', 'is', 'a', 'cat'], ['non', 'matching', 'hypothesis']),4) + 0.0 + + :param reference: pre-tokenized reference + :param hypothesis: pre-tokenized hypothesis + :param preprocess: preprocessing function (default str.lower) + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :param alpha: parameter for controlling relative weights of precision and recall. + :param beta: parameter for controlling shape of penalty as a + function of as a function of fragmentation. + :param gamma: relative weight assigned to fragmentation penalty. + :return: The sentence-level METEOR score. 
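+
+    In outline, with P = precision, R = recall, and ``frag`` the fraction of
+    chunks over matched unigrams, the implementation below computes::
+
+        fmean   = (P * R) / (alpha * P + (1 - alpha) * R)
+        penalty = gamma * frag ** beta
+        score   = (1 - penalty) * fmean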
+ """ + enum_hypothesis, enum_reference = _generate_enums( + hypothesis, reference, preprocess=preprocess + ) + translation_length = len(enum_hypothesis) + reference_length = len(enum_reference) + matches, _, _ = _enum_align_words( + enum_hypothesis, enum_reference, stemmer=stemmer, wordnet=wordnet + ) + matches_count = len(matches) + try: + precision = float(matches_count) / translation_length + recall = float(matches_count) / reference_length + fmean = (precision * recall) / (alpha * precision + (1 - alpha) * recall) + chunk_count = float(_count_chunks(matches)) + frag_frac = chunk_count / matches_count + except ZeroDivisionError: + return 0.0 + penalty = gamma * frag_frac**beta + return (1 - penalty) * fmean + + +def meteor_score( + references: Iterable[Iterable[str]], + hypothesis: Iterable[str], + preprocess: Callable[[str], str] = str.lower, + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, + alpha: float = 0.9, + beta: float = 3.0, + gamma: float = 0.5, +) -> float: + """ + Calculates METEOR score for hypothesis with multiple references as + described in "Meteor: An Automatic Metric for MT Evaluation with + HighLevels of Correlation with Human Judgments" by Alon Lavie and + Abhaya Agarwal, in Proceedings of ACL. + https://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf + + + In case of multiple references the best score is chosen. This method + iterates over single_meteor_score and picks the best pair among all + the references for a given hypothesis + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', 'that', 'the', 'military', 'always', 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', 'forever', 'hearing', 'the', 'activity', 'guidebook', 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', 'that', 'the', 'military', 'will', 'forever', 'heed', 'Party', 'commands'] + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', 'guarantees', 'the', 'military', 'forces', 'always', 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', 'army', 'always', 'to', 'heed', 'the', 'directions', 'of', 'the', 'party'] + + >>> round(meteor_score([reference1, reference2, reference3], hypothesis1),4) + 0.6944 + + If there is no words match during the alignment the method returns the + score as 0. We can safely return a zero instead of raising a + division by zero error as no match usually implies a bad translation. + + >>> round(meteor_score([['this', 'is', 'a', 'cat']], ['non', 'matching', 'hypothesis']),4) + 0.0 + + :param references: pre-tokenized reference sentences + :param hypothesis: a pre-tokenized hypothesis sentence + :param preprocess: preprocessing function (default str.lower) + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :param alpha: parameter for controlling relative weights of precision and recall. + :param beta: parameter for controlling shape of penalty as a function + of as a function of fragmentation. + :param gamma: relative weight assigned to fragmentation penalty. + :return: The sentence-level METEOR score. 
+ """ + return max( + single_meteor_score( + reference, + hypothesis, + preprocess=preprocess, + stemmer=stemmer, + wordnet=wordnet, + alpha=alpha, + beta=beta, + gamma=gamma, + ) + for reference in references + ) diff --git a/venv/lib/python3.10/site-packages/nltk/translate/metrics.py b/venv/lib/python3.10/site-packages/nltk/translate/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..88444087f65395428c87a6c5d805c682958b6e55 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/metrics.py @@ -0,0 +1,41 @@ +# Natural Language Toolkit: Translation metrics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Will Zhang +# Guan Gui +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + + +def alignment_error_rate(reference, hypothesis, possible=None): + """ + Return the Alignment Error Rate (AER) of an alignment + with respect to a "gold standard" reference alignment. + Return an error rate between 0.0 (perfect alignment) and 1.0 (no + alignment). + + >>> from nltk.translate import Alignment + >>> ref = Alignment([(0, 0), (1, 1), (2, 2)]) + >>> test = Alignment([(0, 0), (1, 2), (2, 1)]) + >>> alignment_error_rate(ref, test) # doctest: +ELLIPSIS + 0.6666666666666667 + + :type reference: Alignment + :param reference: A gold standard alignment (sure alignments) + :type hypothesis: Alignment + :param hypothesis: A hypothesis alignment (aka. candidate alignments) + :type possible: Alignment or None + :param possible: A gold standard reference of possible alignments + (defaults to *reference* if None) + :rtype: float or None + """ + + if possible is None: + possible = reference + else: + assert reference.issubset(possible) # sanity check + + return 1.0 - (len(hypothesis & reference) + len(hypothesis & possible)) / float( + len(hypothesis) + len(reference) + ) diff --git a/venv/lib/python3.10/site-packages/nltk/translate/nist_score.py b/venv/lib/python3.10/site-packages/nltk/translate/nist_score.py new file mode 100644 index 0000000000000000000000000000000000000000..0035a9dcdae5f1acf703c2c957353f880db22615 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/nist_score.py @@ -0,0 +1,195 @@ +# Natural Language Toolkit: NIST Score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: +# Contributors: +# URL: +# For license information, see LICENSE.TXT + +"""NIST score implementation.""" + +import fractions +import math +from collections import Counter + +from nltk.util import ngrams + + +def sentence_nist(references, hypothesis, n=5): + """ + Calculate NIST score from + George Doddington. 2002. "Automatic evaluation of machine translation quality + using n-gram co-occurrence statistics." Proceedings of HLT. + Morgan Kaufmann Publishers Inc. https://dl.acm.org/citation.cfm?id=1289189.1289273 + + DARPA commissioned NIST to develop an MT evaluation facility based on the BLEU + score. The official script used by NIST to compute BLEU and NIST score is + mteval-14.pl. The main differences are: + + - BLEU uses geometric mean of the ngram overlaps, NIST uses arithmetic mean. + - NIST has a different brevity penalty + - NIST score from mteval-14.pl has a self-contained tokenizer + + Note: The mteval-14.pl includes a smoothing function for BLEU score that is NOT + used in the NIST score computation. + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 
'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', + ... 'forever', 'hearing', 'the', 'activity', 'guidebook', + ... 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> sentence_nist([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS + 3.3709... + + >>> sentence_nist([reference1, reference2, reference3], hypothesis2) # doctest: +ELLIPSIS + 1.4619... + + :param references: reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param n: highest n-gram order + :type n: int + """ + return corpus_nist([references], [hypothesis], n) + + +def corpus_nist(list_of_references, hypotheses, n=5): + """ + Calculate a single corpus-level NIST score (aka. system-level BLEU) for all + the hypotheses and their respective references. + + :param references: a corpus of lists of reference sentences, w.r.t. hypotheses + :type references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param n: highest n-gram order + :type n: int + """ + # Before proceeding to compute NIST, perform sanity checks. + assert len(list_of_references) == len( + hypotheses + ), "The number of hypotheses and their reference(s) should be the same" + + # Collect the ngram coounts from the reference sentences. + ngram_freq = Counter() + total_reference_words = 0 + for ( + references + ) in list_of_references: # For each source sent, there's a list of reference sents. + for reference in references: + # For each order of ngram, count the ngram occurrences. + for i in range(1, n + 1): + ngram_freq.update(ngrams(reference, i)) + total_reference_words += len(reference) + + # Compute the information weights based on the reference sentences. + # Eqn 2 in Doddington (2002): + # Info(w_1 ... w_n) = log_2 [ (# of occurrences of w_1 ... w_n-1) / (# of occurrences of w_1 ... w_n) ] + information_weights = {} + for _ngram in ngram_freq: # w_1 ... w_n + _mgram = _ngram[:-1] # w_1 ... w_n-1 + # From https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v13a.pl#L546 + # it's computed as such: + # denominator = ngram_freq[_mgram] if _mgram and _mgram in ngram_freq else denominator = total_reference_words + # information_weights[_ngram] = -1 * math.log(ngram_freq[_ngram]/denominator) / math.log(2) + # + # Mathematically, it's equivalent to the our implementation: + if _mgram and _mgram in ngram_freq: + numerator = ngram_freq[_mgram] + else: + numerator = total_reference_words + information_weights[_ngram] = math.log(numerator / ngram_freq[_ngram], 2) + + # Micro-average. + nist_precision_numerator_per_ngram = Counter() + nist_precision_denominator_per_ngram = Counter() + l_ref, l_sys = 0, 0 + # For each order of ngram. + for i in range(1, n + 1): + # Iterate through each hypothesis and their corresponding references. 
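+        # For every hypothesis, the numerator/denominator of the information-
+        # weighted precision is computed against each of its references, and
+        # the reference giving the best precision is kept; its counts are then
+        # added to the corpus-level (micro-averaged) totals for this ngram order.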
+ for references, hypothesis in zip(list_of_references, hypotheses): + hyp_len = len(hypothesis) + + # Find reference with the best NIST score. + nist_score_per_ref = [] + for reference in references: + _ref_len = len(reference) + # Counter of ngrams in hypothesis. + hyp_ngrams = ( + Counter(ngrams(hypothesis, i)) + if len(hypothesis) >= i + else Counter() + ) + ref_ngrams = ( + Counter(ngrams(reference, i)) if len(reference) >= i else Counter() + ) + ngram_overlaps = hyp_ngrams & ref_ngrams + # Precision part of the score in Eqn 3 + _numerator = sum( + information_weights[_ngram] * count + for _ngram, count in ngram_overlaps.items() + ) + _denominator = sum(hyp_ngrams.values()) + _precision = 0 if _denominator == 0 else _numerator / _denominator + nist_score_per_ref.append( + (_precision, _numerator, _denominator, _ref_len) + ) + # Best reference. + precision, numerator, denominator, ref_len = max(nist_score_per_ref) + nist_precision_numerator_per_ngram[i] += numerator + nist_precision_denominator_per_ngram[i] += denominator + l_ref += ref_len + l_sys += hyp_len + + # Final NIST micro-average mean aggregation. + nist_precision = 0 + for i in nist_precision_numerator_per_ngram: + precision = ( + nist_precision_numerator_per_ngram[i] + / nist_precision_denominator_per_ngram[i] + ) + nist_precision += precision + # Eqn 3 in Doddington(2002) + return nist_precision * nist_length_penalty(l_ref, l_sys) + + +def nist_length_penalty(ref_len, hyp_len): + """ + Calculates the NIST length penalty, from Eq. 3 in Doddington (2002) + + penalty = exp( beta * log( min( len(hyp)/len(ref) , 1.0 ))) + + where, + + `beta` is chosen to make the brevity penalty factor = 0.5 when the + no. of words in the system output (hyp) is 2/3 of the average + no. of words in the reference translation (ref) + + The NIST penalty is different from BLEU's such that it minimize the impact + of the score of small variations in the length of a translation. + See Fig. 4 in Doddington (2002) + """ + ratio = hyp_len / ref_len + if 0 < ratio < 1: + ratio_x, score_x = 1.5, 0.5 + beta = math.log(score_x) / math.log(ratio_x) ** 2 + return math.exp(beta * math.log(ratio) ** 2) + else: # ratio <= 0 or ratio >= 1 + return max(min(ratio, 1.0), 0.0) diff --git a/venv/lib/python3.10/site-packages/nltk/translate/phrase_based.py b/venv/lib/python3.10/site-packages/nltk/translate/phrase_based.py new file mode 100644 index 0000000000000000000000000000000000000000..3fd85109ad26055023c502d6bd233a220d28e7e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/phrase_based.py @@ -0,0 +1,193 @@ +# Natural Language Toolkit: Phrase Extraction Algorithm +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Liling Tan, Fredrik Hedman, Petra Barancikova +# URL: +# For license information, see LICENSE.TXT + + +def extract( + f_start, + f_end, + e_start, + e_end, + alignment, + f_aligned, + srctext, + trgtext, + srclen, + trglen, + max_phrase_length, +): + """ + This function checks for alignment point consistency and extracts + phrases using the chunk of consistent phrases. + + A phrase pair (e, f ) is consistent with an alignment A if and only if: + + (i) No English words in the phrase pair are aligned to words outside it. + + ∀e i ∈ e, (e i , f j ) ∈ A ⇒ f j ∈ f + + (ii) No Foreign words in the phrase pair are aligned to words outside it. + + ∀f j ∈ f , (e i , f j ) ∈ A ⇒ e i ∈ e + + (iii) The phrase pair contains at least one alignment point. + + ∃e i ∈ e ̄ , f j ∈ f ̄ s.t. 
(e i , f j ) ∈ A + + :type f_start: int + :param f_start: Starting index of the possible foreign language phrases + :type f_end: int + :param f_end: End index of the possible foreign language phrases + :type e_start: int + :param e_start: Starting index of the possible source language phrases + :type e_end: int + :param e_end: End index of the possible source language phrases + :type srctext: list + :param srctext: The source language tokens, a list of string. + :type trgtext: list + :param trgtext: The target language tokens, a list of string. + :type srclen: int + :param srclen: The number of tokens in the source language tokens. + :type trglen: int + :param trglen: The number of tokens in the target language tokens. + """ + + if f_end < 0: # 0-based indexing. + return {} + # Check if alignment points are consistent. + for e, f in alignment: + if (f_start <= f <= f_end) and (e < e_start or e > e_end): + return {} + + # Add phrase pairs (incl. additional unaligned f) + phrases = set() + fs = f_start + while True: + fe = min(f_end, f_start + max_phrase_length - 1) + while True: + # add phrase pair ([e_start, e_end], [fs, fe]) to set E + # Need to +1 in range to include the end-point. + src_phrase = " ".join(srctext[e_start : e_end + 1]) + trg_phrase = " ".join(trgtext[fs : fe + 1]) + # Include more data for later ordering. + phrases.add(((e_start, e_end + 1), (fs, fe + 1), src_phrase, trg_phrase)) + fe += 1 + if fe in f_aligned or fe >= trglen: + break + fs -= 1 + if fs in f_aligned or fs < 0: + break + return phrases + + +def phrase_extraction(srctext, trgtext, alignment, max_phrase_length=0): + """ + Phrase extraction algorithm extracts all consistent phrase pairs from + a word-aligned sentence pair. + + The idea is to loop over all possible source language (e) phrases and find + the minimal foreign phrase (f) that matches each of them. Matching is done + by identifying all alignment points for the source phrase and finding the + shortest foreign phrase that includes all the foreign counterparts for the + source words. + + In short, a phrase alignment has to + (a) contain all alignment points for all covered words + (b) contain at least one alignment point + + >>> srctext = "michael assumes that he will stay in the house" + >>> trgtext = "michael geht davon aus , dass er im haus bleibt" + >>> alignment = [(0,0), (1,1), (1,2), (1,3), (2,5), (3,6), (4,9), + ... (5,9), (6,7), (7,7), (8,8)] + >>> phrases = phrase_extraction(srctext, trgtext, alignment) + >>> for i in sorted(phrases): + ... print(i) + ... 
+ ((0, 1), (0, 1), 'michael', 'michael') + ((0, 2), (0, 4), 'michael assumes', 'michael geht davon aus') + ((0, 2), (0, 5), 'michael assumes', 'michael geht davon aus ,') + ((0, 3), (0, 6), 'michael assumes that', 'michael geht davon aus , dass') + ((0, 4), (0, 7), 'michael assumes that he', 'michael geht davon aus , dass er') + ((0, 9), (0, 10), 'michael assumes that he will stay in the house', 'michael geht davon aus , dass er im haus bleibt') + ((1, 2), (1, 4), 'assumes', 'geht davon aus') + ((1, 2), (1, 5), 'assumes', 'geht davon aus ,') + ((1, 3), (1, 6), 'assumes that', 'geht davon aus , dass') + ((1, 4), (1, 7), 'assumes that he', 'geht davon aus , dass er') + ((1, 9), (1, 10), 'assumes that he will stay in the house', 'geht davon aus , dass er im haus bleibt') + ((2, 3), (4, 6), 'that', ', dass') + ((2, 3), (5, 6), 'that', 'dass') + ((2, 4), (4, 7), 'that he', ', dass er') + ((2, 4), (5, 7), 'that he', 'dass er') + ((2, 9), (4, 10), 'that he will stay in the house', ', dass er im haus bleibt') + ((2, 9), (5, 10), 'that he will stay in the house', 'dass er im haus bleibt') + ((3, 4), (6, 7), 'he', 'er') + ((3, 9), (6, 10), 'he will stay in the house', 'er im haus bleibt') + ((4, 6), (9, 10), 'will stay', 'bleibt') + ((4, 9), (7, 10), 'will stay in the house', 'im haus bleibt') + ((6, 8), (7, 8), 'in the', 'im') + ((6, 9), (7, 9), 'in the house', 'im haus') + ((8, 9), (8, 9), 'house', 'haus') + + :type srctext: str + :param srctext: The sentence string from the source language. + :type trgtext: str + :param trgtext: The sentence string from the target language. + :type alignment: list(tuple) + :param alignment: The word alignment outputs as list of tuples, where + the first elements of tuples are the source words' indices and + second elements are the target words' indices. This is also the output + format of nltk.translate.ibm1 + :rtype: list(tuple) + :return: A list of tuples, each element in a list is a phrase and each + phrase is a tuple made up of (i) its source location, (ii) its target + location, (iii) the source phrase and (iii) the target phrase. The phrase + list of tuples represents all the possible phrases extracted from the + word alignments. + :type max_phrase_length: int + :param max_phrase_length: maximal phrase length, if 0 or not specified + it is set to a length of the longer sentence (srctext or trgtext). + """ + + srctext = srctext.split() # e + trgtext = trgtext.split() # f + srclen = len(srctext) # len(e) + trglen = len(trgtext) # len(f) + # Keeps an index of which source/target words that are aligned. 
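+    # For every source span [e_start, e_end] (its length capped by
+    # max_phrase_length), the loop below finds the minimal target span
+    # [f_start, f_end] covering all alignment points of that source span;
+    # extract() then checks consistency and also emits variants widened with
+    # neighbouring unaligned target words.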
+ f_aligned = [j for _, j in alignment] + max_phrase_length = max_phrase_length or max(srclen, trglen) + + # set of phrase pairs BP + bp = set() + + for e_start in range(srclen): + max_idx = min(srclen, e_start + max_phrase_length) + for e_end in range(e_start, max_idx): + # // find the minimally matching foreign phrase + # (f start , f end ) = ( length(f), 0 ) + # f_start ∈ [0, len(f) - 1]; f_end ∈ [0, len(f) - 1] + f_start, f_end = trglen - 1, -1 # 0-based indexing + + for e, f in alignment: + if e_start <= e <= e_end: + f_start = min(f, f_start) + f_end = max(f, f_end) + # add extract (f start , f end , e start , e end ) to set BP + phrases = extract( + f_start, + f_end, + e_start, + e_end, + alignment, + f_aligned, + srctext, + trgtext, + srclen, + trglen, + max_phrase_length, + ) + if phrases: + bp.update(phrases) + return bp diff --git a/venv/lib/python3.10/site-packages/nltk/translate/ribes_score.py b/venv/lib/python3.10/site-packages/nltk/translate/ribes_score.py new file mode 100644 index 0000000000000000000000000000000000000000..f5d0bb5f14590082fb74e4a2c3613a40b6e168f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/ribes_score.py @@ -0,0 +1,330 @@ +# Natural Language Toolkit: RIBES Score +# +# Copyright (C) 2001-2023 NLTK Project +# Contributors: Katsuhito Sudoh, Liling Tan, Kasramvd, J.F.Sebastian +# Mark Byers, ekhumoro, P. Ortiz +# URL: +# For license information, see LICENSE.TXT +""" RIBES score implementation """ + +import math +from itertools import islice + +from nltk.util import choose, ngrams + + +def sentence_ribes(references, hypothesis, alpha=0.25, beta=0.10): + """ + The RIBES (Rank-based Intuitive Bilingual Evaluation Score) from + Hideki Isozaki, Tsutomu Hirao, Kevin Duh, Katsuhito Sudoh and + Hajime Tsukada. 2010. "Automatic Evaluation of Translation Quality for + Distant Language Pairs". In Proceedings of EMNLP. + https://www.aclweb.org/anthology/D/D10/D10-1092.pdf + + The generic RIBES scores used in shared task, e.g. Workshop for + Asian Translation (WAT) uses the following RIBES calculations: + + RIBES = kendall_tau * (alpha**p1) * (beta**bp) + + Please note that this re-implementation differs from the official + RIBES implementation and though it emulates the results as describe + in the original paper, there are further optimization implemented + in the official RIBES script. + + Users are encouraged to use the official RIBES script instead of this + implementation when evaluating your machine translation system. Refer + to https://www.kecl.ntt.co.jp/icl/lirg/ribes/ for the official script. + + :param references: a list of reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param alpha: hyperparameter used as a prior for the unigram precision. + :type alpha: float + :param beta: hyperparameter used as a prior for the brevity penalty. + :type beta: float + :return: The best ribes score from one of the references. + :rtype: float + """ + best_ribes = -1.0 + # Calculates RIBES for each reference and returns the best score. + for reference in references: + # Collects the *worder* from the ranked correlation alignments. 
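+        # The per-reference score combines Kendall's tau over *worder* (nkt),
+        # the unigram precision p1, and a brevity penalty bp as
+        #     nkt * p1**alpha * bp**beta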
+ worder = word_rank_alignment(reference, hypothesis) + nkt = kendall_tau(worder) + + # Calculates the brevity penalty + bp = min(1.0, math.exp(1.0 - len(reference) / len(hypothesis))) + + # Calculates the unigram precision, *p1* + p1 = len(worder) / len(hypothesis) + + _ribes = nkt * (p1**alpha) * (bp**beta) + + if _ribes > best_ribes: # Keeps the best score. + best_ribes = _ribes + + return best_ribes + + +def corpus_ribes(list_of_references, hypotheses, alpha=0.25, beta=0.10): + """ + This function "calculates RIBES for a system output (hypothesis) with + multiple references, and returns "best" score among multi-references and + individual scores. The scores are corpus-wise, i.e., averaged by the number + of sentences." (c.f. RIBES version 1.03.1 code). + + Different from BLEU's micro-average precision, RIBES calculates the + macro-average precision by averaging the best RIBES score for each pair of + hypothesis and its corresponding references + + >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', + ... 'interested', 'in', 'world', 'history'] + >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', + ... 'because', 'he', 'read', 'the', 'book'] + + >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] + >>> hypotheses = [hyp1, hyp2] + >>> round(corpus_ribes(list_of_references, hypotheses),4) + 0.3597 + + :param references: a corpus of lists of reference sentences, w.r.t. hypotheses + :type references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param alpha: hyperparameter used as a prior for the unigram precision. + :type alpha: float + :param beta: hyperparameter used as a prior for the brevity penalty. + :type beta: float + :return: The best ribes score from one of the references. + :rtype: float + """ + corpus_best_ribes = 0.0 + # Iterate through each hypothesis and their corresponding references. + for references, hypothesis in zip(list_of_references, hypotheses): + corpus_best_ribes += sentence_ribes(references, hypothesis, alpha, beta) + return corpus_best_ribes / len(hypotheses) + + +def position_of_ngram(ngram, sentence): + """ + This function returns the position of the first instance of the ngram + appearing in a sentence. + + Note that one could also use string as follows but the code is a little + convoluted with type casting back and forth: + + char_pos = ' '.join(sent)[:' '.join(sent).index(' '.join(ngram))] + word_pos = char_pos.count(' ') + + Another way to conceive this is: + + return next(i for i, ng in enumerate(ngrams(sentence, len(ngram))) + if ng == ngram) + + :param ngram: The ngram that needs to be searched + :type ngram: tuple + :param sentence: The list of tokens to search from. + :type sentence: list(str) + """ + # Iterates through the ngrams in sentence. 
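+    # If the ngram never occurs in the sentence, the loop falls through and
+    # the function implicitly returns None.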
+ for i, sublist in enumerate(ngrams(sentence, len(ngram))): + # Returns the index of the word when ngram matches. + if ngram == sublist: + return i + + +def word_rank_alignment(reference, hypothesis, character_based=False): + """ + This is the word rank alignment algorithm described in the paper to produce + the *worder* list, i.e. a list of word indices of the hypothesis word orders + w.r.t. the list of reference words. + + Below is (H0, R0) example from the Isozaki et al. 2010 paper, + note the examples are indexed from 1 but the results here are indexed from 0: + + >>> ref = str('he was interested in world history because he ' + ... 'read the book').split() + >>> hyp = str('he read the book because he was interested in world ' + ... 'history').split() + >>> word_rank_alignment(ref, hyp) + [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] + + The (H1, R1) example from the paper, note the 0th index: + + >>> ref = 'John hit Bob yesterday'.split() + >>> hyp = 'Bob hit John yesterday'.split() + >>> word_rank_alignment(ref, hyp) + [2, 1, 0, 3] + + Here is the (H2, R2) example from the paper, note the 0th index here too: + + >>> ref = 'the boy read the book'.split() + >>> hyp = 'the book was read by the boy'.split() + >>> word_rank_alignment(ref, hyp) + [3, 4, 2, 0, 1] + + :param reference: a reference sentence + :type reference: list(str) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + """ + worder = [] + hyp_len = len(hypothesis) + # Stores a list of possible ngrams from the reference sentence. + # This is used for matching context window later in the algorithm. + ref_ngrams = [] + hyp_ngrams = [] + for n in range(1, len(reference) + 1): + for ng in ngrams(reference, n): + ref_ngrams.append(ng) + for ng in ngrams(hypothesis, n): + hyp_ngrams.append(ng) + for i, h_word in enumerate(hypothesis): + # If word is not in the reference, continue. + if h_word not in reference: + continue + # If we can determine one-to-one word correspondence for unigrams that + # only appear once in both the reference and hypothesis. + elif hypothesis.count(h_word) == reference.count(h_word) == 1: + worder.append(reference.index(h_word)) + else: + max_window_size = max(i, hyp_len - i + 1) + for window in range(1, max_window_size): + if i + window < hyp_len: # If searching the right context is possible. + # Retrieve the right context window. + right_context_ngram = tuple(islice(hypothesis, i, i + window + 1)) + num_times_in_ref = ref_ngrams.count(right_context_ngram) + num_times_in_hyp = hyp_ngrams.count(right_context_ngram) + # If ngram appears only once in both ref and hyp. + if num_times_in_ref == num_times_in_hyp == 1: + # Find the position of ngram that matched the reference. + pos = position_of_ngram(right_context_ngram, reference) + worder.append(pos) # Add the positions of the ngram. + break + if window <= i: # If searching the left context is possible. + # Retrieve the left context window. + left_context_ngram = tuple(islice(hypothesis, i - window, i + 1)) + num_times_in_ref = ref_ngrams.count(left_context_ngram) + num_times_in_hyp = hyp_ngrams.count(left_context_ngram) + if num_times_in_ref == num_times_in_hyp == 1: + # Find the position of ngram that matched the reference. + pos = position_of_ngram(left_context_ngram, reference) + # Add the positions of the ngram. + worder.append(pos + len(left_context_ngram) - 1) + break + return worder + + +def find_increasing_sequences(worder): + """ + Given the *worder* list, this function groups monotonic +1 sequences. 
+ + >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] + >>> list(find_increasing_sequences(worder)) + [(7, 8, 9, 10), (0, 1, 2, 3, 4, 5)] + + :param worder: The worder list output from word_rank_alignment + :param type: list(int) + """ + items = iter(worder) + a, b = None, next(items, None) + result = [b] + while b is not None: + a, b = b, next(items, None) + if b is not None and a + 1 == b: + result.append(b) + else: + if len(result) > 1: + yield tuple(result) + result = [b] + + +def kendall_tau(worder, normalize=True): + """ + Calculates the Kendall's Tau correlation coefficient given the *worder* + list of word alignments from word_rank_alignment(), using the formula: + + tau = 2 * num_increasing_pairs / num_possible_pairs -1 + + Note that the no. of increasing pairs can be discontinuous in the *worder* + list and each each increasing sequence can be tabulated as choose(len(seq), 2) + no. of increasing pairs, e.g. + + >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] + >>> number_possible_pairs = choose(len(worder), 2) + >>> round(kendall_tau(worder, normalize=False),3) + -0.236 + >>> round(kendall_tau(worder),3) + 0.382 + + :param worder: The worder list output from word_rank_alignment + :type worder: list(int) + :param normalize: Flag to indicate normalization to between 0.0 and 1.0. + :type normalize: boolean + :return: The Kendall's Tau correlation coefficient. + :rtype: float + """ + worder_len = len(worder) + # With worder_len < 2, `choose(worder_len, 2)` will be 0. + # As we divide by this, it will give a ZeroDivisionError. + # To avoid this, we can just return the lowest possible score. + if worder_len < 2: + tau = -1 + else: + # Extract the groups of increasing/monotonic sequences. + increasing_sequences = find_increasing_sequences(worder) + # Calculate no. of increasing_pairs in *worder* list. + num_increasing_pairs = sum(choose(len(seq), 2) for seq in increasing_sequences) + # Calculate no. of possible pairs. + num_possible_pairs = choose(worder_len, 2) + # Kendall's Tau computation. + tau = 2 * num_increasing_pairs / num_possible_pairs - 1 + if normalize: # If normalized, the tau output falls between 0.0 to 1.0 + return (tau + 1) / 2 + else: # Otherwise, the tau outputs falls between -1.0 to +1.0 + return tau + + +def spearman_rho(worder, normalize=True): + """ + Calculates the Spearman's Rho correlation coefficient given the *worder* + list of word alignment from word_rank_alignment(), using the formula: + + rho = 1 - sum(d**2) / choose(len(worder)+1, 3) + + Given that d is the sum of difference between the *worder* list of indices + and the original word indices from the reference sentence. 
+ + Using the (H0,R0) and (H5, R5) example from the paper + + >>> worder = [7, 8, 9, 10, 6, 0, 1, 2, 3, 4, 5] + >>> round(spearman_rho(worder, normalize=False), 3) + -0.591 + >>> round(spearman_rho(worder), 3) + 0.205 + + :param worder: The worder list output from word_rank_alignment + :param type: list(int) + """ + worder_len = len(worder) + sum_d_square = sum((wi - i) ** 2 for wi, i in zip(worder, range(worder_len))) + rho = 1 - sum_d_square / choose(worder_len + 1, 3) + + if normalize: # If normalized, the rho output falls between 0.0 to 1.0 + return (rho + 1) / 2 + else: # Otherwise, the rho outputs falls between -1.0 to +1.0 + return rho diff --git a/venv/lib/python3.10/site-packages/nltk/translate/stack_decoder.py b/venv/lib/python3.10/site-packages/nltk/translate/stack_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..29c6c99ff8d39848e3e17d413e9b40296bd5dc71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/translate/stack_decoder.py @@ -0,0 +1,515 @@ +# Natural Language Toolkit: Stack decoder +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +A decoder that uses stacks to implement phrase-based translation. + +In phrase-based translation, the source sentence is segmented into +phrases of one or more words, and translations for those phrases are +used to build the target sentence. + +Hypothesis data structures are used to keep track of the source words +translated so far and the partial output. A hypothesis can be expanded +by selecting an untranslated phrase, looking up its translation in a +phrase table, and appending that translation to the partial output. +Translation is complete when a hypothesis covers all source words. + +The search space is huge because the source sentence can be segmented +in different ways, the source phrases can be selected in any order, +and there could be multiple translations for the same source phrase in +the phrase table. To make decoding tractable, stacks are used to limit +the number of candidate hypotheses by doing histogram and/or threshold +pruning. + +Hypotheses with the same number of words translated are placed in the +same stack. In histogram pruning, each stack has a size limit, and +the hypothesis with the lowest score is removed when the stack is full. +In threshold pruning, hypotheses that score below a certain threshold +of the best hypothesis in that stack are removed. + +Hypothesis scoring can include various factors such as phrase +translation probability, language model probability, length of +translation, cost of remaining words to be translated, and so on. + + +References: +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. 
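+
+In outline, ``StackDecoder.translate`` proceeds roughly as follows::
+
+    for stack in stacks:              # stacks[k] holds hypotheses covering k words
+        for hypothesis in stack:
+            for each untranslated source phrase found in the phrase table:
+                for each translation of that phrase:
+                    expand the hypothesis, score it, and push the result
+                    onto the stack for its new number of covered words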
+""" + +import warnings +from collections import defaultdict +from math import log + + +class StackDecoder: + """ + Phrase-based stack decoder for machine translation + + >>> from nltk.translate import PhraseTable + >>> phrase_table = PhraseTable() + >>> phrase_table.add(('niemand',), ('nobody',), log(0.8)) + >>> phrase_table.add(('niemand',), ('no', 'one'), log(0.2)) + >>> phrase_table.add(('erwartet',), ('expects',), log(0.8)) + >>> phrase_table.add(('erwartet',), ('expecting',), log(0.2)) + >>> phrase_table.add(('niemand', 'erwartet'), ('one', 'does', 'not', 'expect'), log(0.1)) + >>> phrase_table.add(('die', 'spanische', 'inquisition'), ('the', 'spanish', 'inquisition'), log(0.8)) + >>> phrase_table.add(('!',), ('!',), log(0.8)) + + >>> # nltk.model should be used here once it is implemented + >>> from collections import defaultdict + >>> language_prob = defaultdict(lambda: -999.0) + >>> language_prob[('nobody',)] = log(0.5) + >>> language_prob[('expects',)] = log(0.4) + >>> language_prob[('the', 'spanish', 'inquisition')] = log(0.2) + >>> language_prob[('!',)] = log(0.1) + >>> language_model = type('',(object,),{'probability_change': lambda self, context, phrase: language_prob[phrase], 'probability': lambda self, phrase: language_prob[phrase]})() + + >>> stack_decoder = StackDecoder(phrase_table, language_model) + + >>> stack_decoder.translate(['niemand', 'erwartet', 'die', 'spanische', 'inquisition', '!']) + ['nobody', 'expects', 'the', 'spanish', 'inquisition', '!'] + + """ + + def __init__(self, phrase_table, language_model): + """ + :param phrase_table: Table of translations for source language + phrases and the log probabilities for those translations. + :type phrase_table: PhraseTable + + :param language_model: Target language model. Must define a + ``probability_change`` method that calculates the change in + log probability of a sentence, if a given string is appended + to it. + This interface is experimental and will likely be replaced + with nltk.model once it is implemented. + :type language_model: object + """ + self.phrase_table = phrase_table + self.language_model = language_model + + self.word_penalty = 0.0 + """ + float: Influences the translation length exponentially. + If positive, shorter translations are preferred. + If negative, longer translations are preferred. + If zero, no penalty is applied. + """ + + self.beam_threshold = 0.0 + """ + float: Hypotheses that score below this factor of the best + hypothesis in a stack are dropped from consideration. + Value between 0.0 and 1.0. + """ + + self.stack_size = 100 + """ + int: Maximum number of hypotheses to consider in a stack. + Higher values increase the likelihood of a good translation, + but increases processing time. + """ + + self.__distortion_factor = 0.5 + self.__compute_log_distortion() + + @property + def distortion_factor(self): + """ + float: Amount of reordering of source phrases. + Lower values favour monotone translation, suitable when + word order is similar for both source and target languages. + Value between 0.0 and 1.0. Default 0.5. 
+ """ + return self.__distortion_factor + + @distortion_factor.setter + def distortion_factor(self, d): + self.__distortion_factor = d + self.__compute_log_distortion() + + def __compute_log_distortion(self): + # cache log(distortion_factor) so we don't have to recompute it + # when scoring hypotheses + if self.__distortion_factor == 0.0: + self.__log_distortion_factor = log(1e-9) # 1e-9 is almost zero + else: + self.__log_distortion_factor = log(self.__distortion_factor) + + def translate(self, src_sentence): + """ + :param src_sentence: Sentence to be translated + :type src_sentence: list(str) + + :return: Translated sentence + :rtype: list(str) + """ + sentence = tuple(src_sentence) # prevent accidental modification + sentence_length = len(sentence) + stacks = [ + _Stack(self.stack_size, self.beam_threshold) + for _ in range(0, sentence_length + 1) + ] + empty_hypothesis = _Hypothesis() + stacks[0].push(empty_hypothesis) + + all_phrases = self.find_all_src_phrases(sentence) + future_score_table = self.compute_future_scores(sentence) + for stack in stacks: + for hypothesis in stack: + possible_expansions = StackDecoder.valid_phrases( + all_phrases, hypothesis + ) + for src_phrase_span in possible_expansions: + src_phrase = sentence[src_phrase_span[0] : src_phrase_span[1]] + for translation_option in self.phrase_table.translations_for( + src_phrase + ): + raw_score = self.expansion_score( + hypothesis, translation_option, src_phrase_span + ) + new_hypothesis = _Hypothesis( + raw_score=raw_score, + src_phrase_span=src_phrase_span, + trg_phrase=translation_option.trg_phrase, + previous=hypothesis, + ) + new_hypothesis.future_score = self.future_score( + new_hypothesis, future_score_table, sentence_length + ) + total_words = new_hypothesis.total_translated_words() + stacks[total_words].push(new_hypothesis) + + if not stacks[sentence_length]: + warnings.warn( + "Unable to translate all words. " + "The source sentence contains words not in " + "the phrase table" + ) + # Instead of returning empty output, perhaps a partial + # translation could be returned + return [] + + best_hypothesis = stacks[sentence_length].best() + return best_hypothesis.translation_so_far() + + def find_all_src_phrases(self, src_sentence): + """ + Finds all subsequences in src_sentence that have a phrase + translation in the translation table + + :type src_sentence: tuple(str) + + :return: Subsequences that have a phrase translation, + represented as a table of lists of end positions. + For example, if result[2] is [5, 6, 9], then there are + three phrases starting from position 2 in ``src_sentence``, + ending at positions 5, 6, and 9 exclusive. The list of + ending positions are in ascending order. + :rtype: list(list(int)) + """ + sentence_length = len(src_sentence) + phrase_indices = [[] for _ in src_sentence] + for start in range(0, sentence_length): + for end in range(start + 1, sentence_length + 1): + potential_phrase = src_sentence[start:end] + if potential_phrase in self.phrase_table: + phrase_indices[start].append(end) + return phrase_indices + + def compute_future_scores(self, src_sentence): + """ + Determines the approximate scores for translating every + subsequence in ``src_sentence`` + + Future scores can be used a look-ahead to determine the + difficulty of translating the remaining parts of a src_sentence. + + :type src_sentence: tuple(str) + + :return: Scores of subsequences referenced by their start and + end positions. 
For example, result[2][5] is the score of the + subsequence covering positions 2, 3, and 4. + :rtype: dict(int: (dict(int): float)) + """ + scores = defaultdict(lambda: defaultdict(lambda: float("-inf"))) + for seq_length in range(1, len(src_sentence) + 1): + for start in range(0, len(src_sentence) - seq_length + 1): + end = start + seq_length + phrase = src_sentence[start:end] + if phrase in self.phrase_table: + score = self.phrase_table.translations_for(phrase)[ + 0 + ].log_prob # pick best (first) translation + # Warning: API of language_model is subject to change + score += self.language_model.probability(phrase) + scores[start][end] = score + + # check if a better score can be obtained by combining + # two child subsequences + for mid in range(start + 1, end): + combined_score = scores[start][mid] + scores[mid][end] + if combined_score > scores[start][end]: + scores[start][end] = combined_score + return scores + + def future_score(self, hypothesis, future_score_table, sentence_length): + """ + Determines the approximate score for translating the + untranslated words in ``hypothesis`` + """ + score = 0.0 + for span in hypothesis.untranslated_spans(sentence_length): + score += future_score_table[span[0]][span[1]] + return score + + def expansion_score(self, hypothesis, translation_option, src_phrase_span): + """ + Calculate the score of expanding ``hypothesis`` with + ``translation_option`` + + :param hypothesis: Hypothesis being expanded + :type hypothesis: _Hypothesis + + :param translation_option: Information about the proposed expansion + :type translation_option: PhraseTableEntry + + :param src_phrase_span: Word position span of the source phrase + :type src_phrase_span: tuple(int, int) + """ + score = hypothesis.raw_score + score += translation_option.log_prob + # The API of language_model is subject to change; it could accept + # a string, a list of words, and/or some other type + score += self.language_model.probability_change( + hypothesis, translation_option.trg_phrase + ) + score += self.distortion_score(hypothesis, src_phrase_span) + score -= self.word_penalty * len(translation_option.trg_phrase) + return score + + def distortion_score(self, hypothesis, next_src_phrase_span): + if not hypothesis.src_phrase_span: + return 0.0 + next_src_phrase_start = next_src_phrase_span[0] + prev_src_phrase_end = hypothesis.src_phrase_span[1] + distortion_distance = next_src_phrase_start - prev_src_phrase_end + return abs(distortion_distance) * self.__log_distortion_factor + + @staticmethod + def valid_phrases(all_phrases_from, hypothesis): + """ + Extract phrases from ``all_phrases_from`` that contains words + that have not been translated by ``hypothesis`` + + :param all_phrases_from: Phrases represented by their spans, in + the same format as the return value of + ``find_all_src_phrases`` + :type all_phrases_from: list(list(int)) + + :type hypothesis: _Hypothesis + + :return: A list of phrases, represented by their spans, that + cover untranslated positions. 
+ :rtype: list(tuple(int, int)) + """ + untranslated_spans = hypothesis.untranslated_spans(len(all_phrases_from)) + valid_phrases = [] + for available_span in untranslated_spans: + start = available_span[0] + available_end = available_span[1] + while start < available_end: + for phrase_end in all_phrases_from[start]: + if phrase_end > available_end: + # Subsequent elements in all_phrases_from[start] + # will also be > available_end, since the + # elements are in ascending order + break + valid_phrases.append((start, phrase_end)) + start += 1 + return valid_phrases + + +class _Hypothesis: + """ + Partial solution to a translation. + + Records the word positions of the phrase being translated, its + translation, raw score, and the cost of the untranslated parts of + the sentence. When the next phrase is selected to build upon the + partial solution, a new _Hypothesis object is created, with a back + pointer to the previous hypothesis. + + To find out which words have been translated so far, look at the + ``src_phrase_span`` in the hypothesis chain. Similarly, the + translation output can be found by traversing up the chain. + """ + + def __init__( + self, + raw_score=0.0, + src_phrase_span=(), + trg_phrase=(), + previous=None, + future_score=0.0, + ): + """ + :param raw_score: Likelihood of hypothesis so far. + Higher is better. Does not account for untranslated words. + :type raw_score: float + + :param src_phrase_span: Span of word positions covered by the + source phrase in this hypothesis expansion. For example, + (2, 5) means that the phrase is from the second word up to, + but not including the fifth word in the source sentence. + :type src_phrase_span: tuple(int) + + :param trg_phrase: Translation of the source phrase in this + hypothesis expansion + :type trg_phrase: tuple(str) + + :param previous: Previous hypothesis before expansion to this one + :type previous: _Hypothesis + + :param future_score: Approximate score for translating the + remaining words not covered by this hypothesis. Higher means + that the remaining words are easier to translate. + :type future_score: float + """ + self.raw_score = raw_score + self.src_phrase_span = src_phrase_span + self.trg_phrase = trg_phrase + self.previous = previous + self.future_score = future_score + + def score(self): + """ + Overall score of hypothesis after accounting for local and + global features + """ + return self.raw_score + self.future_score + + def untranslated_spans(self, sentence_length): + """ + Starting from each untranslated word, find the longest + continuous span of untranslated positions + + :param sentence_length: Length of source sentence being + translated by the hypothesis + :type sentence_length: int + + :rtype: list(tuple(int, int)) + """ + translated_positions = self.translated_positions() + translated_positions.sort() + translated_positions.append(sentence_length) # add sentinel position + + untranslated_spans = [] + start = 0 + # each untranslated span must end in one of the translated_positions + for end in translated_positions: + if start < end: + untranslated_spans.append((start, end)) + start = end + 1 + + return untranslated_spans + + def translated_positions(self): + """ + List of positions in the source sentence of words already + translated. The list is not sorted. 
+ + :rtype: list(int) + """ + translated_positions = [] + current_hypothesis = self + while current_hypothesis.previous is not None: + translated_span = current_hypothesis.src_phrase_span + translated_positions.extend(range(translated_span[0], translated_span[1])) + current_hypothesis = current_hypothesis.previous + return translated_positions + + def total_translated_words(self): + return len(self.translated_positions()) + + def translation_so_far(self): + translation = [] + self.__build_translation(self, translation) + return translation + + def __build_translation(self, hypothesis, output): + if hypothesis.previous is None: + return + self.__build_translation(hypothesis.previous, output) + output.extend(hypothesis.trg_phrase) + + +class _Stack: + """ + Collection of _Hypothesis objects + """ + + def __init__(self, max_size=100, beam_threshold=0.0): + """ + :param beam_threshold: Hypotheses that score less than this + factor of the best hypothesis are discarded from the stack. + Value must be between 0.0 and 1.0. + :type beam_threshold: float + """ + self.max_size = max_size + self.items = [] + + if beam_threshold == 0.0: + self.__log_beam_threshold = float("-inf") + else: + self.__log_beam_threshold = log(beam_threshold) + + def push(self, hypothesis): + """ + Add ``hypothesis`` to the stack. + Removes lowest scoring hypothesis if the stack is full. + After insertion, hypotheses that score less than + ``beam_threshold`` times the score of the best hypothesis + are removed. + """ + self.items.append(hypothesis) + self.items.sort(key=lambda h: h.score(), reverse=True) + while len(self.items) > self.max_size: + self.items.pop() + self.threshold_prune() + + def threshold_prune(self): + if not self.items: + return + # log(score * beam_threshold) = log(score) + log(beam_threshold) + threshold = self.items[0].score() + self.__log_beam_threshold + for hypothesis in reversed(self.items): + if hypothesis.score() < threshold: + self.items.pop() + else: + break + + def best(self): + """ + :return: Hypothesis with the highest score in the stack + :rtype: _Hypothesis + """ + if self.items: + return self.items[0] + return None + + def __iter__(self): + return iter(self.items) + + def __contains__(self, hypothesis): + return hypothesis in self.items + + def __bool__(self): + return len(self.items) != 0 + + __nonzero__ = __bool__
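+
+
+# A minimal illustration of the pruning behaviour of ``_Stack`` (the public
+# entry point remains ``StackDecoder.translate``; the names below are only
+# for demonstration):
+#
+#     stack = _Stack(max_size=10, beam_threshold=0.5)
+#     stack.push(_Hypothesis(raw_score=log(0.5)))
+#     stack.push(_Hypothesis(raw_score=log(0.01)))  # below 0.5 * best -> pruned
+#     best = stack.best()                           # hypothesis with raw_score log(0.5)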