diff --git a/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..01edbcf6160073db9d7b7028e00c2a00aa3168fd --- /dev/null +++ b/ckpts/universal/global_step40/zero/11.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:919884b6d10ee17e5c5b639c1411394f84ef73524c379e454c1085f018f33fbd +size 33555612 diff --git a/ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..dba65bb22154a37e85959d033b9ea5a53f2c39f3 --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e160f6140f0f3d71aeadb8db97bbc70be894f1f500c734db34a07ae43767beb6 +size 9372 diff --git a/ckpts/universal/global_step40/zero/14.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/14.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..f69ae83681f9d3d3babe37654da7ee06a7280bde --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75a6a8a597e0b1e79d66bd8ccab6e636a37d16a276ae9a4a8344c2dc0d95f20d +size 9293 diff --git a/ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..293683a12980bab18bc3c0f46d36b8a628ae6329 --- /dev/null +++ b/ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ca5dea5f26ac963c29753d8d2242cb785ed77a3767bd3dd2828916dbb320e66 +size 9372 diff --git a/ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..216ae77b120ccf6b5409f44bce259e870da0cdc7 --- /dev/null +++ b/ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa30388f624ced306c7f5a3cdaf4d28e4906a0c1912b2e88ed1faebd0e3e5e98 +size 9387 diff --git a/ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..2dc764b5adf918defd40f7304b25c6ba5180d94f --- /dev/null +++ b/ckpts/universal/global_step40/zero/16.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a2bd8c6105d794a281cbb230e8aee689d14d9b4ba8120beec09fff26a8ca1a0 +size 9293 diff --git a/ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..1a67b0f5d5eb482aa8d442be0ed0a6b9dc2e0dae --- /dev/null +++ b/ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:86b9b6b5cc941b50ef0640fe548a91d8185b557c4f00c0adce54388dfa9a1344 +size 33555627 diff --git a/ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..ed0bdfc1d14a89436cad962109194a1803eedc10 --- /dev/null +++ b/ckpts/universal/global_step40/zero/23.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:873e9dd677576eeae755c1200f9f0250b6b701a1bef6e4f4e66987a7d119a9d7 +size 33555533 diff --git a/ckpts/universal/global_step40/zero/26.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/26.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..8a8f2f796859d7aea24e364fc18afdff4acd51f8 --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15d60d7b9dd3fd75fb28d60475dac58910f82c24304f4c3d9b808544de715d5a +size 9372 diff --git a/ckpts/universal/global_step40/zero/26.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/26.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..def7154e3ce7a81d9abcd868e00aea8eb7f25e04 --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6dee10baea593f908f384d1a2eb90daac3f9120b2c3dbe4fcc7116b9678cc9b +size 9387 diff --git a/ckpts/universal/global_step40/zero/26.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/26.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..6a11e213c4e1e59034fe346aa56bf40ccc973ad9 --- /dev/null +++ b/ckpts/universal/global_step40/zero/26.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d200745c9f640ae2de38d5e8f66ed52191c1a48862f2d49fcca6cc0e7e9b0472 +size 9293 diff --git a/ckpts/universal/global_step40/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..cbc96c5344e2d011ae531e9d64a4b27412746d88 --- /dev/null +++ b/ckpts/universal/global_step40/zero/7.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cac4041933538666ff1790170326d845ee32386e99f2db2cb80b920a39a4d750 +size 33555612 diff --git a/venv/lib/python3.10/site-packages/nltk/cluster/util.py b/venv/lib/python3.10/site-packages/nltk/cluster/util.py new file mode 100644 index 0000000000000000000000000000000000000000..8b8ed5e9f0b97be7ce80eef87d36fdbf8c59bdfb --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/cluster/util.py @@ -0,0 +1,300 @@ +# Natural Language Toolkit: Clusterer Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Contributor: J Richard Snape +# URL: +# For license information, see LICENSE.TXT +import copy +from abc import abstractmethod +from math import sqrt +from sys import stdout + +try: + import numpy +except ImportError: + pass + +from nltk.cluster.api import ClusterI + + +class VectorSpaceClusterer(ClusterI): + """ + Abstract clusterer which takes tokens and maps them into a vector space. 
+ Optionally performs singular value decomposition to reduce the + dimensionality. + """ + + def __init__(self, normalise=False, svd_dimensions=None): + """ + :param normalise: should vectors be normalised to length 1 + :type normalise: boolean + :param svd_dimensions: number of dimensions to use in reducing vector + dimensionality with SVD + :type svd_dimensions: int + """ + self._Tt = None + self._should_normalise = normalise + self._svd_dimensions = svd_dimensions + + def cluster(self, vectors, assign_clusters=False, trace=False): + assert len(vectors) > 0 + + # normalise the vectors + if self._should_normalise: + vectors = list(map(self._normalise, vectors)) + + # use SVD to reduce the dimensionality + if self._svd_dimensions and self._svd_dimensions < len(vectors[0]): + [u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors))) + S = d[: self._svd_dimensions] * numpy.identity( + self._svd_dimensions, numpy.float64 + ) + T = u[:, : self._svd_dimensions] + Dt = vt[: self._svd_dimensions, :] + vectors = numpy.transpose(numpy.dot(S, Dt)) + self._Tt = numpy.transpose(T) + + # call abstract method to cluster the vectors + self.cluster_vectorspace(vectors, trace) + + # assign the vectors to clusters + if assign_clusters: + return [self.classify(vector) for vector in vectors] + + @abstractmethod + def cluster_vectorspace(self, vectors, trace): + """ + Finds the clusters using the given set of vectors. + """ + + def classify(self, vector): + if self._should_normalise: + vector = self._normalise(vector) + if self._Tt is not None: + vector = numpy.dot(self._Tt, vector) + cluster = self.classify_vectorspace(vector) + return self.cluster_name(cluster) + + @abstractmethod + def classify_vectorspace(self, vector): + """ + Returns the index of the appropriate cluster for the vector. + """ + + def likelihood(self, vector, label): + if self._should_normalise: + vector = self._normalise(vector) + if self._Tt is not None: + vector = numpy.dot(self._Tt, vector) + return self.likelihood_vectorspace(vector, label) + + def likelihood_vectorspace(self, vector, cluster): + """ + Returns the likelihood of the vector belonging to the cluster. + """ + predicted = self.classify_vectorspace(vector) + return 1.0 if cluster == predicted else 0.0 + + def vector(self, vector): + """ + Returns the vector after normalisation and dimensionality reduction. + """ + if self._should_normalise: + vector = self._normalise(vector) + if self._Tt is not None: + vector = numpy.dot(self._Tt, vector) + return vector + + def _normalise(self, vector): + """ + Normalises the vector to unit length. + """ + return vector / sqrt(numpy.dot(vector, vector)) + + +def euclidean_distance(u, v): + """ + Returns the Euclidean distance between vectors u and v. This is equivalent + to the length of the vector (u - v). + """ + diff = u - v + return sqrt(numpy.dot(diff, diff)) + + +def cosine_distance(u, v): + """ + Returns 1 minus the cosine of the angle between vectors u and v. This is + equal to ``1 - (u.v / |u||v|)``. + """ + return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v))))
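For orientation, here is a minimal usage sketch of the two distance helpers above (an editorial illustration, not part of the patch; the vectors are invented, and numpy is assumed to be available):

    import numpy

    u = numpy.array([1.0, 0.0, 1.0])
    v = numpy.array([0.0, 1.0, 1.0])

    # euclidean_distance(u, v) is the length of (u - v)
    diff = u - v
    print(numpy.sqrt(numpy.dot(diff, diff)))  # ~1.4142

    # cosine_distance(u, v) is 1 - (u.v / |u||v|)
    cos = numpy.dot(u, v) / (numpy.sqrt(numpy.dot(u, u)) * numpy.sqrt(numpy.dot(v, v)))
    print(1 - cos)  # 0.5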
+ """ + return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v)))) + + +class _DendrogramNode: + """Tree node of a dendrogram.""" + + def __init__(self, value, *children): + self._value = value + self._children = children + + def leaves(self, values=True): + if self._children: + leaves = [] + for child in self._children: + leaves.extend(child.leaves(values)) + return leaves + elif values: + return [self._value] + else: + return [self] + + def groups(self, n): + queue = [(self._value, self)] + + while len(queue) < n: + priority, node = queue.pop() + if not node._children: + queue.push((priority, node)) + break + for child in node._children: + if child._children: + queue.append((child._value, child)) + else: + queue.append((0, child)) + # makes the earliest merges at the start, latest at the end + queue.sort() + + groups = [] + for priority, node in queue: + groups.append(node.leaves()) + return groups + + def __lt__(self, comparator): + return cosine_distance(self._value, comparator._value) < 0 + + +class Dendrogram: + """ + Represents a dendrogram, a tree with a specified branching order. This + must be initialised with the leaf items, then iteratively call merge for + each branch. This class constructs a tree representing the order of calls + to the merge function. + """ + + def __init__(self, items=[]): + """ + :param items: the items at the leaves of the dendrogram + :type items: sequence of (any) + """ + self._items = [_DendrogramNode(item) for item in items] + self._original_items = copy.copy(self._items) + self._merge = 1 + + def merge(self, *indices): + """ + Merges nodes at given indices in the dendrogram. The nodes will be + combined which then replaces the first node specified. All other nodes + involved in the merge will be removed. + + :param indices: indices of the items to merge (at least two) + :type indices: seq of int + """ + assert len(indices) >= 2 + node = _DendrogramNode(self._merge, *(self._items[i] for i in indices)) + self._merge += 1 + self._items[indices[0]] = node + for i in indices[1:]: + del self._items[i] + + def groups(self, n): + """ + Finds the n-groups of items (leaves) reachable from a cut at depth n. + :param n: number of groups + :type n: int + """ + if len(self._items) > 1: + root = _DendrogramNode(self._merge, *self._items) + else: + root = self._items[0] + return root.groups(n) + + def show(self, leaf_labels=[]): + """ + Print the dendrogram in ASCII art to standard out. 
+ + :param leaf_labels: an optional list of strings to use for labeling the + leaves + :type leaf_labels: list + """ + + # ASCII rendering characters + JOIN, HLINK, VLINK = "+", "-", "|" + + # find the root (or create one) + if len(self._items) > 1: + root = _DendrogramNode(self._merge, *self._items) + else: + root = self._items[0] + leaves = self._original_items + + if leaf_labels: + last_row = leaf_labels + else: + last_row = ["%s" % leaf._value for leaf in leaves] + + # find the bottom row and the best cell width + width = max(map(len, last_row)) + 1 + lhalf = width // 2 + rhalf = int(width - lhalf - 1) + + # display functions + def format(centre, left=" ", right=" "): + return f"{lhalf * left}{centre}{right * rhalf}" + + def display(str): + stdout.write(str) + + # for each merge, top down + queue = [(root._value, root)] + verticals = [format(" ") for leaf in leaves] + while queue: + priority, node = queue.pop() + child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children)) + indices = list(map(leaves.index, child_left_leaf)) + if child_left_leaf: + min_idx = min(indices) + max_idx = max(indices) + for i in range(len(leaves)): + if leaves[i] in child_left_leaf: + if i == min_idx: + display(format(JOIN, " ", HLINK)) + elif i == max_idx: + display(format(JOIN, HLINK, " ")) + else: + display(format(JOIN, HLINK, HLINK)) + verticals[i] = format(VLINK) + elif min_idx <= i <= max_idx: + display(format(HLINK, HLINK, HLINK)) + else: + display(verticals[i]) + display("\n") + for child in node._children: + if child._children: + queue.append((child._value, child)) + queue.sort() + + for vertical in verticals: + display(vertical) + display("\n") + + # finally, display the last line + display("".join(item.center(width) for item in last_row)) + display("\n") + + def __repr__(self): + if len(self._items) > 1: + root = _DendrogramNode(self._merge, *self._items) + else: + root = self._items[0] + leaves = root.leaves(False) + return "<Dendrogram with %d leaves>" % len(leaves)
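To make the merge/groups protocol above concrete, a small hedged sketch (illustrative only; the leaf items are invented):

    from nltk.cluster.util import Dendrogram

    d = Dendrogram(["cat", "dog", "fish", "fowl"])
    d.merge(0, 1)        # first merge: "cat" + "dog" replace index 0
    d.merge(1, 2)        # indices have shifted: this joins "fish" + "fowl"
    print(d.groups(2))   # [['cat', 'dog'], ['fish', 'fowl']]
    d.show()             # ASCII-art rendering of the merge order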
diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__init__.py b/venv/lib/python3.10/site-packages/nltk/parse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..82600563c78bd7fb762777967a43454ffd7ab226 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/__init__.py @@ -0,0 +1,102 @@ +# Natural Language Toolkit: Parsers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: <https://www.nltk.org/> +# For license information, see LICENSE.TXT +# + +""" +NLTK Parsers + +Classes and interfaces for producing tree structures that represent +the internal organization of a text. This task is known as "parsing" +the text, and the resulting tree structures are called the text's +"parses". Typically, the text is a single sentence, and the tree +structure represents the syntactic structure of the sentence. +However, parsers can also be used in other domains. For example, +parsers can be used to derive the morphological structure of the +morphemes that make up a word, or to derive the discourse structure +for a set of utterances. + +Sometimes, a single piece of text can be represented by more than one +tree structure. Texts represented by more than one tree structure are +called "ambiguous" texts. Note that there are actually two ways in +which a text can be ambiguous: + + - The text has multiple correct parses. + - There is not enough information to decide which of several + candidate parses is correct. + +However, the parser module does *not* distinguish these two types of +ambiguity. + +The parser module defines ``ParserI``, a standard interface for parsing +texts; and two simple implementations of that interface, +``ShiftReduceParser`` and ``RecursiveDescentParser``. It also contains +two sub-modules for specialized kinds of parsing: + + - ``nltk.parse.chart`` defines chart parsing, which uses dynamic + programming to efficiently parse texts. + - ``nltk.parse.pchart`` defines probabilistic parsing, which + associates a probability with each parse. +""" + +from nltk.parse.api import ParserI +from nltk.parse.bllip import BllipParser +from nltk.parse.chart import ( + BottomUpChartParser, + BottomUpLeftCornerChartParser, + ChartParser, + LeftCornerChartParser, + SteppingChartParser, + TopDownChartParser, +) +from nltk.parse.corenlp import CoreNLPDependencyParser, CoreNLPParser +from nltk.parse.dependencygraph import DependencyGraph +from nltk.parse.earleychart import ( + EarleyChartParser, + FeatureEarleyChartParser, + FeatureIncrementalBottomUpChartParser, + FeatureIncrementalBottomUpLeftCornerChartParser, + FeatureIncrementalChartParser, + FeatureIncrementalTopDownChartParser, + IncrementalBottomUpChartParser, + IncrementalBottomUpLeftCornerChartParser, + IncrementalChartParser, + IncrementalLeftCornerChartParser, + IncrementalTopDownChartParser, +) +from nltk.parse.evaluate import DependencyEvaluator +from nltk.parse.featurechart import ( + FeatureBottomUpChartParser, + FeatureBottomUpLeftCornerChartParser, + FeatureChartParser, + FeatureTopDownChartParser, +) +from nltk.parse.malt import MaltParser +from nltk.parse.nonprojectivedependencyparser import ( + NaiveBayesDependencyScorer, + NonprojectiveDependencyParser, + ProbabilisticNonprojectiveParser, +) +from nltk.parse.pchart import ( + BottomUpProbabilisticChartParser, + InsideChartParser, + LongestChartParser, + RandomChartParser, + UnsortedChartParser, +) +from nltk.parse.projectivedependencyparser import ( + ProbabilisticProjectiveDependencyParser, + ProjectiveDependencyParser, +) +from nltk.parse.recursivedescent import ( + RecursiveDescentParser, + SteppingRecursiveDescentParser, +) +from nltk.parse.shiftreduce import ShiftReduceParser, SteppingShiftReduceParser +from nltk.parse.transitionparser import TransitionParser +from nltk.parse.util import TestGrammar, extract_test_sentences, load_parser +from nltk.parse.viterbi import ViterbiParser diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daae5d2c9c1879b514d2cf33d340fa8d61fcb6f6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb097f5699cf2a221d0643565840dadcdfcda6a3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..874d11a07011c051c44be038c9da7797e209ab11 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/bllip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7daf1a6d98f07520c0fd3bada08e1b3cf0dbb69a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/chart.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f86fbcec7dcc8bceb5afefab2df04236c1a736a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51b741e99a21704aa46b28b4dabfd5621ce69721 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d013cff583bb1fc15db8737bae83e64b3bda365d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10e1e21a3e1b16933df5fd7c529f78001c01fc14 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c603d90e5b50fb2a481fc8ea46dcb29bb273a3e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c25e9b72be1a399604fae29d74af24a09c26d0cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e534d71d19e48a91a03276b4a1d42994aa06b7cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa08ea1d24a3104e3b27c753cffc76b72bdd1227 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/nonprojectivedependencyparser.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e0fa8cdad4610bd97b146a966a8e63c5130be1d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8d970c3a2b94792fd8902c05e04885aed0938bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/projectivedependencyparser.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5d83e0c599517e7b08ca430dc16633eda0eb35e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0faea439a1f4ee1d12f1824efcc6d3bd962eb948 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dcad87e2fa7349975fc9d77c651a09b5e4164bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14603ce1153afd0b78f1cdce6600220fe38a60ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b03ce599f2b70fa483ba125bda916d13426a77cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..342a1a617accd635eda32f7f8b95efb8886f161c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/parse/api.py b/venv/lib/python3.10/site-packages/nltk/parse/api.py new file mode 100644 index 0000000000000000000000000000000000000000..280c1a5a8225e7832ecb6f80e4e96feb25ca4f8d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/api.py @@ -0,0 +1,72 @@ +# Natural Language Toolkit: Parser API +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +# + +import itertools + +from nltk.internals import overridden + + +class ParserI: + """ + A processing class for deriving trees that represent possible + structures for a sequence of tokens. These tree structures are + known as "parses". Typically, parsers are used to derive syntax + trees for sentences. But parsers can also be used to derive other + kinds of tree structure, such as morphological trees and discourse + structures. + + Subclasses must define: + - at least one of: ``parse()``, ``parse_sents()``. + + Subclasses may define: + - ``grammar()`` + """ + + def grammar(self): + """ + :return: The grammar used by this parser. + """ + raise NotImplementedError() + + def parse(self, sent, *args, **kwargs): + """ + :return: An iterator that generates parse trees for the sentence. + When possible this list is sorted from most likely to least likely. + + :param sent: The sentence to be parsed + :type sent: list(str) + :rtype: iter(Tree) + """ + if overridden(self.parse_sents): + return next(self.parse_sents([sent], *args, **kwargs)) + elif overridden(self.parse_one): + return ( + tree + for tree in [self.parse_one(sent, *args, **kwargs)] + if tree is not None + ) + elif overridden(self.parse_all): + return iter(self.parse_all(sent, *args, **kwargs)) + else: + raise NotImplementedError() + + def parse_sents(self, sents, *args, **kwargs): + """ + Apply ``self.parse()`` to each element of ``sents``. + :rtype: iter(iter(Tree)) + """ + return (self.parse(sent, *args, **kwargs) for sent in sents) + + def parse_all(self, sent, *args, **kwargs): + """:rtype: list(Tree)""" + return list(self.parse(sent, *args, **kwargs)) + + def parse_one(self, sent, *args, **kwargs): + """:rtype: Tree or None""" + return next(self.parse(sent, *args, **kwargs), None) diff --git a/venv/lib/python3.10/site-packages/nltk/parse/bllip.py b/venv/lib/python3.10/site-packages/nltk/parse/bllip.py new file mode 100644 index 0000000000000000000000000000000000000000..581ed661c256ca95ed89643516eb6edee8997300 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/bllip.py @@ -0,0 +1,299 @@ +# Natural Language Toolkit: Interface to BLLIP Parser +# +# Author: David McClosky +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from nltk.parse.api import ParserI +from nltk.tree import Tree + +""" +Interface for parsing with BLLIP Parser. Requires the Python +bllipparser module. BllipParser objects can be constructed with the +``BllipParser.from_unified_model_dir`` class method or manually using the +``BllipParser`` constructor. The former is generally easier if you have +a BLLIP Parser unified model directory -- a basic model can be obtained +from NLTK's downloader. 
More unified parsing models can be obtained with +BLLIP Parser's ModelFetcher (run ``python -m bllipparser.ModelFetcher`` +or see docs for ``bllipparser.ModelFetcher.download_and_install_model``). + +Basic usage:: + + # download and install a basic unified parsing model (Wall Street Journal) + # sudo python -m nltk.downloader bllip_wsj_no_aux + + >>> from nltk.data import find + >>> model_dir = find('models/bllip_wsj_no_aux').path + >>> bllip = BllipParser.from_unified_model_dir(model_dir) + + # 1-best parsing + >>> sentence1 = 'British left waffles on Falklands .'.split() + >>> top_parse = bllip.parse_one(sentence1) + >>> print(top_parse) + (S1 + (S + (NP (JJ British) (NN left)) + (VP (VBZ waffles) (PP (IN on) (NP (NNP Falklands)))) + (. .))) + + # n-best parsing + >>> sentence2 = 'Time flies'.split() + >>> all_parses = bllip.parse_all(sentence2) + >>> print(len(all_parses)) + 50 + >>> print(all_parses[0]) + (S1 (S (NP (NNP Time)) (VP (VBZ flies)))) + + # incorporating external tagging constraints (None means unconstrained tag) + >>> constrained1 = bllip.tagged_parse([('Time', 'VB'), ('flies', 'NNS')]) + >>> print(next(constrained1)) + (S1 (NP (VB Time) (NNS flies))) + >>> constrained2 = bllip.tagged_parse([('Time', 'NN'), ('flies', None)]) + >>> print(next(constrained2)) + (S1 (NP (NN Time) (VBZ flies))) + +References +---------- + +- Charniak, Eugene. "A maximum-entropy-inspired parser." Proceedings of + the 1st North American chapter of the Association for Computational + Linguistics conference. Association for Computational Linguistics, + 2000. + +- Charniak, Eugene, and Mark Johnson. "Coarse-to-fine n-best parsing + and MaxEnt discriminative reranking." Proceedings of the 43rd Annual + Meeting on Association for Computational Linguistics. Association + for Computational Linguistics, 2005. + +Known issues +------------ + +Note that BLLIP Parser is not currently threadsafe. Since this module +uses a SWIG interface, it is potentially unsafe to create multiple +``BllipParser`` objects in the same process. BLLIP Parser currently +has issues with non-ASCII text and will raise an error if given any. + +See https://pypi.python.org/pypi/bllipparser/ for more information +on BLLIP Parser's Python interface. +""" + +__all__ = ["BllipParser"] + +# this block allows this module to be imported even if bllipparser isn't +# available +try: + from bllipparser import RerankingParser + from bllipparser.RerankingParser import get_unified_model_parameters + + def _ensure_bllip_import_or_error(): + pass + +except ImportError as ie: + + def _ensure_bllip_import_or_error(ie=ie): + raise ImportError("Couldn't import bllipparser module: %s" % ie) + + +def _ensure_ascii(words): + try: + for i, word in enumerate(words): + word.encode("ascii") + except UnicodeEncodeError as e: + raise ValueError( + f"Token {i} ({word!r}) is non-ASCII. BLLIP Parser " + "currently doesn't support non-ASCII inputs." + ) from e + + +def _scored_parse_to_nltk_tree(scored_parse): + return Tree.fromstring(str(scored_parse.ptb_parse)) + + +class BllipParser(ParserI): + """ + Interface for parsing with BLLIP Parser. BllipParser objects can be + constructed with the ``BllipParser.from_unified_model_dir`` class + method or manually using the ``BllipParser`` constructor. + """ + + def __init__( + self, + parser_model=None, + reranker_features=None, + reranker_weights=None, + parser_options=None, + reranker_options=None, + ): + """ + Load a BLLIP Parser model from scratch. 
You'll typically want to + use the ``from_unified_model_dir()`` class method to construct + this object. + + :param parser_model: Path to parser model directory + :type parser_model: str + + :param reranker_features: Path to the reranker model's features file + :type reranker_features: str + + :param reranker_weights: Path to the reranker model's weights file + :type reranker_weights: str + + :param parser_options: optional dictionary of parser options, see + ``bllipparser.RerankingParser.RerankingParser.load_parser_options()`` + for more information. + :type parser_options: dict(str) + + :param reranker_options: optional + dictionary of reranker options, see + ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()`` + for more information. + :type reranker_options: dict(str) + """ + _ensure_bllip_import_or_error() + + parser_options = parser_options or {} + reranker_options = reranker_options or {} + + self.rrp = RerankingParser() + self.rrp.load_parser_model(parser_model, **parser_options) + if reranker_features and reranker_weights: + self.rrp.load_reranker_model( + features_filename=reranker_features, + weights_filename=reranker_weights, + **reranker_options, + ) + + def parse(self, sentence): + """ + Use BLLIP Parser to parse a sentence. Takes a sentence as a list + of words; it will be automatically tagged with this BLLIP Parser + instance's tagger. + + :return: An iterator that generates parse trees for the sentence + from most likely to least likely. + + :param sentence: The sentence to be parsed + :type sentence: list(str) + :rtype: iter(Tree) + """ + _ensure_ascii(sentence) + nbest_list = self.rrp.parse(sentence) + for scored_parse in nbest_list: + yield _scored_parse_to_nltk_tree(scored_parse) + + def tagged_parse(self, word_and_tag_pairs): + """ + Use BLLIP to parse a sentence. Takes a sentence as a list of + (word, tag) tuples; the sentence must have already been tokenized + and tagged. BLLIP will attempt to use the tags provided but may + use others if it can't come up with a complete parse subject + to those constraints. You may also specify a tag as ``None`` + to leave a token's tag unconstrained. + + :return: An iterator that generates parse trees for the sentence + from most likely to least likely. + + :param word_and_tag_pairs: Input sentence to parse as (word, tag) pairs + :type word_and_tag_pairs: list(tuple(str, str)) + :rtype: iter(Tree) + """ + words = [] + tag_map = {} + for i, (word, tag) in enumerate(word_and_tag_pairs): + words.append(word) + if tag is not None: + tag_map[i] = tag + + _ensure_ascii(words) + nbest_list = self.rrp.parse_tagged(words, tag_map) + for scored_parse in nbest_list: + yield _scored_parse_to_nltk_tree(scored_parse) + + @classmethod + def from_unified_model_dir( + cls, model_dir, parser_options=None, reranker_options=None + ): + """ + Create a ``BllipParser`` object from a unified parsing model + directory. Unified parsing model directories are a standardized + way of storing BLLIP parser and reranker models together on disk. + See ``bllipparser.RerankingParser.get_unified_model_parameters()`` + for more information about unified model directories. + + :return: A ``BllipParser`` object using the parser and reranker + models in the model directory. + + :param model_dir: Path to the unified model directory. + :type model_dir: str + :param parser_options: optional dictionary of parser options, see + ``bllipparser.RerankingParser.RerankingParser.load_parser_options()`` + for more information. + :type parser_options: dict(str) + :param reranker_options: optional dictionary of reranker options, see + ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()`` + for more information. + :type reranker_options: dict(str) + :rtype: BllipParser + """ + ( + parser_model_dir, + reranker_features_filename, + reranker_weights_filename, + ) = get_unified_model_parameters(model_dir) + return cls( + parser_model_dir, + reranker_features_filename, + reranker_weights_filename, + parser_options, + reranker_options, + )
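As a hedged illustration of the bookkeeping ``tagged_parse()`` performs above (no parser required; the sentence is invented): the (word, tag) pairs are split into a word list plus an index-to-tag map, and ``None`` tags are simply omitted so those tokens stay unconstrained:

    pairs = [("Time", "NN"), ("flies", None)]
    words = [word for word, _ in pairs]
    tag_map = {i: tag for i, (_, tag) in enumerate(pairs) if tag is not None}
    print(words)    # ['Time', 'flies']
    print(tag_map)  # {0: 'NN'}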
+ + +def demo(): + """This assumes the Python module bllipparser is installed.""" + + # download and install a basic unified parsing model (Wall Street Journal) + # sudo python -m nltk.downloader bllip_wsj_no_aux + + from nltk.data import find + + model_dir = find("models/bllip_wsj_no_aux").path + + print("Loading BLLIP Parsing models...") + # the easiest way to get started is to use a unified model + bllip = BllipParser.from_unified_model_dir(model_dir) + print("Done.") + + sentence1 = "British left waffles on Falklands .".split() + sentence2 = "I saw the man with the telescope .".split() + # this sentence is known to fail under the WSJ parsing model + fail1 = "# ! ? : -".split() + for sentence in (sentence1, sentence2, fail1): + print("Sentence: %r" % " ".join(sentence)) + try: + tree = next(bllip.parse(sentence)) + print(tree) + except StopIteration: + print("(parse failed)") + + # n-best parsing demo + for i, parse in enumerate(bllip.parse(sentence1)): + print("parse %d:\n%s" % (i, parse)) + + # using external POS tag constraints + print( + "forcing 'tree' to be 'NN':", + next(bllip.tagged_parse([("A", None), ("tree", "NN")])), + ) + print( + "forcing 'A' to be 'DT' and 'tree' to be 'NNP':", + next(bllip.tagged_parse([("A", "DT"), ("tree", "NNP")])), + ) + # constraints don't have to make sense... (though on more complicated + # sentences, they may cause the parse to fail) + print( + "forcing 'A' to be 'NNP':", + next(bllip.tagged_parse([("A", "NNP"), ("tree", None)])), + ) diff --git a/venv/lib/python3.10/site-packages/nltk/parse/chart.py b/venv/lib/python3.10/site-packages/nltk/parse/chart.py new file mode 100644 index 0000000000000000000000000000000000000000..3f068d7d8ac61f1e46aae99a5ea915c74ac2791b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/chart.py @@ -0,0 +1,1848 @@ +# Natural Language Toolkit: A Chart Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Jean Mark Gawron +# Peter Ljunglöf +# URL: <https://www.nltk.org/> +# For license information, see LICENSE.TXT + +""" +Data classes and parser implementations for "chart parsers", which +use dynamic programming to efficiently parse a text. A chart +parser derives parse trees for a text by iteratively adding "edges" +to a "chart." Each edge represents a hypothesis about the tree +structure for a subsequence of the text. The chart is a +"blackboard" for composing and combining these hypotheses. + +When a chart parser begins parsing a text, it creates a new (empty) +chart, spanning the text. It then incrementally adds new edges to the +chart. A set of "chart rules" specifies the conditions under which +new edges should be added to the chart. Once the chart reaches a +stage where none of the chart rules adds any new edges, parsing is +complete. + +Charts are encoded with the ``Chart`` class, and edges are encoded with +the ``TreeEdge`` and ``LeafEdge`` classes. The chart parser module +defines two chart parsers: + + - ``ChartParser`` is a simple and flexible chart parser.
Given a + set of chart rules, it will apply those rules to the chart until + no more edges are added. + + - ``SteppingChartParser`` is a subclass of ``ChartParser`` that can + be used to step through the parsing process. +""" + +import itertools +import re +import warnings +from functools import total_ordering + +from nltk.grammar import PCFG, is_nonterminal, is_terminal +from nltk.internals import raise_unorderable_types +from nltk.parse.api import ParserI +from nltk.tree import Tree +from nltk.util import OrderedDict + +######################################################################## +## Edges +######################################################################## + + +@total_ordering +class EdgeI: + """ + A hypothesis about the structure of part of a sentence. + Each edge records the fact that a structure is (partially) + consistent with the sentence. An edge contains: + + - A span, indicating what part of the sentence is + consistent with the hypothesized structure. + - A left-hand side, specifying what kind of structure is + hypothesized. + - A right-hand side, specifying the contents of the + hypothesized structure. + - A dot position, indicating how much of the hypothesized + structure is consistent with the sentence. + + Every edge is either complete or incomplete: + + - An edge is complete if its structure is fully consistent + with the sentence. + - An edge is incomplete if its structure is partially + consistent with the sentence. For every incomplete edge, the + span specifies a possible prefix for the edge's structure. + + There are two kinds of edge: + + - A ``TreeEdge`` records which trees have been found to + be (partially) consistent with the text. + - A ``LeafEdge`` records the tokens occurring in the text. + + The ``EdgeI`` interface provides a common interface to both types + of edge, allowing chart parsers to treat them in a uniform manner. + """ + + def __init__(self): + if self.__class__ == EdgeI: + raise TypeError("Edge is an abstract interface") + + # //////////////////////////////////////////////////////////// + # Span + # //////////////////////////////////////////////////////////// + + def span(self): + """ + Return a tuple ``(s, e)``, where ``tokens[s:e]`` is the + portion of the sentence that is consistent with this + edge's structure. + + :rtype: tuple(int, int) + """ + raise NotImplementedError() + + def start(self): + """ + Return the start index of this edge's span. + + :rtype: int + """ + raise NotImplementedError() + + def end(self): + """ + Return the end index of this edge's span. + + :rtype: int + """ + raise NotImplementedError() + + def length(self): + """ + Return the length of this edge's span. + + :rtype: int + """ + raise NotImplementedError() + + # //////////////////////////////////////////////////////////// + # Left Hand Side + # //////////////////////////////////////////////////////////// + + def lhs(self): + """ + Return this edge's left-hand side, which specifies what kind + of structure is hypothesized by this edge. + + :see: ``TreeEdge`` and ``LeafEdge`` for a description of + the left-hand side values for each edge type. + """ + raise NotImplementedError() + + # //////////////////////////////////////////////////////////// + # Right Hand Side + # //////////////////////////////////////////////////////////// + + def rhs(self): + """ + Return this edge's right-hand side, which specifies + the content of the structure hypothesized by this edge. 
+ + :see: ``TreeEdge`` and ``LeafEdge`` for a description of + the right-hand side values for each edge type. + """ + raise NotImplementedError() + + def dot(self): + """ + Return this edge's dot position, which indicates how much of + the hypothesized structure is consistent with the + sentence. In particular, ``self.rhs[:dot]`` is consistent + with ``tokens[self.start():self.end()]``. + + :rtype: int + """ + raise NotImplementedError() + + def nextsym(self): + """ + Return the element of this edge's right-hand side that + immediately follows its dot. + + :rtype: Nonterminal or terminal or None + """ + raise NotImplementedError() + + def is_complete(self): + """ + Return True if this edge's structure is fully consistent + with the text. + + :rtype: bool + """ + raise NotImplementedError() + + def is_incomplete(self): + """ + Return True if this edge's structure is partially consistent + with the text. + + :rtype: bool + """ + raise NotImplementedError() + + # //////////////////////////////////////////////////////////// + # Comparisons & hashing + # //////////////////////////////////////////////////////////// + + def __eq__(self, other): + return ( + self.__class__ is other.__class__ + and self._comparison_key == other._comparison_key + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, EdgeI): + raise_unorderable_types("<", self, other) + if self.__class__ is other.__class__: + return self._comparison_key < other._comparison_key + else: + return self.__class__.__name__ < other.__class__.__name__ + + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self._comparison_key) + return self._hash
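Before the concrete edge classes, a small hedged sketch of the dotted-edge bookkeeping they implement (an editorial aside; the grammar symbols are invented, and the printed forms are approximate):

    from nltk.grammar import Nonterminal, Production
    from nltk.parse.chart import TreeEdge

    prod = Production(Nonterminal("S"), [Nonterminal("NP"), Nonterminal("VP")])
    edge = TreeEdge.from_production(prod, 0)   # [0:0] S -> * NP VP
    print(edge.nextsym())                      # NP (the symbol after the dot)
    edge2 = edge.move_dot_forward(2)           # dot advances past NP, span grows
    print(edge2.is_complete())                 # False: VP is still unexpanded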
+ + +class TreeEdge(EdgeI): + """ + An edge that records the fact that a tree is (partially) + consistent with the sentence. A tree edge consists of: + + - A span, indicating what part of the sentence is + consistent with the hypothesized tree. + - A left-hand side, specifying the hypothesized tree's node + value. + - A right-hand side, specifying the hypothesized tree's + children. Each element of the right-hand side is either a + terminal, specifying a token with that terminal as its leaf + value; or a nonterminal, specifying a subtree with that + nonterminal's symbol as its node value. + - A dot position, indicating which children are consistent + with part of the sentence. In particular, if ``dot`` is the + dot position, ``rhs`` is the right-hand side, ``(start,end)`` + is the span, and ``sentence`` is the list of tokens in the + sentence, then ``tokens[start:end]`` can be spanned by the + children specified by ``rhs[:dot]``. + + For more information about edges, see the ``EdgeI`` interface. + """ + + def __init__(self, span, lhs, rhs, dot=0): + """ + Construct a new ``TreeEdge``. + + :type span: tuple(int, int) + :param span: A tuple ``(s, e)``, where ``tokens[s:e]`` is the + portion of the sentence that is consistent with the new + edge's structure. + :type lhs: Nonterminal + :param lhs: The new edge's left-hand side, specifying the + hypothesized tree's node value. + :type rhs: list(Nonterminal and str) + :param rhs: The new edge's right-hand side, specifying the + hypothesized tree's children. + :type dot: int + :param dot: The position of the new edge's dot. This position + specifies what prefix of the production's right hand side + is consistent with the text. In particular, if + ``sentence`` is the list of tokens in the sentence, then + ``tokens[span[0]:span[1]]`` can be spanned by the + children specified by ``rhs[:dot]``. + """ + self._span = span + self._lhs = lhs + rhs = tuple(rhs) + self._rhs = rhs + self._dot = dot + self._comparison_key = (span, lhs, rhs, dot) + + @staticmethod + def from_production(production, index): + """ + Return a new ``TreeEdge`` formed from the given production. + The new edge's left-hand side and right-hand side will + be taken from ``production``; its span will be + ``(index,index)``; and its dot position will be ``0``. + + :rtype: TreeEdge + """ + return TreeEdge( + span=(index, index), lhs=production.lhs(), rhs=production.rhs(), dot=0 + ) + + def move_dot_forward(self, new_end): + """ + Return a new ``TreeEdge`` formed from this edge. + The new edge's dot position is increased by ``1``, + and its end index will be replaced by ``new_end``. + + :param new_end: The new end index. + :type new_end: int + :rtype: TreeEdge + """ + return TreeEdge( + span=(self._span[0], new_end), + lhs=self._lhs, + rhs=self._rhs, + dot=self._dot + 1, + ) + + # Accessors + def lhs(self): + return self._lhs + + def span(self): + return self._span + + def start(self): + return self._span[0] + + def end(self): + return self._span[1] + + def length(self): + return self._span[1] - self._span[0] + + def rhs(self): + return self._rhs + + def dot(self): + return self._dot + + def is_complete(self): + return self._dot == len(self._rhs) + + def is_incomplete(self): + return self._dot != len(self._rhs) + + def nextsym(self): + if self._dot >= len(self._rhs): + return None + else: + return self._rhs[self._dot] + + # String representation + def __str__(self): + str = f"[{self._span[0]}:{self._span[1]}] " + str += "%-2r ->" % (self._lhs,) + + for i in range(len(self._rhs)): + if i == self._dot: + str += " *" + str += " %s" % repr(self._rhs[i]) + if len(self._rhs) == self._dot: + str += " *" + return str + + def __repr__(self): + return "[Edge: %s]" % self + + +class LeafEdge(EdgeI): + """ + An edge that records the fact that a leaf value is consistent with + a word in the sentence. A leaf edge consists of: + + - An index, indicating the position of the word. + - A leaf, specifying the word's content. + + A leaf edge's left-hand side is its leaf value, and its right hand + side is ``()``. Its span is ``[index, index+1]``, and its dot + position is ``0``. + """ + + def __init__(self, leaf, index): + """ + Construct a new ``LeafEdge``. + + :param leaf: The new edge's leaf value, specifying the word + that is recorded by this edge. + :param index: The new edge's index, specifying the position of + the word that is recorded by this edge.
+ """ + self._leaf = leaf + self._index = index + self._comparison_key = (leaf, index) + + # Accessors + def lhs(self): + return self._leaf + + def span(self): + return (self._index, self._index + 1) + + def start(self): + return self._index + + def end(self): + return self._index + 1 + + def length(self): + return 1 + + def rhs(self): + return () + + def dot(self): + return 0 + + def is_complete(self): + return True + + def is_incomplete(self): + return False + + def nextsym(self): + return None + + # String representations + def __str__(self): + return f"[{self._index}:{self._index + 1}] {repr(self._leaf)}" + + def __repr__(self): + return "[Edge: %s]" % (self) + + +######################################################################## +## Chart +######################################################################## + + +class Chart: + """ + A blackboard for hypotheses about the syntactic constituents of a + sentence. A chart contains a set of edges, and each edge encodes + a single hypothesis about the structure of some portion of the + sentence. + + The ``select`` method can be used to select a specific collection + of edges. For example ``chart.select(is_complete=True, start=0)`` + yields all complete edges whose start indices are 0. To ensure + the efficiency of these selection operations, ``Chart`` dynamically + creates and maintains an index for each set of attributes that + have been selected on. + + In order to reconstruct the trees that are represented by an edge, + the chart associates each edge with a set of child pointer lists. + A child pointer list is a list of the edges that license an + edge's right-hand side. + + :ivar _tokens: The sentence that the chart covers. + :ivar _num_leaves: The number of tokens. + :ivar _edges: A list of the edges in the chart + :ivar _edge_to_cpls: A dictionary mapping each edge to a set + of child pointer lists that are associated with that edge. + :ivar _indexes: A dictionary mapping tuples of edge attributes + to indices, where each index maps the corresponding edge + attribute values to lists of edges. + """ + + def __init__(self, tokens): + """ + Construct a new chart. The chart is initialized with the + leaf edges corresponding to the terminal leaves. + + :type tokens: list + :param tokens: The sentence that this chart will be used to parse. + """ + # Record the sentence token and the sentence length. + self._tokens = tuple(tokens) + self._num_leaves = len(self._tokens) + + # Initialise the chart. + self.initialize() + + def initialize(self): + """ + Clear the chart. + """ + # A list of edges contained in this chart. + self._edges = [] + + # The set of child pointer lists associated with each edge. + self._edge_to_cpls = {} + + # Indexes mapping attribute values to lists of edges + # (used by select()). + self._indexes = {} + + # //////////////////////////////////////////////////////////// + # Sentence Access + # //////////////////////////////////////////////////////////// + + def num_leaves(self): + """ + Return the number of words in this chart's sentence. + + :rtype: int + """ + return self._num_leaves + + def leaf(self, index): + """ + Return the leaf value of the word at the given index. + + :rtype: str + """ + return self._tokens[index] + + def leaves(self): + """ + Return a list of the leaf values of each word in the + chart's sentence. 
+ + :rtype: list(str) + """ + return self._tokens + + # //////////////////////////////////////////////////////////// + # Edge access + # //////////////////////////////////////////////////////////// + + def edges(self): + """ + Return a list of all edges in this chart. New edges + that are added to the chart after the call to edges() + will *not* be contained in this list. + + :rtype: list(EdgeI) + :see: ``iteredges``, ``select`` + """ + return self._edges[:] + + def iteredges(self): + """ + Return an iterator over the edges in this chart. It is + not guaranteed that new edges which are added to the + chart before the iterator is exhausted will also be generated. + + :rtype: iter(EdgeI) + :see: ``edges``, ``select`` + """ + return iter(self._edges) + + # Iterating over the chart yields its edges. + __iter__ = iteredges + + def num_edges(self): + """ + Return the number of edges contained in this chart. + + :rtype: int + """ + return len(self._edge_to_cpls) + + def select(self, **restrictions): + """ + Return an iterator over the edges in this chart. Any + new edges that are added to the chart before the iterator + is exhausted will also be generated. ``restrictions`` + can be used to restrict the set of edges that will be + generated. + + :param span: Only generate edges ``e`` where ``e.span()==span`` + :param start: Only generate edges ``e`` where ``e.start()==start`` + :param end: Only generate edges ``e`` where ``e.end()==end`` + :param length: Only generate edges ``e`` where ``e.length()==length`` + :param lhs: Only generate edges ``e`` where ``e.lhs()==lhs`` + :param rhs: Only generate edges ``e`` where ``e.rhs()==rhs`` + :param nextsym: Only generate edges ``e`` where + ``e.nextsym()==nextsym`` + :param dot: Only generate edges ``e`` where ``e.dot()==dot`` + :param is_complete: Only generate edges ``e`` where + ``e.is_complete()==is_complete`` + :param is_incomplete: Only generate edges ``e`` where + ``e.is_incomplete()==is_incomplete`` + :rtype: iter(EdgeI) + """ + # If there are no restrictions, then return all edges. + if restrictions == {}: + return iter(self._edges) + + # Find the index corresponding to the given restrictions. + restr_keys = sorted(restrictions.keys()) + restr_keys = tuple(restr_keys) + + # If it doesn't exist, then create it. + if restr_keys not in self._indexes: + self._add_index(restr_keys) + + vals = tuple(restrictions[key] for key in restr_keys) + return iter(self._indexes[restr_keys].get(vals, [])) + + def _add_index(self, restr_keys): + """ + A helper function for ``select``, which creates a new index for + a given set of attributes (aka restriction keys). + """ + # Make sure it's a valid index. + for key in restr_keys: + if not hasattr(EdgeI, key): + raise ValueError("Bad restriction: %s" % key) + + # Create the index. + index = self._indexes[restr_keys] = {} + + # Add all existing edges to the index. + for edge in self._edges: + vals = tuple(getattr(edge, key)() for key in restr_keys) + index.setdefault(vals, []).append(edge)
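A brief hedged sketch of the ``select()`` machinery described above (illustrative only; the toy tokens are invented, and the leaf edges are inserted by hand, which chart rules normally do):

    from nltk.parse.chart import Chart, LeafEdge

    tokens = ["the", "dog"]
    chart = Chart(tokens)
    for i, tok in enumerate(tokens):
        chart.insert(LeafEdge(tok, i), ())

    print(chart.num_edges())                     # 2
    print(list(chart.select(start=0)))           # edges starting at index 0
    print(list(chart.select(is_complete=True)))  # both leaf edges qualify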
+ """ + for (restr_keys, index) in self._indexes.items(): + vals = tuple(getattr(edge, key)() for key in restr_keys) + index.setdefault(vals, []).append(edge) + + # //////////////////////////////////////////////////////////// + # Edge Insertion + # //////////////////////////////////////////////////////////// + + def insert_with_backpointer(self, new_edge, previous_edge, child_edge): + """ + Add a new edge to the chart, using a pointer to the previous edge. + """ + cpls = self.child_pointer_lists(previous_edge) + new_cpls = [cpl + (child_edge,) for cpl in cpls] + return self.insert(new_edge, *new_cpls) + + def insert(self, edge, *child_pointer_lists): + """ + Add a new edge to the chart, and return True if this operation + modified the chart. In particular, return true iff the chart + did not already contain ``edge``, or if it did not already associate + ``child_pointer_lists`` with ``edge``. + + :type edge: EdgeI + :param edge: The new edge + :type child_pointer_lists: sequence of tuple(EdgeI) + :param child_pointer_lists: A sequence of lists of the edges that + were used to form this edge. This list is used to reconstruct + the trees (or partial trees) that are associated with ``edge``. + :rtype: bool + """ + # Is it a new edge? + if edge not in self._edge_to_cpls: + # Add it to the list of edges. + self._append_edge(edge) + # Register with indexes. + self._register_with_indexes(edge) + + # Get the set of child pointer lists for this edge. + cpls = self._edge_to_cpls.setdefault(edge, OrderedDict()) + chart_was_modified = False + for child_pointer_list in child_pointer_lists: + child_pointer_list = tuple(child_pointer_list) + if child_pointer_list not in cpls: + # It's a new CPL; register it, and return true. + cpls[child_pointer_list] = True + chart_was_modified = True + return chart_was_modified + + def _append_edge(self, edge): + self._edges.append(edge) + + # //////////////////////////////////////////////////////////// + # Tree extraction & child pointer lists + # //////////////////////////////////////////////////////////// + + def parses(self, root, tree_class=Tree): + """ + Return an iterator of the complete tree structures that span + the entire chart, and whose root node is ``root``. + """ + for edge in self.select(start=0, end=self._num_leaves, lhs=root): + yield from self.trees(edge, tree_class=tree_class, complete=True) + + def trees(self, edge, tree_class=Tree, complete=False): + """ + Return an iterator of the tree structures that are associated + with ``edge``. + + If ``edge`` is incomplete, then the unexpanded children will be + encoded as childless subtrees, whose node value is the + corresponding terminal or nonterminal. + + :rtype: list(Tree) + :note: If two trees share a common subtree, then the same + Tree may be used to encode that subtree in + both trees. If you need to eliminate this subtree + sharing, then create a deep copy of each tree. + """ + return iter(self._trees(edge, complete, memo={}, tree_class=tree_class)) + + def _trees(self, edge, complete, memo, tree_class): + """ + A helper function for ``trees``. + + :param memo: A dictionary used to record the trees that we've + generated for each edge, so that when we see an edge more + than once, we can reuse the same trees. + """ + # If we've seen this edge before, then reuse our old answer. + if edge in memo: + return memo[edge] + + # when we're reading trees off the chart, don't use incomplete edges + if complete and edge.is_incomplete(): + return [] + + # Leaf edges. 
+ if isinstance(edge, LeafEdge): + leaf = self._tokens[edge.start()] + memo[edge] = [leaf] + return [leaf] + + # Until we're done computing the trees for edge, set + # memo[edge] to be empty. This has the effect of filtering + # out any cyclic trees (i.e., trees that contain themselves as + # descendants), because if we reach this edge via a cycle, + # then it will appear that the edge doesn't generate any trees. + memo[edge] = [] + trees = [] + lhs = edge.lhs().symbol() + + # Each child pointer list can be used to form trees. + for cpl in self.child_pointer_lists(edge): + # Get the set of child choices for each child pointer. + # child_choices[i] is the set of choices for the tree's + # ith child. + child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl] + + # For each combination of children, add a tree. + for children in itertools.product(*child_choices): + trees.append(tree_class(lhs, children)) + + # If the edge is incomplete, then extend it with "partial trees": + if edge.is_incomplete(): + unexpanded = [tree_class(elt, []) for elt in edge.rhs()[edge.dot() :]] + for tree in trees: + tree.extend(unexpanded) + + # Update the memoization dictionary. + memo[edge] = trees + + # Return the list of trees. + return trees + + def child_pointer_lists(self, edge): + """ + Return the set of child pointer lists for the given edge. + Each child pointer list is a list of edges that have + been used to form this edge. + + :rtype: list(list(EdgeI)) + """ + # Make a copy, in case they modify it. + return self._edge_to_cpls.get(edge, {}).keys() + + # //////////////////////////////////////////////////////////// + # Display + # //////////////////////////////////////////////////////////// + def pretty_format_edge(self, edge, width=None): + """ + Return a pretty-printed string representation of a given edge + in this chart. + + :rtype: str + :param width: The number of characters allotted to each + index in the sentence. + """ + if width is None: + width = 50 // (self.num_leaves() + 1) + (start, end) = (edge.start(), edge.end()) + + str = "|" + ("." + " " * (width - 1)) * start + + # Zero-width edges are "#" if complete, ">" if incomplete + if start == end: + if edge.is_complete(): + str += "#" + else: + str += ">" + + # Spanning complete edges are "[===]"; Other edges are + # "[---]" if complete, "[--->" if incomplete + elif edge.is_complete() and edge.span() == (0, self._num_leaves): + str += "[" + ("=" * width) * (end - start - 1) + "=" * (width - 1) + "]" + elif edge.is_complete(): + str += "[" + ("-" * width) * (end - start - 1) + "-" * (width - 1) + "]" + else: + str += "[" + ("-" * width) * (end - start - 1) + "-" * (width - 1) + ">" + + str += (" " * (width - 1) + ".") * (self._num_leaves - end) + return str + "| %s" % edge + + def pretty_format_leaves(self, width=None): + """ + Return a pretty-printed string representation of this + chart's leaves. This string can be used as a header + for calls to ``pretty_format_edge``. + """ + if width is None: + width = 50 // (self.num_leaves() + 1) + + if self._tokens is not None and width > 1: + header = "|." + for tok in self._tokens: + header += tok[: width - 1].center(width - 1) + "." + header += "|" + else: + header = "" + + return header + + def pretty_format(self, width=None): + """ + Return a pretty-printed string representation of this chart. + + :param width: The number of characters allotted to each + index in the sentence. 
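+
+        A small usage sketch (illustrative; ``cp`` is assumed to be a
+        ``ChartParser`` over a toy grammar such as ``demo_grammar()``)::
+
+            chart = cp.chart_parse("John saw a dog".split())
+            print(chart.pretty_format())  # leaf header, then one row per edge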
+ :rtype: str + """ + if width is None: + width = 50 // (self.num_leaves() + 1) + # sort edges: primary key=length, secondary key=start index. + # (and filter out the token edges) + edges = sorted((e.length(), e.start(), e) for e in self) + edges = [e for (_, _, e) in edges] + + return ( + self.pretty_format_leaves(width) + + "\n" + + "\n".join(self.pretty_format_edge(edge, width) for edge in edges) + ) + + # //////////////////////////////////////////////////////////// + # Display: Dot (AT&T Graphviz) + # //////////////////////////////////////////////////////////// + + def dot_digraph(self): + # Header + s = "digraph nltk_chart {\n" + # s += ' size="5,5";\n' + s += " rankdir=LR;\n" + s += " node [height=0.1,width=0.1];\n" + s += ' node [style=filled, color="lightgray"];\n' + + # Set up the nodes + for y in range(self.num_edges(), -1, -1): + if y == 0: + s += ' node [style=filled, color="black"];\n' + for x in range(self.num_leaves() + 1): + if y == 0 or ( + x <= self._edges[y - 1].start() or x >= self._edges[y - 1].end() + ): + s += ' %04d.%04d [label=""];\n' % (x, y) + + # Add a spacer + s += " x [style=invis]; x->0000.0000 [style=invis];\n" + + # Declare ranks. + for x in range(self.num_leaves() + 1): + s += " {rank=same;" + for y in range(self.num_edges() + 1): + if y == 0 or ( + x <= self._edges[y - 1].start() or x >= self._edges[y - 1].end() + ): + s += " %04d.%04d" % (x, y) + s += "}\n" + + # Add the leaves + s += " edge [style=invis, weight=100];\n" + s += " node [shape=plaintext]\n" + s += " 0000.0000" + for x in range(self.num_leaves()): + s += "->%s->%04d.0000" % (self.leaf(x), x + 1) + s += ";\n\n" + + # Add the edges + s += " edge [style=solid, weight=1];\n" + for y, edge in enumerate(self): + for x in range(edge.start()): + s += ' %04d.%04d -> %04d.%04d [style="invis"];\n' % ( + x, + y + 1, + x + 1, + y + 1, + ) + s += ' %04d.%04d -> %04d.%04d [label="%s"];\n' % ( + edge.start(), + y + 1, + edge.end(), + y + 1, + edge, + ) + for x in range(edge.end(), self.num_leaves()): + s += ' %04d.%04d -> %04d.%04d [style="invis"];\n' % ( + x, + y + 1, + x + 1, + y + 1, + ) + s += "}\n" + return s + + +######################################################################## +## Chart Rules +######################################################################## + + +class ChartRuleI: + """ + A rule that specifies what new edges are licensed by any given set + of existing edges. Each chart rule expects a fixed number of + edges, as indicated by the class variable ``NUM_EDGES``. In + particular: + + - A chart rule with ``NUM_EDGES=0`` specifies what new edges are + licensed, regardless of existing edges. + - A chart rule with ``NUM_EDGES=1`` specifies what new edges are + licensed by a single existing edge. + - A chart rule with ``NUM_EDGES=2`` specifies what new edges are + licensed by a pair of existing edges. + + :type NUM_EDGES: int + :cvar NUM_EDGES: The number of existing edges that this rule uses + to license new edges. Typically, this number ranges from zero + to two. + """ + + def apply(self, chart, grammar, *edges): + """ + Return a generator that will add edges licensed by this rule + and the given edges to the chart, one at a time. Each + time the generator is resumed, it will either add a new + edge and yield that edge; or return. + + :type edges: list(EdgeI) + :param edges: A set of existing edges. The number of edges + that should be passed to ``apply()`` is specified by the + ``NUM_EDGES`` class variable. 
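+
+        A typical driver loop (illustrative sketch)::
+
+            for new_edge in rule.apply(chart, grammar, *edges):
+                print(chart.pretty_format_edge(new_edge))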
+ :rtype: iter(EdgeI) + """ + raise NotImplementedError() + + def apply_everywhere(self, chart, grammar): + """ + Return a generator that will add all edges licensed by + this rule, given the edges that are currently in the + chart, one at a time. Each time the generator is resumed, + it will either add a new edge and yield that edge; or return. + + :rtype: iter(EdgeI) + """ + raise NotImplementedError() + + +class AbstractChartRule(ChartRuleI): + """ + An abstract base class for chart rules. ``AbstractChartRule`` + provides: + + - A default implementation for ``apply``. + - A default implementation for ``apply_everywhere``, + (Currently, this implementation assumes that ``NUM_EDGES <= 3``.) + - A default implementation for ``__str__``, which returns a + name based on the rule's class name. + """ + + # Subclasses must define apply. + def apply(self, chart, grammar, *edges): + raise NotImplementedError() + + # Default: loop through the given number of edges, and call + # self.apply() for each set of edges. + def apply_everywhere(self, chart, grammar): + if self.NUM_EDGES == 0: + yield from self.apply(chart, grammar) + + elif self.NUM_EDGES == 1: + for e1 in chart: + yield from self.apply(chart, grammar, e1) + + elif self.NUM_EDGES == 2: + for e1 in chart: + for e2 in chart: + yield from self.apply(chart, grammar, e1, e2) + + elif self.NUM_EDGES == 3: + for e1 in chart: + for e2 in chart: + for e3 in chart: + yield from self.apply(chart, grammar, e1, e2, e3) + + else: + raise AssertionError("NUM_EDGES>3 is not currently supported") + + # Default: return a name based on the class name. + def __str__(self): + # Add spaces between InitialCapsWords. + return re.sub("([a-z])([A-Z])", r"\1 \2", self.__class__.__name__) + + +# //////////////////////////////////////////////////////////// +# Fundamental Rule +# //////////////////////////////////////////////////////////// + + +class FundamentalRule(AbstractChartRule): + r""" + A rule that joins two adjacent edges to form a single combined + edge. In particular, this rule specifies that any pair of edges + + - ``[A -> alpha \* B beta][i:j]`` + - ``[B -> gamma \*][j:k]`` + + licenses the edge: + + - ``[A -> alpha B * beta][i:j]`` + """ + + NUM_EDGES = 2 + + def apply(self, chart, grammar, left_edge, right_edge): + # Make sure the rule is applicable. + if not ( + left_edge.is_incomplete() + and right_edge.is_complete() + and left_edge.end() == right_edge.start() + and left_edge.nextsym() == right_edge.lhs() + ): + return + + # Construct the new edge. + new_edge = left_edge.move_dot_forward(right_edge.end()) + + # Insert it into the chart. + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + +class SingleEdgeFundamentalRule(FundamentalRule): + r""" + A rule that joins a given edge with adjacent edges in the chart, + to form combined edges. In particular, this rule specifies that + either of the edges: + + - ``[A -> alpha \* B beta][i:j]`` + - ``[B -> gamma \*][j:k]`` + + licenses the edge: + + - ``[A -> alpha B * beta][i:j]`` + + if the other edge is already in the chart. + + :note: This is basically ``FundamentalRule``, with one edge left + unspecified. 
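+
+        Worked instance (hypothetical edges)::
+
+            # left:   [VP -> Verb * NP][1:2]   incomplete, nextsym() == NP
+            # right:  [NP -> Det Noun *][2:4]  complete,   lhs() == NP
+            # yields: [VP -> Verb NP *][1:4]   dot moved over NP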
+ """ + + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + yield from self._apply_incomplete(chart, grammar, edge) + else: + yield from self._apply_complete(chart, grammar, edge) + + def _apply_complete(self, chart, grammar, right_edge): + for left_edge in chart.select( + end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs() + ): + new_edge = left_edge.move_dot_forward(right_edge.end()) + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + def _apply_incomplete(self, chart, grammar, left_edge): + for right_edge in chart.select( + start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym() + ): + new_edge = left_edge.move_dot_forward(right_edge.end()) + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + +# //////////////////////////////////////////////////////////// +# Inserting Terminal Leafs +# //////////////////////////////////////////////////////////// + + +class LeafInitRule(AbstractChartRule): + NUM_EDGES = 0 + + def apply(self, chart, grammar): + for index in range(chart.num_leaves()): + new_edge = LeafEdge(chart.leaf(index), index) + if chart.insert(new_edge, ()): + yield new_edge + + +# //////////////////////////////////////////////////////////// +# Top-Down Prediction +# //////////////////////////////////////////////////////////// + + +class TopDownInitRule(AbstractChartRule): + r""" + A rule licensing edges corresponding to the grammar productions for + the grammar's start symbol. In particular, this rule specifies that + ``[S -> \* alpha][0:i]`` is licensed for each grammar production + ``S -> alpha``, where ``S`` is the grammar's start symbol. + """ + + NUM_EDGES = 0 + + def apply(self, chart, grammar): + for prod in grammar.productions(lhs=grammar.start()): + new_edge = TreeEdge.from_production(prod, 0) + if chart.insert(new_edge, ()): + yield new_edge + + +class TopDownPredictRule(AbstractChartRule): + r""" + A rule licensing edges corresponding to the grammar productions + for the nonterminal following an incomplete edge's dot. In + particular, this rule specifies that + ``[A -> alpha \* B beta][i:j]`` licenses the edge + ``[B -> \* gamma][j:j]`` for each grammar production ``B -> gamma``. + + :note: This rule corresponds to the Predictor Rule in Earley parsing. + """ + + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_complete(): + return + for prod in grammar.productions(lhs=edge.nextsym()): + new_edge = TreeEdge.from_production(prod, edge.end()) + if chart.insert(new_edge, ()): + yield new_edge + + +class CachedTopDownPredictRule(TopDownPredictRule): + r""" + A cached version of ``TopDownPredictRule``. After the first time + this rule is applied to an edge with a given ``end`` and ``next``, + it will not generate any more edges for edges with that ``end`` and + ``next``. + + If ``chart`` or ``grammar`` are changed, then the cache is flushed. + """ + + def __init__(self): + TopDownPredictRule.__init__(self) + self._done = {} + + def apply(self, chart, grammar, edge): + if edge.is_complete(): + return + nextsym, index = edge.nextsym(), edge.end() + if not is_nonterminal(nextsym): + return + + # If we've already applied this rule to an edge with the same + # next & end, and the chart & grammar have not changed, then + # just return (no new edges to add). + done = self._done.get((nextsym, index), (None, None)) + if done[0] is chart and done[1] is grammar: + return + + # Add all the edges indicated by the top down expand rule. 
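+        # (Clarifying note: the terminal check below is a one-token
+        # lookahead -- a predicted production whose right-hand side starts
+        # with a terminal is only added when that terminal matches the
+        # next input leaf, pruning hopeless predictions early.)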
+ for prod in grammar.productions(lhs=nextsym): + # If the left corner in the predicted production is + # leaf, it must match with the input. + if prod.rhs(): + first = prod.rhs()[0] + if is_terminal(first): + if index >= chart.num_leaves() or first != chart.leaf(index): + continue + + new_edge = TreeEdge.from_production(prod, index) + if chart.insert(new_edge, ()): + yield new_edge + + # Record the fact that we've applied this rule. + self._done[nextsym, index] = (chart, grammar) + + +# //////////////////////////////////////////////////////////// +# Bottom-Up Prediction +# //////////////////////////////////////////////////////////// + + +class BottomUpPredictRule(AbstractChartRule): + r""" + A rule licensing any edge corresponding to a production whose + right-hand side begins with a complete edge's left-hand side. In + particular, this rule specifies that ``[A -> alpha \*]`` licenses + the edge ``[B -> \* A beta]`` for each grammar production ``B -> A beta``. + """ + + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + for prod in grammar.productions(rhs=edge.lhs()): + new_edge = TreeEdge.from_production(prod, edge.start()) + if chart.insert(new_edge, ()): + yield new_edge + + +class BottomUpPredictCombineRule(BottomUpPredictRule): + r""" + A rule licensing any edge corresponding to a production whose + right-hand side begins with a complete edge's left-hand side. In + particular, this rule specifies that ``[A -> alpha \*]`` + licenses the edge ``[B -> A \* beta]`` for each grammar + production ``B -> A beta``. + + :note: This is like ``BottomUpPredictRule``, but it also applies + the ``FundamentalRule`` to the resulting edge. + """ + + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + for prod in grammar.productions(rhs=edge.lhs()): + new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1) + if chart.insert(new_edge, (edge,)): + yield new_edge + + +class EmptyPredictRule(AbstractChartRule): + """ + A rule that inserts all empty productions as passive edges, + in every position in the chart. 
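+
+    Illustrative sketch (toy grammar with an empty production)::
+
+        # grammar = CFG.fromstring("S -> A 'b' \n A ->")
+        # This rule seeds an edge [A -> *][i:i] at every index i, so the
+        # fundamental rule can attach 'b' even where A derives nothing.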
+ """ + + NUM_EDGES = 0 + + def apply(self, chart, grammar): + for prod in grammar.productions(empty=True): + for index in range(chart.num_leaves() + 1): + new_edge = TreeEdge.from_production(prod, index) + if chart.insert(new_edge, ()): + yield new_edge + + +######################################################################## +## Filtered Bottom Up +######################################################################## + + +class FilteredSingleEdgeFundamentalRule(SingleEdgeFundamentalRule): + def _apply_complete(self, chart, grammar, right_edge): + end = right_edge.end() + nexttoken = end < chart.num_leaves() and chart.leaf(end) + for left_edge in chart.select( + end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs() + ): + if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()): + new_edge = left_edge.move_dot_forward(right_edge.end()) + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + def _apply_incomplete(self, chart, grammar, left_edge): + for right_edge in chart.select( + start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym() + ): + end = right_edge.end() + nexttoken = end < chart.num_leaves() and chart.leaf(end) + if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()): + new_edge = left_edge.move_dot_forward(right_edge.end()) + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + +class FilteredBottomUpPredictCombineRule(BottomUpPredictCombineRule): + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + + end = edge.end() + nexttoken = end < chart.num_leaves() and chart.leaf(end) + for prod in grammar.productions(rhs=edge.lhs()): + if _bottomup_filter(grammar, nexttoken, prod.rhs()): + new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1) + if chart.insert(new_edge, (edge,)): + yield new_edge + + +def _bottomup_filter(grammar, nexttoken, rhs, dot=0): + if len(rhs) <= dot + 1: + return True + _next = rhs[dot + 1] + if is_terminal(_next): + return nexttoken == _next + else: + return grammar.is_leftcorner(_next, nexttoken) + + +######################################################################## +## Generic Chart Parser +######################################################################## + +TD_STRATEGY = [ + LeafInitRule(), + TopDownInitRule(), + CachedTopDownPredictRule(), + SingleEdgeFundamentalRule(), +] +BU_STRATEGY = [ + LeafInitRule(), + EmptyPredictRule(), + BottomUpPredictRule(), + SingleEdgeFundamentalRule(), +] +BU_LC_STRATEGY = [ + LeafInitRule(), + EmptyPredictRule(), + BottomUpPredictCombineRule(), + SingleEdgeFundamentalRule(), +] + +LC_STRATEGY = [ + LeafInitRule(), + FilteredBottomUpPredictCombineRule(), + FilteredSingleEdgeFundamentalRule(), +] + + +class ChartParser(ParserI): + """ + A generic chart parser. A "strategy", or list of + ``ChartRuleI`` instances, is used to decide what edges to add to + the chart. In particular, ``ChartParser`` uses the following + algorithm to parse texts: + + | Until no new edges are added: + | For each *rule* in *strategy*: + | Apply *rule* to any applicable edges in the chart. + | Return any complete parses in the chart + """ + + def __init__( + self, + grammar, + strategy=BU_LC_STRATEGY, + trace=0, + trace_chart_width=50, + use_agenda=True, + chart_class=Chart, + ): + """ + Create a new chart parser, that uses ``grammar`` to parse + texts. + + :type grammar: CFG + :param grammar: The grammar used to parse texts. 
+ :type strategy: list(ChartRuleI) + :param strategy: A list of rules that should be used to decide + what edges to add to the chart (top-down strategy by default). + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + :type trace_chart_width: int + :param trace_chart_width: The default total width reserved for + the chart in trace output. The remainder of each line will + be used to display edges. + :type use_agenda: bool + :param use_agenda: Use an optimized agenda-based algorithm, + if possible. + :param chart_class: The class that should be used to create + the parse charts. + """ + self._grammar = grammar + self._strategy = strategy + self._trace = trace + self._trace_chart_width = trace_chart_width + # If the strategy only consists of axioms (NUM_EDGES==0) and + # inference rules (NUM_EDGES==1), we can use an agenda-based algorithm: + self._use_agenda = use_agenda + self._chart_class = chart_class + + self._axioms = [] + self._inference_rules = [] + for rule in strategy: + if rule.NUM_EDGES == 0: + self._axioms.append(rule) + elif rule.NUM_EDGES == 1: + self._inference_rules.append(rule) + else: + self._use_agenda = False + + def grammar(self): + return self._grammar + + def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width): + if not trace: + return + print_rule_header = trace > 1 + for edge in new_edges: + if print_rule_header: + print("%s:" % rule) + print_rule_header = False + print(chart.pretty_format_edge(edge, edge_width)) + + def chart_parse(self, tokens, trace=None): + """ + Return the final parse ``Chart`` from which all possible + parse trees can be extracted. + + :param tokens: The sentence to be parsed + :type tokens: list(str) + :rtype: Chart + """ + if trace is None: + trace = self._trace + trace_new_edges = self._trace_new_edges + + tokens = list(tokens) + self._grammar.check_coverage(tokens) + chart = self._chart_class(tokens) + grammar = self._grammar + + # Width, for printing trace edges. + trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1) + if trace: + print(chart.pretty_format_leaves(trace_edge_width)) + + if self._use_agenda: + # Use an agenda-based algorithm. + for axiom in self._axioms: + new_edges = list(axiom.apply(chart, grammar)) + trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width) + + inference_rules = self._inference_rules + agenda = chart.edges() + # We reverse the initial agenda, since it is a stack + # but chart.edges() functions as a queue. + agenda.reverse() + while agenda: + edge = agenda.pop() + for rule in inference_rules: + new_edges = list(rule.apply(chart, grammar, edge)) + if trace: + trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) + agenda += new_edges + + else: + # Do not use an agenda-based algorithm. + edges_added = True + while edges_added: + edges_added = False + for rule in self._strategy: + new_edges = list(rule.apply_everywhere(chart, grammar)) + edges_added = len(new_edges) + trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) + + # Return the final chart. + return chart + + def parse(self, tokens, tree_class=Tree): + chart = self.chart_parse(tokens) + return iter(chart.parses(self._grammar.start(), tree_class=tree_class)) + + +class TopDownChartParser(ChartParser): + """ + A ``ChartParser`` using a top-down parsing strategy. + See ``ChartParser`` for more information. 
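+
+    Usage sketch (illustrative; ``grammar`` as built by ``demo_grammar()``)::
+
+        parser = TopDownChartParser(grammar)
+        for tree in parser.parse("I saw John".split()):
+            print(tree)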
+ """ + + def __init__(self, grammar, **parser_args): + ChartParser.__init__(self, grammar, TD_STRATEGY, **parser_args) + + +class BottomUpChartParser(ChartParser): + """ + A ``ChartParser`` using a bottom-up parsing strategy. + See ``ChartParser`` for more information. + """ + + def __init__(self, grammar, **parser_args): + if isinstance(grammar, PCFG): + warnings.warn( + "BottomUpChartParser only works for CFG, " + "use BottomUpProbabilisticChartParser instead", + category=DeprecationWarning, + ) + ChartParser.__init__(self, grammar, BU_STRATEGY, **parser_args) + + +class BottomUpLeftCornerChartParser(ChartParser): + """ + A ``ChartParser`` using a bottom-up left-corner parsing strategy. + This strategy is often more efficient than standard bottom-up. + See ``ChartParser`` for more information. + """ + + def __init__(self, grammar, **parser_args): + ChartParser.__init__(self, grammar, BU_LC_STRATEGY, **parser_args) + + +class LeftCornerChartParser(ChartParser): + def __init__(self, grammar, **parser_args): + if not grammar.is_nonempty(): + raise ValueError( + "LeftCornerParser only works for grammars " "without empty productions." + ) + ChartParser.__init__(self, grammar, LC_STRATEGY, **parser_args) + + +######################################################################## +## Stepping Chart Parser +######################################################################## + + +class SteppingChartParser(ChartParser): + """ + A ``ChartParser`` that allows you to step through the parsing + process, adding a single edge at a time. It also allows you to + change the parser's strategy or grammar midway through parsing a + text. + + The ``initialize`` method is used to start parsing a text. ``step`` + adds a single edge to the chart. ``set_strategy`` changes the + strategy used by the chart parser. ``parses`` returns the set of + parses that has been found by the chart parser. + + :ivar _restart: Records whether the parser's strategy, grammar, + or chart has been changed. If so, then ``step`` must restart + the parsing algorithm. + """ + + def __init__(self, grammar, strategy=[], trace=0): + self._chart = None + self._current_chartrule = None + self._restart = False + ChartParser.__init__(self, grammar, strategy, trace) + + # //////////////////////////////////////////////////////////// + # Initialization + # //////////////////////////////////////////////////////////// + + def initialize(self, tokens): + "Begin parsing the given tokens." + self._chart = Chart(list(tokens)) + self._restart = True + + # //////////////////////////////////////////////////////////// + # Stepping + # //////////////////////////////////////////////////////////// + + def step(self): + """ + Return a generator that adds edges to the chart, one at a + time. Each time the generator is resumed, it adds a single + edge and yields that edge. If no more edges can be added, + then it yields None. + + If the parser's strategy, grammar, or chart is changed, then + the generator will continue adding edges using the new + strategy, grammar, or chart. + + Note that this generator never terminates, since the grammar + or strategy might be changed to values that would add new + edges. Instead, it yields None when no more edges can be + added with the current strategy and grammar. 
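+
+        A typical driving loop (illustrative; mirrors the demo below)::
+
+            parser.initialize(tokens)
+            for edge in parser.step():
+                if edge is None:
+                    break  # exhausted under the current strategy/grammar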
+ """ + if self._chart is None: + raise ValueError("Parser must be initialized first") + while True: + self._restart = False + w = 50 // (self._chart.num_leaves() + 1) + + for e in self._parse(): + if self._trace > 1: + print(self._current_chartrule) + if self._trace > 0: + print(self._chart.pretty_format_edge(e, w)) + yield e + if self._restart: + break + else: + yield None # No more edges. + + def _parse(self): + """ + A generator that implements the actual parsing algorithm. + ``step`` iterates through this generator, and restarts it + whenever the parser's strategy, grammar, or chart is modified. + """ + chart = self._chart + grammar = self._grammar + edges_added = 1 + while edges_added > 0: + edges_added = 0 + for rule in self._strategy: + self._current_chartrule = rule + for e in rule.apply_everywhere(chart, grammar): + edges_added += 1 + yield e + + # //////////////////////////////////////////////////////////// + # Accessors + # //////////////////////////////////////////////////////////// + + def strategy(self): + "Return the strategy used by this parser." + return self._strategy + + def grammar(self): + "Return the grammar used by this parser." + return self._grammar + + def chart(self): + "Return the chart that is used by this parser." + return self._chart + + def current_chartrule(self): + "Return the chart rule used to generate the most recent edge." + return self._current_chartrule + + def parses(self, tree_class=Tree): + "Return the parse trees currently contained in the chart." + return self._chart.parses(self._grammar.start(), tree_class) + + # //////////////////////////////////////////////////////////// + # Parser modification + # //////////////////////////////////////////////////////////// + + def set_strategy(self, strategy): + """ + Change the strategy that the parser uses to decide which edges + to add to the chart. + + :type strategy: list(ChartRuleI) + :param strategy: A list of rules that should be used to decide + what edges to add to the chart. + """ + if strategy == self._strategy: + return + self._strategy = strategy[:] # Make a copy. + self._restart = True + + def set_grammar(self, grammar): + "Change the grammar used by the parser." + if grammar is self._grammar: + return + self._grammar = grammar + self._restart = True + + def set_chart(self, chart): + "Load a given chart into the chart parser." + if chart is self._chart: + return + self._chart = chart + self._restart = True + + # //////////////////////////////////////////////////////////// + # Standard parser methods + # //////////////////////////////////////////////////////////// + + def parse(self, tokens, tree_class=Tree): + tokens = list(tokens) + self._grammar.check_coverage(tokens) + + # Initialize ourselves. + self.initialize(tokens) + + # Step until no more edges are generated. + for e in self.step(): + if e is None: + break + + # Return an iterator of complete parses. 
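+        # (parses() reads the complete start-symbol edges that span the
+        # whole chart and lazily rebuilds their trees via Chart.trees().)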
+ return self.parses(tree_class=tree_class) + + +######################################################################## +## Demo Code +######################################################################## + + +def demo_grammar(): + from nltk.grammar import CFG + + return CFG.fromstring( + """ +S -> NP VP +PP -> "with" NP +NP -> NP PP +VP -> VP PP +VP -> Verb NP +VP -> Verb +NP -> Det Noun +NP -> "John" +NP -> "I" +Det -> "the" +Det -> "my" +Det -> "a" +Noun -> "dog" +Noun -> "cookie" +Verb -> "ate" +Verb -> "saw" +Prep -> "with" +Prep -> "under" +""" + ) + + +def demo( + choice=None, + print_times=True, + print_grammar=False, + print_trees=True, + trace=2, + sent="I saw John with a dog with my cookie", + numparses=5, +): + """ + A demonstration of the chart parsers. + """ + import sys + import time + + from nltk import CFG, Production, nonterminals + + # The grammar for ChartParser and SteppingChartParser: + grammar = demo_grammar() + if print_grammar: + print("* Grammar") + print(grammar) + + # Tokenize the sample sentence. + print("* Sentence:") + print(sent) + tokens = sent.split() + print(tokens) + print() + + # Ask the user which parser to test, + # if the parser wasn't provided as an argument + if choice is None: + print(" 1: Top-down chart parser") + print(" 2: Bottom-up chart parser") + print(" 3: Bottom-up left-corner chart parser") + print(" 4: Left-corner chart parser with bottom-up filter") + print(" 5: Stepping chart parser (alternating top-down & bottom-up)") + print(" 6: All parsers") + print("\nWhich parser (1-6)? ", end=" ") + choice = sys.stdin.readline().strip() + print() + + choice = str(choice) + if choice not in "123456": + print("Bad parser number") + return + + # Keep track of how long each parser takes. + times = {} + + strategies = { + "1": ("Top-down", TD_STRATEGY), + "2": ("Bottom-up", BU_STRATEGY), + "3": ("Bottom-up left-corner", BU_LC_STRATEGY), + "4": ("Filtered left-corner", LC_STRATEGY), + } + choices = [] + if choice in strategies: + choices = [choice] + if choice == "6": + choices = "1234" + + # Run the requested chart parser(s), except the stepping parser. + for strategy in choices: + print("* Strategy: " + strategies[strategy][0]) + print() + cp = ChartParser(grammar, strategies[strategy][1], trace=trace) + t = time.time() + chart = cp.chart_parse(tokens) + parses = list(chart.parses(grammar.start())) + + times[strategies[strategy][0]] = time.time() - t + print("Nr edges in chart:", len(chart.edges())) + if numparses: + assert len(parses) == numparses, "Not all parses found" + if print_trees: + for tree in parses: + print(tree) + else: + print("Nr trees:", len(parses)) + print() + + # Run the stepping parser, if requested. 
+ if choice in "56": + print("* Strategy: Stepping (top-down vs bottom-up)") + print() + t = time.time() + cp = SteppingChartParser(grammar, trace=trace) + cp.initialize(tokens) + for i in range(5): + print("*** SWITCH TO TOP DOWN") + cp.set_strategy(TD_STRATEGY) + for j, e in enumerate(cp.step()): + if j > 20 or e is None: + break + print("*** SWITCH TO BOTTOM UP") + cp.set_strategy(BU_STRATEGY) + for j, e in enumerate(cp.step()): + if j > 20 or e is None: + break + times["Stepping"] = time.time() - t + print("Nr edges in chart:", len(cp.chart().edges())) + if numparses: + assert len(list(cp.parses())) == numparses, "Not all parses found" + if print_trees: + for tree in cp.parses(): + print(tree) + else: + print("Nr trees:", len(list(cp.parses()))) + print() + + # Print the times of all parsers: + if not (print_times and times): + return + print("* Parsing times") + print() + maxlen = max(len(key) for key in times) + format = "%" + repr(maxlen) + "s parser: %6.3fsec" + times_items = times.items() + for (parser, t) in sorted(times_items, key=lambda a: a[1]): + print(format % (parser, t)) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/parse/dependencygraph.py b/venv/lib/python3.10/site-packages/nltk/parse/dependencygraph.py new file mode 100644 index 0000000000000000000000000000000000000000..7300f0596baf3a6fdae5a4183aca862c1aa09ea8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/dependencygraph.py @@ -0,0 +1,799 @@ +# Natural Language Toolkit: Dependency Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jason Narad +# Steven Bird (modifications) +# +# URL: +# For license information, see LICENSE.TXT +# + +""" +Tools for reading and writing dependency trees. +The input is assumed to be in Malt-TAB format +(https://stp.lingfil.uu.se/~nivre/research/MaltXML.html). +""" + +import subprocess +import warnings +from collections import defaultdict +from itertools import chain +from pprint import pformat + +from nltk.internals import find_binary +from nltk.tree import Tree + +################################################################# +# DependencyGraph Class +################################################################# + + +class DependencyGraph: + """ + A container for the nodes and labelled edges of a dependency structure. + """ + + def __init__( + self, + tree_str=None, + cell_extractor=None, + zero_based=False, + cell_separator=None, + top_relation_label="ROOT", + ): + """Dependency graph. + + We place a dummy `TOP` node with the index 0, since the root node is + often assigned 0 as its head. This also means that the indexing of the + nodes corresponds directly to the Malt-TAB format, which starts at 1. + + If zero-based is True, then Malt-TAB-like input with node numbers + starting at 0 and the root node assigned -1 (as produced by, e.g., + zpar). + + :param str cell_separator: the cell separator. If not provided, cells + are split by whitespace. + + :param str top_relation_label: the label by which the top relation is + identified, for examlple, `ROOT`, `null` or `TOP`. 
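+
+        A minimal construction sketch (3-column Malt-TAB input;
+        illustrative)::
+
+            dg = DependencyGraph('John N 2\\nloves V 0\\nMary N 2')
+            dg.tree().pprint()  # (loves John Mary)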
+ """ + self.nodes = defaultdict( + lambda: { + "address": None, + "word": None, + "lemma": None, + "ctag": None, + "tag": None, + "feats": None, + "head": None, + "deps": defaultdict(list), + "rel": None, + } + ) + + self.nodes[0].update({"ctag": "TOP", "tag": "TOP", "address": 0}) + + self.root = None + + if tree_str: + self._parse( + tree_str, + cell_extractor=cell_extractor, + zero_based=zero_based, + cell_separator=cell_separator, + top_relation_label=top_relation_label, + ) + + def remove_by_address(self, address): + """ + Removes the node with the given address. References + to this node in others will still exist. + """ + del self.nodes[address] + + def redirect_arcs(self, originals, redirect): + """ + Redirects arcs to any of the nodes in the originals list + to the redirect node address. + """ + for node in self.nodes.values(): + new_deps = [] + for dep in node["deps"]: + if dep in originals: + new_deps.append(redirect) + else: + new_deps.append(dep) + node["deps"] = new_deps + + def add_arc(self, head_address, mod_address): + """ + Adds an arc from the node specified by head_address to the + node specified by the mod address. + """ + relation = self.nodes[mod_address]["rel"] + self.nodes[head_address]["deps"].setdefault(relation, []) + self.nodes[head_address]["deps"][relation].append(mod_address) + # self.nodes[head_address]['deps'].append(mod_address) + + def connect_graph(self): + """ + Fully connects all non-root nodes. All nodes are set to be dependents + of the root node. + """ + for node1 in self.nodes.values(): + for node2 in self.nodes.values(): + if node1["address"] != node2["address"] and node2["rel"] != "TOP": + relation = node2["rel"] + node1["deps"].setdefault(relation, []) + node1["deps"][relation].append(node2["address"]) + # node1['deps'].append(node2['address']) + + def get_by_address(self, node_address): + """Return the node with the given address.""" + return self.nodes[node_address] + + def contains_address(self, node_address): + """ + Returns true if the graph contains a node with the given node + address, false otherwise. + """ + return node_address in self.nodes + + def to_dot(self): + """Return a dot representation suitable for using with Graphviz. + + >>> dg = DependencyGraph( + ... 'John N 2\\n' + ... 'loves V 0\\n' + ... 'Mary N 2' + ... ) + >>> print(dg.to_dot()) + digraph G{ + edge [dir=forward] + node [shape=plaintext] + + 0 [label="0 (None)"] + 0 -> 2 [label="ROOT"] + 1 [label="1 (John)"] + 2 [label="2 (loves)"] + 2 -> 1 [label=""] + 2 -> 3 [label=""] + 3 [label="3 (Mary)"] + } + + """ + # Start the digraph specification + s = "digraph G{\n" + s += "edge [dir=forward]\n" + s += "node [shape=plaintext]\n" + + # Draw the remaining nodes + for node in sorted(self.nodes.values(), key=lambda v: v["address"]): + s += '\n{} [label="{} ({})"]'.format( + node["address"], + node["address"], + node["word"], + ) + for rel, deps in node["deps"].items(): + for dep in deps: + if rel is not None: + s += '\n{} -> {} [label="{}"]'.format(node["address"], dep, rel) + else: + s += "\n{} -> {} ".format(node["address"], dep) + s += "\n}" + + return s + + def _repr_svg_(self): + """Show SVG representation of the transducer (IPython magic). + >>> from nltk.test.setup_fixt import check_binary + >>> check_binary('dot') + >>> dg = DependencyGraph( + ... 'John N 2\\n' + ... 'loves V 0\\n' + ... 'Mary N 2' + ... 
) + >>> dg._repr_svg_().split('\\n')[0] + '' + + """ + dot_string = self.to_dot() + return dot2img(dot_string) + + def __str__(self): + return pformat(self.nodes) + + def __repr__(self): + return f"" + + @staticmethod + def load( + filename, zero_based=False, cell_separator=None, top_relation_label="ROOT" + ): + """ + :param filename: a name of a file in Malt-TAB format + :param zero_based: nodes in the input file are numbered starting from 0 + rather than 1 (as produced by, e.g., zpar) + :param str cell_separator: the cell separator. If not provided, cells + are split by whitespace. + :param str top_relation_label: the label by which the top relation is + identified, for examlple, `ROOT`, `null` or `TOP`. + + :return: a list of DependencyGraphs + + """ + with open(filename) as infile: + return [ + DependencyGraph( + tree_str, + zero_based=zero_based, + cell_separator=cell_separator, + top_relation_label=top_relation_label, + ) + for tree_str in infile.read().split("\n\n") + ] + + def left_children(self, node_index): + """ + Returns the number of left children under the node specified + by the given address. + """ + children = chain.from_iterable(self.nodes[node_index]["deps"].values()) + index = self.nodes[node_index]["address"] + return sum(1 for c in children if c < index) + + def right_children(self, node_index): + """ + Returns the number of right children under the node specified + by the given address. + """ + children = chain.from_iterable(self.nodes[node_index]["deps"].values()) + index = self.nodes[node_index]["address"] + return sum(1 for c in children if c > index) + + def add_node(self, node): + if not self.contains_address(node["address"]): + self.nodes[node["address"]].update(node) + + def _parse( + self, + input_, + cell_extractor=None, + zero_based=False, + cell_separator=None, + top_relation_label="ROOT", + ): + """Parse a sentence. + + :param extractor: a function that given a tuple of cells returns a + 7-tuple, where the values are ``word, lemma, ctag, tag, feats, head, + rel``. + + :param str cell_separator: the cell separator. If not provided, cells + are split by whitespace. + + :param str top_relation_label: the label by which the top relation is + identified, for examlple, `ROOT`, `null` or `TOP`. 
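+
+        Sketch of a custom ``cell_extractor`` (hypothetical two-column
+        ``word head`` input)::
+
+            def extract_2_cells(cells, index):
+                word, head = cells
+                return index, word, word, '', '', '', head, ''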
+ + """ + + def extract_3_cells(cells, index): + word, tag, head = cells + return index, word, word, tag, tag, "", head, "" + + def extract_4_cells(cells, index): + word, tag, head, rel = cells + return index, word, word, tag, tag, "", head, rel + + def extract_7_cells(cells, index): + line_index, word, lemma, tag, _, head, rel = cells + try: + index = int(line_index) + except ValueError: + # index can't be parsed as an integer, use default + pass + return index, word, lemma, tag, tag, "", head, rel + + def extract_10_cells(cells, index): + line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells + try: + index = int(line_index) + except ValueError: + # index can't be parsed as an integer, use default + pass + return index, word, lemma, ctag, tag, feats, head, rel + + extractors = { + 3: extract_3_cells, + 4: extract_4_cells, + 7: extract_7_cells, + 10: extract_10_cells, + } + + if isinstance(input_, str): + input_ = (line for line in input_.split("\n")) + + lines = (l.rstrip() for l in input_) + lines = (l for l in lines if l) + + cell_number = None + for index, line in enumerate(lines, start=1): + cells = line.split(cell_separator) + if cell_number is None: + cell_number = len(cells) + else: + assert cell_number == len(cells) + + if cell_extractor is None: + try: + cell_extractor = extractors[cell_number] + except KeyError as e: + raise ValueError( + "Number of tab-delimited fields ({}) not supported by " + "CoNLL(10) or Malt-Tab(4) format".format(cell_number) + ) from e + + try: + index, word, lemma, ctag, tag, feats, head, rel = cell_extractor( + cells, index + ) + except (TypeError, ValueError): + # cell_extractor doesn't take 2 arguments or doesn't return 8 + # values; assume the cell_extractor is an older external + # extractor and doesn't accept or return an index. + word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells) + + if head == "_": + continue + + head = int(head) + if zero_based: + head += 1 + + self.nodes[index].update( + { + "address": index, + "word": word, + "lemma": lemma, + "ctag": ctag, + "tag": tag, + "feats": feats, + "head": head, + "rel": rel, + } + ) + + # Make sure that the fake root node has labeled dependencies. + if (cell_number == 3) and (head == 0): + rel = top_relation_label + self.nodes[head]["deps"][rel].append(index) + + if self.nodes[0]["deps"][top_relation_label]: + root_address = self.nodes[0]["deps"][top_relation_label][0] + self.root = self.nodes[root_address] + self.top_relation_label = top_relation_label + else: + warnings.warn( + "The graph doesn't contain a node " "that depends on the root element." + ) + + def _word(self, node, filter=True): + w = node["word"] + if filter: + if w != ",": + return w + return w + + def _tree(self, i): + """Turn dependency graphs into NLTK trees. + + :param int i: index of a node + :return: either a word (if the indexed node is a leaf) or a ``Tree``. + """ + node = self.get_by_address(i) + word = node["word"] + deps = sorted(chain.from_iterable(node["deps"].values())) + + if deps: + return Tree(word, [self._tree(dep) for dep in deps]) + else: + return word + + def tree(self): + """ + Starting with the ``root`` node, build a dependency tree using the NLTK + ``Tree`` constructor. Dependency labels are omitted. 
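+
+        Usage sketch (illustrative; ``conll_data1`` is the sample defined
+        at the bottom of this module)::
+
+            dg = DependencyGraph(conll_data1)
+            dg.tree().pprint()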
+ """ + node = self.root + + word = node["word"] + deps = sorted(chain.from_iterable(node["deps"].values())) + return Tree(word, [self._tree(dep) for dep in deps]) + + def triples(self, node=None): + """ + Extract dependency triples of the form: + ((head word, head tag), rel, (dep word, dep tag)) + """ + + if not node: + node = self.root + + head = (node["word"], node["ctag"]) + for i in sorted(chain.from_iterable(node["deps"].values())): + dep = self.get_by_address(i) + yield (head, dep["rel"], (dep["word"], dep["ctag"])) + yield from self.triples(node=dep) + + def _hd(self, i): + try: + return self.nodes[i]["head"] + except IndexError: + return None + + def _rel(self, i): + try: + return self.nodes[i]["rel"] + except IndexError: + return None + + # what's the return type? Boolean or list? + def contains_cycle(self): + """Check whether there are cycles. + + >>> dg = DependencyGraph(treebank_data) + >>> dg.contains_cycle() + False + + >>> cyclic_dg = DependencyGraph() + >>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0} + >>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1} + >>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2} + >>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3} + >>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4} + >>> cyclic_dg.nodes = { + ... 0: top, + ... 1: child1, + ... 2: child2, + ... 3: child3, + ... 4: child4, + ... } + >>> cyclic_dg.root = top + + >>> cyclic_dg.contains_cycle() + [1, 2, 4, 3] + + """ + distances = {} + + for node in self.nodes.values(): + for dep in node["deps"]: + key = tuple([node["address"], dep]) + distances[key] = 1 + + for _ in self.nodes: + new_entries = {} + + for pair1 in distances: + for pair2 in distances: + if pair1[1] == pair2[0]: + key = tuple([pair1[0], pair2[1]]) + new_entries[key] = distances[pair1] + distances[pair2] + + for pair in new_entries: + distances[pair] = new_entries[pair] + if pair[0] == pair[1]: + path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0]) + return path + + return False # return []? + + def get_cycle_path(self, curr_node, goal_node_index): + for dep in curr_node["deps"]: + if dep == goal_node_index: + return [curr_node["address"]] + for dep in curr_node["deps"]: + path = self.get_cycle_path(self.get_by_address(dep), goal_node_index) + if len(path) > 0: + path.insert(0, curr_node["address"]) + return path + return [] + + def to_conll(self, style): + """ + The dependency graph in CoNLL format. 
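+
+        For example, ``dg.to_conll(4)`` emits one tab-separated
+        ``word tag head rel`` line per node (the dummy TOP node is
+        excluded).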
+ + :param style: the style to use for the format (3, 4, 10 columns) + :type style: int + :rtype: str + """ + + if style == 3: + template = "{word}\t{tag}\t{head}\n" + elif style == 4: + template = "{word}\t{tag}\t{head}\t{rel}\n" + elif style == 10: + template = ( + "{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n" + ) + else: + raise ValueError( + "Number of tab-delimited fields ({}) not supported by " + "CoNLL(10) or Malt-Tab(4) format".format(style) + ) + + return "".join( + template.format(i=i, **node) + for i, node in sorted(self.nodes.items()) + if node["tag"] != "TOP" + ) + + def nx_graph(self): + """Convert the data in a ``nodelist`` into a networkx labeled directed graph.""" + import networkx + + nx_nodelist = list(range(1, len(self.nodes))) + nx_edgelist = [ + (n, self._hd(n), self._rel(n)) for n in nx_nodelist if self._hd(n) + ] + self.nx_labels = {} + for n in nx_nodelist: + self.nx_labels[n] = self.nodes[n]["word"] + + g = networkx.MultiDiGraph() + g.add_nodes_from(nx_nodelist) + g.add_edges_from(nx_edgelist) + + return g + + +def dot2img(dot_string, t="svg"): + """ + Create image representation fom dot_string, using the 'dot' program + from the Graphviz package. + + Use the 't' argument to specify the image file format, for ex. 'jpeg', 'eps', + 'json', 'png' or 'webp' (Running 'dot -T:' lists all available formats). + + Note that the "capture_output" option of subprocess.run() is only available + with text formats (like svg), but not with binary image formats (like png). + """ + + try: + find_binary("dot") + try: + if t in ["dot", "dot_json", "json", "svg"]: + proc = subprocess.run( + ["dot", "-T%s" % t], + capture_output=True, + input=dot_string, + text=True, + ) + else: + proc = subprocess.run( + ["dot", "-T%s" % t], + input=bytes(dot_string, encoding="utf8"), + ) + return proc.stdout + except: + raise Exception( + "Cannot create image representation by running dot from string: {}" + "".format(dot_string) + ) + except OSError as e: + raise Exception("Cannot find the dot binary from Graphviz package") from e + + +class DependencyGraphError(Exception): + """Dependency graph exception.""" + + +def demo(): + malt_demo() + conll_demo() + conll_file_demo() + cycle_finding_demo() + + +def malt_demo(nx=False): + """ + A demonstration of the result of reading a dependency + version of the first sentence of the Penn Treebank. + """ + dg = DependencyGraph( + """Pierre NNP 2 NMOD +Vinken NNP 8 SUB +, , 2 P +61 CD 5 NMOD +years NNS 6 AMOD +old JJ 2 NMOD +, , 2 P +will MD 0 ROOT +join VB 8 VC +the DT 11 NMOD +board NN 9 OBJ +as IN 9 VMOD +a DT 15 NMOD +nonexecutive JJ 15 NMOD +director NN 12 PMOD +Nov. NNP 9 VMOD +29 CD 16 NMOD +. . 9 VMOD +""" + ) + tree = dg.tree() + tree.pprint() + if nx: + # currently doesn't work + import networkx + from matplotlib import pylab + + g = dg.nx_graph() + g.info() + pos = networkx.spring_layout(g, dim=1) + networkx.draw_networkx_nodes(g, pos, node_size=50) + # networkx.draw_networkx_edges(g, pos, edge_color='k', width=8) + networkx.draw_networkx_labels(g, pos, dg.nx_labels) + pylab.xticks([]) + pylab.yticks([]) + pylab.savefig("tree.png") + pylab.show() + + +def conll_demo(): + """ + A demonstration of how to read a string representation of + a CoNLL format dependency tree. 
+ """ + dg = DependencyGraph(conll_data1) + tree = dg.tree() + tree.pprint() + print(dg) + print(dg.to_conll(4)) + + +def conll_file_demo(): + print("Mass conll_read demo...") + graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] + for graph in graphs: + tree = graph.tree() + print("\n") + tree.pprint() + + +def cycle_finding_demo(): + dg = DependencyGraph(treebank_data) + print(dg.contains_cycle()) + cyclic_dg = DependencyGraph() + cyclic_dg.add_node({"word": None, "deps": [1], "rel": "TOP", "address": 0}) + cyclic_dg.add_node({"word": None, "deps": [2], "rel": "NTOP", "address": 1}) + cyclic_dg.add_node({"word": None, "deps": [4], "rel": "NTOP", "address": 2}) + cyclic_dg.add_node({"word": None, "deps": [1], "rel": "NTOP", "address": 3}) + cyclic_dg.add_node({"word": None, "deps": [3], "rel": "NTOP", "address": 4}) + print(cyclic_dg.contains_cycle()) + + +treebank_data = """Pierre NNP 2 NMOD +Vinken NNP 8 SUB +, , 2 P +61 CD 5 NMOD +years NNS 6 AMOD +old JJ 2 NMOD +, , 2 P +will MD 0 ROOT +join VB 8 VC +the DT 11 NMOD +board NN 9 OBJ +as IN 9 VMOD +a DT 15 NMOD +nonexecutive JJ 15 NMOD +director NN 12 PMOD +Nov. NNP 9 VMOD +29 CD 16 NMOD +. . 9 VMOD +""" + +conll_data1 = """ +1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _ +2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _ +3 met met Prep Prep voor 8 mod _ _ +4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _ +5 moeder moeder N N soort|ev|neut 3 obj1 _ _ +6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _ +7 gaan ga V V hulp|inf 6 vc _ _ +8 winkelen winkel V V intrans|inf 11 cnj _ _ +9 , , Punc Punc komma 8 punct _ _ +10 zwemmen zwem V V intrans|inf 11 cnj _ _ +11 of of Conj Conj neven 7 vc _ _ +12 terrassen terras N N soort|mv|neut 11 cnj _ _ +13 . . Punc Punc punt 12 punct _ _ +""" + +conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _ +2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _ +3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _ +4 wild wild Adj Adj attr|stell|onverv 5 mod _ _ +5 zwaaien zwaai N N soort|mv|neut 2 vc _ _ +6 . . Punc Punc punt 5 punct _ _ + +1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _ +2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _ +3 met met Prep Prep voor 8 mod _ _ +4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _ +5 moeder moeder N N soort|ev|neut 3 obj1 _ _ +6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _ +7 gaan ga V V hulp|inf 6 vc _ _ +8 winkelen winkel V V intrans|inf 11 cnj _ _ +9 , , Punc Punc komma 8 punct _ _ +10 zwemmen zwem V V intrans|inf 11 cnj _ _ +11 of of Conj Conj neven 7 vc _ _ +12 terrassen terras N N soort|mv|neut 11 cnj _ _ +13 . . Punc Punc punt 12 punct _ _ + +1 Dat dat Pron Pron aanw|neut|attr 2 det _ _ +2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _ +3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _ +4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _ +5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _ +6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _ +7 . . Punc Punc punt 6 punct _ _ + +1 Het het Pron Pron onbep|neut|zelfst 2 su _ _ +2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _ +3 bij bij Prep Prep voor 2 ld _ _ +4 de de Art Art bep|zijdofmv|neut 6 det _ _ +5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _ +6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _ +7 die die Pron Pron betr|neut|zelfst 6 mod _ _ +8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _ +9 ginds ginds Adv Adv gew|aanw 12 mod _ _ +10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _ +11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _ +12 gelaten laat V V trans|verldw|onverv 11 vc _ _ +13 . . 
Punc Punc punt 12 punct _ _ + +1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _ +2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _ +3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _ +4 naast naast Prep Prep voor 11 mod _ _ +5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _ +6 op op Prep Prep voor 11 ld _ _ +7 de de Art Art bep|zijdofmv|neut 8 det _ _ +8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _ +9 kunnen kan V V hulp|inf 2 vc _ _ +10 gaan ga V V hulp|inf 9 vc _ _ +11 liggen lig V V intrans|inf 10 vc _ _ +12 . . Punc Punc punt 11 punct _ _ + +1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _ +2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _ +3 mams mams N N soort|ev|neut 4 det _ _ +4 rug rug N N soort|ev|neut 5 obj1 _ _ +5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _ +6 hebben heb V V hulp|inf 2 vc _ _ +7 en en Conj Conj neven 0 ROOT _ _ +8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _ +9 de de Art Art bep|zijdofmv|neut 10 det _ _ +10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _ +11 . . Punc Punc punt 10 punct _ _ + +1 Of of Conj Conj onder|metfin 0 ROOT _ _ +2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _ +3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _ +4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _ +5 met met Prep Prep voor 10 mod _ _ +6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _ +7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _ +8 rond rond Adv Adv deelv 10 svp _ _ +9 kunnen kan V V hulp|inf 3 vc _ _ +10 slenteren slenter V V intrans|inf 9 vc _ _ +11 in in Prep Prep voor 10 mod _ _ +12 de de Art Art bep|zijdofmv|neut 13 det _ _ +13 buurt buurt N N soort|ev|neut 11 obj1 _ _ +14 van van Prep Prep voor 13 mod _ _ +15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _ +16 . . Punc Punc punt 15 punct _ _ +""" + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/parse/pchart.py b/venv/lib/python3.10/site-packages/nltk/parse/pchart.py new file mode 100644 index 0000000000000000000000000000000000000000..319655d023a462c0c6c7ac087746dc77d46b7949 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/pchart.py @@ -0,0 +1,579 @@ +# Natural Language Toolkit: Probabilistic Chart Parsers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Classes and interfaces for associating probabilities with tree +structures that represent the internal organization of a text. The +probabilistic parser module defines ``BottomUpProbabilisticChartParser``. + +``BottomUpProbabilisticChartParser`` is an abstract class that implements +a bottom-up chart parser for ``PCFG`` grammars. It maintains a queue of edges, +and adds them to the chart one at a time. The ordering of this queue +is based on the probabilities associated with the edges, allowing the +parser to expand more likely edges before less likely ones. Each +subclass implements a different queue ordering, producing different +search strategies. Currently the following subclasses are defined: + + - ``InsideChartParser`` searches edges in decreasing order of + their trees' inside probabilities. + - ``RandomChartParser`` searches edges in random order. + - ``LongestChartParser`` searches edges in decreasing order of their + location's length. + +The ``BottomUpProbabilisticChartParser`` constructor has an optional +argument beam_size. If non-zero, this controls the size of the beam +(aka the edge queue). This option is most useful with InsideChartParser. 
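+
+A short usage sketch (illustrative; toy PCFG)::
+
+    from nltk import PCFG
+    from nltk.parse.pchart import InsideChartParser
+
+    toy_pcfg = PCFG.fromstring('''
+        S -> NP VP [1.0]
+        NP -> 'John' [0.5] | 'Mary' [0.5]
+        VP -> V NP [1.0]
+        V -> 'saw' [1.0]
+    ''')
+    parser = InsideChartParser(toy_pcfg, beam_size=20)
+    for tree in parser.parse('John saw Mary'.split()):
+        print(tree.prob(), tree)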
+""" + +##////////////////////////////////////////////////////// +## Bottom-Up PCFG Chart Parser +##////////////////////////////////////////////////////// + +# [XX] This might not be implemented quite right -- it would be better +# to associate probabilities with child pointer lists. + +import random +from functools import reduce + +from nltk.grammar import PCFG, Nonterminal +from nltk.parse.api import ParserI +from nltk.parse.chart import AbstractChartRule, Chart, LeafEdge, TreeEdge +from nltk.tree import ProbabilisticTree, Tree + + +# Probabilistic edges +class ProbabilisticLeafEdge(LeafEdge): + def prob(self): + return 1.0 + + +class ProbabilisticTreeEdge(TreeEdge): + def __init__(self, prob, *args, **kwargs): + TreeEdge.__init__(self, *args, **kwargs) + self._prob = prob + # two edges with different probabilities are not equal. + self._comparison_key = (self._comparison_key, prob) + + def prob(self): + return self._prob + + @staticmethod + def from_production(production, index, p): + return ProbabilisticTreeEdge( + p, (index, index), production.lhs(), production.rhs(), 0 + ) + + +# Rules using probabilistic edges +class ProbabilisticBottomUpInitRule(AbstractChartRule): + NUM_EDGES = 0 + + def apply(self, chart, grammar): + for index in range(chart.num_leaves()): + new_edge = ProbabilisticLeafEdge(chart.leaf(index), index) + if chart.insert(new_edge, ()): + yield new_edge + + +class ProbabilisticBottomUpPredictRule(AbstractChartRule): + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + for prod in grammar.productions(): + if edge.lhs() == prod.rhs()[0]: + new_edge = ProbabilisticTreeEdge.from_production( + prod, edge.start(), prod.prob() + ) + if chart.insert(new_edge, ()): + yield new_edge + + +class ProbabilisticFundamentalRule(AbstractChartRule): + NUM_EDGES = 2 + + def apply(self, chart, grammar, left_edge, right_edge): + # Make sure the rule is applicable. + if not ( + left_edge.end() == right_edge.start() + and left_edge.nextsym() == right_edge.lhs() + and left_edge.is_incomplete() + and right_edge.is_complete() + ): + return + + # Construct the new edge. + p = left_edge.prob() * right_edge.prob() + new_edge = ProbabilisticTreeEdge( + p, + span=(left_edge.start(), right_edge.end()), + lhs=left_edge.lhs(), + rhs=left_edge.rhs(), + dot=left_edge.dot() + 1, + ) + + # Add it to the chart, with appropriate child pointers. + changed_chart = False + for cpl1 in chart.child_pointer_lists(left_edge): + if chart.insert(new_edge, cpl1 + (right_edge,)): + changed_chart = True + + # If we changed the chart, then generate the edge. + if changed_chart: + yield new_edge + + +class SingleEdgeProbabilisticFundamentalRule(AbstractChartRule): + NUM_EDGES = 1 + + _fundamental_rule = ProbabilisticFundamentalRule() + + def apply(self, chart, grammar, edge1): + fr = self._fundamental_rule + if edge1.is_incomplete(): + # edge1 = left_edge; edge2 = right_edge + for edge2 in chart.select( + start=edge1.end(), is_complete=True, lhs=edge1.nextsym() + ): + yield from fr.apply(chart, grammar, edge1, edge2) + else: + # edge2 = left_edge; edge1 = right_edge + for edge2 in chart.select( + end=edge1.start(), is_complete=False, nextsym=edge1.lhs() + ): + yield from fr.apply(chart, grammar, edge2, edge1) + + def __str__(self): + return "Fundamental Rule" + + +class BottomUpProbabilisticChartParser(ParserI): + """ + An abstract bottom-up parser for ``PCFG`` grammars that uses a ``Chart`` to + record partial results. 
``BottomUpProbabilisticChartParser`` maintains + a queue of edges that can be added to the chart. This queue is + initialized with edges for each token in the text that is being + parsed. ``BottomUpProbabilisticChartParser`` inserts these edges into + the chart one at a time, starting with the most likely edges, and + proceeding to less likely edges. For each edge that is added to + the chart, it may become possible to insert additional edges into + the chart; these are added to the queue. This process continues + until enough complete parses have been generated, or until the + queue is empty. + + The sorting order for the queue is not specified by + ``BottomUpProbabilisticChartParser``. Different sorting orders will + result in different search strategies. The sorting order for the + queue is defined by the method ``sort_queue``; subclasses are required + to provide a definition for this method. + + :type _grammar: PCFG + :ivar _grammar: The grammar used to parse sentences. + :type _trace: int + :ivar _trace: The level of tracing output that should be generated + when parsing a text. + """ + + def __init__(self, grammar, beam_size=0, trace=0): + """ + Create a new ``BottomUpProbabilisticChartParser``, that uses + ``grammar`` to parse texts. + + :type grammar: PCFG + :param grammar: The grammar used to parse texts. + :type beam_size: int + :param beam_size: The maximum length for the parser's edge queue. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + """ + if not isinstance(grammar, PCFG): + raise ValueError("The grammar must be probabilistic PCFG") + self._grammar = grammar + self.beam_size = beam_size + self._trace = trace + + def grammar(self): + return self._grammar + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + self._trace = trace + + # TODO: change this to conform more with the standard ChartParser + def parse(self, tokens): + self._grammar.check_coverage(tokens) + chart = Chart(list(tokens)) + grammar = self._grammar + + # Chart parser rules. + bu_init = ProbabilisticBottomUpInitRule() + bu = ProbabilisticBottomUpPredictRule() + fr = SingleEdgeProbabilisticFundamentalRule() + + # Our queue + queue = [] + + # Initialize the chart. + for edge in bu_init.apply(chart, grammar): + if self._trace > 1: + print( + " %-50s [%s]" + % (chart.pretty_format_edge(edge, width=2), edge.prob()) + ) + queue.append(edge) + + while len(queue) > 0: + # Re-sort the queue. + self.sort_queue(queue, chart) + + # Prune the queue to the correct size if a beam was defined + if self.beam_size: + self._prune(queue, chart) + + # Get the best edge. + edge = queue.pop() + if self._trace > 0: + print( + " %-50s [%s]" + % (chart.pretty_format_edge(edge, width=2), edge.prob()) + ) + + # Apply BU & FR to it. + queue.extend(bu.apply(chart, grammar, edge)) + queue.extend(fr.apply(chart, grammar, edge)) + + # Get a list of complete parses. + parses = list(chart.parses(grammar.start(), ProbabilisticTree)) + + # Assign probabilities to the trees. 
+ prod_probs = {} + for prod in grammar.productions(): + prod_probs[prod.lhs(), prod.rhs()] = prod.prob() + for parse in parses: + self._setprob(parse, prod_probs) + + # Sort by probability + parses.sort(reverse=True, key=lambda tree: tree.prob()) + + return iter(parses) + + def _setprob(self, tree, prod_probs): + if tree.prob() is not None: + return + + # Get the prob of the CFG production. + lhs = Nonterminal(tree.label()) + rhs = [] + for child in tree: + if isinstance(child, Tree): + rhs.append(Nonterminal(child.label())) + else: + rhs.append(child) + prob = prod_probs[lhs, tuple(rhs)] + + # Get the probs of children. + for child in tree: + if isinstance(child, Tree): + self._setprob(child, prod_probs) + prob *= child.prob() + + tree.set_prob(prob) + + def sort_queue(self, queue, chart): + """ + Sort the given queue of ``Edge`` objects, placing the edge that should + be tried first at the beginning of the queue. This method + will be called after each ``Edge`` is added to the queue. + + :param queue: The queue of ``Edge`` objects to sort. Each edge in + this queue is an edge that could be added to the chart by + the fundamental rule; but that has not yet been added. + :type queue: list(Edge) + :param chart: The chart being used to parse the text. This + chart can be used to provide extra information for sorting + the queue. + :type chart: Chart + :rtype: None + """ + raise NotImplementedError() + + def _prune(self, queue, chart): + """Discard items in the queue if the queue is longer than the beam.""" + if len(queue) > self.beam_size: + split = len(queue) - self.beam_size + if self._trace > 2: + for edge in queue[:split]: + print(" %-50s [DISCARDED]" % chart.pretty_format_edge(edge, 2)) + del queue[:split] + + +class InsideChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries edges in descending + order of the inside probabilities of their trees. The "inside + probability" of a tree is simply the + probability of the entire tree, ignoring its context. In + particular, the inside probability of a tree generated by + production *p* with children *c[1], c[2], ..., c[n]* is + *P(p)P(c[1])P(c[2])...P(c[n])*; and the inside + probability of a token is 1 if it is present in the text, and 0 if + it is absent. + + This sorting order results in a type of lowest-cost-first search + strategy. + """ + + # Inherit constructor. + def sort_queue(self, queue, chart): + """ + Sort the given queue of edges, in descending order of the + inside probabilities of the edges' trees. + + :param queue: The queue of ``Edge`` objects to sort. Each edge in + this queue is an edge that could be added to the chart by + the fundamental rule; but that has not yet been added. + :type queue: list(Edge) + :param chart: The chart being used to parse the text. This + chart can be used to provide extra information for sorting + the queue. + :type chart: Chart + :rtype: None + """ + queue.sort(key=lambda edge: edge.prob()) + + +# Eventually, this will become some sort of inside-outside parser: +# class InsideOutsideParser(BottomUpProbabilisticChartParser): +# def __init__(self, grammar, trace=0): +# # Inherit docs. 
+# BottomUpProbabilisticChartParser.__init__(self, grammar, trace) +# +# # Find the best path from S to each nonterminal +# bestp = {} +# for production in grammar.productions(): bestp[production.lhs()]=0 +# bestp[grammar.start()] = 1.0 +# +# for i in range(len(grammar.productions())): +# for production in grammar.productions(): +# lhs = production.lhs() +# for elt in production.rhs(): +# bestp[elt] = max(bestp[lhs]*production.prob(), +# bestp.get(elt,0)) +# +# self._bestp = bestp +# for (k,v) in self._bestp.items(): print(k,v) +# +# def _sortkey(self, edge): +# return edge.structure()[PROB] * self._bestp[edge.lhs()] +# +# def sort_queue(self, queue, chart): +# queue.sort(key=self._sortkey) + + +class RandomChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries edges in random order. + This sorting order results in a random search strategy. + """ + + # Inherit constructor + def sort_queue(self, queue, chart): + i = random.randint(0, len(queue) - 1) + (queue[-1], queue[i]) = (queue[i], queue[-1]) + + +class UnsortedChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries edges in whatever order. + """ + + # Inherit constructor + def sort_queue(self, queue, chart): + return + + +class LongestChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries longer edges before + shorter ones. This sorting order results in a type of best-first + search strategy. + """ + + # Inherit constructor + def sort_queue(self, queue, chart): + queue.sort(key=lambda edge: edge.length()) + + +##////////////////////////////////////////////////////// +## Test Code +##////////////////////////////////////////////////////// + + +def demo(choice=None, draw_parses=None, print_parses=None): + """ + A demonstration of the probabilistic parsers. The user is + prompted to select which demo to run, and how many parses should + be found; and then each parser is run on the same demo, and a + summary of the results are displayed. + """ + import sys + import time + + from nltk import tokenize + from nltk.parse import pchart + + # Define two demos. Each demo has a sentence and a grammar. + toy_pcfg1 = PCFG.fromstring( + """ + S -> NP VP [1.0] + NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] + Det -> 'the' [0.8] | 'my' [0.2] + N -> 'man' [0.5] | 'telescope' [0.5] + VP -> VP PP [0.1] | V NP [0.7] | V [0.2] + V -> 'ate' [0.35] | 'saw' [0.65] + PP -> P NP [1.0] + P -> 'with' [0.61] | 'under' [0.39] + """ + ) + + toy_pcfg2 = PCFG.fromstring( + """ + S -> NP VP [1.0] + VP -> V NP [.59] + VP -> V [.40] + VP -> VP PP [.01] + NP -> Det N [.41] + NP -> Name [.28] + NP -> NP PP [.31] + PP -> P NP [1.0] + V -> 'saw' [.21] + V -> 'ate' [.51] + V -> 'ran' [.28] + N -> 'boy' [.11] + N -> 'cookie' [.12] + N -> 'table' [.13] + N -> 'telescope' [.14] + N -> 'hill' [.5] + Name -> 'Jack' [.52] + Name -> 'Bob' [.48] + P -> 'with' [.61] + P -> 'under' [.39] + Det -> 'the' [.41] + Det -> 'a' [.31] + Det -> 'my' [.28] + """ + ) + + demos = [ + ("I saw John with my telescope", toy_pcfg1), + ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2), + ] + + if choice is None: + # Ask the user which demo they want to use. + print() + for i in range(len(demos)): + print(f"{i + 1:>3}: {demos[i][0]}") + print(" %r" % demos[i][1]) + print() + print("Which demo (%d-%d)? 
" % (1, len(demos)), end=" ") + choice = int(sys.stdin.readline().strip()) - 1 + try: + sent, grammar = demos[choice] + except: + print("Bad sentence number") + return + + # Tokenize the sentence. + tokens = sent.split() + + # Define a list of parsers. We'll use all parsers. + parsers = [ + pchart.InsideChartParser(grammar), + pchart.RandomChartParser(grammar), + pchart.UnsortedChartParser(grammar), + pchart.LongestChartParser(grammar), + pchart.InsideChartParser(grammar, beam_size=len(tokens) + 1), # was BeamParser + ] + + # Run the parsers on the tokenized sentence. + times = [] + average_p = [] + num_parses = [] + all_parses = {} + for parser in parsers: + print(f"\ns: {sent}\nparser: {parser}\ngrammar: {grammar}") + parser.trace(3) + t = time.time() + parses = list(parser.parse(tokens)) + times.append(time.time() - t) + p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0 + average_p.append(p) + num_parses.append(len(parses)) + for p in parses: + all_parses[p.freeze()] = 1 + + # Print some summary statistics + print() + print(" Parser Beam | Time (secs) # Parses Average P(parse)") + print("------------------------+------------------------------------------") + for i in range(len(parsers)): + print( + "%18s %4d |%11.4f%11d%19.14f" + % ( + parsers[i].__class__.__name__, + parsers[i].beam_size, + times[i], + num_parses[i], + average_p[i], + ) + ) + parses = all_parses.keys() + if parses: + p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) + else: + p = 0 + print("------------------------+------------------------------------------") + print("%18s |%11s%11d%19.14f" % ("(All Parses)", "n/a", len(parses), p)) + + if draw_parses is None: + # Ask the user if we should draw the parses. + print() + print("Draw parses (y/n)? ", end=" ") + draw_parses = sys.stdin.readline().strip().lower().startswith("y") + if draw_parses: + from nltk.draw.tree import draw_trees + + print(" please wait...") + draw_trees(*parses) + + if print_parses is None: + # Ask the user if we should print the parses. + print() + print("Print parses (y/n)? ", end=" ") + print_parses = sys.stdin.readline().strip().lower().startswith("y") + if print_parses: + for parse in parses: + print(parse) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py b/venv/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py new file mode 100644 index 0000000000000000000000000000000000000000..9e4e3ba4d6d8e19820de6d527d5847e365e018d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py @@ -0,0 +1,716 @@ +# Natural Language Toolkit: Dependency Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jason Narad +# +# URL: +# For license information, see LICENSE.TXT +# + +from collections import defaultdict +from functools import total_ordering +from itertools import chain + +from nltk.grammar import ( + DependencyGrammar, + DependencyProduction, + ProbabilisticDependencyGrammar, +) +from nltk.internals import raise_unorderable_types +from nltk.parse.dependencygraph import DependencyGraph + +################################################################# +# Dependency Span +################################################################# + + +@total_ordering +class DependencySpan: + """ + A contiguous span over some part of the input string representing + dependency (head -> modifier) relationships amongst words. 
An atomic
+    span corresponds to only one word so it isn't a 'span' in the conventional
+    sense, as its _start_index = _end_index = _head_index for concatenation
+    purposes. All other spans are assumed to have arcs between all nodes
+    within the start and end indexes of the span, and one head index corresponding
+    to the head word for the entire span. This is the same as the root node when
+    the dependency structure is depicted as a graph.
+    """
+
+    def __init__(self, start_index, end_index, head_index, arcs, tags):
+        self._start_index = start_index
+        self._end_index = end_index
+        self._head_index = head_index
+        self._arcs = arcs
+        self._tags = tags
+        self._comparison_key = (start_index, end_index, head_index, tuple(arcs))
+        self._hash = hash(self._comparison_key)
+
+    def head_index(self):
+        """
+        :return: A value indexing the head of the entire ``DependencySpan``.
+        :rtype: int
+        """
+        return self._head_index
+
+    def __repr__(self):
+        """
+        :return: A concise string representation of the ``DependencySpan``.
+        :rtype: str.
+        """
+        return "Span %d-%d; Head Index: %d" % (
+            self._start_index,
+            self._end_index,
+            self._head_index,
+        )
+
+    def __str__(self):
+        """
+        :return: A verbose string representation of the ``DependencySpan``.
+        :rtype: str
+        """
+        s = "Span %d-%d; Head Index: %d" % (
+            self._start_index,
+            self._end_index,
+            self._head_index,
+        )
+        for i in range(len(self._arcs)):
+            s += "\n%d <- %d, %s" % (i, self._arcs[i], self._tags[i])
+        return s
+
+    def __eq__(self, other):
+        return (
+            type(self) == type(other) and self._comparison_key == other._comparison_key
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __lt__(self, other):
+        if not isinstance(other, DependencySpan):
+            raise_unorderable_types("<", self, other)
+        return self._comparison_key < other._comparison_key
+
+    def __hash__(self):
+        """
+        :return: The hash value of this ``DependencySpan``.
+        """
+        return self._hash
+
+
+#################################################################
+# Chart Cell
+#################################################################
+
+
+class ChartCell:
+    """
+    A cell from the parse chart formed when performing the CYK algorithm.
+    Each cell keeps track of its x and y coordinates (though this will probably
+    be discarded), and a set of spans serving as the cell's entries.
+    """
+
+    def __init__(self, x, y):
+        """
+        :param x: This cell's x coordinate.
+        :type x: int.
+        :param y: This cell's y coordinate.
+        :type y: int.
+        """
+        self._x = x
+        self._y = y
+        self._entries = set()
+
+    def add(self, span):
+        """
+        Adds the given span to the set of spans
+        representing the chart cell's entries.
+
+        :param span: The span to add.
+        :type span: DependencySpan
+        """
+        self._entries.add(span)
+
+    def __str__(self):
+        """
+        :return: A verbose string representation of this ``ChartCell``.
+        :rtype: str.
+        """
+        return "CC[%d,%d]: %s" % (self._x, self._y, self._entries)
+
+    def __repr__(self):
+        """
+        :return: A concise string representation of this ``ChartCell``.
+        :rtype: str.
+        """
+        return "%s" % self
+
+
+#################################################################
+# Parsing with Dependency Grammars
+#################################################################
+
+
+class ProjectiveDependencyParser:
+    """
+    A projective, rule-based, dependency parser. A ProjectiveDependencyParser
+    is created with a DependencyGrammar, a set of productions specifying
+    word-to-word dependency relations.
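To make the two data structures above concrete, a brief sketch (an illustrative annotation, not part of the patch; it mirrors how ``parse()`` below seeds its chart):

    from nltk.parse.projectivedependencyparser import ChartCell, DependencySpan

    # The atomic span for token 0: it covers just that word (start 0, end 1)
    # and is its own head; arc -1 means no head has been assigned yet.
    span = DependencySpan(0, 1, 0, [-1], ["null"])
    print(repr(span))  # Span 0-1; Head Index: 0

    cell = ChartCell(1, 0)  # chart[1][0] holds the spans covering token 0
    cell.add(span)
    print(cell)  # CC[1,0]: {Span 0-1; Head Index: 0}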
The parse() method will then
+    return the set of all parses, in tree representation, for a given input
+    sequence of tokens. Each parse must meet the requirements of both
+    the grammar and the projectivity constraint, which specifies that the
+    branches of the dependency tree are not allowed to cross. Alternatively,
+    this can be understood as stating that each parent node and its children
+    in the parse tree form a continuous substring of the input sequence.
+    """
+
+    def __init__(self, dependency_grammar):
+        """
+        Create a new ProjectiveDependencyParser, from a word-to-word
+        dependency grammar ``DependencyGrammar``.
+
+        :param dependency_grammar: A word-to-word relation dependency grammar.
+        :type dependency_grammar: DependencyGrammar
+        """
+        self._grammar = dependency_grammar
+
+    def parse(self, tokens):
+        """
+        Performs a projective dependency parse on the list of tokens using
+        a chart-based, span-concatenation algorithm similar to Eisner (1996).
+
+        :param tokens: The list of input tokens.
+        :type tokens: list(str)
+        :return: An iterator over parse trees.
+        :rtype: iter(Tree)
+        """
+        self._tokens = list(tokens)
+        chart = []
+        for i in range(0, len(self._tokens) + 1):
+            chart.append([])
+            for j in range(0, len(self._tokens) + 1):
+                chart[i].append(ChartCell(i, j))
+                if i == j + 1:
+                    chart[i][j].add(DependencySpan(i - 1, i, i - 1, [-1], ["null"]))
+
+        for i in range(1, len(self._tokens) + 1):
+            for j in range(i - 2, -1, -1):
+                for k in range(i - 1, j, -1):
+                    for span1 in chart[k][j]._entries:
+                        for span2 in chart[i][k]._entries:
+                            for newspan in self.concatenate(span1, span2):
+                                chart[i][j].add(newspan)
+
+        for parse in chart[len(self._tokens)][0]._entries:
+            conll_format = ""
+            # malt_format = ""
+            for i in range(len(tokens)):
+                # malt_format += '%s\t%s\t%d\t%s\n' % (tokens[i], 'null', parse._arcs[i] + 1, 'null')
+                # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], 'null', 'null', 'null', parse._arcs[i] + 1, 'null', '-', '-')
+                # Modified to comply with the new DependencyGraph requirement that
+                # every graph must contain at least one ROOT element.
+                conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % (
+                    i + 1,
+                    tokens[i],
+                    tokens[i],
+                    "null",
+                    "null",
+                    "null",
+                    parse._arcs[i] + 1,
+                    "ROOT",
+                    "-",
+                    "-",
+                )
+            dg = DependencyGraph(conll_format)
+            # if self.meets_arity(dg):
+            yield dg.tree()
+
+    def concatenate(self, span1, span2):
+        """
+        Concatenates the two spans in whichever way possible. This
+        includes rightward concatenation (from the leftmost word of the
+        leftmost span to the rightmost word of the rightmost span) and
+        leftward concatenation (vice-versa) between adjacent spans. Unlike
+        Eisner's presentation of span concatenation, these spans do not
+        share or pivot on a particular word/word-index.
+
+        :return: A list of new spans formed through concatenation.
+ :rtype: list(DependencySpan) + """ + spans = [] + if span1._start_index == span2._start_index: + print("Error: Mismatched spans - replace this with thrown error") + if span1._start_index > span2._start_index: + temp_span = span1 + span1 = span2 + span2 = temp_span + # adjacent rightward covered concatenation + new_arcs = span1._arcs + span2._arcs + new_tags = span1._tags + span2._tags + if self._grammar.contains( + self._tokens[span1._head_index], self._tokens[span2._head_index] + ): + # print('Performing rightward cover %d to %d' % (span1._head_index, span2._head_index)) + new_arcs[span2._head_index - span1._start_index] = span1._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span1._head_index, + new_arcs, + new_tags, + ) + ) + # adjacent leftward covered concatenation + new_arcs = span1._arcs + span2._arcs + if self._grammar.contains( + self._tokens[span2._head_index], self._tokens[span1._head_index] + ): + # print('performing leftward cover %d to %d' % (span2._head_index, span1._head_index)) + new_arcs[span1._head_index - span1._start_index] = span2._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span2._head_index, + new_arcs, + new_tags, + ) + ) + return spans + + +################################################################# +# Parsing with Probabilistic Dependency Grammars +################################################################# + + +class ProbabilisticProjectiveDependencyParser: + """A probabilistic, projective dependency parser. + + This parser returns the most probable projective parse derived from the + probabilistic dependency grammar derived from the train() method. The + probabilistic model is an implementation of Eisner's (1996) Model C, which + conditions on head-word, head-tag, child-word, and child-tag. The decoding + uses a bottom-up chart-based span concatenation algorithm that's identical + to the one utilized by the rule-based projective parser. + + Usage example + + >>> from nltk.parse.dependencygraph import conll_data2 + + >>> graphs = [ + ... DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry + ... ] + + >>> ppdp = ProbabilisticProjectiveDependencyParser() + >>> ppdp.train(graphs) + + >>> sent = ['Cathy', 'zag', 'hen', 'wild', 'zwaaien', '.'] + >>> list(ppdp.parse(sent)) + [Tree('zag', ['Cathy', 'hen', Tree('zwaaien', ['wild', '.'])])] + + """ + + def __init__(self): + """ + Create a new probabilistic dependency parser. No additional + operations are necessary. + """ + + def parse(self, tokens): + """ + Parses the list of tokens subject to the projectivity constraint + and the productions in the parser's grammar. This uses a method + similar to the span-concatenation algorithm defined in Eisner (1996). + It returns the most probable parse derived from the parser's + probabilistic dependency grammar. + """ + self._tokens = list(tokens) + chart = [] + for i in range(0, len(self._tokens) + 1): + chart.append([]) + for j in range(0, len(self._tokens) + 1): + chart[i].append(ChartCell(i, j)) + if i == j + 1: + if tokens[i - 1] in self._grammar._tags: + for tag in self._grammar._tags[tokens[i - 1]]: + chart[i][j].add( + DependencySpan(i - 1, i, i - 1, [-1], [tag]) + ) + else: + print( + "No tag found for input token '%s', parse is impossible." 
+ % tokens[i - 1] + ) + return [] + for i in range(1, len(self._tokens) + 1): + for j in range(i - 2, -1, -1): + for k in range(i - 1, j, -1): + for span1 in chart[k][j]._entries: + for span2 in chart[i][k]._entries: + for newspan in self.concatenate(span1, span2): + chart[i][j].add(newspan) + trees = [] + max_parse = None + max_score = 0 + for parse in chart[len(self._tokens)][0]._entries: + conll_format = "" + malt_format = "" + for i in range(len(tokens)): + malt_format += "%s\t%s\t%d\t%s\n" % ( + tokens[i], + "null", + parse._arcs[i] + 1, + "null", + ) + # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], parse._tags[i], parse._tags[i], 'null', parse._arcs[i] + 1, 'null', '-', '-') + # Modify to comply with recent change in dependency graph such that there must be a ROOT element. + conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % ( + i + 1, + tokens[i], + tokens[i], + parse._tags[i], + parse._tags[i], + "null", + parse._arcs[i] + 1, + "ROOT", + "-", + "-", + ) + dg = DependencyGraph(conll_format) + score = self.compute_prob(dg) + trees.append((score, dg.tree())) + trees.sort() + return (tree for (score, tree) in trees) + + def concatenate(self, span1, span2): + """ + Concatenates the two spans in whichever way possible. This + includes rightward concatenation (from the leftmost word of the + leftmost span to the rightmost word of the rightmost span) and + leftward concatenation (vice-versa) between adjacent spans. Unlike + Eisner's presentation of span concatenation, these spans do not + share or pivot on a particular word/word-index. + + :return: A list of new spans formed through concatenation. + :rtype: list(DependencySpan) + """ + spans = [] + if span1._start_index == span2._start_index: + print("Error: Mismatched spans - replace this with thrown error") + if span1._start_index > span2._start_index: + temp_span = span1 + span1 = span2 + span2 = temp_span + # adjacent rightward covered concatenation + new_arcs = span1._arcs + span2._arcs + new_tags = span1._tags + span2._tags + if self._grammar.contains( + self._tokens[span1._head_index], self._tokens[span2._head_index] + ): + new_arcs[span2._head_index - span1._start_index] = span1._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span1._head_index, + new_arcs, + new_tags, + ) + ) + # adjacent leftward covered concatenation + new_arcs = span1._arcs + span2._arcs + new_tags = span1._tags + span2._tags + if self._grammar.contains( + self._tokens[span2._head_index], self._tokens[span1._head_index] + ): + new_arcs[span1._head_index - span1._start_index] = span2._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span2._head_index, + new_arcs, + new_tags, + ) + ) + return spans + + def train(self, graphs): + """ + Trains a ProbabilisticDependencyGrammar based on the list of input + DependencyGraphs. This model is an implementation of Eisner's (1996) + Model C, which derives its statistics from head-word, head-tag, + child-word, and child-tag relationships. + + :param graphs: A list of dependency graphs to train from. 
+ :type: list(DependencyGraph) + """ + productions = [] + events = defaultdict(int) + tags = {} + for dg in graphs: + for node_index in range(1, len(dg.nodes)): + # children = dg.nodes[node_index]['deps'] + children = list( + chain.from_iterable(dg.nodes[node_index]["deps"].values()) + ) + + nr_left_children = dg.left_children(node_index) + nr_right_children = dg.right_children(node_index) + nr_children = nr_left_children + nr_right_children + for child_index in range( + 0 - (nr_left_children + 1), nr_right_children + 2 + ): + head_word = dg.nodes[node_index]["word"] + head_tag = dg.nodes[node_index]["tag"] + if head_word in tags: + tags[head_word].add(head_tag) + else: + tags[head_word] = {head_tag} + child = "STOP" + child_tag = "STOP" + prev_word = "START" + prev_tag = "START" + if child_index < 0: + array_index = child_index + nr_left_children + if array_index >= 0: + child = dg.nodes[children[array_index]]["word"] + child_tag = dg.nodes[children[array_index]]["tag"] + if child_index != -1: + prev_word = dg.nodes[children[array_index + 1]]["word"] + prev_tag = dg.nodes[children[array_index + 1]]["tag"] + if child != "STOP": + productions.append(DependencyProduction(head_word, [child])) + head_event = "(head ({} {}) (mods ({}, {}, {}) left))".format( + child, + child_tag, + prev_tag, + head_word, + head_tag, + ) + mod_event = "(mods ({}, {}, {}) left))".format( + prev_tag, + head_word, + head_tag, + ) + events[head_event] += 1 + events[mod_event] += 1 + elif child_index > 0: + array_index = child_index + nr_left_children - 1 + if array_index < nr_children: + child = dg.nodes[children[array_index]]["word"] + child_tag = dg.nodes[children[array_index]]["tag"] + if child_index != 1: + prev_word = dg.nodes[children[array_index - 1]]["word"] + prev_tag = dg.nodes[children[array_index - 1]]["tag"] + if child != "STOP": + productions.append(DependencyProduction(head_word, [child])) + head_event = "(head ({} {}) (mods ({}, {}, {}) right))".format( + child, + child_tag, + prev_tag, + head_word, + head_tag, + ) + mod_event = "(mods ({}, {}, {}) right))".format( + prev_tag, + head_word, + head_tag, + ) + events[head_event] += 1 + events[mod_event] += 1 + self._grammar = ProbabilisticDependencyGrammar(productions, events, tags) + + def compute_prob(self, dg): + """ + Computes the probability of a dependency graph based + on the parser's probability model (defined by the parser's + statistical dependency grammar). + + :param dg: A dependency graph to score. + :type dg: DependencyGraph + :return: The probability of the dependency graph. 
+        :rtype: float
+        """
+        prob = 1.0
+        for node_index in range(1, len(dg.nodes)):
+            # children = dg.nodes[node_index]['deps']
+            children = list(chain.from_iterable(dg.nodes[node_index]["deps"].values()))
+
+            nr_left_children = dg.left_children(node_index)
+            nr_right_children = dg.right_children(node_index)
+            nr_children = nr_left_children + nr_right_children
+            for child_index in range(0 - (nr_left_children + 1), nr_right_children + 2):
+                head_word = dg.nodes[node_index]["word"]
+                head_tag = dg.nodes[node_index]["tag"]
+                child = "STOP"
+                child_tag = "STOP"
+                prev_word = "START"
+                prev_tag = "START"
+                if child_index < 0:
+                    array_index = child_index + nr_left_children
+                    if array_index >= 0:
+                        child = dg.nodes[children[array_index]]["word"]
+                        child_tag = dg.nodes[children[array_index]]["tag"]
+                    if child_index != -1:
+                        prev_word = dg.nodes[children[array_index + 1]]["word"]
+                        prev_tag = dg.nodes[children[array_index + 1]]["tag"]
+                    head_event = "(head ({} {}) (mods ({}, {}, {}) left))".format(
+                        child,
+                        child_tag,
+                        prev_tag,
+                        head_word,
+                        head_tag,
+                    )
+                    mod_event = "(mods ({}, {}, {}) left))".format(
+                        prev_tag,
+                        head_word,
+                        head_tag,
+                    )
+                    h_count = self._grammar._events[head_event]
+                    m_count = self._grammar._events[mod_event]
+
+                    # Fall back when this event is not covered by the grammar's statistics
+                    if m_count != 0:
+                        prob *= h_count / m_count
+                    else:
+                        prob = 0.00000001  # Very small number
+
+                elif child_index > 0:
+                    array_index = child_index + nr_left_children - 1
+                    if array_index < nr_children:
+                        child = dg.nodes[children[array_index]]["word"]
+                        child_tag = dg.nodes[children[array_index]]["tag"]
+                    if child_index != 1:
+                        prev_word = dg.nodes[children[array_index - 1]]["word"]
+                        prev_tag = dg.nodes[children[array_index - 1]]["tag"]
+                    head_event = "(head ({} {}) (mods ({}, {}, {}) right))".format(
+                        child,
+                        child_tag,
+                        prev_tag,
+                        head_word,
+                        head_tag,
+                    )
+                    mod_event = "(mods ({}, {}, {}) right))".format(
+                        prev_tag,
+                        head_word,
+                        head_tag,
+                    )
+                    h_count = self._grammar._events[head_event]
+                    m_count = self._grammar._events[mod_event]
+
+                    if m_count != 0:
+                        prob *= h_count / m_count
+                    else:
+                        prob = 0.00000001  # Very small number
+
+        return prob
+
+
+#################################################################
+# Demos
+#################################################################
+
+
+def demo():
+    projective_rule_parse_demo()
+    # arity_parse_demo()
+    projective_prob_parse_demo()
+
+
+def projective_rule_parse_demo():
+    """
+    A demonstration showing the creation and use of a
+    ``DependencyGrammar`` to perform a projective dependency
+    parse.
+    """
+    grammar = DependencyGrammar.fromstring(
+        """
+        'scratch' -> 'cats' | 'walls'
+        'walls' -> 'the'
+        'cats' -> 'the'
+        """
+    )
+    print(grammar)
+    pdp = ProjectiveDependencyParser(grammar)
+    trees = pdp.parse(["the", "cats", "scratch", "the", "walls"])
+    for tree in trees:
+        print(tree)
+
+
+def arity_parse_demo():
+    """
+    A demonstration showing the creation of a ``DependencyGrammar``
+    in which a specific number of modifiers is listed for a given
+    head. This can further constrain the number of possible parses
+    created by a ``ProjectiveDependencyParser``.
+    """
+    print()
+    print("A grammar with no arity constraints.
Each DependencyProduction") + print("specifies a relationship between one head word and only one") + print("modifier word.") + grammar = DependencyGrammar.fromstring( + """ + 'fell' -> 'price' | 'stock' + 'price' -> 'of' | 'the' + 'of' -> 'stock' + 'stock' -> 'the' + """ + ) + print(grammar) + + print() + print("For the sentence 'The price of the stock fell', this grammar") + print("will produce the following three parses:") + pdp = ProjectiveDependencyParser(grammar) + trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"]) + for tree in trees: + print(tree) + + print() + print("By contrast, the following grammar contains a ") + print("DependencyProduction that specifies a relationship") + print("between a single head word, 'price', and two modifier") + print("words, 'of' and 'the'.") + grammar = DependencyGrammar.fromstring( + """ + 'fell' -> 'price' | 'stock' + 'price' -> 'of' 'the' + 'of' -> 'stock' + 'stock' -> 'the' + """ + ) + print(grammar) + + print() + print( + "This constrains the number of possible parses to just one:" + ) # unimplemented, soon to replace + pdp = ProjectiveDependencyParser(grammar) + trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"]) + for tree in trees: + print(tree) + + +def projective_prob_parse_demo(): + """ + A demo showing the training and use of a projective + dependency parser. + """ + from nltk.parse.dependencygraph import conll_data2 + + graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] + ppdp = ProbabilisticProjectiveDependencyParser() + print("Training Probabilistic Projective Dependency Parser...") + ppdp.train(graphs) + + sent = ["Cathy", "zag", "hen", "wild", "zwaaien", "."] + print("Parsing '", " ".join(sent), "'...") + print("Parse:") + for tree in ppdp.parse(sent): + print(tree) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/parse/recursivedescent.py b/venv/lib/python3.10/site-packages/nltk/parse/recursivedescent.py new file mode 100644 index 0000000000000000000000000000000000000000..dc5d88c0884d8da7fdc52b044331ff0536bc19c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/parse/recursivedescent.py @@ -0,0 +1,684 @@ +# Natural Language Toolkit: Recursive Descent Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from nltk.grammar import Nonterminal +from nltk.parse.api import ParserI +from nltk.tree import ImmutableTree, Tree + + +##////////////////////////////////////////////////////// +## Recursive Descent Parser +##////////////////////////////////////////////////////// +class RecursiveDescentParser(ParserI): + """ + A simple top-down CFG parser that parses texts by recursively + expanding the fringe of a Tree, and matching it against a + text. + + ``RecursiveDescentParser`` uses a list of tree locations called a + "frontier" to remember which subtrees have not yet been expanded + and which leaves have not yet been matched against the text. Each + tree location consists of a list of child indices specifying the + path from the root of the tree to a subtree or a leaf; see the + reference documentation for Tree for more information + about tree locations. + + When the parser begins parsing a text, it constructs a tree + containing only the start symbol, and a frontier containing the + location of the tree's root node. 
It then extends the tree to
+    cover the text, using the following recursive procedure:
+
+    - If the frontier is empty, and the text is covered by the tree,
+      then return the tree as a possible parse.
+    - If the frontier is empty, and the text is not covered by the
+      tree, then return no parses.
+    - If the first element of the frontier is a subtree, then
+      use CFG productions to "expand" it. For each applicable
+      production, add the expanded subtree's children to the
+      frontier, and recursively find all parses that can be
+      generated by the new tree and frontier.
+    - If the first element of the frontier is a token, then "match"
+      it against the next token from the text. Remove the token
+      from the frontier, and recursively find all parses that can be
+      generated by the new tree and frontier.
+
+    :see: ``nltk.grammar``
+    """
+
+    def __init__(self, grammar, trace=0):
+        """
+        Create a new ``RecursiveDescentParser`` that uses ``grammar``
+        to parse texts.
+
+        :type grammar: CFG
+        :param grammar: The grammar used to parse texts.
+        :type trace: int
+        :param trace: The level of tracing that should be used when
+            parsing a text. ``0`` will generate no tracing output;
+            and higher numbers will produce more verbose tracing
+            output.
+        """
+        self._grammar = grammar
+        self._trace = trace
+
+    def grammar(self):
+        return self._grammar
+
+    def parse(self, tokens):
+        # Inherit docs from ParserI
+
+        tokens = list(tokens)
+        self._grammar.check_coverage(tokens)
+
+        # Start a recursive descent parse, with an initial tree
+        # containing just the start symbol.
+        start = self._grammar.start().symbol()
+        initial_tree = Tree(start, [])
+        frontier = [()]
+        if self._trace:
+            self._trace_start(initial_tree, frontier, tokens)
+        return self._parse(tokens, initial_tree, frontier)
+
+    def _parse(self, remaining_text, tree, frontier):
+        """
+        Recursively expand and match each element of ``tree``
+        specified by ``frontier``, to cover ``remaining_text``. Return
+        an iterator of all parses found.
+
+        :return: An iterator of all parses that can be generated by
+            matching and expanding the elements of ``tree``
+            specified by ``frontier``.
+        :rtype: iter(Tree)
+        :type tree: Tree
+        :param tree: A partial structure for the text that is
+            currently being parsed. The elements of ``tree``
+            that are specified by ``frontier`` have not yet been
+            expanded or matched.
+        :type remaining_text: list(str)
+        :param remaining_text: The portion of the text that is not yet
+            covered by ``tree``.
+        :type frontier: list(tuple(int))
+        :param frontier: A list of the locations within ``tree`` of
+            all subtrees that have not yet been expanded, and all
+            leaves that have not yet been matched. This list is sorted
+            in left-to-right order of location within the tree.
+        """
+
+        # If the tree covers the text, and there's nothing left to
+        # expand, then we've found a complete parse; return it.
+        if len(remaining_text) == 0 and len(frontier) == 0:
+            if self._trace:
+                self._trace_succeed(tree, frontier)
+            yield tree
+
+        # If there's still text, but nothing left to expand, we failed.
+        elif len(frontier) == 0:
+            if self._trace:
+                self._trace_backtrack(tree, frontier)
+
+        # If the next element on the frontier is a tree, expand it.
+        elif isinstance(tree[frontier[0]], Tree):
+            yield from self._expand(remaining_text, tree, frontier)
+
+        # If the next element on the frontier is a token, match it.
+ else: + yield from self._match(remaining_text, tree, frontier) + + def _match(self, rtext, tree, frontier): + """ + :rtype: iter(Tree) + :return: an iterator of all parses that can be generated by + matching the first element of ``frontier`` against the + first token in ``rtext``. In particular, if the first + element of ``frontier`` has the same type as the first + token in ``rtext``, then substitute the token into + ``tree``; and return all parses that can be generated by + matching and expanding the remaining elements of + ``frontier``. If the first element of ``frontier`` does not + have the same type as the first token in ``rtext``, then + return empty list. + + :type tree: Tree + :param tree: A partial structure for the text that is + currently being parsed. The elements of ``tree`` + that are specified by ``frontier`` have not yet been + expanded or matched. + :type rtext: list(str) + :param rtext: The portion of the text that is not yet + covered by ``tree``. + :type frontier: list of tuple of int + :param frontier: A list of the locations within ``tree`` of + all subtrees that have not yet been expanded, and all + leaves that have not yet been matched. + """ + + tree_leaf = tree[frontier[0]] + if len(rtext) > 0 and tree_leaf == rtext[0]: + # If it's a terminal that matches rtext[0], then substitute + # in the token, and continue parsing. + newtree = tree.copy(deep=True) + newtree[frontier[0]] = rtext[0] + if self._trace: + self._trace_match(newtree, frontier[1:], rtext[0]) + yield from self._parse(rtext[1:], newtree, frontier[1:]) + else: + # If it's a non-matching terminal, fail. + if self._trace: + self._trace_backtrack(tree, frontier, rtext[:1]) + + def _expand(self, remaining_text, tree, frontier, production=None): + """ + :rtype: iter(Tree) + :return: An iterator of all parses that can be generated by + expanding the first element of ``frontier`` with + ``production``. In particular, if the first element of + ``frontier`` is a subtree whose node type is equal to + ``production``'s left hand side, then add a child to that + subtree for each element of ``production``'s right hand + side; and return all parses that can be generated by + matching and expanding the remaining elements of + ``frontier``. If the first element of ``frontier`` is not a + subtree whose node type is equal to ``production``'s left + hand side, then return an empty list. If ``production`` is + not specified, then return a list of all parses that can + be generated by expanding the first element of ``frontier`` + with *any* CFG production. + + :type tree: Tree + :param tree: A partial structure for the text that is + currently being parsed. The elements of ``tree`` + that are specified by ``frontier`` have not yet been + expanded or matched. + :type remaining_text: list(str) + :param remaining_text: The portion of the text that is not yet + covered by ``tree``. + :type frontier: list(tuple(int)) + :param frontier: A list of the locations within ``tree`` of + all subtrees that have not yet been expanded, and all + leaves that have not yet been matched. 
+ """ + + if production is None: + productions = self._grammar.productions() + else: + productions = [production] + + for production in productions: + lhs = production.lhs().symbol() + if lhs == tree[frontier[0]].label(): + subtree = self._production_to_tree(production) + if frontier[0] == (): + newtree = subtree + else: + newtree = tree.copy(deep=True) + newtree[frontier[0]] = subtree + new_frontier = [ + frontier[0] + (i,) for i in range(len(production.rhs())) + ] + if self._trace: + self._trace_expand(newtree, new_frontier, production) + yield from self._parse( + remaining_text, newtree, new_frontier + frontier[1:] + ) + + def _production_to_tree(self, production): + """ + :rtype: Tree + :return: The Tree that is licensed by ``production``. + In particular, given the production ``[lhs -> elt[1] ... elt[n]]`` + return a tree that has a node ``lhs.symbol``, and + ``n`` children. For each nonterminal element + ``elt[i]`` in the production, the tree token has a + childless subtree with node value ``elt[i].symbol``; and + for each terminal element ``elt[j]``, the tree token has + a leaf token with type ``elt[j]``. + + :param production: The CFG production that licenses the tree + token that should be returned. + :type production: Production + """ + children = [] + for elt in production.rhs(): + if isinstance(elt, Nonterminal): + children.append(Tree(elt.symbol(), [])) + else: + # This will be matched. + children.append(elt) + return Tree(production.lhs().symbol(), children) + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + self._trace = trace + + def _trace_fringe(self, tree, treeloc=None): + """ + Print trace output displaying the fringe of ``tree``. The + fringe of ``tree`` consists of all of its leaves and all of + its childless subtrees. + + :rtype: None + """ + + if treeloc == (): + print("*", end=" ") + if isinstance(tree, Tree): + if len(tree) == 0: + print(repr(Nonterminal(tree.label())), end=" ") + for i in range(len(tree)): + if treeloc is not None and i == treeloc[0]: + self._trace_fringe(tree[i], treeloc[1:]) + else: + self._trace_fringe(tree[i]) + else: + print(repr(tree), end=" ") + + def _trace_tree(self, tree, frontier, operation): + """ + Print trace output displaying the parser's current state. + + :param operation: A character identifying the operation that + generated the current state. 
+        :rtype: None
+        """
+        if self._trace == 2:
+            print("  %c [" % operation, end=" ")
+        else:
+            print("    [", end=" ")
+        if len(frontier) > 0:
+            self._trace_fringe(tree, frontier[0])
+        else:
+            self._trace_fringe(tree)
+        print("]")
+
+    def _trace_start(self, tree, frontier, text):
+        print("Parsing %r" % " ".join(text))
+        if self._trace > 2:
+            print("Start:")
+        if self._trace > 1:
+            self._trace_tree(tree, frontier, " ")
+
+    def _trace_expand(self, tree, frontier, production):
+        if self._trace > 2:
+            print("Expand: %s" % production)
+        if self._trace > 1:
+            self._trace_tree(tree, frontier, "E")
+
+    def _trace_match(self, tree, frontier, tok):
+        if self._trace > 2:
+            print("Match: %r" % tok)
+        if self._trace > 1:
+            self._trace_tree(tree, frontier, "M")
+
+    def _trace_succeed(self, tree, frontier):
+        if self._trace > 2:
+            print("GOOD PARSE:")
+        if self._trace == 1:
+            print("Found a parse:\n%s" % tree)
+        if self._trace > 1:
+            self._trace_tree(tree, frontier, "+")
+
+    def _trace_backtrack(self, tree, frontier, toks=None):
+        if self._trace > 2:
+            if toks:
+                print("Backtrack: %r match failed" % toks[0])
+            else:
+                print("Backtrack")
+
+
+##//////////////////////////////////////////////////////
+## Stepping Recursive Descent Parser
+##//////////////////////////////////////////////////////
+class SteppingRecursiveDescentParser(RecursiveDescentParser):
+    """
+    A ``RecursiveDescentParser`` that allows you to step through the
+    parsing process, performing a single operation at a time.
+
+    The ``initialize`` method is used to start parsing a text.
+    ``expand`` expands the first element on the frontier using a single
+    CFG production, and ``match`` matches the first element on the
+    frontier against the next text token. ``backtrack`` undoes the most
+    recent expand or match operation. ``step`` performs a single
+    expand, match, or backtrack operation. ``parses`` returns the set
+    of parses that have been found by the parser.
+
+    :ivar _history: A list of ``(rtext, tree, frontier)`` triples,
+        containing the previous states of the parser. This history is
+        used to implement the ``backtrack`` operation.
+    :ivar _tried_e: A record of all productions that have been tried
+        for a given tree. This record is used by ``expand`` to perform
+        the next untried production.
+    :ivar _tried_m: A record of what tokens have been matched for a
+        given tree. This record is used by ``step`` to decide whether
+        or not to match a token.
+    :see: ``nltk.grammar``
+    """
+
+    def __init__(self, grammar, trace=0):
+        super().__init__(grammar, trace)
+        self._rtext = None
+        self._tree = None
+        self._frontier = [()]
+        self._tried_e = {}
+        self._tried_m = {}
+        self._history = []
+        self._parses = []
+
+    # [XX] TEMPORARY HACK WARNING!  This should be replaced with
+    # something nicer when we get the chance.
+    def _freeze(self, tree):
+        c = tree.copy()
+        # for pos in c.treepositions('leaves'):
+        #    c[pos] = c[pos].freeze()
+        return ImmutableTree.convert(c)
+
+    def parse(self, tokens):
+        tokens = list(tokens)
+        self.initialize(tokens)
+        while self.step() is not None:
+            pass
+        return self.parses()
+
+    def initialize(self, tokens):
+        """
+        Start parsing a given text. This sets the parser's tree to
+        the start symbol, its frontier to the root node, and its
+        remaining text to the given list of tokens.
+ """ + + self._rtext = tokens + start = self._grammar.start().symbol() + self._tree = Tree(start, []) + self._frontier = [()] + self._tried_e = {} + self._tried_m = {} + self._history = [] + self._parses = [] + if self._trace: + self._trace_start(self._tree, self._frontier, self._rtext) + + def remaining_text(self): + """ + :return: The portion of the text that is not yet covered by the + tree. + :rtype: list(str) + """ + return self._rtext + + def frontier(self): + """ + :return: A list of the tree locations of all subtrees that + have not yet been expanded, and all leaves that have not + yet been matched. + :rtype: list(tuple(int)) + """ + return self._frontier + + def tree(self): + """ + :return: A partial structure for the text that is + currently being parsed. The elements specified by the + frontier have not yet been expanded or matched. + :rtype: Tree + """ + return self._tree + + def step(self): + """ + Perform a single parsing operation. If an untried match is + possible, then perform the match, and return the matched + token. If an untried expansion is possible, then perform the + expansion, and return the production that it is based on. If + backtracking is possible, then backtrack, and return True. + Otherwise, return None. + + :return: None if no operation was performed; a token if a match + was performed; a production if an expansion was performed; + and True if a backtrack operation was performed. + :rtype: Production or String or bool + """ + # Try matching (if we haven't already) + if self.untried_match(): + token = self.match() + if token is not None: + return token + + # Try expanding. + production = self.expand() + if production is not None: + return production + + # Try backtracking + if self.backtrack(): + self._trace_backtrack(self._tree, self._frontier) + return True + + # Nothing left to do. + return None + + def expand(self, production=None): + """ + Expand the first element of the frontier. In particular, if + the first element of the frontier is a subtree whose node type + is equal to ``production``'s left hand side, then add a child + to that subtree for each element of ``production``'s right hand + side. If ``production`` is not specified, then use the first + untried expandable production. If all expandable productions + have been tried, do nothing. + + :return: The production used to expand the frontier, if an + expansion was performed. If no expansion was performed, + return None. + :rtype: Production or None + """ + + # Make sure we *can* expand. + if len(self._frontier) == 0: + return None + if not isinstance(self._tree[self._frontier[0]], Tree): + return None + + # If they didn't specify a production, check all untried ones. + if production is None: + productions = self.untried_expandable_productions() + else: + productions = [production] + + parses = [] + for prod in productions: + # Record that we've tried this production now. + self._tried_e.setdefault(self._freeze(self._tree), []).append(prod) + + # Try expanding. + for _result in self._expand(self._rtext, self._tree, self._frontier, prod): + return prod + + # We didn't expand anything. + return None + + def match(self): + """ + Match the first element of the frontier. In particular, if + the first element of the frontier has the same type as the + next text token, then substitute the text token into the tree. + + :return: The token matched, if a match operation was + performed. If no match was performed, return None + :rtype: str or None + """ + + # Record that we've tried matching this token. 
+        tok = self._rtext[0]
+        self._tried_m.setdefault(self._freeze(self._tree), []).append(tok)
+
+        # Make sure we *can* match.
+        if len(self._frontier) == 0:
+            return None
+        if isinstance(self._tree[self._frontier[0]], Tree):
+            return None
+
+        for _result in self._match(self._rtext, self._tree, self._frontier):
+            # Return the token we just matched.
+            return self._history[-1][0][0]
+        return None
+
+    def backtrack(self):
+        """
+        Return the parser to its state before the most recent
+        match or expand operation. Calling ``backtrack`` repeatedly
+        returns the parser to successively earlier states. If no match
+        or expand operations have been performed, ``backtrack`` will
+        make no changes.
+
+        :return: True if an operation was successfully undone.
+        :rtype: bool
+        """
+        if len(self._history) == 0:
+            return False
+        (self._rtext, self._tree, self._frontier) = self._history.pop()
+        return True
+
+    def expandable_productions(self):
+        """
+        :return: A list of all the productions for which expansions
+            are available for the current parser state.
+        :rtype: list(Production)
+        """
+        # Make sure we *can* expand.
+        if len(self._frontier) == 0:
+            return []
+        frontier_child = self._tree[self._frontier[0]]
+        if not isinstance(frontier_child, Tree):
+            return []
+
+        return [
+            p
+            for p in self._grammar.productions()
+            if p.lhs().symbol() == frontier_child.label()
+        ]
+
+    def untried_expandable_productions(self):
+        """
+        :return: A list of all the untried productions for which
+            expansions are available for the current parser state.
+        :rtype: list(Production)
+        """
+
+        tried_expansions = self._tried_e.get(self._freeze(self._tree), [])
+        return [p for p in self.expandable_productions() if p not in tried_expansions]
+
+    def untried_match(self):
+        """
+        :return: Whether the first element of the frontier is a token
+            that has not yet been matched.
+        :rtype: bool
+        """
+
+        if len(self._rtext) == 0:
+            return False
+        tried_matches = self._tried_m.get(self._freeze(self._tree), [])
+        return self._rtext[0] not in tried_matches
+
+    def currently_complete(self):
+        """
+        :return: Whether the parser's current state represents a
+            complete parse.
+        :rtype: bool
+        """
+        return len(self._frontier) == 0 and len(self._rtext) == 0
+
+    def _parse(self, remaining_text, tree, frontier):
+        """
+        A stub version of ``_parse`` that sets the parser's current
+        state to the given arguments. In ``RecursiveDescentParser``,
+        the ``_parse`` method is used to recursively continue parsing a
+        text. ``SteppingRecursiveDescentParser`` overrides it to
+        capture these recursive calls. It records the parser's old
+        state in the history (to allow for backtracking), and updates
+        the parser's new state using the given arguments. Finally, it
+        returns ``[1]``, which is used by ``match`` and ``expand`` to
+        detect whether their operations were successful.
+
+        :return: ``[1]``
+        :rtype: list of int
+        """
+        self._history.append((self._rtext, self._tree, self._frontier))
+        self._rtext = remaining_text
+        self._tree = tree
+        self._frontier = frontier
+
+        # Is it a good parse? If so, record it.
+        if len(frontier) == 0 and len(remaining_text) == 0:
+            self._parses.append(tree)
+            self._trace_succeed(self._tree, self._frontier)
+
+        return [1]
+
+    def parses(self):
+        """
+        :return: An iterator of the parses that have been found by this
+            parser so far.
+        :rtype: iter(Tree)
+        """
+        return iter(self._parses)
+
+    def set_grammar(self, grammar):
+        """
+        Change the grammar used to parse texts.
+
+        :param grammar: The new grammar.
+        :type grammar: CFG
+        """
+        self._grammar = grammar
+
+
+##//////////////////////////////////////////////////////
+##  Demonstration Code
+##//////////////////////////////////////////////////////
+
+
+def demo():
+    """
+    A demonstration of the recursive descent parser.
+    """
+
+    from nltk import CFG, parse
+
+    grammar = CFG.fromstring(
+        """
+    S -> NP VP
+    NP -> Det N | Det N PP
+    VP -> V NP | V NP PP
+    PP -> P NP
+    NP -> 'I'
+    N -> 'man' | 'park' | 'telescope' | 'dog'
+    Det -> 'the' | 'a'
+    P -> 'in' | 'with'
+    V -> 'saw'
+    """
+    )
+
+    for prod in grammar.productions():
+        print(prod)
+
+    sent = "I saw a man in the park".split()
+    parser = parse.RecursiveDescentParser(grammar, trace=2)
+    for p in parser.parse(sent):
+        print(p)
+
+
+if __name__ == "__main__":
+    demo()
diff --git a/venv/lib/python3.10/site-packages/nltk/parse/shiftreduce.py b/venv/lib/python3.10/site-packages/nltk/parse/shiftreduce.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf18342573a14f18ca3918580e22d81f82c896cd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nltk/parse/shiftreduce.py
@@ -0,0 +1,479 @@
+# Natural Language Toolkit: Shift-Reduce Parser
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Edward Loper
+# Steven Bird
+# URL:
+# For license information, see LICENSE.TXT

+from nltk.grammar import Nonterminal
+from nltk.parse.api import ParserI
+from nltk.tree import Tree
+
+
+##//////////////////////////////////////////////////////
+##  Shift/Reduce Parser
+##//////////////////////////////////////////////////////
+class ShiftReduceParser(ParserI):
+    """
+    A simple bottom-up CFG parser that uses two operations, "shift"
+    and "reduce", to find a single parse for a text.
+
+    ``ShiftReduceParser`` maintains a stack, which records the
+    structure of a portion of the text. This stack is a list of
+    strings and Trees that collectively cover a portion of
+    the text. For example, while parsing the sentence "the dog saw
+    the man" with a typical grammar, ``ShiftReduceParser`` will produce
+    the following stack, which covers "the dog saw"::
+
+       [(NP: (Det: 'the') (N: 'dog')), (V: 'saw')]
+
+    ``ShiftReduceParser`` attempts to extend the stack to cover the
+    entire text, and to combine the stack elements into a single tree,
+    producing a complete parse for the sentence.
+
+    Initially, the stack is empty. It is extended to cover the text,
+    from left to right, by repeatedly applying two operations:
+
+    - "shift" moves a token from the beginning of the text to the
+      end of the stack.
+    - "reduce" uses a CFG production to combine the rightmost stack
+      elements into a single Tree.
+
+    Often, more than one operation can be performed on a given stack.
+    In this case, ``ShiftReduceParser`` uses the following heuristics
+    to decide which operation to perform:
+
+    - Only shift if no reductions are available.
+    - If multiple reductions are available, then apply the reduction
+      whose CFG production is listed earliest in the grammar.
+
+    Note that these heuristics are not guaranteed to choose an
+    operation that leads to a parse of the text. Also, if multiple
+    parses exist, ``ShiftReduceParser`` will return at most one of
+    them.
+
+    :see: ``nltk.grammar``
+    """
+
+    def __init__(self, grammar, trace=0):
+        """
+        Create a new ``ShiftReduceParser`` that uses ``grammar`` to
+        parse texts.
+
+        :type grammar: Grammar
+        :param grammar: The grammar used to parse texts.
+        :type trace: int
+        :param trace: The level of tracing that should be used when
+            parsing a text.
+    def parse(self, tokens):
+        tokens = list(tokens)
+        self._grammar.check_coverage(tokens)
+
+        # initialize the stack.
+        stack = []
+        remaining_text = tokens
+
+        # Trace output.
+        if self._trace:
+            print("Parsing %r" % " ".join(tokens))
+            self._trace_stack(stack, remaining_text)
+
+        # iterate through the text, pushing the token onto
+        # the stack, then reducing the stack.
+        while len(remaining_text) > 0:
+            self._shift(stack, remaining_text)
+            while self._reduce(stack, remaining_text):
+                pass
+
+        # Did we reduce everything?
+        if len(stack) == 1:
+            # Did we end up with the right category?
+            if stack[0].label() == self._grammar.start().symbol():
+                yield stack[0]
+
+    def _shift(self, stack, remaining_text):
+        """
+        Move a token from the beginning of ``remaining_text`` to the
+        end of ``stack``.
+
+        :type stack: list(str and Tree)
+        :param stack: A list of strings and Trees, encoding
+            the structure of the text that has been parsed so far.
+        :type remaining_text: list(str)
+        :param remaining_text: The portion of the text that is not yet
+            covered by ``stack``.
+        :rtype: None
+        """
+        stack.append(remaining_text[0])
+        remaining_text.remove(remaining_text[0])
+        if self._trace:
+            self._trace_shift(stack, remaining_text)
+
+    def _match_rhs(self, rhs, rightmost_stack):
+        """
+        :rtype: bool
+        :return: True if the right hand side of a CFG production
+            matches the rightmost elements of the stack.  ``rhs``
+            matches ``rightmost_stack`` if they are the same length,
+            and each element of ``rhs`` matches the corresponding
+            element of ``rightmost_stack``.  A nonterminal element of
+            ``rhs`` matches any Tree whose node value is equal
+            to the nonterminal's symbol.  A terminal element of ``rhs``
+            matches any string whose type is equal to the terminal.
+        :type rhs: list(terminal and Nonterminal)
+        :param rhs: The right hand side of a CFG production.
+        :type rightmost_stack: list(string and Tree)
+        :param rightmost_stack: The rightmost elements of the parser's
+            stack.
+        """
+        if len(rightmost_stack) != len(rhs):
+            return False
+        for i in range(len(rightmost_stack)):
+            if isinstance(rightmost_stack[i], Tree):
+                if not isinstance(rhs[i], Nonterminal):
+                    return False
+                if rightmost_stack[i].label() != rhs[i].symbol():
+                    return False
+            else:
+                if isinstance(rhs[i], Nonterminal):
+                    return False
+                if rightmost_stack[i] != rhs[i]:
+                    return False
+        return True
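+
+    # Editor's note: an illustrative sketch of the matching rule above
+    # (hypothetical session, not part of the original file):
+    #
+    #   >>> from nltk import CFG, Tree
+    #   >>> from nltk.grammar import Nonterminal
+    #   >>> p = ShiftReduceParser(CFG.fromstring("NP -> Det N"))
+    #   >>> p._match_rhs((Nonterminal('Det'), Nonterminal('N')),
+    #   ...              [Tree('Det', ['the']), Tree('N', ['dog'])])
+    #   True
+    #   >>> p._match_rhs(('saw',), ['saw'])
+    #   True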
+ """ + if production is None: + productions = self._grammar.productions() + else: + productions = [production] + + # Try each production, in order. + for production in productions: + rhslen = len(production.rhs()) + + # check if the RHS of a production matches the top of the stack + if self._match_rhs(production.rhs(), stack[-rhslen:]): + + # combine the tree to reflect the reduction + tree = Tree(production.lhs().symbol(), stack[-rhslen:]) + stack[-rhslen:] = [tree] + + # We reduced something + if self._trace: + self._trace_reduce(stack, production, remaining_text) + return production + + # We didn't reduce anything + return None + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + # 1: just show shifts. + # 2: show shifts & reduces + # 3: display which tokens & productions are shifed/reduced + self._trace = trace + + def _trace_stack(self, stack, remaining_text, marker=" "): + """ + Print trace output displaying the given stack and text. + + :rtype: None + :param marker: A character that is printed to the left of the + stack. This is used with trace level 2 to print 'S' + before shifted stacks and 'R' before reduced stacks. + """ + s = " " + marker + " [ " + for elt in stack: + if isinstance(elt, Tree): + s += repr(Nonterminal(elt.label())) + " " + else: + s += repr(elt) + " " + s += "* " + " ".join(remaining_text) + "]" + print(s) + + def _trace_shift(self, stack, remaining_text): + """ + Print trace output displaying that a token has been shifted. + + :rtype: None + """ + if self._trace > 2: + print("Shift %r:" % stack[-1]) + if self._trace == 2: + self._trace_stack(stack, remaining_text, "S") + elif self._trace > 0: + self._trace_stack(stack, remaining_text) + + def _trace_reduce(self, stack, production, remaining_text): + """ + Print trace output displaying that ``production`` was used to + reduce ``stack``. + + :rtype: None + """ + if self._trace > 2: + rhs = " ".join(production.rhs()) + print(f"Reduce {production.lhs()!r} <- {rhs}") + if self._trace == 2: + self._trace_stack(stack, remaining_text, "R") + elif self._trace > 1: + self._trace_stack(stack, remaining_text) + + def _check_grammar(self): + """ + Check to make sure that all of the CFG productions are + potentially useful. If any productions can never be used, + then print a warning. + + :rtype: None + """ + productions = self._grammar.productions() + + # Any production whose RHS is an extension of another production's RHS + # will never be used. + for i in range(len(productions)): + for j in range(i + 1, len(productions)): + rhs1 = productions[i].rhs() + rhs2 = productions[j].rhs() + if rhs1[: len(rhs2)] == rhs2: + print("Warning: %r will never be used" % productions[i]) + + +##////////////////////////////////////////////////////// +## Stepping Shift/Reduce Parser +##////////////////////////////////////////////////////// +class SteppingShiftReduceParser(ShiftReduceParser): + """ + A ``ShiftReduceParser`` that allows you to setp through the parsing + process, performing a single operation at a time. It also allows + you to change the parser's grammar midway through parsing a text. + + The ``initialize`` method is used to start parsing a text. + ``shift`` performs a single shift operation, and ``reduce`` performs + a single reduce operation. 
+
+
+##//////////////////////////////////////////////////////
+## Stepping Shift/Reduce Parser
+##//////////////////////////////////////////////////////
+class SteppingShiftReduceParser(ShiftReduceParser):
+    """
+    A ``ShiftReduceParser`` that allows you to step through the parsing
+    process, performing a single operation at a time.  It also allows
+    you to change the parser's grammar midway through parsing a text.
+
+    The ``initialize`` method is used to start parsing a text.
+    ``shift`` performs a single shift operation, and ``reduce`` performs
+    a single reduce operation.  ``step`` will perform a single reduce
+    operation if possible; otherwise, it will perform a single shift
+    operation.  ``parses`` returns the set of parses that have been
+    found by the parser.
+
+    :ivar _history: A list of ``(stack, remaining_text)`` pairs,
+        containing all of the previous states of the parser.  This
+        history is used to implement the ``undo`` operation.
+    :see: ``nltk.grammar``
+    """
+
+    def __init__(self, grammar, trace=0):
+        super().__init__(grammar, trace)
+        self._stack = None
+        self._remaining_text = None
+        self._history = []
+
+    def parse(self, tokens):
+        tokens = list(tokens)
+        self.initialize(tokens)
+        while self.step():
+            pass
+        return self.parses()
+
+    def stack(self):
+        """
+        :return: The parser's stack.
+        :rtype: list(str and Tree)
+        """
+        return self._stack
+
+    def remaining_text(self):
+        """
+        :return: The portion of the text that is not yet covered by the
+            stack.
+        :rtype: list(str)
+        """
+        return self._remaining_text
+
+    def initialize(self, tokens):
+        """
+        Start parsing a given text.  This sets the parser's stack to
+        ``[]`` and sets its remaining text to ``tokens``.
+        """
+        self._stack = []
+        self._remaining_text = tokens
+        self._history = []
+
+    def step(self):
+        """
+        Perform a single parsing operation.  If a reduction is
+        possible, then perform that reduction, and return the
+        production that it is based on.  Otherwise, if a shift is
+        possible, then perform it, and return True.  Otherwise,
+        return False.
+
+        :return: False if no operation was performed; True if a shift was
+            performed; and the CFG production used to reduce if a
+            reduction was performed.
+        :rtype: Production or bool
+        """
+        return self.reduce() or self.shift()
+
+    def shift(self):
+        """
+        Move a token from the beginning of the remaining text to the
+        end of the stack.  If there are no more tokens in the
+        remaining text, then do nothing.
+
+        :return: True if the shift operation was successful.
+        :rtype: bool
+        """
+        if len(self._remaining_text) == 0:
+            return False
+        self._history.append((self._stack[:], self._remaining_text[:]))
+        self._shift(self._stack, self._remaining_text)
+        return True
+
+    def reduce(self, production=None):
+        """
+        Use ``production`` to combine the rightmost stack elements into
+        a single Tree.  If ``production`` does not match the
+        rightmost stack elements, then do nothing.
+
+        :return: The production used to reduce the stack, if a
+            reduction was performed.  If no reduction was performed,
+            return None.
+
+        :rtype: Production or None
+        """
+        self._history.append((self._stack[:], self._remaining_text[:]))
+        return_val = self._reduce(self._stack, self._remaining_text, production)
+
+        if not return_val:
+            self._history.pop()
+        return return_val
+
+    def undo(self):
+        """
+        Return the parser to its state before the most recent
+        shift or reduce operation.  Calling ``undo`` repeatedly returns
+        the parser to successively earlier states.  If no shift or
+        reduce operations have been performed, ``undo`` will make no
+        changes.
+
+        :return: True if an operation was successfully undone.
+        :rtype: bool
+        """
+        if len(self._history) == 0:
+            return False
+        (self._stack, self._remaining_text) = self._history.pop()
+        return True
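+
+    # Editor's note: a stepping sketch (not part of the original file),
+    # reusing the grammar from the sketch above; step() prefers a
+    # reduction over a shift, so the trace mirrors parse():
+    #
+    #   parser = SteppingShiftReduceParser(grammar)
+    #   parser.initialize("the dog saw the man".split())
+    #   while parser.step():
+    #       print(parser.stack(), parser.remaining_text())
+    #   print(list(parser.parses()))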
+    def reducible_productions(self):
+        """
+        :return: A list of the productions for which reductions are
+            available for the current parser state.
+        :rtype: list(Production)
+        """
+        productions = []
+        for production in self._grammar.productions():
+            rhslen = len(production.rhs())
+            if self._match_rhs(production.rhs(), self._stack[-rhslen:]):
+                productions.append(production)
+        return productions
+
+    def parses(self):
+        """
+        :return: An iterator of the parses that have been found by this
+            parser so far.
+        :rtype: iter(Tree)
+        """
+        if (
+            len(self._remaining_text) == 0
+            and len(self._stack) == 1
+            and self._stack[0].label() == self._grammar.start().symbol()
+        ):
+            yield self._stack[0]
+
+    # copied from nltk.parser
+
+    def set_grammar(self, grammar):
+        """
+        Change the grammar used to parse texts.
+
+        :param grammar: The new grammar.
+        :type grammar: CFG
+        """
+        self._grammar = grammar
+
+
+##//////////////////////////////////////////////////////
+## Demonstration Code
+##//////////////////////////////////////////////////////
+
+
+def demo():
+    """
+    A demonstration of the shift-reduce parser.
+    """
+
+    from nltk import CFG, parse
+
+    grammar = CFG.fromstring(
+        """
+    S -> NP VP
+    NP -> Det N | Det N PP
+    VP -> V NP | V NP PP
+    PP -> P NP
+    NP -> 'I'
+    N -> 'man' | 'park' | 'telescope' | 'dog'
+    Det -> 'the' | 'a'
+    P -> 'in' | 'with'
+    V -> 'saw'
+    """
+    )
+
+    sent = "I saw a man in the park".split()
+
+    parser = parse.ShiftReduceParser(grammar, trace=2)
+    for p in parser.parse(sent):
+        print(p)
+
+
+if __name__ == "__main__":
+    demo()
diff --git a/venv/lib/python3.10/site-packages/nltk/parse/util.py b/venv/lib/python3.10/site-packages/nltk/parse/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cc5bee08fdb9aa237513992a36fa2eaa0aa8219
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nltk/parse/util.py
@@ -0,0 +1,234 @@
+# Natural Language Toolkit: Parser Utility Functions
+#
+# Author: Ewan Klein
+# Tom Aarsen <>
+#
+# Copyright (C) 2001-2023 NLTK Project
+# URL:
+# For license information, see LICENSE.TXT
+
+
+"""
+Utility functions for parsers.
+"""
+
+from nltk.data import load
+from nltk.grammar import CFG, PCFG, FeatureGrammar
+from nltk.parse.chart import Chart, ChartParser
+from nltk.parse.featurechart import FeatureChart, FeatureChartParser
+from nltk.parse.pchart import InsideChartParser
+
+
+def load_parser(
+    grammar_url, trace=0, parser=None, chart_class=None, beam_size=0, **load_args
+):
+    """
+    Load a grammar from a file, and build a parser based on that grammar.
+    The parser depends on the grammar format, and might also depend
+    on properties of the grammar itself.
+
+    The following grammar formats are currently supported:
+
+    - ``'cfg'`` (CFGs: ``CFG``)
+    - ``'pcfg'`` (probabilistic CFGs: ``PCFG``)
+    - ``'fcfg'`` (feature-based CFGs: ``FeatureGrammar``)
+
+    :type grammar_url: str
+    :param grammar_url: A URL specifying where the grammar is located.
+        The default protocol is ``"nltk:"``, which searches for the file
+        in the NLTK data package.
+    :type trace: int
+    :param trace: The level of tracing that should be used when
+        parsing a text.  ``0`` will generate no tracing output;
+        and higher numbers will produce more verbose tracing output.
+    :param parser: The class used for parsing; should be ``ChartParser``
+        or a subclass.
+        If None, the class depends on the grammar format.
+    :param chart_class: The class used for storing the chart;
+        should be ``Chart`` or a subclass.
+        Only used for CFGs and feature CFGs.
+        If None, the chart class depends on the grammar format.
+    :type beam_size: int
+    :param beam_size: The maximum length for the parser's edge queue.
+        Only used for probabilistic CFGs.
+    :param load_args: Keyword parameters used when loading the grammar.
+        See ``data.load`` for more information.
+    """
+    grammar = load(grammar_url, **load_args)
+    if not isinstance(grammar, CFG):
+        raise ValueError("The grammar must be a CFG, or a subclass thereof.")
+    if isinstance(grammar, PCFG):
+        if parser is None:
+            parser = InsideChartParser
+        return parser(grammar, trace=trace, beam_size=beam_size)
+
+    elif isinstance(grammar, FeatureGrammar):
+        if parser is None:
+            parser = FeatureChartParser
+        if chart_class is None:
+            chart_class = FeatureChart
+        return parser(grammar, trace=trace, chart_class=chart_class)
+
+    else:  # Plain CFG.
+        if parser is None:
+            parser = ChartParser
+        if chart_class is None:
+            chart_class = Chart
+        return parser(grammar, trace=trace, chart_class=chart_class)
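+
+# Editor's note: a usage sketch (not part of the original file), assuming
+# the grammar path resolves via nltk.data.load; "feat0.fcfg" ships with the
+# NLTK data package's book grammars:
+#
+#   from nltk.parse.util import load_parser
+#
+#   cp = load_parser("grammars/book_grammars/feat0.fcfg", trace=1)
+#   for tree in cp.parse("Kim likes children".split()):
+#       print(tree)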
+def taggedsent_to_conll(sentence):
+    """
+    A function to convert a single POS tagged sentence into CONLL format.
+
+    >>> from nltk import word_tokenize, pos_tag
+    >>> text = "This is a foobar sentence."
+    >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE
+    ...     print(line, end="")
+    1 This _ DT DT _ 0 a _ _
+    2 is _ VBZ VBZ _ 0 a _ _
+    3 a _ DT DT _ 0 a _ _
+    4 foobar _ JJ JJ _ 0 a _ _
+    5 sentence _ NN NN _ 0 a _ _
+    6 . _ . . _ 0 a _ _
+
+    :param sentence: A single input sentence to parse
+    :type sentence: list(tuple(str, str))
+    :rtype: iter(str)
+    :return: a generator yielding a single sentence in CONLL format.
+    """
+    for (i, (word, tag)) in enumerate(sentence, start=1):
+        input_str = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"]
+        input_str = "\t".join(input_str) + "\n"
+        yield input_str
+
+
+def taggedsents_to_conll(sentences):
+    """
+    A function to convert a POS tagged document stream (i.e. a list of
+    sentences, where each sentence is a list of ``(word, tag)`` tuples)
+    into lines in CONLL format.  This function yields one line per word
+    and two newlines at the end of each sentence.
+
+    >>> from nltk import word_tokenize, sent_tokenize, pos_tag
+    >>> text = "This is a foobar sentence. Is that right?"
+    >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)]
+    >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE
+    ...     if line:
+    ...         print(line, end="")
+    1 This _ DT DT _ 0 a _ _
+    2 is _ VBZ VBZ _ 0 a _ _
+    3 a _ DT DT _ 0 a _ _
+    4 foobar _ JJ JJ _ 0 a _ _
+    5 sentence _ NN NN _ 0 a _ _
+    6 . _ . . _ 0 a _ _
+
+
+    1 Is _ VBZ VBZ _ 0 a _ _
+    2 that _ IN IN _ 0 a _ _
+    3 right _ NN NN _ 0 a _ _
+    4 ? _ . . _ 0 a _ _
+
+
+
+    :param sentences: Input sentences to parse
+    :type sentences: list(list(tuple(str, str)))
+    :rtype: iter(str)
+    :return: a generator yielding sentences in CONLL format.
+    """
+    for sentence in sentences:
+        yield from taggedsent_to_conll(sentence)
+        yield "\n\n"
+
+
+######################################################################
+# { Test Suites
+######################################################################
+
+
+class TestGrammar:
+    """
+    Unit tests for CFG.
+    """
+
+    def __init__(self, grammar, suite, accept=None, reject=None):
+        self.test_grammar = grammar
+
+        self.cp = load_parser(grammar, trace=0)
+        self.suite = suite
+        self._accept = accept
+        self._reject = reject
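+
+    # Editor's note: a sketch of the expected ``suite`` structure
+    # (hypothetical data, not part of the original file); run() below reads
+    # the "doc", "accept", and "reject" keys of each test:
+    #
+    #   suite = [
+    #       {
+    #           "doc": "simple transitive clauses",
+    #           "accept": ["the dog saw the man"],
+    #           "reject": ["dog the saw man the"],
+    #       },
+    #   ]
+    #   TestGrammar("grammars/book_grammars/feat0.fcfg", suite).run()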
+    def run(self, show_trees=False):
+        """
+        Sentences in the test suite are divided into two classes:
+
+        - grammatical (``accept``) and
+        - ungrammatical (``reject``).
+
+        If a sentence should parse according to the grammar, the value of
+        ``trees`` will be a non-empty list.  If a sentence should be rejected
+        according to the grammar, then the value of ``trees`` will be an
+        empty list.
+        """
+        for test in self.suite:
+            print(test["doc"] + ":", end=" ")
+            # Initialize the flags so that a test with an empty "accept" or
+            # "reject" list cannot reference them before assignment.
+            accepted = rejected = False
+            for key in ["accept", "reject"]:
+                for sent in test[key]:
+                    tokens = sent.split()
+                    trees = list(self.cp.parse(tokens))
+                    if show_trees and trees:
+                        print()
+                        print(sent)
+                        for tree in trees:
+                            print(tree)
+                    if key == "accept":
+                        if trees == []:
+                            raise ValueError("Sentence '%s' failed to parse" % sent)
+                        else:
+                            accepted = True
+                    else:
+                        if trees:
+                            raise ValueError("Sentence '%s' received a parse" % sent)
+                        else:
+                            rejected = True
+            if accepted and rejected:
+                print("All tests passed!")
+
+
+def extract_test_sentences(string, comment_chars="#%;", encoding=None):
+    """
+    Parses a string with one test sentence per line.
+    Lines can optionally begin with:
+
+    - a bool, saying if the sentence is grammatical or not, or
+    - an int, giving the number of parse trees it should have.
+
+    The result information is followed by a colon, and then the sentence.
+    Empty lines and lines beginning with a comment char are ignored.
+
+    :return: a list of tuple of sentences and expected results,
+        where a sentence is a list of str,
+        and a result is None, or bool, or int
+
+    :param comment_chars: ``str`` of possible comment characters.
+    :param encoding: the encoding of the string, if it is binary
+    """
+    if encoding is not None:
+        string = string.decode(encoding)
+    sentences = []
+    for sentence in string.split("\n"):
+        if sentence == "" or sentence[0] in comment_chars:
+            continue
+        split_info = sentence.split(":", 1)
+        result = None
+        if len(split_info) == 2:
+            if split_info[0] in ["True", "true", "False", "false"]:
+                result = split_info[0] in ["True", "true"]
+                sentence = split_info[1]
+            else:
+                result = int(split_info[0])
+                sentence = split_info[1]
+        tokens = sentence.split()
+        if tokens == []:
+            continue
+        sentences += [(tokens, result)]
+    return sentences
diff --git a/venv/lib/python3.10/site-packages/nltk/test/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fed0eafb520b4a100b87f1f052c222224192bda4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/__pycache__/all.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/all.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..173a4cb5f660c54ee9acc1d77b80d8d7436437af Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/all.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/__pycache__/childes_fixt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/childes_fixt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6be03eb2a1085fb78a6aab9f10d95f101d939847 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/childes_fixt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/__pycache__/classify_fixt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/classify_fixt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..129bbd4909fc92cbd7a2785a1de56c6080d61486 Binary files /dev/null and
b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/classify_fixt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08269084404628decd5598bfcda9548dbe08ebb4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/__pycache__/gensim_fixt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/gensim_fixt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be72d658c97636e978c96d397ff733efb9558021 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/gensim_fixt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/__pycache__/gluesemantics_malt_fixt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/gluesemantics_malt_fixt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..327ad2f39385f4aff420ef19255e6545170b143f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/gluesemantics_malt_fixt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/__pycache__/portuguese_en_fixt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/portuguese_en_fixt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e205f4ffadda122c56700407e4ff02363276eb6e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/portuguese_en_fixt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/__pycache__/probability_fixt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/probability_fixt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b96163165043fdcd6389d3a1b258b61bba0774a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/probability_fixt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/__pycache__/setup_fixt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/setup_fixt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..026ae36785a96a09fb0cb85c393d0ccb9572a16d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/__pycache__/setup_fixt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..912acd35cc6401a57344334d22c79e7caa7bef7a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_aline.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_aline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd6bbf99d6287b3228ce56453f7be60744a52215 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_aline.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_bllip.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_bllip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e50649b9d6d95cc3074a4d5a1acc55ca84667ff1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_bllip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfd_mutation.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfd_mutation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a71328c8b02e26c29e328187fe2b188bd9a9ba93 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfd_mutation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfg2chomsky.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfg2chomsky.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01649a479ba599e8947da5662ab72bfcf20d2cb5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_cfg2chomsky.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_chunk.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_chunk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e09fc6ac0ecab10ac8b8ef1c287b3181315ff49c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_chunk.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_classify.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_classify.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcf79155dac60da998161152df82dcdee9ffe85c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_classify.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_collocations.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_collocations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93ce3ea2179a9c0d58b5e3ccf3bc7ec5c97b8570 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_collocations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_concordance.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_concordance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbe48ae89116efab3bf7459cb101825058850453 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_concordance.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corenlp.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corenlp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea43d5321a0981c1c6825cb78a5b13cebbee36a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corenlp.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpora.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpora.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a6873ed5eccaa8e6cb622f33f4df58ab149e5bb Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpora.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpus_views.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpus_views.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd1578c4bc3156f4a85294d0969440e5570a2eb8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_corpus_views.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_data.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f23c51c9672bd06d716ff3dc4443fb283939baf Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_data.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_disagreement.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_disagreement.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d5aa8b1d34060655df1a3b0305c18a40cd4d92a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_disagreement.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_distance.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_distance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b816443522494ee8ae5b058fd5ae7e5ab1f80ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_distance.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_downloader.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_downloader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39f5cd809f24ac914d219e4b34a7752c2ae892a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_downloader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_freqdist.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_freqdist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb1336c4be463eb66c9d1edcd5f268487b352513 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_freqdist.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_hmm.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_hmm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab24226c7c7cb6c2f6994a4f6ffb887c975c475d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_hmm.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json2csv_corpus.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json2csv_corpus.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8ae5ccbab7f4a5f92e964c40b5b963307cde7fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json2csv_corpus.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json_serialization.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json_serialization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21d0f73f6b702013fb52083b8b5735da8072a737 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_json_serialization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_metrics.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7fa2416e2675e427f245a16f55d1cc378e352905 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_metrics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_naivebayes.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_naivebayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bf52b8a48ebe9d919a851c7a0a6ca19e101d9a9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_naivebayes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_nombank.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_nombank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9143373db8cd961b06ad322ba9281f0f07a1db94 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_nombank.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pos_tag.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pos_tag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c3fc9c078bebc8ecb89ab76936696fc1da0470b Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pos_tag.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..910de6dba2e87c497fdfb3b67685300aa8e04b3c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_rte_classify.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_rte_classify.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e26162f180c13f2c37d6b53cb6cb88e660a11fc8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_rte_classify.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_seekable_unicode_stream_reader.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_seekable_unicode_stream_reader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb67bfa31daa6fe04b46a22b6435955e0bd0fd75 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_seekable_unicode_stream_reader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_senna.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_senna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0263dbdbd248ea1fe842cc7f58588e832a1098b3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_senna.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_stem.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_stem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74ace81b06c893221e55f8f43bc68ddf8f37465c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_stem.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tag.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tag.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb6dd4074d1828c973c1c2599474e546ff4332db Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tag.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tokenize.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tokenize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa306329dada0b6e0b923d36cc2d382f12137046 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_tokenize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_twitter_auth.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_twitter_auth.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93a5129455494cc8a94a5d0c633af4a660076ddd Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_twitter_auth.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..883229bdb191a1c6fbfc274c74009bc47a98e79e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_wordnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_wordnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..726471fc1191115b2af835ca8a25d8f9e00f7b59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_wordnet.cpython-310.pyc differ