diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..989b5a15dd39309a9551a0d3b0ae4230c09a1f7c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..766ea2c0710de1a39bd2fb313178f2f7e98c3034
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/book.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6ae917fb8a16250be80fa1f878eec8d725115fb6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/cli.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67f0830e694d14528f2b0f296b4d2660eb3cf731
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/collections.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a3b96ea778f9edca02954cfee44e56b4a3ac62f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/data.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1087b1e9f507a2af183522d470c1e20e498dbcf8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/decorators.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..723f0c7115ff07fe88af9808ad773f77f28799ef
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/featstruct.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dff06f60ab81c93cea7a2d69b8be37dfb296de2e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/grammar.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eafd70b1e1d613cc6e88d82e1689902e9b0bce54
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/help.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4ce9b26693b69891e09372eed2c0cb47218f89a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/internals.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d44f595e1d0b573baf5957fe03a7ec5d420f7c4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/langnames.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52a9088b9cd8bf9047478e148f00a113591de9d5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/lazyimport.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3413e3832a089a74baf68fba47d0362ac413d0da
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/probability.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..587628a6bc956dab9501ffeadb83325e8893b57e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/tgrep.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ec839a005e9583c91b3c77b743913266ce0ed36
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/util.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f46c7ce000d8cb36021536c9b1770a6f48dae1e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/__pycache__/wsd.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__init__.py b/llmeval-env/lib/python3.10/site-packages/nltk/app/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4bbf1831e714c40514313293ae9027e181b8a77
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/app/__init__.py
@@ -0,0 +1,47 @@
+# Natural Language Toolkit: Applications package
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Edward Loper
+#         Steven Bird
+# URL:
+# For license information, see LICENSE.TXT
+
+"""
+Interactive NLTK Applications:
+
+chartparser:  Chart Parser
+chunkparser:  Regular-Expression Chunk Parser
+collocations: Find collocations in text
+concordance:  Part-of-speech concordancer
+nemo:         Finding (and Replacing) Nemo regular expression tool
+rdparser:     Recursive Descent Parser
+srparser:     Shift-Reduce Parser
+wordnet:      WordNet Browser
+"""
+
+
+# Import Tkinter-based modules if Tkinter is installed
+try:
+    import tkinter
+except ImportError:
+    import warnings
+
+    warnings.warn("nltk.app package not loaded (please install Tkinter library).")
+else:
+    from nltk.app.chartparser_app import app as chartparser
+    from nltk.app.chunkparser_app import app as chunkparser
+    from nltk.app.collocations_app import app as collocations
+    from nltk.app.concordance_app import app as concordance
+    from nltk.app.nemo_app import app as nemo
+    from nltk.app.rdparser_app import app as rdparser
+    from nltk.app.srparser_app import app as srparser
+    from nltk.app.wordnet_app import app as wordnet
+
+    try:
+        from matplotlib import pylab
+    except ImportError:
+        import warnings
+
+        warnings.warn("nltk.app.wordfreq not loaded (requires the matplotlib library).")
+    else:
+        from nltk.app.wordfreq_app import app as wordfreq
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/nemo_app.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/nemo_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..854f7b710ef0844f71f6152311a5d31e328c0347
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/nemo_app.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/rdparser_app.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/rdparser_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..998252a9d931faadd9cf4adbdf6122fb5e2e35cc
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/rdparser_app.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/wordnet_app.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/wordnet_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5330c82ad1223a35e4355fce2a54d979302ab4e6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/app/__pycache__/wordnet_app.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/chartparser_app.py b/llmeval-env/lib/python3.10/site-packages/nltk/app/chartparser_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..53a938c642c6dcfe23fc085205cac3a541821207
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nltk/app/chartparser_app.py
@@ -0,0 +1,2569 @@
+# Natural Language Toolkit: Chart Parser Application
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Edward Loper
+#         Jean Mark Gawron
+#         Steven Bird
+# URL:
+# For license information, see LICENSE.TXT
+
+"""
+A graphical tool for exploring chart parsing.
+
+Chart parsing is a flexible parsing algorithm that uses a data
+structure called a "chart" to record hypotheses about syntactic
+constituents. Each hypothesis is represented by a single "edge" on
+the chart. A set of "chart rules" determine when new edges can be
+added to the chart. This set of rules controls the overall behavior
+of the parser (e.g. whether it parses top-down or bottom-up).
+ +The chart parsing tool demonstrates the process of parsing a single +sentence, with a given grammar and lexicon. Its display is divided +into three sections: the bottom section displays the chart; the middle +section displays the sentence; and the top section displays the +partial syntax tree corresponding to the selected edge. Buttons along +the bottom of the window are used to control the execution of the +algorithm. + +The chart parsing tool allows for flexible control of the parsing +algorithm. At each step of the algorithm, you can select which rule +or strategy you wish to apply. This allows you to experiment with +mixing different strategies (e.g. top-down and bottom-up). You can +exercise fine-grained control over the algorithm by selecting which +edge you wish to apply a rule to. +""" + +# At some point, we should rewrite this tool to use the new canvas +# widget system. + + +import os.path +import pickle +from tkinter import ( + Button, + Canvas, + Checkbutton, + Frame, + IntVar, + Label, + Menu, + Scrollbar, + Tk, + Toplevel, +) +from tkinter.filedialog import askopenfilename, asksaveasfilename +from tkinter.font import Font +from tkinter.messagebox import showerror, showinfo + +from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment +from nltk.draw.util import ( + CanvasFrame, + ColorizedList, + EntryDialog, + MutableOptionMenu, + ShowText, + SymbolWidget, +) +from nltk.grammar import CFG, Nonterminal +from nltk.parse.chart import ( + BottomUpPredictCombineRule, + BottomUpPredictRule, + Chart, + LeafEdge, + LeafInitRule, + SingleEdgeFundamentalRule, + SteppingChartParser, + TopDownInitRule, + TopDownPredictRule, + TreeEdge, +) +from nltk.tree import Tree +from nltk.util import in_idle + +# Known bug: ChartView doesn't handle edges generated by epsilon +# productions (e.g., [Production: PP -> ]) very well. + +####################################################################### +# Edge List +####################################################################### + + +class EdgeList(ColorizedList): + ARROW = SymbolWidget.SYMBOLS["rightarrow"] + + def _init_colortags(self, textwidget, options): + textwidget.tag_config("terminal", foreground="#006000") + textwidget.tag_config("arrow", font="symbol", underline="0") + textwidget.tag_config("dot", foreground="#000000") + textwidget.tag_config( + "nonterminal", foreground="blue", font=("helvetica", -12, "bold") + ) + + def _item_repr(self, item): + contents = [] + contents.append(("%s\t" % item.lhs(), "nonterminal")) + contents.append((self.ARROW, "arrow")) + for i, elt in enumerate(item.rhs()): + if i == item.dot(): + contents.append((" *", "dot")) + if isinstance(elt, Nonterminal): + contents.append((" %s" % elt.symbol(), "nonterminal")) + else: + contents.append((" %r" % elt, "terminal")) + if item.is_complete(): + contents.append((" *", "dot")) + return contents + + +####################################################################### +# Chart Matrix View +####################################################################### + + +class ChartMatrixView: + """ + A view of a chart that displays the contents of the corresponding matrix. 
+ """ + + def __init__( + self, parent, chart, toplevel=True, title="Chart Matrix", show_numedges=False + ): + self._chart = chart + self._cells = [] + self._marks = [] + + self._selected_cell = None + + if toplevel: + self._root = Toplevel(parent) + self._root.title(title) + self._root.bind("", self.destroy) + self._init_quit(self._root) + else: + self._root = Frame(parent) + + self._init_matrix(self._root) + self._init_list(self._root) + if show_numedges: + self._init_numedges(self._root) + else: + self._numedges_label = None + + self._callbacks = {} + + self._num_edges = 0 + + self.draw() + + def _init_quit(self, root): + quit = Button(root, text="Quit", command=self.destroy) + quit.pack(side="bottom", expand=0, fill="none") + + def _init_matrix(self, root): + cframe = Frame(root, border=2, relief="sunken") + cframe.pack(expand=0, fill="none", padx=1, pady=3, side="top") + self._canvas = Canvas(cframe, width=200, height=200, background="white") + self._canvas.pack(expand=0, fill="none") + + def _init_numedges(self, root): + self._numedges_label = Label(root, text="0 edges") + self._numedges_label.pack(expand=0, fill="none", side="top") + + def _init_list(self, root): + self._list = EdgeList(root, [], width=20, height=5) + self._list.pack(side="top", expand=1, fill="both", pady=3) + + def cb(edge, self=self): + self._fire_callbacks("select", edge) + + self._list.add_callback("select", cb) + self._list.focus() + + def destroy(self, *e): + if self._root is None: + return + try: + self._root.destroy() + except: + pass + self._root = None + + def set_chart(self, chart): + if chart is not self._chart: + self._chart = chart + self._num_edges = 0 + self.draw() + + def update(self): + if self._root is None: + return + + # Count the edges in each cell + N = len(self._cells) + cell_edges = [[0 for i in range(N)] for j in range(N)] + for edge in self._chart: + cell_edges[edge.start()][edge.end()] += 1 + + # Color the cells correspondingly. + for i in range(N): + for j in range(i, N): + if cell_edges[i][j] == 0: + color = "gray20" + else: + color = "#00{:02x}{:02x}".format( + min(255, 50 + 128 * cell_edges[i][j] / 10), + max(0, 128 - 128 * cell_edges[i][j] / 10), + ) + cell_tag = self._cells[i][j] + self._canvas.itemconfig(cell_tag, fill=color) + if (i, j) == self._selected_cell: + self._canvas.itemconfig(cell_tag, outline="#00ffff", width=3) + self._canvas.tag_raise(cell_tag) + else: + self._canvas.itemconfig(cell_tag, outline="black", width=1) + + # Update the edge list. + edges = list(self._chart.select(span=self._selected_cell)) + self._list.set(edges) + + # Update our edge count. + self._num_edges = self._chart.num_edges() + if self._numedges_label is not None: + self._numedges_label["text"] = "%d edges" % self._num_edges + + def activate(self): + self._canvas.itemconfig("inactivebox", state="hidden") + self.update() + + def inactivate(self): + self._canvas.itemconfig("inactivebox", state="normal") + self.update() + + def add_callback(self, event, func): + self._callbacks.setdefault(event, {})[func] = 1 + + def remove_callback(self, event, func=None): + if func is None: + del self._callbacks[event] + else: + try: + del self._callbacks[event][func] + except: + pass + + def _fire_callbacks(self, event, *args): + if event not in self._callbacks: + return + for cb_func in list(self._callbacks[event].keys()): + cb_func(*args) + + def select_cell(self, i, j): + if self._root is None: + return + + # If the cell is already selected (and the chart contents + # haven't changed), then do nothing. 
+ if (i, j) == self._selected_cell and self._chart.num_edges() == self._num_edges: + return + + self._selected_cell = (i, j) + self.update() + + # Fire the callback. + self._fire_callbacks("select_cell", i, j) + + def deselect_cell(self): + if self._root is None: + return + self._selected_cell = None + self._list.set([]) + self.update() + + def _click_cell(self, i, j): + if self._selected_cell == (i, j): + self.deselect_cell() + else: + self.select_cell(i, j) + + def view_edge(self, edge): + self.select_cell(*edge.span()) + self._list.view(edge) + + def mark_edge(self, edge): + if self._root is None: + return + self.select_cell(*edge.span()) + self._list.mark(edge) + + def unmark_edge(self, edge=None): + if self._root is None: + return + self._list.unmark(edge) + + def markonly_edge(self, edge): + if self._root is None: + return + self.select_cell(*edge.span()) + self._list.markonly(edge) + + def draw(self): + if self._root is None: + return + LEFT_MARGIN = BOT_MARGIN = 15 + TOP_MARGIN = 5 + c = self._canvas + c.delete("all") + N = self._chart.num_leaves() + 1 + dx = (int(c["width"]) - LEFT_MARGIN) / N + dy = (int(c["height"]) - TOP_MARGIN - BOT_MARGIN) / N + + c.delete("all") + + # Labels and dotted lines + for i in range(N): + c.create_text( + LEFT_MARGIN - 2, i * dy + dy / 2 + TOP_MARGIN, text=repr(i), anchor="e" + ) + c.create_text( + i * dx + dx / 2 + LEFT_MARGIN, + N * dy + TOP_MARGIN + 1, + text=repr(i), + anchor="n", + ) + c.create_line( + LEFT_MARGIN, + dy * (i + 1) + TOP_MARGIN, + dx * N + LEFT_MARGIN, + dy * (i + 1) + TOP_MARGIN, + dash=".", + ) + c.create_line( + dx * i + LEFT_MARGIN, + TOP_MARGIN, + dx * i + LEFT_MARGIN, + dy * N + TOP_MARGIN, + dash=".", + ) + + # A box around the whole thing + c.create_rectangle( + LEFT_MARGIN, TOP_MARGIN, LEFT_MARGIN + dx * N, dy * N + TOP_MARGIN, width=2 + ) + + # Cells + self._cells = [[None for i in range(N)] for j in range(N)] + for i in range(N): + for j in range(i, N): + t = c.create_rectangle( + j * dx + LEFT_MARGIN, + i * dy + TOP_MARGIN, + (j + 1) * dx + LEFT_MARGIN, + (i + 1) * dy + TOP_MARGIN, + fill="gray20", + ) + self._cells[i][j] = t + + def cb(event, self=self, i=i, j=j): + self._click_cell(i, j) + + c.tag_bind(t, "", cb) + + # Inactive box + xmax, ymax = int(c["width"]), int(c["height"]) + t = c.create_rectangle( + -100, + -100, + xmax + 100, + ymax + 100, + fill="gray50", + state="hidden", + tag="inactivebox", + ) + c.tag_lower(t) + + # Update the cells. + self.update() + + def pack(self, *args, **kwargs): + self._root.pack(*args, **kwargs) + + +####################################################################### +# Chart Results View +####################################################################### + + +class ChartResultsView: + def __init__(self, parent, chart, grammar, toplevel=True): + self._chart = chart + self._grammar = grammar + self._trees = [] + self._y = 10 + self._treewidgets = [] + self._selection = None + self._selectbox = None + + if toplevel: + self._root = Toplevel(parent) + self._root.title("Chart Parser Application: Results") + self._root.bind("", self.destroy) + else: + self._root = Frame(parent) + + # Buttons + if toplevel: + buttons = Frame(self._root) + buttons.pack(side="bottom", expand=0, fill="x") + Button(buttons, text="Quit", command=self.destroy).pack(side="right") + Button(buttons, text="Print All", command=self.print_all).pack(side="left") + Button(buttons, text="Print Selection", command=self.print_selection).pack( + side="left" + ) + + # Canvas frame. 
+ self._cframe = CanvasFrame(self._root, closeenough=20) + self._cframe.pack(side="top", expand=1, fill="both") + + # Initial update + self.update() + + def update(self, edge=None): + if self._root is None: + return + # If the edge isn't a parse edge, do nothing. + if edge is not None: + if edge.lhs() != self._grammar.start(): + return + if edge.span() != (0, self._chart.num_leaves()): + return + + for parse in self._chart.parses(self._grammar.start()): + if parse not in self._trees: + self._add(parse) + + def _add(self, parse): + # Add it to self._trees. + self._trees.append(parse) + + # Create a widget for it. + c = self._cframe.canvas() + treewidget = tree_to_treesegment(c, parse) + + # Add it to the canvas frame. + self._treewidgets.append(treewidget) + self._cframe.add_widget(treewidget, 10, self._y) + + # Register callbacks. + treewidget.bind_click(self._click) + + # Update y. + self._y = treewidget.bbox()[3] + 10 + + def _click(self, widget): + c = self._cframe.canvas() + if self._selection is not None: + c.delete(self._selectbox) + self._selection = widget + (x1, y1, x2, y2) = widget.bbox() + self._selectbox = c.create_rectangle(x1, y1, x2, y2, width=2, outline="#088") + + def _color(self, treewidget, color): + treewidget.label()["color"] = color + for child in treewidget.subtrees(): + if isinstance(child, TreeSegmentWidget): + self._color(child, color) + else: + child["color"] = color + + def print_all(self, *e): + if self._root is None: + return + self._cframe.print_to_file() + + def print_selection(self, *e): + if self._root is None: + return + if self._selection is None: + showerror("Print Error", "No tree selected") + else: + c = self._cframe.canvas() + for widget in self._treewidgets: + if widget is not self._selection: + self._cframe.destroy_widget(widget) + c.delete(self._selectbox) + (x1, y1, x2, y2) = self._selection.bbox() + self._selection.move(10 - x1, 10 - y1) + c["scrollregion"] = f"0 0 {x2 - x1 + 20} {y2 - y1 + 20}" + self._cframe.print_to_file() + + # Restore our state. + self._treewidgets = [self._selection] + self.clear() + self.update() + + def clear(self): + if self._root is None: + return + for treewidget in self._treewidgets: + self._cframe.destroy_widget(treewidget) + self._trees = [] + self._treewidgets = [] + if self._selection is not None: + self._cframe.canvas().delete(self._selectbox) + self._selection = None + self._y = 10 + + def set_chart(self, chart): + self.clear() + self._chart = chart + self.update() + + def set_grammar(self, grammar): + self.clear() + self._grammar = grammar + self.update() + + def destroy(self, *e): + if self._root is None: + return + try: + self._root.destroy() + except: + pass + self._root = None + + def pack(self, *args, **kwargs): + self._root.pack(*args, **kwargs) + + +####################################################################### +# Chart Comparer +####################################################################### + + +class ChartComparer: + """ + + :ivar _root: The root window + + :ivar _charts: A dictionary mapping names to charts. When + charts are loaded, they are added to this dictionary. + + :ivar _left_chart: The left ``Chart``. + :ivar _left_name: The name ``_left_chart`` (derived from filename) + :ivar _left_matrix: The ``ChartMatrixView`` for ``_left_chart`` + :ivar _left_selector: The drop-down ``MutableOptionsMenu`` used + to select ``_left_chart``. + + :ivar _right_chart: The right ``Chart``. 
+ :ivar _right_name: The name ``_right_chart`` (derived from filename) + :ivar _right_matrix: The ``ChartMatrixView`` for ``_right_chart`` + :ivar _right_selector: The drop-down ``MutableOptionsMenu`` used + to select ``_right_chart``. + + :ivar _out_chart: The out ``Chart``. + :ivar _out_name: The name ``_out_chart`` (derived from filename) + :ivar _out_matrix: The ``ChartMatrixView`` for ``_out_chart`` + :ivar _out_label: The label for ``_out_chart``. + + :ivar _op_label: A Label containing the most recent operation. + """ + + _OPSYMBOL = { + "-": "-", + "and": SymbolWidget.SYMBOLS["intersection"], + "or": SymbolWidget.SYMBOLS["union"], + } + + def __init__(self, *chart_filenames): + # This chart is displayed when we don't have a value (eg + # before any chart is loaded). + faketok = [""] * 8 + self._emptychart = Chart(faketok) + + # The left & right charts start out empty. + self._left_name = "None" + self._right_name = "None" + self._left_chart = self._emptychart + self._right_chart = self._emptychart + + # The charts that have been loaded. + self._charts = {"None": self._emptychart} + + # The output chart. + self._out_chart = self._emptychart + + # The most recent operation + self._operator = None + + # Set up the root window. + self._root = Tk() + self._root.title("Chart Comparison") + self._root.bind("", self.destroy) + self._root.bind("", self.destroy) + + # Initialize all widgets, etc. + self._init_menubar(self._root) + self._init_chartviews(self._root) + self._init_divider(self._root) + self._init_buttons(self._root) + self._init_bindings(self._root) + + # Load any specified charts. + for filename in chart_filenames: + self.load_chart(filename) + + def destroy(self, *e): + if self._root is None: + return + try: + self._root.destroy() + except: + pass + self._root = None + + def mainloop(self, *args, **kwargs): + return + self._root.mainloop(*args, **kwargs) + + # //////////////////////////////////////////////////////////// + # Initialization + # //////////////////////////////////////////////////////////// + + def _init_menubar(self, root): + menubar = Menu(root) + + # File menu + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Load Chart", + accelerator="Ctrl-o", + underline=0, + command=self.load_chart_dialog, + ) + filemenu.add_command( + label="Save Output", + accelerator="Ctrl-s", + underline=0, + command=self.save_chart_dialog, + ) + filemenu.add_separator() + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + # Compare menu + opmenu = Menu(menubar, tearoff=0) + opmenu.add_command( + label="Intersection", command=self._intersection, accelerator="+" + ) + opmenu.add_command(label="Union", command=self._union, accelerator="*") + opmenu.add_command( + label="Difference", command=self._difference, accelerator="-" + ) + opmenu.add_separator() + opmenu.add_command(label="Swap Charts", command=self._swapcharts) + menubar.add_cascade(label="Compare", underline=0, menu=opmenu) + + # Add the menu + self._root.config(menu=menubar) + + def _init_divider(self, root): + divider = Frame(root, border=2, relief="sunken") + divider.pack(side="top", fill="x", ipady=2) + + def _init_chartviews(self, root): + opfont = ("symbol", -36) # Font for operator. + eqfont = ("helvetica", -36) # Font for equals sign. + + frame = Frame(root, background="#c0c0c0") + frame.pack(side="top", expand=1, fill="both") + + # The left matrix. 
+ cv1_frame = Frame(frame, border=3, relief="groove") + cv1_frame.pack(side="left", padx=8, pady=7, expand=1, fill="both") + self._left_selector = MutableOptionMenu( + cv1_frame, list(self._charts.keys()), command=self._select_left + ) + self._left_selector.pack(side="top", pady=5, fill="x") + self._left_matrix = ChartMatrixView( + cv1_frame, self._emptychart, toplevel=False, show_numedges=True + ) + self._left_matrix.pack(side="bottom", padx=5, pady=5, expand=1, fill="both") + self._left_matrix.add_callback("select", self.select_edge) + self._left_matrix.add_callback("select_cell", self.select_cell) + self._left_matrix.inactivate() + + # The operator. + self._op_label = Label( + frame, text=" ", width=3, background="#c0c0c0", font=opfont + ) + self._op_label.pack(side="left", padx=5, pady=5) + + # The right matrix. + cv2_frame = Frame(frame, border=3, relief="groove") + cv2_frame.pack(side="left", padx=8, pady=7, expand=1, fill="both") + self._right_selector = MutableOptionMenu( + cv2_frame, list(self._charts.keys()), command=self._select_right + ) + self._right_selector.pack(side="top", pady=5, fill="x") + self._right_matrix = ChartMatrixView( + cv2_frame, self._emptychart, toplevel=False, show_numedges=True + ) + self._right_matrix.pack(side="bottom", padx=5, pady=5, expand=1, fill="both") + self._right_matrix.add_callback("select", self.select_edge) + self._right_matrix.add_callback("select_cell", self.select_cell) + self._right_matrix.inactivate() + + # The equals sign + Label(frame, text="=", width=3, background="#c0c0c0", font=eqfont).pack( + side="left", padx=5, pady=5 + ) + + # The output matrix. + out_frame = Frame(frame, border=3, relief="groove") + out_frame.pack(side="left", padx=8, pady=7, expand=1, fill="both") + self._out_label = Label(out_frame, text="Output") + self._out_label.pack(side="top", pady=9) + self._out_matrix = ChartMatrixView( + out_frame, self._emptychart, toplevel=False, show_numedges=True + ) + self._out_matrix.pack(side="bottom", padx=5, pady=5, expand=1, fill="both") + self._out_matrix.add_callback("select", self.select_edge) + self._out_matrix.add_callback("select_cell", self.select_cell) + self._out_matrix.inactivate() + + def _init_buttons(self, root): + buttons = Frame(root) + buttons.pack(side="bottom", pady=5, fill="x", expand=0) + Button(buttons, text="Intersection", command=self._intersection).pack( + side="left" + ) + Button(buttons, text="Union", command=self._union).pack(side="left") + Button(buttons, text="Difference", command=self._difference).pack(side="left") + Frame(buttons, width=20).pack(side="left") + Button(buttons, text="Swap Charts", command=self._swapcharts).pack(side="left") + + Button(buttons, text="Detach Output", command=self._detach_out).pack( + side="right" + ) + + def _init_bindings(self, root): + # root.bind('', self.save_chart) + root.bind("", self.load_chart_dialog) + # root.bind('', self.reset) + + # //////////////////////////////////////////////////////////// + # Input Handling + # //////////////////////////////////////////////////////////// + + def _select_left(self, name): + self._left_name = name + self._left_chart = self._charts[name] + self._left_matrix.set_chart(self._left_chart) + if name == "None": + self._left_matrix.inactivate() + self._apply_op() + + def _select_right(self, name): + self._right_name = name + self._right_chart = self._charts[name] + self._right_matrix.set_chart(self._right_chart) + if name == "None": + self._right_matrix.inactivate() + self._apply_op() + + def _apply_op(self): + if 
self._operator == "-": + self._difference() + elif self._operator == "or": + self._union() + elif self._operator == "and": + self._intersection() + + # //////////////////////////////////////////////////////////// + # File + # //////////////////////////////////////////////////////////// + CHART_FILE_TYPES = [("Pickle file", ".pickle"), ("All files", "*")] + + def save_chart_dialog(self, *args): + filename = asksaveasfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + with open(filename, "wb") as outfile: + pickle.dump(self._out_chart, outfile) + except Exception as e: + showerror("Error Saving Chart", f"Unable to open file: {filename!r}\n{e}") + + def load_chart_dialog(self, *args): + filename = askopenfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + self.load_chart(filename) + except Exception as e: + showerror("Error Loading Chart", f"Unable to open file: {filename!r}\n{e}") + + def load_chart(self, filename): + with open(filename, "rb") as infile: + chart = pickle.load(infile) + name = os.path.basename(filename) + if name.endswith(".pickle"): + name = name[:-7] + if name.endswith(".chart"): + name = name[:-6] + self._charts[name] = chart + self._left_selector.add(name) + self._right_selector.add(name) + + # If either left_matrix or right_matrix is empty, then + # display the new chart. + if self._left_chart is self._emptychart: + self._left_selector.set(name) + elif self._right_chart is self._emptychart: + self._right_selector.set(name) + + def _update_chartviews(self): + self._left_matrix.update() + self._right_matrix.update() + self._out_matrix.update() + + # //////////////////////////////////////////////////////////// + # Selection + # //////////////////////////////////////////////////////////// + + def select_edge(self, edge): + if edge in self._left_chart: + self._left_matrix.markonly_edge(edge) + else: + self._left_matrix.unmark_edge() + if edge in self._right_chart: + self._right_matrix.markonly_edge(edge) + else: + self._right_matrix.unmark_edge() + if edge in self._out_chart: + self._out_matrix.markonly_edge(edge) + else: + self._out_matrix.unmark_edge() + + def select_cell(self, i, j): + self._left_matrix.select_cell(i, j) + self._right_matrix.select_cell(i, j) + self._out_matrix.select_cell(i, j) + + # //////////////////////////////////////////////////////////// + # Operations + # //////////////////////////////////////////////////////////// + + def _difference(self): + if not self._checkcompat(): + return + + out_chart = Chart(self._left_chart.tokens()) + for edge in self._left_chart: + if edge not in self._right_chart: + out_chart.insert(edge, []) + + self._update("-", out_chart) + + def _intersection(self): + if not self._checkcompat(): + return + + out_chart = Chart(self._left_chart.tokens()) + for edge in self._left_chart: + if edge in self._right_chart: + out_chart.insert(edge, []) + + self._update("and", out_chart) + + def _union(self): + if not self._checkcompat(): + return + + out_chart = Chart(self._left_chart.tokens()) + for edge in self._left_chart: + out_chart.insert(edge, []) + for edge in self._right_chart: + out_chart.insert(edge, []) + + self._update("or", out_chart) + + def _swapcharts(self): + left, right = self._left_name, self._right_name + self._left_selector.set(right) + self._right_selector.set(left) + + def _checkcompat(self): + if ( + self._left_chart.tokens() != self._right_chart.tokens() + or self._left_chart.property_names() 
!= self._right_chart.property_names() + or self._left_chart == self._emptychart + or self._right_chart == self._emptychart + ): + # Clear & inactivate the output chart. + self._out_chart = self._emptychart + self._out_matrix.set_chart(self._out_chart) + self._out_matrix.inactivate() + self._out_label["text"] = "Output" + # Issue some other warning? + return False + else: + return True + + def _update(self, operator, out_chart): + self._operator = operator + self._op_label["text"] = self._OPSYMBOL[operator] + self._out_chart = out_chart + self._out_matrix.set_chart(out_chart) + self._out_label["text"] = "{} {} {}".format( + self._left_name, + self._operator, + self._right_name, + ) + + def _clear_out_chart(self): + self._out_chart = self._emptychart + self._out_matrix.set_chart(self._out_chart) + self._op_label["text"] = " " + self._out_matrix.inactivate() + + def _detach_out(self): + ChartMatrixView(self._root, self._out_chart, title=self._out_label["text"]) + + +####################################################################### +# Chart View +####################################################################### + + +class ChartView: + """ + A component for viewing charts. This is used by ``ChartParserApp`` to + allow students to interactively experiment with various chart + parsing techniques. It is also used by ``Chart.draw()``. + + :ivar _chart: The chart that we are giving a view of. This chart + may be modified; after it is modified, you should call + ``update``. + :ivar _sentence: The list of tokens that the chart spans. + + :ivar _root: The root window. + :ivar _chart_canvas: The canvas we're using to display the chart + itself. + :ivar _tree_canvas: The canvas we're using to display the tree + that each edge spans. May be None, if we're not displaying + trees. + :ivar _sentence_canvas: The canvas we're using to display the sentence + text. May be None, if we're not displaying the sentence text. + :ivar _edgetags: A dictionary mapping from edges to the tags of + the canvas elements (lines, etc) used to display that edge. + The values of this dictionary have the form + ``(linetag, rhstag1, dottag, rhstag2, lhstag)``. + :ivar _treetags: A list of all the tags that make up the tree; + used to erase the tree (without erasing the loclines). + :ivar _chart_height: The height of the chart canvas. + :ivar _sentence_height: The height of the sentence canvas. + :ivar _tree_height: The height of the tree + + :ivar _text_height: The height of a text string (in the normal + font). + + :ivar _edgelevels: A list of edges at each level of the chart (the + top level is the 0th element). This list is used to remember + where edges should be drawn; and to make sure that no edges + are overlapping on the chart view. + + :ivar _unitsize: Pixel size of one unit (from the location). This + is determined by the span of the chart's location, and the + width of the chart display canvas. + + :ivar _fontsize: The current font size + + :ivar _marks: A dictionary from edges to marks. Marks are + strings, specifying colors (e.g. 'green'). + """ + + _LEAF_SPACING = 10 + _MARGIN = 10 + _TREE_LEVEL_SIZE = 12 + _CHART_LEVEL_SIZE = 40 + + def __init__(self, chart, root=None, **kw): + """ + Construct a new ``Chart`` display. + """ + # Process keyword args. + draw_tree = kw.get("draw_tree", 0) + draw_sentence = kw.get("draw_sentence", 1) + self._fontsize = kw.get("fontsize", -12) + + # The chart! 
+ self._chart = chart + + # Callback functions + self._callbacks = {} + + # Keep track of drawn edges + self._edgelevels = [] + self._edgetags = {} + + # Keep track of which edges are marked. + self._marks = {} + + # These are used to keep track of the set of tree tokens + # currently displayed in the tree canvas. + self._treetoks = [] + self._treetoks_edge = None + self._treetoks_index = 0 + + # Keep track of the tags used to draw the tree + self._tree_tags = [] + + # Put multiple edges on each level? + self._compact = 0 + + # If they didn't provide a main window, then set one up. + if root is None: + top = Tk() + top.title("Chart View") + + def destroy1(e, top=top): + top.destroy() + + def destroy2(top=top): + top.destroy() + + top.bind("q", destroy1) + b = Button(top, text="Done", command=destroy2) + b.pack(side="bottom") + self._root = top + else: + self._root = root + + # Create some fonts. + self._init_fonts(root) + + # Create the chart canvas. + (self._chart_sb, self._chart_canvas) = self._sb_canvas(self._root) + self._chart_canvas["height"] = 300 + self._chart_canvas["closeenough"] = 15 + + # Create the sentence canvas. + if draw_sentence: + cframe = Frame(self._root, relief="sunk", border=2) + cframe.pack(fill="both", side="bottom") + self._sentence_canvas = Canvas(cframe, height=50) + self._sentence_canvas["background"] = "#e0e0e0" + self._sentence_canvas.pack(fill="both") + # self._sentence_canvas['height'] = self._sentence_height + else: + self._sentence_canvas = None + + # Create the tree canvas. + if draw_tree: + (sb, canvas) = self._sb_canvas(self._root, "n", "x") + (self._tree_sb, self._tree_canvas) = (sb, canvas) + self._tree_canvas["height"] = 200 + else: + self._tree_canvas = None + + # Do some analysis to figure out how big the window should be + self._analyze() + self.draw() + self._resize() + self._grow() + + # Set up the configure callback, which will be called whenever + # the window is resized. + self._chart_canvas.bind("", self._configure) + + def _init_fonts(self, root): + self._boldfont = Font(family="helvetica", weight="bold", size=self._fontsize) + self._font = Font(family="helvetica", size=self._fontsize) + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + def _sb_canvas(self, root, expand="y", fill="both", side="bottom"): + """ + Helper for __init__: construct a canvas with a scrollbar. + """ + cframe = Frame(root, relief="sunk", border=2) + cframe.pack(fill=fill, expand=expand, side=side) + canvas = Canvas(cframe, background="#e0e0e0") + + # Give the canvas a scrollbar. + sb = Scrollbar(cframe, orient="vertical") + sb.pack(side="right", fill="y") + canvas.pack(side="left", fill=fill, expand="yes") + + # Connect the scrollbars to the canvas. + sb["command"] = canvas.yview + canvas["yscrollcommand"] = sb.set + + return (sb, canvas) + + def scroll_up(self, *e): + self._chart_canvas.yview("scroll", -1, "units") + + def scroll_down(self, *e): + self._chart_canvas.yview("scroll", 1, "units") + + def page_up(self, *e): + self._chart_canvas.yview("scroll", -1, "pages") + + def page_down(self, *e): + self._chart_canvas.yview("scroll", 1, "pages") + + def _grow(self): + """ + Grow the window, if necessary + """ + # Grow, if need-be + N = self._chart.num_leaves() + width = max( + int(self._chart_canvas["width"]), N * self._unitsize + ChartView._MARGIN * 2 + ) + + # It won't resize without the second (height) line, but I + # don't understand why not. 
+ self._chart_canvas.configure(width=width) + self._chart_canvas.configure(height=self._chart_canvas["height"]) + + self._unitsize = (width - 2 * ChartView._MARGIN) / N + + # Reset the height for the sentence window. + if self._sentence_canvas is not None: + self._sentence_canvas["height"] = self._sentence_height + + def set_font_size(self, size): + self._font.configure(size=-abs(size)) + self._boldfont.configure(size=-abs(size)) + self._sysfont.configure(size=-abs(size)) + self._analyze() + self._grow() + self.draw() + + def get_font_size(self): + return abs(self._fontsize) + + def _configure(self, e): + """ + The configure callback. This is called whenever the window is + resized. It is also called when the window is first mapped. + It figures out the unit size, and redraws the contents of each + canvas. + """ + N = self._chart.num_leaves() + self._unitsize = (e.width - 2 * ChartView._MARGIN) / N + self.draw() + + def update(self, chart=None): + """ + Draw any edges that have not been drawn. This is typically + called when a after modifies the canvas that a CanvasView is + displaying. ``update`` will cause any edges that have been + added to the chart to be drawn. + + If update is given a ``chart`` argument, then it will replace + the current chart with the given chart. + """ + if chart is not None: + self._chart = chart + self._edgelevels = [] + self._marks = {} + self._analyze() + self._grow() + self.draw() + self.erase_tree() + self._resize() + else: + for edge in self._chart: + if edge not in self._edgetags: + self._add_edge(edge) + self._resize() + + def _edge_conflict(self, edge, lvl): + """ + Return True if the given edge overlaps with any edge on the given + level. This is used by _add_edge to figure out what level a + new edge should be added to. + """ + (s1, e1) = edge.span() + for otheredge in self._edgelevels[lvl]: + (s2, e2) = otheredge.span() + if (s1 <= s2 < e1) or (s2 <= s1 < e2) or (s1 == s2 == e1 == e2): + return True + return False + + def _analyze_edge(self, edge): + """ + Given a new edge, recalculate: + + - _text_height + - _unitsize (if the edge text is too big for the current + _unitsize, then increase _unitsize) + """ + c = self._chart_canvas + + if isinstance(edge, TreeEdge): + lhs = edge.lhs() + rhselts = [] + for elt in edge.rhs(): + if isinstance(elt, Nonterminal): + rhselts.append(str(elt.symbol())) + else: + rhselts.append(repr(elt)) + rhs = " ".join(rhselts) + else: + lhs = edge.lhs() + rhs = "" + + for s in (lhs, rhs): + tag = c.create_text( + 0, 0, text=s, font=self._boldfont, anchor="nw", justify="left" + ) + bbox = c.bbox(tag) + c.delete(tag) + width = bbox[2] # + ChartView._LEAF_SPACING + edgelen = max(edge.length(), 1) + self._unitsize = max(self._unitsize, width / edgelen) + self._text_height = max(self._text_height, bbox[3] - bbox[1]) + + def _add_edge(self, edge, minlvl=0): + """ + Add a single edge to the ChartView: + + - Call analyze_edge to recalculate display parameters + - Find an available level + - Call _draw_edge + """ + # Do NOT show leaf edges in the chart. + if isinstance(edge, LeafEdge): + return + + if edge in self._edgetags: + return + self._analyze_edge(edge) + self._grow() + + if not self._compact: + self._edgelevels.append([edge]) + lvl = len(self._edgelevels) - 1 + self._draw_edge(edge, lvl) + self._resize() + return + + # Figure out what level to draw the edge on. + lvl = 0 + while True: + # If this level doesn't exist yet, create it. 
+ while lvl >= len(self._edgelevels): + self._edgelevels.append([]) + self._resize() + + # Check if we can fit the edge in this level. + if lvl >= minlvl and not self._edge_conflict(edge, lvl): + # Go ahead and draw it. + self._edgelevels[lvl].append(edge) + break + + # Try the next level. + lvl += 1 + + self._draw_edge(edge, lvl) + + def view_edge(self, edge): + level = None + for i in range(len(self._edgelevels)): + if edge in self._edgelevels[i]: + level = i + break + if level is None: + return + # Try to view the new edge.. + y = (level + 1) * self._chart_level_size + dy = self._text_height + 10 + self._chart_canvas.yview("moveto", 1.0) + if self._chart_height != 0: + self._chart_canvas.yview("moveto", (y - dy) / self._chart_height) + + def _draw_edge(self, edge, lvl): + """ + Draw a single edge on the ChartView. + """ + c = self._chart_canvas + + # Draw the arrow. + x1 = edge.start() * self._unitsize + ChartView._MARGIN + x2 = edge.end() * self._unitsize + ChartView._MARGIN + if x2 == x1: + x2 += max(4, self._unitsize / 5) + y = (lvl + 1) * self._chart_level_size + linetag = c.create_line(x1, y, x2, y, arrow="last", width=3) + + # Draw a label for the edge. + if isinstance(edge, TreeEdge): + rhs = [] + for elt in edge.rhs(): + if isinstance(elt, Nonterminal): + rhs.append(str(elt.symbol())) + else: + rhs.append(repr(elt)) + pos = edge.dot() + else: + rhs = [] + pos = 0 + + rhs1 = " ".join(rhs[:pos]) + rhs2 = " ".join(rhs[pos:]) + rhstag1 = c.create_text(x1 + 3, y, text=rhs1, font=self._font, anchor="nw") + dotx = c.bbox(rhstag1)[2] + 6 + doty = (c.bbox(rhstag1)[1] + c.bbox(rhstag1)[3]) / 2 + dottag = c.create_oval(dotx - 2, doty - 2, dotx + 2, doty + 2) + rhstag2 = c.create_text(dotx + 6, y, text=rhs2, font=self._font, anchor="nw") + lhstag = c.create_text( + (x1 + x2) / 2, y, text=str(edge.lhs()), anchor="s", font=self._boldfont + ) + + # Keep track of the edge's tags. + self._edgetags[edge] = (linetag, rhstag1, dottag, rhstag2, lhstag) + + # Register a callback for clicking on the edge. + def cb(event, self=self, edge=edge): + self._fire_callbacks("select", edge) + + c.tag_bind(rhstag1, "", cb) + c.tag_bind(rhstag2, "", cb) + c.tag_bind(linetag, "", cb) + c.tag_bind(dottag, "", cb) + c.tag_bind(lhstag, "", cb) + + self._color_edge(edge) + + def _color_edge(self, edge, linecolor=None, textcolor=None): + """ + Color in an edge with the given colors. + If no colors are specified, use intelligent defaults + (dependent on selection, etc.) 
+ """ + if edge not in self._edgetags: + return + c = self._chart_canvas + + if linecolor is not None and textcolor is not None: + if edge in self._marks: + linecolor = self._marks[edge] + tags = self._edgetags[edge] + c.itemconfig(tags[0], fill=linecolor) + c.itemconfig(tags[1], fill=textcolor) + c.itemconfig(tags[2], fill=textcolor, outline=textcolor) + c.itemconfig(tags[3], fill=textcolor) + c.itemconfig(tags[4], fill=textcolor) + return + else: + N = self._chart.num_leaves() + if edge in self._marks: + self._color_edge(self._marks[edge]) + if edge.is_complete() and edge.span() == (0, N): + self._color_edge(edge, "#084", "#042") + elif isinstance(edge, LeafEdge): + self._color_edge(edge, "#48c", "#246") + else: + self._color_edge(edge, "#00f", "#008") + + def mark_edge(self, edge, mark="#0df"): + """ + Mark an edge + """ + self._marks[edge] = mark + self._color_edge(edge) + + def unmark_edge(self, edge=None): + """ + Unmark an edge (or all edges) + """ + if edge is None: + old_marked_edges = list(self._marks.keys()) + self._marks = {} + for edge in old_marked_edges: + self._color_edge(edge) + else: + del self._marks[edge] + self._color_edge(edge) + + def markonly_edge(self, edge, mark="#0df"): + self.unmark_edge() + self.mark_edge(edge, mark) + + def _analyze(self): + """ + Analyze the sentence string, to figure out how big a unit needs + to be, How big the tree should be, etc. + """ + # Figure out the text height and the unit size. + unitsize = 70 # min unitsize + text_height = 0 + c = self._chart_canvas + + # Check against all tokens + for leaf in self._chart.leaves(): + tag = c.create_text( + 0, 0, text=repr(leaf), font=self._font, anchor="nw", justify="left" + ) + bbox = c.bbox(tag) + c.delete(tag) + width = bbox[2] + ChartView._LEAF_SPACING + unitsize = max(width, unitsize) + text_height = max(text_height, bbox[3] - bbox[1]) + + self._unitsize = unitsize + self._text_height = text_height + self._sentence_height = self._text_height + 2 * ChartView._MARGIN + + # Check against edges. + for edge in self._chart.edges(): + self._analyze_edge(edge) + + # Size of chart levels + self._chart_level_size = self._text_height * 2 + + # Default tree size.. + self._tree_height = 3 * (ChartView._TREE_LEVEL_SIZE + self._text_height) + + # Resize the scrollregions. + self._resize() + + def _resize(self): + """ + Update the scroll-regions for each canvas. This ensures that + everything is within a scroll-region, so the user can use the + scrollbars to view the entire display. This does *not* + resize the window. + """ + c = self._chart_canvas + + # Reset the chart scroll region + width = self._chart.num_leaves() * self._unitsize + ChartView._MARGIN * 2 + + levels = len(self._edgelevels) + self._chart_height = (levels + 2) * self._chart_level_size + c["scrollregion"] = (0, 0, width, self._chart_height) + + # Reset the tree scroll region + if self._tree_canvas: + self._tree_canvas["scrollregion"] = (0, 0, width, self._tree_height) + + def _draw_loclines(self): + """ + Draw location lines. These are vertical gridlines used to + show where each location unit is. 
+ """ + BOTTOM = 50000 + c1 = self._tree_canvas + c2 = self._sentence_canvas + c3 = self._chart_canvas + margin = ChartView._MARGIN + self._loclines = [] + for i in range(0, self._chart.num_leaves() + 1): + x = i * self._unitsize + margin + + if c1: + t1 = c1.create_line(x, 0, x, BOTTOM) + c1.tag_lower(t1) + if c2: + t2 = c2.create_line(x, 0, x, self._sentence_height) + c2.tag_lower(t2) + t3 = c3.create_line(x, 0, x, BOTTOM) + c3.tag_lower(t3) + t4 = c3.create_text(x + 2, 0, text=repr(i), anchor="nw", font=self._font) + c3.tag_lower(t4) + # if i % 4 == 0: + # if c1: c1.itemconfig(t1, width=2, fill='gray60') + # if c2: c2.itemconfig(t2, width=2, fill='gray60') + # c3.itemconfig(t3, width=2, fill='gray60') + if i % 2 == 0: + if c1: + c1.itemconfig(t1, fill="gray60") + if c2: + c2.itemconfig(t2, fill="gray60") + c3.itemconfig(t3, fill="gray60") + else: + if c1: + c1.itemconfig(t1, fill="gray80") + if c2: + c2.itemconfig(t2, fill="gray80") + c3.itemconfig(t3, fill="gray80") + + def _draw_sentence(self): + """Draw the sentence string.""" + if self._chart.num_leaves() == 0: + return + c = self._sentence_canvas + margin = ChartView._MARGIN + y = ChartView._MARGIN + + for i, leaf in enumerate(self._chart.leaves()): + x1 = i * self._unitsize + margin + x2 = x1 + self._unitsize + x = (x1 + x2) / 2 + tag = c.create_text( + x, y, text=repr(leaf), font=self._font, anchor="n", justify="left" + ) + bbox = c.bbox(tag) + rt = c.create_rectangle( + x1 + 2, + bbox[1] - (ChartView._LEAF_SPACING / 2), + x2 - 2, + bbox[3] + (ChartView._LEAF_SPACING / 2), + fill="#f0f0f0", + outline="#f0f0f0", + ) + c.tag_lower(rt) + + def erase_tree(self): + for tag in self._tree_tags: + self._tree_canvas.delete(tag) + self._treetoks = [] + self._treetoks_edge = None + self._treetoks_index = 0 + + def draw_tree(self, edge=None): + if edge is None and self._treetoks_edge is None: + return + if edge is None: + edge = self._treetoks_edge + + # If it's a new edge, then get a new list of treetoks. + if self._treetoks_edge != edge: + self._treetoks = [t for t in self._chart.trees(edge) if isinstance(t, Tree)] + self._treetoks_edge = edge + self._treetoks_index = 0 + + # Make sure there's something to draw. + if len(self._treetoks) == 0: + return + + # Erase the old tree. + for tag in self._tree_tags: + self._tree_canvas.delete(tag) + + # Draw the new tree. + tree = self._treetoks[self._treetoks_index] + self._draw_treetok(tree, edge.start()) + + # Show how many trees are available for the edge. + self._draw_treecycle() + + # Update the scroll region. + w = self._chart.num_leaves() * self._unitsize + 2 * ChartView._MARGIN + h = tree.height() * (ChartView._TREE_LEVEL_SIZE + self._text_height) + self._tree_canvas["scrollregion"] = (0, 0, w, h) + + def cycle_tree(self): + self._treetoks_index = (self._treetoks_index + 1) % len(self._treetoks) + self.draw_tree(self._treetoks_edge) + + def _draw_treecycle(self): + if len(self._treetoks) <= 1: + return + + # Draw the label. + label = "%d Trees" % len(self._treetoks) + c = self._tree_canvas + margin = ChartView._MARGIN + right = self._chart.num_leaves() * self._unitsize + margin - 2 + tag = c.create_text(right, 2, anchor="ne", text=label, font=self._boldfont) + self._tree_tags.append(tag) + _, _, _, y = c.bbox(tag) + + # Draw the triangles. 
+ for i in range(len(self._treetoks)): + x = right - 20 * (len(self._treetoks) - i - 1) + if i == self._treetoks_index: + fill = "#084" + else: + fill = "#fff" + tag = c.create_polygon( + x, y + 10, x - 5, y, x - 10, y + 10, fill=fill, outline="black" + ) + self._tree_tags.append(tag) + + # Set up a callback: show the tree if they click on its + # triangle. + def cb(event, self=self, i=i): + self._treetoks_index = i + self.draw_tree() + + c.tag_bind(tag, "", cb) + + def _draw_treetok(self, treetok, index, depth=0): + """ + :param index: The index of the first leaf in the tree. + :return: The index of the first leaf after the tree. + """ + c = self._tree_canvas + margin = ChartView._MARGIN + + # Draw the children + child_xs = [] + for child in treetok: + if isinstance(child, Tree): + child_x, index = self._draw_treetok(child, index, depth + 1) + child_xs.append(child_x) + else: + child_xs.append((2 * index + 1) * self._unitsize / 2 + margin) + index += 1 + + # If we have children, then get the node's x by averaging their + # node x's. Otherwise, make room for ourselves. + if child_xs: + nodex = sum(child_xs) / len(child_xs) + else: + # [XX] breaks for null productions. + nodex = (2 * index + 1) * self._unitsize / 2 + margin + index += 1 + + # Draw the node + nodey = depth * (ChartView._TREE_LEVEL_SIZE + self._text_height) + tag = c.create_text( + nodex, + nodey, + anchor="n", + justify="center", + text=str(treetok.label()), + fill="#042", + font=self._boldfont, + ) + self._tree_tags.append(tag) + + # Draw lines to the children. + childy = nodey + ChartView._TREE_LEVEL_SIZE + self._text_height + for childx, child in zip(child_xs, treetok): + if isinstance(child, Tree) and child: + # A "real" tree token: + tag = c.create_line( + nodex, + nodey + self._text_height, + childx, + childy, + width=2, + fill="#084", + ) + self._tree_tags.append(tag) + if isinstance(child, Tree) and not child: + # An unexpanded tree token: + tag = c.create_line( + nodex, + nodey + self._text_height, + childx, + childy, + width=2, + fill="#048", + dash="2 3", + ) + self._tree_tags.append(tag) + if not isinstance(child, Tree): + # A leaf: + tag = c.create_line( + nodex, + nodey + self._text_height, + childx, + 10000, + width=2, + fill="#084", + ) + self._tree_tags.append(tag) + + return nodex, index + + def draw(self): + """ + Draw everything (from scratch). + """ + if self._tree_canvas: + self._tree_canvas.delete("all") + self.draw_tree() + + if self._sentence_canvas: + self._sentence_canvas.delete("all") + self._draw_sentence() + + self._chart_canvas.delete("all") + self._edgetags = {} + + # Redraw any edges we erased. + for lvl in range(len(self._edgelevels)): + for edge in self._edgelevels[lvl]: + self._draw_edge(edge, lvl) + + for edge in self._chart: + self._add_edge(edge) + + self._draw_loclines() + + def add_callback(self, event, func): + self._callbacks.setdefault(event, {})[func] = 1 + + def remove_callback(self, event, func=None): + if func is None: + del self._callbacks[event] + else: + try: + del self._callbacks[event][func] + except: + pass + + def _fire_callbacks(self, event, *args): + if event not in self._callbacks: + return + for cb_func in list(self._callbacks[event].keys()): + cb_func(*args) + + +####################################################################### +# Edge Rules +####################################################################### +# These version of the chart rules only apply to a specific edge. +# This lets the user select an edge, and then apply a rule. 
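# ---------------------------------------------------------------------
# Editor's sketch (not part of chartparser_app.py): the "apply a chart
# rule to one selected edge" idea that the Edge Rules section describes,
# done programmatically with the classes this module already imports
# from nltk.parse.chart.  The toy grammar and sentence are invented for
# the example; the EdgeRule wrappers defined next make the same kind of
# call for whichever edge the user clicks in the chart view.
from nltk.grammar import CFG
from nltk.parse.chart import (
    LeafInitRule,
    SteppingChartParser,
    TopDownInitRule,
    TopDownPredictRule,
)

grammar = CFG.fromstring(
    """
    S -> NP VP
    NP -> 'I' | 'him'
    VP -> V NP
    V -> 'saw'
    """
)
tokens = "I saw him".split()

# Build a chart over the tokens, mirroring ChartParserApp._reset_parser.
cp = SteppingChartParser(grammar)
cp.initialize(tokens)
chart = cp.chart()

# Seed the chart: one leaf edge per token, plus the top-down start edge.
for _ in LeafInitRule().apply(chart, grammar):
    pass
for _ in TopDownInitRule().apply(chart, grammar):
    pass

# Pick a single incomplete edge and apply the top-down predict rule to
# it alone; the rule yields the edges it licenses from that edge.
edge = next(e for e in chart.edges() if not e.is_complete())
for new_edge in TopDownPredictRule().apply(chart, grammar, edge):
    print(new_edge)
# ---------------------------------------------------------------------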
+ + +class EdgeRule: + """ + To create an edge rule, make an empty base class that uses + EdgeRule as the first base class, and the basic rule as the + second base class. (Order matters!) + """ + + def __init__(self, edge): + super = self.__class__.__bases__[1] + self._edge = edge + self.NUM_EDGES = super.NUM_EDGES - 1 + + def apply(self, chart, grammar, *edges): + super = self.__class__.__bases__[1] + edges += (self._edge,) + yield from super.apply(self, chart, grammar, *edges) + + def __str__(self): + super = self.__class__.__bases__[1] + return super.__str__(self) + + +class TopDownPredictEdgeRule(EdgeRule, TopDownPredictRule): + pass + + +class BottomUpEdgeRule(EdgeRule, BottomUpPredictRule): + pass + + +class BottomUpLeftCornerEdgeRule(EdgeRule, BottomUpPredictCombineRule): + pass + + +class FundamentalEdgeRule(EdgeRule, SingleEdgeFundamentalRule): + pass + + +####################################################################### +# Chart Parser Application +####################################################################### + + +class ChartParserApp: + def __init__(self, grammar, tokens, title="Chart Parser Application"): + # Initialize the parser + self._init_parser(grammar, tokens) + + self._root = None + try: + # Create the root window. + self._root = Tk() + self._root.title(title) + self._root.bind("", self.destroy) + + # Set up some frames. + frame3 = Frame(self._root) + frame2 = Frame(self._root) + frame1 = Frame(self._root) + frame3.pack(side="bottom", fill="none") + frame2.pack(side="bottom", fill="x") + frame1.pack(side="bottom", fill="both", expand=1) + + self._init_fonts(self._root) + self._init_animation() + self._init_chartview(frame1) + self._init_rulelabel(frame2) + self._init_buttons(frame3) + self._init_menubar() + + self._matrix = None + self._results = None + + # Set up keyboard bindings. + self._init_bindings() + + except: + print("Error creating Tree View") + self.destroy() + raise + + def destroy(self, *args): + if self._root is None: + return + self._root.destroy() + self._root = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self._root.mainloop(*args, **kwargs) + + # //////////////////////////////////////////////////////////// + # Initialization Helpers + # //////////////////////////////////////////////////////////// + + def _init_parser(self, grammar, tokens): + self._grammar = grammar + self._tokens = tokens + self._reset_parser() + + def _reset_parser(self): + self._cp = SteppingChartParser(self._grammar) + self._cp.initialize(self._tokens) + self._chart = self._cp.chart() + + # Insert LeafEdges before the parsing starts. + for _new_edge in LeafInitRule().apply(self._chart, self._grammar): + pass + + # The step iterator -- use this to generate new edges + self._cpstep = self._cp.step() + + # The currently selected edge + self._selection = None + + def _init_fonts(self, root): + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(root) + self._size.set(self._sysfont.cget("size")) + + self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) + self._font = Font(family="helvetica", size=self._size.get()) + + def _init_animation(self): + # Are we stepping? 
(default=yes) + self._step = IntVar(self._root) + self._step.set(1) + + # What's our animation speed (default=fast) + self._animate = IntVar(self._root) + self._animate.set(3) # Default speed = fast + + # Are we currently animating? + self._animating = 0 + + def _init_chartview(self, parent): + self._cv = ChartView(self._chart, parent, draw_tree=1, draw_sentence=1) + self._cv.add_callback("select", self._click_cv_edge) + + def _init_rulelabel(self, parent): + ruletxt = "Last edge generated by:" + + self._rulelabel1 = Label(parent, text=ruletxt, font=self._boldfont) + self._rulelabel2 = Label( + parent, width=40, relief="groove", anchor="w", font=self._boldfont + ) + self._rulelabel1.pack(side="left") + self._rulelabel2.pack(side="left") + step = Checkbutton(parent, variable=self._step, text="Step") + step.pack(side="right") + + def _init_buttons(self, parent): + frame1 = Frame(parent) + frame2 = Frame(parent) + frame1.pack(side="bottom", fill="x") + frame2.pack(side="top", fill="none") + + Button( + frame1, + text="Reset\nParser", + background="#90c0d0", + foreground="black", + command=self.reset, + ).pack(side="right") + # Button(frame1, text='Pause', + # background='#90c0d0', foreground='black', + # command=self.pause).pack(side='left') + + Button( + frame1, + text="Top Down\nStrategy", + background="#90c0d0", + foreground="black", + command=self.top_down_strategy, + ).pack(side="left") + Button( + frame1, + text="Bottom Up\nStrategy", + background="#90c0d0", + foreground="black", + command=self.bottom_up_strategy, + ).pack(side="left") + Button( + frame1, + text="Bottom Up\nLeft-Corner Strategy", + background="#90c0d0", + foreground="black", + command=self.bottom_up_leftcorner_strategy, + ).pack(side="left") + + Button( + frame2, + text="Top Down Init\nRule", + background="#90f090", + foreground="black", + command=self.top_down_init, + ).pack(side="left") + Button( + frame2, + text="Top Down Predict\nRule", + background="#90f090", + foreground="black", + command=self.top_down_predict, + ).pack(side="left") + Frame(frame2, width=20).pack(side="left") + + Button( + frame2, + text="Bottom Up Predict\nRule", + background="#90f090", + foreground="black", + command=self.bottom_up, + ).pack(side="left") + Frame(frame2, width=20).pack(side="left") + + Button( + frame2, + text="Bottom Up Left-Corner\nPredict Rule", + background="#90f090", + foreground="black", + command=self.bottom_up_leftcorner, + ).pack(side="left") + Frame(frame2, width=20).pack(side="left") + + Button( + frame2, + text="Fundamental\nRule", + background="#90f090", + foreground="black", + command=self.fundamental, + ).pack(side="left") + + def _init_bindings(self): + self._root.bind("", self._cv.scroll_up) + self._root.bind("", self._cv.scroll_down) + self._root.bind("", self._cv.page_up) + self._root.bind("", self._cv.page_down) + self._root.bind("", self.destroy) + self._root.bind("", self.destroy) + self._root.bind("", self.help) + + self._root.bind("", self.save_chart) + self._root.bind("", self.load_chart) + self._root.bind("", self.reset) + + self._root.bind("t", self.top_down_strategy) + self._root.bind("b", self.bottom_up_strategy) + self._root.bind("c", self.bottom_up_leftcorner_strategy) + self._root.bind("", self._stop_animation) + + self._root.bind("", self.edit_grammar) + self._root.bind("", self.edit_sentence) + + # Animation speed control + self._root.bind("-", lambda e, a=self._animate: a.set(1)) + self._root.bind("=", lambda e, a=self._animate: a.set(2)) + self._root.bind("+", lambda e, a=self._animate: 
a.set(3)) + + # Step control + self._root.bind("s", lambda e, s=self._step: s.set(not s.get())) + + def _init_menubar(self): + menubar = Menu(self._root) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Save Chart", + underline=0, + command=self.save_chart, + accelerator="Ctrl-s", + ) + filemenu.add_command( + label="Load Chart", + underline=0, + command=self.load_chart, + accelerator="Ctrl-o", + ) + filemenu.add_command( + label="Reset Chart", underline=0, command=self.reset, accelerator="Ctrl-r" + ) + filemenu.add_separator() + filemenu.add_command(label="Save Grammar", command=self.save_grammar) + filemenu.add_command(label="Load Grammar", command=self.load_grammar) + filemenu.add_separator() + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + editmenu.add_command( + label="Edit Grammar", + underline=5, + command=self.edit_grammar, + accelerator="Ctrl-g", + ) + editmenu.add_command( + label="Edit Text", + underline=5, + command=self.edit_sentence, + accelerator="Ctrl-t", + ) + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_command( + label="Chart Matrix", underline=6, command=self.view_matrix + ) + viewmenu.add_command(label="Results", underline=0, command=self.view_results) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + rulemenu = Menu(menubar, tearoff=0) + rulemenu.add_command( + label="Top Down Strategy", + underline=0, + command=self.top_down_strategy, + accelerator="t", + ) + rulemenu.add_command( + label="Bottom Up Strategy", + underline=0, + command=self.bottom_up_strategy, + accelerator="b", + ) + rulemenu.add_command( + label="Bottom Up Left-Corner Strategy", + underline=0, + command=self.bottom_up_leftcorner_strategy, + accelerator="c", + ) + rulemenu.add_separator() + rulemenu.add_command(label="Bottom Up Rule", command=self.bottom_up) + rulemenu.add_command( + label="Bottom Up Left-Corner Rule", command=self.bottom_up_leftcorner + ) + rulemenu.add_command(label="Top Down Init Rule", command=self.top_down_init) + rulemenu.add_command( + label="Top Down Predict Rule", command=self.top_down_predict + ) + rulemenu.add_command(label="Fundamental Rule", command=self.fundamental) + menubar.add_cascade(label="Apply", underline=0, menu=rulemenu) + + animatemenu = Menu(menubar, tearoff=0) + animatemenu.add_checkbutton( + label="Step", underline=0, variable=self._step, accelerator="s" + ) + animatemenu.add_separator() + animatemenu.add_radiobutton( + label="No Animation", underline=0, variable=self._animate, value=0 + ) + animatemenu.add_radiobutton( + label="Slow Animation", + underline=0, + variable=self._animate, + value=1, + accelerator="-", + ) + animatemenu.add_radiobutton( + label="Normal Animation", + underline=0, + variable=self._animate, + value=2, + accelerator="=", + ) + animatemenu.add_radiobutton( + label="Fast Animation", + underline=0, + variable=self._animate, + value=3, + accelerator="+", + ) + menubar.add_cascade(label="Animate", underline=1, menu=animatemenu) + + zoommenu = Menu(menubar, tearoff=0) + zoommenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Medium", + 
variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=18, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="Zoom", underline=0, menu=zoommenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + self._root.config(menu=menubar) + + # //////////////////////////////////////////////////////////// + # Selection Handling + # //////////////////////////////////////////////////////////// + + def _click_cv_edge(self, edge): + if edge != self._selection: + # Clicking on a new edge selects it. + self._select_edge(edge) + else: + # Repeated clicks on one edge cycle its trees. + self._cv.cycle_tree() + # [XX] this can get confused if animation is running + # faster than the callbacks... + + def _select_matrix_edge(self, edge): + self._select_edge(edge) + self._cv.view_edge(edge) + + def _select_edge(self, edge): + self._selection = edge + # Update the chart view. + self._cv.markonly_edge(edge, "#f00") + self._cv.draw_tree(edge) + # Update the matrix view. + if self._matrix: + self._matrix.markonly_edge(edge) + if self._matrix: + self._matrix.view_edge(edge) + + def _deselect_edge(self): + self._selection = None + # Update the chart view. + self._cv.unmark_edge() + self._cv.erase_tree() + # Update the matrix view + if self._matrix: + self._matrix.unmark_edge() + + def _show_new_edge(self, edge): + self._display_rule(self._cp.current_chartrule()) + # Update the chart view. + self._cv.update() + self._cv.draw_tree(edge) + self._cv.markonly_edge(edge, "#0df") + self._cv.view_edge(edge) + # Update the matrix view. + if self._matrix: + self._matrix.update() + if self._matrix: + self._matrix.markonly_edge(edge) + if self._matrix: + self._matrix.view_edge(edge) + # Update the results view. + if self._results: + self._results.update(edge) + + # //////////////////////////////////////////////////////////// + # Help/usage + # //////////////////////////////////////////////////////////// + + def help(self, *e): + self._animating = 0 + # The default font's not very legible; try using 'fixed' instead. 
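+        # If the 'fixed' font is not available on this Tk installation, the
+        # first ShowText call below raises an exception and we fall back to
+        # the default font.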
+ try: + ShowText( + self._root, + "Help: Chart Parser Application", + (__doc__ or "").strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._root, + "Help: Chart Parser Application", + (__doc__ or "").strip(), + width=75, + ) + + def about(self, *e): + ABOUT = "NLTK Chart Parser Application\n" + "Written by Edward Loper" + showinfo("About: Chart Parser Application", ABOUT) + + # //////////////////////////////////////////////////////////// + # File Menu + # //////////////////////////////////////////////////////////// + + CHART_FILE_TYPES = [("Pickle file", ".pickle"), ("All files", "*")] + GRAMMAR_FILE_TYPES = [ + ("Plaintext grammar file", ".cfg"), + ("Pickle file", ".pickle"), + ("All files", "*"), + ] + + def load_chart(self, *args): + "Load a chart from a pickle file" + filename = askopenfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + with open(filename, "rb") as infile: + chart = pickle.load(infile) + self._chart = chart + self._cv.update(chart) + if self._matrix: + self._matrix.set_chart(chart) + if self._matrix: + self._matrix.deselect_cell() + if self._results: + self._results.set_chart(chart) + self._cp.set_chart(chart) + except Exception as e: + raise + showerror("Error Loading Chart", "Unable to open file: %r" % filename) + + def save_chart(self, *args): + "Save a chart to a pickle file" + filename = asksaveasfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + with open(filename, "wb") as outfile: + pickle.dump(self._chart, outfile) + except Exception as e: + raise + showerror("Error Saving Chart", "Unable to open file: %r" % filename) + + def load_grammar(self, *args): + "Load a grammar from a pickle file" + filename = askopenfilename( + filetypes=self.GRAMMAR_FILE_TYPES, defaultextension=".cfg" + ) + if not filename: + return + try: + if filename.endswith(".pickle"): + with open(filename, "rb") as infile: + grammar = pickle.load(infile) + else: + with open(filename) as infile: + grammar = CFG.fromstring(infile.read()) + self.set_grammar(grammar) + except Exception as e: + showerror("Error Loading Grammar", "Unable to open file: %r" % filename) + + def save_grammar(self, *args): + filename = asksaveasfilename( + filetypes=self.GRAMMAR_FILE_TYPES, defaultextension=".cfg" + ) + if not filename: + return + try: + if filename.endswith(".pickle"): + with open(filename, "wb") as outfile: + pickle.dump((self._chart, self._tokens), outfile) + else: + with open(filename, "w") as outfile: + prods = self._grammar.productions() + start = [p for p in prods if p.lhs() == self._grammar.start()] + rest = [p for p in prods if p.lhs() != self._grammar.start()] + for prod in start: + outfile.write("%s\n" % prod) + for prod in rest: + outfile.write("%s\n" % prod) + except Exception as e: + showerror("Error Saving Grammar", "Unable to open file: %r" % filename) + + def reset(self, *args): + self._animating = 0 + self._reset_parser() + self._cv.update(self._chart) + if self._matrix: + self._matrix.set_chart(self._chart) + if self._matrix: + self._matrix.deselect_cell() + if self._results: + self._results.set_chart(self._chart) + + # //////////////////////////////////////////////////////////// + # Edit + # //////////////////////////////////////////////////////////// + + def edit_grammar(self, *e): + CFGEditor(self._root, self._grammar, self.set_grammar) + + def set_grammar(self, grammar): + self._grammar = grammar + self._cp.set_grammar(grammar) + if 
self._results: + self._results.set_grammar(grammar) + + def edit_sentence(self, *e): + sentence = " ".join(self._tokens) + title = "Edit Text" + instr = "Enter a new sentence to parse." + EntryDialog(self._root, sentence, instr, self.set_sentence, title) + + def set_sentence(self, sentence): + self._tokens = list(sentence.split()) + self.reset() + + # //////////////////////////////////////////////////////////// + # View Menu + # //////////////////////////////////////////////////////////// + + def view_matrix(self, *e): + if self._matrix is not None: + self._matrix.destroy() + self._matrix = ChartMatrixView(self._root, self._chart) + self._matrix.add_callback("select", self._select_matrix_edge) + + def view_results(self, *e): + if self._results is not None: + self._results.destroy() + self._results = ChartResultsView(self._root, self._chart, self._grammar) + + # //////////////////////////////////////////////////////////// + # Zoom Menu + # //////////////////////////////////////////////////////////// + + def resize(self): + self._animating = 0 + self.set_font_size(self._size.get()) + + def set_font_size(self, size): + self._cv.set_font_size(size) + self._font.configure(size=-abs(size)) + self._boldfont.configure(size=-abs(size)) + self._sysfont.configure(size=-abs(size)) + + def get_font_size(self): + return abs(self._size.get()) + + # //////////////////////////////////////////////////////////// + # Parsing + # //////////////////////////////////////////////////////////// + + def apply_strategy(self, strategy, edge_strategy=None): + # If we're animating, then stop. + if self._animating: + self._animating = 0 + return + + # Clear the rule display & mark. + self._display_rule(None) + # self._cv.unmark_edge() + + if self._step.get(): + selection = self._selection + if (selection is not None) and (edge_strategy is not None): + # Apply the given strategy to the selected edge. + self._cp.set_strategy([edge_strategy(selection)]) + newedge = self._apply_strategy() + + # If it failed, then clear the selection. 
+ if newedge is None: + self._cv.unmark_edge() + self._selection = None + else: + self._cp.set_strategy(strategy) + self._apply_strategy() + + else: + self._cp.set_strategy(strategy) + if self._animate.get(): + self._animating = 1 + self._animate_strategy() + else: + for edge in self._cpstep: + if edge is None: + break + self._cv.update() + if self._matrix: + self._matrix.update() + if self._results: + self._results.update() + + def _stop_animation(self, *e): + self._animating = 0 + + def _animate_strategy(self, speed=1): + if self._animating == 0: + return + if self._apply_strategy() is not None: + if self._animate.get() == 0 or self._step.get() == 1: + return + if self._animate.get() == 1: + self._root.after(3000, self._animate_strategy) + elif self._animate.get() == 2: + self._root.after(1000, self._animate_strategy) + else: + self._root.after(20, self._animate_strategy) + + def _apply_strategy(self): + new_edge = next(self._cpstep) + + if new_edge is not None: + self._show_new_edge(new_edge) + return new_edge + + def _display_rule(self, rule): + if rule is None: + self._rulelabel2["text"] = "" + else: + name = str(rule) + self._rulelabel2["text"] = name + size = self._cv.get_font_size() + + # //////////////////////////////////////////////////////////// + # Parsing Strategies + # //////////////////////////////////////////////////////////// + + # Basic rules: + _TD_INIT = [TopDownInitRule()] + _TD_PREDICT = [TopDownPredictRule()] + _BU_RULE = [BottomUpPredictRule()] + _BU_LC_RULE = [BottomUpPredictCombineRule()] + _FUNDAMENTAL = [SingleEdgeFundamentalRule()] + + # Complete strategies: + _TD_STRATEGY = _TD_INIT + _TD_PREDICT + _FUNDAMENTAL + _BU_STRATEGY = _BU_RULE + _FUNDAMENTAL + _BU_LC_STRATEGY = _BU_LC_RULE + _FUNDAMENTAL + + # Button callback functions: + def top_down_init(self, *e): + self.apply_strategy(self._TD_INIT, None) + + def top_down_predict(self, *e): + self.apply_strategy(self._TD_PREDICT, TopDownPredictEdgeRule) + + def bottom_up(self, *e): + self.apply_strategy(self._BU_RULE, BottomUpEdgeRule) + + def bottom_up_leftcorner(self, *e): + self.apply_strategy(self._BU_LC_RULE, BottomUpLeftCornerEdgeRule) + + def fundamental(self, *e): + self.apply_strategy(self._FUNDAMENTAL, FundamentalEdgeRule) + + def bottom_up_strategy(self, *e): + self.apply_strategy(self._BU_STRATEGY, BottomUpEdgeRule) + + def bottom_up_leftcorner_strategy(self, *e): + self.apply_strategy(self._BU_LC_STRATEGY, BottomUpLeftCornerEdgeRule) + + def top_down_strategy(self, *e): + self.apply_strategy(self._TD_STRATEGY, TopDownPredictEdgeRule) + + +def app(): + grammar = CFG.fromstring( + """ + # Grammatical productions. + S -> NP VP + VP -> VP PP | V NP | V + NP -> Det N | NP PP + PP -> P NP + # Lexical productions. 
+ NP -> 'John' | 'I' + Det -> 'the' | 'my' | 'a' + N -> 'dog' | 'cookie' | 'table' | 'cake' | 'fork' + V -> 'ate' | 'saw' + P -> 'on' | 'under' | 'with' + """ + ) + + sent = "John ate the cake on the table with a fork" + sent = "John ate the cake on the table" + tokens = list(sent.split()) + + print("grammar= (") + for rule in grammar.productions(): + print((" ", repr(rule) + ",")) + print(")") + print("tokens = %r" % tokens) + print('Calling "ChartParserApp(grammar, tokens)"...') + ChartParserApp(grammar, tokens).mainloop() + + +if __name__ == "__main__": + app() + + # Chart comparer: + # charts = ['/tmp/earley.pickle', + # '/tmp/topdown.pickle', + # '/tmp/bottomup.pickle'] + # ChartComparer(*charts).mainloop() + + # import profile + # profile.run('demo2()', '/tmp/profile.out') + # import pstats + # p = pstats.Stats('/tmp/profile.out') + # p.strip_dirs().sort_stats('time', 'cum').print_stats(60) + # p.strip_dirs().sort_stats('cum', 'time').print_stats(60) + +__all__ = ["app"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/chunkparser_app.py b/llmeval-env/lib/python3.10/site-packages/nltk/app/chunkparser_app.py new file mode 100644 index 0000000000000000000000000000000000000000..54a10a1e7db3dde0f3a18447575130e658ba3c51 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/app/chunkparser_app.py @@ -0,0 +1,1500 @@ +# Natural Language Toolkit: Regexp Chunk Parser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A graphical tool for exploring the regular expression based chunk +parser ``nltk.chunk.RegexpChunkParser``. +""" + +# Todo: Add a way to select the development set from the menubar. This +# might just need to be a selection box (conll vs treebank etc) plus +# configuration parameters to select what's being chunked (eg VP vs NP) +# and what part of the data is being used as the development set. + +import random +import re +import textwrap +import time +from tkinter import ( + Button, + Canvas, + Checkbutton, + Frame, + IntVar, + Label, + Menu, + Scrollbar, + Text, + Tk, +) +from tkinter.filedialog import askopenfilename, asksaveasfilename +from tkinter.font import Font + +from nltk.chunk import ChunkScore, RegexpChunkParser +from nltk.chunk.regexp import RegexpChunkRule +from nltk.corpus import conll2000, treebank_chunk +from nltk.draw.util import ShowText +from nltk.tree import Tree +from nltk.util import in_idle + + +class RegexpChunkApp: + """ + A graphical tool for exploring the regular expression based chunk + parser ``nltk.chunk.RegexpChunkParser``. + + See ``HELP`` for instructional text. + """ + + ##///////////////////////////////////////////////////////////////// + ## Help Text + ##///////////////////////////////////////////////////////////////// + + #: A dictionary mapping from part of speech tags to descriptions, + #: which is used in the help text. (This should probably live with + #: the conll and/or treebank corpus instead.) 
+ TAGSET = { + "CC": "Coordinating conjunction", + "PRP$": "Possessive pronoun", + "CD": "Cardinal number", + "RB": "Adverb", + "DT": "Determiner", + "RBR": "Adverb, comparative", + "EX": "Existential there", + "RBS": "Adverb, superlative", + "FW": "Foreign word", + "RP": "Particle", + "JJ": "Adjective", + "TO": "to", + "JJR": "Adjective, comparative", + "UH": "Interjection", + "JJS": "Adjective, superlative", + "VB": "Verb, base form", + "LS": "List item marker", + "VBD": "Verb, past tense", + "MD": "Modal", + "NNS": "Noun, plural", + "NN": "Noun, singular or masps", + "VBN": "Verb, past participle", + "VBZ": "Verb,3rd ps. sing. present", + "NNP": "Proper noun, singular", + "NNPS": "Proper noun plural", + "WDT": "wh-determiner", + "PDT": "Predeterminer", + "WP": "wh-pronoun", + "POS": "Possessive ending", + "WP$": "Possessive wh-pronoun", + "PRP": "Personal pronoun", + "WRB": "wh-adverb", + "(": "open parenthesis", + ")": "close parenthesis", + "``": "open quote", + ",": "comma", + "''": "close quote", + ".": "period", + "#": "pound sign (currency marker)", + "$": "dollar sign (currency marker)", + "IN": "Preposition/subord. conjunction", + "SYM": "Symbol (mathematical or scientific)", + "VBG": "Verb, gerund/present participle", + "VBP": "Verb, non-3rd ps. sing. present", + ":": "colon", + } + + #: Contents for the help box. This is a list of tuples, one for + #: each help page, where each tuple has four elements: + #: - A title (displayed as a tab) + #: - A string description of tabstops (see Tkinter.Text for details) + #: - The text contents for the help page. You can use expressions + #: like ... to colorize the text; see ``HELP_AUTOTAG`` + #: for a list of tags you can use for colorizing. + HELP = [ + ( + "Help", + "20", + "Welcome to the regular expression chunk-parser grammar editor. " + "You can use this editor to develop and test chunk parser grammars " + "based on NLTK's RegexpChunkParser class.\n\n" + # Help box. + "Use this box ('Help') to learn more about the editor; click on the " + "tabs for help on specific topics:" + "\n" + "Rules: grammar rule types\n" + "Regexps: regular expression syntax\n" + "Tags: part of speech tags\n\n" + # Grammar. + "Use the upper-left box ('Grammar') to edit your grammar. " + "Each line of your grammar specifies a single 'rule', " + "which performs an action such as creating a chunk or merging " + "two chunks.\n\n" + # Dev set. + "The lower-left box ('Development Set') runs your grammar on the " + "development set, and displays the results. " + "Your grammar's chunks are highlighted, and " + "the correct (gold standard) chunks are " + "underlined. If they " + "match, they are displayed in green; otherwise, " + "they are displayed in red. The box displays a single " + "sentence from the development set at a time; use the scrollbar or " + "the next/previous buttons view additional sentences.\n\n" + # Performance + "The lower-right box ('Evaluation') tracks the performance of " + "your grammar on the development set. The 'precision' axis " + "indicates how many of your grammar's chunks are correct; and " + "the 'recall' axis indicates how many of the gold standard " + "chunks your system generated. Typically, you should try to " + "design a grammar that scores high on both metrics. The " + "exact precision and recall of the current grammar, as well " + "as their harmonic mean (the 'f-score'), are displayed in " + "the status bar at the bottom of the window.", + ), + ( + "Rules", + "10", + "

{...regexp...}"
+            "\nChunk rule: creates new chunks from words matching "
+            "regexp.\n\n"
+            "}...regexp...{"
+            "\nStrip rule: removes words matching regexp from existing "
+            "chunks.\n\n"
+            "...regexp1...}{...regexp2..."
+            "\nSplit rule: splits chunks that match regexp1 followed by "
+            "regexp2 in two.\n\n"
+            "...regexp...{}...regexp..."
+            "\nMerge rule: joins consecutive chunks that match regexp1 "
+            "and regexp2\n",
+        ),
+        (
+            "Regexps",
+            "10 60",
+            # "Regular Expression Syntax Summary:\n\n"
+            "Pattern\t\tMatches...\n"
+            ""
+            "\t<T>\ta word with tag T "
+            "(where T may be a regexp).\n"
+            "\tx?\tan optional x\n"
+            "\tx+\ta sequence of 1 or more x's\n"
+            "\tx*\ta sequence of 0 or more x's\n"
+            "\tx|y\tx or y\n"
+            "\t.\tmatches any character\n"
+            "\t(x)\tTreats x as a group\n"
+            "\t# x...\tTreats x... "
+            "(to the end of the line) as a comment\n"
+            "\t\\C\tmatches character C "
+            "(useful when C is a special character "
+            "like + or #)\n"
+            ""
+            "\nExamples:\n"
+            ""
+            "\t<NN>\n"
+            '\t\tMatches "cow/NN"\n'
+            '\t\tMatches "green/NN"\n'
+            "\t<VB.*>\n"
+            '\t\tMatches "eating/VBG"\n'
+            '\t\tMatches "ate/VBD"\n'
+            "\t<IN><DT><NN>\n"
+            '\t\tMatches "on/IN the/DT car/NN"\n'
+            "\t<RB>?<VBD>\n"
+            '\t\tMatches "ran/VBD"\n'
+            '\t\tMatches "slowly/RB ate/VBD"\n'
+            r"\t<\#><CD> # This is a comment...\n"
+            '\t\tMatches "#/# 100/CD"\n'
+            "",
+        ),
+        (
+            "Tags",
+            "10 60",
+            "Part of Speech Tags:
\n" + + "" + + "<>" + + "\n", # this gets auto-substituted w/ self.TAGSET + ), + ] + + HELP_AUTOTAG = [ + ("red", dict(foreground="#a00")), + ("green", dict(foreground="#080")), + ("highlight", dict(background="#ddd")), + ("underline", dict(underline=True)), + ("h1", dict(underline=True)), + ("indent", dict(lmargin1=20, lmargin2=20)), + ("hangindent", dict(lmargin1=0, lmargin2=60)), + ("var", dict(foreground="#88f")), + ("regexp", dict(foreground="#ba7")), + ("match", dict(foreground="#6a6")), + ] + + ##///////////////////////////////////////////////////////////////// + ## Config Parameters + ##///////////////////////////////////////////////////////////////// + + _EVAL_DELAY = 1 + """If the user has not pressed any key for this amount of time (in + seconds), and the current grammar has not been evaluated, then + the eval demon will evaluate it.""" + + _EVAL_CHUNK = 15 + """The number of sentences that should be evaluated by the eval + demon each time it runs.""" + _EVAL_FREQ = 0.2 + """The frequency (in seconds) at which the eval demon is run""" + _EVAL_DEMON_MIN = 0.02 + """The minimum amount of time that the eval demon should take each time + it runs -- if it takes less than this time, _EVAL_CHUNK will be + modified upwards.""" + _EVAL_DEMON_MAX = 0.04 + """The maximum amount of time that the eval demon should take each time + it runs -- if it takes more than this time, _EVAL_CHUNK will be + modified downwards.""" + + _GRAMMARBOX_PARAMS = dict( + width=40, + height=12, + background="#efe", + highlightbackground="#efe", + highlightthickness=1, + relief="groove", + border=2, + wrap="word", + ) + _HELPBOX_PARAMS = dict( + width=15, + height=15, + background="#efe", + highlightbackground="#efe", + foreground="#555", + highlightthickness=1, + relief="groove", + border=2, + wrap="word", + ) + _DEVSETBOX_PARAMS = dict( + width=70, + height=10, + background="#eef", + highlightbackground="#eef", + highlightthickness=1, + relief="groove", + border=2, + wrap="word", + tabs=(30,), + ) + _STATUS_PARAMS = dict(background="#9bb", relief="groove", border=2) + _FONT_PARAMS = dict(family="helvetica", size=-20) + _FRAME_PARAMS = dict(background="#777", padx=2, pady=2, border=3) + _EVALBOX_PARAMS = dict( + background="#eef", + highlightbackground="#eef", + highlightthickness=1, + relief="groove", + border=2, + width=300, + height=280, + ) + _BUTTON_PARAMS = dict( + background="#777", activebackground="#777", highlightbackground="#777" + ) + _HELPTAB_BG_COLOR = "#aba" + _HELPTAB_FG_COLOR = "#efe" + + _HELPTAB_FG_PARAMS = dict(background="#efe") + _HELPTAB_BG_PARAMS = dict(background="#aba") + _HELPTAB_SPACER = 6 + + def normalize_grammar(self, grammar): + # Strip comments + grammar = re.sub(r"((\\.|[^#])*)(#.*)?", r"\1", grammar) + # Normalize whitespace + grammar = re.sub(" +", " ", grammar) + grammar = re.sub(r"\n\s+", r"\n", grammar) + grammar = grammar.strip() + # [xx] Hack: automatically backslash $! + grammar = re.sub(r"([^\\])\$", r"\1\\$", grammar) + return grammar + + def __init__( + self, + devset_name="conll2000", + devset=None, + grammar="", + chunk_label="NP", + tagset=None, + ): + """ + :param devset_name: The name of the development set; used for + display & for save files. If either the name 'treebank' + or the name 'conll2000' is used, and devset is None, then + devset will be set automatically. + :param devset: A list of chunked sentences + :param grammar: The initial grammar to display. + :param tagset: Dictionary from tags to string descriptions, used + for the help page. 
Defaults to ``self.TAGSET``. + """ + self._chunk_label = chunk_label + + if tagset is None: + tagset = self.TAGSET + self.tagset = tagset + + # Named development sets: + if devset is None: + if devset_name == "conll2000": + devset = conll2000.chunked_sents("train.txt") # [:100] + elif devset == "treebank": + devset = treebank_chunk.chunked_sents() # [:100] + else: + raise ValueError("Unknown development set %s" % devset_name) + + self.chunker = None + """The chunker built from the grammar string""" + + self.grammar = grammar + """The unparsed grammar string""" + + self.normalized_grammar = None + """A normalized version of ``self.grammar``.""" + + self.grammar_changed = 0 + """The last time() that the grammar was changed.""" + + self.devset = devset + """The development set -- a list of chunked sentences.""" + + self.devset_name = devset_name + """The name of the development set (for save files).""" + + self.devset_index = -1 + """The index into the development set of the first instance + that's currently being viewed.""" + + self._last_keypress = 0 + """The time() when a key was most recently pressed""" + + self._history = [] + """A list of (grammar, precision, recall, fscore) tuples for + grammars that the user has already tried.""" + + self._history_index = 0 + """When the user is scrolling through previous grammars, this + is used to keep track of which grammar they're looking at.""" + + self._eval_grammar = None + """The grammar that is being currently evaluated by the eval + demon.""" + + self._eval_normalized_grammar = None + """A normalized copy of ``_eval_grammar``.""" + + self._eval_index = 0 + """The index of the next sentence in the development set that + should be looked at by the eval demon.""" + + self._eval_score = ChunkScore(chunk_label=chunk_label) + """The ``ChunkScore`` object that's used to keep track of the score + of the current grammar on the development set.""" + + # Set up the main window. + top = self.top = Tk() + top.geometry("+50+50") + top.title("Regexp Chunk Parser App") + top.bind("", self.destroy) + + # Variable that restricts how much of the devset we look at. + self._devset_size = IntVar(top) + self._devset_size.set(100) + + # Set up all the tkinter widgets + self._init_fonts(top) + self._init_widgets(top) + self._init_bindings(top) + self._init_menubar(top) + self.grammarbox.focus() + + # If a grammar was given, then display it. 
+ if grammar: + self.grammarbox.insert("end", grammar + "\n") + self.grammarbox.mark_set("insert", "1.0") + + # Display the first item in the development set + self.show_devset(0) + self.update() + + def _init_bindings(self, top): + top.bind("", self._devset_next) + top.bind("", self._devset_prev) + top.bind("", self.toggle_show_trace) + top.bind("", self.update) + top.bind("", lambda e: self.save_grammar()) + top.bind("", lambda e: self.load_grammar()) + self.grammarbox.bind("", self.toggle_show_trace) + self.grammarbox.bind("", self._devset_next) + self.grammarbox.bind("", self._devset_prev) + + # Redraw the eval graph when the window size changes + self.evalbox.bind("", self._eval_plot) + + def _init_fonts(self, top): + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(top) + self._size.set(20) + self._font = Font(family="helvetica", size=-self._size.get()) + self._smallfont = Font( + family="helvetica", size=-(int(self._size.get() * 14 // 20)) + ) + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command(label="Reset Application", underline=0, command=self.reset) + filemenu.add_command( + label="Save Current Grammar", + underline=0, + accelerator="Ctrl-s", + command=self.save_grammar, + ) + filemenu.add_command( + label="Load Grammar", + underline=0, + accelerator="Ctrl-o", + command=self.load_grammar, + ) + + filemenu.add_command( + label="Save Grammar History", underline=13, command=self.save_history + ) + + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=16, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=20, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=34, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + devsetmenu = Menu(menubar, tearoff=0) + devsetmenu.add_radiobutton( + label="50 sentences", + variable=self._devset_size, + value=50, + command=self.set_devset_size, + ) + devsetmenu.add_radiobutton( + label="100 sentences", + variable=self._devset_size, + value=100, + command=self.set_devset_size, + ) + devsetmenu.add_radiobutton( + label="200 sentences", + variable=self._devset_size, + value=200, + command=self.set_devset_size, + ) + devsetmenu.add_radiobutton( + label="500 sentences", + variable=self._devset_size, + value=500, + command=self.set_devset_size, + ) + menubar.add_cascade(label="Development-Set", underline=0, menu=devsetmenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + def toggle_show_trace(self, *e): + if self._showing_trace: + self.show_devset() + else: + self.show_trace() + return "break" + + _SCALE_N = 5 # center on the last 5 examples. 
+ _DRAW_LINES = False + + def _eval_plot(self, *e, **config): + width = config.get("width", self.evalbox.winfo_width()) + height = config.get("height", self.evalbox.winfo_height()) + + # Clear the canvas + self.evalbox.delete("all") + + # Draw the precision & recall labels. + tag = self.evalbox.create_text( + 10, height // 2 - 10, justify="left", anchor="w", text="Precision" + ) + left, right = self.evalbox.bbox(tag)[2] + 5, width - 10 + tag = self.evalbox.create_text( + left + (width - left) // 2, + height - 10, + anchor="s", + text="Recall", + justify="center", + ) + top, bot = 10, self.evalbox.bbox(tag)[1] - 10 + + # Draw masks for clipping the plot. + bg = self._EVALBOX_PARAMS["background"] + self.evalbox.lower( + self.evalbox.create_rectangle(0, 0, left - 1, 5000, fill=bg, outline=bg) + ) + self.evalbox.lower( + self.evalbox.create_rectangle(0, bot + 1, 5000, 5000, fill=bg, outline=bg) + ) + + # Calculate the plot's scale. + if self._autoscale.get() and len(self._history) > 1: + max_precision = max_recall = 0 + min_precision = min_recall = 1 + for i in range(1, min(len(self._history), self._SCALE_N + 1)): + grammar, precision, recall, fmeasure = self._history[-i] + min_precision = min(precision, min_precision) + min_recall = min(recall, min_recall) + max_precision = max(precision, max_precision) + max_recall = max(recall, max_recall) + # if max_precision-min_precision > max_recall-min_recall: + # min_recall -= (max_precision-min_precision)/2 + # max_recall += (max_precision-min_precision)/2 + # else: + # min_precision -= (max_recall-min_recall)/2 + # max_precision += (max_recall-min_recall)/2 + # if min_recall < 0: + # max_recall -= min_recall + # min_recall = 0 + # if min_precision < 0: + # max_precision -= min_precision + # min_precision = 0 + min_precision = max(min_precision - 0.01, 0) + min_recall = max(min_recall - 0.01, 0) + max_precision = min(max_precision + 0.01, 1) + max_recall = min(max_recall + 0.01, 1) + else: + min_precision = min_recall = 0 + max_precision = max_recall = 1 + + # Draw the axis lines & grid lines + for i in range(11): + x = left + (right - left) * ( + (i / 10.0 - min_recall) / (max_recall - min_recall) + ) + y = bot - (bot - top) * ( + (i / 10.0 - min_precision) / (max_precision - min_precision) + ) + if left < x < right: + self.evalbox.create_line(x, top, x, bot, fill="#888") + if top < y < bot: + self.evalbox.create_line(left, y, right, y, fill="#888") + self.evalbox.create_line(left, top, left, bot) + self.evalbox.create_line(left, bot, right, bot) + + # Display the plot's scale + self.evalbox.create_text( + left - 3, + bot, + justify="right", + anchor="se", + text="%d%%" % (100 * min_precision), + ) + self.evalbox.create_text( + left - 3, + top, + justify="right", + anchor="ne", + text="%d%%" % (100 * max_precision), + ) + self.evalbox.create_text( + left, + bot + 3, + justify="center", + anchor="nw", + text="%d%%" % (100 * min_recall), + ) + self.evalbox.create_text( + right, + bot + 3, + justify="center", + anchor="ne", + text="%d%%" % (100 * max_recall), + ) + + # Display the scores. 
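+        # Each grammar in self._history is plotted as a point at
+        # (recall, precision).  The entry currently selected via the
+        # Prev/Next Grammar buttons is drawn as a solid green dot and its
+        # precision, recall and f-score are echoed in the status bar; older
+        # entries are drawn as faded dots, optionally joined by lines when
+        # the "Lines" checkbox is enabled.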
+ prev_x = prev_y = None + for i, (_, precision, recall, fscore) in enumerate(self._history): + x = left + (right - left) * ( + (recall - min_recall) / (max_recall - min_recall) + ) + y = bot - (bot - top) * ( + (precision - min_precision) / (max_precision - min_precision) + ) + if i == self._history_index: + self.evalbox.create_oval( + x - 2, y - 2, x + 2, y + 2, fill="#0f0", outline="#000" + ) + self.status["text"] = ( + "Precision: %.2f%%\t" % (precision * 100) + + "Recall: %.2f%%\t" % (recall * 100) + + "F-score: %.2f%%" % (fscore * 100) + ) + else: + self.evalbox.lower( + self.evalbox.create_oval( + x - 2, y - 2, x + 2, y + 2, fill="#afa", outline="#8c8" + ) + ) + if prev_x is not None and self._eval_lines.get(): + self.evalbox.lower( + self.evalbox.create_line(prev_x, prev_y, x, y, fill="#8c8") + ) + prev_x, prev_y = x, y + + _eval_demon_running = False + + def _eval_demon(self): + if self.top is None: + return + if self.chunker is None: + self._eval_demon_running = False + return + + # Note our starting time. + t0 = time.time() + + # If are still typing, then wait for them to finish. + if ( + time.time() - self._last_keypress < self._EVAL_DELAY + and self.normalized_grammar != self._eval_normalized_grammar + ): + self._eval_demon_running = True + return self.top.after(int(self._EVAL_FREQ * 1000), self._eval_demon) + + # If the grammar changed, restart the evaluation. + if self.normalized_grammar != self._eval_normalized_grammar: + # Check if we've seen this grammar already. If so, then + # just use the old evaluation values. + for (g, p, r, f) in self._history: + if self.normalized_grammar == self.normalize_grammar(g): + self._history.append((g, p, r, f)) + self._history_index = len(self._history) - 1 + self._eval_plot() + self._eval_demon_running = False + self._eval_normalized_grammar = None + return + self._eval_index = 0 + self._eval_score = ChunkScore(chunk_label=self._chunk_label) + self._eval_grammar = self.grammar + self._eval_normalized_grammar = self.normalized_grammar + + # If the grammar is empty, the don't bother evaluating it, or + # recording it in history -- the score will just be 0. + if self.normalized_grammar.strip() == "": + # self._eval_index = self._devset_size.get() + self._eval_demon_running = False + return + + # Score the next set of examples + for gold in self.devset[ + self._eval_index : min( + self._eval_index + self._EVAL_CHUNK, self._devset_size.get() + ) + ]: + guess = self._chunkparse(gold.leaves()) + self._eval_score.score(gold, guess) + + # update our index in the devset. + self._eval_index += self._EVAL_CHUNK + + # Check if we're done + if self._eval_index >= self._devset_size.get(): + self._history.append( + ( + self._eval_grammar, + self._eval_score.precision(), + self._eval_score.recall(), + self._eval_score.f_measure(), + ) + ) + self._history_index = len(self._history) - 1 + self._eval_plot() + self._eval_demon_running = False + self._eval_normalized_grammar = None + else: + progress = 100 * self._eval_index / self._devset_size.get() + self.status["text"] = "Evaluating on Development Set (%d%%)" % progress + self._eval_demon_running = True + self._adaptively_modify_eval_chunk(time.time() - t0) + self.top.after(int(self._EVAL_FREQ * 1000), self._eval_demon) + + def _adaptively_modify_eval_chunk(self, t): + """ + Modify _EVAL_CHUNK to try to keep the amount of time that the + eval demon takes between _EVAL_DEMON_MIN and _EVAL_DEMON_MAX. + + :param t: The amount of time that the eval demon took. 
+ """ + if t > self._EVAL_DEMON_MAX and self._EVAL_CHUNK > 5: + self._EVAL_CHUNK = min( + self._EVAL_CHUNK - 1, + max( + int(self._EVAL_CHUNK * (self._EVAL_DEMON_MAX / t)), + self._EVAL_CHUNK - 10, + ), + ) + elif t < self._EVAL_DEMON_MIN: + self._EVAL_CHUNK = max( + self._EVAL_CHUNK + 1, + min( + int(self._EVAL_CHUNK * (self._EVAL_DEMON_MIN / t)), + self._EVAL_CHUNK + 10, + ), + ) + + def _init_widgets(self, top): + frame0 = Frame(top, **self._FRAME_PARAMS) + frame0.grid_columnconfigure(0, weight=4) + frame0.grid_columnconfigure(3, weight=2) + frame0.grid_rowconfigure(1, weight=1) + frame0.grid_rowconfigure(5, weight=1) + + # The grammar + self.grammarbox = Text(frame0, font=self._font, **self._GRAMMARBOX_PARAMS) + self.grammarlabel = Label( + frame0, + font=self._font, + text="Grammar:", + highlightcolor="black", + background=self._GRAMMARBOX_PARAMS["background"], + ) + self.grammarlabel.grid(column=0, row=0, sticky="SW") + self.grammarbox.grid(column=0, row=1, sticky="NEWS") + + # Scroll bar for grammar + grammar_scrollbar = Scrollbar(frame0, command=self.grammarbox.yview) + grammar_scrollbar.grid(column=1, row=1, sticky="NWS") + self.grammarbox.config(yscrollcommand=grammar_scrollbar.set) + + # grammar buttons + bg = self._FRAME_PARAMS["background"] + frame3 = Frame(frame0, background=bg) + frame3.grid(column=0, row=2, sticky="EW") + Button( + frame3, + text="Prev Grammar", + command=self._history_prev, + **self._BUTTON_PARAMS, + ).pack(side="left") + Button( + frame3, + text="Next Grammar", + command=self._history_next, + **self._BUTTON_PARAMS, + ).pack(side="left") + + # Help box + self.helpbox = Text(frame0, font=self._smallfont, **self._HELPBOX_PARAMS) + self.helpbox.grid(column=3, row=1, sticky="NEWS") + self.helptabs = {} + bg = self._FRAME_PARAMS["background"] + helptab_frame = Frame(frame0, background=bg) + helptab_frame.grid(column=3, row=0, sticky="SW") + for i, (tab, tabstops, text) in enumerate(self.HELP): + label = Label(helptab_frame, text=tab, font=self._smallfont) + label.grid(column=i * 2, row=0, sticky="S") + # help_frame.grid_columnconfigure(i, weight=1) + # label.pack(side='left') + label.bind("", lambda e, tab=tab: self.show_help(tab)) + self.helptabs[tab] = label + Frame( + helptab_frame, height=1, width=self._HELPTAB_SPACER, background=bg + ).grid(column=i * 2 + 1, row=0) + self.helptabs[self.HELP[0][0]].configure(font=self._font) + self.helpbox.tag_config("elide", elide=True) + for (tag, params) in self.HELP_AUTOTAG: + self.helpbox.tag_config("tag-%s" % tag, **params) + self.show_help(self.HELP[0][0]) + + # Scroll bar for helpbox + help_scrollbar = Scrollbar(frame0, command=self.helpbox.yview) + self.helpbox.config(yscrollcommand=help_scrollbar.set) + help_scrollbar.grid(column=4, row=1, sticky="NWS") + + # The dev set + frame4 = Frame(frame0, background=self._FRAME_PARAMS["background"]) + self.devsetbox = Text(frame4, font=self._font, **self._DEVSETBOX_PARAMS) + self.devsetbox.pack(expand=True, fill="both") + self.devsetlabel = Label( + frame0, + font=self._font, + text="Development Set:", + justify="right", + background=self._DEVSETBOX_PARAMS["background"], + ) + self.devsetlabel.grid(column=0, row=4, sticky="SW") + frame4.grid(column=0, row=5, sticky="NEWS") + + # dev set scrollbars + self.devset_scroll = Scrollbar(frame0, command=self._devset_scroll) + self.devset_scroll.grid(column=1, row=5, sticky="NWS") + self.devset_xscroll = Scrollbar( + frame4, command=self.devsetbox.xview, orient="horiz" + ) + self.devsetbox["xscrollcommand"] = self.devset_xscroll.set 
+ self.devset_xscroll.pack(side="bottom", fill="x") + + # dev set buttons + bg = self._FRAME_PARAMS["background"] + frame1 = Frame(frame0, background=bg) + frame1.grid(column=0, row=7, sticky="EW") + Button( + frame1, + text="Prev Example (Ctrl-p)", + command=self._devset_prev, + **self._BUTTON_PARAMS, + ).pack(side="left") + Button( + frame1, + text="Next Example (Ctrl-n)", + command=self._devset_next, + **self._BUTTON_PARAMS, + ).pack(side="left") + self.devset_button = Button( + frame1, + text="Show example", + command=self.show_devset, + state="disabled", + **self._BUTTON_PARAMS, + ) + self.devset_button.pack(side="right") + self.trace_button = Button( + frame1, text="Show trace", command=self.show_trace, **self._BUTTON_PARAMS + ) + self.trace_button.pack(side="right") + + # evaluation box + self.evalbox = Canvas(frame0, **self._EVALBOX_PARAMS) + label = Label( + frame0, + font=self._font, + text="Evaluation:", + justify="right", + background=self._EVALBOX_PARAMS["background"], + ) + label.grid(column=3, row=4, sticky="SW") + self.evalbox.grid(column=3, row=5, sticky="NEWS", columnspan=2) + + # evaluation box buttons + bg = self._FRAME_PARAMS["background"] + frame2 = Frame(frame0, background=bg) + frame2.grid(column=3, row=7, sticky="EW") + self._autoscale = IntVar(self.top) + self._autoscale.set(False) + Checkbutton( + frame2, + variable=self._autoscale, + command=self._eval_plot, + text="Zoom", + **self._BUTTON_PARAMS, + ).pack(side="left") + self._eval_lines = IntVar(self.top) + self._eval_lines.set(False) + Checkbutton( + frame2, + variable=self._eval_lines, + command=self._eval_plot, + text="Lines", + **self._BUTTON_PARAMS, + ).pack(side="left") + Button(frame2, text="History", **self._BUTTON_PARAMS).pack(side="right") + + # The status label + self.status = Label(frame0, font=self._font, **self._STATUS_PARAMS) + self.status.grid(column=0, row=9, sticky="NEW", padx=3, pady=2, columnspan=5) + + # Help box & devset box can't be edited. + self.helpbox["state"] = "disabled" + self.devsetbox["state"] = "disabled" + + # Spacers + bg = self._FRAME_PARAMS["background"] + Frame(frame0, height=10, width=0, background=bg).grid(column=0, row=3) + Frame(frame0, height=0, width=10, background=bg).grid(column=2, row=0) + Frame(frame0, height=6, width=0, background=bg).grid(column=0, row=8) + + # pack the frame. 
+ frame0.pack(fill="both", expand=True) + + # Set up colors for the devset box + self.devsetbox.tag_config("true-pos", background="#afa", underline="True") + self.devsetbox.tag_config("false-neg", underline="True", foreground="#800") + self.devsetbox.tag_config("false-pos", background="#faa") + self.devsetbox.tag_config("trace", foreground="#666", wrap="none") + self.devsetbox.tag_config("wrapindent", lmargin2=30, wrap="none") + self.devsetbox.tag_config("error", foreground="#800") + + # And for the grammarbox + self.grammarbox.tag_config("error", background="#fec") + self.grammarbox.tag_config("comment", foreground="#840") + self.grammarbox.tag_config("angle", foreground="#00f") + self.grammarbox.tag_config("brace", foreground="#0a0") + self.grammarbox.tag_config("hangindent", lmargin1=0, lmargin2=40) + + _showing_trace = False + + def show_trace(self, *e): + self._showing_trace = True + self.trace_button["state"] = "disabled" + self.devset_button["state"] = "normal" + + self.devsetbox["state"] = "normal" + # self.devsetbox['wrap'] = 'none' + self.devsetbox.delete("1.0", "end") + self.devsetlabel["text"] = "Development Set (%d/%d)" % ( + (self.devset_index + 1, self._devset_size.get()) + ) + + if self.chunker is None: + self.devsetbox.insert("1.0", "Trace: waiting for a valid grammar.") + self.devsetbox.tag_add("error", "1.0", "end") + return # can't do anything more + + gold_tree = self.devset[self.devset_index] + rules = self.chunker.rules() + + # Calculate the tag sequence + tagseq = "\t" + charnum = [1] + for wordnum, (word, pos) in enumerate(gold_tree.leaves()): + tagseq += "%s " % pos + charnum.append(len(tagseq)) + self.charnum = { + (i, j): charnum[j] + for i in range(len(rules) + 1) + for j in range(len(charnum)) + } + self.linenum = {i: i * 2 + 2 for i in range(len(rules) + 1)} + + for i in range(len(rules) + 1): + if i == 0: + self.devsetbox.insert("end", "Start:\n") + self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c") + else: + self.devsetbox.insert("end", "Apply %s:\n" % rules[i - 1]) + self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c") + # Display the tag sequence. + self.devsetbox.insert("end", tagseq + "\n") + self.devsetbox.tag_add("wrapindent", "end -2c linestart", "end -2c") + # Run a partial parser, and extract gold & test chunks + chunker = RegexpChunkParser(rules[:i]) + test_tree = self._chunkparse(gold_tree.leaves()) + gold_chunks = self._chunks(gold_tree) + test_chunks = self._chunks(test_tree) + # Compare them. + for chunk in gold_chunks.intersection(test_chunks): + self._color_chunk(i, chunk, "true-pos") + for chunk in gold_chunks - test_chunks: + self._color_chunk(i, chunk, "false-neg") + for chunk in test_chunks - gold_chunks: + self._color_chunk(i, chunk, "false-pos") + self.devsetbox.insert("end", "Finished.\n") + self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c") + + # This is a hack, because the x-scrollbar isn't updating its + # position right -- I'm not sure what the underlying cause is + # though. 
(This is on OS X w/ python 2.5) + self.top.after(100, self.devset_xscroll.set, 0, 0.3) + + def show_help(self, tab): + self.helpbox["state"] = "normal" + self.helpbox.delete("1.0", "end") + for (name, tabstops, text) in self.HELP: + if name == tab: + text = text.replace( + "<>", + "\n".join( + "\t%s\t%s" % item + for item in sorted( + list(self.tagset.items()), + key=lambda t_w: re.match(r"\w+", t_w[0]) + and (0, t_w[0]) + or (1, t_w[0]), + ) + ), + ) + + self.helptabs[name].config(**self._HELPTAB_FG_PARAMS) + self.helpbox.config(tabs=tabstops) + self.helpbox.insert("1.0", text + "\n" * 20) + C = "1.0 + %d chars" + for (tag, params) in self.HELP_AUTOTAG: + pattern = f"(?s)(<{tag}>)(.*?)()" + for m in re.finditer(pattern, text): + self.helpbox.tag_add("elide", C % m.start(1), C % m.end(1)) + self.helpbox.tag_add( + "tag-%s" % tag, C % m.start(2), C % m.end(2) + ) + self.helpbox.tag_add("elide", C % m.start(3), C % m.end(3)) + else: + self.helptabs[name].config(**self._HELPTAB_BG_PARAMS) + self.helpbox["state"] = "disabled" + + def _history_prev(self, *e): + self._view_history(self._history_index - 1) + return "break" + + def _history_next(self, *e): + self._view_history(self._history_index + 1) + return "break" + + def _view_history(self, index): + # Bounds & sanity checking: + index = max(0, min(len(self._history) - 1, index)) + if not self._history: + return + # Already viewing the requested history item? + if index == self._history_index: + return + # Show the requested grammar. It will get added to _history + # only if they edit it (causing self.update() to get run.) + self.grammarbox["state"] = "normal" + self.grammarbox.delete("1.0", "end") + self.grammarbox.insert("end", self._history[index][0]) + self.grammarbox.mark_set("insert", "1.0") + self._history_index = index + self._syntax_highlight_grammar(self._history[index][0]) + # Record the normalized grammar & regenerate the chunker. + self.normalized_grammar = self.normalize_grammar(self._history[index][0]) + if self.normalized_grammar: + rules = [ + RegexpChunkRule.fromstring(line) + for line in self.normalized_grammar.split("\n") + ] + else: + rules = [] + self.chunker = RegexpChunkParser(rules) + # Show the score. + self._eval_plot() + # Update the devset box + self._highlight_devset() + if self._showing_trace: + self.show_trace() + # Update the grammar label + if self._history_index < len(self._history) - 1: + self.grammarlabel["text"] = "Grammar {}/{}:".format( + self._history_index + 1, + len(self._history), + ) + else: + self.grammarlabel["text"] = "Grammar:" + + def _devset_next(self, *e): + self._devset_scroll("scroll", 1, "page") + return "break" + + def _devset_prev(self, *e): + self._devset_scroll("scroll", -1, "page") + return "break" + + def destroy(self, *e): + if self.top is None: + return + self.top.destroy() + self.top = None + + def _devset_scroll(self, command, *args): + N = 1 # size of a page -- one sentence. 
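+        # Tkinter scrollbar callbacks arrive either as ("scroll", n, "units")
+        # / ("scroll", n, "pages") or as ("moveto", fraction); both forms are
+        # translated into an absolute sentence index in the development set.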
+ showing_trace = self._showing_trace + if command == "scroll" and args[1].startswith("unit"): + self.show_devset(self.devset_index + int(args[0])) + elif command == "scroll" and args[1].startswith("page"): + self.show_devset(self.devset_index + N * int(args[0])) + elif command == "moveto": + self.show_devset(int(float(args[0]) * self._devset_size.get())) + else: + assert 0, f"bad scroll command {command} {args}" + if showing_trace: + self.show_trace() + + def show_devset(self, index=None): + if index is None: + index = self.devset_index + + # Bounds checking + index = min(max(0, index), self._devset_size.get() - 1) + + if index == self.devset_index and not self._showing_trace: + return + self.devset_index = index + + self._showing_trace = False + self.trace_button["state"] = "normal" + self.devset_button["state"] = "disabled" + + # Clear the text box. + self.devsetbox["state"] = "normal" + self.devsetbox["wrap"] = "word" + self.devsetbox.delete("1.0", "end") + self.devsetlabel["text"] = "Development Set (%d/%d)" % ( + (self.devset_index + 1, self._devset_size.get()) + ) + + # Add the sentences + sample = self.devset[self.devset_index : self.devset_index + 1] + self.charnum = {} + self.linenum = {0: 1} + for sentnum, sent in enumerate(sample): + linestr = "" + for wordnum, (word, pos) in enumerate(sent.leaves()): + self.charnum[sentnum, wordnum] = len(linestr) + linestr += f"{word}/{pos} " + self.charnum[sentnum, wordnum + 1] = len(linestr) + self.devsetbox.insert("end", linestr[:-1] + "\n\n") + + # Highlight chunks in the dev set + if self.chunker is not None: + self._highlight_devset() + self.devsetbox["state"] = "disabled" + + # Update the scrollbar + first = self.devset_index / self._devset_size.get() + last = (self.devset_index + 2) / self._devset_size.get() + self.devset_scroll.set(first, last) + + def _chunks(self, tree): + chunks = set() + wordnum = 0 + for child in tree: + if isinstance(child, Tree): + if child.label() == self._chunk_label: + chunks.add((wordnum, wordnum + len(child))) + wordnum += len(child) + else: + wordnum += 1 + return chunks + + def _syntax_highlight_grammar(self, grammar): + if self.top is None: + return + self.grammarbox.tag_remove("comment", "1.0", "end") + self.grammarbox.tag_remove("angle", "1.0", "end") + self.grammarbox.tag_remove("brace", "1.0", "end") + self.grammarbox.tag_add("hangindent", "1.0", "end") + for lineno, line in enumerate(grammar.split("\n")): + if not line.strip(): + continue + m = re.match(r"(\\.|[^#])*(#.*)?", line) + comment_start = None + if m.group(2): + comment_start = m.start(2) + s = "%d.%d" % (lineno + 1, m.start(2)) + e = "%d.%d" % (lineno + 1, m.end(2)) + self.grammarbox.tag_add("comment", s, e) + for m in re.finditer("[<>{}]", line): + if comment_start is not None and m.start() >= comment_start: + break + s = "%d.%d" % (lineno + 1, m.start()) + e = "%d.%d" % (lineno + 1, m.end()) + if m.group() in "<>": + self.grammarbox.tag_add("angle", s, e) + else: + self.grammarbox.tag_add("brace", s, e) + + def _grammarcheck(self, grammar): + if self.top is None: + return + self.grammarbox.tag_remove("error", "1.0", "end") + self._grammarcheck_errs = [] + for lineno, line in enumerate(grammar.split("\n")): + line = re.sub(r"((\\.|[^#])*)(#.*)?", r"\1", line) + line = line.strip() + if line: + try: + RegexpChunkRule.fromstring(line) + except ValueError as e: + self.grammarbox.tag_add( + "error", "%s.0" % (lineno + 1), "%s.0 lineend" % (lineno + 1) + ) + self.status["text"] = "" + + def update(self, *event): + # Record when update was 
called (for grammarcheck) + if event: + self._last_keypress = time.time() + + # Read the grammar from the Text box. + self.grammar = grammar = self.grammarbox.get("1.0", "end") + + # If the grammar hasn't changed, do nothing: + normalized_grammar = self.normalize_grammar(grammar) + if normalized_grammar == self.normalized_grammar: + return + else: + self.normalized_grammar = normalized_grammar + + # If the grammar has changed, and we're looking at history, + # then stop looking at history. + if self._history_index < len(self._history) - 1: + self.grammarlabel["text"] = "Grammar:" + + self._syntax_highlight_grammar(grammar) + + # The grammar has changed; try parsing it. If it doesn't + # parse, do nothing. (flag error location?) + try: + # Note: the normalized grammar has no blank lines. + if normalized_grammar: + rules = [ + RegexpChunkRule.fromstring(line) + for line in normalized_grammar.split("\n") + ] + else: + rules = [] + except ValueError as e: + # Use the un-normalized grammar for error highlighting. + self._grammarcheck(grammar) + self.chunker = None + return + + self.chunker = RegexpChunkParser(rules) + self.grammarbox.tag_remove("error", "1.0", "end") + self.grammar_changed = time.time() + # Display the results + if self._showing_trace: + self.show_trace() + else: + self._highlight_devset() + # Start the eval demon + if not self._eval_demon_running: + self._eval_demon() + + def _highlight_devset(self, sample=None): + if sample is None: + sample = self.devset[self.devset_index : self.devset_index + 1] + + self.devsetbox.tag_remove("true-pos", "1.0", "end") + self.devsetbox.tag_remove("false-neg", "1.0", "end") + self.devsetbox.tag_remove("false-pos", "1.0", "end") + + # Run the grammar on the test cases. + for sentnum, gold_tree in enumerate(sample): + # Run the chunk parser + test_tree = self._chunkparse(gold_tree.leaves()) + # Extract gold & test chunks + gold_chunks = self._chunks(gold_tree) + test_chunks = self._chunks(test_tree) + # Compare them. + for chunk in gold_chunks.intersection(test_chunks): + self._color_chunk(sentnum, chunk, "true-pos") + for chunk in gold_chunks - test_chunks: + self._color_chunk(sentnum, chunk, "false-neg") + for chunk in test_chunks - gold_chunks: + self._color_chunk(sentnum, chunk, "false-pos") + + def _chunkparse(self, words): + try: + return self.chunker.parse(words) + except (ValueError, IndexError) as e: + # There's an error somewhere in the grammar, but we're not sure + # exactly where, so just mark the whole grammar as bad. + # E.g., this is caused by: "({})" + self.grammarbox.tag_add("error", "1.0", "end") + # Treat it as tagging nothing: + return words + + def _color_chunk(self, sentnum, chunk, tag): + start, end = chunk + self.devsetbox.tag_add( + tag, + f"{self.linenum[sentnum]}.{self.charnum[sentnum, start]}", + f"{self.linenum[sentnum]}.{self.charnum[sentnum, end] - 1}", + ) + + def reset(self): + # Clear various variables + self.chunker = None + self.grammar = None + self.normalized_grammar = None + self.grammar_changed = 0 + self._history = [] + self._history_index = 0 + # Update the on-screen display. 
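# Illustrative sketch (not from chunkparser_app.py itself): update() above builds
# a chunker by parsing one rule per non-blank line of the grammar box with
# RegexpChunkRule.fromstring(); a malformed line raises ValueError, which is how
# _grammarcheck() decides what to highlight.  The grammar text here is made up.
from nltk.chunk.regexp import RegexpChunkParser, RegexpChunkRule

grammar_text = "{<DT>?<JJ>*<NN.*>+}\n}<VBD|IN>+{"
rules, bad_lines = [], []
for lineno, line in enumerate(grammar_text.split("\n")):
    if not line.strip():
        continue
    try:
        rules.append(RegexpChunkRule.fromstring(line))
    except ValueError:
        bad_lines.append(lineno)              # the app tags these lines as "error"
chunker = RegexpChunkParser(rules, chunk_label="NP")
tree = chunker.parse([("the", "DT"), ("dog", "NN"), ("barked", "VBD")])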
+ self.grammarbox.delete("1.0", "end") + self.show_devset(0) + self.update() + # self._eval_plot() + + SAVE_GRAMMAR_TEMPLATE = ( + "# Regexp Chunk Parsing Grammar\n" + "# Saved %(date)s\n" + "#\n" + "# Development set: %(devset)s\n" + "# Precision: %(precision)s\n" + "# Recall: %(recall)s\n" + "# F-score: %(fscore)s\n\n" + "%(grammar)s\n" + ) + + def save_grammar(self, filename=None): + if not filename: + ftypes = [("Chunk Gramamr", ".chunk"), ("All files", "*")] + filename = asksaveasfilename(filetypes=ftypes, defaultextension=".chunk") + if not filename: + return + if self._history and self.normalized_grammar == self.normalize_grammar( + self._history[-1][0] + ): + precision, recall, fscore = ( + "%.2f%%" % (100 * v) for v in self._history[-1][1:] + ) + elif self.chunker is None: + precision = recall = fscore = "Grammar not well formed" + else: + precision = recall = fscore = "Not finished evaluation yet" + + with open(filename, "w") as outfile: + outfile.write( + self.SAVE_GRAMMAR_TEMPLATE + % dict( + date=time.ctime(), + devset=self.devset_name, + precision=precision, + recall=recall, + fscore=fscore, + grammar=self.grammar.strip(), + ) + ) + + def load_grammar(self, filename=None): + if not filename: + ftypes = [("Chunk Gramamr", ".chunk"), ("All files", "*")] + filename = askopenfilename(filetypes=ftypes, defaultextension=".chunk") + if not filename: + return + self.grammarbox.delete("1.0", "end") + self.update() + with open(filename) as infile: + grammar = infile.read() + grammar = re.sub( + r"^\# Regexp Chunk Parsing Grammar[\s\S]*" "F-score:.*\n", "", grammar + ).lstrip() + self.grammarbox.insert("1.0", grammar) + self.update() + + def save_history(self, filename=None): + if not filename: + ftypes = [("Chunk Gramamr History", ".txt"), ("All files", "*")] + filename = asksaveasfilename(filetypes=ftypes, defaultextension=".txt") + if not filename: + return + + with open(filename, "w") as outfile: + outfile.write("# Regexp Chunk Parsing Grammar History\n") + outfile.write("# Saved %s\n" % time.ctime()) + outfile.write("# Development set: %s\n" % self.devset_name) + for i, (g, p, r, f) in enumerate(self._history): + hdr = ( + "Grammar %d/%d (precision=%.2f%%, recall=%.2f%%, " + "fscore=%.2f%%)" + % (i + 1, len(self._history), p * 100, r * 100, f * 100) + ) + outfile.write("\n%s\n" % hdr) + outfile.write("".join(" %s\n" % line for line in g.strip().split())) + + if not ( + self._history + and self.normalized_grammar + == self.normalize_grammar(self._history[-1][0]) + ): + if self.chunker is None: + outfile.write("\nCurrent Grammar (not well-formed)\n") + else: + outfile.write("\nCurrent Grammar (not evaluated)\n") + outfile.write( + "".join(" %s\n" % line for line in self.grammar.strip().split()) + ) + + def about(self, *e): + ABOUT = "NLTK RegExp Chunk Parser Application\n" + "Written by Edward Loper" + TITLE = "About: Regular Expression Chunk Parser Application" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self.top, TITLE, ABOUT) + + def set_devset_size(self, size=None): + if size is not None: + self._devset_size.set(size) + self._devset_size.set(min(len(self.devset), self._devset_size.get())) + self.show_devset(1) + self.show_devset(0) + # what about history? Evaluated at diff dev set sizes! 
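# Illustrative sketch (not from chunkparser_app.py itself): load_grammar() above
# recovers a bare grammar from a file written by save_grammar() by stripping the
# metadata header with a regular expression.  The header values are made up.
import re

saved = (
    "# Regexp Chunk Parsing Grammar\n"
    "# Saved Mon Jan  1 12:00:00 2024\n"
    "#\n"
    "# Development set: conll2000\n"
    "# Precision: 81.0%\n"
    "# Recall: 75.0%\n"
    "# F-score: 77.9%\n\n"
    "{<DT>?<JJ>*<NN.*>+}\n"
)
grammar_only = re.sub(
    r"^\# Regexp Chunk Parsing Grammar[\s\S]*F-score:.*\n", "", saved
).lstrip()
assert grammar_only == "{<DT>?<JJ>*<NN.*>+}\n"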
+ + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._smallfont.configure(size=min(-10, -(abs(size)) * 14 // 20)) + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self.top.mainloop(*args, **kwargs) + + +def app(): + RegexpChunkApp().mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/collocations_app.py b/llmeval-env/lib/python3.10/site-packages/nltk/app/collocations_app.py new file mode 100644 index 0000000000000000000000000000000000000000..19c661368fd9e96d1a4bf1a47ebfbd07a4bb3d80 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/app/collocations_app.py @@ -0,0 +1,438 @@ +# Natural Language Toolkit: Collocations Application +# Much of the GUI code is imported from concordance.py; We intend to merge these tools together +# Copyright (C) 2001-2023 NLTK Project +# Author: Sumukh Ghodke +# URL: +# For license information, see LICENSE.TXT +# + + +import queue as q +import threading +from tkinter import ( + END, + LEFT, + SUNKEN, + Button, + Frame, + IntVar, + Label, + Menu, + OptionMenu, + Scrollbar, + StringVar, + Text, + Tk, +) +from tkinter.font import Font + +from nltk.corpus import ( + alpino, + brown, + cess_cat, + cess_esp, + floresta, + indian, + mac_morpho, + machado, + nps_chat, + sinica_treebank, + treebank, +) +from nltk.probability import FreqDist +from nltk.util import in_idle + +CORPUS_LOADED_EVENT = "<>" +ERROR_LOADING_CORPUS_EVENT = "<>" +POLL_INTERVAL = 100 + +_DEFAULT = "English: Brown Corpus (Humor)" +_CORPORA = { + "Catalan: CESS-CAT Corpus": lambda: cess_cat.words(), + "English: Brown Corpus": lambda: brown.words(), + "English: Brown Corpus (Press)": lambda: brown.words( + categories=["news", "editorial", "reviews"] + ), + "English: Brown Corpus (Religion)": lambda: brown.words(categories="religion"), + "English: Brown Corpus (Learned)": lambda: brown.words(categories="learned"), + "English: Brown Corpus (Science Fiction)": lambda: brown.words( + categories="science_fiction" + ), + "English: Brown Corpus (Romance)": lambda: brown.words(categories="romance"), + "English: Brown Corpus (Humor)": lambda: brown.words(categories="humor"), + "English: NPS Chat Corpus": lambda: nps_chat.words(), + "English: Wall Street Journal Corpus": lambda: treebank.words(), + "Chinese: Sinica Corpus": lambda: sinica_treebank.words(), + "Dutch: Alpino Corpus": lambda: alpino.words(), + "Hindi: Indian Languages Corpus": lambda: indian.words(files="hindi.pos"), + "Portuguese: Floresta Corpus (Portugal)": lambda: floresta.words(), + "Portuguese: MAC-MORPHO Corpus (Brazil)": lambda: mac_morpho.words(), + "Portuguese: Machado Corpus (Brazil)": lambda: machado.words(), + "Spanish: CESS-ESP Corpus": lambda: cess_esp.words(), +} + + +class CollocationsView: + _BACKGROUND_COLOUR = "#FFF" # white + + def __init__(self): + self.queue = q.Queue() + self.model = CollocationsModel(self.queue) + self.top = Tk() + self._init_top(self.top) + self._init_menubar() + self._init_widgets(self.top) + self.load_corpus(self.model.DEFAULT_CORPUS) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def _init_top(self, top): + top.geometry("550x650+50+50") + top.title("NLTK 
Collocations List") + top.bind("", self.destroy) + top.protocol("WM_DELETE_WINDOW", self.destroy) + top.minsize(550, 650) + + def _init_widgets(self, parent): + self.main_frame = Frame( + parent, dict(background=self._BACKGROUND_COLOUR, padx=1, pady=1, border=1) + ) + self._init_corpus_select(self.main_frame) + self._init_results_box(self.main_frame) + self._init_paging(self.main_frame) + self._init_status(self.main_frame) + self.main_frame.pack(fill="both", expand=True) + + def _init_corpus_select(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.var = StringVar(innerframe) + self.var.set(self.model.DEFAULT_CORPUS) + Label( + innerframe, + justify=LEFT, + text=" Corpus: ", + background=self._BACKGROUND_COLOUR, + padx=2, + pady=1, + border=0, + ).pack(side="left") + + other_corpora = list(self.model.CORPORA.keys()).remove( + self.model.DEFAULT_CORPUS + ) + om = OptionMenu( + innerframe, + self.var, + self.model.DEFAULT_CORPUS, + command=self.corpus_selected, + *self.model.non_default_corpora() + ) + om["borderwidth"] = 0 + om["highlightthickness"] = 1 + om.pack(side="left") + innerframe.pack(side="top", fill="x", anchor="n") + + def _init_status(self, parent): + self.status = Label( + parent, + justify=LEFT, + relief=SUNKEN, + background=self._BACKGROUND_COLOUR, + border=0, + padx=1, + pady=0, + ) + self.status.pack(side="top", anchor="sw") + + def _init_menubar(self): + self._result_size = IntVar(self.top) + menubar = Menu(self.top) + + filemenu = Menu(menubar, tearoff=0, borderwidth=0) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + rescntmenu = Menu(editmenu, tearoff=0) + rescntmenu.add_radiobutton( + label="20", + variable=self._result_size, + underline=0, + value=20, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="50", + variable=self._result_size, + underline=0, + value=50, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="100", + variable=self._result_size, + underline=0, + value=100, + command=self.set_result_size, + ) + rescntmenu.invoke(1) + editmenu.add_cascade(label="Result Count", underline=0, menu=rescntmenu) + + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + self.top.config(menu=menubar) + + def set_result_size(self, **kwargs): + self.model.result_count = self._result_size.get() + + def _init_results_box(self, parent): + innerframe = Frame(parent) + i1 = Frame(innerframe) + i2 = Frame(innerframe) + vscrollbar = Scrollbar(i1, borderwidth=1) + hscrollbar = Scrollbar(i2, borderwidth=1, orient="horiz") + self.results_box = Text( + i1, + font=Font(family="courier", size="16"), + state="disabled", + borderwidth=1, + yscrollcommand=vscrollbar.set, + xscrollcommand=hscrollbar.set, + wrap="none", + width="40", + height="20", + exportselection=1, + ) + self.results_box.pack(side="left", fill="both", expand=True) + vscrollbar.pack(side="left", fill="y", anchor="e") + vscrollbar.config(command=self.results_box.yview) + hscrollbar.pack(side="left", fill="x", expand=True, anchor="w") + hscrollbar.config(command=self.results_box.xview) + # there is no other way of avoiding the overlap of scrollbars while using pack layout manager!!! 
+ Label(i2, text=" ", background=self._BACKGROUND_COLOUR).pack( + side="left", anchor="e" + ) + i1.pack(side="top", fill="both", expand=True, anchor="n") + i2.pack(side="bottom", fill="x", anchor="s") + innerframe.pack(side="top", fill="both", expand=True) + + def _init_paging(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.prev = prev = Button( + innerframe, + text="Previous", + command=self.previous, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + prev.pack(side="left", anchor="center") + self.next = next = Button( + innerframe, + text="Next", + command=self.__next__, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + next.pack(side="right", anchor="center") + innerframe.pack(side="top", fill="y") + self.reset_current_page() + + def reset_current_page(self): + self.current_page = -1 + + def _poll(self): + try: + event = self.queue.get(block=False) + except q.Empty: + pass + else: + if event == CORPUS_LOADED_EVENT: + self.handle_corpus_loaded(event) + elif event == ERROR_LOADING_CORPUS_EVENT: + self.handle_error_loading_corpus(event) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def handle_error_loading_corpus(self, event): + self.status["text"] = "Error in loading " + self.var.get() + self.unfreeze_editable() + self.clear_results_box() + self.freeze_editable() + self.reset_current_page() + + def handle_corpus_loaded(self, event): + self.status["text"] = self.var.get() + " is loaded" + self.unfreeze_editable() + self.clear_results_box() + self.reset_current_page() + # self.next() + collocations = self.model.next(self.current_page + 1) + self.write_results(collocations) + self.current_page += 1 + + def corpus_selected(self, *args): + new_selection = self.var.get() + self.load_corpus(new_selection) + + def previous(self): + self.freeze_editable() + collocations = self.model.prev(self.current_page - 1) + self.current_page = self.current_page - 1 + self.clear_results_box() + self.write_results(collocations) + self.unfreeze_editable() + + def __next__(self): + self.freeze_editable() + collocations = self.model.next(self.current_page + 1) + self.clear_results_box() + self.write_results(collocations) + self.current_page += 1 + self.unfreeze_editable() + + def load_corpus(self, selection): + if self.model.selected_corpus != selection: + self.status["text"] = "Loading " + selection + "..." 
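# Illustrative sketch (not from collocations_app.py itself): the corpus is loaded
# on a worker thread (LoadCorpus, below), which only posts an event name onto
# self.queue; _poll() above drains that queue from the Tk mainloop thread and
# re-schedules itself with after(), so no Tk call ever happens off-thread.
# All names here are made up; a display is needed to actually run this.
import queue
import threading
import tkinter

events = queue.Queue()

def worker():
    # ... load a corpus, score collocations, etc. ...
    events.put("CORPUS_LOADED")

root = tkinter.Tk()

def poll():
    try:
        print("got", events.get(block=False))   # safe: runs in the GUI thread
    except queue.Empty:
        pass
    root.after(100, poll)                       # poll again in 100 ms

threading.Thread(target=worker, daemon=True).start()
root.after(100, poll)
# root.mainloop()   # left commented so the sketch does not block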
+ self.freeze_editable() + self.model.load_corpus(selection) + + def freeze_editable(self): + self.prev["state"] = "disabled" + self.next["state"] = "disabled" + + def clear_results_box(self): + self.results_box["state"] = "normal" + self.results_box.delete("1.0", END) + self.results_box["state"] = "disabled" + + def fire_event(self, event): + # Firing an event so that rendering of widgets happen in the mainloop thread + self.top.event_generate(event, when="tail") + + def destroy(self, *e): + if self.top is None: + return + self.top.after_cancel(self.after) + self.top.destroy() + self.top = None + + def mainloop(self, *args, **kwargs): + if in_idle(): + return + self.top.mainloop(*args, **kwargs) + + def unfreeze_editable(self): + self.set_paging_button_states() + + def set_paging_button_states(self): + if self.current_page == -1 or self.current_page == 0: + self.prev["state"] = "disabled" + else: + self.prev["state"] = "normal" + if self.model.is_last_page(self.current_page): + self.next["state"] = "disabled" + else: + self.next["state"] = "normal" + + def write_results(self, results): + self.results_box["state"] = "normal" + row = 1 + for each in results: + self.results_box.insert(str(row) + ".0", each[0] + " " + each[1] + "\n") + row += 1 + self.results_box["state"] = "disabled" + + +class CollocationsModel: + def __init__(self, queue): + self.result_count = None + self.selected_corpus = None + self.collocations = None + self.CORPORA = _CORPORA + self.DEFAULT_CORPUS = _DEFAULT + self.queue = queue + self.reset_results() + + def reset_results(self): + self.result_pages = [] + self.results_returned = 0 + + def load_corpus(self, name): + self.selected_corpus = name + self.collocations = None + runner_thread = self.LoadCorpus(name, self) + runner_thread.start() + self.reset_results() + + def non_default_corpora(self): + copy = [] + copy.extend(list(self.CORPORA.keys())) + copy.remove(self.DEFAULT_CORPUS) + copy.sort() + return copy + + def is_last_page(self, number): + if number < len(self.result_pages): + return False + return self.results_returned + ( + number - len(self.result_pages) + ) * self.result_count >= len(self.collocations) + + def next(self, page): + if (len(self.result_pages) - 1) < page: + for i in range(page - (len(self.result_pages) - 1)): + self.result_pages.append( + self.collocations[ + self.results_returned : self.results_returned + + self.result_count + ] + ) + self.results_returned += self.result_count + return self.result_pages[page] + + def prev(self, page): + if page == -1: + return [] + return self.result_pages[page] + + class LoadCorpus(threading.Thread): + def __init__(self, name, model): + threading.Thread.__init__(self) + self.model, self.name = model, name + + def run(self): + try: + words = self.model.CORPORA[self.name]() + from operator import itemgetter + + text = [w for w in words if len(w) > 2] + fd = FreqDist(tuple(text[i : i + 2]) for i in range(len(text) - 1)) + vocab = FreqDist(text) + scored = [ + ((w1, w2), fd[(w1, w2)] ** 3 / (vocab[w1] * vocab[w2])) + for w1, w2 in fd + ] + scored.sort(key=itemgetter(1), reverse=True) + self.model.collocations = list(map(itemgetter(0), scored)) + self.model.queue.put(CORPUS_LOADED_EVENT) + except Exception as e: + print(e) + self.model.queue.put(ERROR_LOADING_CORPUS_EVENT) + + +# def collocations(): +# colloc_strings = [w1 + ' ' + w2 for w1, w2 in self._collocations[:num]] + + +def app(): + c = CollocationsView() + c.mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/app/concordance_app.py b/llmeval-env/lib/python3.10/site-packages/nltk/app/concordance_app.py new file mode 100644 index 0000000000000000000000000000000000000000..8bd9a991a0a969f87bf03986a915a0af18cd9b5f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/app/concordance_app.py @@ -0,0 +1,709 @@ +# Natural Language Toolkit: Concordance Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sumukh Ghodke +# URL: +# For license information, see LICENSE.TXT + +import queue as q +import re +import threading +from tkinter import ( + END, + LEFT, + SUNKEN, + Button, + Entry, + Frame, + IntVar, + Label, + Menu, + OptionMenu, + Scrollbar, + StringVar, + Text, + Tk, +) +from tkinter.font import Font + +from nltk.corpus import ( + alpino, + brown, + cess_cat, + cess_esp, + floresta, + indian, + mac_morpho, + nps_chat, + sinica_treebank, + treebank, +) +from nltk.draw.util import ShowText +from nltk.util import in_idle + +WORD_OR_TAG = "[^/ ]+" +BOUNDARY = r"\b" + +CORPUS_LOADED_EVENT = "<>" +SEARCH_TERMINATED_EVENT = "<>" +SEARCH_ERROR_EVENT = "<>" +ERROR_LOADING_CORPUS_EVENT = "<>" + +POLL_INTERVAL = 50 + +# NB All corpora must be specified in a lambda expression so as not to be +# loaded when the module is imported. + +_DEFAULT = "English: Brown Corpus (Humor, simplified)" +_CORPORA = { + "Catalan: CESS-CAT Corpus (simplified)": lambda: cess_cat.tagged_sents( + tagset="universal" + ), + "English: Brown Corpus": lambda: brown.tagged_sents(), + "English: Brown Corpus (simplified)": lambda: brown.tagged_sents( + tagset="universal" + ), + "English: Brown Corpus (Press, simplified)": lambda: brown.tagged_sents( + categories=["news", "editorial", "reviews"], tagset="universal" + ), + "English: Brown Corpus (Religion, simplified)": lambda: brown.tagged_sents( + categories="religion", tagset="universal" + ), + "English: Brown Corpus (Learned, simplified)": lambda: brown.tagged_sents( + categories="learned", tagset="universal" + ), + "English: Brown Corpus (Science Fiction, simplified)": lambda: brown.tagged_sents( + categories="science_fiction", tagset="universal" + ), + "English: Brown Corpus (Romance, simplified)": lambda: brown.tagged_sents( + categories="romance", tagset="universal" + ), + "English: Brown Corpus (Humor, simplified)": lambda: brown.tagged_sents( + categories="humor", tagset="universal" + ), + "English: NPS Chat Corpus": lambda: nps_chat.tagged_posts(), + "English: NPS Chat Corpus (simplified)": lambda: nps_chat.tagged_posts( + tagset="universal" + ), + "English: Wall Street Journal Corpus": lambda: treebank.tagged_sents(), + "English: Wall Street Journal Corpus (simplified)": lambda: treebank.tagged_sents( + tagset="universal" + ), + "Chinese: Sinica Corpus": lambda: sinica_treebank.tagged_sents(), + "Chinese: Sinica Corpus (simplified)": lambda: sinica_treebank.tagged_sents( + tagset="universal" + ), + "Dutch: Alpino Corpus": lambda: alpino.tagged_sents(), + "Dutch: Alpino Corpus (simplified)": lambda: alpino.tagged_sents( + tagset="universal" + ), + "Hindi: Indian Languages Corpus": lambda: indian.tagged_sents(files="hindi.pos"), + "Hindi: Indian Languages Corpus (simplified)": lambda: indian.tagged_sents( + files="hindi.pos", tagset="universal" + ), + "Portuguese: Floresta Corpus (Portugal)": lambda: floresta.tagged_sents(), + "Portuguese: Floresta Corpus (Portugal, simplified)": lambda: floresta.tagged_sents( + tagset="universal" + ), + "Portuguese: MAC-MORPHO Corpus (Brazil)": lambda: 
mac_morpho.tagged_sents(), + "Portuguese: MAC-MORPHO Corpus (Brazil, simplified)": lambda: mac_morpho.tagged_sents( + tagset="universal" + ), + "Spanish: CESS-ESP Corpus (simplified)": lambda: cess_esp.tagged_sents( + tagset="universal" + ), +} + + +class ConcordanceSearchView: + _BACKGROUND_COLOUR = "#FFF" # white + + # Colour of highlighted results + _HIGHLIGHT_WORD_COLOUR = "#F00" # red + _HIGHLIGHT_WORD_TAG = "HL_WRD_TAG" + + _HIGHLIGHT_LABEL_COLOUR = "#C0C0C0" # dark grey + _HIGHLIGHT_LABEL_TAG = "HL_LBL_TAG" + + # Percentage of text left of the scrollbar position + _FRACTION_LEFT_TEXT = 0.30 + + def __init__(self): + self.queue = q.Queue() + self.model = ConcordanceSearchModel(self.queue) + self.top = Tk() + self._init_top(self.top) + self._init_menubar() + self._init_widgets(self.top) + self.load_corpus(self.model.DEFAULT_CORPUS) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def _init_top(self, top): + top.geometry("950x680+50+50") + top.title("NLTK Concordance Search") + top.bind("", self.destroy) + top.protocol("WM_DELETE_WINDOW", self.destroy) + top.minsize(950, 680) + + def _init_widgets(self, parent): + self.main_frame = Frame( + parent, dict(background=self._BACKGROUND_COLOUR, padx=1, pady=1, border=1) + ) + self._init_corpus_select(self.main_frame) + self._init_query_box(self.main_frame) + self._init_results_box(self.main_frame) + self._init_paging(self.main_frame) + self._init_status(self.main_frame) + self.main_frame.pack(fill="both", expand=True) + + def _init_menubar(self): + self._result_size = IntVar(self.top) + self._cntx_bf_len = IntVar(self.top) + self._cntx_af_len = IntVar(self.top) + menubar = Menu(self.top) + + filemenu = Menu(menubar, tearoff=0, borderwidth=0) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + rescntmenu = Menu(editmenu, tearoff=0) + rescntmenu.add_radiobutton( + label="20", + variable=self._result_size, + underline=0, + value=20, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="50", + variable=self._result_size, + underline=0, + value=50, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="100", + variable=self._result_size, + underline=0, + value=100, + command=self.set_result_size, + ) + rescntmenu.invoke(1) + editmenu.add_cascade(label="Result Count", underline=0, menu=rescntmenu) + + cntxmenu = Menu(editmenu, tearoff=0) + cntxbfmenu = Menu(cntxmenu, tearoff=0) + cntxbfmenu.add_radiobutton( + label="60 characters", + variable=self._cntx_bf_len, + underline=0, + value=60, + command=self.set_cntx_bf_len, + ) + cntxbfmenu.add_radiobutton( + label="80 characters", + variable=self._cntx_bf_len, + underline=0, + value=80, + command=self.set_cntx_bf_len, + ) + cntxbfmenu.add_radiobutton( + label="100 characters", + variable=self._cntx_bf_len, + underline=0, + value=100, + command=self.set_cntx_bf_len, + ) + cntxbfmenu.invoke(1) + cntxmenu.add_cascade(label="Before", underline=0, menu=cntxbfmenu) + + cntxafmenu = Menu(cntxmenu, tearoff=0) + cntxafmenu.add_radiobutton( + label="70 characters", + variable=self._cntx_af_len, + underline=0, + value=70, + command=self.set_cntx_af_len, + ) + cntxafmenu.add_radiobutton( + label="90 characters", + variable=self._cntx_af_len, + underline=0, + value=90, + command=self.set_cntx_af_len, + ) + cntxafmenu.add_radiobutton( + label="110 characters", + variable=self._cntx_af_len, + underline=0, + 
value=110, + command=self.set_cntx_af_len, + ) + cntxafmenu.invoke(1) + cntxmenu.add_cascade(label="After", underline=0, menu=cntxafmenu) + + editmenu.add_cascade(label="Context", underline=0, menu=cntxmenu) + + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + self.top.config(menu=menubar) + + def set_result_size(self, **kwargs): + self.model.result_count = self._result_size.get() + + def set_cntx_af_len(self, **kwargs): + self._char_after = self._cntx_af_len.get() + + def set_cntx_bf_len(self, **kwargs): + self._char_before = self._cntx_bf_len.get() + + def _init_corpus_select(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.var = StringVar(innerframe) + self.var.set(self.model.DEFAULT_CORPUS) + Label( + innerframe, + justify=LEFT, + text=" Corpus: ", + background=self._BACKGROUND_COLOUR, + padx=2, + pady=1, + border=0, + ).pack(side="left") + + other_corpora = list(self.model.CORPORA.keys()).remove( + self.model.DEFAULT_CORPUS + ) + om = OptionMenu( + innerframe, + self.var, + self.model.DEFAULT_CORPUS, + command=self.corpus_selected, + *self.model.non_default_corpora() + ) + om["borderwidth"] = 0 + om["highlightthickness"] = 1 + om.pack(side="left") + innerframe.pack(side="top", fill="x", anchor="n") + + def _init_status(self, parent): + self.status = Label( + parent, + justify=LEFT, + relief=SUNKEN, + background=self._BACKGROUND_COLOUR, + border=0, + padx=1, + pady=0, + ) + self.status.pack(side="top", anchor="sw") + + def _init_query_box(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + another = Frame(innerframe, background=self._BACKGROUND_COLOUR) + self.query_box = Entry(another, width=60) + self.query_box.pack(side="left", fill="x", pady=25, anchor="center") + self.search_button = Button( + another, + text="Search", + command=self.search, + borderwidth=1, + highlightthickness=1, + ) + self.search_button.pack(side="left", fill="x", pady=25, anchor="center") + self.query_box.bind("", self.search_enter_keypress_handler) + another.pack() + innerframe.pack(side="top", fill="x", anchor="n") + + def search_enter_keypress_handler(self, *event): + self.search() + + def _init_results_box(self, parent): + innerframe = Frame(parent) + i1 = Frame(innerframe) + i2 = Frame(innerframe) + vscrollbar = Scrollbar(i1, borderwidth=1) + hscrollbar = Scrollbar(i2, borderwidth=1, orient="horiz") + self.results_box = Text( + i1, + font=Font(family="courier", size="16"), + state="disabled", + borderwidth=1, + yscrollcommand=vscrollbar.set, + xscrollcommand=hscrollbar.set, + wrap="none", + width="40", + height="20", + exportselection=1, + ) + self.results_box.pack(side="left", fill="both", expand=True) + self.results_box.tag_config( + self._HIGHLIGHT_WORD_TAG, foreground=self._HIGHLIGHT_WORD_COLOUR + ) + self.results_box.tag_config( + self._HIGHLIGHT_LABEL_TAG, foreground=self._HIGHLIGHT_LABEL_COLOUR + ) + vscrollbar.pack(side="left", fill="y", anchor="e") + vscrollbar.config(command=self.results_box.yview) + hscrollbar.pack(side="left", fill="x", expand=True, anchor="w") + hscrollbar.config(command=self.results_box.xview) + # there is no other way of avoiding the overlap of scrollbars while using pack layout manager!!! 
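# Illustrative sketch (not from concordance_app.py itself): the Before/After
# menus above set self._char_before / self._char_after; write_results() further
# below centres every hit by left-padding the sentence (pad()) and slicing a
# fixed window.  The sentence and widths here are made up.
sent = "the quick brown fox jumps over the lazy dog"
pos1 = sent.index("fox")                 # start of the matched term
char_before, char_after = 20, 20

if pos1 < char_before:                   # pad so the match can be centred
    padding = " " * (char_before - pos1)
    sent, pos1 = padding + sent, pos1 + len(padding)
line = sent[pos1 - char_before : pos1 + char_after]
# `line` is a fixed-width window whose match always starts at column 20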
+ Label(i2, text=" ", background=self._BACKGROUND_COLOUR).pack( + side="left", anchor="e" + ) + i1.pack(side="top", fill="both", expand=True, anchor="n") + i2.pack(side="bottom", fill="x", anchor="s") + innerframe.pack(side="top", fill="both", expand=True) + + def _init_paging(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.prev = prev = Button( + innerframe, + text="Previous", + command=self.previous, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + prev.pack(side="left", anchor="center") + self.next = next = Button( + innerframe, + text="Next", + command=self.__next__, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + next.pack(side="right", anchor="center") + innerframe.pack(side="top", fill="y") + self.current_page = 0 + + def previous(self): + self.clear_results_box() + self.freeze_editable() + self.model.prev(self.current_page - 1) + + def __next__(self): + self.clear_results_box() + self.freeze_editable() + self.model.next(self.current_page + 1) + + def about(self, *e): + ABOUT = "NLTK Concordance Search Demo\n" + TITLE = "About: NLTK Concordance Search Demo" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE, parent=self.main_frame).show() + except: + ShowText(self.top, TITLE, ABOUT) + + def _bind_event_handlers(self): + self.top.bind(CORPUS_LOADED_EVENT, self.handle_corpus_loaded) + self.top.bind(SEARCH_TERMINATED_EVENT, self.handle_search_terminated) + self.top.bind(SEARCH_ERROR_EVENT, self.handle_search_error) + self.top.bind(ERROR_LOADING_CORPUS_EVENT, self.handle_error_loading_corpus) + + def _poll(self): + try: + event = self.queue.get(block=False) + except q.Empty: + pass + else: + if event == CORPUS_LOADED_EVENT: + self.handle_corpus_loaded(event) + elif event == SEARCH_TERMINATED_EVENT: + self.handle_search_terminated(event) + elif event == SEARCH_ERROR_EVENT: + self.handle_search_error(event) + elif event == ERROR_LOADING_CORPUS_EVENT: + self.handle_error_loading_corpus(event) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def handle_error_loading_corpus(self, event): + self.status["text"] = "Error in loading " + self.var.get() + self.unfreeze_editable() + self.clear_all() + self.freeze_editable() + + def handle_corpus_loaded(self, event): + self.status["text"] = self.var.get() + " is loaded" + self.unfreeze_editable() + self.clear_all() + self.query_box.focus_set() + + def handle_search_terminated(self, event): + # todo: refactor the model such that it is less state sensitive + results = self.model.get_results() + self.write_results(results) + self.status["text"] = "" + if len(results) == 0: + self.status["text"] = "No results found for " + self.model.query + else: + self.current_page = self.model.last_requested_page + self.unfreeze_editable() + self.results_box.xview_moveto(self._FRACTION_LEFT_TEXT) + + def handle_search_error(self, event): + self.status["text"] = "Error in query " + self.model.query + self.unfreeze_editable() + + def corpus_selected(self, *args): + new_selection = self.var.get() + self.load_corpus(new_selection) + + def load_corpus(self, selection): + if self.model.selected_corpus != selection: + self.status["text"] = "Loading " + selection + "..." 
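# Illustrative sketch (not from concordance_app.py itself): sentences are stored
# as "word/tag word/tag ..." strings, and processed_query() (further below) turns
# each query term into a regex over that format -- an ALL-CAPS term matches a
# tag, "word/TAG" matches both, and anything else matches a word.  The query and
# sentence here are made up.
import re

WORD_OR_TAG = "[^/ ]+"
BOUNDARY = r"\b"

def term_to_regex(term):
    term = re.sub(r"\.", r"[^/ ]", term)          # '.' stands for any character
    if re.match("[A-Z]+$", term):                 # bare tag, e.g. "VBD"
        return BOUNDARY + WORD_OR_TAG + "/" + term + BOUNDARY
    elif "/" in term:                             # explicit word/tag pair
        return BOUNDARY + term + BOUNDARY
    return BOUNDARY + term + "/" + WORD_OR_TAG + BOUNDARY

pattern = " ".join(term_to_regex(t) for t in "dog VBD".split())
assert re.search(pattern, "the/DT dog/NN barked/VBD")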
+ self.freeze_editable() + self.model.load_corpus(selection) + + def search(self): + self.current_page = 0 + self.clear_results_box() + self.model.reset_results() + query = self.query_box.get() + if len(query.strip()) == 0: + return + self.status["text"] = "Searching for " + query + self.freeze_editable() + self.model.search(query, self.current_page + 1) + + def write_results(self, results): + self.results_box["state"] = "normal" + row = 1 + for each in results: + sent, pos1, pos2 = each[0].strip(), each[1], each[2] + if len(sent) != 0: + if pos1 < self._char_before: + sent, pos1, pos2 = self.pad(sent, pos1, pos2) + sentence = sent[pos1 - self._char_before : pos1 + self._char_after] + if not row == len(results): + sentence += "\n" + self.results_box.insert(str(row) + ".0", sentence) + word_markers, label_markers = self.words_and_labels(sent, pos1, pos2) + for marker in word_markers: + self.results_box.tag_add( + self._HIGHLIGHT_WORD_TAG, + str(row) + "." + str(marker[0]), + str(row) + "." + str(marker[1]), + ) + for marker in label_markers: + self.results_box.tag_add( + self._HIGHLIGHT_LABEL_TAG, + str(row) + "." + str(marker[0]), + str(row) + "." + str(marker[1]), + ) + row += 1 + self.results_box["state"] = "disabled" + + def words_and_labels(self, sentence, pos1, pos2): + search_exp = sentence[pos1:pos2] + words, labels = [], [] + labeled_words = search_exp.split(" ") + index = 0 + for each in labeled_words: + if each == "": + index += 1 + else: + word, label = each.split("/") + words.append( + (self._char_before + index, self._char_before + index + len(word)) + ) + index += len(word) + 1 + labels.append( + (self._char_before + index, self._char_before + index + len(label)) + ) + index += len(label) + index += 1 + return words, labels + + def pad(self, sent, hstart, hend): + if hstart >= self._char_before: + return sent, hstart, hend + d = self._char_before - hstart + sent = "".join([" "] * d) + sent + return sent, hstart + d, hend + d + + def destroy(self, *e): + if self.top is None: + return + self.top.after_cancel(self.after) + self.top.destroy() + self.top = None + + def clear_all(self): + self.query_box.delete(0, END) + self.model.reset_query() + self.clear_results_box() + + def clear_results_box(self): + self.results_box["state"] = "normal" + self.results_box.delete("1.0", END) + self.results_box["state"] = "disabled" + + def freeze_editable(self): + self.query_box["state"] = "disabled" + self.search_button["state"] = "disabled" + self.prev["state"] = "disabled" + self.next["state"] = "disabled" + + def unfreeze_editable(self): + self.query_box["state"] = "normal" + self.search_button["state"] = "normal" + self.set_paging_button_states() + + def set_paging_button_states(self): + if self.current_page == 0 or self.current_page == 1: + self.prev["state"] = "disabled" + else: + self.prev["state"] = "normal" + if self.model.has_more_pages(self.current_page): + self.next["state"] = "normal" + else: + self.next["state"] = "disabled" + + def fire_event(self, event): + # Firing an event so that rendering of widgets happen in the mainloop thread + self.top.event_generate(event, when="tail") + + def mainloop(self, *args, **kwargs): + if in_idle(): + return + self.top.mainloop(*args, **kwargs) + + +class ConcordanceSearchModel: + def __init__(self, queue): + self.queue = queue + self.CORPORA = _CORPORA + self.DEFAULT_CORPUS = _DEFAULT + self.selected_corpus = None + self.reset_query() + self.reset_results() + self.result_count = None + self.last_sent_searched = 0 + + def 
non_default_corpora(self): + copy = [] + copy.extend(list(self.CORPORA.keys())) + copy.remove(self.DEFAULT_CORPUS) + copy.sort() + return copy + + def load_corpus(self, name): + self.selected_corpus = name + self.tagged_sents = [] + runner_thread = self.LoadCorpus(name, self) + runner_thread.start() + + def search(self, query, page): + self.query = query + self.last_requested_page = page + self.SearchCorpus(self, page, self.result_count).start() + + def next(self, page): + self.last_requested_page = page + if len(self.results) < page: + self.search(self.query, page) + else: + self.queue.put(SEARCH_TERMINATED_EVENT) + + def prev(self, page): + self.last_requested_page = page + self.queue.put(SEARCH_TERMINATED_EVENT) + + def reset_results(self): + self.last_sent_searched = 0 + self.results = [] + self.last_page = None + + def reset_query(self): + self.query = None + + def set_results(self, page, resultset): + self.results.insert(page - 1, resultset) + + def get_results(self): + return self.results[self.last_requested_page - 1] + + def has_more_pages(self, page): + if self.results == [] or self.results[0] == []: + return False + if self.last_page is None: + return True + return page < self.last_page + + class LoadCorpus(threading.Thread): + def __init__(self, name, model): + threading.Thread.__init__(self) + self.model, self.name = model, name + + def run(self): + try: + ts = self.model.CORPORA[self.name]() + self.model.tagged_sents = [ + " ".join(w + "/" + t for (w, t) in sent) for sent in ts + ] + self.model.queue.put(CORPUS_LOADED_EVENT) + except Exception as e: + print(e) + self.model.queue.put(ERROR_LOADING_CORPUS_EVENT) + + class SearchCorpus(threading.Thread): + def __init__(self, model, page, count): + self.model, self.count, self.page = model, count, page + threading.Thread.__init__(self) + + def run(self): + q = self.processed_query() + sent_pos, i, sent_count = [], 0, 0 + for sent in self.model.tagged_sents[self.model.last_sent_searched :]: + try: + m = re.search(q, sent) + except re.error: + self.model.reset_results() + self.model.queue.put(SEARCH_ERROR_EVENT) + return + if m: + sent_pos.append((sent, m.start(), m.end())) + i += 1 + if i > self.count: + self.model.last_sent_searched += sent_count - 1 + break + sent_count += 1 + if self.count >= len(sent_pos): + self.model.last_sent_searched += sent_count - 1 + self.model.last_page = self.page + self.model.set_results(self.page, sent_pos) + else: + self.model.set_results(self.page, sent_pos[:-1]) + self.model.queue.put(SEARCH_TERMINATED_EVENT) + + def processed_query(self): + new = [] + for term in self.model.query.split(): + term = re.sub(r"\.", r"[^/ ]", term) + if re.match("[A-Z]+$", term): + new.append(BOUNDARY + WORD_OR_TAG + "/" + term + BOUNDARY) + elif "/" in term: + new.append(BOUNDARY + term + BOUNDARY) + else: + new.append(BOUNDARY + term + "/" + WORD_OR_TAG + BOUNDARY) + return " ".join(new) + + +def app(): + d = ConcordanceSearchView() + d.mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/nemo_app.py b/llmeval-env/lib/python3.10/site-packages/nltk/app/nemo_app.py new file mode 100644 index 0000000000000000000000000000000000000000..df0ceb1be59e40bb48289f4f1411653789ca7a17 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/app/nemo_app.py @@ -0,0 +1,163 @@ +# Finding (and Replacing) Nemo, Version 1.1, Aristide Grange 2006/06/06 +# https://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496783 + +""" +Finding (and 
Replacing) Nemo + +Instant Regular Expressions +Created by Aristide Grange +""" +import itertools +import re +from tkinter import SEL_FIRST, SEL_LAST, Frame, Label, PhotoImage, Scrollbar, Text, Tk + +windowTitle = "Finding (and Replacing) Nemo" +initialFind = r"n(.*?)e(.*?)m(.*?)o" +initialRepl = r"M\1A\2K\3I" +initialText = """\ +Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. +Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. +Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. +Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. +""" +images = { + "FIND": "R0lGODlhMAAiAPcAMf/////37//35//n1v97Off///f/9/f37/fexvfOvfeEQvd7QvdrQvdrKfdaKfdSMfdSIe/v9+/v7+/v5+/n3u/e1u/Wxu/Gre+1lO+tnO+thO+Ua+97Y+97Oe97Me9rOe9rMe9jOe9jMe9jIe9aMefe5+fe3ufezuece+eEWudzQudaIedSIedKMedKIedCKedCId7e1t7Wzt7Oxt7Gvd69vd69rd61pd6ljN6UjN6Ue96EY95zY95rUt5rQt5jMd5SId5KIdbn59be3tbGztbGvda1rdaEa9Z7a9Z7WtZzQtZzOdZzMdZjMdZaQtZSOdZSMdZKMdZCKdZCGNY5Ic7W1s7Oxs7Gtc69xs69tc69rc6tpc6llM6clM6cjM6Ue86EY85zWs5rSs5SKc5KKc5KGMa1tcatrcalvcalnMaUpcZ7c8ZzMcZrUsZrOcZrMcZaQsZSOcZSMcZKMcZCKcZCGMYxIcYxGL3Gxr21tb21rb2lpb2crb2cjL2UnL2UlL2UhL2Ec717Wr17Ur1zWr1rMb1jUr1KMb1KIb1CIb0xGLWlrbWlpbWcnLWEe7V7c7VzY7VzUrVSKbVKMbVCMbVCIbU5KbUxIbUxEK2lta2lpa2clK2UjK2MnK2MlK2Ea617e61za61rY61rMa1jSq1aUq1aSq1SQq1KKa0xEKWlnKWcnKWUnKWUhKWMjKWEa6Vza6VrWqVjMaVaUqVaKaVSMaVCMaU5KaUxIaUxGJyclJyMe5yElJyEhJx7e5x7c5xrOZxaQpxSOZxKQpw5IZSMhJSEjJR7c5Rre5RrY5RrUpRSQpRSKZRCOZRCKZQxKZQxIYyEhIx7hIxza4xzY4xrc4xjUoxaa4xaUoxSSoxKQoxCMYw5GIR7c4Rzc4Rre4RjY4RjWoRaa4RSWoRSUoRSMYRKQoRCOYQ5KYQxIXtra3taY3taSntKOXtCMXtCKXNCMXM5MXMxIWtSUmtKSmtKQmtCOWs5MWs5KWs5IWNCKWMxIVIxKUIQCDkhGAAAACH+AS4ALAAAAAAwACIAAAj/AAEIHEiwoMGDCBMqXMiwoUOHMqxIeEiRoZVp7cpZ29WrF4WKIAd208dGAQEVbiTVChUjZMU9+pYQmPmBZpxgvVw+nDdKwQICNVcIXQEkTgKdDdUJ+/nggVAXK1xI3TEA6UIr2uJ8iBqka1cXXTlkqGoVYRZ7iLyqBSs0iiEtZQVKiDGxBI1u3NR6lUpGDKg8MSgEQCphU7Z22vhg0dILXRCpYLuSCcYJT4wqXASBQaBzU7klHxC127OHD7ZDJFpERqRt0x5OnwQpmZmCLEhrbgg4WIHO1RY+nbQ9WRGEDJlmnXwJ+9FBgXMCIzYMVijBBgYMFxIMqJBMSc0Ht7qh/+Gjpte2rnYsYeNlasWIBgQ6yCewIoPCCp/cyP/wgUGbXVu0QcADZNBDnh98gHMLGXYQUw02w61QU3wdbNWDbQVVIIhMMwFF1DaZiPLBAy7E04kafrjSizaK3LFNNc0AAYRQDsAHHQlJ2IDQJ2zE1+EKDjiAijShkECCC8Qgw4cr7ZgyzC2WaHPNLWWoNeNWPiRAw0QFWQFMhz8C+QQ20yAiVSrY+MGOJCsccsst2GCzoHFxxEGGC+8hgs0MB2kyCpgzrUDCbs1Es41UdtATHFFkWELMOtsoQsYcgvRRQw5RSDgGOjZMR1AvPQIq6KCo9AKOJWDd48owQlHR4DXEKP9iyRrK+DNNBTu4RwIPFeTAGUG7hAomkA84gEg1m6ADljy9PBKGGJY4ig0xlsTBRSn98FOFDUC8pwQOPkgHbCGAzhTkA850s0c7j6Hjix9+gBIrMXLeAccWXUCyiRBcBEECdEJ98KtAqtBCYQc/OvDENnl4gYpUxISCIjjzylkGGV9okYUVNogRhAOBuuAEhjG08wOgDYzAgA5bCjIoCe5uwUk80RKTTSppPREGGGCIISOQ9AXBg6cC6WIywvCpoMHAocRBwhP4bHLFLujYkV42xNxBRhAyGrc113EgYtRBerDDDHMoDCyQEL5sE083EkgwQyBhxGFHMM206DUixGxmE0wssbQjCQ4JCaFKFwgQTVAVVhQUwAVPIFJKrHfYYRwi6OCDzzuIJIFhXAD0EccPsYRiSyqKSDpFcWSMIcZRoBMkQyA2BGZDIKSYcggih8TRRg4VxM5QABVYYLxgwiev/PLMCxQQADs=", + "find": 
"R0lGODlhMAAiAPQAMf////f39+/v7+fn597e3tbW1s7OzsbGxr29vbW1ta2traWlpZycnJSUlIyMjISEhHt7e3Nzc2tra2NjY1paWlJSUkpKSkJCQjk5OSkpKRgYGAAAAAAAAAAAAAAAAAAAACH+AS4ALAAAAAAwACIAAAX/ICCOZGmeaKquY2AGLiuvMCAUBuHWc48Kh0iFInEYCb4kSQCxPBiMxkMigRQEgJiSFVBYHNGG0RiZOHjblWAiiY4fkDhEYoBp06dAWfyAQyKAgAwDaHgnB0RwgYASgQ0IhDuGJDAIFhMRVFSLEX8QCJJ4AQM5AgQHTZqqjBAOCQQEkWkCDRMUFQsICQ4Vm5maEwwHOAsPDTpKMAsUDlO4CssTcb+2DAp8YGCyNFoCEsZwFQ3QDRTTVBRS0g1QbgsCd5QAAwgIBwYFAwStzQ8UEdCKVchky0yVBw7YuXkAKt4IAg74vXHVagqFBRgXSCAyYWAVCH0SNhDTitCJfSL5/4RbAPKPhQYYjVCYYAvCP0BxEDaD8CheAAHNwqh8MMGPSwgLeJWhwHSjqkYI+xg4MMCEgQjtRvZ7UAYCpghMF7CxONOWJkYR+rCpY4JlVpVxKDwYWEactKW9mhYRtqCTgwgWEMArERSK1j5q//6T8KXonFsShpiJkAECgQYVjykooCVA0JGHEWNiYCHThTFeb3UkoiCCBgwGEKQ1kuAJlhFwhA71h5SukwUM5qqeCSGBgicEWkfNiWSERtBad4JNIBaQBaQah1ToyGZBAnsIuIJs1qnqiAIVjIE2gnAB1T5x0icgzXT79ipgMOOEH6HBbREBMJCeGEY08IoLAkzB1YYFwjxwSUGSNULQJnNUwRYlCcyEkALIxECAP9cNMMABYpRhy3ZsSLDaR70oUAiABGCkAxowCGCAAfDYIQACXoElGRsdXWDBdg2Y90IWktDYGYAB9PWHP0PMdFZaF07SQgAFNDAMAQg0QA1UC8xoZQl22JGFPgWkOUCOL1pZQyhjxinnnCWEAAA7", + "REPL": "R0lGODlhMAAjAPcAMf/////3//+lOf+UKf+MEPf///f39/f35/fv7/ecQvecOfecKfeUIfeUGPeUEPeUCPeMAO/37+/v9+/v3u/n3u/n1u+9jO+9c++1hO+ta++tY++tWu+tUu+tSu+lUu+lQu+lMe+UMe+UKe+UGO+UEO+UAO+MCOfv5+fvxufn7+fn5+fnzue9lOe9c+e1jOe1e+e1c+e1a+etWuetUuelQuecOeeUUueUCN7e597e3t7e1t7ezt7evd7Wzt7Oxt7Ovd7Otd7Opd7OnN7Gtd7Gpd69lN61hN6ta96lStbextberdbW3tbWztbWxtbOvdbOrda1hNalUtaECM7W1s7Ozs7Oxs7Otc7Gxs7Gvc69tc69rc69pc61jM6lc8bWlMbOvcbGxsbGpca9tca9pca1nMaMAL3OhL3Gtb21vb21tb2tpb2tnL2tlLW9tbW9pbW9e7W1pbWtjLWcKa21nK2tra2tnK2tlK2lpa2llK2ljK2le6WlnKWljKWUe6WUc6WUY5y1QpyclJycjJychJyUc5yMY5StY5SUe5SMhJSMe5SMc5SMWpSEa5SESoyUe4yMhIyEY4SlKYScWoSMe4SEe4SEa4R7c4R7Y3uMY3uEe3t7e3t7c3tza3tzY3trKXtjIXOcAHOUMXOEY3Nzc3NzWnNrSmulCGuUMWuMGGtzWmtrY2taMWtaGGOUOWOMAGNzUmNjWmNjSmNaUmNaQmNaOWNaIWNSCFqcAFpjUlpSMVpSIVpSEFpKKVKMAFJSUlJSSlJSMVJKMVJKGFJKAFI5CEqUAEqEAEpzQkpKIUpCQkpCGEpCAEo5EEoxAEJjOUJCOUJCAEI5IUIxADl7ADlaITlCOTkxMTkxKTkxEDkhADFzADFrGDE5OTExADEpEClrCCkxKSkpKSkpISkpACkhCCkhACkYACFzACFrACEhCCEYGBhjEBhjABghABgYCBgYABgQEBgQABAQABAIAAhjAAhSAAhKAAgIEAgICABaAABCAAAhAAAQAAAIAAAAAAAAACH+AS4ALAAAAAAwACMAAAj/AAEIHEiwoMGDCBMqXMiwocOHAA4cgEixIIIJO3JMmAjADIqKFU/8MHIkg5EgYXx4iaTkI0iHE6wE2TCggYILQayEAgXIy8uGCKz8sDCAQAMRG3iEcXULlJkJPwli3OFjh9UdYYLE6NBhA04UXHoVA2XoTZgfPKBWlOBDphAWOdfMcfMDLloeO3hIMjbWVCQ5Fn6E2UFxgpsgFjYIEBADrZU6luqEEfqjTqpt54z1uuWqTIcgWAk7PECGzIUQDRosDmxlUrVJkwQJkqVuX71v06YZcyUlROAdbnLAJKPFyAYFAhoMwFlnEh0rWkpz8raPHm7dqKKc/KFFkBUrVn1M/ziBcEIeLUEQI8/AYk0i9Be4sqjsrN66c9/OnbobhpR3HkIUoZ0WVnBE0AGLFKKFD0HAFUQe77HQgQI1hRBDEHMcY0899bBzihZuCPILJD8EccEGGzwAQhFaUHHQH82sUkgeNHISDBk8WCCCcsqFUEQWmOyzjz3sUGNNOO5Y48YOEgowAAQhnBScQV00k82V47jzjy9CXZBcjziFoco//4CDiSOyhPMPLkJZkEBqJmRQxA9uZGEQD8Ncmc044/zzDF2IZQBCCDYE8QMZz/iiCSx0neHGI7BIhhhNn+1gxRpokEcQAp7seWU7/PwTyxqG/iCEEVzQmUombnDRxRExzP9nBR2PCKLFD3UJwcMPa/SRqUGNWJmNOVn+M44ukMRB4KGcWDNLVhuUMEIJAlzwA3DJBHMJIXm4sQYhqyxCRQQGLSIsn1qac2UzysQSyzX/hLMGD0F0IMCODYAQBA9W/PKPOcRiw0wzwxTiokF9dLMnuv/Mo+fCZF7jBr0xbDDCACWEYKgb1vzjDp/jZNOMLX0IZxAKq2TZTjtaOjwOsXyG+s8sZJTIQsUdIGHoJPf8w487QI/TDSt5mGwQFZxc406o8HiDJchk/ltLHpSlJwSvz5DpTjvmuGNOM57koelBOaAhiCaaPBLL0wwbm003peRBnBZqJMJL1ECz/HXYYx/NdAIOOVCxQyLorswymU93o0wuwfAiTDNR/xz0MLXU0XdCE+UwSTRZAq2lsSATu+4wkGvt+TjNzPLrQyegAUku2Hij5cd8LhxyM8QIg4w18HgcdC6BTBFSDmfQqsovttveDcG7lFLHI75cE841sARCxeWsnxC4G9HADPK6ywzDCRqBo0EHHWhMgT1IJzziNci1N7PMKnSYfML96/90AiJKey/0KtbLX1QK0rrNnQ541xugQ7SHhkXBghN0SKACWRc4KlAhBwKcIOYymJCAAAA7", + "repl": 
"R0lGODlhMAAjAPQAMf////f39+/v7+fn597e3tbW1s7OzsbGxr29vbW1ta2traWlpZycnJSUlIyMjISEhHt7e3Nzc2tra2NjY1paWlJSUkpKSkJCQjk5OTExMSkpKSEhIRgYGBAQEAgICAAAACH+AS4ALAAAAAAwACMAAAX/ICCOZGmeaKqubOu+gCDANBkIQ1EMQhAghFptYEAkEgjEwXBo7ISvweGgWCwUysPjwTgEoCafTySYIhYMxgLBjEQgCULvCw0QdAZdoVhUIJUFChISEAxYeQM1N1OMTAp+UwZ5eA4TEhFbDWYFdC4ECVMJjwl5BwsQa0umEhUVlhESDgqlBp0rAn5nVpBMDxeZDRQbHBgWFBSWDgtLBnFjKwRYCI9VqQsPs0YKEcMXFq0UEalFDWx4BAO2IwPjppAKDkrTWKYUGd7fEJJFEZpM00cOzCgh4EE8SaoWxKNixQooBRMyZMBwAYIRBhUgLDGS4MoBJeoANMhAgQsaCRZm/5lqaCUJhA4cNHjDoKEDBlJUHqkBlYBTiQUZNGjYMMxDhY3VWk6R4MEDBoMUak5AqoYBqANIBo4wcGGDUKIeLlzVZmWJggsVIkwAZaQSA3kdZzlKkIiEAAlDvW5oOkEBs488JTw44oeUIwdvVTFTUK7uiAAPgubt8GFDhQepqETAQCFU1UMGzlqAgFhUsAcCS0AO6lUDhw8xNRSbENGDhgWSHjWUe6ACbKITizmopZoBa6KvOwj9uuHDhwxyj3xekgDDhw5EvWKo0IB4iQLCOCC/njc7ZQ8UeGvza+ABZZgcxJNc4FO1gc0cOsCUrHevc8tdIMTIAhc4F198G2Qwwd8CBIQUAwEINABBBJUwR9R5wElgVRLwWODBBx4cGB8GEzDQIAo33CGJA8gh+JoH/clUgQU0YvDhdfmJdwEFC6Sjgg8yEPAABsPkh2F22cl2AQbn6QdTghTQ5eAJAQyQAAQV0MSBB9gRVZ4GE1mw5JZOAmiAVi1UWcAZDrDyZXYTeaOhA/bIVuIBPtKQ4h7ViYekUPdcEAEbzTzCRp5CADmAAwj+ORGPBcgwAAHo9ABGCYtm0ChwFHShlRiXhmHlkAcCiOeUodqQw5W0oXLAiamy4MOkjOyAaqxUymApDCEAADs=", +} +colors = ["#FF7B39", "#80F121"] +emphColors = ["#DAFC33", "#F42548"] +fieldParams = { + "height": 3, + "width": 70, + "font": ("monaco", 14), + "highlightthickness": 0, + "borderwidth": 0, + "background": "white", +} +textParams = { + "bg": "#F7E0D4", + "fg": "#2321F1", + "highlightthickness": 0, + "width": 1, + "height": 10, + "font": ("verdana", 16), + "wrap": "word", +} + + +class Zone: + def __init__(self, image, initialField, initialText): + frm = Frame(root) + frm.config(background="white") + self.image = PhotoImage(format="gif", data=images[image.upper()]) + self.imageDimmed = PhotoImage(format="gif", data=images[image]) + self.img = Label(frm) + self.img.config(borderwidth=0) + self.img.pack(side="left") + self.fld = Text(frm, **fieldParams) + self.initScrollText(frm, self.fld, initialField) + frm = Frame(root) + self.txt = Text(frm, **textParams) + self.initScrollText(frm, self.txt, initialText) + for i in range(2): + self.txt.tag_config(colors[i], background=colors[i]) + self.txt.tag_config("emph" + colors[i], foreground=emphColors[i]) + + def initScrollText(self, frm, txt, contents): + scl = Scrollbar(frm) + scl.config(command=txt.yview) + scl.pack(side="right", fill="y") + txt.pack(side="left", expand=True, fill="x") + txt.config(yscrollcommand=scl.set) + txt.insert("1.0", contents) + frm.pack(fill="x") + Frame(height=2, bd=1, relief="ridge").pack(fill="x") + + def refresh(self): + self.colorCycle = itertools.cycle(colors) + try: + self.substitute() + self.img.config(image=self.image) + except re.error: + self.img.config(image=self.imageDimmed) + + +class FindZone(Zone): + def addTags(self, m): + color = next(self.colorCycle) + self.txt.tag_add(color, "1.0+%sc" % m.start(), "1.0+%sc" % m.end()) + try: + self.txt.tag_add( + "emph" + color, "1.0+%sc" % m.start("emph"), "1.0+%sc" % m.end("emph") + ) + except: + pass + + def substitute(self, *args): + for color in colors: + self.txt.tag_remove(color, "1.0", "end") + self.txt.tag_remove("emph" + color, "1.0", "end") + self.rex = re.compile("") # default value in case of malformed regexp + self.rex = re.compile(self.fld.get("1.0", "end")[:-1], re.MULTILINE) + try: + re.compile("(?P%s)" % self.fld.get(SEL_FIRST, SEL_LAST)) + self.rexSel = re.compile( + "%s(?P%s)%s" + % ( + self.fld.get("1.0", SEL_FIRST), + self.fld.get(SEL_FIRST, SEL_LAST), + self.fld.get(SEL_LAST, "end")[:-1], + ), + 
re.MULTILINE, + ) + except: + self.rexSel = self.rex + self.rexSel.sub(self.addTags, self.txt.get("1.0", "end")) + + +class ReplaceZone(Zone): + def addTags(self, m): + s = sz.rex.sub(self.repl, m.group()) + self.txt.delete( + "1.0+%sc" % (m.start() + self.diff), "1.0+%sc" % (m.end() + self.diff) + ) + self.txt.insert("1.0+%sc" % (m.start() + self.diff), s, next(self.colorCycle)) + self.diff += len(s) - (m.end() - m.start()) + + def substitute(self): + self.txt.delete("1.0", "end") + self.txt.insert("1.0", sz.txt.get("1.0", "end")[:-1]) + self.diff = 0 + self.repl = rex0.sub(r"\\g<\1>", self.fld.get("1.0", "end")[:-1]) + sz.rex.sub(self.addTags, sz.txt.get("1.0", "end")[:-1]) + + +def launchRefresh(_): + sz.fld.after_idle(sz.refresh) + rz.fld.after_idle(rz.refresh) + + +def app(): + global root, sz, rz, rex0 + root = Tk() + root.resizable(height=False, width=True) + root.title(windowTitle) + root.minsize(width=250, height=0) + sz = FindZone("find", initialFind, initialText) + sz.fld.bind("", launchRefresh) + sz.fld.bind("", launchRefresh) + sz.fld.bind("", launchRefresh) + sz.rexSel = re.compile("") + rz = ReplaceZone("repl", initialRepl, "") + rex0 = re.compile(r"(?", launchRefresh) + launchRefresh(None) + root.mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/rdparser_app.py b/llmeval-env/lib/python3.10/site-packages/nltk/app/rdparser_app.py new file mode 100644 index 0000000000000000000000000000000000000000..16de5a442659171763da4b4d19e9f56ef9db6277 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/app/rdparser_app.py @@ -0,0 +1,1052 @@ +# Natural Language Toolkit: Recursive Descent Parser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A graphical tool for exploring the recursive descent parser. + +The recursive descent parser maintains a tree, which records the +structure of the portion of the text that has been parsed. It uses +CFG productions to expand the fringe of the tree, and matches its +leaves against the text. Initially, the tree contains the start +symbol ("S"). It is shown in the main canvas, to the right of the +list of available expansions. + +The parser builds up a tree structure for the text using three +operations: + + - "expand" uses a CFG production to add children to a node on the + fringe of the tree. + - "match" compares a leaf in the tree to a text token. + - "backtrack" returns the tree to its state before the most recent + expand or match operation. + +The parser maintains a list of tree locations called a "frontier" to +remember which nodes have not yet been expanded and which leaves have +not yet been matched against the text. The leftmost frontier node is +shown in green, and the other frontier nodes are shown in blue. The +parser always performs expand and match operations on the leftmost +element of the frontier. + +You can control the parser's operation by using the "expand," "match," +and "backtrack" buttons; or you can use the "step" button to let the +parser automatically decide which operation to apply. The parser uses +the following rules to decide which operation to apply: + + - If the leftmost frontier element is a token, try matching it. + - If the leftmost frontier element is a node, try expanding it with + the first untried expansion. + - Otherwise, backtrack. 
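For example, the same operations can be driven without the GUI (a minimal,
illustrative sketch; the grammar and sentence are made up for this example):

    >>> from nltk import CFG
    >>> from nltk.parse import SteppingRecursiveDescentParser
    >>> grammar = CFG.fromstring('''
    ... S -> NP VP
    ... NP -> 'the' N
    ... VP -> V NP
    ... N -> 'dog' | 'cat'
    ... V -> 'saw'
    ... ''')
    >>> parser = SteppingRecursiveDescentParser(grammar)
    >>> parser.initialize('the dog saw the cat'.split())
    >>> op = parser.step()            # one expand, match, or backtrack operation
    >>> frontier = parser.frontier()  # tree locations not yet expanded or matched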
+ +The "expand" button applies the untried expansion whose CFG production +is listed earliest in the grammar. To manually choose which expansion +to apply, click on a CFG production from the list of available +expansions, on the left side of the main window. + +The "autostep" button will let the parser continue applying +applications to the tree until it reaches a complete parse. You can +cancel an autostep in progress at any time by clicking on the +"autostep" button again. + +Keyboard Shortcuts:: + [Space]\t Perform the next expand, match, or backtrack operation + [a]\t Step through operations until the next complete parse + [e]\t Perform an expand operation + [m]\t Perform a match operation + [b]\t Perform a backtrack operation + [Delete]\t Reset the parser + [g]\t Show/hide available expansions list + [h]\t Help + [Ctrl-p]\t Print + [q]\t Quit +""" + +from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk +from tkinter.font import Font + +from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment +from nltk.draw.util import CanvasFrame, EntryDialog, ShowText, TextWidget +from nltk.parse import SteppingRecursiveDescentParser +from nltk.tree import Tree +from nltk.util import in_idle + + +class RecursiveDescentApp: + """ + A graphical tool for exploring the recursive descent parser. The tool + displays the parser's tree and the remaining text, and allows the + user to control the parser's operation. In particular, the user + can expand subtrees on the frontier, match tokens on the frontier + against the text, and backtrack. A "step" button simply steps + through the parsing process, performing the operations that + ``RecursiveDescentParser`` would use. + """ + + def __init__(self, grammar, sent, trace=0): + self._sent = sent + self._parser = SteppingRecursiveDescentParser(grammar, trace) + + # Set up the main window. + self._top = Tk() + self._top.title("Recursive Descent Parser Application") + + # Set up key bindings. + self._init_bindings() + + # Initialize the fonts. + self._init_fonts(self._top) + + # Animations. animating_lock is a lock to prevent the demo + # from performing new operations while it's animating. + self._animation_frames = IntVar(self._top) + self._animation_frames.set(5) + self._animating_lock = 0 + self._autostep = 0 + + # The user can hide the grammar. + self._show_grammar = IntVar(self._top) + self._show_grammar.set(1) + + # Create the basic frames. + self._init_menubar(self._top) + self._init_buttons(self._top) + self._init_feedback(self._top) + self._init_grammar(self._top) + self._init_canvas(self._top) + + # Initialize the parser. + self._parser.initialize(self._sent) + + # Resize callback + self._canvas.bind("", self._configure) + + ######################################### + ## Initialization Helpers + ######################################### + + def _init_fonts(self, root): + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(root) + self._size.set(self._sysfont.cget("size")) + + self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) + self._font = Font(family="helvetica", size=self._size.get()) + if self._size.get() < 0: + big = self._size.get() - 2 + else: + big = self._size.get() + 2 + self._bigfont = Font(family="helvetica", weight="bold", size=big) + + def _init_grammar(self, parent): + # Grammar view. 
+ self._prodframe = listframe = Frame(parent) + self._prodframe.pack(fill="both", side="left", padx=2) + self._prodlist_label = Label( + self._prodframe, font=self._boldfont, text="Available Expansions" + ) + self._prodlist_label.pack() + self._prodlist = Listbox( + self._prodframe, + selectmode="single", + relief="groove", + background="white", + foreground="#909090", + font=self._font, + selectforeground="#004040", + selectbackground="#c0f0c0", + ) + + self._prodlist.pack(side="right", fill="both", expand=1) + + self._productions = list(self._parser.grammar().productions()) + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + self._prodlist.config(height=min(len(self._productions), 25)) + + # Add a scrollbar if there are more than 25 productions. + if len(self._productions) > 25: + listscroll = Scrollbar(self._prodframe, orient="vertical") + self._prodlist.config(yscrollcommand=listscroll.set) + listscroll.config(command=self._prodlist.yview) + listscroll.pack(side="left", fill="y") + + # If they select a production, apply it. + self._prodlist.bind("<>", self._prodlist_select) + + def _init_bindings(self): + # Key bindings are a good thing. + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("e", self.expand) + # self._top.bind('', self.expand) + # self._top.bind('', self.expand) + self._top.bind("m", self.match) + self._top.bind("", self.match) + self._top.bind("", self.match) + self._top.bind("b", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("a", self.autostep) + # self._top.bind('', self.autostep) + self._top.bind("", self.autostep) + self._top.bind("", self.cancel_autostep) + self._top.bind("", self.step) + self._top.bind("", self.reset) + self._top.bind("", self.postscript) + # self._top.bind('', self.help) + # self._top.bind('', self.help) + self._top.bind("", self.help) + self._top.bind("", self.help) + # self._top.bind('', self.toggle_grammar) + # self._top.bind('', self.toggle_grammar) + # self._top.bind('', self.toggle_grammar) + self._top.bind("", self.edit_grammar) + self._top.bind("", self.edit_sentence) + + def _init_buttons(self, parent): + # Set up the frames. + self._buttonframe = buttonframe = Frame(parent) + buttonframe.pack(fill="none", side="bottom", padx=3, pady=2) + Button( + buttonframe, + text="Step", + background="#90c0d0", + foreground="black", + command=self.step, + ).pack(side="left") + Button( + buttonframe, + text="Autostep", + background="#90c0d0", + foreground="black", + command=self.autostep, + ).pack(side="left") + Button( + buttonframe, + text="Expand", + underline=0, + background="#90f090", + foreground="black", + command=self.expand, + ).pack(side="left") + Button( + buttonframe, + text="Match", + underline=0, + background="#90f090", + foreground="black", + command=self.match, + ).pack(side="left") + Button( + buttonframe, + text="Backtrack", + underline=0, + background="#f0a0a0", + foreground="black", + command=self.backtrack, + ).pack(side="left") + # Replace autostep... 
+ + # self._autostep_button = Button(buttonframe, text='Autostep', + # underline=0, command=self.autostep) + # self._autostep_button.pack(side='left') + + def _configure(self, event): + self._autostep = 0 + (x1, y1, x2, y2) = self._cframe.scrollregion() + y2 = event.height - 6 + self._canvas["scrollregion"] = "%d %d %d %d" % (x1, y1, x2, y2) + self._redraw() + + def _init_feedback(self, parent): + self._feedbackframe = feedbackframe = Frame(parent) + feedbackframe.pack(fill="x", side="bottom", padx=3, pady=3) + self._lastoper_label = Label( + feedbackframe, text="Last Operation:", font=self._font + ) + self._lastoper_label.pack(side="left") + lastoperframe = Frame(feedbackframe, relief="sunken", border=1) + lastoperframe.pack(fill="x", side="right", expand=1, padx=5) + self._lastoper1 = Label( + lastoperframe, foreground="#007070", background="#f0f0f0", font=self._font + ) + self._lastoper2 = Label( + lastoperframe, + anchor="w", + width=30, + foreground="#004040", + background="#f0f0f0", + font=self._font, + ) + self._lastoper1.pack(side="left") + self._lastoper2.pack(side="left", fill="x", expand=1) + + def _init_canvas(self, parent): + self._cframe = CanvasFrame( + parent, + background="white", + # width=525, height=250, + closeenough=10, + border=2, + relief="sunken", + ) + self._cframe.pack(expand=1, fill="both", side="top", pady=2) + canvas = self._canvas = self._cframe.canvas() + + # Initially, there's no tree or text + self._tree = None + self._textwidgets = [] + self._textline = None + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Reset Parser", underline=0, command=self.reset, accelerator="Del" + ) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self.postscript, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + editmenu.add_command( + label="Edit Grammar", + underline=5, + command=self.edit_grammar, + accelerator="Ctrl-g", + ) + editmenu.add_command( + label="Edit Text", + underline=5, + command=self.edit_sentence, + accelerator="Ctrl-t", + ) + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + rulemenu = Menu(menubar, tearoff=0) + rulemenu.add_command( + label="Step", underline=1, command=self.step, accelerator="Space" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Match", underline=0, command=self.match, accelerator="Ctrl-m" + ) + rulemenu.add_command( + label="Expand", underline=0, command=self.expand, accelerator="Ctrl-e" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Backtrack", underline=0, command=self.backtrack, accelerator="Ctrl-b" + ) + menubar.add_cascade(label="Apply", underline=0, menu=rulemenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_checkbutton( + label="Show Grammar", + underline=0, + variable=self._show_grammar, + command=self._toggle_grammar, + ) + viewmenu.add_separator() + viewmenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + 
underline=0, + value=18, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + animatemenu = Menu(menubar, tearoff=0) + animatemenu.add_radiobutton( + label="No Animation", underline=0, variable=self._animation_frames, value=0 + ) + animatemenu.add_radiobutton( + label="Slow Animation", + underline=0, + variable=self._animation_frames, + value=10, + accelerator="-", + ) + animatemenu.add_radiobutton( + label="Normal Animation", + underline=0, + variable=self._animation_frames, + value=5, + accelerator="=", + ) + animatemenu.add_radiobutton( + label="Fast Animation", + underline=0, + variable=self._animation_frames, + value=2, + accelerator="+", + ) + menubar.add_cascade(label="Animate", underline=1, menu=animatemenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + ######################################### + ## Helper + ######################################### + + def _get(self, widget, treeloc): + for i in treeloc: + widget = widget.subtrees()[i] + if isinstance(widget, TreeSegmentWidget): + widget = widget.label() + return widget + + ######################################### + ## Main draw procedure + ######################################### + + def _redraw(self): + canvas = self._canvas + + # Delete the old tree, widgets, etc. + if self._tree is not None: + self._cframe.destroy_widget(self._tree) + for twidget in self._textwidgets: + self._cframe.destroy_widget(twidget) + if self._textline is not None: + self._canvas.delete(self._textline) + + # Draw the tree. + helv = ("helvetica", -self._size.get()) + bold = ("helvetica", -self._size.get(), "bold") + attribs = { + "tree_color": "#000000", + "tree_width": 2, + "node_font": bold, + "leaf_font": helv, + } + tree = self._parser.tree() + self._tree = tree_to_treesegment(canvas, tree, **attribs) + self._cframe.add_widget(self._tree, 30, 5) + + # Draw the text. + helv = ("helvetica", -self._size.get()) + bottom = y = self._cframe.scrollregion()[3] + self._textwidgets = [ + TextWidget(canvas, word, font=self._font) for word in self._sent + ] + for twidget in self._textwidgets: + self._cframe.add_widget(twidget, 0, 0) + twidget.move(0, bottom - twidget.bbox()[3] - 5) + y = min(y, twidget.bbox()[1]) + + # Draw a line over the text, to separate it from the tree. + self._textline = canvas.create_line(-5000, y - 5, 5000, y - 5, dash=".") + + # Highlight appropriate nodes. + self._highlight_nodes() + self._highlight_prodlist() + + # Make sure the text lines up. + self._position_text() + + def _redraw_quick(self): + # This should be more-or-less sufficient after an animation. + self._highlight_nodes() + self._highlight_prodlist() + self._position_text() + + def _highlight_nodes(self): + # Highlight the list of nodes to be checked. + bold = ("helvetica", -self._size.get(), "bold") + for treeloc in self._parser.frontier()[:1]: + self._get(self._tree, treeloc)["color"] = "#20a050" + self._get(self._tree, treeloc)["font"] = bold + for treeloc in self._parser.frontier()[1:]: + self._get(self._tree, treeloc)["color"] = "#008080" + + def _highlight_prodlist(self): + # Highlight the productions that can be expanded. 
+ # Boy, too bad tkinter doesn't implement Listbox.itemconfig; + # that would be pretty useful here. + self._prodlist.delete(0, "end") + expandable = self._parser.expandable_productions() + untried = self._parser.untried_expandable_productions() + productions = self._productions + for index in range(len(productions)): + if productions[index] in expandable: + if productions[index] in untried: + self._prodlist.insert(index, " %s" % productions[index]) + else: + self._prodlist.insert(index, " %s (TRIED)" % productions[index]) + self._prodlist.selection_set(index) + else: + self._prodlist.insert(index, " %s" % productions[index]) + + def _position_text(self): + # Line up the text widgets that are matched against the tree + numwords = len(self._sent) + num_matched = numwords - len(self._parser.remaining_text()) + leaves = self._tree_leaves()[:num_matched] + xmax = self._tree.bbox()[0] + for i in range(0, len(leaves)): + widget = self._textwidgets[i] + leaf = leaves[i] + widget["color"] = "#006040" + leaf["color"] = "#006040" + widget.move(leaf.bbox()[0] - widget.bbox()[0], 0) + xmax = widget.bbox()[2] + 10 + + # Line up the text widgets that are not matched against the tree. + for i in range(len(leaves), numwords): + widget = self._textwidgets[i] + widget["color"] = "#a0a0a0" + widget.move(xmax - widget.bbox()[0], 0) + xmax = widget.bbox()[2] + 10 + + # If we have a complete parse, make everything green :) + if self._parser.currently_complete(): + for twidget in self._textwidgets: + twidget["color"] = "#00a000" + + # Move the matched leaves down to the text. + for i in range(0, len(leaves)): + widget = self._textwidgets[i] + leaf = leaves[i] + dy = widget.bbox()[1] - leaf.bbox()[3] - 10.0 + dy = max(dy, leaf.parent().label().bbox()[3] - leaf.bbox()[3] + 10) + leaf.move(0, dy) + + def _tree_leaves(self, tree=None): + if tree is None: + tree = self._tree + if isinstance(tree, TreeSegmentWidget): + leaves = [] + for child in tree.subtrees(): + leaves += self._tree_leaves(child) + return leaves + else: + return [tree] + + ######################################### + ## Button Callbacks + ######################################### + + def destroy(self, *e): + self._autostep = 0 + if self._top is None: + return + self._top.destroy() + self._top = None + + def reset(self, *e): + self._autostep = 0 + self._parser.initialize(self._sent) + self._lastoper1["text"] = "Reset Application" + self._lastoper2["text"] = "" + self._redraw() + + def autostep(self, *e): + if self._animation_frames.get() == 0: + self._animation_frames.set(2) + if self._autostep: + self._autostep = 0 + else: + self._autostep = 1 + self._step() + + def cancel_autostep(self, *e): + # self._autostep_button['text'] = 'Autostep' + self._autostep = 0 + + # Make sure to stop auto-stepping if we get any user input. + def step(self, *e): + self._autostep = 0 + self._step() + + def match(self, *e): + self._autostep = 0 + self._match() + + def expand(self, *e): + self._autostep = 0 + self._expand() + + def backtrack(self, *e): + self._autostep = 0 + self._backtrack() + + def _step(self): + if self._animating_lock: + return + + # Try expanding, matching, and backtracking (in that order) + if self._expand(): + pass + elif self._parser.untried_match() and self._match(): + pass + elif self._backtrack(): + pass + else: + self._lastoper1["text"] = "Finished" + self._lastoper2["text"] = "" + self._autostep = 0 + + # Check if we just completed a parse. 
+ if self._parser.currently_complete(): + self._autostep = 0 + self._lastoper2["text"] += " [COMPLETE PARSE]" + + def _expand(self, *e): + if self._animating_lock: + return + old_frontier = self._parser.frontier() + rv = self._parser.expand() + if rv is not None: + self._lastoper1["text"] = "Expand:" + self._lastoper2["text"] = rv + self._prodlist.selection_clear(0, "end") + index = self._productions.index(rv) + self._prodlist.selection_set(index) + self._animate_expand(old_frontier[0]) + return True + else: + self._lastoper1["text"] = "Expand:" + self._lastoper2["text"] = "(all expansions tried)" + return False + + def _match(self, *e): + if self._animating_lock: + return + old_frontier = self._parser.frontier() + rv = self._parser.match() + if rv is not None: + self._lastoper1["text"] = "Match:" + self._lastoper2["text"] = rv + self._animate_match(old_frontier[0]) + return True + else: + self._lastoper1["text"] = "Match:" + self._lastoper2["text"] = "(failed)" + return False + + def _backtrack(self, *e): + if self._animating_lock: + return + if self._parser.backtrack(): + elt = self._parser.tree() + for i in self._parser.frontier()[0]: + elt = elt[i] + self._lastoper1["text"] = "Backtrack" + self._lastoper2["text"] = "" + if isinstance(elt, Tree): + self._animate_backtrack(self._parser.frontier()[0]) + else: + self._animate_match_backtrack(self._parser.frontier()[0]) + return True + else: + self._autostep = 0 + self._lastoper1["text"] = "Finished" + self._lastoper2["text"] = "" + return False + + def about(self, *e): + ABOUT = ( + "NLTK Recursive Descent Parser Application\n" + "Written by Edward Loper" + ) + TITLE = "About: Recursive Descent Parser Application" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self._top, TITLE, ABOUT) + + def help(self, *e): + self._autostep = 0 + # The default font's not very legible; try using 'fixed' instead. + try: + ShowText( + self._top, + "Help: Recursive Descent Parser Application", + (__doc__ or "").strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._top, + "Help: Recursive Descent Parser Application", + (__doc__ or "").strip(), + width=75, + ) + + def postscript(self, *e): + self._autostep = 0 + self._cframe.print_to_file() + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. 
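+
+        For example, launching the demo from a standalone script (a sketch;
+        ``app()`` below builds a small grammar and calls ``mainloop()``
+        internally)::
+
+            from nltk.app.rdparser_app import app
+            app()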
+ """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._boldfont.configure(size=-(abs(size))) + self._sysfont.configure(size=-(abs(size))) + self._bigfont.configure(size=-(abs(size + 2))) + self._redraw() + + ######################################### + ## Expand Production Selection + ######################################### + + def _toggle_grammar(self, *e): + if self._show_grammar.get(): + self._prodframe.pack( + fill="both", side="left", padx=2, after=self._feedbackframe + ) + self._lastoper1["text"] = "Show Grammar" + else: + self._prodframe.pack_forget() + self._lastoper1["text"] = "Hide Grammar" + self._lastoper2["text"] = "" + + # def toggle_grammar(self, *e): + # self._show_grammar = not self._show_grammar + # if self._show_grammar: + # self._prodframe.pack(fill='both', expand='y', side='left', + # after=self._feedbackframe) + # self._lastoper1['text'] = 'Show Grammar' + # else: + # self._prodframe.pack_forget() + # self._lastoper1['text'] = 'Hide Grammar' + # self._lastoper2['text'] = '' + + def _prodlist_select(self, event): + selection = self._prodlist.curselection() + if len(selection) != 1: + return + index = int(selection[0]) + old_frontier = self._parser.frontier() + production = self._parser.expand(self._productions[index]) + + if production: + self._lastoper1["text"] = "Expand:" + self._lastoper2["text"] = production + self._prodlist.selection_clear(0, "end") + self._prodlist.selection_set(index) + self._animate_expand(old_frontier[0]) + else: + # Reset the production selections. + self._prodlist.selection_clear(0, "end") + for prod in self._parser.expandable_productions(): + index = self._productions.index(prod) + self._prodlist.selection_set(index) + + ######################################### + ## Animation + ######################################### + + def _animate_expand(self, treeloc): + oldwidget = self._get(self._tree, treeloc) + oldtree = oldwidget.parent() + top = not isinstance(oldtree.parent(), TreeSegmentWidget) + + tree = self._parser.tree() + for i in treeloc: + tree = tree[i] + + widget = tree_to_treesegment( + self._canvas, + tree, + node_font=self._boldfont, + leaf_color="white", + tree_width=2, + tree_color="white", + node_color="white", + leaf_font=self._font, + ) + widget.label()["color"] = "#20a050" + + (oldx, oldy) = oldtree.label().bbox()[:2] + (newx, newy) = widget.label().bbox()[:2] + widget.move(oldx - newx, oldy - newy) + + if top: + self._cframe.add_widget(widget, 0, 5) + widget.move(30 - widget.label().bbox()[0], 0) + self._tree = widget + else: + oldtree.parent().replace_child(oldtree, widget) + + # Move the children over so they don't overlap. + # Line the children up in a strange way. + if widget.subtrees(): + dx = ( + oldx + + widget.label().width() / 2 + - widget.subtrees()[0].bbox()[0] / 2 + - widget.subtrees()[0].bbox()[2] / 2 + ) + for subtree in widget.subtrees(): + subtree.move(dx, 0) + + self._makeroom(widget) + + if top: + self._cframe.destroy_widget(oldtree) + else: + oldtree.destroy() + + colors = [ + "gray%d" % (10 * int(10 * x / self._animation_frames.get())) + for x in range(self._animation_frames.get(), 0, -1) + ] + + # Move the text string down, if necessary. 
+ dy = widget.bbox()[3] + 30 - self._canvas.coords(self._textline)[1] + if dy > 0: + for twidget in self._textwidgets: + twidget.move(0, dy) + self._canvas.move(self._textline, 0, dy) + + self._animate_expand_frame(widget, colors) + + def _makeroom(self, treeseg): + """ + Make sure that no sibling tree bbox's overlap. + """ + parent = treeseg.parent() + if not isinstance(parent, TreeSegmentWidget): + return + + index = parent.subtrees().index(treeseg) + + # Handle siblings to the right + rsiblings = parent.subtrees()[index + 1 :] + if rsiblings: + dx = treeseg.bbox()[2] - rsiblings[0].bbox()[0] + 10 + for sibling in rsiblings: + sibling.move(dx, 0) + + # Handle siblings to the left + if index > 0: + lsibling = parent.subtrees()[index - 1] + dx = max(0, lsibling.bbox()[2] - treeseg.bbox()[0] + 10) + treeseg.move(dx, 0) + + # Keep working up the tree. + self._makeroom(parent) + + def _animate_expand_frame(self, widget, colors): + if len(colors) > 0: + self._animating_lock = 1 + widget["color"] = colors[0] + for subtree in widget.subtrees(): + if isinstance(subtree, TreeSegmentWidget): + subtree.label()["color"] = colors[0] + else: + subtree["color"] = colors[0] + self._top.after(50, self._animate_expand_frame, widget, colors[1:]) + else: + widget["color"] = "black" + for subtree in widget.subtrees(): + if isinstance(subtree, TreeSegmentWidget): + subtree.label()["color"] = "black" + else: + subtree["color"] = "black" + self._redraw_quick() + widget.label()["color"] = "black" + self._animating_lock = 0 + if self._autostep: + self._step() + + def _animate_backtrack(self, treeloc): + # Flash red first, if we're animating. + if self._animation_frames.get() == 0: + colors = [] + else: + colors = ["#a00000", "#000000", "#a00000"] + colors += [ + "gray%d" % (10 * int(10 * x / (self._animation_frames.get()))) + for x in range(1, self._animation_frames.get() + 1) + ] + + widgets = [self._get(self._tree, treeloc).parent()] + for subtree in widgets[0].subtrees(): + if isinstance(subtree, TreeSegmentWidget): + widgets.append(subtree.label()) + else: + widgets.append(subtree) + + self._animate_backtrack_frame(widgets, colors) + + def _animate_backtrack_frame(self, widgets, colors): + if len(colors) > 0: + self._animating_lock = 1 + for widget in widgets: + widget["color"] = colors[0] + self._top.after(50, self._animate_backtrack_frame, widgets, colors[1:]) + else: + for widget in widgets[0].subtrees(): + widgets[0].remove_child(widget) + widget.destroy() + self._redraw_quick() + self._animating_lock = 0 + if self._autostep: + self._step() + + def _animate_match_backtrack(self, treeloc): + widget = self._get(self._tree, treeloc) + node = widget.parent().label() + dy = (node.bbox()[3] - widget.bbox()[1] + 14) / max( + 1, self._animation_frames.get() + ) + self._animate_match_backtrack_frame(self._animation_frames.get(), widget, dy) + + def _animate_match(self, treeloc): + widget = self._get(self._tree, treeloc) + + dy = (self._textwidgets[0].bbox()[1] - widget.bbox()[3] - 10.0) / max( + 1, self._animation_frames.get() + ) + self._animate_match_frame(self._animation_frames.get(), widget, dy) + + def _animate_match_frame(self, frame, widget, dy): + if frame > 0: + self._animating_lock = 1 + widget.move(0, dy) + self._top.after(10, self._animate_match_frame, frame - 1, widget, dy) + else: + widget["color"] = "#006040" + self._redraw_quick() + self._animating_lock = 0 + if self._autostep: + self._step() + + def _animate_match_backtrack_frame(self, frame, widget, dy): + if frame > 0: + self._animating_lock = 1 
+ widget.move(0, dy) + self._top.after( + 10, self._animate_match_backtrack_frame, frame - 1, widget, dy + ) + else: + widget.parent().remove_child(widget) + widget.destroy() + self._animating_lock = 0 + if self._autostep: + self._step() + + def edit_grammar(self, *e): + CFGEditor(self._top, self._parser.grammar(), self.set_grammar) + + def set_grammar(self, grammar): + self._parser.set_grammar(grammar) + self._productions = list(grammar.productions()) + self._prodlist.delete(0, "end") + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + + def edit_sentence(self, *e): + sentence = " ".join(self._sent) + title = "Edit Text" + instr = "Enter a new sentence to parse." + EntryDialog(self._top, sentence, instr, self.set_sentence, title) + + def set_sentence(self, sentence): + self._sent = sentence.split() # [XX] use tagged? + self.reset() + + +def app(): + """ + Create a recursive descent parser demo, using a simple grammar and + text. + """ + from nltk.grammar import CFG + + grammar = CFG.fromstring( + """ + # Grammatical productions. + S -> NP VP + NP -> Det N PP | Det N + VP -> V NP PP | V NP | V + PP -> P NP + # Lexical productions. + NP -> 'I' + Det -> 'the' | 'a' + N -> 'man' | 'park' | 'dog' | 'telescope' + V -> 'ate' | 'saw' + P -> 'in' | 'under' | 'with' + """ + ) + + sent = "the dog saw a man in the park".split() + + RecursiveDescentApp(grammar, sent).mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/srparser_app.py b/llmeval-env/lib/python3.10/site-packages/nltk/app/srparser_app.py new file mode 100644 index 0000000000000000000000000000000000000000..cca5cb2de2149cc573b6d471cd5fef2a57cbbb7d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/app/srparser_app.py @@ -0,0 +1,937 @@ +# Natural Language Toolkit: Shift-Reduce Parser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A graphical tool for exploring the shift-reduce parser. + +The shift-reduce parser maintains a stack, which records the structure +of the portion of the text that has been parsed. The stack is +initially empty. Its contents are shown on the left side of the main +canvas. + +On the right side of the main canvas is the remaining text. This is +the portion of the text which has not yet been considered by the +parser. + +The parser builds up a tree structure for the text using two +operations: + + - "shift" moves the first token from the remaining text to the top + of the stack. In the demo, the top of the stack is its right-hand + side. + - "reduce" uses a grammar production to combine the rightmost stack + elements into a single tree token. + +You can control the parser's operation by using the "shift" and +"reduce" buttons; or you can use the "step" button to let the parser +automatically decide which operation to apply. The parser uses the +following rules to decide which operation to apply: + + - Only shift if no reductions are available. + - If multiple reductions are available, then apply the reduction + whose CFG production is listed earliest in the grammar. + +The "reduce" button applies the reduction whose CFG production is +listed earliest in the grammar. There are two ways to manually choose +which reduction to apply: + + - Click on a CFG production from the list of available reductions, + on the left side of the main window. 
The reduction based on that + production will be applied to the top of the stack. + - Click on one of the stack elements. A popup window will appear, + containing all available reductions. Select one, and it will be + applied to the top of the stack. + +Note that reductions can only be applied to the top of the stack. + +Keyboard Shortcuts:: + [Space]\t Perform the next shift or reduce operation + [s]\t Perform a shift operation + [r]\t Perform a reduction operation + [Ctrl-z]\t Undo most recent operation + [Delete]\t Reset the parser + [g]\t Show/hide available production list + [Ctrl-a]\t Toggle animations + [h]\t Help + [Ctrl-p]\t Print + [q]\t Quit + +""" + +from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk +from tkinter.font import Font + +from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment +from nltk.draw.util import CanvasFrame, EntryDialog, ShowText, TextWidget +from nltk.parse import SteppingShiftReduceParser +from nltk.tree import Tree +from nltk.util import in_idle + +""" +Possible future improvements: + - button/window to change and/or select text. Just pop up a window + with an entry, and let them modify the text; and then retokenize + it? Maybe give a warning if it contains tokens whose types are + not in the grammar. + - button/window to change and/or select grammar. Select from + several alternative grammars? Or actually change the grammar? If + the later, then I'd want to define nltk.draw.cfg, which would be + responsible for that. +""" + + +class ShiftReduceApp: + """ + A graphical tool for exploring the shift-reduce parser. The tool + displays the parser's stack and the remaining text, and allows the + user to control the parser's operation. In particular, the user + can shift tokens onto the stack, and can perform reductions on the + top elements of the stack. A "step" button simply steps through + the parsing process, performing the operations that + ``nltk.parse.ShiftReduceParser`` would use. + """ + + def __init__(self, grammar, sent, trace=0): + self._sent = sent + self._parser = SteppingShiftReduceParser(grammar, trace) + + # Set up the main window. + self._top = Tk() + self._top.title("Shift Reduce Parser Application") + + # Animations. animating_lock is a lock to prevent the demo + # from performing new operations while it's animating. + self._animating_lock = 0 + self._animate = IntVar(self._top) + self._animate.set(10) # = medium + + # The user can hide the grammar. + self._show_grammar = IntVar(self._top) + self._show_grammar.set(1) + + # Initialize fonts. + self._init_fonts(self._top) + + # Set up key bindings. + self._init_bindings() + + # Create the basic frames. + self._init_menubar(self._top) + self._init_buttons(self._top) + self._init_feedback(self._top) + self._init_grammar(self._top) + self._init_canvas(self._top) + + # A popup menu for reducing. + self._reduce_menu = Menu(self._canvas, tearoff=0) + + # Reset the demo, and set the feedback frame to empty. 
+ self.reset() + self._lastoper1["text"] = "" + + ######################################### + ## Initialization Helpers + ######################################### + + def _init_fonts(self, root): + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(root) + self._size.set(self._sysfont.cget("size")) + + self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) + self._font = Font(family="helvetica", size=self._size.get()) + + def _init_grammar(self, parent): + # Grammar view. + self._prodframe = listframe = Frame(parent) + self._prodframe.pack(fill="both", side="left", padx=2) + self._prodlist_label = Label( + self._prodframe, font=self._boldfont, text="Available Reductions" + ) + self._prodlist_label.pack() + self._prodlist = Listbox( + self._prodframe, + selectmode="single", + relief="groove", + background="white", + foreground="#909090", + font=self._font, + selectforeground="#004040", + selectbackground="#c0f0c0", + ) + + self._prodlist.pack(side="right", fill="both", expand=1) + + self._productions = list(self._parser.grammar().productions()) + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + self._prodlist.config(height=min(len(self._productions), 25)) + + # Add a scrollbar if there are more than 25 productions. + if 1: # len(self._productions) > 25: + listscroll = Scrollbar(self._prodframe, orient="vertical") + self._prodlist.config(yscrollcommand=listscroll.set) + listscroll.config(command=self._prodlist.yview) + listscroll.pack(side="left", fill="y") + + # If they select a production, apply it. + self._prodlist.bind("<>", self._prodlist_select) + + # When they hover over a production, highlight it. + self._hover = -1 + self._prodlist.bind("", self._highlight_hover) + self._prodlist.bind("", self._clear_hover) + + def _init_bindings(self): + # Quit + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + + # Ops (step, shift, reduce, undo) + self._top.bind("", self.step) + self._top.bind("", self.shift) + self._top.bind("", self.shift) + self._top.bind("", self.shift) + self._top.bind("", self.reduce) + self._top.bind("", self.reduce) + self._top.bind("", self.reduce) + self._top.bind("", self.reset) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + + # Misc + self._top.bind("", self.postscript) + self._top.bind("", self.help) + self._top.bind("", self.help) + self._top.bind("", self.edit_grammar) + self._top.bind("", self.edit_sentence) + + # Animation speed control + self._top.bind("-", lambda e, a=self._animate: a.set(20)) + self._top.bind("=", lambda e, a=self._animate: a.set(10)) + self._top.bind("+", lambda e, a=self._animate: a.set(4)) + + def _init_buttons(self, parent): + # Set up the frames. 
+ self._buttonframe = buttonframe = Frame(parent) + buttonframe.pack(fill="none", side="bottom") + Button( + buttonframe, + text="Step", + background="#90c0d0", + foreground="black", + command=self.step, + ).pack(side="left") + Button( + buttonframe, + text="Shift", + underline=0, + background="#90f090", + foreground="black", + command=self.shift, + ).pack(side="left") + Button( + buttonframe, + text="Reduce", + underline=0, + background="#90f090", + foreground="black", + command=self.reduce, + ).pack(side="left") + Button( + buttonframe, + text="Undo", + underline=0, + background="#f0a0a0", + foreground="black", + command=self.undo, + ).pack(side="left") + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Reset Parser", underline=0, command=self.reset, accelerator="Del" + ) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self.postscript, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + editmenu.add_command( + label="Edit Grammar", + underline=5, + command=self.edit_grammar, + accelerator="Ctrl-g", + ) + editmenu.add_command( + label="Edit Text", + underline=5, + command=self.edit_sentence, + accelerator="Ctrl-t", + ) + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + rulemenu = Menu(menubar, tearoff=0) + rulemenu.add_command( + label="Step", underline=1, command=self.step, accelerator="Space" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Shift", underline=0, command=self.shift, accelerator="Ctrl-s" + ) + rulemenu.add_command( + label="Reduce", underline=0, command=self.reduce, accelerator="Ctrl-r" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Undo", underline=0, command=self.undo, accelerator="Ctrl-u" + ) + menubar.add_cascade(label="Apply", underline=0, menu=rulemenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_checkbutton( + label="Show Grammar", + underline=0, + variable=self._show_grammar, + command=self._toggle_grammar, + ) + viewmenu.add_separator() + viewmenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=18, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + animatemenu = Menu(menubar, tearoff=0) + animatemenu.add_radiobutton( + label="No Animation", underline=0, variable=self._animate, value=0 + ) + animatemenu.add_radiobutton( + label="Slow Animation", + underline=0, + variable=self._animate, + value=20, + accelerator="-", + ) + animatemenu.add_radiobutton( + label="Normal Animation", + underline=0, + variable=self._animate, + value=10, + accelerator="=", + ) + animatemenu.add_radiobutton( + label="Fast Animation", + underline=0, + variable=self._animate, + value=4, + accelerator="+", + ) + menubar.add_cascade(label="Animate", underline=1, menu=animatemenu) + + 
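+        # Note: the radiobutton values above (0, 20, 10, 4) feed self._animate;
+        # shift()/reduce() check it to decide whether to animate at all, and
+        # _animate_shift()/_animate_reduce() use it as the frame count, so 0
+        # disables animation and the canvas is simply redrawn instead.
+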
helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + def _init_feedback(self, parent): + self._feedbackframe = feedbackframe = Frame(parent) + feedbackframe.pack(fill="x", side="bottom", padx=3, pady=3) + self._lastoper_label = Label( + feedbackframe, text="Last Operation:", font=self._font + ) + self._lastoper_label.pack(side="left") + lastoperframe = Frame(feedbackframe, relief="sunken", border=1) + lastoperframe.pack(fill="x", side="right", expand=1, padx=5) + self._lastoper1 = Label( + lastoperframe, foreground="#007070", background="#f0f0f0", font=self._font + ) + self._lastoper2 = Label( + lastoperframe, + anchor="w", + width=30, + foreground="#004040", + background="#f0f0f0", + font=self._font, + ) + self._lastoper1.pack(side="left") + self._lastoper2.pack(side="left", fill="x", expand=1) + + def _init_canvas(self, parent): + self._cframe = CanvasFrame( + parent, + background="white", + width=525, + closeenough=10, + border=2, + relief="sunken", + ) + self._cframe.pack(expand=1, fill="both", side="top", pady=2) + canvas = self._canvas = self._cframe.canvas() + + self._stackwidgets = [] + self._rtextwidgets = [] + self._titlebar = canvas.create_rectangle( + 0, 0, 0, 0, fill="#c0f0f0", outline="black" + ) + self._exprline = canvas.create_line(0, 0, 0, 0, dash=".") + self._stacktop = canvas.create_line(0, 0, 0, 0, fill="#408080") + size = self._size.get() + 4 + self._stacklabel = TextWidget( + canvas, "Stack", color="#004040", font=self._boldfont + ) + self._rtextlabel = TextWidget( + canvas, "Remaining Text", color="#004040", font=self._boldfont + ) + self._cframe.add_widget(self._stacklabel) + self._cframe.add_widget(self._rtextlabel) + + ######################################### + ## Main draw procedure + ######################################### + + def _redraw(self): + scrollregion = self._canvas["scrollregion"].split() + (cx1, cy1, cx2, cy2) = (int(c) for c in scrollregion) + + # Delete the old stack & rtext widgets. + for stackwidget in self._stackwidgets: + self._cframe.destroy_widget(stackwidget) + self._stackwidgets = [] + for rtextwidget in self._rtextwidgets: + self._cframe.destroy_widget(rtextwidget) + self._rtextwidgets = [] + + # Position the titlebar & exprline + (x1, y1, x2, y2) = self._stacklabel.bbox() + y = y2 - y1 + 10 + self._canvas.coords(self._titlebar, -5000, 0, 5000, y - 4) + self._canvas.coords(self._exprline, 0, y * 2 - 10, 5000, y * 2 - 10) + + # Position the titlebar labels.. + (x1, y1, x2, y2) = self._stacklabel.bbox() + self._stacklabel.move(5 - x1, 3 - y1) + (x1, y1, x2, y2) = self._rtextlabel.bbox() + self._rtextlabel.move(cx2 - x2 - 5, 3 - y1) + + # Draw the stack. + stackx = 5 + for tok in self._parser.stack(): + if isinstance(tok, Tree): + attribs = { + "tree_color": "#4080a0", + "tree_width": 2, + "node_font": self._boldfont, + "node_color": "#006060", + "leaf_color": "#006060", + "leaf_font": self._font, + } + widget = tree_to_treesegment(self._canvas, tok, **attribs) + widget.label()["color"] = "#000000" + else: + widget = TextWidget(self._canvas, tok, color="#000000", font=self._font) + widget.bind_click(self._popup_reduce) + self._stackwidgets.append(widget) + self._cframe.add_widget(widget, stackx, y) + stackx = widget.bbox()[2] + 10 + + # Draw the remaining text. 
+ rtextwidth = 0 + for tok in self._parser.remaining_text(): + widget = TextWidget(self._canvas, tok, color="#000000", font=self._font) + self._rtextwidgets.append(widget) + self._cframe.add_widget(widget, rtextwidth, y) + rtextwidth = widget.bbox()[2] + 4 + + # Allow enough room to shift the next token (for animations) + if len(self._rtextwidgets) > 0: + stackx += self._rtextwidgets[0].width() + + # Move the remaining text to the correct location (keep it + # right-justified, when possible); and move the remaining text + # label, if necessary. + stackx = max(stackx, self._stacklabel.width() + 25) + rlabelwidth = self._rtextlabel.width() + 10 + if stackx >= cx2 - max(rtextwidth, rlabelwidth): + cx2 = stackx + max(rtextwidth, rlabelwidth) + for rtextwidget in self._rtextwidgets: + rtextwidget.move(4 + cx2 - rtextwidth, 0) + self._rtextlabel.move(cx2 - self._rtextlabel.bbox()[2] - 5, 0) + + midx = (stackx + cx2 - max(rtextwidth, rlabelwidth)) / 2 + self._canvas.coords(self._stacktop, midx, 0, midx, 5000) + (x1, y1, x2, y2) = self._stacklabel.bbox() + + # Set up binding to allow them to shift a token by dragging it. + if len(self._rtextwidgets) > 0: + + def drag_shift(widget, midx=midx, self=self): + if widget.bbox()[0] < midx: + self.shift() + else: + self._redraw() + + self._rtextwidgets[0].bind_drag(drag_shift) + self._rtextwidgets[0].bind_click(self.shift) + + # Draw the stack top. + self._highlight_productions() + + def _draw_stack_top(self, widget): + # hack.. + midx = widget.bbox()[2] + 50 + self._canvas.coords(self._stacktop, midx, 0, midx, 5000) + + def _highlight_productions(self): + # Highlight the productions that can be reduced. + self._prodlist.selection_clear(0, "end") + for prod in self._parser.reducible_productions(): + index = self._productions.index(prod) + self._prodlist.selection_set(index) + + ######################################### + ## Button Callbacks + ######################################### + + def destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def reset(self, *e): + self._parser.initialize(self._sent) + self._lastoper1["text"] = "Reset App" + self._lastoper2["text"] = "" + self._redraw() + + def step(self, *e): + if self.reduce(): + return True + elif self.shift(): + return True + else: + if list(self._parser.parses()): + self._lastoper1["text"] = "Finished:" + self._lastoper2["text"] = "Success" + else: + self._lastoper1["text"] = "Finished:" + self._lastoper2["text"] = "Failure" + + def shift(self, *e): + if self._animating_lock: + return + if self._parser.shift(): + tok = self._parser.stack()[-1] + self._lastoper1["text"] = "Shift:" + self._lastoper2["text"] = "%r" % tok + if self._animate.get(): + self._animate_shift() + else: + self._redraw() + return True + return False + + def reduce(self, *e): + if self._animating_lock: + return + production = self._parser.reduce() + if production: + self._lastoper1["text"] = "Reduce:" + self._lastoper2["text"] = "%s" % production + if self._animate.get(): + self._animate_reduce() + else: + self._redraw() + return production + + def undo(self, *e): + if self._animating_lock: + return + if self._parser.undo(): + self._redraw() + + def postscript(self, *e): + self._cframe.print_to_file() + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. 
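+
+        For example, from a standalone script (a sketch; ``grammar`` and
+        ``sent`` stand for whatever CFG and token list you supply, exactly as
+        ``app()`` below does)::
+
+            demo = ShiftReduceApp(grammar, sent)
+            demo.mainloop()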
+ """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + ######################################### + ## Menubar callbacks + ######################################### + + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._boldfont.configure(size=-(abs(size))) + self._sysfont.configure(size=-(abs(size))) + + # self._stacklabel['font'] = ('helvetica', -size-4, 'bold') + # self._rtextlabel['font'] = ('helvetica', -size-4, 'bold') + # self._lastoper_label['font'] = ('helvetica', -size) + # self._lastoper1['font'] = ('helvetica', -size) + # self._lastoper2['font'] = ('helvetica', -size) + # self._prodlist['font'] = ('helvetica', -size) + # self._prodlist_label['font'] = ('helvetica', -size-2, 'bold') + self._redraw() + + def help(self, *e): + # The default font's not very legible; try using 'fixed' instead. + try: + ShowText( + self._top, + "Help: Shift-Reduce Parser Application", + (__doc__ or "").strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._top, + "Help: Shift-Reduce Parser Application", + (__doc__ or "").strip(), + width=75, + ) + + def about(self, *e): + ABOUT = "NLTK Shift-Reduce Parser Application\n" + "Written by Edward Loper" + TITLE = "About: Shift-Reduce Parser Application" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self._top, TITLE, ABOUT) + + def edit_grammar(self, *e): + CFGEditor(self._top, self._parser.grammar(), self.set_grammar) + + def set_grammar(self, grammar): + self._parser.set_grammar(grammar) + self._productions = list(grammar.productions()) + self._prodlist.delete(0, "end") + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + + def edit_sentence(self, *e): + sentence = " ".join(self._sent) + title = "Edit Text" + instr = "Enter a new sentence to parse." + EntryDialog(self._top, sentence, instr, self.set_sentence, title) + + def set_sentence(self, sent): + self._sent = sent.split() # [XX] use tagged? + self.reset() + + ######################################### + ## Reduce Production Selection + ######################################### + + def _toggle_grammar(self, *e): + if self._show_grammar.get(): + self._prodframe.pack( + fill="both", side="left", padx=2, after=self._feedbackframe + ) + self._lastoper1["text"] = "Show Grammar" + else: + self._prodframe.pack_forget() + self._lastoper1["text"] = "Hide Grammar" + self._lastoper2["text"] = "" + + def _prodlist_select(self, event): + selection = self._prodlist.curselection() + if len(selection) != 1: + return + index = int(selection[0]) + production = self._parser.reduce(self._productions[index]) + if production: + self._lastoper1["text"] = "Reduce:" + self._lastoper2["text"] = "%s" % production + if self._animate.get(): + self._animate_reduce() + else: + self._redraw() + else: + # Reset the production selections. + self._prodlist.selection_clear(0, "end") + for prod in self._parser.reducible_productions(): + index = self._productions.index(prod) + self._prodlist.selection_set(index) + + def _popup_reduce(self, widget): + # Remove old commands. 
+ productions = self._parser.reducible_productions() + if len(productions) == 0: + return + + self._reduce_menu.delete(0, "end") + for production in productions: + self._reduce_menu.add_command(label=str(production), command=self.reduce) + self._reduce_menu.post( + self._canvas.winfo_pointerx(), self._canvas.winfo_pointery() + ) + + ######################################### + ## Animations + ######################################### + + def _animate_shift(self): + # What widget are we shifting? + widget = self._rtextwidgets[0] + + # Where are we shifting from & to? + right = widget.bbox()[0] + if len(self._stackwidgets) == 0: + left = 5 + else: + left = self._stackwidgets[-1].bbox()[2] + 10 + + # Start animating. + dt = self._animate.get() + dx = (left - right) * 1.0 / dt + self._animate_shift_frame(dt, widget, dx) + + def _animate_shift_frame(self, frame, widget, dx): + if frame > 0: + self._animating_lock = 1 + widget.move(dx, 0) + self._top.after(10, self._animate_shift_frame, frame - 1, widget, dx) + else: + # but: stacktop?? + + # Shift the widget to the stack. + del self._rtextwidgets[0] + self._stackwidgets.append(widget) + self._animating_lock = 0 + + # Display the available productions. + self._draw_stack_top(widget) + self._highlight_productions() + + def _animate_reduce(self): + # What widgets are we shifting? + numwidgets = len(self._parser.stack()[-1]) # number of children + widgets = self._stackwidgets[-numwidgets:] + + # How far are we moving? + if isinstance(widgets[0], TreeSegmentWidget): + ydist = 15 + widgets[0].label().height() + else: + ydist = 15 + widgets[0].height() + + # Start animating. + dt = self._animate.get() + dy = ydist * 2.0 / dt + self._animate_reduce_frame(dt / 2, widgets, dy) + + def _animate_reduce_frame(self, frame, widgets, dy): + if frame > 0: + self._animating_lock = 1 + for widget in widgets: + widget.move(0, dy) + self._top.after(10, self._animate_reduce_frame, frame - 1, widgets, dy) + else: + del self._stackwidgets[-len(widgets) :] + for widget in widgets: + self._cframe.remove_widget(widget) + tok = self._parser.stack()[-1] + if not isinstance(tok, Tree): + raise ValueError() + label = TextWidget( + self._canvas, str(tok.label()), color="#006060", font=self._boldfont + ) + widget = TreeSegmentWidget(self._canvas, label, widgets, width=2) + (x1, y1, x2, y2) = self._stacklabel.bbox() + y = y2 - y1 + 10 + if not self._stackwidgets: + x = 5 + else: + x = self._stackwidgets[-1].bbox()[2] + 10 + self._cframe.add_widget(widget, x, y) + self._stackwidgets.append(widget) + + # Display the available productions. + self._draw_stack_top(widget) + self._highlight_productions() + + # # Delete the old widgets.. + # del self._stackwidgets[-len(widgets):] + # for widget in widgets: + # self._cframe.destroy_widget(widget) + # + # # Make a new one. 
+ # tok = self._parser.stack()[-1] + # if isinstance(tok, Tree): + # attribs = {'tree_color': '#4080a0', 'tree_width': 2, + # 'node_font': bold, 'node_color': '#006060', + # 'leaf_color': '#006060', 'leaf_font':self._font} + # widget = tree_to_treesegment(self._canvas, tok.type(), + # **attribs) + # widget.node()['color'] = '#000000' + # else: + # widget = TextWidget(self._canvas, tok.type(), + # color='#000000', font=self._font) + # widget.bind_click(self._popup_reduce) + # (x1, y1, x2, y2) = self._stacklabel.bbox() + # y = y2-y1+10 + # if not self._stackwidgets: x = 5 + # else: x = self._stackwidgets[-1].bbox()[2] + 10 + # self._cframe.add_widget(widget, x, y) + # self._stackwidgets.append(widget) + + # self._redraw() + self._animating_lock = 0 + + ######################################### + ## Hovering. + ######################################### + + def _highlight_hover(self, event): + # What production are we hovering over? + index = self._prodlist.nearest(event.y) + if self._hover == index: + return + + # Clear any previous hover highlighting. + self._clear_hover() + + # If the production corresponds to an available reduction, + # highlight the stack. + selection = [int(s) for s in self._prodlist.curselection()] + if index in selection: + rhslen = len(self._productions[index].rhs()) + for stackwidget in self._stackwidgets[-rhslen:]: + if isinstance(stackwidget, TreeSegmentWidget): + stackwidget.label()["color"] = "#00a000" + else: + stackwidget["color"] = "#00a000" + + # Remember what production we're hovering over. + self._hover = index + + def _clear_hover(self, *event): + # Clear any previous hover highlighting. + if self._hover == -1: + return + self._hover = -1 + for stackwidget in self._stackwidgets: + if isinstance(stackwidget, TreeSegmentWidget): + stackwidget.label()["color"] = "black" + else: + stackwidget["color"] = "black" + + +def app(): + """ + Create a shift reduce parser app, using a simple grammar and + text. 
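+
+    If only the parses are wanted, without the GUI, the underlying
+    ``SteppingShiftReduceParser`` can be driven directly. A rough sketch that
+    mirrors this app's rule of reducing when possible and shifting otherwise
+    (the two-token grammar is only for illustration)::
+
+        from nltk.grammar import CFG
+        from nltk.parse import SteppingShiftReduceParser
+
+        parser = SteppingShiftReduceParser(CFG.fromstring("S -> 'a' 'b'"))
+        parser.initialize(['a', 'b'])
+        while parser.reduce() or parser.shift():
+            pass
+        print(list(parser.parses()))   # should contain the single S tree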
+ """ + + from nltk.grammar import CFG, Nonterminal, Production + + nonterminals = "S VP NP PP P N Name V Det" + (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split()) + + productions = ( + # Syntactic Productions + Production(S, [NP, VP]), + Production(NP, [Det, N]), + Production(NP, [NP, PP]), + Production(VP, [VP, PP]), + Production(VP, [V, NP, PP]), + Production(VP, [V, NP]), + Production(PP, [P, NP]), + # Lexical Productions + Production(NP, ["I"]), + Production(Det, ["the"]), + Production(Det, ["a"]), + Production(N, ["man"]), + Production(V, ["saw"]), + Production(P, ["in"]), + Production(P, ["with"]), + Production(N, ["park"]), + Production(N, ["dog"]), + Production(N, ["statue"]), + Production(Det, ["my"]), + ) + + grammar = CFG(S, productions) + + # tokenize the sentence + sent = "my dog saw a man in the park with a statue".split() + + ShiftReduceApp(grammar, sent).mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/wordfreq_app.py b/llmeval-env/lib/python3.10/site-packages/nltk/app/wordfreq_app.py new file mode 100644 index 0000000000000000000000000000000000000000..2846b31216be4611aeabb539782137f2f0decac7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/app/wordfreq_app.py @@ -0,0 +1,36 @@ +# Natural Language Toolkit: Wordfreq Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sumukh Ghodke +# URL: +# For license information, see LICENSE.TXT + +from matplotlib import pylab + +from nltk.corpus import gutenberg +from nltk.text import Text + + +def plot_word_freq_dist(text): + fd = text.vocab() + + samples = [item for item, _ in fd.most_common(50)] + values = [fd[sample] for sample in samples] + values = [sum(values[: i + 1]) * 100.0 / fd.N() for i in range(len(values))] + pylab.title(text.name) + pylab.xlabel("Samples") + pylab.ylabel("Cumulative Percentage") + pylab.plot(values) + pylab.xticks(range(len(samples)), [str(s) for s in samples], rotation=90) + pylab.show() + + +def app(): + t1 = Text(gutenberg.words("melville-moby_dick.txt")) + plot_word_freq_dist(t1) + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/app/wordnet_app.py b/llmeval-env/lib/python3.10/site-packages/nltk/app/wordnet_app.py new file mode 100644 index 0000000000000000000000000000000000000000..afed38b947d0ec231fe4d6f2f56614358d98c7b2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/app/wordnet_app.py @@ -0,0 +1,1005 @@ +# Natural Language Toolkit: WordNet Browser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jussi Salmela +# Paul Bone +# URL: +# For license information, see LICENSE.TXT + +""" +A WordNet Browser application which launches the default browser +(if it is not already running) and opens a new tab with a connection +to http://localhost:port/ . It also starts an HTTP server on the +specified port and begins serving browser requests. The default +port is 8000. (For command-line help, run "python wordnet -h") +This application requires that the user's web browser supports +Javascript. + +BrowServer is a server for browsing the NLTK Wordnet database It first +launches a browser client to be used for browsing and then starts +serving the requests of that and maybe other clients + +Usage:: + + browserver.py -h + browserver.py [-s] [-p ] + +Options:: + + -h or --help + Display this help message. 
+ + -l or --log-file + Logs messages to the given file, If this option is not specified + messages are silently dropped. + + -p or --port + Run the web server on this TCP port, defaults to 8000. + + -s or --server-mode + Do not start a web browser, and do not allow a user to + shutdown the server through the web interface. +""" +# TODO: throughout this package variable names and docstrings need +# modifying to be compliant with NLTK's coding standards. Tests also +# need to be develop to ensure this continues to work in the face of +# changes to other NLTK packages. + +import base64 +import copy +import getopt +import io +import os +import pickle +import sys +import threading +import time +import webbrowser +from collections import defaultdict +from http.server import BaseHTTPRequestHandler, HTTPServer + +# Allow this program to run inside the NLTK source tree. +from sys import argv +from urllib.parse import unquote_plus + +from nltk.corpus import wordnet as wn +from nltk.corpus.reader.wordnet import Lemma, Synset + +firstClient = True + +# True if we're not also running a web browser. The value f server_mode +# gets set by demo(). +server_mode = None + +# If set this is a file object for writing log messages. +logfile = None + + +class MyServerHandler(BaseHTTPRequestHandler): + def do_HEAD(self): + self.send_head() + + def do_GET(self): + global firstClient + sp = self.path[1:] + if unquote_plus(sp) == "SHUTDOWN THE SERVER": + if server_mode: + page = "Server must be killed with SIGTERM." + type = "text/plain" + else: + print("Server shutting down!") + os._exit(0) + + elif sp == "": # First request. + type = "text/html" + if not server_mode and firstClient: + firstClient = False + page = get_static_index_page(True) + else: + page = get_static_index_page(False) + word = "green" + + elif sp.endswith(".html"): # Trying to fetch a HTML file TODO: + type = "text/html" + usp = unquote_plus(sp) + if usp == "NLTK Wordnet Browser Database Info.html": + word = "* Database Info *" + if os.path.isfile(usp): + with open(usp) as infile: + page = infile.read() + else: + page = ( + (html_header % word) + "

<p>The database info file:" + "<p><b>" + + usp + + "</b>" + + "<p>was not found. Run this:" + + "<p>python dbinfo_html.py" + + "<p>
to produce it." + + html_trailer + ) + else: + # Handle files here. + word = sp + try: + page = get_static_page_by_path(usp) + except FileNotFoundError: + page = "Internal error: Path for static page '%s' is unknown" % usp + # Set type to plain to prevent XSS by printing the path as HTML + type = "text/plain" + elif sp.startswith("search"): + # This doesn't seem to work with MWEs. + type = "text/html" + parts = (sp.split("?")[1]).split("&") + word = [ + p.split("=")[1].replace("+", " ") + for p in parts + if p.startswith("nextWord") + ][0] + page, word = page_from_word(word) + elif sp.startswith("lookup_"): + # TODO add a variation of this that takes a non ecoded word or MWE. + type = "text/html" + sp = sp[len("lookup_") :] + page, word = page_from_href(sp) + elif sp == "start_page": + # if this is the first request we should display help + # information, and possibly set a default word. + type = "text/html" + page, word = page_from_word("wordnet") + else: + type = "text/plain" + page = "Could not parse request: '%s'" % sp + + # Send result. + self.send_head(type) + self.wfile.write(page.encode("utf8")) + + def send_head(self, type=None): + self.send_response(200) + self.send_header("Content-type", type) + self.end_headers() + + def log_message(self, format, *args): + global logfile + + if logfile: + logfile.write( + "%s - - [%s] %s\n" + % (self.address_string(), self.log_date_time_string(), format % args) + ) + + +def get_unique_counter_from_url(sp): + """ + Extract the unique counter from the URL if it has one. Otherwise return + null. + """ + pos = sp.rfind("%23") + if pos != -1: + return int(sp[(pos + 3) :]) + else: + return None + + +def wnb(port=8000, runBrowser=True, logfilename=None): + """ + Run NLTK Wordnet Browser Server. + + :param port: The port number for the server to listen on, defaults to + 8000 + :type port: int + + :param runBrowser: True to start a web browser and point it at the web + server. + :type runBrowser: bool + """ + # The webbrowser module is unpredictable, typically it blocks if it uses + # a console web browser, and doesn't block if it uses a GUI webbrowser, + # so we need to force it to have a clear correct behaviour. + # + # Normally the server should run for as long as the user wants. they + # should idealy be able to control this from the UI by closing the + # window or tab. Second best would be clicking a button to say + # 'Shutdown' that first shutsdown the server and closes the window or + # tab, or exits the text-mode browser. Both of these are unfreasable. + # + # The next best alternative is to start the server, have it close when + # it receives SIGTERM (default), and run the browser as well. The user + # may have to shutdown both programs. + # + # Since webbrowser may block, and the webserver will block, we must run + # them in separate threads. + # + global server_mode, logfile + server_mode = not runBrowser + + # Setup logging. + if logfilename: + try: + logfile = open(logfilename, "a", 1) # 1 means 'line buffering' + except OSError as e: + sys.stderr.write("Couldn't open %s for writing: %s", logfilename, e) + sys.exit(1) + else: + logfile = None + + # Compute URL and start web browser + url = "http://localhost:" + str(port) + + server_ready = None + browser_thread = None + + if runBrowser: + server_ready = threading.Event() + browser_thread = startBrowser(url, server_ready) + + # Start the server. 
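    # An illustrative usage sketch (hypothetical call site, not taken from the
    # original file; it assumes this module is importable as
    # nltk.app.wordnet_app):
    #
    #     from nltk.app.wordnet_app import wnb
    #     wnb(port=8080, runBrowser=False, logfilename="wordnet-browser.log")
    #
    # runs the browser server headless on port 8080 with line-buffered logging;
    # the call then blocks in serve_forever() below until the process receives
    # SIGTERM or a KeyboardInterrupt.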
+ server = HTTPServer(("", port), MyServerHandler) + if logfile: + logfile.write("NLTK Wordnet browser server running serving: %s\n" % url) + if runBrowser: + server_ready.set() + + try: + server.serve_forever() + except KeyboardInterrupt: + pass + + if runBrowser: + browser_thread.join() + + if logfile: + logfile.close() + + +def startBrowser(url, server_ready): + def run(): + server_ready.wait() + time.sleep(1) # Wait a little bit more, there's still the chance of + # a race condition. + webbrowser.open(url, new=2, autoraise=1) + + t = threading.Thread(target=run) + t.start() + return t + + +##################################################################### +# Utilities +##################################################################### + + +""" +WordNet Browser Utilities. + +This provides a backend to both wxbrowse and browserver.py. +""" + +################################################################################ +# +# Main logic for wordnet browser. +# + +# This is wrapped inside a function since wn is only available if the +# WordNet corpus is installed. +def _pos_tuples(): + return [ + (wn.NOUN, "N", "noun"), + (wn.VERB, "V", "verb"), + (wn.ADJ, "J", "adj"), + (wn.ADV, "R", "adv"), + ] + + +def _pos_match(pos_tuple): + """ + This function returns the complete pos tuple for the partial pos + tuple given to it. It attempts to match it against the first + non-null component of the given pos tuple. + """ + if pos_tuple[0] == "s": + pos_tuple = ("a", pos_tuple[1], pos_tuple[2]) + for n, x in enumerate(pos_tuple): + if x is not None: + break + for pt in _pos_tuples(): + if pt[n] == pos_tuple[n]: + return pt + return None + + +HYPONYM = 0 +HYPERNYM = 1 +CLASS_REGIONAL = 2 +PART_HOLONYM = 3 +PART_MERONYM = 4 +ATTRIBUTE = 5 +SUBSTANCE_HOLONYM = 6 +SUBSTANCE_MERONYM = 7 +MEMBER_HOLONYM = 8 +MEMBER_MERONYM = 9 +VERB_GROUP = 10 +INSTANCE_HYPONYM = 12 +INSTANCE_HYPERNYM = 13 +CAUSE = 14 +ALSO_SEE = 15 +SIMILAR = 16 +ENTAILMENT = 17 +ANTONYM = 18 +FRAMES = 19 +PERTAINYM = 20 + +CLASS_CATEGORY = 21 +CLASS_USAGE = 22 +CLASS_REGIONAL = 23 +CLASS_USAGE = 24 +CLASS_CATEGORY = 11 + +DERIVATIONALLY_RELATED_FORM = 25 + +INDIRECT_HYPERNYMS = 26 + + +def lemma_property(word, synset, func): + def flattern(l): + if l == []: + return [] + else: + return l[0] + flattern(l[1:]) + + return flattern([func(l) for l in synset.lemmas() if l.name == word]) + + +def rebuild_tree(orig_tree): + node = orig_tree[0] + children = orig_tree[1:] + return (node, [rebuild_tree(t) for t in children]) + + +def get_relations_data(word, synset): + """ + Get synset relations data for a synset. Note that this doesn't + yet support things such as full hyponym vs direct hyponym. 
+ """ + if synset.pos() == wn.NOUN: + return ( + (HYPONYM, "Hyponyms", synset.hyponyms()), + (INSTANCE_HYPONYM, "Instance hyponyms", synset.instance_hyponyms()), + (HYPERNYM, "Direct hypernyms", synset.hypernyms()), + ( + INDIRECT_HYPERNYMS, + "Indirect hypernyms", + rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1], + ), + # hypernyms', 'Sister terms', + (INSTANCE_HYPERNYM, "Instance hypernyms", synset.instance_hypernyms()), + # (CLASS_REGIONAL, ['domain term region'], ), + (PART_HOLONYM, "Part holonyms", synset.part_holonyms()), + (PART_MERONYM, "Part meronyms", synset.part_meronyms()), + (SUBSTANCE_HOLONYM, "Substance holonyms", synset.substance_holonyms()), + (SUBSTANCE_MERONYM, "Substance meronyms", synset.substance_meronyms()), + (MEMBER_HOLONYM, "Member holonyms", synset.member_holonyms()), + (MEMBER_MERONYM, "Member meronyms", synset.member_meronyms()), + (ATTRIBUTE, "Attributes", synset.attributes()), + (ANTONYM, "Antonyms", lemma_property(word, synset, lambda l: l.antonyms())), + ( + DERIVATIONALLY_RELATED_FORM, + "Derivationally related form", + lemma_property( + word, synset, lambda l: l.derivationally_related_forms() + ), + ), + ) + elif synset.pos() == wn.VERB: + return ( + (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())), + (HYPONYM, "Hyponym", synset.hyponyms()), + (HYPERNYM, "Direct hypernyms", synset.hypernyms()), + ( + INDIRECT_HYPERNYMS, + "Indirect hypernyms", + rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1], + ), + (ENTAILMENT, "Entailments", synset.entailments()), + (CAUSE, "Causes", synset.causes()), + (ALSO_SEE, "Also see", synset.also_sees()), + (VERB_GROUP, "Verb Groups", synset.verb_groups()), + ( + DERIVATIONALLY_RELATED_FORM, + "Derivationally related form", + lemma_property( + word, synset, lambda l: l.derivationally_related_forms() + ), + ), + ) + elif synset.pos() == wn.ADJ or synset.pos == wn.ADJ_SAT: + return ( + (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())), + (SIMILAR, "Similar to", synset.similar_tos()), + # Participle of verb - not supported by corpus + ( + PERTAINYM, + "Pertainyms", + lemma_property(word, synset, lambda l: l.pertainyms()), + ), + (ATTRIBUTE, "Attributes", synset.attributes()), + (ALSO_SEE, "Also see", synset.also_sees()), + ) + elif synset.pos() == wn.ADV: + # This is weird. adverbs such as 'quick' and 'fast' don't seem + # to have antonyms returned by the corpus.a + return ( + (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())), + ) + # Derived from adjective - not supported by corpus + else: + raise TypeError("Unhandles synset POS type: " + str(synset.pos())) + + +html_header = """ + + + + + +NLTK Wordnet Browser display of: %s + +""" +html_trailer = """ + + +""" + +explanation = """ +

<h3>Search Help</h3>
+<ul><li>The display below the line is an example of the output the browser
+shows you when you enter a search word. The search word was green.</li>
+<li>The search result shows for different parts of speech the synsets
+i.e. different meanings for the word.</li>
+<li>All underlined texts are hypertext links. There are two types of links:
+word links and others. Clicking a word link carries out a search for the word
+in the Wordnet database.</li>
+<li>Clicking a link of the other type opens a display section of data attached
+to that link. Clicking that link a second time closes the section again.</li>
+<li>Clicking S: opens a section showing the relations for that synset.</li>
+<li>Clicking on a relation name opens a section that displays the associated
+synsets.</li>
+<li>Type a search word in the Word field and start the search by the
+Enter/Return key or click the Search button.</li></ul>
+
+""" + +# HTML oriented functions + + +def _bold(txt): + return "%s" % txt + + +def _center(txt): + return "
<center>%s</center>
" % txt + + +def _hlev(n, txt): + return "%s" % (n, txt, n) + + +def _italic(txt): + return "%s" % txt + + +def _li(txt): + return "
  • %s
  • " % txt + + +def pg(word, body): + """ + Return a HTML page of NLTK Browser format constructed from the + word and body + + :param word: The word that the body corresponds to + :type word: str + :param body: The HTML body corresponding to the word + :type body: str + :return: a HTML page for the word-body combination + :rtype: str + """ + return (html_header % word) + body + html_trailer + + +def _ul(txt): + return "
      " + txt + "
    " + + +def _abbc(txt): + """ + abbc = asterisks, breaks, bold, center + """ + return _center(_bold("
    " * 10 + "*" * 10 + " " + txt + " " + "*" * 10)) + + +full_hyponym_cont_text = _ul(_li(_italic("(has full hyponym continuation)"))) + "\n" + + +def _get_synset(synset_key): + """ + The synset key is the unique name of the synset, this can be + retrieved via synset.name() + """ + return wn.synset(synset_key) + + +def _collect_one_synset(word, synset, synset_relations): + """ + Returns the HTML string for one synset or word + + :param word: the current word + :type word: str + :param synset: a synset + :type synset: synset + :param synset_relations: information about which synset relations + to display. + :type synset_relations: dict(synset_key, set(relation_id)) + :return: The HTML string built for this synset + :rtype: str + """ + if isinstance(synset, tuple): # It's a word + raise NotImplementedError("word not supported by _collect_one_synset") + + typ = "S" + pos_tuple = _pos_match((synset.pos(), None, None)) + assert pos_tuple is not None, "pos_tuple is null: synset.pos(): %s" % synset.pos() + descr = pos_tuple[2] + ref = copy.deepcopy(Reference(word, synset_relations)) + ref.toggle_synset(synset) + synset_label = typ + ";" + if synset.name() in synset_relations: + synset_label = _bold(synset_label) + s = f"
  • {make_lookup_link(ref, synset_label)} ({descr}) " + + def format_lemma(w): + w = w.replace("_", " ") + if w.lower() == word: + return _bold(w) + else: + ref = Reference(w) + return make_lookup_link(ref, w) + + s += ", ".join(format_lemma(l.name()) for l in synset.lemmas()) + + gl = " ({}) {} ".format( + synset.definition(), + "; ".join('"%s"' % e for e in synset.examples()), + ) + return s + gl + _synset_relations(word, synset, synset_relations) + "
  • \n" + + +def _collect_all_synsets(word, pos, synset_relations=dict()): + """ + Return a HTML unordered list of synsets for the given word and + part of speech. + """ + return "
      %s\n
    \n" % "".join( + _collect_one_synset(word, synset, synset_relations) + for synset in wn.synsets(word, pos) + ) + + +def _synset_relations(word, synset, synset_relations): + """ + Builds the HTML string for the relations of a synset + + :param word: The current word + :type word: str + :param synset: The synset for which we're building the relations. + :type synset: Synset + :param synset_relations: synset keys and relation types for which to display relations. + :type synset_relations: dict(synset_key, set(relation_type)) + :return: The HTML for a synset's relations + :rtype: str + """ + + if not synset.name() in synset_relations: + return "" + ref = Reference(word, synset_relations) + + def relation_html(r): + if isinstance(r, Synset): + return make_lookup_link(Reference(r.lemma_names()[0]), r.lemma_names()[0]) + elif isinstance(r, Lemma): + return relation_html(r.synset()) + elif isinstance(r, tuple): + # It's probably a tuple containing a Synset and a list of + # similar tuples. This forms a tree of synsets. + return "{}\n
      {}
    \n".format( + relation_html(r[0]), + "".join("
  • %s
  • \n" % relation_html(sr) for sr in r[1]), + ) + else: + raise TypeError( + "r must be a synset, lemma or list, it was: type(r) = %s, r = %s" + % (type(r), r) + ) + + def make_synset_html(db_name, disp_name, rels): + synset_html = "%s\n" % make_lookup_link( + copy.deepcopy(ref).toggle_synset_relation(synset, db_name), + disp_name, + ) + + if db_name in ref.synset_relations[synset.name()]: + synset_html += "
      %s
    \n" % "".join( + "
  • %s
  • \n" % relation_html(r) for r in rels + ) + + return synset_html + + html = ( + "
      " + + "\n".join( + "
    • %s
    • " % make_synset_html(*rel_data) + for rel_data in get_relations_data(word, synset) + if rel_data[2] != [] + ) + + "
    " + ) + + return html + + +class RestrictedUnpickler(pickle.Unpickler): + """ + Unpickler that prevents any class or function from being used during loading. + """ + + def find_class(self, module, name): + # Forbid every function + raise pickle.UnpicklingError(f"global '{module}.{name}' is forbidden") + + +class Reference: + """ + A reference to a page that may be generated by page_word + """ + + def __init__(self, word, synset_relations=dict()): + """ + Build a reference to a new page. + + word is the word or words (separated by commas) for which to + search for synsets of + + synset_relations is a dictionary of synset keys to sets of + synset relation identifaiers to unfold a list of synset + relations for. + """ + self.word = word + self.synset_relations = synset_relations + + def encode(self): + """ + Encode this reference into a string to be used in a URL. + """ + # This uses a tuple rather than an object since the python + # pickle representation is much smaller and there is no need + # to represent the complete object. + string = pickle.dumps((self.word, self.synset_relations), -1) + return base64.urlsafe_b64encode(string).decode() + + @staticmethod + def decode(string): + """ + Decode a reference encoded with Reference.encode + """ + string = base64.urlsafe_b64decode(string.encode()) + word, synset_relations = RestrictedUnpickler(io.BytesIO(string)).load() + return Reference(word, synset_relations) + + def toggle_synset_relation(self, synset, relation): + """ + Toggle the display of the relations for the given synset and + relation type. + + This function will throw a KeyError if the synset is currently + not being displayed. + """ + if relation in self.synset_relations[synset.name()]: + self.synset_relations[synset.name()].remove(relation) + else: + self.synset_relations[synset.name()].add(relation) + + return self + + def toggle_synset(self, synset): + """ + Toggle displaying of the relation types for the given synset + """ + if synset.name() in self.synset_relations: + del self.synset_relations[synset.name()] + else: + self.synset_relations[synset.name()] = set() + + return self + + +def make_lookup_link(ref, label): + return f'{label}' + + +def page_from_word(word): + """ + Return a HTML page for the given word. + + :type word: str + :param word: The currently active word + :return: A tuple (page,word), where page is the new current HTML page + to be sent to the browser and + word is the new current word + :rtype: A tuple (str,str) + """ + return page_from_reference(Reference(word)) + + +def page_from_href(href): + """ + Returns a tuple of the HTML page built and the new current word + + :param href: The hypertext reference to be solved + :type href: str + :return: A tuple (page,word), where page is the new current HTML page + to be sent to the browser and + word is the new current word + :rtype: A tuple (str,str) + """ + return page_from_reference(Reference.decode(href)) + + +def page_from_reference(href): + """ + Returns a tuple of the HTML page built and the new current word + + :param href: The hypertext reference to be solved + :type href: str + :return: A tuple (page,word), where page is the new current HTML page + to be sent to the browser and + word is the new current word + :rtype: A tuple (str,str) + """ + word = href.word + pos_forms = defaultdict(list) + words = word.split(",") + words = [w for w in [w.strip().lower().replace(" ", "_") for w in words] if w != ""] + if len(words) == 0: + # No words were found. + return "", "Please specify a word to search for." 
+ + # This looks up multiple words at once. This is probably not + # necessary and may lead to problems. + for w in words: + for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]: + form = wn.morphy(w, pos) + if form and form not in pos_forms[pos]: + pos_forms[pos].append(form) + body = "" + for pos, pos_str, name in _pos_tuples(): + if pos in pos_forms: + body += _hlev(3, name) + "\n" + for w in pos_forms[pos]: + # Not all words of exc files are in the database, skip + # to the next word if a KeyError is raised. + try: + body += _collect_all_synsets(w, pos, href.synset_relations) + except KeyError: + pass + if not body: + body = "The word or words '%s' were not found in the dictionary." % word + return body, word + + +##################################################################### +# Static pages +##################################################################### + + +def get_static_page_by_path(path): + """ + Return a static HTML page from the path given. + """ + if path == "index_2.html": + return get_static_index_page(False) + elif path == "index.html": + return get_static_index_page(True) + elif path == "NLTK Wordnet Browser Database Info.html": + return "Display of Wordnet Database Statistics is not supported" + elif path == "upper_2.html": + return get_static_upper_page(False) + elif path == "upper.html": + return get_static_upper_page(True) + elif path == "web_help.html": + return get_static_web_help_page() + elif path == "wx_help.html": + return get_static_wx_help_page() + raise FileNotFoundError() + + +def get_static_web_help_page(): + """ + Return the static web help page. + """ + return """ + + + + + + NLTK Wordnet Browser display of: * Help * + + +

    NLTK Wordnet Browser Help

    +

    The NLTK Wordnet Browser is a tool to use in browsing the Wordnet database. It tries to behave like the Wordnet project's web browser but the difference is that the NLTK Wordnet Browser uses a local Wordnet database. +

    You are using the Javascript client part of the NLTK Wordnet BrowseServer. We assume your browser is in tab sheets enabled mode.

    +

    For background information on Wordnet, see the Wordnet project home page: https://wordnet.princeton.edu/. For more information on the NLTK project, see the project home: +https://www.nltk.org/. To get an idea of what the Wordnet version used by this browser includes choose Show Database Info from the View submenu.

    +

    Word search

    +

    The word to be searched is typed into the New Word field and the search started with Enter or by clicking the Search button. There is no uppercase/lowercase distinction: the search word is transformed to lowercase before the search.

    +

    In addition, the word does not have to be in base form. The browser tries to find the possible base form(s) by making certain morphological substitutions. Typing fLIeS as an obscure example gives one this. Click the previous link to see what this kind of search looks like and then come back to this page by using the Alt+LeftArrow key combination.

    +

    The result of a search is a display of one or more +synsets for every part of speech in which a form of the +search word was found to occur. A synset is a set of words +having the same sense or meaning. Each word in a synset that is +underlined is a hyperlink which can be clicked to trigger an +automatic search for that word.

    +

    Every synset has a hyperlink S: at the start of its +display line. Clicking that symbol shows you the name of every +relation that this synset is part of. Every relation name is a hyperlink that opens up a display for that relation. Clicking it another time closes the display again. Clicking another relation name on a line that has an opened relation closes the open relation and opens the clicked relation.

    +

    It is also possible to give two or more words or collocations to be searched at the same time separating them with a comma like this cheer up,clear up, for example. Click the previous link to see what this kind of search looks like and then come back to this page by using the Alt+LeftArrow key combination. As you could see the search result includes the synsets found in the same order than the forms were given in the search field.

    +

    +There are also word level (lexical) relations recorded in the Wordnet database. Opening this kind of relation displays lines with a hyperlink W: at their beginning. Clicking this link shows more info on the word in question.

    +

    The Buttons

    +

    The Search and Help buttons need no more explanation.

    +

    The Show Database Info button shows a collection of Wordnet database statistics.

    +

    The Shutdown the Server button is shown for the first client of the BrowServer program i.e. for the client that is automatically launched when the BrowServer is started but not for the succeeding clients in order to protect the server from accidental shutdowns. +

    + +""" + + +def get_static_welcome_message(): + """ + Get the static welcome page. + """ + return """ +

<h3>Search Help</h3>
+<ul><li>The display below the line is an example of the output the browser
+shows you when you enter a search word. The search word was green.</li>
+<li>The search result shows for different parts of speech the synsets
+i.e. different meanings for the word.</li>
+<li>All underlined texts are hypertext links. There are two types of links:
+word links and others. Clicking a word link carries out a search for the word
+in the Wordnet database.</li>
+<li>Clicking a link of the other type opens a display section of data attached
+to that link. Clicking that link a second time closes the section again.</li>
+<li>Clicking S: opens a section showing the relations for that synset.</li>
+<li>Clicking on a relation name opens a section that displays the associated
+synsets.</li>
+<li>Type a search word in the Next Word field and start the search by the
+Enter/Return key or click the Search button.</li></ul>
+
    +""" + + +def get_static_index_page(with_shutdown): + """ + Get the static index page. + """ + template = """ + + + + + NLTK Wordnet Browser + + + + + + + +""" + if with_shutdown: + upper_link = "upper.html" + else: + upper_link = "upper_2.html" + + return template % upper_link + + +def get_static_upper_page(with_shutdown): + """ + Return the upper frame page, + + If with_shutdown is True then a 'shutdown' button is also provided + to shutdown the server. + """ + template = """ + + + + + + Untitled Document + + +
    + Current Word:  + Next Word:  + +
    + Help + %s + + + +""" + if with_shutdown: + shutdown_link = 'Shutdown' + else: + shutdown_link = "" + + return template % shutdown_link + + +def usage(): + """ + Display the command line help message. + """ + print(__doc__) + + +def app(): + # Parse and interpret options. + (opts, _) = getopt.getopt( + argv[1:], "l:p:sh", ["logfile=", "port=", "server-mode", "help"] + ) + port = 8000 + server_mode = False + help_mode = False + logfilename = None + for (opt, value) in opts: + if (opt == "-l") or (opt == "--logfile"): + logfilename = str(value) + elif (opt == "-p") or (opt == "--port"): + port = int(value) + elif (opt == "-s") or (opt == "--server-mode"): + server_mode = True + elif (opt == "-h") or (opt == "--help"): + help_mode = True + + if help_mode: + usage() + else: + wnb(port, not server_mode, logfilename) + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2257b0d382ab33f1063b11f3bf471211c539bd04 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..104e4723449ab43f6db74aee7b03eb295ee86063 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c11bed485a0dca1e012c7d965a9582613961dd9a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/corenlp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b521663243b3d9373190c74f9fce033a7d5c8902 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/dependencygraph.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d8c64359b8fb04d2ed2a193cf5f56e715059df6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/earleychart.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4a15110e9909c3644213382a3ad167f898c1460 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/evaluate.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6876741fd26a5a91a413127389ad11b0af3bae53 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/featurechart.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da6a8882a3d9aa18ffeb401f8f12e3d065e7a273 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/generate.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf536994eee7677c43ca466b8fa2a25ecf6b28ab Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/malt.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97284591d1c79dfba16d19c2b59135a32fb2444c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/pchart.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f471c88e1de270f00e941fab6ec1f100dc29ec7b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/recursivedescent.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a92a1a6160459310b9b2308a026a34f095351bc3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/shiftreduce.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f36fd22e55c58f2b2349a8804386b8961b5993dd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/stanford.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c7c940766f309d54923203f1b5d61b85232a5ab Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/transitionparser.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72e5e575e10629a4cfaf336b5a8226da6c1a25aa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/util.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2cca4d8b8048b1b7423fb0d0d04789a8f1fe027 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/parse/__pycache__/viterbi.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/bllip.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/bllip.py new file mode 100644 index 0000000000000000000000000000000000000000..581ed661c256ca95ed89643516eb6edee8997300 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/bllip.py @@ -0,0 +1,299 @@ +# Natural Language Toolkit: Interface to BLLIP Parser +# +# Author: David McClosky +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from nltk.parse.api import ParserI +from nltk.tree import Tree + +""" +Interface for parsing with BLLIP Parser. Requires the Python +bllipparser module. BllipParser objects can be constructed with the +``BllipParser.from_unified_model_dir`` class method or manually using the +``BllipParser`` constructor. The former is generally easier if you have +a BLLIP Parser unified model directory -- a basic model can be obtained +from NLTK's downloader. More unified parsing models can be obtained with +BLLIP Parser's ModelFetcher (run ``python -m bllipparser.ModelFetcher`` +or see docs for ``bllipparser.ModelFetcher.download_and_install_model``). + +Basic usage:: + + # download and install a basic unified parsing model (Wall Street Journal) + # sudo python -m nltk.downloader bllip_wsj_no_aux + + >>> from nltk.data import find + >>> model_dir = find('models/bllip_wsj_no_aux').path + >>> bllip = BllipParser.from_unified_model_dir(model_dir) + + # 1-best parsing + >>> sentence1 = 'British left waffles on Falklands .'.split() + >>> top_parse = bllip.parse_one(sentence1) + >>> print(top_parse) + (S1 + (S + (NP (JJ British) (NN left)) + (VP (VBZ waffles) (PP (IN on) (NP (NNP Falklands)))) + (. .))) + + # n-best parsing + >>> sentence2 = 'Time flies'.split() + >>> all_parses = bllip.parse_all(sentence2) + >>> print(len(all_parses)) + 50 + >>> print(all_parses[0]) + (S1 (S (NP (NNP Time)) (VP (VBZ flies)))) + + # incorporating external tagging constraints (None means unconstrained tag) + >>> constrained1 = bllip.tagged_parse([('Time', 'VB'), ('flies', 'NNS')]) + >>> print(next(constrained1)) + (S1 (NP (VB Time) (NNS flies))) + >>> constrained2 = bllip.tagged_parse([('Time', 'NN'), ('flies', None)]) + >>> print(next(constrained2)) + (S1 (NP (NN Time) (VBZ flies))) + +References +---------- + +- Charniak, Eugene. "A maximum-entropy-inspired parser." Proceedings of + the 1st North American chapter of the Association for Computational + Linguistics conference. Association for Computational Linguistics, + 2000. + +- Charniak, Eugene, and Mark Johnson. "Coarse-to-fine n-best parsing + and MaxEnt discriminative reranking." 
Proceedings of the 43rd Annual + Meeting on Association for Computational Linguistics. Association + for Computational Linguistics, 2005. + +Known issues +------------ + +Note that BLLIP Parser is not currently threadsafe. Since this module +uses a SWIG interface, it is potentially unsafe to create multiple +``BllipParser`` objects in the same process. BLLIP Parser currently +has issues with non-ASCII text and will raise an error if given any. + +See https://pypi.python.org/pypi/bllipparser/ for more information +on BLLIP Parser's Python interface. +""" + +__all__ = ["BllipParser"] + +# this block allows this module to be imported even if bllipparser isn't +# available +try: + from bllipparser import RerankingParser + from bllipparser.RerankingParser import get_unified_model_parameters + + def _ensure_bllip_import_or_error(): + pass + +except ImportError as ie: + + def _ensure_bllip_import_or_error(ie=ie): + raise ImportError("Couldn't import bllipparser module: %s" % ie) + + +def _ensure_ascii(words): + try: + for i, word in enumerate(words): + word.encode("ascii") + except UnicodeEncodeError as e: + raise ValueError( + f"Token {i} ({word!r}) is non-ASCII. BLLIP Parser " + "currently doesn't support non-ASCII inputs." + ) from e + + +def _scored_parse_to_nltk_tree(scored_parse): + return Tree.fromstring(str(scored_parse.ptb_parse)) + + +class BllipParser(ParserI): + """ + Interface for parsing with BLLIP Parser. BllipParser objects can be + constructed with the ``BllipParser.from_unified_model_dir`` class + method or manually using the ``BllipParser`` constructor. + """ + + def __init__( + self, + parser_model=None, + reranker_features=None, + reranker_weights=None, + parser_options=None, + reranker_options=None, + ): + """ + Load a BLLIP Parser model from scratch. You'll typically want to + use the ``from_unified_model_dir()`` class method to construct + this object. + + :param parser_model: Path to parser model directory + :type parser_model: str + + :param reranker_features: Path the reranker model's features file + :type reranker_features: str + + :param reranker_weights: Path the reranker model's weights file + :type reranker_weights: str + + :param parser_options: optional dictionary of parser options, see + ``bllipparser.RerankingParser.RerankingParser.load_parser_options()`` + for more information. + :type parser_options: dict(str) + + :param reranker_options: optional + dictionary of reranker options, see + ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()`` + for more information. + :type reranker_options: dict(str) + """ + _ensure_bllip_import_or_error() + + parser_options = parser_options or {} + reranker_options = reranker_options or {} + + self.rrp = RerankingParser() + self.rrp.load_parser_model(parser_model, **parser_options) + if reranker_features and reranker_weights: + self.rrp.load_reranker_model( + features_filename=reranker_features, + weights_filename=reranker_weights, + **reranker_options, + ) + + def parse(self, sentence): + """ + Use BLLIP Parser to parse a sentence. Takes a sentence as a list + of words; it will be automatically tagged with this BLLIP Parser + instance's tagger. + + :return: An iterator that generates parse trees for the sentence + from most likely to least likely. 
+ + :param sentence: The sentence to be parsed + :type sentence: list(str) + :rtype: iter(Tree) + """ + _ensure_ascii(sentence) + nbest_list = self.rrp.parse(sentence) + for scored_parse in nbest_list: + yield _scored_parse_to_nltk_tree(scored_parse) + + def tagged_parse(self, word_and_tag_pairs): + """ + Use BLLIP to parse a sentence. Takes a sentence as a list of + (word, tag) tuples; the sentence must have already been tokenized + and tagged. BLLIP will attempt to use the tags provided but may + use others if it can't come up with a complete parse subject + to those constraints. You may also specify a tag as ``None`` + to leave a token's tag unconstrained. + + :return: An iterator that generates parse trees for the sentence + from most likely to least likely. + + :param sentence: Input sentence to parse as (word, tag) pairs + :type sentence: list(tuple(str, str)) + :rtype: iter(Tree) + """ + words = [] + tag_map = {} + for i, (word, tag) in enumerate(word_and_tag_pairs): + words.append(word) + if tag is not None: + tag_map[i] = tag + + _ensure_ascii(words) + nbest_list = self.rrp.parse_tagged(words, tag_map) + for scored_parse in nbest_list: + yield _scored_parse_to_nltk_tree(scored_parse) + + @classmethod + def from_unified_model_dir( + cls, model_dir, parser_options=None, reranker_options=None + ): + """ + Create a ``BllipParser`` object from a unified parsing model + directory. Unified parsing model directories are a standardized + way of storing BLLIP parser and reranker models together on disk. + See ``bllipparser.RerankingParser.get_unified_model_parameters()`` + for more information about unified model directories. + + :return: A ``BllipParser`` object using the parser and reranker + models in the model directory. + + :param model_dir: Path to the unified model directory. + :type model_dir: str + :param parser_options: optional dictionary of parser options, see + ``bllipparser.RerankingParser.RerankingParser.load_parser_options()`` + for more information. + :type parser_options: dict(str) + :param reranker_options: optional dictionary of reranker options, see + ``bllipparser.RerankingParser.RerankingParser.load_reranker_model()`` + for more information. + :type reranker_options: dict(str) + :rtype: BllipParser + """ + ( + parser_model_dir, + reranker_features_filename, + reranker_weights_filename, + ) = get_unified_model_parameters(model_dir) + return cls( + parser_model_dir, + reranker_features_filename, + reranker_weights_filename, + parser_options, + reranker_options, + ) + + +def demo(): + """This assumes the Python module bllipparser is installed.""" + + # download and install a basic unified parsing model (Wall Street Journal) + # sudo python -m nltk.downloader bllip_wsj_no_aux + + from nltk.data import find + + model_dir = find("models/bllip_wsj_no_aux").path + + print("Loading BLLIP Parsing models...") + # the easiest way to get started is to use a unified model + bllip = BllipParser.from_unified_model_dir(model_dir) + print("Done.") + + sentence1 = "British left waffles on Falklands .".split() + sentence2 = "I saw the man with the telescope .".split() + # this sentence is known to fail under the WSJ parsing model + fail1 = "# ! ? 
: -".split() + for sentence in (sentence1, sentence2, fail1): + print("Sentence: %r" % " ".join(sentence)) + try: + tree = next(bllip.parse(sentence)) + print(tree) + except StopIteration: + print("(parse failed)") + + # n-best parsing demo + for i, parse in enumerate(bllip.parse(sentence1)): + print("parse %d:\n%s" % (i, parse)) + + # using external POS tag constraints + print( + "forcing 'tree' to be 'NN':", + next(bllip.tagged_parse([("A", None), ("tree", "NN")])), + ) + print( + "forcing 'A' to be 'DT' and 'tree' to be 'NNP':", + next(bllip.tagged_parse([("A", "DT"), ("tree", "NNP")])), + ) + # constraints don't have to make sense... (though on more complicated + # sentences, they may cause the parse to fail) + print( + "forcing 'A' to be 'NNP':", + next(bllip.tagged_parse([("A", "NNP"), ("tree", None)])), + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/chart.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/chart.py new file mode 100644 index 0000000000000000000000000000000000000000..3f068d7d8ac61f1e46aae99a5ea915c74ac2791b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/chart.py @@ -0,0 +1,1848 @@ +# Natural Language Toolkit: A Chart Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Jean Mark Gawron +# Peter Ljunglöf +# URL: +# For license information, see LICENSE.TXT + +""" +Data classes and parser implementations for "chart parsers", which +use dynamic programming to efficiently parse a text. A chart +parser derives parse trees for a text by iteratively adding "edges" +to a "chart." Each edge represents a hypothesis about the tree +structure for a subsequence of the text. The chart is a +"blackboard" for composing and combining these hypotheses. + +When a chart parser begins parsing a text, it creates a new (empty) +chart, spanning the text. It then incrementally adds new edges to the +chart. A set of "chart rules" specifies the conditions under which +new edges should be added to the chart. Once the chart reaches a +stage where none of the chart rules adds any new edges, parsing is +complete. + +Charts are encoded with the ``Chart`` class, and edges are encoded with +the ``TreeEdge`` and ``LeafEdge`` classes. The chart parser module +defines three chart parsers: + + - ``ChartParser`` is a simple and flexible chart parser. Given a + set of chart rules, it will apply those rules to the chart until + no more edges are added. + + - ``SteppingChartParser`` is a subclass of ``ChartParser`` that can + be used to step through the parsing process. +""" + +import itertools +import re +import warnings +from functools import total_ordering + +from nltk.grammar import PCFG, is_nonterminal, is_terminal +from nltk.internals import raise_unorderable_types +from nltk.parse.api import ParserI +from nltk.tree import Tree +from nltk.util import OrderedDict + +######################################################################## +## Edges +######################################################################## + + +@total_ordering +class EdgeI: + """ + A hypothesis about the structure of part of a sentence. + Each edge records the fact that a structure is (partially) + consistent with the sentence. An edge contains: + + - A span, indicating what part of the sentence is + consistent with the hypothesized structure. + - A left-hand side, specifying what kind of structure is + hypothesized. + - A right-hand side, specifying the contents of the + hypothesized structure. 
+ - A dot position, indicating how much of the hypothesized + structure is consistent with the sentence. + + Every edge is either complete or incomplete: + + - An edge is complete if its structure is fully consistent + with the sentence. + - An edge is incomplete if its structure is partially + consistent with the sentence. For every incomplete edge, the + span specifies a possible prefix for the edge's structure. + + There are two kinds of edge: + + - A ``TreeEdge`` records which trees have been found to + be (partially) consistent with the text. + - A ``LeafEdge`` records the tokens occurring in the text. + + The ``EdgeI`` interface provides a common interface to both types + of edge, allowing chart parsers to treat them in a uniform manner. + """ + + def __init__(self): + if self.__class__ == EdgeI: + raise TypeError("Edge is an abstract interface") + + # //////////////////////////////////////////////////////////// + # Span + # //////////////////////////////////////////////////////////// + + def span(self): + """ + Return a tuple ``(s, e)``, where ``tokens[s:e]`` is the + portion of the sentence that is consistent with this + edge's structure. + + :rtype: tuple(int, int) + """ + raise NotImplementedError() + + def start(self): + """ + Return the start index of this edge's span. + + :rtype: int + """ + raise NotImplementedError() + + def end(self): + """ + Return the end index of this edge's span. + + :rtype: int + """ + raise NotImplementedError() + + def length(self): + """ + Return the length of this edge's span. + + :rtype: int + """ + raise NotImplementedError() + + # //////////////////////////////////////////////////////////// + # Left Hand Side + # //////////////////////////////////////////////////////////// + + def lhs(self): + """ + Return this edge's left-hand side, which specifies what kind + of structure is hypothesized by this edge. + + :see: ``TreeEdge`` and ``LeafEdge`` for a description of + the left-hand side values for each edge type. + """ + raise NotImplementedError() + + # //////////////////////////////////////////////////////////// + # Right Hand Side + # //////////////////////////////////////////////////////////// + + def rhs(self): + """ + Return this edge's right-hand side, which specifies + the content of the structure hypothesized by this edge. + + :see: ``TreeEdge`` and ``LeafEdge`` for a description of + the right-hand side values for each edge type. + """ + raise NotImplementedError() + + def dot(self): + """ + Return this edge's dot position, which indicates how much of + the hypothesized structure is consistent with the + sentence. In particular, ``self.rhs[:dot]`` is consistent + with ``tokens[self.start():self.end()]``. + + :rtype: int + """ + raise NotImplementedError() + + def nextsym(self): + """ + Return the element of this edge's right-hand side that + immediately follows its dot. + + :rtype: Nonterminal or terminal or None + """ + raise NotImplementedError() + + def is_complete(self): + """ + Return True if this edge's structure is fully consistent + with the text. + + :rtype: bool + """ + raise NotImplementedError() + + def is_incomplete(self): + """ + Return True if this edge's structure is partially consistent + with the text. 
+ + :rtype: bool + """ + raise NotImplementedError() + + # //////////////////////////////////////////////////////////// + # Comparisons & hashing + # //////////////////////////////////////////////////////////// + + def __eq__(self, other): + return ( + self.__class__ is other.__class__ + and self._comparison_key == other._comparison_key + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, EdgeI): + raise_unorderable_types("<", self, other) + if self.__class__ is other.__class__: + return self._comparison_key < other._comparison_key + else: + return self.__class__.__name__ < other.__class__.__name__ + + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self._comparison_key) + return self._hash + + +class TreeEdge(EdgeI): + """ + An edge that records the fact that a tree is (partially) + consistent with the sentence. A tree edge consists of: + + - A span, indicating what part of the sentence is + consistent with the hypothesized tree. + - A left-hand side, specifying the hypothesized tree's node + value. + - A right-hand side, specifying the hypothesized tree's + children. Each element of the right-hand side is either a + terminal, specifying a token with that terminal as its leaf + value; or a nonterminal, specifying a subtree with that + nonterminal's symbol as its node value. + - A dot position, indicating which children are consistent + with part of the sentence. In particular, if ``dot`` is the + dot position, ``rhs`` is the right-hand size, ``(start,end)`` + is the span, and ``sentence`` is the list of tokens in the + sentence, then ``tokens[start:end]`` can be spanned by the + children specified by ``rhs[:dot]``. + + For more information about edges, see the ``EdgeI`` interface. + """ + + def __init__(self, span, lhs, rhs, dot=0): + """ + Construct a new ``TreeEdge``. + + :type span: tuple(int, int) + :param span: A tuple ``(s, e)``, where ``tokens[s:e]`` is the + portion of the sentence that is consistent with the new + edge's structure. + :type lhs: Nonterminal + :param lhs: The new edge's left-hand side, specifying the + hypothesized tree's node value. + :type rhs: list(Nonterminal and str) + :param rhs: The new edge's right-hand side, specifying the + hypothesized tree's children. + :type dot: int + :param dot: The position of the new edge's dot. This position + specifies what prefix of the production's right hand side + is consistent with the text. In particular, if + ``sentence`` is the list of tokens in the sentence, then + ``okens[span[0]:span[1]]`` can be spanned by the + children specified by ``rhs[:dot]``. + """ + self._span = span + self._lhs = lhs + rhs = tuple(rhs) + self._rhs = rhs + self._dot = dot + self._comparison_key = (span, lhs, rhs, dot) + + @staticmethod + def from_production(production, index): + """ + Return a new ``TreeEdge`` formed from the given production. + The new edge's left-hand side and right-hand side will + be taken from ``production``; its span will be + ``(index,index)``; and its dot position will be ``0``. + + :rtype: TreeEdge + """ + return TreeEdge( + span=(index, index), lhs=production.lhs(), rhs=production.rhs(), dot=0 + ) + + def move_dot_forward(self, new_end): + """ + Return a new ``TreeEdge`` formed from this edge. + The new edge's dot position is increased by ``1``, + and its end index will be replaced by ``new_end``. + + :param new_end: The new end index. 
+ :type new_end: int + :rtype: TreeEdge + """ + return TreeEdge( + span=(self._span[0], new_end), + lhs=self._lhs, + rhs=self._rhs, + dot=self._dot + 1, + ) + + # Accessors + def lhs(self): + return self._lhs + + def span(self): + return self._span + + def start(self): + return self._span[0] + + def end(self): + return self._span[1] + + def length(self): + return self._span[1] - self._span[0] + + def rhs(self): + return self._rhs + + def dot(self): + return self._dot + + def is_complete(self): + return self._dot == len(self._rhs) + + def is_incomplete(self): + return self._dot != len(self._rhs) + + def nextsym(self): + if self._dot >= len(self._rhs): + return None + else: + return self._rhs[self._dot] + + # String representation + def __str__(self): + str = f"[{self._span[0]}:{self._span[1]}] " + str += "%-2r ->" % (self._lhs,) + + for i in range(len(self._rhs)): + if i == self._dot: + str += " *" + str += " %s" % repr(self._rhs[i]) + if len(self._rhs) == self._dot: + str += " *" + return str + + def __repr__(self): + return "[Edge: %s]" % self + + +class LeafEdge(EdgeI): + """ + An edge that records the fact that a leaf value is consistent with + a word in the sentence. A leaf edge consists of: + + - An index, indicating the position of the word. + - A leaf, specifying the word's content. + + A leaf edge's left-hand side is its leaf value, and its right hand + side is ``()``. Its span is ``[index, index+1]``, and its dot + position is ``0``. + """ + + def __init__(self, leaf, index): + """ + Construct a new ``LeafEdge``. + + :param leaf: The new edge's leaf value, specifying the word + that is recorded by this edge. + :param index: The new edge's index, specifying the position of + the word that is recorded by this edge. + """ + self._leaf = leaf + self._index = index + self._comparison_key = (leaf, index) + + # Accessors + def lhs(self): + return self._leaf + + def span(self): + return (self._index, self._index + 1) + + def start(self): + return self._index + + def end(self): + return self._index + 1 + + def length(self): + return 1 + + def rhs(self): + return () + + def dot(self): + return 0 + + def is_complete(self): + return True + + def is_incomplete(self): + return False + + def nextsym(self): + return None + + # String representations + def __str__(self): + return f"[{self._index}:{self._index + 1}] {repr(self._leaf)}" + + def __repr__(self): + return "[Edge: %s]" % (self) + + +######################################################################## +## Chart +######################################################################## + + +class Chart: + """ + A blackboard for hypotheses about the syntactic constituents of a + sentence. A chart contains a set of edges, and each edge encodes + a single hypothesis about the structure of some portion of the + sentence. + + The ``select`` method can be used to select a specific collection + of edges. For example ``chart.select(is_complete=True, start=0)`` + yields all complete edges whose start indices are 0. To ensure + the efficiency of these selection operations, ``Chart`` dynamically + creates and maintains an index for each set of attributes that + have been selected on. + + In order to reconstruct the trees that are represented by an edge, + the chart associates each edge with a set of child pointer lists. + A child pointer list is a list of the edges that license an + edge's right-hand side. + + :ivar _tokens: The sentence that the chart covers. + :ivar _num_leaves: The number of tokens. 
+ :ivar _edges: A list of the edges in the chart + :ivar _edge_to_cpls: A dictionary mapping each edge to a set + of child pointer lists that are associated with that edge. + :ivar _indexes: A dictionary mapping tuples of edge attributes + to indices, where each index maps the corresponding edge + attribute values to lists of edges. + """ + + def __init__(self, tokens): + """ + Construct a new chart. The chart is initialized with the + leaf edges corresponding to the terminal leaves. + + :type tokens: list + :param tokens: The sentence that this chart will be used to parse. + """ + # Record the sentence token and the sentence length. + self._tokens = tuple(tokens) + self._num_leaves = len(self._tokens) + + # Initialise the chart. + self.initialize() + + def initialize(self): + """ + Clear the chart. + """ + # A list of edges contained in this chart. + self._edges = [] + + # The set of child pointer lists associated with each edge. + self._edge_to_cpls = {} + + # Indexes mapping attribute values to lists of edges + # (used by select()). + self._indexes = {} + + # //////////////////////////////////////////////////////////// + # Sentence Access + # //////////////////////////////////////////////////////////// + + def num_leaves(self): + """ + Return the number of words in this chart's sentence. + + :rtype: int + """ + return self._num_leaves + + def leaf(self, index): + """ + Return the leaf value of the word at the given index. + + :rtype: str + """ + return self._tokens[index] + + def leaves(self): + """ + Return a list of the leaf values of each word in the + chart's sentence. + + :rtype: list(str) + """ + return self._tokens + + # //////////////////////////////////////////////////////////// + # Edge access + # //////////////////////////////////////////////////////////// + + def edges(self): + """ + Return a list of all edges in this chart. New edges + that are added to the chart after the call to edges() + will *not* be contained in this list. + + :rtype: list(EdgeI) + :see: ``iteredges``, ``select`` + """ + return self._edges[:] + + def iteredges(self): + """ + Return an iterator over the edges in this chart. It is + not guaranteed that new edges which are added to the + chart before the iterator is exhausted will also be generated. + + :rtype: iter(EdgeI) + :see: ``edges``, ``select`` + """ + return iter(self._edges) + + # Iterating over the chart yields its edges. + __iter__ = iteredges + + def num_edges(self): + """ + Return the number of edges contained in this chart. + + :rtype: int + """ + return len(self._edge_to_cpls) + + def select(self, **restrictions): + """ + Return an iterator over the edges in this chart. Any + new edges that are added to the chart before the iterator + is exahusted will also be generated. ``restrictions`` + can be used to restrict the set of edges that will be + generated. 
+ + :param span: Only generate edges ``e`` where ``e.span()==span`` + :param start: Only generate edges ``e`` where ``e.start()==start`` + :param end: Only generate edges ``e`` where ``e.end()==end`` + :param length: Only generate edges ``e`` where ``e.length()==length`` + :param lhs: Only generate edges ``e`` where ``e.lhs()==lhs`` + :param rhs: Only generate edges ``e`` where ``e.rhs()==rhs`` + :param nextsym: Only generate edges ``e`` where + ``e.nextsym()==nextsym`` + :param dot: Only generate edges ``e`` where ``e.dot()==dot`` + :param is_complete: Only generate edges ``e`` where + ``e.is_complete()==is_complete`` + :param is_incomplete: Only generate edges ``e`` where + ``e.is_incomplete()==is_incomplete`` + :rtype: iter(EdgeI) + """ + # If there are no restrictions, then return all edges. + if restrictions == {}: + return iter(self._edges) + + # Find the index corresponding to the given restrictions. + restr_keys = sorted(restrictions.keys()) + restr_keys = tuple(restr_keys) + + # If it doesn't exist, then create it. + if restr_keys not in self._indexes: + self._add_index(restr_keys) + + vals = tuple(restrictions[key] for key in restr_keys) + return iter(self._indexes[restr_keys].get(vals, [])) + + def _add_index(self, restr_keys): + """ + A helper function for ``select``, which creates a new index for + a given set of attributes (aka restriction keys). + """ + # Make sure it's a valid index. + for key in restr_keys: + if not hasattr(EdgeI, key): + raise ValueError("Bad restriction: %s" % key) + + # Create the index. + index = self._indexes[restr_keys] = {} + + # Add all existing edges to the index. + for edge in self._edges: + vals = tuple(getattr(edge, key)() for key in restr_keys) + index.setdefault(vals, []).append(edge) + + def _register_with_indexes(self, edge): + """ + A helper function for ``insert``, which registers the new + edge with all existing indexes. + """ + for (restr_keys, index) in self._indexes.items(): + vals = tuple(getattr(edge, key)() for key in restr_keys) + index.setdefault(vals, []).append(edge) + + # //////////////////////////////////////////////////////////// + # Edge Insertion + # //////////////////////////////////////////////////////////// + + def insert_with_backpointer(self, new_edge, previous_edge, child_edge): + """ + Add a new edge to the chart, using a pointer to the previous edge. + """ + cpls = self.child_pointer_lists(previous_edge) + new_cpls = [cpl + (child_edge,) for cpl in cpls] + return self.insert(new_edge, *new_cpls) + + def insert(self, edge, *child_pointer_lists): + """ + Add a new edge to the chart, and return True if this operation + modified the chart. In particular, return true iff the chart + did not already contain ``edge``, or if it did not already associate + ``child_pointer_lists`` with ``edge``. + + :type edge: EdgeI + :param edge: The new edge + :type child_pointer_lists: sequence of tuple(EdgeI) + :param child_pointer_lists: A sequence of lists of the edges that + were used to form this edge. This list is used to reconstruct + the trees (or partial trees) that are associated with ``edge``. + :rtype: bool + """ + # Is it a new edge? + if edge not in self._edge_to_cpls: + # Add it to the list of edges. + self._append_edge(edge) + # Register with indexes. + self._register_with_indexes(edge) + + # Get the set of child pointer lists for this edge. 
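+ # Descriptive note: a child pointer list is just a tuple of the edges
+ # whose completed spans license this edge's right-hand side.  A call of
+ # the (hypothetical) form
+ #
+ #     chart.insert(np_edge, (det_edge, noun_edge))
+ #
+ # records that ``np_edge`` can later be expanded into a Det subtree
+ # followed by a Noun subtree when trees are read off the chart.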
+ cpls = self._edge_to_cpls.setdefault(edge, OrderedDict()) + chart_was_modified = False + for child_pointer_list in child_pointer_lists: + child_pointer_list = tuple(child_pointer_list) + if child_pointer_list not in cpls: + # It's a new CPL; register it, and return true. + cpls[child_pointer_list] = True + chart_was_modified = True + return chart_was_modified + + def _append_edge(self, edge): + self._edges.append(edge) + + # //////////////////////////////////////////////////////////// + # Tree extraction & child pointer lists + # //////////////////////////////////////////////////////////// + + def parses(self, root, tree_class=Tree): + """ + Return an iterator of the complete tree structures that span + the entire chart, and whose root node is ``root``. + """ + for edge in self.select(start=0, end=self._num_leaves, lhs=root): + yield from self.trees(edge, tree_class=tree_class, complete=True) + + def trees(self, edge, tree_class=Tree, complete=False): + """ + Return an iterator of the tree structures that are associated + with ``edge``. + + If ``edge`` is incomplete, then the unexpanded children will be + encoded as childless subtrees, whose node value is the + corresponding terminal or nonterminal. + + :rtype: list(Tree) + :note: If two trees share a common subtree, then the same + Tree may be used to encode that subtree in + both trees. If you need to eliminate this subtree + sharing, then create a deep copy of each tree. + """ + return iter(self._trees(edge, complete, memo={}, tree_class=tree_class)) + + def _trees(self, edge, complete, memo, tree_class): + """ + A helper function for ``trees``. + + :param memo: A dictionary used to record the trees that we've + generated for each edge, so that when we see an edge more + than once, we can reuse the same trees. + """ + # If we've seen this edge before, then reuse our old answer. + if edge in memo: + return memo[edge] + + # when we're reading trees off the chart, don't use incomplete edges + if complete and edge.is_incomplete(): + return [] + + # Leaf edges. + if isinstance(edge, LeafEdge): + leaf = self._tokens[edge.start()] + memo[edge] = [leaf] + return [leaf] + + # Until we're done computing the trees for edge, set + # memo[edge] to be empty. This has the effect of filtering + # out any cyclic trees (i.e., trees that contain themselves as + # descendants), because if we reach this edge via a cycle, + # then it will appear that the edge doesn't generate any trees. + memo[edge] = [] + trees = [] + lhs = edge.lhs().symbol() + + # Each child pointer list can be used to form trees. + for cpl in self.child_pointer_lists(edge): + # Get the set of child choices for each child pointer. + # child_choices[i] is the set of choices for the tree's + # ith child. + child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl] + + # For each combination of children, add a tree. + for children in itertools.product(*child_choices): + trees.append(tree_class(lhs, children)) + + # If the edge is incomplete, then extend it with "partial trees": + if edge.is_incomplete(): + unexpanded = [tree_class(elt, []) for elt in edge.rhs()[edge.dot() :]] + for tree in trees: + tree.extend(unexpanded) + + # Update the memoization dictionary. + memo[edge] = trees + + # Return the list of trees. + return trees + + def child_pointer_lists(self, edge): + """ + Return the set of child pointer lists for the given edge. + Each child pointer list is a list of edges that have + been used to form this edge. 
+ + :rtype: list(list(EdgeI)) + """ + # Make a copy, in case they modify it. + return self._edge_to_cpls.get(edge, {}).keys() + + # //////////////////////////////////////////////////////////// + # Display + # //////////////////////////////////////////////////////////// + def pretty_format_edge(self, edge, width=None): + """ + Return a pretty-printed string representation of a given edge + in this chart. + + :rtype: str + :param width: The number of characters allotted to each + index in the sentence. + """ + if width is None: + width = 50 // (self.num_leaves() + 1) + (start, end) = (edge.start(), edge.end()) + + str = "|" + ("." + " " * (width - 1)) * start + + # Zero-width edges are "#" if complete, ">" if incomplete + if start == end: + if edge.is_complete(): + str += "#" + else: + str += ">" + + # Spanning complete edges are "[===]"; Other edges are + # "[---]" if complete, "[--->" if incomplete + elif edge.is_complete() and edge.span() == (0, self._num_leaves): + str += "[" + ("=" * width) * (end - start - 1) + "=" * (width - 1) + "]" + elif edge.is_complete(): + str += "[" + ("-" * width) * (end - start - 1) + "-" * (width - 1) + "]" + else: + str += "[" + ("-" * width) * (end - start - 1) + "-" * (width - 1) + ">" + + str += (" " * (width - 1) + ".") * (self._num_leaves - end) + return str + "| %s" % edge + + def pretty_format_leaves(self, width=None): + """ + Return a pretty-printed string representation of this + chart's leaves. This string can be used as a header + for calls to ``pretty_format_edge``. + """ + if width is None: + width = 50 // (self.num_leaves() + 1) + + if self._tokens is not None and width > 1: + header = "|." + for tok in self._tokens: + header += tok[: width - 1].center(width - 1) + "." + header += "|" + else: + header = "" + + return header + + def pretty_format(self, width=None): + """ + Return a pretty-printed string representation of this chart. + + :param width: The number of characters allotted to each + index in the sentence. + :rtype: str + """ + if width is None: + width = 50 // (self.num_leaves() + 1) + # sort edges: primary key=length, secondary key=start index. + # (and filter out the token edges) + edges = sorted((e.length(), e.start(), e) for e in self) + edges = [e for (_, _, e) in edges] + + return ( + self.pretty_format_leaves(width) + + "\n" + + "\n".join(self.pretty_format_edge(edge, width) for edge in edges) + ) + + # //////////////////////////////////////////////////////////// + # Display: Dot (AT&T Graphviz) + # //////////////////////////////////////////////////////////// + + def dot_digraph(self): + # Header + s = "digraph nltk_chart {\n" + # s += ' size="5,5";\n' + s += " rankdir=LR;\n" + s += " node [height=0.1,width=0.1];\n" + s += ' node [style=filled, color="lightgray"];\n' + + # Set up the nodes + for y in range(self.num_edges(), -1, -1): + if y == 0: + s += ' node [style=filled, color="black"];\n' + for x in range(self.num_leaves() + 1): + if y == 0 or ( + x <= self._edges[y - 1].start() or x >= self._edges[y - 1].end() + ): + s += ' %04d.%04d [label=""];\n' % (x, y) + + # Add a spacer + s += " x [style=invis]; x->0000.0000 [style=invis];\n" + + # Declare ranks. 
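+ # Descriptive note: each "{rank=same; ...}" line emitted below pins all
+ # node ids that share the sentence position x onto one vertical rank in
+ # the Graphviz layout; a line of the (hypothetical) form
+ #
+ #     {rank=same; 0002.0000 0002.0003}
+ #
+ # keeps position 2's nodes aligned across edge rows.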
+ for x in range(self.num_leaves() + 1): + s += " {rank=same;" + for y in range(self.num_edges() + 1): + if y == 0 or ( + x <= self._edges[y - 1].start() or x >= self._edges[y - 1].end() + ): + s += " %04d.%04d" % (x, y) + s += "}\n" + + # Add the leaves + s += " edge [style=invis, weight=100];\n" + s += " node [shape=plaintext]\n" + s += " 0000.0000" + for x in range(self.num_leaves()): + s += "->%s->%04d.0000" % (self.leaf(x), x + 1) + s += ";\n\n" + + # Add the edges + s += " edge [style=solid, weight=1];\n" + for y, edge in enumerate(self): + for x in range(edge.start()): + s += ' %04d.%04d -> %04d.%04d [style="invis"];\n' % ( + x, + y + 1, + x + 1, + y + 1, + ) + s += ' %04d.%04d -> %04d.%04d [label="%s"];\n' % ( + edge.start(), + y + 1, + edge.end(), + y + 1, + edge, + ) + for x in range(edge.end(), self.num_leaves()): + s += ' %04d.%04d -> %04d.%04d [style="invis"];\n' % ( + x, + y + 1, + x + 1, + y + 1, + ) + s += "}\n" + return s + + +######################################################################## +## Chart Rules +######################################################################## + + +class ChartRuleI: + """ + A rule that specifies what new edges are licensed by any given set + of existing edges. Each chart rule expects a fixed number of + edges, as indicated by the class variable ``NUM_EDGES``. In + particular: + + - A chart rule with ``NUM_EDGES=0`` specifies what new edges are + licensed, regardless of existing edges. + - A chart rule with ``NUM_EDGES=1`` specifies what new edges are + licensed by a single existing edge. + - A chart rule with ``NUM_EDGES=2`` specifies what new edges are + licensed by a pair of existing edges. + + :type NUM_EDGES: int + :cvar NUM_EDGES: The number of existing edges that this rule uses + to license new edges. Typically, this number ranges from zero + to two. + """ + + def apply(self, chart, grammar, *edges): + """ + Return a generator that will add edges licensed by this rule + and the given edges to the chart, one at a time. Each + time the generator is resumed, it will either add a new + edge and yield that edge; or return. + + :type edges: list(EdgeI) + :param edges: A set of existing edges. The number of edges + that should be passed to ``apply()`` is specified by the + ``NUM_EDGES`` class variable. + :rtype: iter(EdgeI) + """ + raise NotImplementedError() + + def apply_everywhere(self, chart, grammar): + """ + Return a generator that will add all edges licensed by + this rule, given the edges that are currently in the + chart, one at a time. Each time the generator is resumed, + it will either add a new edge and yield that edge; or return. + + :rtype: iter(EdgeI) + """ + raise NotImplementedError() + + +class AbstractChartRule(ChartRuleI): + """ + An abstract base class for chart rules. ``AbstractChartRule`` + provides: + + - A default implementation for ``apply``. + - A default implementation for ``apply_everywhere``, + (Currently, this implementation assumes that ``NUM_EDGES <= 3``.) + - A default implementation for ``__str__``, which returns a + name based on the rule's class name. + """ + + # Subclasses must define apply. + def apply(self, chart, grammar, *edges): + raise NotImplementedError() + + # Default: loop through the given number of edges, and call + # self.apply() for each set of edges. 
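+ # (Conceptually this is the same as iterating over
+ # itertools.product(chart, repeat=self.NUM_EDGES) and calling
+ # self.apply() on each tuple; it is simply written out below for the
+ # supported cases NUM_EDGES <= 3.)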
+ def apply_everywhere(self, chart, grammar): + if self.NUM_EDGES == 0: + yield from self.apply(chart, grammar) + + elif self.NUM_EDGES == 1: + for e1 in chart: + yield from self.apply(chart, grammar, e1) + + elif self.NUM_EDGES == 2: + for e1 in chart: + for e2 in chart: + yield from self.apply(chart, grammar, e1, e2) + + elif self.NUM_EDGES == 3: + for e1 in chart: + for e2 in chart: + for e3 in chart: + yield from self.apply(chart, grammar, e1, e2, e3) + + else: + raise AssertionError("NUM_EDGES>3 is not currently supported") + + # Default: return a name based on the class name. + def __str__(self): + # Add spaces between InitialCapsWords. + return re.sub("([a-z])([A-Z])", r"\1 \2", self.__class__.__name__) + + +# //////////////////////////////////////////////////////////// +# Fundamental Rule +# //////////////////////////////////////////////////////////// + + +class FundamentalRule(AbstractChartRule): + r""" + A rule that joins two adjacent edges to form a single combined + edge. In particular, this rule specifies that any pair of edges + + - ``[A -> alpha \* B beta][i:j]`` + - ``[B -> gamma \*][j:k]`` + + licenses the edge: + + - ``[A -> alpha B * beta][i:k]`` + """ + + NUM_EDGES = 2 + + def apply(self, chart, grammar, left_edge, right_edge): + # Make sure the rule is applicable. + if not ( + left_edge.is_incomplete() + and right_edge.is_complete() + and left_edge.end() == right_edge.start() + and left_edge.nextsym() == right_edge.lhs() + ): + return + + # Construct the new edge. + new_edge = left_edge.move_dot_forward(right_edge.end()) + + # Insert it into the chart. + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + +class SingleEdgeFundamentalRule(FundamentalRule): + r""" + A rule that joins a given edge with adjacent edges in the chart, + to form combined edges. In particular, this rule specifies that + either of the edges: + + - ``[A -> alpha \* B beta][i:j]`` + - ``[B -> gamma \*][j:k]`` + + licenses the edge: + + - ``[A -> alpha B * beta][i:k]`` + + if the other edge is already in the chart. + + :note: This is basically ``FundamentalRule``, with one edge left + unspecified.
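+
+ A minimal sketch of driving the rule by hand (assuming ``chart`` and
+ ``grammar`` are a ``Chart`` and ``CFG`` that already contain suitable
+ edges)::
+
+     rule = SingleEdgeFundamentalRule()
+     for edge in list(chart):
+         for new_edge in rule.apply(chart, grammar, edge):
+             print(new_edge)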
+ """ + + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + yield from self._apply_incomplete(chart, grammar, edge) + else: + yield from self._apply_complete(chart, grammar, edge) + + def _apply_complete(self, chart, grammar, right_edge): + for left_edge in chart.select( + end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs() + ): + new_edge = left_edge.move_dot_forward(right_edge.end()) + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + def _apply_incomplete(self, chart, grammar, left_edge): + for right_edge in chart.select( + start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym() + ): + new_edge = left_edge.move_dot_forward(right_edge.end()) + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + +# //////////////////////////////////////////////////////////// +# Inserting Terminal Leafs +# //////////////////////////////////////////////////////////// + + +class LeafInitRule(AbstractChartRule): + NUM_EDGES = 0 + + def apply(self, chart, grammar): + for index in range(chart.num_leaves()): + new_edge = LeafEdge(chart.leaf(index), index) + if chart.insert(new_edge, ()): + yield new_edge + + +# //////////////////////////////////////////////////////////// +# Top-Down Prediction +# //////////////////////////////////////////////////////////// + + +class TopDownInitRule(AbstractChartRule): + r""" + A rule licensing edges corresponding to the grammar productions for + the grammar's start symbol. In particular, this rule specifies that + ``[S -> \* alpha][0:i]`` is licensed for each grammar production + ``S -> alpha``, where ``S`` is the grammar's start symbol. + """ + + NUM_EDGES = 0 + + def apply(self, chart, grammar): + for prod in grammar.productions(lhs=grammar.start()): + new_edge = TreeEdge.from_production(prod, 0) + if chart.insert(new_edge, ()): + yield new_edge + + +class TopDownPredictRule(AbstractChartRule): + r""" + A rule licensing edges corresponding to the grammar productions + for the nonterminal following an incomplete edge's dot. In + particular, this rule specifies that + ``[A -> alpha \* B beta][i:j]`` licenses the edge + ``[B -> \* gamma][j:j]`` for each grammar production ``B -> gamma``. + + :note: This rule corresponds to the Predictor Rule in Earley parsing. + """ + + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_complete(): + return + for prod in grammar.productions(lhs=edge.nextsym()): + new_edge = TreeEdge.from_production(prod, edge.end()) + if chart.insert(new_edge, ()): + yield new_edge + + +class CachedTopDownPredictRule(TopDownPredictRule): + r""" + A cached version of ``TopDownPredictRule``. After the first time + this rule is applied to an edge with a given ``end`` and ``next``, + it will not generate any more edges for edges with that ``end`` and + ``next``. + + If ``chart`` or ``grammar`` are changed, then the cache is flushed. + """ + + def __init__(self): + TopDownPredictRule.__init__(self) + self._done = {} + + def apply(self, chart, grammar, edge): + if edge.is_complete(): + return + nextsym, index = edge.nextsym(), edge.end() + if not is_nonterminal(nextsym): + return + + # If we've already applied this rule to an edge with the same + # next & end, and the chart & grammar have not changed, then + # just return (no new edges to add). + done = self._done.get((nextsym, index), (None, None)) + if done[0] is chart and done[1] is grammar: + return + + # Add all the edges indicated by the top down expand rule. 
+ for prod in grammar.productions(lhs=nextsym): + # If the left corner in the predicted production is + # leaf, it must match with the input. + if prod.rhs(): + first = prod.rhs()[0] + if is_terminal(first): + if index >= chart.num_leaves() or first != chart.leaf(index): + continue + + new_edge = TreeEdge.from_production(prod, index) + if chart.insert(new_edge, ()): + yield new_edge + + # Record the fact that we've applied this rule. + self._done[nextsym, index] = (chart, grammar) + + +# //////////////////////////////////////////////////////////// +# Bottom-Up Prediction +# //////////////////////////////////////////////////////////// + + +class BottomUpPredictRule(AbstractChartRule): + r""" + A rule licensing any edge corresponding to a production whose + right-hand side begins with a complete edge's left-hand side. In + particular, this rule specifies that ``[A -> alpha \*]`` licenses + the edge ``[B -> \* A beta]`` for each grammar production ``B -> A beta``. + """ + + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + for prod in grammar.productions(rhs=edge.lhs()): + new_edge = TreeEdge.from_production(prod, edge.start()) + if chart.insert(new_edge, ()): + yield new_edge + + +class BottomUpPredictCombineRule(BottomUpPredictRule): + r""" + A rule licensing any edge corresponding to a production whose + right-hand side begins with a complete edge's left-hand side. In + particular, this rule specifies that ``[A -> alpha \*]`` + licenses the edge ``[B -> A \* beta]`` for each grammar + production ``B -> A beta``. + + :note: This is like ``BottomUpPredictRule``, but it also applies + the ``FundamentalRule`` to the resulting edge. + """ + + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + for prod in grammar.productions(rhs=edge.lhs()): + new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1) + if chart.insert(new_edge, (edge,)): + yield new_edge + + +class EmptyPredictRule(AbstractChartRule): + """ + A rule that inserts all empty productions as passive edges, + in every position in the chart. 
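+
+ A small illustrative sketch (a toy grammar with a single empty
+ production, built directly from ``Production`` objects)::
+
+     from nltk.grammar import CFG, Nonterminal, Production
+     S, NP, VP = Nonterminal('S'), Nonterminal('NP'), Nonterminal('VP')
+     grammar = CFG(S, [Production(S, [NP, VP]),
+                       Production(NP, []),        # the empty production
+                       Production(VP, ['runs'])])
+     chart = Chart(['runs'])
+     edges = list(EmptyPredictRule().apply(chart, grammar))
+     # -> one empty NP edge at each of the two chart positions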
+ """ + + NUM_EDGES = 0 + + def apply(self, chart, grammar): + for prod in grammar.productions(empty=True): + for index in range(chart.num_leaves() + 1): + new_edge = TreeEdge.from_production(prod, index) + if chart.insert(new_edge, ()): + yield new_edge + + +######################################################################## +## Filtered Bottom Up +######################################################################## + + +class FilteredSingleEdgeFundamentalRule(SingleEdgeFundamentalRule): + def _apply_complete(self, chart, grammar, right_edge): + end = right_edge.end() + nexttoken = end < chart.num_leaves() and chart.leaf(end) + for left_edge in chart.select( + end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs() + ): + if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()): + new_edge = left_edge.move_dot_forward(right_edge.end()) + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + def _apply_incomplete(self, chart, grammar, left_edge): + for right_edge in chart.select( + start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym() + ): + end = right_edge.end() + nexttoken = end < chart.num_leaves() and chart.leaf(end) + if _bottomup_filter(grammar, nexttoken, left_edge.rhs(), left_edge.dot()): + new_edge = left_edge.move_dot_forward(right_edge.end()) + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + +class FilteredBottomUpPredictCombineRule(BottomUpPredictCombineRule): + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + + end = edge.end() + nexttoken = end < chart.num_leaves() and chart.leaf(end) + for prod in grammar.productions(rhs=edge.lhs()): + if _bottomup_filter(grammar, nexttoken, prod.rhs()): + new_edge = TreeEdge(edge.span(), prod.lhs(), prod.rhs(), 1) + if chart.insert(new_edge, (edge,)): + yield new_edge + + +def _bottomup_filter(grammar, nexttoken, rhs, dot=0): + if len(rhs) <= dot + 1: + return True + _next = rhs[dot + 1] + if is_terminal(_next): + return nexttoken == _next + else: + return grammar.is_leftcorner(_next, nexttoken) + + +######################################################################## +## Generic Chart Parser +######################################################################## + +TD_STRATEGY = [ + LeafInitRule(), + TopDownInitRule(), + CachedTopDownPredictRule(), + SingleEdgeFundamentalRule(), +] +BU_STRATEGY = [ + LeafInitRule(), + EmptyPredictRule(), + BottomUpPredictRule(), + SingleEdgeFundamentalRule(), +] +BU_LC_STRATEGY = [ + LeafInitRule(), + EmptyPredictRule(), + BottomUpPredictCombineRule(), + SingleEdgeFundamentalRule(), +] + +LC_STRATEGY = [ + LeafInitRule(), + FilteredBottomUpPredictCombineRule(), + FilteredSingleEdgeFundamentalRule(), +] + + +class ChartParser(ParserI): + """ + A generic chart parser. A "strategy", or list of + ``ChartRuleI`` instances, is used to decide what edges to add to + the chart. In particular, ``ChartParser`` uses the following + algorithm to parse texts: + + | Until no new edges are added: + | For each *rule* in *strategy*: + | Apply *rule* to any applicable edges in the chart. + | Return any complete parses in the chart + """ + + def __init__( + self, + grammar, + strategy=BU_LC_STRATEGY, + trace=0, + trace_chart_width=50, + use_agenda=True, + chart_class=Chart, + ): + """ + Create a new chart parser, that uses ``grammar`` to parse + texts. + + :type grammar: CFG + :param grammar: The grammar used to parse texts. 
+ :type strategy: list(ChartRuleI) + :param strategy: A list of rules that should be used to decide + what edges to add to the chart (bottom-up left-corner strategy by default). + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + :type trace_chart_width: int + :param trace_chart_width: The default total width reserved for + the chart in trace output. The remainder of each line will + be used to display edges. + :type use_agenda: bool + :param use_agenda: Use an optimized agenda-based algorithm, + if possible. + :param chart_class: The class that should be used to create + the parse charts. + """ + self._grammar = grammar + self._strategy = strategy + self._trace = trace + self._trace_chart_width = trace_chart_width + # If the strategy only consists of axioms (NUM_EDGES==0) and + # inference rules (NUM_EDGES==1), we can use an agenda-based algorithm: + self._use_agenda = use_agenda + self._chart_class = chart_class + + self._axioms = [] + self._inference_rules = [] + for rule in strategy: + if rule.NUM_EDGES == 0: + self._axioms.append(rule) + elif rule.NUM_EDGES == 1: + self._inference_rules.append(rule) + else: + self._use_agenda = False + + def grammar(self): + return self._grammar + + def _trace_new_edges(self, chart, rule, new_edges, trace, edge_width): + if not trace: + return + print_rule_header = trace > 1 + for edge in new_edges: + if print_rule_header: + print("%s:" % rule) + print_rule_header = False + print(chart.pretty_format_edge(edge, edge_width)) + + def chart_parse(self, tokens, trace=None): + """ + Return the final parse ``Chart`` from which all possible + parse trees can be extracted. + + :param tokens: The sentence to be parsed + :type tokens: list(str) + :rtype: Chart + """ + if trace is None: + trace = self._trace + trace_new_edges = self._trace_new_edges + + tokens = list(tokens) + self._grammar.check_coverage(tokens) + chart = self._chart_class(tokens) + grammar = self._grammar + + # Width, for printing trace edges. + trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1) + if trace: + print(chart.pretty_format_leaves(trace_edge_width)) + + if self._use_agenda: + # Use an agenda-based algorithm. + for axiom in self._axioms: + new_edges = list(axiom.apply(chart, grammar)) + trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width) + + inference_rules = self._inference_rules + agenda = chart.edges() + # We reverse the initial agenda, since it is a stack + # but chart.edges() functions as a queue. + agenda.reverse() + while agenda: + edge = agenda.pop() + for rule in inference_rules: + new_edges = list(rule.apply(chart, grammar, edge)) + if trace: + trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) + agenda += new_edges + + else: + # Do not use an agenda-based algorithm. + edges_added = True + while edges_added: + edges_added = False + for rule in self._strategy: + new_edges = list(rule.apply_everywhere(chart, grammar)) + edges_added = len(new_edges) + trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) + + # Return the final chart. + return chart + + def parse(self, tokens, tree_class=Tree): + chart = self.chart_parse(tokens) + return iter(chart.parses(self._grammar.start(), tree_class=tree_class)) + + +class TopDownChartParser(ChartParser): + """ + A ``ChartParser`` using a top-down parsing strategy. + See ``ChartParser`` for more information.
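+
+ A minimal usage sketch (reusing ``demo_grammar()`` from this module)::
+
+     parser = TopDownChartParser(demo_grammar())
+     for tree in parser.parse('I saw John with my cookie'.split()):
+         print(tree)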
+ """ + + def __init__(self, grammar, **parser_args): + ChartParser.__init__(self, grammar, TD_STRATEGY, **parser_args) + + +class BottomUpChartParser(ChartParser): + """ + A ``ChartParser`` using a bottom-up parsing strategy. + See ``ChartParser`` for more information. + """ + + def __init__(self, grammar, **parser_args): + if isinstance(grammar, PCFG): + warnings.warn( + "BottomUpChartParser only works for CFG, " + "use BottomUpProbabilisticChartParser instead", + category=DeprecationWarning, + ) + ChartParser.__init__(self, grammar, BU_STRATEGY, **parser_args) + + +class BottomUpLeftCornerChartParser(ChartParser): + """ + A ``ChartParser`` using a bottom-up left-corner parsing strategy. + This strategy is often more efficient than standard bottom-up. + See ``ChartParser`` for more information. + """ + + def __init__(self, grammar, **parser_args): + ChartParser.__init__(self, grammar, BU_LC_STRATEGY, **parser_args) + + +class LeftCornerChartParser(ChartParser): + def __init__(self, grammar, **parser_args): + if not grammar.is_nonempty(): + raise ValueError( + "LeftCornerParser only works for grammars " "without empty productions." + ) + ChartParser.__init__(self, grammar, LC_STRATEGY, **parser_args) + + +######################################################################## +## Stepping Chart Parser +######################################################################## + + +class SteppingChartParser(ChartParser): + """ + A ``ChartParser`` that allows you to step through the parsing + process, adding a single edge at a time. It also allows you to + change the parser's strategy or grammar midway through parsing a + text. + + The ``initialize`` method is used to start parsing a text. ``step`` + adds a single edge to the chart. ``set_strategy`` changes the + strategy used by the chart parser. ``parses`` returns the set of + parses that has been found by the chart parser. + + :ivar _restart: Records whether the parser's strategy, grammar, + or chart has been changed. If so, then ``step`` must restart + the parsing algorithm. + """ + + def __init__(self, grammar, strategy=[], trace=0): + self._chart = None + self._current_chartrule = None + self._restart = False + ChartParser.__init__(self, grammar, strategy, trace) + + # //////////////////////////////////////////////////////////// + # Initialization + # //////////////////////////////////////////////////////////// + + def initialize(self, tokens): + "Begin parsing the given tokens." + self._chart = Chart(list(tokens)) + self._restart = True + + # //////////////////////////////////////////////////////////// + # Stepping + # //////////////////////////////////////////////////////////// + + def step(self): + """ + Return a generator that adds edges to the chart, one at a + time. Each time the generator is resumed, it adds a single + edge and yields that edge. If no more edges can be added, + then it yields None. + + If the parser's strategy, grammar, or chart is changed, then + the generator will continue adding edges using the new + strategy, grammar, or chart. + + Note that this generator never terminates, since the grammar + or strategy might be changed to values that would add new + edges. Instead, it yields None when no more edges can be + added with the current strategy and grammar. 
+ """ + if self._chart is None: + raise ValueError("Parser must be initialized first") + while True: + self._restart = False + w = 50 // (self._chart.num_leaves() + 1) + + for e in self._parse(): + if self._trace > 1: + print(self._current_chartrule) + if self._trace > 0: + print(self._chart.pretty_format_edge(e, w)) + yield e + if self._restart: + break + else: + yield None # No more edges. + + def _parse(self): + """ + A generator that implements the actual parsing algorithm. + ``step`` iterates through this generator, and restarts it + whenever the parser's strategy, grammar, or chart is modified. + """ + chart = self._chart + grammar = self._grammar + edges_added = 1 + while edges_added > 0: + edges_added = 0 + for rule in self._strategy: + self._current_chartrule = rule + for e in rule.apply_everywhere(chart, grammar): + edges_added += 1 + yield e + + # //////////////////////////////////////////////////////////// + # Accessors + # //////////////////////////////////////////////////////////// + + def strategy(self): + "Return the strategy used by this parser." + return self._strategy + + def grammar(self): + "Return the grammar used by this parser." + return self._grammar + + def chart(self): + "Return the chart that is used by this parser." + return self._chart + + def current_chartrule(self): + "Return the chart rule used to generate the most recent edge." + return self._current_chartrule + + def parses(self, tree_class=Tree): + "Return the parse trees currently contained in the chart." + return self._chart.parses(self._grammar.start(), tree_class) + + # //////////////////////////////////////////////////////////// + # Parser modification + # //////////////////////////////////////////////////////////// + + def set_strategy(self, strategy): + """ + Change the strategy that the parser uses to decide which edges + to add to the chart. + + :type strategy: list(ChartRuleI) + :param strategy: A list of rules that should be used to decide + what edges to add to the chart. + """ + if strategy == self._strategy: + return + self._strategy = strategy[:] # Make a copy. + self._restart = True + + def set_grammar(self, grammar): + "Change the grammar used by the parser." + if grammar is self._grammar: + return + self._grammar = grammar + self._restart = True + + def set_chart(self, chart): + "Load a given chart into the chart parser." + if chart is self._chart: + return + self._chart = chart + self._restart = True + + # //////////////////////////////////////////////////////////// + # Standard parser methods + # //////////////////////////////////////////////////////////// + + def parse(self, tokens, tree_class=Tree): + tokens = list(tokens) + self._grammar.check_coverage(tokens) + + # Initialize ourselves. + self.initialize(tokens) + + # Step until no more edges are generated. + for e in self.step(): + if e is None: + break + + # Return an iterator of complete parses. 
+ return self.parses(tree_class=tree_class) + + +######################################################################## +## Demo Code +######################################################################## + + +def demo_grammar(): + from nltk.grammar import CFG + + return CFG.fromstring( + """ +S -> NP VP +PP -> "with" NP +NP -> NP PP +VP -> VP PP +VP -> Verb NP +VP -> Verb +NP -> Det Noun +NP -> "John" +NP -> "I" +Det -> "the" +Det -> "my" +Det -> "a" +Noun -> "dog" +Noun -> "cookie" +Verb -> "ate" +Verb -> "saw" +Prep -> "with" +Prep -> "under" +""" + ) + + +def demo( + choice=None, + print_times=True, + print_grammar=False, + print_trees=True, + trace=2, + sent="I saw John with a dog with my cookie", + numparses=5, +): + """ + A demonstration of the chart parsers. + """ + import sys + import time + + from nltk import CFG, Production, nonterminals + + # The grammar for ChartParser and SteppingChartParser: + grammar = demo_grammar() + if print_grammar: + print("* Grammar") + print(grammar) + + # Tokenize the sample sentence. + print("* Sentence:") + print(sent) + tokens = sent.split() + print(tokens) + print() + + # Ask the user which parser to test, + # if the parser wasn't provided as an argument + if choice is None: + print(" 1: Top-down chart parser") + print(" 2: Bottom-up chart parser") + print(" 3: Bottom-up left-corner chart parser") + print(" 4: Left-corner chart parser with bottom-up filter") + print(" 5: Stepping chart parser (alternating top-down & bottom-up)") + print(" 6: All parsers") + print("\nWhich parser (1-6)? ", end=" ") + choice = sys.stdin.readline().strip() + print() + + choice = str(choice) + if choice not in "123456": + print("Bad parser number") + return + + # Keep track of how long each parser takes. + times = {} + + strategies = { + "1": ("Top-down", TD_STRATEGY), + "2": ("Bottom-up", BU_STRATEGY), + "3": ("Bottom-up left-corner", BU_LC_STRATEGY), + "4": ("Filtered left-corner", LC_STRATEGY), + } + choices = [] + if choice in strategies: + choices = [choice] + if choice == "6": + choices = "1234" + + # Run the requested chart parser(s), except the stepping parser. + for strategy in choices: + print("* Strategy: " + strategies[strategy][0]) + print() + cp = ChartParser(grammar, strategies[strategy][1], trace=trace) + t = time.time() + chart = cp.chart_parse(tokens) + parses = list(chart.parses(grammar.start())) + + times[strategies[strategy][0]] = time.time() - t + print("Nr edges in chart:", len(chart.edges())) + if numparses: + assert len(parses) == numparses, "Not all parses found" + if print_trees: + for tree in parses: + print(tree) + else: + print("Nr trees:", len(parses)) + print() + + # Run the stepping parser, if requested. 
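+ # Descriptive note: the stepping demo below alternates the top-down and
+ # bottom-up strategies (at most ~21 edges per switch) to show that
+ # SteppingChartParser keeps its chart across strategy changes.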
+ if choice in "56": + print("* Strategy: Stepping (top-down vs bottom-up)") + print() + t = time.time() + cp = SteppingChartParser(grammar, trace=trace) + cp.initialize(tokens) + for i in range(5): + print("*** SWITCH TO TOP DOWN") + cp.set_strategy(TD_STRATEGY) + for j, e in enumerate(cp.step()): + if j > 20 or e is None: + break + print("*** SWITCH TO BOTTOM UP") + cp.set_strategy(BU_STRATEGY) + for j, e in enumerate(cp.step()): + if j > 20 or e is None: + break + times["Stepping"] = time.time() - t + print("Nr edges in chart:", len(cp.chart().edges())) + if numparses: + assert len(list(cp.parses())) == numparses, "Not all parses found" + if print_trees: + for tree in cp.parses(): + print(tree) + else: + print("Nr trees:", len(list(cp.parses()))) + print() + + # Print the times of all parsers: + if not (print_times and times): + return + print("* Parsing times") + print() + maxlen = max(len(key) for key in times) + format = "%" + repr(maxlen) + "s parser: %6.3fsec" + times_items = times.items() + for (parser, t) in sorted(times_items, key=lambda a: a[1]): + print(format % (parser, t)) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/corenlp.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/corenlp.py new file mode 100644 index 0000000000000000000000000000000000000000..5c3146d1a086d4e49a0eaae585e09cab4a267834 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/corenlp.py @@ -0,0 +1,800 @@ +# Natural Language Toolkit: Interface to the CoreNLP REST API. +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Dmitrijs Milajevs +# +# URL: +# For license information, see LICENSE.TXT + +import json +import os # required for doctests +import re +import socket +import time +from typing import List, Tuple + +from nltk.internals import _java_options, config_java, find_jar_iter, java +from nltk.parse.api import ParserI +from nltk.parse.dependencygraph import DependencyGraph +from nltk.tag.api import TaggerI +from nltk.tokenize.api import TokenizerI +from nltk.tree import Tree + +_stanford_url = "https://stanfordnlp.github.io/CoreNLP/" + + +class CoreNLPServerError(EnvironmentError): + """Exceptions associated with the Core NLP server.""" + + +def try_port(port=0): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.bind(("", port)) + + p = sock.getsockname()[1] + sock.close() + + return p + + +class CoreNLPServer: + + _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)\.(\d+)\.(\d+)-models\.jar" + _JAR = r"stanford-corenlp-(\d+)\.(\d+)\.(\d+)\.jar" + + def __init__( + self, + path_to_jar=None, + path_to_models_jar=None, + verbose=False, + java_options=None, + corenlp_options=None, + port=None, + ): + + if corenlp_options is None: + corenlp_options = ["-preload", "tokenize,ssplit,pos,lemma,parse,depparse"] + + jars = list( + find_jar_iter( + self._JAR, + path_to_jar, + env_vars=("CORENLP",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + is_regex=True, + ) + ) + + # find the most recent code and model jar + stanford_jar = max(jars, key=lambda model_name: re.match(self._JAR, model_name)) + + if port is None: + try: + port = try_port(9000) + except OSError: + port = try_port() + corenlp_options.extend(["-port", str(port)]) + else: + try_port(port) + corenlp_options.extend(["-port", str(port)]) + + self.url = f"http://localhost:{port}" + + model_jar = max( + find_jar_iter( + self._MODEL_JAR_PATTERN, + path_to_models_jar, + env_vars=("CORENLP_MODELS",), + searchpath=(), + url=_stanford_url, + 
verbose=verbose, + is_regex=True, + ), + key=lambda model_name: re.match(self._MODEL_JAR_PATTERN, model_name), + ) + + self.verbose = verbose + + self._classpath = stanford_jar, model_jar + + self.corenlp_options = corenlp_options + self.java_options = java_options or ["-mx2g"] + + def start(self, stdout="devnull", stderr="devnull"): + """Starts the CoreNLP server + + :param stdout, stderr: Specifies where CoreNLP output is redirected. Valid values are 'devnull', 'stdout', 'pipe' + """ + import requests + + cmd = ["edu.stanford.nlp.pipeline.StanfordCoreNLPServer"] + + if self.corenlp_options: + cmd.extend(self.corenlp_options) + + # Configure java. + default_options = " ".join(_java_options) + config_java(options=self.java_options, verbose=self.verbose) + + try: + self.popen = java( + cmd, + classpath=self._classpath, + blocking=False, + stdout=stdout, + stderr=stderr, + ) + finally: + # Return java configurations to their default values. + config_java(options=default_options, verbose=self.verbose) + + # Check that the server is istill running. + returncode = self.popen.poll() + if returncode is not None: + _, stderrdata = self.popen.communicate() + raise CoreNLPServerError( + returncode, + "Could not start the server. " + "The error was: {}".format(stderrdata.decode("ascii")), + ) + + for i in range(30): + try: + response = requests.get(requests.compat.urljoin(self.url, "live")) + except requests.exceptions.ConnectionError: + time.sleep(1) + else: + if response.ok: + break + else: + raise CoreNLPServerError("Could not connect to the server.") + + for i in range(60): + try: + response = requests.get(requests.compat.urljoin(self.url, "ready")) + except requests.exceptions.ConnectionError: + time.sleep(1) + else: + if response.ok: + break + else: + raise CoreNLPServerError("The server is not ready.") + + def stop(self): + self.popen.terminate() + self.popen.wait() + + def __enter__(self): + self.start() + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stop() + return False + + +class GenericCoreNLPParser(ParserI, TokenizerI, TaggerI): + """Interface to the CoreNLP Parser.""" + + def __init__( + self, + url="http://localhost:9000", + encoding="utf8", + tagtype=None, + strict_json=True, + ): + import requests + + self.url = url + self.encoding = encoding + + if tagtype not in ["pos", "ner", None]: + raise ValueError("tagtype must be either 'pos', 'ner' or None") + + self.tagtype = tagtype + self.strict_json = strict_json + + self.session = requests.Session() + + def parse_sents(self, sentences, *args, **kwargs): + """Parse multiple sentences. + + Takes multiple sentences as a list where each sentence is a list of + words. Each sentence will be automatically tagged with this + CoreNLPParser instance's tagger. + + If a whitespace exists inside a token, then the token will be treated as + several tokens. + + :param sentences: Input sentences to parse + :type sentences: list(list(str)) + :rtype: iter(iter(Tree)) + """ + # Converting list(list(str)) -> list(str) + sentences = (" ".join(words) for words in sentences) + return self.raw_parse_sents(sentences, *args, **kwargs) + + def raw_parse(self, sentence, properties=None, *args, **kwargs): + """Parse a sentence. + + Takes a sentence as a string; before parsing, it will be automatically + tokenized and tagged by the CoreNLP Parser. 
+ + :param sentence: Input sentence to parse + :type sentence: str + :rtype: iter(Tree) + """ + default_properties = {"tokenize.whitespace": "false"} + default_properties.update(properties or {}) + + return next( + self.raw_parse_sents( + [sentence], properties=default_properties, *args, **kwargs + ) + ) + + def api_call(self, data, properties=None, timeout=60): + default_properties = { + "outputFormat": "json", + "annotators": "tokenize,pos,lemma,ssplit,{parser_annotator}".format( + parser_annotator=self.parser_annotator + ), + } + + default_properties.update(properties or {}) + + response = self.session.post( + self.url, + params={"properties": json.dumps(default_properties)}, + data=data.encode(self.encoding), + headers={"Content-Type": f"text/plain; charset={self.encoding}"}, + timeout=timeout, + ) + + response.raise_for_status() + + return response.json(strict=self.strict_json) + + def raw_parse_sents( + self, sentences, verbose=False, properties=None, *args, **kwargs + ): + """Parse multiple sentences. + + Takes multiple sentences as a list of strings. Each sentence will be + automatically tokenized and tagged. + + :param sentences: Input sentences to parse. + :type sentences: list(str) + :rtype: iter(iter(Tree)) + + """ + default_properties = { + # Only splits on '\n', never inside the sentence. + "ssplit.eolonly": "true" + } + + default_properties.update(properties or {}) + + """ + for sentence in sentences: + parsed_data = self.api_call(sentence, properties=default_properties) + + assert len(parsed_data['sentences']) == 1 + + for parse in parsed_data['sentences']: + tree = self.make_tree(parse) + yield iter([tree]) + """ + parsed_data = self.api_call("\n".join(sentences), properties=default_properties) + for parsed_sent in parsed_data["sentences"]: + tree = self.make_tree(parsed_sent) + yield iter([tree]) + + def parse_text(self, text, *args, **kwargs): + """Parse a piece of text. + + The text might contain several sentences which will be split by CoreNLP. + + :param str text: text to be split. + :returns: an iterable of syntactic structures. # TODO: should it be an iterable of iterables? + + """ + parsed_data = self.api_call(text, *args, **kwargs) + + for parse in parsed_data["sentences"]: + yield self.make_tree(parse) + + def tokenize(self, text, properties=None): + """Tokenize a string of text. + + Skip these tests if CoreNLP is likely not ready. + >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The CoreNLP server can be started using the following notation, although + we recommend the `with CoreNLPServer() as server:` context manager notation + to ensure that the server is always stopped. + >>> server = CoreNLPServer() + >>> server.start() + >>> parser = CoreNLPParser(url=server.url) + + >>> text = 'Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\nThanks.' + >>> list(parser.tokenize(text)) + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + + >>> s = "The colour of the wall is blue." + >>> list( + ... parser.tokenize( + ... 'The colour of the wall is blue.', + ... properties={'tokenize.options': 'americanize=true'}, + ... ) + ... 
) + ['The', 'colour', 'of', 'the', 'wall', 'is', 'blue', '.'] + >>> server.stop() + + """ + default_properties = {"annotators": "tokenize,ssplit"} + + default_properties.update(properties or {}) + + result = self.api_call(text, properties=default_properties) + + for sentence in result["sentences"]: + for token in sentence["tokens"]: + yield token["originalText"] or token["word"] + + def tag_sents(self, sentences): + """ + Tag multiple sentences. + + Takes multiple sentences as a list where each sentence is a list of + tokens. + + :param sentences: Input sentences to tag + :type sentences: list(list(str)) + :rtype: list(list(tuple(str, str)) + """ + # Converting list(list(str)) -> list(str) + sentences = (" ".join(words) for words in sentences) + return [sentences[0] for sentences in self.raw_tag_sents(sentences)] + + def tag(self, sentence: str) -> List[Tuple[str, str]]: + """ + Tag a list of tokens. + + :rtype: list(tuple(str, str)) + + Skip these tests if CoreNLP is likely not ready. + >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The CoreNLP server can be started using the following notation, although + we recommend the `with CoreNLPServer() as server:` context manager notation + to ensure that the server is always stopped. + >>> server = CoreNLPServer() + >>> server.start() + >>> parser = CoreNLPParser(url=server.url, tagtype='ner') + >>> tokens = 'Rami Eid is studying at Stony Brook University in NY'.split() + >>> parser.tag(tokens) # doctest: +NORMALIZE_WHITESPACE + [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'), ('at', 'O'), ('Stony', 'ORGANIZATION'), + ('Brook', 'ORGANIZATION'), ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'STATE_OR_PROVINCE')] + + >>> parser = CoreNLPParser(url=server.url, tagtype='pos') + >>> tokens = "What is the airspeed of an unladen swallow ?".split() + >>> parser.tag(tokens) # doctest: +NORMALIZE_WHITESPACE + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), + ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), + ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')] + >>> server.stop() + """ + return self.tag_sents([sentence])[0] + + def raw_tag_sents(self, sentences): + """ + Tag multiple sentences. + + Takes multiple sentences as a list where each sentence is a string. + + :param sentences: Input sentences to tag + :type sentences: list(str) + :rtype: list(list(list(tuple(str, str))) + """ + default_properties = { + "ssplit.isOneSentence": "true", + "annotators": "tokenize,ssplit,", + } + + # Supports only 'pos' or 'ner' tags. + assert self.tagtype in ["pos", "ner"] + default_properties["annotators"] += self.tagtype + for sentence in sentences: + tagged_data = self.api_call(sentence, properties=default_properties) + yield [ + [ + (token["word"], token[self.tagtype]) + for token in tagged_sentence["tokens"] + ] + for tagged_sentence in tagged_data["sentences"] + ] + + +class CoreNLPParser(GenericCoreNLPParser): + """ + Skip these tests if CoreNLP is likely not ready. + >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The recommended usage of `CoreNLPParser` is using the context manager notation: + >>> with CoreNLPServer() as server: + ... parser = CoreNLPParser(url=server.url) + ... next( + ... parser.raw_parse('The quick brown fox jumps over the lazy dog.') + ... 
).pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______________|__________________________ + | VP | + | _________|___ | + | | PP | + | | ________|___ | + NP | | NP | + ____|__________ | | _______|____ | + DT JJ JJ NN VBZ IN DT JJ NN . + | | | | | | | | | | + The quick brown fox jumps over the lazy dog . + + Alternatively, the server can be started using the following notation. + Note that `CoreNLPServer` does not need to be used if the CoreNLP server is started + outside of Python. + >>> server = CoreNLPServer() + >>> server.start() + >>> parser = CoreNLPParser(url=server.url) + + >>> (parse_fox, ), (parse_wolf, ) = parser.raw_parse_sents( + ... [ + ... 'The quick brown fox jumps over the lazy dog.', + ... 'The quick grey wolf jumps over the lazy fox.', + ... ] + ... ) + + >>> parse_fox.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______________|__________________________ + | VP | + | _________|___ | + | | PP | + | | ________|___ | + NP | | NP | + ____|__________ | | _______|____ | + DT JJ JJ NN VBZ IN DT JJ NN . + | | | | | | | | | | + The quick brown fox jumps over the lazy dog . + + >>> parse_wolf.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______________|__________________________ + | VP | + | _________|___ | + | | PP | + | | ________|___ | + NP | | NP | + ____|_________ | | _______|____ | + DT JJ JJ NN VBZ IN DT JJ NN . + | | | | | | | | | | + The quick grey wolf jumps over the lazy fox . + + >>> (parse_dog, ), (parse_friends, ) = parser.parse_sents( + ... [ + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ] + ... ) + + >>> parse_dog.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _______|____ + | VP + | ________|___ + NP | NP + | | ___|___ + PRP VBP DT NN + | | | | + I 'm a dog + + >>> parse_friends.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + ____|___________ + | VP + | ___________|_____________ + | | NP + | | _______|________________________ + | | NP | | | + | | _____|_______ | | | + NP | NP | | NP | + | | ______|_________ | | ___|____ | + DT VBZ PRP$ NNS POS NN -LRB- DT NN -RRB- + | | | | | | | | | | + This is my friends ' cat -LRB- the tabby -RRB- + + >>> parse_john, parse_mary, = parser.parse_text( + ... 'John loves Mary. Mary walks.' + ... ) + + >>> parse_john.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _____|_____________ + | VP | + | ____|___ | + NP | NP | + | | | | + NNP VBZ NNP . + | | | | + John loves Mary . + + >>> parse_mary.pretty_print() # doctest: +NORMALIZE_WHITESPACE + ROOT + | + S + _____|____ + NP VP | + | | | + NNP VBZ . + | | | + Mary walks . + + Special cases + + >>> next( + ... parser.raw_parse( + ... 'NASIRIYA, Iraq—Iraqi doctors who treated former prisoner of war ' + ... 'Jessica Lynch have angrily dismissed claims made in her biography ' + ... 'that she was raped by her Iraqi captors.' + ... ) + ... ).height() + 14 + + >>> next( + ... parser.raw_parse( + ... "The broader Standard & Poor's 500 Index <.SPX> was 0.46 points lower, or " + ... '0.05 percent, at 997.02.' + ... ) + ... ).height() + 11 + + >>> server.stop() + """ + + _OUTPUT_FORMAT = "penn" + parser_annotator = "parse" + + def make_tree(self, result): + return Tree.fromstring(result["parse"]) + + +class CoreNLPDependencyParser(GenericCoreNLPParser): + """Dependency parser. + + Skip these tests if CoreNLP is likely not ready. 
+ >>> from nltk.test.setup_fixt import check_jar + >>> check_jar(CoreNLPServer._JAR, env_vars=("CORENLP",), is_regex=True) + + The recommended usage of `CoreNLPParser` is using the context manager notation: + >>> with CoreNLPServer() as server: + ... dep_parser = CoreNLPDependencyParser(url=server.url) + ... parse, = dep_parser.raw_parse( + ... 'The quick brown fox jumps over the lazy dog.' + ... ) + ... print(parse.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + The DT 4 det + quick JJ 4 amod + brown JJ 4 amod + fox NN 5 nsubj + jumps VBZ 0 ROOT + over IN 9 case + the DT 9 det + lazy JJ 9 amod + dog NN 5 obl + . . 5 punct + + Alternatively, the server can be started using the following notation. + Note that `CoreNLPServer` does not need to be used if the CoreNLP server is started + outside of Python. + >>> server = CoreNLPServer() + >>> server.start() + >>> dep_parser = CoreNLPDependencyParser(url=server.url) + >>> parse, = dep_parser.raw_parse('The quick brown fox jumps over the lazy dog.') + >>> print(parse.tree()) # doctest: +NORMALIZE_WHITESPACE + (jumps (fox The quick brown) (dog over the lazy) .) + + >>> for governor, dep, dependent in parse.triples(): + ... print(governor, dep, dependent) # doctest: +NORMALIZE_WHITESPACE + ('jumps', 'VBZ') nsubj ('fox', 'NN') + ('fox', 'NN') det ('The', 'DT') + ('fox', 'NN') amod ('quick', 'JJ') + ('fox', 'NN') amod ('brown', 'JJ') + ('jumps', 'VBZ') obl ('dog', 'NN') + ('dog', 'NN') case ('over', 'IN') + ('dog', 'NN') det ('the', 'DT') + ('dog', 'NN') amod ('lazy', 'JJ') + ('jumps', 'VBZ') punct ('.', '.') + + >>> (parse_fox, ), (parse_dog, ) = dep_parser.raw_parse_sents( + ... [ + ... 'The quick brown fox jumps over the lazy dog.', + ... 'The quick grey wolf jumps over the lazy fox.', + ... ] + ... ) + >>> print(parse_fox.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + The DT 4 det + quick JJ 4 amod + brown JJ 4 amod + fox NN 5 nsubj + jumps VBZ 0 ROOT + over IN 9 case + the DT 9 det + lazy JJ 9 amod + dog NN 5 obl + . . 5 punct + + >>> print(parse_dog.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + The DT 4 det + quick JJ 4 amod + grey JJ 4 amod + wolf NN 5 nsubj + jumps VBZ 0 ROOT + over IN 9 case + the DT 9 det + lazy JJ 9 amod + fox NN 5 obl + . . 5 punct + + >>> (parse_dog, ), (parse_friends, ) = dep_parser.parse_sents( + ... [ + ... "I 'm a dog".split(), + ... "This is my friends ' cat ( the tabby )".split(), + ... ] + ... ) + >>> print(parse_dog.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + I PRP 4 nsubj + 'm VBP 4 cop + a DT 4 det + dog NN 0 ROOT + + >>> print(parse_friends.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + This DT 6 nsubj + is VBZ 6 cop + my PRP$ 4 nmod:poss + friends NNS 6 nmod:poss + ' POS 4 case + cat NN 0 ROOT + ( -LRB- 9 punct + the DT 9 det + tabby NN 6 dep + ) -RRB- 9 punct + + >>> parse_john, parse_mary, = dep_parser.parse_text( + ... 'John loves Mary. Mary walks.' + ... ) + + >>> print(parse_john.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + John NNP 2 nsubj + loves VBZ 0 ROOT + Mary NNP 2 obj + . . 2 punct + + >>> print(parse_mary.to_conll(4)) # doctest: +NORMALIZE_WHITESPACE + Mary NNP 2 nsubj + walks VBZ 0 ROOT + . . 2 punct + + Special cases + + Non-breaking space inside of a token. + + >>> len( + ... next( + ... dep_parser.raw_parse( + ... 'Anhalt said children typically treat a 20-ounce soda bottle as one ' + ... 'serving, while it actually contains 2 1/2 servings.' + ... ) + ... ).nodes + ... ) + 23 + + Phone numbers. + + >>> len( + ... next( + ... 
dep_parser.raw_parse('This is not going to crash: 01 111 555.') + ... ).nodes + ... ) + 10 + + >>> print( + ... next( + ... dep_parser.raw_parse('The underscore _ should not simply disappear.') + ... ).to_conll(4) + ... ) # doctest: +NORMALIZE_WHITESPACE + The DT 2 det + underscore NN 7 nsubj + _ NFP 7 punct + should MD 7 aux + not RB 7 advmod + simply RB 7 advmod + disappear VB 0 ROOT + . . 7 punct + + >>> print( + ... next( + ... dep_parser.raw_parse( + ... 'for all of its insights into the dream world of teen life , and its electronic expression through ' + ... 'cyber culture , the film gives no quarter to anyone seeking to pull a cohesive story out of its 2 ' + ... '1/2-hour running time .' + ... ) + ... ).to_conll(4) + ... ) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS + for IN 2 case + all DT 24 obl + of IN 5 case + its PRP$ 5 nmod:poss + insights NNS 2 nmod + into IN 9 case + the DT 9 det + dream NN 9 compound + world NN 5 nmod + of IN 12 case + teen NN 12 compound + ... + + >>> server.stop() + """ + + _OUTPUT_FORMAT = "conll2007" + parser_annotator = "depparse" + + def make_tree(self, result): + + return DependencyGraph( + ( + " ".join(n_items[1:]) # NLTK expects an iterable of strings... + for n_items in sorted(transform(result)) + ), + cell_separator=" ", # To make sure that a non-breaking space is kept inside of a token. + ) + + +def transform(sentence): + for dependency in sentence["basicDependencies"]: + + dependent_index = dependency["dependent"] + token = sentence["tokens"][dependent_index - 1] + + # Return values that we don't know as '_'. Also, consider tag and ctag + # to be equal. + yield ( + dependent_index, + "_", + token["word"], + token["lemma"], + token["pos"], + token["pos"], + "_", + str(dependency["governor"]), + dependency["dep"], + "_", + "_", + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/earleychart.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/earleychart.py new file mode 100644 index 0000000000000000000000000000000000000000..1054e114c8e3177754ed895b67ac2b2f4d39cc21 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/earleychart.py @@ -0,0 +1,552 @@ +# Natural Language Toolkit: An Incremental Earley Chart Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Ljunglöf +# Rob Speer +# Edward Loper +# Steven Bird +# Jean Mark Gawron +# URL: +# For license information, see LICENSE.TXT + +""" +Data classes and parser implementations for *incremental* chart +parsers, which use dynamic programming to efficiently parse a text. +A "chart parser" derives parse trees for a text by iteratively adding +\"edges\" to a \"chart\". Each "edge" represents a hypothesis about the tree +structure for a subsequence of the text. The "chart" is a +\"blackboard\" for composing and combining these hypotheses. + +A parser is "incremental", if it guarantees that for all i, j where i < j, +all edges ending at i are built before any edges ending at j. +This is appealing for, say, speech recognizer hypothesis filtering. + +The main parser class is ``EarleyChartParser``, which is a top-down +algorithm, originally formulated by Jay Earley (1970). 
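+
+A minimal usage sketch (illustrative only -- the toy grammar and the sentence
+below are assumptions, not part of this module)::
+
+    from nltk import CFG
+    from nltk.parse.earleychart import EarleyChartParser
+
+    grammar = CFG.fromstring('''
+        S -> NP VP
+        NP -> 'I' | Det N
+        VP -> V NP
+        Det -> 'a'
+        N -> 'dog'
+        V -> 'saw'
+    ''')
+    parser = EarleyChartParser(grammar)
+    for tree in parser.parse('I saw a dog'.split()):
+        print(tree)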
+""" + +from time import perf_counter + +from nltk.parse.chart import ( + BottomUpPredictCombineRule, + BottomUpPredictRule, + CachedTopDownPredictRule, + Chart, + ChartParser, + EdgeI, + EmptyPredictRule, + FilteredBottomUpPredictCombineRule, + FilteredSingleEdgeFundamentalRule, + LeafEdge, + LeafInitRule, + SingleEdgeFundamentalRule, + TopDownInitRule, +) +from nltk.parse.featurechart import ( + FeatureBottomUpPredictCombineRule, + FeatureBottomUpPredictRule, + FeatureChart, + FeatureChartParser, + FeatureEmptyPredictRule, + FeatureSingleEdgeFundamentalRule, + FeatureTopDownInitRule, + FeatureTopDownPredictRule, +) + +# //////////////////////////////////////////////////////////// +# Incremental Chart +# //////////////////////////////////////////////////////////// + + +class IncrementalChart(Chart): + def initialize(self): + # A sequence of edge lists contained in this chart. + self._edgelists = tuple([] for x in self._positions()) + + # The set of child pointer lists associated with each edge. + self._edge_to_cpls = {} + + # Indexes mapping attribute values to lists of edges + # (used by select()). + self._indexes = {} + + def edges(self): + return list(self.iteredges()) + + def iteredges(self): + return (edge for edgelist in self._edgelists for edge in edgelist) + + def select(self, end, **restrictions): + edgelist = self._edgelists[end] + + # If there are no restrictions, then return all edges. + if restrictions == {}: + return iter(edgelist) + + # Find the index corresponding to the given restrictions. + restr_keys = sorted(restrictions.keys()) + restr_keys = tuple(restr_keys) + + # If it doesn't exist, then create it. + if restr_keys not in self._indexes: + self._add_index(restr_keys) + + vals = tuple(restrictions[key] for key in restr_keys) + return iter(self._indexes[restr_keys][end].get(vals, [])) + + def _add_index(self, restr_keys): + # Make sure it's a valid index. + for key in restr_keys: + if not hasattr(EdgeI, key): + raise ValueError("Bad restriction: %s" % key) + + # Create the index. + index = self._indexes[restr_keys] = tuple({} for x in self._positions()) + + # Add all existing edges to the index. + for end, edgelist in enumerate(self._edgelists): + this_index = index[end] + for edge in edgelist: + vals = tuple(getattr(edge, key)() for key in restr_keys) + this_index.setdefault(vals, []).append(edge) + + def _register_with_indexes(self, edge): + end = edge.end() + for (restr_keys, index) in self._indexes.items(): + vals = tuple(getattr(edge, key)() for key in restr_keys) + index[end].setdefault(vals, []).append(edge) + + def _append_edge(self, edge): + self._edgelists[edge.end()].append(edge) + + def _positions(self): + return range(self.num_leaves() + 1) + + +class FeatureIncrementalChart(IncrementalChart, FeatureChart): + def select(self, end, **restrictions): + edgelist = self._edgelists[end] + + # If there are no restrictions, then return all edges. + if restrictions == {}: + return iter(edgelist) + + # Find the index corresponding to the given restrictions. + restr_keys = sorted(restrictions.keys()) + restr_keys = tuple(restr_keys) + + # If it doesn't exist, then create it. + if restr_keys not in self._indexes: + self._add_index(restr_keys) + + vals = tuple( + self._get_type_if_possible(restrictions[key]) for key in restr_keys + ) + return iter(self._indexes[restr_keys][end].get(vals, [])) + + def _add_index(self, restr_keys): + # Make sure it's a valid index. 
+ for key in restr_keys: + if not hasattr(EdgeI, key): + raise ValueError("Bad restriction: %s" % key) + + # Create the index. + index = self._indexes[restr_keys] = tuple({} for x in self._positions()) + + # Add all existing edges to the index. + for end, edgelist in enumerate(self._edgelists): + this_index = index[end] + for edge in edgelist: + vals = tuple( + self._get_type_if_possible(getattr(edge, key)()) + for key in restr_keys + ) + this_index.setdefault(vals, []).append(edge) + + def _register_with_indexes(self, edge): + end = edge.end() + for (restr_keys, index) in self._indexes.items(): + vals = tuple( + self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys + ) + index[end].setdefault(vals, []).append(edge) + + +# //////////////////////////////////////////////////////////// +# Incremental CFG Rules +# //////////////////////////////////////////////////////////// + + +class CompleteFundamentalRule(SingleEdgeFundamentalRule): + def _apply_incomplete(self, chart, grammar, left_edge): + end = left_edge.end() + # When the chart is incremental, we only have to look for + # empty complete edges here. + for right_edge in chart.select( + start=end, end=end, is_complete=True, lhs=left_edge.nextsym() + ): + new_edge = left_edge.move_dot_forward(right_edge.end()) + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + +class CompleterRule(CompleteFundamentalRule): + _fundamental_rule = CompleteFundamentalRule() + + def apply(self, chart, grammar, edge): + if not isinstance(edge, LeafEdge): + yield from self._fundamental_rule.apply(chart, grammar, edge) + + +class ScannerRule(CompleteFundamentalRule): + _fundamental_rule = CompleteFundamentalRule() + + def apply(self, chart, grammar, edge): + if isinstance(edge, LeafEdge): + yield from self._fundamental_rule.apply(chart, grammar, edge) + + +class PredictorRule(CachedTopDownPredictRule): + pass + + +class FilteredCompleteFundamentalRule(FilteredSingleEdgeFundamentalRule): + def apply(self, chart, grammar, edge): + # Since the Filtered rule only works for grammars without empty productions, + # we only have to bother with complete edges here. + if edge.is_complete(): + yield from self._apply_complete(chart, grammar, edge) + + +# //////////////////////////////////////////////////////////// +# Incremental FCFG Rules +# //////////////////////////////////////////////////////////// + + +class FeatureCompleteFundamentalRule(FeatureSingleEdgeFundamentalRule): + def _apply_incomplete(self, chart, grammar, left_edge): + fr = self._fundamental_rule + end = left_edge.end() + # When the chart is incremental, we only have to look for + # empty complete edges here. 
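+        # (Because edges are built in order of their end position, any
+        # complete edge starting at `end` and finishing later does not exist
+        # yet; the only candidates are zero-width edges spanning [end:end].)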
+ for right_edge in chart.select( + start=end, end=end, is_complete=True, lhs=left_edge.nextsym() + ): + yield from fr.apply(chart, grammar, left_edge, right_edge) + + +class FeatureCompleterRule(CompleterRule): + _fundamental_rule = FeatureCompleteFundamentalRule() + + +class FeatureScannerRule(ScannerRule): + _fundamental_rule = FeatureCompleteFundamentalRule() + + +class FeaturePredictorRule(FeatureTopDownPredictRule): + pass + + +# //////////////////////////////////////////////////////////// +# Incremental CFG Chart Parsers +# //////////////////////////////////////////////////////////// + +EARLEY_STRATEGY = [ + LeafInitRule(), + TopDownInitRule(), + CompleterRule(), + ScannerRule(), + PredictorRule(), +] +TD_INCREMENTAL_STRATEGY = [ + LeafInitRule(), + TopDownInitRule(), + CachedTopDownPredictRule(), + CompleteFundamentalRule(), +] +BU_INCREMENTAL_STRATEGY = [ + LeafInitRule(), + EmptyPredictRule(), + BottomUpPredictRule(), + CompleteFundamentalRule(), +] +BU_LC_INCREMENTAL_STRATEGY = [ + LeafInitRule(), + EmptyPredictRule(), + BottomUpPredictCombineRule(), + CompleteFundamentalRule(), +] + +LC_INCREMENTAL_STRATEGY = [ + LeafInitRule(), + FilteredBottomUpPredictCombineRule(), + FilteredCompleteFundamentalRule(), +] + + +class IncrementalChartParser(ChartParser): + """ + An *incremental* chart parser implementing Jay Earley's + parsing algorithm: + + | For each index end in [0, 1, ..., N]: + | For each edge such that edge.end = end: + | If edge is incomplete and edge.next is not a part of speech: + | Apply PredictorRule to edge + | If edge is incomplete and edge.next is a part of speech: + | Apply ScannerRule to edge + | If edge is complete: + | Apply CompleterRule to edge + | Return any complete parses in the chart + """ + + def __init__( + self, + grammar, + strategy=BU_LC_INCREMENTAL_STRATEGY, + trace=0, + trace_chart_width=50, + chart_class=IncrementalChart, + ): + """ + Create a new Earley chart parser, that uses ``grammar`` to + parse texts. + + :type grammar: CFG + :param grammar: The grammar used to parse texts. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + :type trace_chart_width: int + :param trace_chart_width: The default total width reserved for + the chart in trace output. The remainder of each line will + be used to display edges. + :param chart_class: The class that should be used to create + the charts used by this parser. + """ + self._grammar = grammar + self._trace = trace + self._trace_chart_width = trace_chart_width + self._chart_class = chart_class + + self._axioms = [] + self._inference_rules = [] + for rule in strategy: + if rule.NUM_EDGES == 0: + self._axioms.append(rule) + elif rule.NUM_EDGES == 1: + self._inference_rules.append(rule) + else: + raise ValueError( + "Incremental inference rules must have " "NUM_EDGES == 0 or 1" + ) + + def chart_parse(self, tokens, trace=None): + if trace is None: + trace = self._trace + trace_new_edges = self._trace_new_edges + + tokens = list(tokens) + self._grammar.check_coverage(tokens) + chart = self._chart_class(tokens) + grammar = self._grammar + + # Width, for printing trace edges. 
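+        # Each chart position gets an equal share of the configured chart
+        # width, e.g. the default width of 50 over a 9-token sentence leaves
+        # 5 characters per position.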
+ trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1) + if trace: + print(chart.pretty_format_leaves(trace_edge_width)) + + for axiom in self._axioms: + new_edges = list(axiom.apply(chart, grammar)) + trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width) + + inference_rules = self._inference_rules + for end in range(chart.num_leaves() + 1): + if trace > 1: + print("\n* Processing queue:", end, "\n") + agenda = list(chart.select(end=end)) + while agenda: + edge = agenda.pop() + for rule in inference_rules: + new_edges = list(rule.apply(chart, grammar, edge)) + trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) + for new_edge in new_edges: + if new_edge.end() == end: + agenda.append(new_edge) + + return chart + + +class EarleyChartParser(IncrementalChartParser): + def __init__(self, grammar, **parser_args): + IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args) + + +class IncrementalTopDownChartParser(IncrementalChartParser): + def __init__(self, grammar, **parser_args): + IncrementalChartParser.__init__( + self, grammar, TD_INCREMENTAL_STRATEGY, **parser_args + ) + + +class IncrementalBottomUpChartParser(IncrementalChartParser): + def __init__(self, grammar, **parser_args): + IncrementalChartParser.__init__( + self, grammar, BU_INCREMENTAL_STRATEGY, **parser_args + ) + + +class IncrementalBottomUpLeftCornerChartParser(IncrementalChartParser): + def __init__(self, grammar, **parser_args): + IncrementalChartParser.__init__( + self, grammar, BU_LC_INCREMENTAL_STRATEGY, **parser_args + ) + + +class IncrementalLeftCornerChartParser(IncrementalChartParser): + def __init__(self, grammar, **parser_args): + if not grammar.is_nonempty(): + raise ValueError( + "IncrementalLeftCornerParser only works for grammars " + "without empty productions." 
+ ) + IncrementalChartParser.__init__( + self, grammar, LC_INCREMENTAL_STRATEGY, **parser_args + ) + + +# //////////////////////////////////////////////////////////// +# Incremental FCFG Chart Parsers +# //////////////////////////////////////////////////////////// + +EARLEY_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureTopDownInitRule(), + FeatureCompleterRule(), + FeatureScannerRule(), + FeaturePredictorRule(), +] +TD_INCREMENTAL_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureTopDownInitRule(), + FeatureTopDownPredictRule(), + FeatureCompleteFundamentalRule(), +] +BU_INCREMENTAL_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureEmptyPredictRule(), + FeatureBottomUpPredictRule(), + FeatureCompleteFundamentalRule(), +] +BU_LC_INCREMENTAL_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureEmptyPredictRule(), + FeatureBottomUpPredictCombineRule(), + FeatureCompleteFundamentalRule(), +] + + +class FeatureIncrementalChartParser(IncrementalChartParser, FeatureChartParser): + def __init__( + self, + grammar, + strategy=BU_LC_INCREMENTAL_FEATURE_STRATEGY, + trace_chart_width=20, + chart_class=FeatureIncrementalChart, + **parser_args + ): + IncrementalChartParser.__init__( + self, + grammar, + strategy=strategy, + trace_chart_width=trace_chart_width, + chart_class=chart_class, + **parser_args + ) + + +class FeatureEarleyChartParser(FeatureIncrementalChartParser): + def __init__(self, grammar, **parser_args): + FeatureIncrementalChartParser.__init__( + self, grammar, EARLEY_FEATURE_STRATEGY, **parser_args + ) + + +class FeatureIncrementalTopDownChartParser(FeatureIncrementalChartParser): + def __init__(self, grammar, **parser_args): + FeatureIncrementalChartParser.__init__( + self, grammar, TD_INCREMENTAL_FEATURE_STRATEGY, **parser_args + ) + + +class FeatureIncrementalBottomUpChartParser(FeatureIncrementalChartParser): + def __init__(self, grammar, **parser_args): + FeatureIncrementalChartParser.__init__( + self, grammar, BU_INCREMENTAL_FEATURE_STRATEGY, **parser_args + ) + + +class FeatureIncrementalBottomUpLeftCornerChartParser(FeatureIncrementalChartParser): + def __init__(self, grammar, **parser_args): + FeatureIncrementalChartParser.__init__( + self, grammar, BU_LC_INCREMENTAL_FEATURE_STRATEGY, **parser_args + ) + + +# //////////////////////////////////////////////////////////// +# Demonstration +# //////////////////////////////////////////////////////////// + + +def demo( + print_times=True, + print_grammar=False, + print_trees=True, + trace=2, + sent="I saw John with a dog with my cookie", + numparses=5, +): + """ + A demonstration of the Earley parsers. + """ + import sys + import time + + from nltk.parse.chart import demo_grammar + + # The grammar for ChartParser and SteppingChartParser: + grammar = demo_grammar() + if print_grammar: + print("* Grammar") + print(grammar) + + # Tokenize the sample sentence. + print("* Sentence:") + print(sent) + tokens = sent.split() + print(tokens) + print() + + # Do the parsing. + earley = EarleyChartParser(grammar, trace=trace) + t = perf_counter() + chart = earley.chart_parse(tokens) + parses = list(chart.parses(grammar.start())) + t = perf_counter() - t + + # Print results. 
+ if numparses: + assert len(parses) == numparses, "Not all parses found" + if print_trees: + for tree in parses: + print(tree) + else: + print("Nr trees:", len(parses)) + if print_times: + print("Time:", t) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/evaluate.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/evaluate.py new file mode 100644 index 0000000000000000000000000000000000000000..07ab1c9832b42be2e655663cacf87d84db5ea3a9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/evaluate.py @@ -0,0 +1,129 @@ +# Natural Language Toolkit: evaluation of dependency parser +# +# Author: Long Duong +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import unicodedata + + +class DependencyEvaluator: + """ + Class for measuring labelled and unlabelled attachment score for + dependency parsing. Note that the evaluation ignores punctuation. + + >>> from nltk.parse import DependencyGraph, DependencyEvaluator + + >>> gold_sent = DependencyGraph(\""" + ... Pierre NNP 2 NMOD + ... Vinken NNP 8 SUB + ... , , 2 P + ... 61 CD 5 NMOD + ... years NNS 6 AMOD + ... old JJ 2 NMOD + ... , , 2 P + ... will MD 0 ROOT + ... join VB 8 VC + ... the DT 11 NMOD + ... board NN 9 OBJ + ... as IN 9 VMOD + ... a DT 15 NMOD + ... nonexecutive JJ 15 NMOD + ... director NN 12 PMOD + ... Nov. NNP 9 VMOD + ... 29 CD 16 NMOD + ... . . 9 VMOD + ... \""") + + >>> parsed_sent = DependencyGraph(\""" + ... Pierre NNP 8 NMOD + ... Vinken NNP 1 SUB + ... , , 3 P + ... 61 CD 6 NMOD + ... years NNS 6 AMOD + ... old JJ 2 NMOD + ... , , 3 AMOD + ... will MD 0 ROOT + ... join VB 8 VC + ... the DT 11 AMOD + ... board NN 9 OBJECT + ... as IN 9 NMOD + ... a DT 15 NMOD + ... nonexecutive JJ 15 NMOD + ... director NN 12 PMOD + ... Nov. NNP 9 VMOD + ... 29 CD 16 NMOD + ... . . 9 VMOD + ... \""") + + >>> de = DependencyEvaluator([parsed_sent],[gold_sent]) + >>> las, uas = de.eval() + >>> las + 0.6 + >>> uas + 0.8 + >>> abs(uas - 0.8) < 0.00001 + True + """ + + def __init__(self, parsed_sents, gold_sents): + """ + :param parsed_sents: the list of parsed_sents as the output of parser + :type parsed_sents: list(DependencyGraph) + """ + self._parsed_sents = parsed_sents + self._gold_sents = gold_sents + + def _remove_punct(self, inStr): + """ + Function to remove punctuation from Unicode string. + :param input: the input string + :return: Unicode string after remove all punctuation + """ + punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"} + return "".join(x for x in inStr if unicodedata.category(x) not in punc_cat) + + def eval(self): + """ + Return the Labeled Attachment Score (LAS) and Unlabeled Attachment Score (UAS) + + :return : tuple(float,float) + """ + if len(self._parsed_sents) != len(self._gold_sents): + raise ValueError( + " Number of parsed sentence is different with number of gold sentence." 
+ ) + + corr = 0 + corrL = 0 + total = 0 + + for i in range(len(self._parsed_sents)): + parsed_sent_nodes = self._parsed_sents[i].nodes + gold_sent_nodes = self._gold_sents[i].nodes + + if len(parsed_sent_nodes) != len(gold_sent_nodes): + raise ValueError("Sentences must have equal length.") + + for parsed_node_address, parsed_node in parsed_sent_nodes.items(): + gold_node = gold_sent_nodes[parsed_node_address] + + if parsed_node["word"] is None: + continue + if parsed_node["word"] != gold_node["word"]: + raise ValueError("Sentence sequence is not matched.") + + # Ignore if word is punctuation by default + # if (parsed_sent[j]["word"] in string.punctuation): + if self._remove_punct(parsed_node["word"]) == "": + continue + + total += 1 + if parsed_node["head"] == gold_node["head"]: + corr += 1 + if parsed_node["rel"] == gold_node["rel"]: + corrL += 1 + + return corrL / total, corr / total diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/featurechart.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/featurechart.py new file mode 100644 index 0000000000000000000000000000000000000000..0a981001e4f9ad301d4c564ac45c6a0bdcbd310e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/featurechart.py @@ -0,0 +1,674 @@ +# Natural Language Toolkit: Chart Parser for Feature-Based Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Rob Speer +# Peter Ljunglöf +# URL: +# For license information, see LICENSE.TXT + +""" +Extension of chart parsing implementation to handle grammars with +feature structures as nodes. +""" +from time import perf_counter + +from nltk.featstruct import TYPE, FeatStruct, find_variables, unify +from nltk.grammar import ( + CFG, + FeatStructNonterminal, + Nonterminal, + Production, + is_nonterminal, + is_terminal, +) +from nltk.parse.chart import ( + BottomUpPredictCombineRule, + BottomUpPredictRule, + CachedTopDownPredictRule, + Chart, + ChartParser, + EdgeI, + EmptyPredictRule, + FundamentalRule, + LeafInitRule, + SingleEdgeFundamentalRule, + TopDownInitRule, + TreeEdge, +) +from nltk.sem import logic +from nltk.tree import Tree + +# //////////////////////////////////////////////////////////// +# Tree Edge +# //////////////////////////////////////////////////////////// + + +class FeatureTreeEdge(TreeEdge): + """ + A specialized tree edge that allows shared variable bindings + between nonterminals on the left-hand side and right-hand side. + + Each ``FeatureTreeEdge`` contains a set of ``bindings``, i.e., a + dictionary mapping from variables to values. If the edge is not + complete, then these bindings are simply stored. However, if the + edge is complete, then the constructor applies these bindings to + every nonterminal in the edge whose symbol implements the + interface ``SubstituteBindingsI``. + """ + + def __init__(self, span, lhs, rhs, dot=0, bindings=None): + """ + Construct a new edge. If the edge is incomplete (i.e., if + ``dot alpha \* B1 beta][i:j]`` + - ``[B2 -> gamma \*][j:k]`` + + licenses the edge: + + - ``[A -> alpha B3 \* beta][i:j]`` + + assuming that B1 and B2 can be unified to generate B3. + """ + + def apply(self, chart, grammar, left_edge, right_edge): + # Make sure the rule is applicable. 
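+        # The two edges must be adjacent, the left edge must still expect a
+        # symbol, the right edge must be finished, and the left edge must be
+        # a FeatureTreeEdge so that it carries variable bindings.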
+ if not ( + left_edge.end() == right_edge.start() + and left_edge.is_incomplete() + and right_edge.is_complete() + and isinstance(left_edge, FeatureTreeEdge) + ): + return + found = right_edge.lhs() + nextsym = left_edge.nextsym() + if isinstance(right_edge, FeatureTreeEdge): + if not is_nonterminal(nextsym): + return + if left_edge.nextsym()[TYPE] != right_edge.lhs()[TYPE]: + return + # Create a copy of the bindings. + bindings = left_edge.bindings() + # We rename vars here, because we don't want variables + # from the two different productions to match. + found = found.rename_variables(used_vars=left_edge.variables()) + # Unify B1 (left_edge.nextsym) with B2 (right_edge.lhs) to + # generate B3 (result). + result = unify(nextsym, found, bindings, rename_vars=False) + if result is None: + return + else: + if nextsym != found: + return + # Create a copy of the bindings. + bindings = left_edge.bindings() + + # Construct the new edge. + new_edge = left_edge.move_dot_forward(right_edge.end(), bindings) + + # Add it to the chart, with appropriate child pointers. + if chart.insert_with_backpointer(new_edge, left_edge, right_edge): + yield new_edge + + +class FeatureSingleEdgeFundamentalRule(SingleEdgeFundamentalRule): + """ + A specialized version of the completer / single edge fundamental rule + that operates on nonterminals whose symbols are ``FeatStructNonterminal``. + Rather than simply comparing the nonterminals for equality, they are + unified. + """ + + _fundamental_rule = FeatureFundamentalRule() + + def _apply_complete(self, chart, grammar, right_edge): + fr = self._fundamental_rule + for left_edge in chart.select( + end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs() + ): + yield from fr.apply(chart, grammar, left_edge, right_edge) + + def _apply_incomplete(self, chart, grammar, left_edge): + fr = self._fundamental_rule + for right_edge in chart.select( + start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym() + ): + yield from fr.apply(chart, grammar, left_edge, right_edge) + + +# //////////////////////////////////////////////////////////// +# Top-Down Prediction +# //////////////////////////////////////////////////////////// + + +class FeatureTopDownInitRule(TopDownInitRule): + def apply(self, chart, grammar): + for prod in grammar.productions(lhs=grammar.start()): + new_edge = FeatureTreeEdge.from_production(prod, 0) + if chart.insert(new_edge, ()): + yield new_edge + + +class FeatureTopDownPredictRule(CachedTopDownPredictRule): + r""" + A specialized version of the (cached) top down predict rule that operates + on nonterminals whose symbols are ``FeatStructNonterminal``. Rather + than simply comparing the nonterminals for equality, they are + unified. + + The top down expand rule states that: + + - ``[A -> alpha \* B1 beta][i:j]`` + + licenses the edge: + + - ``[B2 -> \* gamma][j:j]`` + + for each grammar production ``B2 -> gamma``, assuming that B1 + and B2 can be unified. + """ + + def apply(self, chart, grammar, edge): + if edge.is_complete(): + return + nextsym, index = edge.nextsym(), edge.end() + if not is_nonterminal(nextsym): + return + + # If we've already applied this rule to an edge with the same + # next & end, and the chart & grammar have not changed, then + # just return (no new edges to add). 
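+        # (The cache is keyed on the expected symbol *with its bindings
+        # applied*, so two edges predicting the same category under the same
+        # bindings share a single cache entry.)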
+ nextsym_with_bindings = edge.next_with_bindings() + done = self._done.get((nextsym_with_bindings, index), (None, None)) + if done[0] is chart and done[1] is grammar: + return + + for prod in grammar.productions(lhs=nextsym): + # If the left corner in the predicted production is + # leaf, it must match with the input. + if prod.rhs(): + first = prod.rhs()[0] + if is_terminal(first): + if index >= chart.num_leaves(): + continue + if first != chart.leaf(index): + continue + + # We rename vars here, because we don't want variables + # from the two different productions to match. + if unify(prod.lhs(), nextsym_with_bindings, rename_vars=True): + new_edge = FeatureTreeEdge.from_production(prod, edge.end()) + if chart.insert(new_edge, ()): + yield new_edge + + # Record the fact that we've applied this rule. + self._done[nextsym_with_bindings, index] = (chart, grammar) + + +# //////////////////////////////////////////////////////////// +# Bottom-Up Prediction +# //////////////////////////////////////////////////////////// + + +class FeatureBottomUpPredictRule(BottomUpPredictRule): + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + for prod in grammar.productions(rhs=edge.lhs()): + if isinstance(edge, FeatureTreeEdge): + _next = prod.rhs()[0] + if not is_nonterminal(_next): + continue + + new_edge = FeatureTreeEdge.from_production(prod, edge.start()) + if chart.insert(new_edge, ()): + yield new_edge + + +class FeatureBottomUpPredictCombineRule(BottomUpPredictCombineRule): + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + found = edge.lhs() + for prod in grammar.productions(rhs=found): + bindings = {} + if isinstance(edge, FeatureTreeEdge): + _next = prod.rhs()[0] + if not is_nonterminal(_next): + continue + + # We rename vars here, because we don't want variables + # from the two different productions to match. 
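+                # find_variables() collects every variable occurring in the
+                # candidate production, so renaming `found` against that set
+                # avoids accidental clashes during unification below.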
+ used_vars = find_variables( + (prod.lhs(),) + prod.rhs(), fs_class=FeatStruct + ) + found = found.rename_variables(used_vars=used_vars) + + result = unify(_next, found, bindings, rename_vars=False) + if result is None: + continue + + new_edge = FeatureTreeEdge.from_production( + prod, edge.start() + ).move_dot_forward(edge.end(), bindings) + if chart.insert(new_edge, (edge,)): + yield new_edge + + +class FeatureEmptyPredictRule(EmptyPredictRule): + def apply(self, chart, grammar): + for prod in grammar.productions(empty=True): + for index in range(chart.num_leaves() + 1): + new_edge = FeatureTreeEdge.from_production(prod, index) + if chart.insert(new_edge, ()): + yield new_edge + + +# //////////////////////////////////////////////////////////// +# Feature Chart Parser +# //////////////////////////////////////////////////////////// + +TD_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureTopDownInitRule(), + FeatureTopDownPredictRule(), + FeatureSingleEdgeFundamentalRule(), +] +BU_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureEmptyPredictRule(), + FeatureBottomUpPredictRule(), + FeatureSingleEdgeFundamentalRule(), +] +BU_LC_FEATURE_STRATEGY = [ + LeafInitRule(), + FeatureEmptyPredictRule(), + FeatureBottomUpPredictCombineRule(), + FeatureSingleEdgeFundamentalRule(), +] + + +class FeatureChartParser(ChartParser): + def __init__( + self, + grammar, + strategy=BU_LC_FEATURE_STRATEGY, + trace_chart_width=20, + chart_class=FeatureChart, + **parser_args, + ): + ChartParser.__init__( + self, + grammar, + strategy=strategy, + trace_chart_width=trace_chart_width, + chart_class=chart_class, + **parser_args, + ) + + +class FeatureTopDownChartParser(FeatureChartParser): + def __init__(self, grammar, **parser_args): + FeatureChartParser.__init__(self, grammar, TD_FEATURE_STRATEGY, **parser_args) + + +class FeatureBottomUpChartParser(FeatureChartParser): + def __init__(self, grammar, **parser_args): + FeatureChartParser.__init__(self, grammar, BU_FEATURE_STRATEGY, **parser_args) + + +class FeatureBottomUpLeftCornerChartParser(FeatureChartParser): + def __init__(self, grammar, **parser_args): + FeatureChartParser.__init__( + self, grammar, BU_LC_FEATURE_STRATEGY, **parser_args + ) + + +# //////////////////////////////////////////////////////////// +# Instantiate Variable Chart +# //////////////////////////////////////////////////////////// + + +class InstantiateVarsChart(FeatureChart): + """ + A specialized chart that 'instantiates' variables whose names + start with '@', by replacing them with unique new variables. + In particular, whenever a complete edge is added to the chart, any + variables in the edge's ``lhs`` whose names start with '@' will be + replaced by unique new ``Variable``. + """ + + def __init__(self, tokens): + FeatureChart.__init__(self, tokens) + + def initialize(self): + self._instantiated = set() + FeatureChart.initialize(self) + + def insert(self, edge, child_pointer_list): + if edge in self._instantiated: + return False + self.instantiate_edge(edge) + return FeatureChart.insert(self, edge, child_pointer_list) + + def instantiate_edge(self, edge): + """ + If the edge is a ``FeatureTreeEdge``, and it is complete, + then instantiate all variables whose names start with '@', + by replacing them with unique new variables. + + Note that instantiation is done in-place, since the + parsing algorithms might already hold a reference to + the edge for future use. + """ + # If the edge is a leaf, or is not complete, or is + # already in the chart, then just return it as-is. 
+ if not isinstance(edge, FeatureTreeEdge): + return + if not edge.is_complete(): + return + if edge in self._edge_to_cpls: + return + + # Get a list of variables that need to be instantiated. + # If there are none, then return as-is. + inst_vars = self.inst_vars(edge) + if not inst_vars: + return + + # Instantiate the edge! + self._instantiated.add(edge) + edge._lhs = edge.lhs().substitute_bindings(inst_vars) + + def inst_vars(self, edge): + return { + var: logic.unique_variable() + for var in edge.lhs().variables() + if var.name.startswith("@") + } + + +# //////////////////////////////////////////////////////////// +# Demo +# //////////////////////////////////////////////////////////// + + +def demo_grammar(): + from nltk.grammar import FeatureGrammar + + return FeatureGrammar.fromstring( + """ +S -> NP VP +PP -> Prep NP +NP -> NP PP +VP -> VP PP +VP -> Verb NP +VP -> Verb +NP -> Det[pl=?x] Noun[pl=?x] +NP -> "John" +NP -> "I" +Det -> "the" +Det -> "my" +Det[-pl] -> "a" +Noun[-pl] -> "dog" +Noun[-pl] -> "cookie" +Verb -> "ate" +Verb -> "saw" +Prep -> "with" +Prep -> "under" +""" + ) + + +def demo( + print_times=True, + print_grammar=True, + print_trees=True, + print_sentence=True, + trace=1, + parser=FeatureChartParser, + sent="I saw John with a dog with my cookie", +): + import sys + import time + + print() + grammar = demo_grammar() + if print_grammar: + print(grammar) + print() + print("*", parser.__name__) + if print_sentence: + print("Sentence:", sent) + tokens = sent.split() + t = perf_counter() + cp = parser(grammar, trace=trace) + chart = cp.chart_parse(tokens) + trees = list(chart.parses(grammar.start())) + if print_times: + print("Time: %s" % (perf_counter() - t)) + if print_trees: + for tree in trees: + print(tree) + else: + print("Nr trees:", len(trees)) + + +def run_profile(): + import profile + + profile.run("for i in range(1): demo()", "/tmp/profile.out") + import pstats + + p = pstats.Stats("/tmp/profile.out") + p.strip_dirs().sort_stats("time", "cum").print_stats(60) + p.strip_dirs().sort_stats("cum", "time").print_stats(60) + + +if __name__ == "__main__": + from nltk.data import load + + demo() + print() + grammar = load("grammars/book_grammars/feat0.fcfg") + cp = FeatureChartParser(grammar, trace=2) + sent = "Kim likes children" + tokens = sent.split() + trees = cp.parse(tokens) + for tree in trees: + print(tree) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py new file mode 100644 index 0000000000000000000000000000000000000000..b96f996cf63b4d3e093994d6319c8fb9fb91569a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/nonprojectivedependencyparser.py @@ -0,0 +1,772 @@ +# Natural Language Toolkit: Dependency Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jason Narad +# +# URL: +# For license information, see LICENSE.TXT +# + +import logging +import math + +from nltk.parse.dependencygraph import DependencyGraph + +logger = logging.getLogger(__name__) + +################################################################# +# DependencyScorerI - Interface for Graph-Edge Weight Calculation +################################################################# + + +class DependencyScorerI: + """ + A scorer for calculated the weights on the edges of a weighted + dependency graph. This is used by a + ``ProbabilisticNonprojectiveParser`` to initialize the edge + weights of a ``DependencyGraph``. 
While typically this would be done + by training a binary classifier, any class that can return a + multidimensional list representation of the edge weights can + implement this interface. As such, it has no necessary + fields. + """ + + def __init__(self): + if self.__class__ == DependencyScorerI: + raise TypeError("DependencyScorerI is an abstract interface") + + def train(self, graphs): + """ + :type graphs: list(DependencyGraph) + :param graphs: A list of dependency graphs to train the scorer. + Typically the edges present in the graphs can be used as + positive training examples, and the edges not present as negative + examples. + """ + raise NotImplementedError() + + def score(self, graph): + """ + :type graph: DependencyGraph + :param graph: A dependency graph whose set of edges need to be + scored. + :rtype: A three-dimensional list of numbers. + :return: The score is returned in a multidimensional(3) list, such + that the outer-dimension refers to the head, and the + inner-dimension refers to the dependencies. For instance, + scores[0][1] would reference the list of scores corresponding to + arcs from node 0 to node 1. The node's 'address' field can be used + to determine its number identification. + + For further illustration, a score list corresponding to Fig.2 of + Keith Hall's 'K-best Spanning Tree Parsing' paper:: + + scores = [[[], [5], [1], [1]], + [[], [], [11], [4]], + [[], [10], [], [5]], + [[], [8], [8], []]] + + When used in conjunction with a MaxEntClassifier, each score would + correspond to the confidence of a particular edge being classified + with the positive training examples. + """ + raise NotImplementedError() + + +################################################################# +# NaiveBayesDependencyScorer +################################################################# + + +class NaiveBayesDependencyScorer(DependencyScorerI): + """ + A dependency scorer built around a MaxEnt classifier. In this + particular class that classifier is a ``NaiveBayesClassifier``. + It uses head-word, head-tag, child-word, and child-tag features + for classification. + + >>> from nltk.parse.dependencygraph import DependencyGraph, conll_data2 + + >>> graphs = [DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry] + >>> npp = ProbabilisticNonprojectiveParser() + >>> npp.train(graphs, NaiveBayesDependencyScorer()) + >>> parses = npp.parse(['Cathy', 'zag', 'hen', 'zwaaien', '.'], ['N', 'V', 'Pron', 'Adj', 'N', 'Punc']) + >>> len(list(parses)) + 1 + + """ + + def __init__(self): + pass # Do nothing without throwing error + + def train(self, graphs): + """ + Trains a ``NaiveBayesClassifier`` using the edges present in + graphs list as positive examples, the edges not present as + negative examples. Uses a feature vector of head-word, + head-tag, child-word, and child-tag. + + :type graphs: list(DependencyGraph) + :param graphs: A list of dependency graphs to train the scorer. 
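+
+        For illustration (the values here are made up), an arc from a head
+        ``("join", "VB")`` to a child ``("board", "NN")`` that is present in
+        a gold graph becomes the labelled example::
+
+            ({"a": "join", "b": "VB", "c": "board", "d": "NN"}, "T")
+
+        while the same pair is labelled ``"F"`` if the arc is absent.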
+ """ + + from nltk.classify import NaiveBayesClassifier + + # Create training labeled training examples + labeled_examples = [] + for graph in graphs: + for head_node in graph.nodes.values(): + for child_index, child_node in graph.nodes.items(): + if child_index in head_node["deps"]: + label = "T" + else: + label = "F" + labeled_examples.append( + ( + dict( + a=head_node["word"], + b=head_node["tag"], + c=child_node["word"], + d=child_node["tag"], + ), + label, + ) + ) + + self.classifier = NaiveBayesClassifier.train(labeled_examples) + + def score(self, graph): + """ + Converts the graph into a feature-based representation of + each edge, and then assigns a score to each based on the + confidence of the classifier in assigning it to the + positive label. Scores are returned in a multidimensional list. + + :type graph: DependencyGraph + :param graph: A dependency graph to score. + :rtype: 3 dimensional list + :return: Edge scores for the graph parameter. + """ + # Convert graph to feature representation + edges = [] + for head_node in graph.nodes.values(): + for child_node in graph.nodes.values(): + edges.append( + dict( + a=head_node["word"], + b=head_node["tag"], + c=child_node["word"], + d=child_node["tag"], + ) + ) + + # Score edges + edge_scores = [] + row = [] + count = 0 + for pdist in self.classifier.prob_classify_many(edges): + logger.debug("%.4f %.4f", pdist.prob("T"), pdist.prob("F")) + # smoothing in case the probability = 0 + row.append([math.log(pdist.prob("T") + 0.00000000001)]) + count += 1 + if count == len(graph.nodes): + edge_scores.append(row) + row = [] + count = 0 + return edge_scores + + +################################################################# +# A Scorer for Demo Purposes +################################################################# +# A short class necessary to show parsing example from paper +class DemoScorer(DependencyScorerI): + def train(self, graphs): + print("Training...") + + def score(self, graph): + # scores for Keith Hall 'K-best Spanning Tree Parsing' paper + return [ + [[], [5], [1], [1]], + [[], [], [11], [4]], + [[], [10], [], [5]], + [[], [8], [8], []], + ] + + +################################################################# +# Non-Projective Probabilistic Parsing +################################################################# + + +class ProbabilisticNonprojectiveParser: + """A probabilistic non-projective dependency parser. + + Nonprojective dependencies allows for "crossing branches" in the parse tree + which is necessary for representing particular linguistic phenomena, or even + typical parses in some languages. This parser follows the MST parsing + algorithm, outlined in McDonald(2005), which likens the search for the best + non-projective parse to finding the maximum spanning tree in a weighted + directed graph. + + >>> class Scorer(DependencyScorerI): + ... def train(self, graphs): + ... pass + ... + ... def score(self, graph): + ... return [ + ... [[], [5], [1], [1]], + ... [[], [], [11], [4]], + ... [[], [10], [], [5]], + ... [[], [8], [8], []], + ... ] + + + >>> npp = ProbabilisticNonprojectiveParser() + >>> npp.train([], Scorer()) + + >>> parses = npp.parse(['v1', 'v2', 'v3'], [None, None, None]) + >>> len(list(parses)) + 1 + + Rule based example + + >>> from nltk.grammar import DependencyGrammar + + >>> grammar = DependencyGrammar.fromstring(''' + ... 'taught' -> 'play' | 'man' + ... 'man' -> 'the' | 'in' + ... 'in' -> 'corner' + ... 'corner' -> 'the' + ... 'play' -> 'golf' | 'dachshund' | 'to' + ... 
'dachshund' -> 'his' + ... ''') + + >>> ndp = NonprojectiveDependencyParser(grammar) + >>> parses = ndp.parse(['the', 'man', 'in', 'the', 'corner', 'taught', 'his', 'dachshund', 'to', 'play', 'golf']) + >>> len(list(parses)) + 4 + + """ + + def __init__(self): + """ + Creates a new non-projective parser. + """ + logging.debug("initializing prob. nonprojective...") + + def train(self, graphs, dependency_scorer): + """ + Trains a ``DependencyScorerI`` from a set of ``DependencyGraph`` objects, + and establishes this as the parser's scorer. This is used to + initialize the scores on a ``DependencyGraph`` during the parsing + procedure. + + :type graphs: list(DependencyGraph) + :param graphs: A list of dependency graphs to train the scorer. + :type dependency_scorer: DependencyScorerI + :param dependency_scorer: A scorer which implements the + ``DependencyScorerI`` interface. + """ + self._scorer = dependency_scorer + self._scorer.train(graphs) + + def initialize_edge_scores(self, graph): + """ + Assigns a score to every edge in the ``DependencyGraph`` graph. + These scores are generated via the parser's scorer which + was assigned during the training process. + + :type graph: DependencyGraph + :param graph: A dependency graph to assign scores to. + """ + self.scores = self._scorer.score(graph) + + def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph): + """ + Takes a list of nodes that have been identified to belong to a cycle, + and collapses them into on larger node. The arcs of all nodes in + the graph must be updated to account for this. + + :type new_node: Node. + :param new_node: A Node (Dictionary) to collapse the cycle nodes into. + :type cycle_path: A list of integers. + :param cycle_path: A list of node addresses, each of which is in the cycle. + :type g_graph, b_graph, c_graph: DependencyGraph + :param g_graph, b_graph, c_graph: Graphs which need to be updated. + """ + logger.debug("Collapsing nodes...") + # Collapse all cycle nodes into v_n+1 in G_Graph + for cycle_node_index in cycle_path: + g_graph.remove_by_address(cycle_node_index) + g_graph.add_node(new_node) + g_graph.redirect_arcs(cycle_path, new_node["address"]) + + def update_edge_scores(self, new_node, cycle_path): + """ + Updates the edge scores to reflect a collapse operation into + new_node. + + :type new_node: A Node. + :param new_node: The node which cycle nodes are collapsed into. + :type cycle_path: A list of integers. + :param cycle_path: A list of node addresses that belong to the cycle. 
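+
+        This is the contraction step of the Chu-Liu/Edmonds maximum spanning
+        tree algorithm: for every arc entering a cycle node from outside the
+        cycle, the score of the best arc reaching that same node from inside
+        the cycle is subtracted. For example, if the in-cycle arcs into a
+        node score 8 and 10, then 10 is subtracted from every outside arc
+        into that node.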
+ """ + logger.debug("cycle %s", cycle_path) + + cycle_path = self.compute_original_indexes(cycle_path) + + logger.debug("old cycle %s", cycle_path) + logger.debug("Prior to update: %s", self.scores) + + for i, row in enumerate(self.scores): + for j, column in enumerate(self.scores[i]): + logger.debug(self.scores[i][j]) + if j in cycle_path and i not in cycle_path and self.scores[i][j]: + subtract_val = self.compute_max_subtract_score(j, cycle_path) + + logger.debug("%s - %s", self.scores[i][j], subtract_val) + + new_vals = [] + for cur_val in self.scores[i][j]: + new_vals.append(cur_val - subtract_val) + + self.scores[i][j] = new_vals + + for i, row in enumerate(self.scores): + for j, cell in enumerate(self.scores[i]): + if i in cycle_path and j in cycle_path: + self.scores[i][j] = [] + + logger.debug("After update: %s", self.scores) + + def compute_original_indexes(self, new_indexes): + """ + As nodes are collapsed into others, they are replaced + by the new node in the graph, but it's still necessary + to keep track of what these original nodes were. This + takes a list of node addresses and replaces any collapsed + node addresses with their original addresses. + + :type new_indexes: A list of integers. + :param new_indexes: A list of node addresses to check for + subsumed nodes. + """ + swapped = True + while swapped: + originals = [] + swapped = False + for new_index in new_indexes: + if new_index in self.inner_nodes: + for old_val in self.inner_nodes[new_index]: + if old_val not in originals: + originals.append(old_val) + swapped = True + else: + originals.append(new_index) + new_indexes = originals + return new_indexes + + def compute_max_subtract_score(self, column_index, cycle_indexes): + """ + When updating scores the score of the highest-weighted incoming + arc is subtracted upon collapse. This returns the correct + amount to subtract from that edge. + + :type column_index: integer. + :param column_index: A index representing the column of incoming arcs + to a particular node being updated + :type cycle_indexes: A list of integers. + :param cycle_indexes: Only arcs from cycle nodes are considered. This + is a list of such nodes addresses. + """ + max_score = -100000 + for row_index in cycle_indexes: + for subtract_val in self.scores[row_index][column_index]: + if subtract_val > max_score: + max_score = subtract_val + return max_score + + def best_incoming_arc(self, node_index): + """ + Returns the source of the best incoming arc to the + node with address: node_index + + :type node_index: integer. + :param node_index: The address of the 'destination' node, + the node that is arced to. 
+ """ + originals = self.compute_original_indexes([node_index]) + logger.debug("originals: %s", originals) + + max_arc = None + max_score = None + for row_index in range(len(self.scores)): + for col_index in range(len(self.scores[row_index])): + if col_index in originals and ( + max_score is None or self.scores[row_index][col_index] > max_score + ): + max_score = self.scores[row_index][col_index] + max_arc = row_index + logger.debug("%s, %s", row_index, col_index) + + logger.debug(max_score) + + for key in self.inner_nodes: + replaced_nodes = self.inner_nodes[key] + if max_arc in replaced_nodes: + return key + + return max_arc + + def original_best_arc(self, node_index): + originals = self.compute_original_indexes([node_index]) + max_arc = None + max_score = None + max_orig = None + for row_index in range(len(self.scores)): + for col_index in range(len(self.scores[row_index])): + if col_index in originals and ( + max_score is None or self.scores[row_index][col_index] > max_score + ): + max_score = self.scores[row_index][col_index] + max_arc = row_index + max_orig = col_index + return [max_arc, max_orig] + + def parse(self, tokens, tags): + """ + Parses a list of tokens in accordance to the MST parsing algorithm + for non-projective dependency parses. Assumes that the tokens to + be parsed have already been tagged and those tags are provided. Various + scoring methods can be used by implementing the ``DependencyScorerI`` + interface and passing it to the training algorithm. + + :type tokens: list(str) + :param tokens: A list of words or punctuation to be parsed. + :type tags: list(str) + :param tags: A list of tags corresponding by index to the words in the tokens list. + :return: An iterator of non-projective parses. + :rtype: iter(DependencyGraph) + """ + self.inner_nodes = {} + + # Initialize g_graph + g_graph = DependencyGraph() + for index, token in enumerate(tokens): + g_graph.nodes[index + 1].update( + {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} + ) + + # Fully connect non-root nodes in g_graph + g_graph.connect_graph() + original_graph = DependencyGraph() + for index, token in enumerate(tokens): + original_graph.nodes[index + 1].update( + {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} + ) + + b_graph = DependencyGraph() + c_graph = DependencyGraph() + + for index, token in enumerate(tokens): + c_graph.nodes[index + 1].update( + {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} + ) + + # Assign initial scores to g_graph edges + self.initialize_edge_scores(g_graph) + logger.debug(self.scores) + # Initialize a list of unvisited vertices (by node address) + unvisited_vertices = [vertex["address"] for vertex in c_graph.nodes.values()] + # Iterate over unvisited vertices + nr_vertices = len(tokens) + betas = {} + while unvisited_vertices: + # Mark current node as visited + current_vertex = unvisited_vertices.pop(0) + logger.debug("current_vertex: %s", current_vertex) + # Get corresponding node n_i to vertex v_i + current_node = g_graph.get_by_address(current_vertex) + logger.debug("current_node: %s", current_node) + # Get best in-edge node b for current node + best_in_edge = self.best_incoming_arc(current_vertex) + betas[current_vertex] = self.original_best_arc(current_vertex) + logger.debug("best in arc: %s --> %s", best_in_edge, current_vertex) + # b_graph = Union(b_graph, b) + for new_vertex in [current_vertex, best_in_edge]: + b_graph.nodes[new_vertex].update( + {"word": "TEMP", "rel": "NTOP", "address": 
new_vertex} + ) + b_graph.add_arc(best_in_edge, current_vertex) + # Beta(current node) = b - stored for parse recovery + # If b_graph contains a cycle, collapse it + cycle_path = b_graph.contains_cycle() + if cycle_path: + # Create a new node v_n+1 with address = len(nodes) + 1 + new_node = {"word": "NONE", "rel": "NTOP", "address": nr_vertices + 1} + # c_graph = Union(c_graph, v_n+1) + c_graph.add_node(new_node) + # Collapse all nodes in cycle C into v_n+1 + self.update_edge_scores(new_node, cycle_path) + self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph) + for cycle_index in cycle_path: + c_graph.add_arc(new_node["address"], cycle_index) + # self.replaced_by[cycle_index] = new_node['address'] + + self.inner_nodes[new_node["address"]] = cycle_path + + # Add v_n+1 to list of unvisited vertices + unvisited_vertices.insert(0, nr_vertices + 1) + + # increment # of nodes counter + nr_vertices += 1 + + # Remove cycle nodes from b_graph; B = B - cycle c + for cycle_node_address in cycle_path: + b_graph.remove_by_address(cycle_node_address) + + logger.debug("g_graph: %s", g_graph) + logger.debug("b_graph: %s", b_graph) + logger.debug("c_graph: %s", c_graph) + logger.debug("Betas: %s", betas) + logger.debug("replaced nodes %s", self.inner_nodes) + + # Recover parse tree + logger.debug("Final scores: %s", self.scores) + + logger.debug("Recovering parse...") + for i in range(len(tokens) + 1, nr_vertices + 1): + betas[betas[i][1]] = betas[i] + + logger.debug("Betas: %s", betas) + for node in original_graph.nodes.values(): + # TODO: It's dangerous to assume that deps it a dictionary + # because it's a default dictionary. Ideally, here we should not + # be concerned how dependencies are stored inside of a dependency + # graph. + node["deps"] = {} + for i in range(1, len(tokens) + 1): + original_graph.add_arc(betas[i][0], betas[i][1]) + + logger.debug("Done.") + yield original_graph + + +################################################################# +# Rule-based Non-Projective Parser +################################################################# + + +class NonprojectiveDependencyParser: + """ + A non-projective, rule-based, dependency parser. This parser + will return the set of all possible non-projective parses based on + the word-to-word relations defined in the parser's dependency + grammar, and will allow the branches of the parse tree to cross + in order to capture a variety of linguistic phenomena that a + projective parser will not. + """ + + def __init__(self, dependency_grammar): + """ + Creates a new ``NonprojectiveDependencyParser``. + + :param dependency_grammar: a grammar of word-to-word relations. + :type dependency_grammar: DependencyGrammar + """ + self._grammar = dependency_grammar + + def parse(self, tokens): + """ + Parses the input tokens with respect to the parser's grammar. Parsing + is accomplished by representing the search-space of possible parses as + a fully-connected directed graph. Arcs that would lead to ungrammatical + parses are removed and a lattice is constructed of length n, where n is + the number of input tokens, to represent all possible grammatical + traversals. All possible paths through the lattice are then enumerated + to produce the set of non-projective parses. + + param tokens: A list of tokens to parse. + type tokens: list(str) + return: An iterator of non-projective parses. 
+ rtype: iter(DependencyGraph) + """ + # Create graph representation of tokens + self._graph = DependencyGraph() + + for index, token in enumerate(tokens): + self._graph.nodes[index] = { + "word": token, + "deps": [], + "rel": "NTOP", + "address": index, + } + + for head_node in self._graph.nodes.values(): + deps = [] + for dep_node in self._graph.nodes.values(): + if ( + self._grammar.contains(head_node["word"], dep_node["word"]) + and head_node["word"] != dep_node["word"] + ): + deps.append(dep_node["address"]) + head_node["deps"] = deps + + # Create lattice of possible heads + roots = [] + possible_heads = [] + for i, word in enumerate(tokens): + heads = [] + for j, head in enumerate(tokens): + if (i != j) and self._grammar.contains(head, word): + heads.append(j) + if len(heads) == 0: + roots.append(i) + possible_heads.append(heads) + + # Set roots to attempt + if len(roots) < 2: + if len(roots) == 0: + for i in range(len(tokens)): + roots.append(i) + + # Traverse lattice + analyses = [] + for _ in roots: + stack = [] + analysis = [[] for i in range(len(possible_heads))] + i = 0 + forward = True + while i >= 0: + if forward: + if len(possible_heads[i]) == 1: + analysis[i] = possible_heads[i][0] + elif len(possible_heads[i]) == 0: + analysis[i] = -1 + else: + head = possible_heads[i].pop() + analysis[i] = head + stack.append([i, head]) + if not forward: + index_on_stack = False + for stack_item in stack: + if stack_item[0] == i: + index_on_stack = True + orig_length = len(possible_heads[i]) + + if index_on_stack and orig_length == 0: + for j in range(len(stack) - 1, -1, -1): + stack_item = stack[j] + if stack_item[0] == i: + possible_heads[i].append(stack.pop(j)[1]) + + elif index_on_stack and orig_length > 0: + head = possible_heads[i].pop() + analysis[i] = head + stack.append([i, head]) + forward = True + + if i + 1 == len(possible_heads): + analyses.append(analysis[:]) + forward = False + if forward: + i += 1 + else: + i -= 1 + + # Filter parses + # ensure 1 root, every thing has 1 head + for analysis in analyses: + if analysis.count(-1) > 1: + # there are several root elements! 
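+                # A head value of -1 marks a root candidate; an analysis with
+                # more than one such value cannot form a single tree and is
+                # skipped rather than yielded.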
+ continue + + graph = DependencyGraph() + graph.root = graph.nodes[analysis.index(-1) + 1] + + for address, (token, head_index) in enumerate( + zip(tokens, analysis), start=1 + ): + head_address = head_index + 1 + + node = graph.nodes[address] + node.update({"word": token, "address": address}) + + if head_address == 0: + rel = "ROOT" + else: + rel = "" + graph.nodes[head_index + 1]["deps"][rel].append(address) + + # TODO: check for cycles + yield graph + + +################################################################# +# Demos +################################################################# + + +def demo(): + # hall_demo() + nonprojective_conll_parse_demo() + rule_based_demo() + + +def hall_demo(): + npp = ProbabilisticNonprojectiveParser() + npp.train([], DemoScorer()) + for parse_graph in npp.parse(["v1", "v2", "v3"], [None, None, None]): + print(parse_graph) + + +def nonprojective_conll_parse_demo(): + from nltk.parse.dependencygraph import conll_data2 + + graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] + npp = ProbabilisticNonprojectiveParser() + npp.train(graphs, NaiveBayesDependencyScorer()) + for parse_graph in npp.parse( + ["Cathy", "zag", "hen", "zwaaien", "."], ["N", "V", "Pron", "Adj", "N", "Punc"] + ): + print(parse_graph) + + +def rule_based_demo(): + from nltk.grammar import DependencyGrammar + + grammar = DependencyGrammar.fromstring( + """ + 'taught' -> 'play' | 'man' + 'man' -> 'the' | 'in' + 'in' -> 'corner' + 'corner' -> 'the' + 'play' -> 'golf' | 'dachshund' | 'to' + 'dachshund' -> 'his' + """ + ) + print(grammar) + ndp = NonprojectiveDependencyParser(grammar) + graphs = ndp.parse( + [ + "the", + "man", + "in", + "the", + "corner", + "taught", + "his", + "dachshund", + "to", + "play", + "golf", + ] + ) + print("Graphs:") + for graph in graphs: + print(graph) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/pchart.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/pchart.py new file mode 100644 index 0000000000000000000000000000000000000000..319655d023a462c0c6c7ac087746dc77d46b7949 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/pchart.py @@ -0,0 +1,579 @@ +# Natural Language Toolkit: Probabilistic Chart Parsers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Classes and interfaces for associating probabilities with tree +structures that represent the internal organization of a text. The +probabilistic parser module defines ``BottomUpProbabilisticChartParser``. + +``BottomUpProbabilisticChartParser`` is an abstract class that implements +a bottom-up chart parser for ``PCFG`` grammars. It maintains a queue of edges, +and adds them to the chart one at a time. The ordering of this queue +is based on the probabilities associated with the edges, allowing the +parser to expand more likely edges before less likely ones. Each +subclass implements a different queue ordering, producing different +search strategies. Currently the following subclasses are defined: + + - ``InsideChartParser`` searches edges in decreasing order of + their trees' inside probabilities. + - ``RandomChartParser`` searches edges in random order. + - ``LongestChartParser`` searches edges in decreasing order of their + location's length. + +The ``BottomUpProbabilisticChartParser`` constructor has an optional +argument beam_size. 
If non-zero, this controls the size of the beam +(aka the edge queue). This option is most useful with InsideChartParser. +""" + +##////////////////////////////////////////////////////// +## Bottom-Up PCFG Chart Parser +##////////////////////////////////////////////////////// + +# [XX] This might not be implemented quite right -- it would be better +# to associate probabilities with child pointer lists. + +import random +from functools import reduce + +from nltk.grammar import PCFG, Nonterminal +from nltk.parse.api import ParserI +from nltk.parse.chart import AbstractChartRule, Chart, LeafEdge, TreeEdge +from nltk.tree import ProbabilisticTree, Tree + + +# Probabilistic edges +class ProbabilisticLeafEdge(LeafEdge): + def prob(self): + return 1.0 + + +class ProbabilisticTreeEdge(TreeEdge): + def __init__(self, prob, *args, **kwargs): + TreeEdge.__init__(self, *args, **kwargs) + self._prob = prob + # two edges with different probabilities are not equal. + self._comparison_key = (self._comparison_key, prob) + + def prob(self): + return self._prob + + @staticmethod + def from_production(production, index, p): + return ProbabilisticTreeEdge( + p, (index, index), production.lhs(), production.rhs(), 0 + ) + + +# Rules using probabilistic edges +class ProbabilisticBottomUpInitRule(AbstractChartRule): + NUM_EDGES = 0 + + def apply(self, chart, grammar): + for index in range(chart.num_leaves()): + new_edge = ProbabilisticLeafEdge(chart.leaf(index), index) + if chart.insert(new_edge, ()): + yield new_edge + + +class ProbabilisticBottomUpPredictRule(AbstractChartRule): + NUM_EDGES = 1 + + def apply(self, chart, grammar, edge): + if edge.is_incomplete(): + return + for prod in grammar.productions(): + if edge.lhs() == prod.rhs()[0]: + new_edge = ProbabilisticTreeEdge.from_production( + prod, edge.start(), prod.prob() + ) + if chart.insert(new_edge, ()): + yield new_edge + + +class ProbabilisticFundamentalRule(AbstractChartRule): + NUM_EDGES = 2 + + def apply(self, chart, grammar, left_edge, right_edge): + # Make sure the rule is applicable. + if not ( + left_edge.end() == right_edge.start() + and left_edge.nextsym() == right_edge.lhs() + and left_edge.is_incomplete() + and right_edge.is_complete() + ): + return + + # Construct the new edge. + p = left_edge.prob() * right_edge.prob() + new_edge = ProbabilisticTreeEdge( + p, + span=(left_edge.start(), right_edge.end()), + lhs=left_edge.lhs(), + rhs=left_edge.rhs(), + dot=left_edge.dot() + 1, + ) + + # Add it to the chart, with appropriate child pointers. + changed_chart = False + for cpl1 in chart.child_pointer_lists(left_edge): + if chart.insert(new_edge, cpl1 + (right_edge,)): + changed_chart = True + + # If we changed the chart, then generate the edge. 
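+        # chart.insert() returns True only for edge/child-pointer combinations
+        # that were not already present, so yielding here keeps duplicate
+        # probabilistic edges out of the parser's queue.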
+ if changed_chart: + yield new_edge + + +class SingleEdgeProbabilisticFundamentalRule(AbstractChartRule): + NUM_EDGES = 1 + + _fundamental_rule = ProbabilisticFundamentalRule() + + def apply(self, chart, grammar, edge1): + fr = self._fundamental_rule + if edge1.is_incomplete(): + # edge1 = left_edge; edge2 = right_edge + for edge2 in chart.select( + start=edge1.end(), is_complete=True, lhs=edge1.nextsym() + ): + yield from fr.apply(chart, grammar, edge1, edge2) + else: + # edge2 = left_edge; edge1 = right_edge + for edge2 in chart.select( + end=edge1.start(), is_complete=False, nextsym=edge1.lhs() + ): + yield from fr.apply(chart, grammar, edge2, edge1) + + def __str__(self): + return "Fundamental Rule" + + +class BottomUpProbabilisticChartParser(ParserI): + """ + An abstract bottom-up parser for ``PCFG`` grammars that uses a ``Chart`` to + record partial results. ``BottomUpProbabilisticChartParser`` maintains + a queue of edges that can be added to the chart. This queue is + initialized with edges for each token in the text that is being + parsed. ``BottomUpProbabilisticChartParser`` inserts these edges into + the chart one at a time, starting with the most likely edges, and + proceeding to less likely edges. For each edge that is added to + the chart, it may become possible to insert additional edges into + the chart; these are added to the queue. This process continues + until enough complete parses have been generated, or until the + queue is empty. + + The sorting order for the queue is not specified by + ``BottomUpProbabilisticChartParser``. Different sorting orders will + result in different search strategies. The sorting order for the + queue is defined by the method ``sort_queue``; subclasses are required + to provide a definition for this method. + + :type _grammar: PCFG + :ivar _grammar: The grammar used to parse sentences. + :type _trace: int + :ivar _trace: The level of tracing output that should be generated + when parsing a text. + """ + + def __init__(self, grammar, beam_size=0, trace=0): + """ + Create a new ``BottomUpProbabilisticChartParser``, that uses + ``grammar`` to parse texts. + + :type grammar: PCFG + :param grammar: The grammar used to parse texts. + :type beam_size: int + :param beam_size: The maximum length for the parser's edge queue. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + """ + if not isinstance(grammar, PCFG): + raise ValueError("The grammar must be probabilistic PCFG") + self._grammar = grammar + self.beam_size = beam_size + self._trace = trace + + def grammar(self): + return self._grammar + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + self._trace = trace + + # TODO: change this to conform more with the standard ChartParser + def parse(self, tokens): + self._grammar.check_coverage(tokens) + chart = Chart(list(tokens)) + grammar = self._grammar + + # Chart parser rules. + bu_init = ProbabilisticBottomUpInitRule() + bu = ProbabilisticBottomUpPredictRule() + fr = SingleEdgeProbabilisticFundamentalRule() + + # Our queue + queue = [] + + # Initialize the chart. 
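+        # The init rule adds one leaf edge per input token; every edge it
+        # yields is also placed on the queue so the main loop below can
+        # combine it with the grammar's productions.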
+ for edge in bu_init.apply(chart, grammar): + if self._trace > 1: + print( + " %-50s [%s]" + % (chart.pretty_format_edge(edge, width=2), edge.prob()) + ) + queue.append(edge) + + while len(queue) > 0: + # Re-sort the queue. + self.sort_queue(queue, chart) + + # Prune the queue to the correct size if a beam was defined + if self.beam_size: + self._prune(queue, chart) + + # Get the best edge. + edge = queue.pop() + if self._trace > 0: + print( + " %-50s [%s]" + % (chart.pretty_format_edge(edge, width=2), edge.prob()) + ) + + # Apply BU & FR to it. + queue.extend(bu.apply(chart, grammar, edge)) + queue.extend(fr.apply(chart, grammar, edge)) + + # Get a list of complete parses. + parses = list(chart.parses(grammar.start(), ProbabilisticTree)) + + # Assign probabilities to the trees. + prod_probs = {} + for prod in grammar.productions(): + prod_probs[prod.lhs(), prod.rhs()] = prod.prob() + for parse in parses: + self._setprob(parse, prod_probs) + + # Sort by probability + parses.sort(reverse=True, key=lambda tree: tree.prob()) + + return iter(parses) + + def _setprob(self, tree, prod_probs): + if tree.prob() is not None: + return + + # Get the prob of the CFG production. + lhs = Nonterminal(tree.label()) + rhs = [] + for child in tree: + if isinstance(child, Tree): + rhs.append(Nonterminal(child.label())) + else: + rhs.append(child) + prob = prod_probs[lhs, tuple(rhs)] + + # Get the probs of children. + for child in tree: + if isinstance(child, Tree): + self._setprob(child, prod_probs) + prob *= child.prob() + + tree.set_prob(prob) + + def sort_queue(self, queue, chart): + """ + Sort the given queue of ``Edge`` objects, placing the edge that should + be tried first at the beginning of the queue. This method + will be called after each ``Edge`` is added to the queue. + + :param queue: The queue of ``Edge`` objects to sort. Each edge in + this queue is an edge that could be added to the chart by + the fundamental rule; but that has not yet been added. + :type queue: list(Edge) + :param chart: The chart being used to parse the text. This + chart can be used to provide extra information for sorting + the queue. + :type chart: Chart + :rtype: None + """ + raise NotImplementedError() + + def _prune(self, queue, chart): + """Discard items in the queue if the queue is longer than the beam.""" + if len(queue) > self.beam_size: + split = len(queue) - self.beam_size + if self._trace > 2: + for edge in queue[:split]: + print(" %-50s [DISCARDED]" % chart.pretty_format_edge(edge, 2)) + del queue[:split] + + +class InsideChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries edges in descending + order of the inside probabilities of their trees. The "inside + probability" of a tree is simply the + probability of the entire tree, ignoring its context. In + particular, the inside probability of a tree generated by + production *p* with children *c[1], c[2], ..., c[n]* is + *P(p)P(c[1])P(c[2])...P(c[n])*; and the inside + probability of a token is 1 if it is present in the text, and 0 if + it is absent. + + This sorting order results in a type of lowest-cost-first search + strategy. + """ + + # Inherit constructor. + def sort_queue(self, queue, chart): + """ + Sort the given queue of edges, in descending order of the + inside probabilities of the edges' trees. + + :param queue: The queue of ``Edge`` objects to sort. Each edge in + this queue is an edge that could be added to the chart by + the fundamental rule; but that has not yet been added. 
+ :type queue: list(Edge) + :param chart: The chart being used to parse the text. This + chart can be used to provide extra information for sorting + the queue. + :type chart: Chart + :rtype: None + """ + queue.sort(key=lambda edge: edge.prob()) + + +# Eventually, this will become some sort of inside-outside parser: +# class InsideOutsideParser(BottomUpProbabilisticChartParser): +# def __init__(self, grammar, trace=0): +# # Inherit docs. +# BottomUpProbabilisticChartParser.__init__(self, grammar, trace) +# +# # Find the best path from S to each nonterminal +# bestp = {} +# for production in grammar.productions(): bestp[production.lhs()]=0 +# bestp[grammar.start()] = 1.0 +# +# for i in range(len(grammar.productions())): +# for production in grammar.productions(): +# lhs = production.lhs() +# for elt in production.rhs(): +# bestp[elt] = max(bestp[lhs]*production.prob(), +# bestp.get(elt,0)) +# +# self._bestp = bestp +# for (k,v) in self._bestp.items(): print(k,v) +# +# def _sortkey(self, edge): +# return edge.structure()[PROB] * self._bestp[edge.lhs()] +# +# def sort_queue(self, queue, chart): +# queue.sort(key=self._sortkey) + + +class RandomChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries edges in random order. + This sorting order results in a random search strategy. + """ + + # Inherit constructor + def sort_queue(self, queue, chart): + i = random.randint(0, len(queue) - 1) + (queue[-1], queue[i]) = (queue[i], queue[-1]) + + +class UnsortedChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries edges in whatever order. + """ + + # Inherit constructor + def sort_queue(self, queue, chart): + return + + +class LongestChartParser(BottomUpProbabilisticChartParser): + """ + A bottom-up parser for ``PCFG`` grammars that tries longer edges before + shorter ones. This sorting order results in a type of best-first + search strategy. + """ + + # Inherit constructor + def sort_queue(self, queue, chart): + queue.sort(key=lambda edge: edge.length()) + + +##////////////////////////////////////////////////////// +## Test Code +##////////////////////////////////////////////////////// + + +def demo(choice=None, draw_parses=None, print_parses=None): + """ + A demonstration of the probabilistic parsers. The user is + prompted to select which demo to run, and how many parses should + be found; and then each parser is run on the same demo, and a + summary of the results are displayed. + """ + import sys + import time + + from nltk import tokenize + from nltk.parse import pchart + + # Define two demos. Each demo has a sentence and a grammar. 
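+    # Both grammars below use the string format accepted by PCFG.fromstring();
+    # the bracketed numbers are production probabilities, and the productions
+    # sharing a left-hand side must sum to 1 (up to a small tolerance).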
+ toy_pcfg1 = PCFG.fromstring( + """ + S -> NP VP [1.0] + NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] + Det -> 'the' [0.8] | 'my' [0.2] + N -> 'man' [0.5] | 'telescope' [0.5] + VP -> VP PP [0.1] | V NP [0.7] | V [0.2] + V -> 'ate' [0.35] | 'saw' [0.65] + PP -> P NP [1.0] + P -> 'with' [0.61] | 'under' [0.39] + """ + ) + + toy_pcfg2 = PCFG.fromstring( + """ + S -> NP VP [1.0] + VP -> V NP [.59] + VP -> V [.40] + VP -> VP PP [.01] + NP -> Det N [.41] + NP -> Name [.28] + NP -> NP PP [.31] + PP -> P NP [1.0] + V -> 'saw' [.21] + V -> 'ate' [.51] + V -> 'ran' [.28] + N -> 'boy' [.11] + N -> 'cookie' [.12] + N -> 'table' [.13] + N -> 'telescope' [.14] + N -> 'hill' [.5] + Name -> 'Jack' [.52] + Name -> 'Bob' [.48] + P -> 'with' [.61] + P -> 'under' [.39] + Det -> 'the' [.41] + Det -> 'a' [.31] + Det -> 'my' [.28] + """ + ) + + demos = [ + ("I saw John with my telescope", toy_pcfg1), + ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2), + ] + + if choice is None: + # Ask the user which demo they want to use. + print() + for i in range(len(demos)): + print(f"{i + 1:>3}: {demos[i][0]}") + print(" %r" % demos[i][1]) + print() + print("Which demo (%d-%d)? " % (1, len(demos)), end=" ") + choice = int(sys.stdin.readline().strip()) - 1 + try: + sent, grammar = demos[choice] + except: + print("Bad sentence number") + return + + # Tokenize the sentence. + tokens = sent.split() + + # Define a list of parsers. We'll use all parsers. + parsers = [ + pchart.InsideChartParser(grammar), + pchart.RandomChartParser(grammar), + pchart.UnsortedChartParser(grammar), + pchart.LongestChartParser(grammar), + pchart.InsideChartParser(grammar, beam_size=len(tokens) + 1), # was BeamParser + ] + + # Run the parsers on the tokenized sentence. + times = [] + average_p = [] + num_parses = [] + all_parses = {} + for parser in parsers: + print(f"\ns: {sent}\nparser: {parser}\ngrammar: {grammar}") + parser.trace(3) + t = time.time() + parses = list(parser.parse(tokens)) + times.append(time.time() - t) + p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0 + average_p.append(p) + num_parses.append(len(parses)) + for p in parses: + all_parses[p.freeze()] = 1 + + # Print some summary statistics + print() + print(" Parser Beam | Time (secs) # Parses Average P(parse)") + print("------------------------+------------------------------------------") + for i in range(len(parsers)): + print( + "%18s %4d |%11.4f%11d%19.14f" + % ( + parsers[i].__class__.__name__, + parsers[i].beam_size, + times[i], + num_parses[i], + average_p[i], + ) + ) + parses = all_parses.keys() + if parses: + p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) + else: + p = 0 + print("------------------------+------------------------------------------") + print("%18s |%11s%11d%19.14f" % ("(All Parses)", "n/a", len(parses), p)) + + if draw_parses is None: + # Ask the user if we should draw the parses. + print() + print("Draw parses (y/n)? ", end=" ") + draw_parses = sys.stdin.readline().strip().lower().startswith("y") + if draw_parses: + from nltk.draw.tree import draw_trees + + print(" please wait...") + draw_trees(*parses) + + if print_parses is None: + # Ask the user if we should print the parses. + print() + print("Print parses (y/n)? 
", end=" ") + print_parses = sys.stdin.readline().strip().lower().startswith("y") + if print_parses: + for parse in parses: + print(parse) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py new file mode 100644 index 0000000000000000000000000000000000000000..9e4e3ba4d6d8e19820de6d527d5847e365e018d7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/projectivedependencyparser.py @@ -0,0 +1,716 @@ +# Natural Language Toolkit: Dependency Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jason Narad +# +# URL: +# For license information, see LICENSE.TXT +# + +from collections import defaultdict +from functools import total_ordering +from itertools import chain + +from nltk.grammar import ( + DependencyGrammar, + DependencyProduction, + ProbabilisticDependencyGrammar, +) +from nltk.internals import raise_unorderable_types +from nltk.parse.dependencygraph import DependencyGraph + +################################################################# +# Dependency Span +################################################################# + + +@total_ordering +class DependencySpan: + """ + A contiguous span over some part of the input string representing + dependency (head -> modifier) relationships amongst words. An atomic + span corresponds to only one word so it isn't a 'span' in the conventional + sense, as its _start_index = _end_index = _head_index for concatenation + purposes. All other spans are assumed to have arcs between all nodes + within the start and end indexes of the span, and one head index corresponding + to the head word for the entire span. This is the same as the root node if + the dependency structure were depicted as a graph. + """ + + def __init__(self, start_index, end_index, head_index, arcs, tags): + self._start_index = start_index + self._end_index = end_index + self._head_index = head_index + self._arcs = arcs + self._tags = tags + self._comparison_key = (start_index, end_index, head_index, tuple(arcs)) + self._hash = hash(self._comparison_key) + + def head_index(self): + """ + :return: An value indexing the head of the entire ``DependencySpan``. + :rtype: int + """ + return self._head_index + + def __repr__(self): + """ + :return: A concise string representatino of the ``DependencySpan``. + :rtype: str. + """ + return "Span %d-%d; Head Index: %d" % ( + self._start_index, + self._end_index, + self._head_index, + ) + + def __str__(self): + """ + :return: A verbose string representation of the ``DependencySpan``. + :rtype: str + """ + str = "Span %d-%d; Head Index: %d" % ( + self._start_index, + self._end_index, + self._head_index, + ) + for i in range(len(self._arcs)): + str += "\n%d <- %d, %s" % (i, self._arcs[i], self._tags[i]) + return str + + def __eq__(self, other): + return ( + type(self) == type(other) and self._comparison_key == other._comparison_key + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, DependencySpan): + raise_unorderable_types("<", self, other) + return self._comparison_key < other._comparison_key + + def __hash__(self): + """ + :return: The hash value of this ``DependencySpan``. 
+ """ + return self._hash + + +################################################################# +# Chart Cell +################################################################# + + +class ChartCell: + """ + A cell from the parse chart formed when performing the CYK algorithm. + Each cell keeps track of its x and y coordinates (though this will probably + be discarded), and a list of spans serving as the cell's entries. + """ + + def __init__(self, x, y): + """ + :param x: This cell's x coordinate. + :type x: int. + :param y: This cell's y coordinate. + :type y: int. + """ + self._x = x + self._y = y + self._entries = set() + + def add(self, span): + """ + Appends the given span to the list of spans + representing the chart cell's entries. + + :param span: The span to add. + :type span: DependencySpan + """ + self._entries.add(span) + + def __str__(self): + """ + :return: A verbose string representation of this ``ChartCell``. + :rtype: str. + """ + return "CC[%d,%d]: %s" % (self._x, self._y, self._entries) + + def __repr__(self): + """ + :return: A concise string representation of this ``ChartCell``. + :rtype: str. + """ + return "%s" % self + + +################################################################# +# Parsing with Dependency Grammars +################################################################# + + +class ProjectiveDependencyParser: + """ + A projective, rule-based, dependency parser. A ProjectiveDependencyParser + is created with a DependencyGrammar, a set of productions specifying + word-to-word dependency relations. The parse() method will then + return the set of all parses, in tree representation, for a given input + sequence of tokens. Each parse must meet the requirements of the both + the grammar and the projectivity constraint which specifies that the + branches of the dependency tree are not allowed to cross. Alternatively, + this can be understood as stating that each parent node and its children + in the parse tree form a continuous substring of the input sequence. + """ + + def __init__(self, dependency_grammar): + """ + Create a new ProjectiveDependencyParser, from a word-to-word + dependency grammar ``DependencyGrammar``. + + :param dependency_grammar: A word-to-word relation dependencygrammar. + :type dependency_grammar: DependencyGrammar + """ + self._grammar = dependency_grammar + + def parse(self, tokens): + """ + Performs a projective dependency parse on the list of tokens using + a chart-based, span-concatenation algorithm similar to Eisner (1996). + + :param tokens: The list of input tokens. + :type tokens: list(str) + :return: An iterator over parse trees. 
+ :rtype: iter(Tree) + """ + self._tokens = list(tokens) + chart = [] + for i in range(0, len(self._tokens) + 1): + chart.append([]) + for j in range(0, len(self._tokens) + 1): + chart[i].append(ChartCell(i, j)) + if i == j + 1: + chart[i][j].add(DependencySpan(i - 1, i, i - 1, [-1], ["null"])) + + for i in range(1, len(self._tokens) + 1): + for j in range(i - 2, -1, -1): + for k in range(i - 1, j, -1): + for span1 in chart[k][j]._entries: + for span2 in chart[i][k]._entries: + for newspan in self.concatenate(span1, span2): + chart[i][j].add(newspan) + + for parse in chart[len(self._tokens)][0]._entries: + conll_format = "" + # malt_format = "" + for i in range(len(tokens)): + # malt_format += '%s\t%s\t%d\t%s\n' % (tokens[i], 'null', parse._arcs[i] + 1, 'null') + # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], 'null', 'null', 'null', parse._arcs[i] + 1, 'null', '-', '-') + # Modify to comply with the new Dependency Graph requirement (at least must have an root elements) + conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % ( + i + 1, + tokens[i], + tokens[i], + "null", + "null", + "null", + parse._arcs[i] + 1, + "ROOT", + "-", + "-", + ) + dg = DependencyGraph(conll_format) + # if self.meets_arity(dg): + yield dg.tree() + + def concatenate(self, span1, span2): + """ + Concatenates the two spans in whichever way possible. This + includes rightward concatenation (from the leftmost word of the + leftmost span to the rightmost word of the rightmost span) and + leftward concatenation (vice-versa) between adjacent spans. Unlike + Eisner's presentation of span concatenation, these spans do not + share or pivot on a particular word/word-index. + + :return: A list of new spans formed through concatenation. + :rtype: list(DependencySpan) + """ + spans = [] + if span1._start_index == span2._start_index: + print("Error: Mismatched spans - replace this with thrown error") + if span1._start_index > span2._start_index: + temp_span = span1 + span1 = span2 + span2 = temp_span + # adjacent rightward covered concatenation + new_arcs = span1._arcs + span2._arcs + new_tags = span1._tags + span2._tags + if self._grammar.contains( + self._tokens[span1._head_index], self._tokens[span2._head_index] + ): + # print('Performing rightward cover %d to %d' % (span1._head_index, span2._head_index)) + new_arcs[span2._head_index - span1._start_index] = span1._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span1._head_index, + new_arcs, + new_tags, + ) + ) + # adjacent leftward covered concatenation + new_arcs = span1._arcs + span2._arcs + if self._grammar.contains( + self._tokens[span2._head_index], self._tokens[span1._head_index] + ): + # print('performing leftward cover %d to %d' % (span2._head_index, span1._head_index)) + new_arcs[span1._head_index - span1._start_index] = span2._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span2._head_index, + new_arcs, + new_tags, + ) + ) + return spans + + +################################################################# +# Parsing with Probabilistic Dependency Grammars +################################################################# + + +class ProbabilisticProjectiveDependencyParser: + """A probabilistic, projective dependency parser. + + This parser returns the most probable projective parse derived from the + probabilistic dependency grammar derived from the train() method. 
The + probabilistic model is an implementation of Eisner's (1996) Model C, which + conditions on head-word, head-tag, child-word, and child-tag. The decoding + uses a bottom-up chart-based span concatenation algorithm that's identical + to the one utilized by the rule-based projective parser. + + Usage example + + >>> from nltk.parse.dependencygraph import conll_data2 + + >>> graphs = [ + ... DependencyGraph(entry) for entry in conll_data2.split('\\n\\n') if entry + ... ] + + >>> ppdp = ProbabilisticProjectiveDependencyParser() + >>> ppdp.train(graphs) + + >>> sent = ['Cathy', 'zag', 'hen', 'wild', 'zwaaien', '.'] + >>> list(ppdp.parse(sent)) + [Tree('zag', ['Cathy', 'hen', Tree('zwaaien', ['wild', '.'])])] + + """ + + def __init__(self): + """ + Create a new probabilistic dependency parser. No additional + operations are necessary. + """ + + def parse(self, tokens): + """ + Parses the list of tokens subject to the projectivity constraint + and the productions in the parser's grammar. This uses a method + similar to the span-concatenation algorithm defined in Eisner (1996). + It returns the most probable parse derived from the parser's + probabilistic dependency grammar. + """ + self._tokens = list(tokens) + chart = [] + for i in range(0, len(self._tokens) + 1): + chart.append([]) + for j in range(0, len(self._tokens) + 1): + chart[i].append(ChartCell(i, j)) + if i == j + 1: + if tokens[i - 1] in self._grammar._tags: + for tag in self._grammar._tags[tokens[i - 1]]: + chart[i][j].add( + DependencySpan(i - 1, i, i - 1, [-1], [tag]) + ) + else: + print( + "No tag found for input token '%s', parse is impossible." + % tokens[i - 1] + ) + return [] + for i in range(1, len(self._tokens) + 1): + for j in range(i - 2, -1, -1): + for k in range(i - 1, j, -1): + for span1 in chart[k][j]._entries: + for span2 in chart[i][k]._entries: + for newspan in self.concatenate(span1, span2): + chart[i][j].add(newspan) + trees = [] + max_parse = None + max_score = 0 + for parse in chart[len(self._tokens)][0]._entries: + conll_format = "" + malt_format = "" + for i in range(len(tokens)): + malt_format += "%s\t%s\t%d\t%s\n" % ( + tokens[i], + "null", + parse._arcs[i] + 1, + "null", + ) + # conll_format += '\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n' % (i+1, tokens[i], tokens[i], parse._tags[i], parse._tags[i], 'null', parse._arcs[i] + 1, 'null', '-', '-') + # Modify to comply with recent change in dependency graph such that there must be a ROOT element. + conll_format += "\t%d\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n" % ( + i + 1, + tokens[i], + tokens[i], + parse._tags[i], + parse._tags[i], + "null", + parse._arcs[i] + 1, + "ROOT", + "-", + "-", + ) + dg = DependencyGraph(conll_format) + score = self.compute_prob(dg) + trees.append((score, dg.tree())) + trees.sort() + return (tree for (score, tree) in trees) + + def concatenate(self, span1, span2): + """ + Concatenates the two spans in whichever way possible. This + includes rightward concatenation (from the leftmost word of the + leftmost span to the rightmost word of the rightmost span) and + leftward concatenation (vice-versa) between adjacent spans. Unlike + Eisner's presentation of span concatenation, these spans do not + share or pivot on a particular word/word-index. + + :return: A list of new spans formed through concatenation. 
+ :rtype: list(DependencySpan) + """ + spans = [] + if span1._start_index == span2._start_index: + print("Error: Mismatched spans - replace this with thrown error") + if span1._start_index > span2._start_index: + temp_span = span1 + span1 = span2 + span2 = temp_span + # adjacent rightward covered concatenation + new_arcs = span1._arcs + span2._arcs + new_tags = span1._tags + span2._tags + if self._grammar.contains( + self._tokens[span1._head_index], self._tokens[span2._head_index] + ): + new_arcs[span2._head_index - span1._start_index] = span1._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span1._head_index, + new_arcs, + new_tags, + ) + ) + # adjacent leftward covered concatenation + new_arcs = span1._arcs + span2._arcs + new_tags = span1._tags + span2._tags + if self._grammar.contains( + self._tokens[span2._head_index], self._tokens[span1._head_index] + ): + new_arcs[span1._head_index - span1._start_index] = span2._head_index + spans.append( + DependencySpan( + span1._start_index, + span2._end_index, + span2._head_index, + new_arcs, + new_tags, + ) + ) + return spans + + def train(self, graphs): + """ + Trains a ProbabilisticDependencyGrammar based on the list of input + DependencyGraphs. This model is an implementation of Eisner's (1996) + Model C, which derives its statistics from head-word, head-tag, + child-word, and child-tag relationships. + + :param graphs: A list of dependency graphs to train from. + :type: list(DependencyGraph) + """ + productions = [] + events = defaultdict(int) + tags = {} + for dg in graphs: + for node_index in range(1, len(dg.nodes)): + # children = dg.nodes[node_index]['deps'] + children = list( + chain.from_iterable(dg.nodes[node_index]["deps"].values()) + ) + + nr_left_children = dg.left_children(node_index) + nr_right_children = dg.right_children(node_index) + nr_children = nr_left_children + nr_right_children + for child_index in range( + 0 - (nr_left_children + 1), nr_right_children + 2 + ): + head_word = dg.nodes[node_index]["word"] + head_tag = dg.nodes[node_index]["tag"] + if head_word in tags: + tags[head_word].add(head_tag) + else: + tags[head_word] = {head_tag} + child = "STOP" + child_tag = "STOP" + prev_word = "START" + prev_tag = "START" + if child_index < 0: + array_index = child_index + nr_left_children + if array_index >= 0: + child = dg.nodes[children[array_index]]["word"] + child_tag = dg.nodes[children[array_index]]["tag"] + if child_index != -1: + prev_word = dg.nodes[children[array_index + 1]]["word"] + prev_tag = dg.nodes[children[array_index + 1]]["tag"] + if child != "STOP": + productions.append(DependencyProduction(head_word, [child])) + head_event = "(head ({} {}) (mods ({}, {}, {}) left))".format( + child, + child_tag, + prev_tag, + head_word, + head_tag, + ) + mod_event = "(mods ({}, {}, {}) left))".format( + prev_tag, + head_word, + head_tag, + ) + events[head_event] += 1 + events[mod_event] += 1 + elif child_index > 0: + array_index = child_index + nr_left_children - 1 + if array_index < nr_children: + child = dg.nodes[children[array_index]]["word"] + child_tag = dg.nodes[children[array_index]]["tag"] + if child_index != 1: + prev_word = dg.nodes[children[array_index - 1]]["word"] + prev_tag = dg.nodes[children[array_index - 1]]["tag"] + if child != "STOP": + productions.append(DependencyProduction(head_word, [child])) + head_event = "(head ({} {}) (mods ({}, {}, {}) right))".format( + child, + child_tag, + prev_tag, + head_word, + head_tag, + ) + mod_event = "(mods ({}, {}, {}) 
right))".format( + prev_tag, + head_word, + head_tag, + ) + events[head_event] += 1 + events[mod_event] += 1 + self._grammar = ProbabilisticDependencyGrammar(productions, events, tags) + + def compute_prob(self, dg): + """ + Computes the probability of a dependency graph based + on the parser's probability model (defined by the parser's + statistical dependency grammar). + + :param dg: A dependency graph to score. + :type dg: DependencyGraph + :return: The probability of the dependency graph. + :rtype: int + """ + prob = 1.0 + for node_index in range(1, len(dg.nodes)): + # children = dg.nodes[node_index]['deps'] + children = list(chain.from_iterable(dg.nodes[node_index]["deps"].values())) + + nr_left_children = dg.left_children(node_index) + nr_right_children = dg.right_children(node_index) + nr_children = nr_left_children + nr_right_children + for child_index in range(0 - (nr_left_children + 1), nr_right_children + 2): + head_word = dg.nodes[node_index]["word"] + head_tag = dg.nodes[node_index]["tag"] + child = "STOP" + child_tag = "STOP" + prev_word = "START" + prev_tag = "START" + if child_index < 0: + array_index = child_index + nr_left_children + if array_index >= 0: + child = dg.nodes[children[array_index]]["word"] + child_tag = dg.nodes[children[array_index]]["tag"] + if child_index != -1: + prev_word = dg.nodes[children[array_index + 1]]["word"] + prev_tag = dg.nodes[children[array_index + 1]]["tag"] + head_event = "(head ({} {}) (mods ({}, {}, {}) left))".format( + child, + child_tag, + prev_tag, + head_word, + head_tag, + ) + mod_event = "(mods ({}, {}, {}) left))".format( + prev_tag, + head_word, + head_tag, + ) + h_count = self._grammar._events[head_event] + m_count = self._grammar._events[mod_event] + + # If the grammar is not covered + if m_count != 0: + prob *= h_count / m_count + else: + prob = 0.00000001 # Very small number + + elif child_index > 0: + array_index = child_index + nr_left_children - 1 + if array_index < nr_children: + child = dg.nodes[children[array_index]]["word"] + child_tag = dg.nodes[children[array_index]]["tag"] + if child_index != 1: + prev_word = dg.nodes[children[array_index - 1]]["word"] + prev_tag = dg.nodes[children[array_index - 1]]["tag"] + head_event = "(head ({} {}) (mods ({}, {}, {}) right))".format( + child, + child_tag, + prev_tag, + head_word, + head_tag, + ) + mod_event = "(mods ({}, {}, {}) right))".format( + prev_tag, + head_word, + head_tag, + ) + h_count = self._grammar._events[head_event] + m_count = self._grammar._events[mod_event] + + if m_count != 0: + prob *= h_count / m_count + else: + prob = 0.00000001 # Very small number + + return prob + + +################################################################# +# Demos +################################################################# + + +def demo(): + projective_rule_parse_demo() + # arity_parse_demo() + projective_prob_parse_demo() + + +def projective_rule_parse_demo(): + """ + A demonstration showing the creation and use of a + ``DependencyGrammar`` to perform a projective dependency + parse. + """ + grammar = DependencyGrammar.fromstring( + """ + 'scratch' -> 'cats' | 'walls' + 'walls' -> 'the' + 'cats' -> 'the' + """ + ) + print(grammar) + pdp = ProjectiveDependencyParser(grammar) + trees = pdp.parse(["the", "cats", "scratch", "the", "walls"]) + for tree in trees: + print(tree) + + +def arity_parse_demo(): + """ + A demonstration showing the creation of a ``DependencyGrammar`` + in which a specific number of modifiers is listed for a given + head. 
This can further constrain the number of possible parses + created by a ``ProjectiveDependencyParser``. + """ + print() + print("A grammar with no arity constraints. Each DependencyProduction") + print("specifies a relationship between one head word and only one") + print("modifier word.") + grammar = DependencyGrammar.fromstring( + """ + 'fell' -> 'price' | 'stock' + 'price' -> 'of' | 'the' + 'of' -> 'stock' + 'stock' -> 'the' + """ + ) + print(grammar) + + print() + print("For the sentence 'The price of the stock fell', this grammar") + print("will produce the following three parses:") + pdp = ProjectiveDependencyParser(grammar) + trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"]) + for tree in trees: + print(tree) + + print() + print("By contrast, the following grammar contains a ") + print("DependencyProduction that specifies a relationship") + print("between a single head word, 'price', and two modifier") + print("words, 'of' and 'the'.") + grammar = DependencyGrammar.fromstring( + """ + 'fell' -> 'price' | 'stock' + 'price' -> 'of' 'the' + 'of' -> 'stock' + 'stock' -> 'the' + """ + ) + print(grammar) + + print() + print( + "This constrains the number of possible parses to just one:" + ) # unimplemented, soon to replace + pdp = ProjectiveDependencyParser(grammar) + trees = pdp.parse(["the", "price", "of", "the", "stock", "fell"]) + for tree in trees: + print(tree) + + +def projective_prob_parse_demo(): + """ + A demo showing the training and use of a projective + dependency parser. + """ + from nltk.parse.dependencygraph import conll_data2 + + graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] + ppdp = ProbabilisticProjectiveDependencyParser() + print("Training Probabilistic Projective Dependency Parser...") + ppdp.train(graphs) + + sent = ["Cathy", "zag", "hen", "wild", "zwaaien", "."] + print("Parsing '", " ".join(sent), "'...") + print("Parse:") + for tree in ppdp.parse(sent): + print(tree) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/recursivedescent.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/recursivedescent.py new file mode 100644 index 0000000000000000000000000000000000000000..dc5d88c0884d8da7fdc52b044331ff0536bc19c4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/recursivedescent.py @@ -0,0 +1,684 @@ +# Natural Language Toolkit: Recursive Descent Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from nltk.grammar import Nonterminal +from nltk.parse.api import ParserI +from nltk.tree import ImmutableTree, Tree + + +##////////////////////////////////////////////////////// +## Recursive Descent Parser +##////////////////////////////////////////////////////// +class RecursiveDescentParser(ParserI): + """ + A simple top-down CFG parser that parses texts by recursively + expanding the fringe of a Tree, and matching it against a + text. + + ``RecursiveDescentParser`` uses a list of tree locations called a + "frontier" to remember which subtrees have not yet been expanded + and which leaves have not yet been matched against the text. Each + tree location consists of a list of child indices specifying the + path from the root of the tree to a subtree or a leaf; see the + reference documentation for Tree for more information + about tree locations. 
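+
+        A minimal usage sketch (``grammar`` here stands for an illustrative
+        ``CFG``, not one defined in this module)::
+
+            rd = RecursiveDescentParser(grammar)
+            for tree in rd.parse('the dog saw a man'.split()):
+                print(tree)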
+ + When the parser begins parsing a text, it constructs a tree + containing only the start symbol, and a frontier containing the + location of the tree's root node. It then extends the tree to + cover the text, using the following recursive procedure: + + - If the frontier is empty, and the text is covered by the tree, + then return the tree as a possible parse. + - If the frontier is empty, and the text is not covered by the + tree, then return no parses. + - If the first element of the frontier is a subtree, then + use CFG productions to "expand" it. For each applicable + production, add the expanded subtree's children to the + frontier, and recursively find all parses that can be + generated by the new tree and frontier. + - If the first element of the frontier is a token, then "match" + it against the next token from the text. Remove the token + from the frontier, and recursively find all parses that can be + generated by the new tree and frontier. + + :see: ``nltk.grammar`` + """ + + def __init__(self, grammar, trace=0): + """ + Create a new ``RecursiveDescentParser``, that uses ``grammar`` + to parse texts. + + :type grammar: CFG + :param grammar: The grammar used to parse texts. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + """ + self._grammar = grammar + self._trace = trace + + def grammar(self): + return self._grammar + + def parse(self, tokens): + # Inherit docs from ParserI + + tokens = list(tokens) + self._grammar.check_coverage(tokens) + + # Start a recursive descent parse, with an initial tree + # containing just the start symbol. + start = self._grammar.start().symbol() + initial_tree = Tree(start, []) + frontier = [()] + if self._trace: + self._trace_start(initial_tree, frontier, tokens) + return self._parse(tokens, initial_tree, frontier) + + def _parse(self, remaining_text, tree, frontier): + """ + Recursively expand and match each elements of ``tree`` + specified by ``frontier``, to cover ``remaining_text``. Return + a list of all parses found. + + :return: An iterator of all parses that can be generated by + matching and expanding the elements of ``tree`` + specified by ``frontier``. + :rtype: iter(Tree) + :type tree: Tree + :param tree: A partial structure for the text that is + currently being parsed. The elements of ``tree`` + that are specified by ``frontier`` have not yet been + expanded or matched. + :type remaining_text: list(str) + :param remaining_text: The portion of the text that is not yet + covered by ``tree``. + :type frontier: list(tuple(int)) + :param frontier: A list of the locations within ``tree`` of + all subtrees that have not yet been expanded, and all + leaves that have not yet been matched. This list sorted + in left-to-right order of location within the tree. + """ + + # If the tree covers the text, and there's nothing left to + # expand, then we've found a complete parse; return it. + if len(remaining_text) == 0 and len(frontier) == 0: + if self._trace: + self._trace_succeed(tree, frontier) + yield tree + + # If there's still text, but nothing left to expand, we failed. + elif len(frontier) == 0: + if self._trace: + self._trace_backtrack(tree, frontier) + + # If the next element on the frontier is a tree, expand it. + elif isinstance(tree[frontier[0]], Tree): + yield from self._expand(remaining_text, tree, frontier) + + # If the next element on the frontier is a token, match it. 
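+        # (A frontier entry that is not a Tree is a leaf awaiting a token;
+        # _match compares it against the next word of the remaining text.)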
+ else: + yield from self._match(remaining_text, tree, frontier) + + def _match(self, rtext, tree, frontier): + """ + :rtype: iter(Tree) + :return: an iterator of all parses that can be generated by + matching the first element of ``frontier`` against the + first token in ``rtext``. In particular, if the first + element of ``frontier`` has the same type as the first + token in ``rtext``, then substitute the token into + ``tree``; and return all parses that can be generated by + matching and expanding the remaining elements of + ``frontier``. If the first element of ``frontier`` does not + have the same type as the first token in ``rtext``, then + return empty list. + + :type tree: Tree + :param tree: A partial structure for the text that is + currently being parsed. The elements of ``tree`` + that are specified by ``frontier`` have not yet been + expanded or matched. + :type rtext: list(str) + :param rtext: The portion of the text that is not yet + covered by ``tree``. + :type frontier: list of tuple of int + :param frontier: A list of the locations within ``tree`` of + all subtrees that have not yet been expanded, and all + leaves that have not yet been matched. + """ + + tree_leaf = tree[frontier[0]] + if len(rtext) > 0 and tree_leaf == rtext[0]: + # If it's a terminal that matches rtext[0], then substitute + # in the token, and continue parsing. + newtree = tree.copy(deep=True) + newtree[frontier[0]] = rtext[0] + if self._trace: + self._trace_match(newtree, frontier[1:], rtext[0]) + yield from self._parse(rtext[1:], newtree, frontier[1:]) + else: + # If it's a non-matching terminal, fail. + if self._trace: + self._trace_backtrack(tree, frontier, rtext[:1]) + + def _expand(self, remaining_text, tree, frontier, production=None): + """ + :rtype: iter(Tree) + :return: An iterator of all parses that can be generated by + expanding the first element of ``frontier`` with + ``production``. In particular, if the first element of + ``frontier`` is a subtree whose node type is equal to + ``production``'s left hand side, then add a child to that + subtree for each element of ``production``'s right hand + side; and return all parses that can be generated by + matching and expanding the remaining elements of + ``frontier``. If the first element of ``frontier`` is not a + subtree whose node type is equal to ``production``'s left + hand side, then return an empty list. If ``production`` is + not specified, then return a list of all parses that can + be generated by expanding the first element of ``frontier`` + with *any* CFG production. + + :type tree: Tree + :param tree: A partial structure for the text that is + currently being parsed. The elements of ``tree`` + that are specified by ``frontier`` have not yet been + expanded or matched. + :type remaining_text: list(str) + :param remaining_text: The portion of the text that is not yet + covered by ``tree``. + :type frontier: list(tuple(int)) + :param frontier: A list of the locations within ``tree`` of + all subtrees that have not yet been expanded, and all + leaves that have not yet been matched. 
+ """ + + if production is None: + productions = self._grammar.productions() + else: + productions = [production] + + for production in productions: + lhs = production.lhs().symbol() + if lhs == tree[frontier[0]].label(): + subtree = self._production_to_tree(production) + if frontier[0] == (): + newtree = subtree + else: + newtree = tree.copy(deep=True) + newtree[frontier[0]] = subtree + new_frontier = [ + frontier[0] + (i,) for i in range(len(production.rhs())) + ] + if self._trace: + self._trace_expand(newtree, new_frontier, production) + yield from self._parse( + remaining_text, newtree, new_frontier + frontier[1:] + ) + + def _production_to_tree(self, production): + """ + :rtype: Tree + :return: The Tree that is licensed by ``production``. + In particular, given the production ``[lhs -> elt[1] ... elt[n]]`` + return a tree that has a node ``lhs.symbol``, and + ``n`` children. For each nonterminal element + ``elt[i]`` in the production, the tree token has a + childless subtree with node value ``elt[i].symbol``; and + for each terminal element ``elt[j]``, the tree token has + a leaf token with type ``elt[j]``. + + :param production: The CFG production that licenses the tree + token that should be returned. + :type production: Production + """ + children = [] + for elt in production.rhs(): + if isinstance(elt, Nonterminal): + children.append(Tree(elt.symbol(), [])) + else: + # This will be matched. + children.append(elt) + return Tree(production.lhs().symbol(), children) + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + self._trace = trace + + def _trace_fringe(self, tree, treeloc=None): + """ + Print trace output displaying the fringe of ``tree``. The + fringe of ``tree`` consists of all of its leaves and all of + its childless subtrees. + + :rtype: None + """ + + if treeloc == (): + print("*", end=" ") + if isinstance(tree, Tree): + if len(tree) == 0: + print(repr(Nonterminal(tree.label())), end=" ") + for i in range(len(tree)): + if treeloc is not None and i == treeloc[0]: + self._trace_fringe(tree[i], treeloc[1:]) + else: + self._trace_fringe(tree[i]) + else: + print(repr(tree), end=" ") + + def _trace_tree(self, tree, frontier, operation): + """ + Print trace output displaying the parser's current state. + + :param operation: A character identifying the operation that + generated the current state. 
+ :rtype: None + """ + if self._trace == 2: + print(" %c [" % operation, end=" ") + else: + print(" [", end=" ") + if len(frontier) > 0: + self._trace_fringe(tree, frontier[0]) + else: + self._trace_fringe(tree) + print("]") + + def _trace_start(self, tree, frontier, text): + print("Parsing %r" % " ".join(text)) + if self._trace > 2: + print("Start:") + if self._trace > 1: + self._trace_tree(tree, frontier, " ") + + def _trace_expand(self, tree, frontier, production): + if self._trace > 2: + print("Expand: %s" % production) + if self._trace > 1: + self._trace_tree(tree, frontier, "E") + + def _trace_match(self, tree, frontier, tok): + if self._trace > 2: + print("Match: %r" % tok) + if self._trace > 1: + self._trace_tree(tree, frontier, "M") + + def _trace_succeed(self, tree, frontier): + if self._trace > 2: + print("GOOD PARSE:") + if self._trace == 1: + print("Found a parse:\n%s" % tree) + if self._trace > 1: + self._trace_tree(tree, frontier, "+") + + def _trace_backtrack(self, tree, frontier, toks=None): + if self._trace > 2: + if toks: + print("Backtrack: %r match failed" % toks[0]) + else: + print("Backtrack") + + +##////////////////////////////////////////////////////// +## Stepping Recursive Descent Parser +##////////////////////////////////////////////////////// +class SteppingRecursiveDescentParser(RecursiveDescentParser): + """ + A ``RecursiveDescentParser`` that allows you to step through the + parsing process, performing a single operation at a time. + + The ``initialize`` method is used to start parsing a text. + ``expand`` expands the first element on the frontier using a single + CFG production, and ``match`` matches the first element on the + frontier against the next text token. ``backtrack`` undoes the most + recent expand or match operation. ``step`` performs a single + expand, match, or backtrack operation. ``parses`` returns the set + of parses that have been found by the parser. + + :ivar _history: A list of ``(rtext, tree, frontier)`` tripples, + containing the previous states of the parser. This history is + used to implement the ``backtrack`` operation. + :ivar _tried_e: A record of all productions that have been tried + for a given tree. This record is used by ``expand`` to perform + the next untried production. + :ivar _tried_m: A record of what tokens have been matched for a + given tree. This record is used by ``step`` to decide whether + or not to match a token. + :see: ``nltk.grammar`` + """ + + def __init__(self, grammar, trace=0): + super().__init__(grammar, trace) + self._rtext = None + self._tree = None + self._frontier = [()] + self._tried_e = {} + self._tried_m = {} + self._history = [] + self._parses = [] + + # [XX] TEMPORARY HACK WARNING! This should be replaced with + # something nicer when we get the chance. + def _freeze(self, tree): + c = tree.copy() + # for pos in c.treepositions('leaves'): + # c[pos] = c[pos].freeze() + return ImmutableTree.convert(c) + + def parse(self, tokens): + tokens = list(tokens) + self.initialize(tokens) + while self.step() is not None: + pass + return self.parses() + + def initialize(self, tokens): + """ + Start parsing a given text. This sets the parser's tree to + the start symbol, its frontier to the root node, and its + remaining text to ``token['SUBTOKENS']``. 
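+
+        A minimal sketch of driving the parser step by step (the grammar and
+        sentence are illustrative assumptions, not fixtures from this
+        module)::
+
+            sr = SteppingRecursiveDescentParser(grammar)
+            sr.initialize('the dog saw a man'.split())
+            while sr.step() is not None:
+                pass
+            found = list(sr.parses())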
+ """ + + self._rtext = tokens + start = self._grammar.start().symbol() + self._tree = Tree(start, []) + self._frontier = [()] + self._tried_e = {} + self._tried_m = {} + self._history = [] + self._parses = [] + if self._trace: + self._trace_start(self._tree, self._frontier, self._rtext) + + def remaining_text(self): + """ + :return: The portion of the text that is not yet covered by the + tree. + :rtype: list(str) + """ + return self._rtext + + def frontier(self): + """ + :return: A list of the tree locations of all subtrees that + have not yet been expanded, and all leaves that have not + yet been matched. + :rtype: list(tuple(int)) + """ + return self._frontier + + def tree(self): + """ + :return: A partial structure for the text that is + currently being parsed. The elements specified by the + frontier have not yet been expanded or matched. + :rtype: Tree + """ + return self._tree + + def step(self): + """ + Perform a single parsing operation. If an untried match is + possible, then perform the match, and return the matched + token. If an untried expansion is possible, then perform the + expansion, and return the production that it is based on. If + backtracking is possible, then backtrack, and return True. + Otherwise, return None. + + :return: None if no operation was performed; a token if a match + was performed; a production if an expansion was performed; + and True if a backtrack operation was performed. + :rtype: Production or String or bool + """ + # Try matching (if we haven't already) + if self.untried_match(): + token = self.match() + if token is not None: + return token + + # Try expanding. + production = self.expand() + if production is not None: + return production + + # Try backtracking + if self.backtrack(): + self._trace_backtrack(self._tree, self._frontier) + return True + + # Nothing left to do. + return None + + def expand(self, production=None): + """ + Expand the first element of the frontier. In particular, if + the first element of the frontier is a subtree whose node type + is equal to ``production``'s left hand side, then add a child + to that subtree for each element of ``production``'s right hand + side. If ``production`` is not specified, then use the first + untried expandable production. If all expandable productions + have been tried, do nothing. + + :return: The production used to expand the frontier, if an + expansion was performed. If no expansion was performed, + return None. + :rtype: Production or None + """ + + # Make sure we *can* expand. + if len(self._frontier) == 0: + return None + if not isinstance(self._tree[self._frontier[0]], Tree): + return None + + # If they didn't specify a production, check all untried ones. + if production is None: + productions = self.untried_expandable_productions() + else: + productions = [production] + + parses = [] + for prod in productions: + # Record that we've tried this production now. + self._tried_e.setdefault(self._freeze(self._tree), []).append(prod) + + # Try expanding. + for _result in self._expand(self._rtext, self._tree, self._frontier, prod): + return prod + + # We didn't expand anything. + return None + + def match(self): + """ + Match the first element of the frontier. In particular, if + the first element of the frontier has the same type as the + next text token, then substitute the text token into the tree. + + :return: The token matched, if a match operation was + performed. If no match was performed, return None + :rtype: str or None + """ + + # Record that we've tried matching this token. 
+ tok = self._rtext[0] + self._tried_m.setdefault(self._freeze(self._tree), []).append(tok) + + # Make sure we *can* match. + if len(self._frontier) == 0: + return None + if isinstance(self._tree[self._frontier[0]], Tree): + return None + + for _result in self._match(self._rtext, self._tree, self._frontier): + # Return the token we just matched. + return self._history[-1][0][0] + return None + + def backtrack(self): + """ + Return the parser to its state before the most recent + match or expand operation. Calling ``undo`` repeatedly return + the parser to successively earlier states. If no match or + expand operations have been performed, ``undo`` will make no + changes. + + :return: true if an operation was successfully undone. + :rtype: bool + """ + if len(self._history) == 0: + return False + (self._rtext, self._tree, self._frontier) = self._history.pop() + return True + + def expandable_productions(self): + """ + :return: A list of all the productions for which expansions + are available for the current parser state. + :rtype: list(Production) + """ + # Make sure we *can* expand. + if len(self._frontier) == 0: + return [] + frontier_child = self._tree[self._frontier[0]] + if len(self._frontier) == 0 or not isinstance(frontier_child, Tree): + return [] + + return [ + p + for p in self._grammar.productions() + if p.lhs().symbol() == frontier_child.label() + ] + + def untried_expandable_productions(self): + """ + :return: A list of all the untried productions for which + expansions are available for the current parser state. + :rtype: list(Production) + """ + + tried_expansions = self._tried_e.get(self._freeze(self._tree), []) + return [p for p in self.expandable_productions() if p not in tried_expansions] + + def untried_match(self): + """ + :return: Whether the first element of the frontier is a token + that has not yet been matched. + :rtype: bool + """ + + if len(self._rtext) == 0: + return False + tried_matches = self._tried_m.get(self._freeze(self._tree), []) + return self._rtext[0] not in tried_matches + + def currently_complete(self): + """ + :return: Whether the parser's current state represents a + complete parse. + :rtype: bool + """ + return len(self._frontier) == 0 and len(self._rtext) == 0 + + def _parse(self, remaining_text, tree, frontier): + """ + A stub version of ``_parse`` that sets the parsers current + state to the given arguments. In ``RecursiveDescentParser``, + the ``_parse`` method is used to recursively continue parsing a + text. ``SteppingRecursiveDescentParser`` overrides it to + capture these recursive calls. It records the parser's old + state in the history (to allow for backtracking), and updates + the parser's new state using the given arguments. Finally, it + returns ``[1]``, which is used by ``match`` and ``expand`` to + detect whether their operations were successful. + + :return: ``[1]`` + :rtype: list of int + """ + self._history.append((self._rtext, self._tree, self._frontier)) + self._rtext = remaining_text + self._tree = tree + self._frontier = frontier + + # Is it a good parse? If so, record it. + if len(frontier) == 0 and len(remaining_text) == 0: + self._parses.append(tree) + self._trace_succeed(self._tree, self._frontier) + + return [1] + + def parses(self): + """ + :return: An iterator of the parses that have been found by this + parser so far. + :rtype: list of Tree + """ + return iter(self._parses) + + def set_grammar(self, grammar): + """ + Change the grammar used to parse texts. + + :param grammar: The new grammar. 
+ :type grammar: CFG + """ + self._grammar = grammar + + +##////////////////////////////////////////////////////// +## Demonstration Code +##////////////////////////////////////////////////////// + + +def demo(): + """ + A demonstration of the recursive descent parser. + """ + + from nltk import CFG, parse + + grammar = CFG.fromstring( + """ + S -> NP VP + NP -> Det N | Det N PP + VP -> V NP | V NP PP + PP -> P NP + NP -> 'I' + N -> 'man' | 'park' | 'telescope' | 'dog' + Det -> 'the' | 'a' + P -> 'in' | 'with' + V -> 'saw' + """ + ) + + for prod in grammar.productions(): + print(prod) + + sent = "I saw a man in the park".split() + parser = parse.RecursiveDescentParser(grammar, trace=2) + for p in parser.parse(sent): + print(p) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/shiftreduce.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/shiftreduce.py new file mode 100644 index 0000000000000000000000000000000000000000..bf18342573a14f18ca3918580e22d81f82c896cd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/shiftreduce.py @@ -0,0 +1,479 @@ +# Natural Language Toolkit: Shift-Reduce Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from nltk.grammar import Nonterminal +from nltk.parse.api import ParserI +from nltk.tree import Tree + + +##////////////////////////////////////////////////////// +## Shift/Reduce Parser +##////////////////////////////////////////////////////// +class ShiftReduceParser(ParserI): + """ + A simple bottom-up CFG parser that uses two operations, "shift" + and "reduce", to find a single parse for a text. + + ``ShiftReduceParser`` maintains a stack, which records the + structure of a portion of the text. This stack is a list of + strings and Trees that collectively cover a portion of + the text. For example, while parsing the sentence "the dog saw + the man" with a typical grammar, ``ShiftReduceParser`` will produce + the following stack, which covers "the dog saw":: + + [(NP: (Det: 'the') (N: 'dog')), (V: 'saw')] + + ``ShiftReduceParser`` attempts to extend the stack to cover the + entire text, and to combine the stack elements into a single tree, + producing a complete parse for the sentence. + + Initially, the stack is empty. It is extended to cover the text, + from left to right, by repeatedly applying two operations: + + - "shift" moves a token from the beginning of the text to the + end of the stack. + - "reduce" uses a CFG production to combine the rightmost stack + elements into a single Tree. + + Often, more than one operation can be performed on a given stack. + In this case, ``ShiftReduceParser`` uses the following heuristics + to decide which operation to perform: + + - Only shift if no reductions are available. + - If multiple reductions are available, then apply the reduction + whose CFG production is listed earliest in the grammar. + + Note that these heuristics are not guaranteed to choose an + operation that leads to a parse of the text. Also, if multiple + parses exists, ``ShiftReduceParser`` will return at most one of + them. + + :see: ``nltk.grammar`` + """ + + def __init__(self, grammar, trace=0): + """ + Create a new ``ShiftReduceParser``, that uses ``grammar`` to + parse texts. + + :type grammar: Grammar + :param grammar: The grammar used to parse texts. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. 
``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + """ + self._grammar = grammar + self._trace = trace + self._check_grammar() + + def grammar(self): + return self._grammar + + def parse(self, tokens): + tokens = list(tokens) + self._grammar.check_coverage(tokens) + + # initialize the stack. + stack = [] + remaining_text = tokens + + # Trace output. + if self._trace: + print("Parsing %r" % " ".join(tokens)) + self._trace_stack(stack, remaining_text) + + # iterate through the text, pushing the token onto + # the stack, then reducing the stack. + while len(remaining_text) > 0: + self._shift(stack, remaining_text) + while self._reduce(stack, remaining_text): + pass + + # Did we reduce everything? + if len(stack) == 1: + # Did we end up with the right category? + if stack[0].label() == self._grammar.start().symbol(): + yield stack[0] + + def _shift(self, stack, remaining_text): + """ + Move a token from the beginning of ``remaining_text`` to the + end of ``stack``. + + :type stack: list(str and Tree) + :param stack: A list of strings and Trees, encoding + the structure of the text that has been parsed so far. + :type remaining_text: list(str) + :param remaining_text: The portion of the text that is not yet + covered by ``stack``. + :rtype: None + """ + stack.append(remaining_text[0]) + remaining_text.remove(remaining_text[0]) + if self._trace: + self._trace_shift(stack, remaining_text) + + def _match_rhs(self, rhs, rightmost_stack): + """ + :rtype: bool + :return: true if the right hand side of a CFG production + matches the rightmost elements of the stack. ``rhs`` + matches ``rightmost_stack`` if they are the same length, + and each element of ``rhs`` matches the corresponding + element of ``rightmost_stack``. A nonterminal element of + ``rhs`` matches any Tree whose node value is equal + to the nonterminal's symbol. A terminal element of ``rhs`` + matches any string whose type is equal to the terminal. + :type rhs: list(terminal and Nonterminal) + :param rhs: The right hand side of a CFG production. + :type rightmost_stack: list(string and Tree) + :param rightmost_stack: The rightmost elements of the parser's + stack. + """ + + if len(rightmost_stack) != len(rhs): + return False + for i in range(len(rightmost_stack)): + if isinstance(rightmost_stack[i], Tree): + if not isinstance(rhs[i], Nonterminal): + return False + if rightmost_stack[i].label() != rhs[i].symbol(): + return False + else: + if isinstance(rhs[i], Nonterminal): + return False + if rightmost_stack[i] != rhs[i]: + return False + return True + + def _reduce(self, stack, remaining_text, production=None): + """ + Find a CFG production whose right hand side matches the + rightmost stack elements; and combine those stack elements + into a single Tree, with the node specified by the + production's left-hand side. If more than one CFG production + matches the stack, then use the production that is listed + earliest in the grammar. The new Tree replaces the + elements in the stack. + + :rtype: Production or None + :return: If a reduction is performed, then return the CFG + production that the reduction is based on; otherwise, + return false. + :type stack: list(string and Tree) + :param stack: A list of strings and Trees, encoding + the structure of the text that has been parsed so far. + :type remaining_text: list(str) + :param remaining_text: The portion of the text that is not yet + covered by ``stack``. 
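+
+        A small sketch of the effect, using the stepping wrapper defined
+        below (illustrative; ``grammar`` is assumed to be the demo grammar
+        at the bottom of this module, which contains ``NP -> 'I'``)::
+
+            srp = SteppingShiftReduceParser(grammar)
+            srp.initialize('I saw a man in the park'.split())
+            srp.shift()     # stack: ['I']
+            srp.reduce()    # stack: [Tree('NP', ['I'])], returns NP -> 'I'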
+ """ + if production is None: + productions = self._grammar.productions() + else: + productions = [production] + + # Try each production, in order. + for production in productions: + rhslen = len(production.rhs()) + + # check if the RHS of a production matches the top of the stack + if self._match_rhs(production.rhs(), stack[-rhslen:]): + + # combine the tree to reflect the reduction + tree = Tree(production.lhs().symbol(), stack[-rhslen:]) + stack[-rhslen:] = [tree] + + # We reduced something + if self._trace: + self._trace_reduce(stack, production, remaining_text) + return production + + # We didn't reduce anything + return None + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + # 1: just show shifts. + # 2: show shifts & reduces + # 3: display which tokens & productions are shifed/reduced + self._trace = trace + + def _trace_stack(self, stack, remaining_text, marker=" "): + """ + Print trace output displaying the given stack and text. + + :rtype: None + :param marker: A character that is printed to the left of the + stack. This is used with trace level 2 to print 'S' + before shifted stacks and 'R' before reduced stacks. + """ + s = " " + marker + " [ " + for elt in stack: + if isinstance(elt, Tree): + s += repr(Nonterminal(elt.label())) + " " + else: + s += repr(elt) + " " + s += "* " + " ".join(remaining_text) + "]" + print(s) + + def _trace_shift(self, stack, remaining_text): + """ + Print trace output displaying that a token has been shifted. + + :rtype: None + """ + if self._trace > 2: + print("Shift %r:" % stack[-1]) + if self._trace == 2: + self._trace_stack(stack, remaining_text, "S") + elif self._trace > 0: + self._trace_stack(stack, remaining_text) + + def _trace_reduce(self, stack, production, remaining_text): + """ + Print trace output displaying that ``production`` was used to + reduce ``stack``. + + :rtype: None + """ + if self._trace > 2: + rhs = " ".join(production.rhs()) + print(f"Reduce {production.lhs()!r} <- {rhs}") + if self._trace == 2: + self._trace_stack(stack, remaining_text, "R") + elif self._trace > 1: + self._trace_stack(stack, remaining_text) + + def _check_grammar(self): + """ + Check to make sure that all of the CFG productions are + potentially useful. If any productions can never be used, + then print a warning. + + :rtype: None + """ + productions = self._grammar.productions() + + # Any production whose RHS is an extension of another production's RHS + # will never be used. + for i in range(len(productions)): + for j in range(i + 1, len(productions)): + rhs1 = productions[i].rhs() + rhs2 = productions[j].rhs() + if rhs1[: len(rhs2)] == rhs2: + print("Warning: %r will never be used" % productions[i]) + + +##////////////////////////////////////////////////////// +## Stepping Shift/Reduce Parser +##////////////////////////////////////////////////////// +class SteppingShiftReduceParser(ShiftReduceParser): + """ + A ``ShiftReduceParser`` that allows you to setp through the parsing + process, performing a single operation at a time. It also allows + you to change the parser's grammar midway through parsing a text. + + The ``initialize`` method is used to start parsing a text. + ``shift`` performs a single shift operation, and ``reduce`` performs + a single reduce operation. 
``step`` will perform a single reduce + operation if possible; otherwise, it will perform a single shift + operation. ``parses`` returns the set of parses that have been + found by the parser. + + :ivar _history: A list of ``(stack, remaining_text)`` pairs, + containing all of the previous states of the parser. This + history is used to implement the ``undo`` operation. + :see: ``nltk.grammar`` + """ + + def __init__(self, grammar, trace=0): + super().__init__(grammar, trace) + self._stack = None + self._remaining_text = None + self._history = [] + + def parse(self, tokens): + tokens = list(tokens) + self.initialize(tokens) + while self.step(): + pass + return self.parses() + + def stack(self): + """ + :return: The parser's stack. + :rtype: list(str and Tree) + """ + return self._stack + + def remaining_text(self): + """ + :return: The portion of the text that is not yet covered by the + stack. + :rtype: list(str) + """ + return self._remaining_text + + def initialize(self, tokens): + """ + Start parsing a given text. This sets the parser's stack to + ``[]`` and sets its remaining text to ``tokens``. + """ + self._stack = [] + self._remaining_text = tokens + self._history = [] + + def step(self): + """ + Perform a single parsing operation. If a reduction is + possible, then perform that reduction, and return the + production that it is based on. Otherwise, if a shift is + possible, then perform it, and return True. Otherwise, + return False. + + :return: False if no operation was performed; True if a shift was + performed; and the CFG production used to reduce if a + reduction was performed. + :rtype: Production or bool + """ + return self.reduce() or self.shift() + + def shift(self): + """ + Move a token from the beginning of the remaining text to the + end of the stack. If there are no more tokens in the + remaining text, then do nothing. + + :return: True if the shift operation was successful. + :rtype: bool + """ + if len(self._remaining_text) == 0: + return False + self._history.append((self._stack[:], self._remaining_text[:])) + self._shift(self._stack, self._remaining_text) + return True + + def reduce(self, production=None): + """ + Use ``production`` to combine the rightmost stack elements into + a single Tree. If ``production`` does not match the + rightmost stack elements, then do nothing. + + :return: The production used to reduce the stack, if a + reduction was performed. If no reduction was performed, + return None. + + :rtype: Production or None + """ + self._history.append((self._stack[:], self._remaining_text[:])) + return_val = self._reduce(self._stack, self._remaining_text, production) + + if not return_val: + self._history.pop() + return return_val + + def undo(self): + """ + Return the parser to its state before the most recent + shift or reduce operation. Calling ``undo`` repeatedly return + the parser to successively earlier states. If no shift or + reduce operations have been performed, ``undo`` will make no + changes. + + :return: true if an operation was successfully undone. + :rtype: bool + """ + if len(self._history) == 0: + return False + (self._stack, self._remaining_text) = self._history.pop() + return True + + def reducible_productions(self): + """ + :return: A list of the productions for which reductions are + available for the current parser state. 
+ :rtype: list(Production) + """ + productions = [] + for production in self._grammar.productions(): + rhslen = len(production.rhs()) + if self._match_rhs(production.rhs(), self._stack[-rhslen:]): + productions.append(production) + return productions + + def parses(self): + """ + :return: An iterator of the parses that have been found by this + parser so far. + :rtype: iter(Tree) + """ + if ( + len(self._remaining_text) == 0 + and len(self._stack) == 1 + and self._stack[0].label() == self._grammar.start().symbol() + ): + yield self._stack[0] + + # copied from nltk.parser + + def set_grammar(self, grammar): + """ + Change the grammar used to parse texts. + + :param grammar: The new grammar. + :type grammar: CFG + """ + self._grammar = grammar + + +##////////////////////////////////////////////////////// +## Demonstration Code +##////////////////////////////////////////////////////// + + +def demo(): + """ + A demonstration of the shift-reduce parser. + """ + + from nltk import CFG, parse + + grammar = CFG.fromstring( + """ + S -> NP VP + NP -> Det N | Det N PP + VP -> V NP | V NP PP + PP -> P NP + NP -> 'I' + N -> 'man' | 'park' | 'telescope' | 'dog' + Det -> 'the' | 'a' + P -> 'in' | 'with' + V -> 'saw' + """ + ) + + sent = "I saw a man in the park".split() + + parser = parse.ShiftReduceParser(grammar, trace=2) + for p in parser.parse(sent): + print(p) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/transitionparser.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/transitionparser.py new file mode 100644 index 0000000000000000000000000000000000000000..476d70260a09c92196ea1cce749fc6774e75d822 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/transitionparser.py @@ -0,0 +1,794 @@ +# Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers +# +# Author: Long Duong +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +import pickle +import tempfile +from copy import deepcopy +from operator import itemgetter +from os import remove + +try: + from numpy import array + from scipy import sparse + from sklearn import svm + from sklearn.datasets import load_svmlight_file +except ImportError: + pass + +from nltk.parse import DependencyEvaluator, DependencyGraph, ParserI + + +class Configuration: + """ + Class for holding configuration which is the partial analysis of the input sentence. + The transition based parser aims at finding set of operators that transfer the initial + configuration to the terminal configuration. + + The configuration includes: + - Stack: for storing partially proceeded words + - Buffer: for storing remaining input words + - Set of arcs: for storing partially built dependency tree + + This class also provides a method to represent a configuration as list of features. + """ + + def __init__(self, dep_graph): + """ + :param dep_graph: the representation of an input in the form of dependency graph. + :type dep_graph: DependencyGraph where the dependencies are not specified. 
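+
+        A construction sketch (illustrative; ``dg`` is assumed to be a
+        ``DependencyGraph`` built from CoNLL-style input, as in ``demo()``)::
+
+            conf = Configuration(dg)
+            conf.stack               # [0] -- the root element
+            conf.buffer              # [1, 2, ..., n] -- remaining word indices
+            conf.extract_features()  # list of feature strings for this state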
+ """ + # dep_graph.nodes contain list of token for a sentence + self.stack = [0] # The root element + self.buffer = list(range(1, len(dep_graph.nodes))) # The rest is in the buffer + self.arcs = [] # empty set of arc + self._tokens = dep_graph.nodes + self._max_address = len(self.buffer) + + def __str__(self): + return ( + "Stack : " + + str(self.stack) + + " Buffer : " + + str(self.buffer) + + " Arcs : " + + str(self.arcs) + ) + + def _check_informative(self, feat, flag=False): + """ + Check whether a feature is informative + The flag control whether "_" is informative or not + """ + if feat is None: + return False + if feat == "": + return False + if flag is False: + if feat == "_": + return False + return True + + def extract_features(self): + """ + Extract the set of features for the current configuration. Implement standard features as describe in + Table 3.2 (page 31) in Dependency Parsing book by Sandra Kubler, Ryan McDonal, Joakim Nivre. + Please note that these features are very basic. + :return: list(str) + """ + result = [] + # Todo : can come up with more complicated features set for better + # performance. + if len(self.stack) > 0: + # Stack 0 + stack_idx0 = self.stack[len(self.stack) - 1] + token = self._tokens[stack_idx0] + if self._check_informative(token["word"], True): + result.append("STK_0_FORM_" + token["word"]) + if "lemma" in token and self._check_informative(token["lemma"]): + result.append("STK_0_LEMMA_" + token["lemma"]) + if self._check_informative(token["tag"]): + result.append("STK_0_POS_" + token["tag"]) + if "feats" in token and self._check_informative(token["feats"]): + feats = token["feats"].split("|") + for feat in feats: + result.append("STK_0_FEATS_" + feat) + # Stack 1 + if len(self.stack) > 1: + stack_idx1 = self.stack[len(self.stack) - 2] + token = self._tokens[stack_idx1] + if self._check_informative(token["tag"]): + result.append("STK_1_POS_" + token["tag"]) + + # Left most, right most dependency of stack[0] + left_most = 1000000 + right_most = -1 + dep_left_most = "" + dep_right_most = "" + for (wi, r, wj) in self.arcs: + if wi == stack_idx0: + if (wj > wi) and (wj > right_most): + right_most = wj + dep_right_most = r + if (wj < wi) and (wj < left_most): + left_most = wj + dep_left_most = r + if self._check_informative(dep_left_most): + result.append("STK_0_LDEP_" + dep_left_most) + if self._check_informative(dep_right_most): + result.append("STK_0_RDEP_" + dep_right_most) + + # Check Buffered 0 + if len(self.buffer) > 0: + # Buffer 0 + buffer_idx0 = self.buffer[0] + token = self._tokens[buffer_idx0] + if self._check_informative(token["word"], True): + result.append("BUF_0_FORM_" + token["word"]) + if "lemma" in token and self._check_informative(token["lemma"]): + result.append("BUF_0_LEMMA_" + token["lemma"]) + if self._check_informative(token["tag"]): + result.append("BUF_0_POS_" + token["tag"]) + if "feats" in token and self._check_informative(token["feats"]): + feats = token["feats"].split("|") + for feat in feats: + result.append("BUF_0_FEATS_" + feat) + # Buffer 1 + if len(self.buffer) > 1: + buffer_idx1 = self.buffer[1] + token = self._tokens[buffer_idx1] + if self._check_informative(token["word"], True): + result.append("BUF_1_FORM_" + token["word"]) + if self._check_informative(token["tag"]): + result.append("BUF_1_POS_" + token["tag"]) + if len(self.buffer) > 2: + buffer_idx2 = self.buffer[2] + token = self._tokens[buffer_idx2] + if self._check_informative(token["tag"]): + result.append("BUF_2_POS_" + token["tag"]) + if len(self.buffer) 
> 3: + buffer_idx3 = self.buffer[3] + token = self._tokens[buffer_idx3] + if self._check_informative(token["tag"]): + result.append("BUF_3_POS_" + token["tag"]) + # Left most, right most dependency of stack[0] + left_most = 1000000 + right_most = -1 + dep_left_most = "" + dep_right_most = "" + for (wi, r, wj) in self.arcs: + if wi == buffer_idx0: + if (wj > wi) and (wj > right_most): + right_most = wj + dep_right_most = r + if (wj < wi) and (wj < left_most): + left_most = wj + dep_left_most = r + if self._check_informative(dep_left_most): + result.append("BUF_0_LDEP_" + dep_left_most) + if self._check_informative(dep_right_most): + result.append("BUF_0_RDEP_" + dep_right_most) + + return result + + +class Transition: + """ + This class defines a set of transition which is applied to a configuration to get another configuration + Note that for different parsing algorithm, the transition is different. + """ + + # Define set of transitions + LEFT_ARC = "LEFTARC" + RIGHT_ARC = "RIGHTARC" + SHIFT = "SHIFT" + REDUCE = "REDUCE" + + def __init__(self, alg_option): + """ + :param alg_option: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm + :type alg_option: str + """ + self._algo = alg_option + if alg_option not in [ + TransitionParser.ARC_STANDARD, + TransitionParser.ARC_EAGER, + ]: + raise ValueError( + " Currently we only support %s and %s " + % (TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER) + ) + + def left_arc(self, conf, relation): + """ + Note that the algorithm for left-arc is quite similar except for precondition for both arc-standard and arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0): + return -1 + if conf.buffer[0] == 0: + # here is the Root element + return -1 + + idx_wi = conf.stack[len(conf.stack) - 1] + + flag = True + if self._algo == TransitionParser.ARC_EAGER: + for (idx_parent, r, idx_child) in conf.arcs: + if idx_child == idx_wi: + flag = False + + if flag: + conf.stack.pop() + idx_wj = conf.buffer[0] + conf.arcs.append((idx_wj, relation, idx_wi)) + else: + return -1 + + def right_arc(self, conf, relation): + """ + Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0): + return -1 + if self._algo == TransitionParser.ARC_STANDARD: + idx_wi = conf.stack.pop() + idx_wj = conf.buffer[0] + conf.buffer[0] = idx_wi + conf.arcs.append((idx_wi, relation, idx_wj)) + else: # arc-eager + idx_wi = conf.stack[len(conf.stack) - 1] + idx_wj = conf.buffer.pop(0) + conf.stack.append(idx_wj) + conf.arcs.append((idx_wi, relation, idx_wj)) + + def reduce(self, conf): + """ + Note that the algorithm for reduce is only available for arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + + if self._algo != TransitionParser.ARC_EAGER: + return -1 + if len(conf.stack) <= 0: + return -1 + + idx_wi = conf.stack[len(conf.stack) - 1] + flag = False + for (idx_parent, r, idx_child) in conf.arcs: + if idx_child == idx_wi: + flag = True + if flag: + conf.stack.pop() # reduce it + else: + return -1 + + def shift(self, conf): + """ + Note that the algorithm for shift is the SAME for arc-standard 
and arc-eager + + :param configuration: is the current configuration + :return: A new configuration or -1 if the pre-condition is not satisfied + """ + if len(conf.buffer) <= 0: + return -1 + idx_wi = conf.buffer.pop(0) + conf.stack.append(idx_wi) + + +class TransitionParser(ParserI): + + """ + Class for transition based parser. Implement 2 algorithms which are "arc-standard" and "arc-eager" + """ + + ARC_STANDARD = "arc-standard" + ARC_EAGER = "arc-eager" + + def __init__(self, algorithm): + """ + :param algorithm: the algorithm option of this parser. Currently support `arc-standard` and `arc-eager` algorithm + :type algorithm: str + """ + if not (algorithm in [self.ARC_STANDARD, self.ARC_EAGER]): + raise ValueError( + " Currently we only support %s and %s " + % (self.ARC_STANDARD, self.ARC_EAGER) + ) + self._algorithm = algorithm + + self._dictionary = {} + self._transition = {} + self._match_transition = {} + + def _get_dep_relation(self, idx_parent, idx_child, depgraph): + p_node = depgraph.nodes[idx_parent] + c_node = depgraph.nodes[idx_child] + + if c_node["word"] is None: + return None # Root word + + if c_node["head"] == p_node["address"]: + return c_node["rel"] + else: + return None + + def _convert_to_binary_features(self, features): + """ + :param features: list of feature string which is needed to convert to binary features + :type features: list(str) + :return : string of binary features in libsvm format which is 'featureID:value' pairs + """ + unsorted_result = [] + for feature in features: + self._dictionary.setdefault(feature, len(self._dictionary)) + unsorted_result.append(self._dictionary[feature]) + + # Default value of each feature is 1.0 + return " ".join( + str(featureID) + ":1.0" for featureID in sorted(unsorted_result) + ) + + def _is_projective(self, depgraph): + arc_list = [] + for key in depgraph.nodes: + node = depgraph.nodes[key] + + if "head" in node: + childIdx = node["address"] + parentIdx = node["head"] + if parentIdx is not None: + arc_list.append((parentIdx, childIdx)) + + for (parentIdx, childIdx) in arc_list: + # Ensure that childIdx < parentIdx + if childIdx > parentIdx: + temp = childIdx + childIdx = parentIdx + parentIdx = temp + for k in range(childIdx + 1, parentIdx): + for m in range(len(depgraph.nodes)): + if (m < childIdx) or (m > parentIdx): + if (k, m) in arc_list: + return False + if (m, k) in arc_list: + return False + return True + + def _write_to_file(self, key, binary_features, input_file): + """ + write the binary features to input file and update the transition dictionary + """ + self._transition.setdefault(key, len(self._transition) + 1) + self._match_transition[self._transition[key]] = key + + input_str = str(self._transition[key]) + " " + binary_features + "\n" + input_file.write(input_str.encode("utf-8")) + + def _create_training_examples_arc_std(self, depgraphs, input_file): + """ + Create the training example in the libsvm format and write it to the input_file. + Reference : Page 32, Chapter 3. 
Dependency Parsing by Sandra Kubler, Ryan McDonal and Joakim Nivre (2009) + """ + operation = Transition(self.ARC_STANDARD) + count_proj = 0 + training_seq = [] + + for depgraph in depgraphs: + if not self._is_projective(depgraph): + continue + + count_proj += 1 + conf = Configuration(depgraph) + while len(conf.buffer) > 0: + b0 = conf.buffer[0] + features = conf.extract_features() + binary_features = self._convert_to_binary_features(features) + + if len(conf.stack) > 0: + s0 = conf.stack[len(conf.stack) - 1] + # Left-arc operation + rel = self._get_dep_relation(b0, s0, depgraph) + if rel is not None: + key = Transition.LEFT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.left_arc(conf, rel) + training_seq.append(key) + continue + + # Right-arc operation + rel = self._get_dep_relation(s0, b0, depgraph) + if rel is not None: + precondition = True + # Get the max-index of buffer + maxID = conf._max_address + + for w in range(maxID + 1): + if w != b0: + relw = self._get_dep_relation(b0, w, depgraph) + if relw is not None: + if (b0, relw, w) not in conf.arcs: + precondition = False + + if precondition: + key = Transition.RIGHT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.right_arc(conf, rel) + training_seq.append(key) + continue + + # Shift operation as the default + key = Transition.SHIFT + self._write_to_file(key, binary_features, input_file) + operation.shift(conf) + training_seq.append(key) + + print(" Number of training examples : " + str(len(depgraphs))) + print(" Number of valid (projective) examples : " + str(count_proj)) + return training_seq + + def _create_training_examples_arc_eager(self, depgraphs, input_file): + """ + Create the training example in the libsvm format and write it to the input_file. 
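+        Each line written to ``input_file`` has the form
+        ``"<transition-id> <featureID>:1.0 <featureID>:1.0 ..."`` (e.g.
+        ``"3 12:1.0 45:1.0"``; the numbers are purely illustrative, since
+        both kinds of id are assigned in order of first appearance).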
+ Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Joav Goldberg and Joakim Nivre + """ + operation = Transition(self.ARC_EAGER) + countProj = 0 + training_seq = [] + + for depgraph in depgraphs: + if not self._is_projective(depgraph): + continue + + countProj += 1 + conf = Configuration(depgraph) + while len(conf.buffer) > 0: + b0 = conf.buffer[0] + features = conf.extract_features() + binary_features = self._convert_to_binary_features(features) + + if len(conf.stack) > 0: + s0 = conf.stack[len(conf.stack) - 1] + # Left-arc operation + rel = self._get_dep_relation(b0, s0, depgraph) + if rel is not None: + key = Transition.LEFT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.left_arc(conf, rel) + training_seq.append(key) + continue + + # Right-arc operation + rel = self._get_dep_relation(s0, b0, depgraph) + if rel is not None: + key = Transition.RIGHT_ARC + ":" + rel + self._write_to_file(key, binary_features, input_file) + operation.right_arc(conf, rel) + training_seq.append(key) + continue + + # reduce operation + flag = False + for k in range(s0): + if self._get_dep_relation(k, b0, depgraph) is not None: + flag = True + if self._get_dep_relation(b0, k, depgraph) is not None: + flag = True + if flag: + key = Transition.REDUCE + self._write_to_file(key, binary_features, input_file) + operation.reduce(conf) + training_seq.append(key) + continue + + # Shift operation as the default + key = Transition.SHIFT + self._write_to_file(key, binary_features, input_file) + operation.shift(conf) + training_seq.append(key) + + print(" Number of training examples : " + str(len(depgraphs))) + print(" Number of valid (projective) examples : " + str(countProj)) + return training_seq + + def train(self, depgraphs, modelfile, verbose=True): + """ + :param depgraphs : list of DependencyGraph as the training data + :type depgraphs : DependencyGraph + :param modelfile : file name to save the trained model + :type modelfile : str + """ + + try: + input_file = tempfile.NamedTemporaryFile( + prefix="transition_parse.train", dir=tempfile.gettempdir(), delete=False + ) + + if self._algorithm == self.ARC_STANDARD: + self._create_training_examples_arc_std(depgraphs, input_file) + else: + self._create_training_examples_arc_eager(depgraphs, input_file) + + input_file.close() + # Using the temporary file to train the libsvm classifier + x_train, y_train = load_svmlight_file(input_file.name) + # The parameter is set according to the paper: + # Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre + # Todo : because of probability = True => very slow due to + # cross-validation. 
Need to improve the speed here + model = svm.SVC( + kernel="poly", + degree=2, + coef0=0, + gamma=0.2, + C=0.5, + verbose=verbose, + probability=True, + ) + + model.fit(x_train, y_train) + # Save the model to file name (as pickle) + pickle.dump(model, open(modelfile, "wb")) + finally: + remove(input_file.name) + + def parse(self, depgraphs, modelFile): + """ + :param depgraphs: the list of test sentence, each sentence is represented as a dependency graph where the 'head' information is dummy + :type depgraphs: list(DependencyGraph) + :param modelfile: the model file + :type modelfile: str + :return: list (DependencyGraph) with the 'head' and 'rel' information + """ + result = [] + # First load the model + model = pickle.load(open(modelFile, "rb")) + operation = Transition(self._algorithm) + + for depgraph in depgraphs: + conf = Configuration(depgraph) + while len(conf.buffer) > 0: + features = conf.extract_features() + col = [] + row = [] + data = [] + for feature in features: + if feature in self._dictionary: + col.append(self._dictionary[feature]) + row.append(0) + data.append(1.0) + np_col = array(sorted(col)) # NB : index must be sorted + np_row = array(row) + np_data = array(data) + + x_test = sparse.csr_matrix( + (np_data, (np_row, np_col)), shape=(1, len(self._dictionary)) + ) + + # It's best to use decision function as follow BUT it's not supported yet for sparse SVM + # Using decision function to build the votes array + # dec_func = model.decision_function(x_test)[0] + # votes = {} + # k = 0 + # for i in range(len(model.classes_)): + # for j in range(i+1, len(model.classes_)): + # #if dec_func[k] > 0: + # votes.setdefault(i,0) + # votes[i] +=1 + # else: + # votes.setdefault(j,0) + # votes[j] +=1 + # k +=1 + # Sort votes according to the values + # sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True) + + # We will use predict_proba instead of decision_function + prob_dict = {} + pred_prob = model.predict_proba(x_test)[0] + for i in range(len(pred_prob)): + prob_dict[i] = pred_prob[i] + sorted_Prob = sorted(prob_dict.items(), key=itemgetter(1), reverse=True) + + # Note that SHIFT is always a valid operation + for (y_pred_idx, confidence) in sorted_Prob: + # y_pred = model.predict(x_test)[0] + # From the prediction match to the operation + y_pred = model.classes_[y_pred_idx] + + if y_pred in self._match_transition: + strTransition = self._match_transition[y_pred] + baseTransition = strTransition.split(":")[0] + + if baseTransition == Transition.LEFT_ARC: + if ( + operation.left_arc(conf, strTransition.split(":")[1]) + != -1 + ): + break + elif baseTransition == Transition.RIGHT_ARC: + if ( + operation.right_arc(conf, strTransition.split(":")[1]) + != -1 + ): + break + elif baseTransition == Transition.REDUCE: + if operation.reduce(conf) != -1: + break + elif baseTransition == Transition.SHIFT: + if operation.shift(conf) != -1: + break + else: + raise ValueError( + "The predicted transition is not recognized, expected errors" + ) + + # Finish with operations build the dependency graph from Conf.arcs + + new_depgraph = deepcopy(depgraph) + for key in new_depgraph.nodes: + node = new_depgraph.nodes[key] + node["rel"] = "" + # With the default, all the token depend on the Root + node["head"] = 0 + for (head, rel, child) in conf.arcs: + c_node = new_depgraph.nodes[child] + c_node["head"] = head + c_node["rel"] = rel + result.append(new_depgraph) + + return result + + +def demo(): + """ + >>> from nltk.parse import DependencyGraph, DependencyEvaluator + >>> from 
nltk.parse.transitionparser import TransitionParser, Configuration, Transition + >>> gold_sent = DependencyGraph(\""" + ... Economic JJ 2 ATT + ... news NN 3 SBJ + ... has VBD 0 ROOT + ... little JJ 5 ATT + ... effect NN 3 OBJ + ... on IN 5 ATT + ... financial JJ 8 ATT + ... markets NNS 6 PC + ... . . 3 PU + ... \""") + + >>> conf = Configuration(gold_sent) + + ###################### Check the Initial Feature ######################## + + >>> print(', '.join(conf.extract_features())) + STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ + + ###################### Check The Transition ####################### + Check the Initialized Configuration + >>> print(conf) + Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : [] + + A. Do some transition checks for ARC-STANDARD + + >>> operation = Transition('arc-standard') + >>> operation.shift(conf) + >>> operation.left_arc(conf, "ATT") + >>> operation.shift(conf) + >>> operation.left_arc(conf,"SBJ") + >>> operation.shift(conf) + >>> operation.shift(conf) + >>> operation.left_arc(conf, "ATT") + >>> operation.shift(conf) + >>> operation.shift(conf) + >>> operation.shift(conf) + >>> operation.left_arc(conf, "ATT") + + Middle Configuration and Features Check + >>> print(conf) + Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)] + + >>> print(', '.join(conf.extract_features())) + STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT + + >>> operation.right_arc(conf, "PC") + >>> operation.right_arc(conf, "ATT") + >>> operation.right_arc(conf, "OBJ") + >>> operation.shift(conf) + >>> operation.right_arc(conf, "PU") + >>> operation.right_arc(conf, "ROOT") + >>> operation.shift(conf) + + Terminated Configuration Check + >>> print(conf) + Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)] + + + B. Do some transition checks for ARC-EAGER + + >>> conf = Configuration(gold_sent) + >>> operation = Transition('arc-eager') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'ATT') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'SBJ') + >>> operation.right_arc(conf,'ROOT') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'ATT') + >>> operation.right_arc(conf,'OBJ') + >>> operation.right_arc(conf,'ATT') + >>> operation.shift(conf) + >>> operation.left_arc(conf,'ATT') + >>> operation.right_arc(conf,'PC') + >>> operation.reduce(conf) + >>> operation.reduce(conf) + >>> operation.reduce(conf) + >>> operation.right_arc(conf,'PU') + >>> print(conf) + Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)] + + ###################### Check The Training Function ####################### + + A. 
Check the ARC-STANDARD training + >>> import tempfile + >>> import os + >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False) + + >>> parser_std = TransitionParser('arc-standard') + >>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file))) + Number of training examples : 1 + Number of valid (projective) examples : 1 + SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT + + >>> parser_std.train([gold_sent],'temp.arcstd.model', verbose=False) + Number of training examples : 1 + Number of valid (projective) examples : 1 + >>> input_file.close() + >>> remove(input_file.name) + + B. Check the ARC-EAGER training + + >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False) + >>> parser_eager = TransitionParser('arc-eager') + >>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file))) + Number of training examples : 1 + Number of valid (projective) examples : 1 + SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU + + >>> parser_eager.train([gold_sent],'temp.arceager.model', verbose=False) + Number of training examples : 1 + Number of valid (projective) examples : 1 + + >>> input_file.close() + >>> remove(input_file.name) + + ###################### Check The Parsing Function ######################## + + A. Check the ARC-STANDARD parser + + >>> result = parser_std.parse([gold_sent], 'temp.arcstd.model') + >>> de = DependencyEvaluator(result, [gold_sent]) + >>> de.eval() >= (0, 0) + True + + B. Check the ARC-EAGER parser + >>> result = parser_eager.parse([gold_sent], 'temp.arceager.model') + >>> de = DependencyEvaluator(result, [gold_sent]) + >>> de.eval() >= (0, 0) + True + + Remove test temporary files + >>> remove('temp.arceager.model') + >>> remove('temp.arcstd.model') + + Note that result is very poor because of only one training example. + """ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/util.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/util.py new file mode 100644 index 0000000000000000000000000000000000000000..3cc5bee08fdb9aa237513992a36fa2eaa0aa8219 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/util.py @@ -0,0 +1,234 @@ +# Natural Language Toolkit: Parser Utility Functions +# +# Author: Ewan Klein +# Tom Aarsen <> +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + + +""" +Utility functions for parsers. +""" + +from nltk.data import load +from nltk.grammar import CFG, PCFG, FeatureGrammar +from nltk.parse.chart import Chart, ChartParser +from nltk.parse.featurechart import FeatureChart, FeatureChartParser +from nltk.parse.pchart import InsideChartParser + + +def load_parser( + grammar_url, trace=0, parser=None, chart_class=None, beam_size=0, **load_args +): + """ + Load a grammar from a file, and build a parser based on that grammar. + The parser depends on the grammar format, and might also depend + on properties of the grammar itself. 
+ + The following grammar formats are currently supported: + - ``'cfg'`` (CFGs: ``CFG``) + - ``'pcfg'`` (probabilistic CFGs: ``PCFG``) + - ``'fcfg'`` (feature-based CFGs: ``FeatureGrammar``) + + :type grammar_url: str + :param grammar_url: A URL specifying where the grammar is located. + The default protocol is ``"nltk:"``, which searches for the file + in the the NLTK data package. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing output. + :param parser: The class used for parsing; should be ``ChartParser`` + or a subclass. + If None, the class depends on the grammar format. + :param chart_class: The class used for storing the chart; + should be ``Chart`` or a subclass. + Only used for CFGs and feature CFGs. + If None, the chart class depends on the grammar format. + :type beam_size: int + :param beam_size: The maximum length for the parser's edge queue. + Only used for probabilistic CFGs. + :param load_args: Keyword parameters used when loading the grammar. + See ``data.load`` for more information. + """ + grammar = load(grammar_url, **load_args) + if not isinstance(grammar, CFG): + raise ValueError("The grammar must be a CFG, " "or a subclass thereof.") + if isinstance(grammar, PCFG): + if parser is None: + parser = InsideChartParser + return parser(grammar, trace=trace, beam_size=beam_size) + + elif isinstance(grammar, FeatureGrammar): + if parser is None: + parser = FeatureChartParser + if chart_class is None: + chart_class = FeatureChart + return parser(grammar, trace=trace, chart_class=chart_class) + + else: # Plain CFG. + if parser is None: + parser = ChartParser + if chart_class is None: + chart_class = Chart + return parser(grammar, trace=trace, chart_class=chart_class) + + +def taggedsent_to_conll(sentence): + """ + A module to convert a single POS tagged sentence into CONLL format. + + >>> from nltk import word_tokenize, pos_tag + >>> text = "This is a foobar sentence." + >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE + ... print(line, end="") + 1 This _ DT DT _ 0 a _ _ + 2 is _ VBZ VBZ _ 0 a _ _ + 3 a _ DT DT _ 0 a _ _ + 4 foobar _ JJ JJ _ 0 a _ _ + 5 sentence _ NN NN _ 0 a _ _ + 6 . _ . . _ 0 a _ _ + + :param sentence: A single input sentence to parse + :type sentence: list(tuple(str, str)) + :rtype: iter(str) + :return: a generator yielding a single sentence in CONLL format. + """ + for (i, (word, tag)) in enumerate(sentence, start=1): + input_str = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"] + input_str = "\t".join(input_str) + "\n" + yield input_str + + +def taggedsents_to_conll(sentences): + """ + A module to convert the a POS tagged document stream + (i.e. list of list of tuples, a list of sentences) and yield lines + in CONLL format. This module yields one line per word and two newlines + for end of sentence. + + >>> from nltk import word_tokenize, sent_tokenize, pos_tag + >>> text = "This is a foobar sentence. Is that right?" + >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)] + >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE + ... if line: + ... print(line, end="") + 1 This _ DT DT _ 0 a _ _ + 2 is _ VBZ VBZ _ 0 a _ _ + 3 a _ DT DT _ 0 a _ _ + 4 foobar _ JJ JJ _ 0 a _ _ + 5 sentence _ NN NN _ 0 a _ _ + 6 . _ . . 
_ 0 a _ _ + + + 1 Is _ VBZ VBZ _ 0 a _ _ + 2 that _ IN IN _ 0 a _ _ + 3 right _ NN NN _ 0 a _ _ + 4 ? _ . . _ 0 a _ _ + + + + :param sentences: Input sentences to parse + :type sentence: list(list(tuple(str, str))) + :rtype: iter(str) + :return: a generator yielding sentences in CONLL format. + """ + for sentence in sentences: + yield from taggedsent_to_conll(sentence) + yield "\n\n" + + +###################################################################### +# { Test Suites +###################################################################### + + +class TestGrammar: + """ + Unit tests for CFG. + """ + + def __init__(self, grammar, suite, accept=None, reject=None): + self.test_grammar = grammar + + self.cp = load_parser(grammar, trace=0) + self.suite = suite + self._accept = accept + self._reject = reject + + def run(self, show_trees=False): + """ + Sentences in the test suite are divided into two classes: + + - grammatical (``accept``) and + - ungrammatical (``reject``). + + If a sentence should parse according to the grammar, the value of + ``trees`` will be a non-empty list. If a sentence should be rejected + according to the grammar, then the value of ``trees`` will be None. + """ + for test in self.suite: + print(test["doc"] + ":", end=" ") + for key in ["accept", "reject"]: + for sent in test[key]: + tokens = sent.split() + trees = list(self.cp.parse(tokens)) + if show_trees and trees: + print() + print(sent) + for tree in trees: + print(tree) + if key == "accept": + if trees == []: + raise ValueError("Sentence '%s' failed to parse'" % sent) + else: + accepted = True + else: + if trees: + raise ValueError("Sentence '%s' received a parse'" % sent) + else: + rejected = True + if accepted and rejected: + print("All tests passed!") + + +def extract_test_sentences(string, comment_chars="#%;", encoding=None): + """ + Parses a string with one test sentence per line. + Lines can optionally begin with: + + - a bool, saying if the sentence is grammatical or not, or + - an int, giving the number of parse trees is should have, + + The result information is followed by a colon, and then the sentence. + Empty lines and lines beginning with a comment char are ignored. + + :return: a list of tuple of sentences and expected results, + where a sentence is a list of str, + and a result is None, or bool, or int + + :param comment_chars: ``str`` of possible comment characters. 
+ :param encoding: the encoding of the string, if it is binary + """ + if encoding is not None: + string = string.decode(encoding) + sentences = [] + for sentence in string.split("\n"): + if sentence == "" or sentence[0] in comment_chars: + continue + split_info = sentence.split(":", 1) + result = None + if len(split_info) == 2: + if split_info[0] in ["True", "true", "False", "false"]: + result = split_info[0] in ["True", "true"] + sentence = split_info[1] + else: + result = int(split_info[0]) + sentence = split_info[1] + tokens = sentence.split() + if tokens == []: + continue + sentences += [(tokens, result)] + return sentences diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/parse/viterbi.py b/llmeval-env/lib/python3.10/site-packages/nltk/parse/viterbi.py new file mode 100644 index 0000000000000000000000000000000000000000..8a3e9de30432a65828463e32e6ea7bff27b7c5ee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/parse/viterbi.py @@ -0,0 +1,453 @@ +# Natural Language Toolkit: Viterbi Probabilistic Parser +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from functools import reduce + +from nltk.parse.api import ParserI +from nltk.tree import ProbabilisticTree, Tree + +##////////////////////////////////////////////////////// +## Viterbi PCFG Parser +##////////////////////////////////////////////////////// + + +class ViterbiParser(ParserI): + """ + A bottom-up ``PCFG`` parser that uses dynamic programming to find + the single most likely parse for a text. The ``ViterbiParser`` parser + parses texts by filling in a "most likely constituent table". + This table records the most probable tree representation for any + given span and node value. In particular, it has an entry for + every start index, end index, and node value, recording the most + likely subtree that spans from the start index to the end index, + and has the given node value. + + The ``ViterbiParser`` parser fills in this table incrementally. It starts + by filling in all entries for constituents that span one element + of text (i.e., entries where the end index is one greater than the + start index). After it has filled in all table entries for + constituents that span one element of text, it fills in the + entries for constitutants that span two elements of text. It + continues filling in the entries for constituents spanning larger + and larger portions of the text, until the entire table has been + filled. Finally, it returns the table entry for a constituent + spanning the entire text, whose node value is the grammar's start + symbol. + + In order to find the most likely constituent with a given span and + node value, the ``ViterbiParser`` parser considers all productions that + could produce that node value. For each production, it finds all + children that collectively cover the span and have the node values + specified by the production's right hand side. If the probability + of the tree formed by applying the production to the children is + greater than the probability of the current entry in the table, + then the table is updated with this new tree. + + A pseudo-code description of the algorithm used by + ``ViterbiParser`` is: + + | Create an empty most likely constituent table, *MLC*. 
+ | For width in 1...len(text): + | For start in 1...len(text)-width: + | For prod in grammar.productions: + | For each sequence of subtrees [t[1], t[2], ..., t[n]] in MLC, + | where t[i].label()==prod.rhs[i], + | and the sequence covers [start:start+width]: + | old_p = MLC[start, start+width, prod.lhs] + | new_p = P(t[1])P(t[1])...P(t[n])P(prod) + | if new_p > old_p: + | new_tree = Tree(prod.lhs, t[1], t[2], ..., t[n]) + | MLC[start, start+width, prod.lhs] = new_tree + | Return MLC[0, len(text), start_symbol] + + :type _grammar: PCFG + :ivar _grammar: The grammar used to parse sentences. + :type _trace: int + :ivar _trace: The level of tracing output that should be generated + when parsing a text. + """ + + def __init__(self, grammar, trace=0): + """ + Create a new ``ViterbiParser`` parser, that uses ``grammar`` to + parse texts. + + :type grammar: PCFG + :param grammar: The grammar used to parse texts. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + and higher numbers will produce more verbose tracing + output. + """ + self._grammar = grammar + self._trace = trace + + def grammar(self): + return self._grammar + + def trace(self, trace=2): + """ + Set the level of tracing output that should be generated when + parsing a text. + + :type trace: int + :param trace: The trace level. A trace level of ``0`` will + generate no tracing output; and higher trace levels will + produce more verbose tracing output. + :rtype: None + """ + self._trace = trace + + def parse(self, tokens): + # Inherit docs from ParserI + + tokens = list(tokens) + self._grammar.check_coverage(tokens) + + # The most likely constituent table. This table specifies the + # most likely constituent for a given span and type. + # Constituents can be either Trees or tokens. For Trees, + # the "type" is the Nonterminal for the tree's root node + # value. For Tokens, the "type" is the token's type. + # The table is stored as a dictionary, since it is sparse. + constituents = {} + + # Initialize the constituents dictionary with the words from + # the text. + if self._trace: + print("Inserting tokens into the most likely" + " constituents table...") + for index in range(len(tokens)): + token = tokens[index] + constituents[index, index + 1, token] = token + if self._trace > 1: + self._trace_lexical_insertion(token, index, len(tokens)) + + # Consider each span of length 1, 2, ..., n; and add any trees + # that might cover that span to the constituents dictionary. + for length in range(1, len(tokens) + 1): + if self._trace: + print( + "Finding the most likely constituents" + + " spanning %d text elements..." % length + ) + for start in range(len(tokens) - length + 1): + span = (start, start + length) + self._add_constituents_spanning(span, constituents, tokens) + + # Return the tree that spans the entire text & have the right cat + tree = constituents.get((0, len(tokens), self._grammar.start())) + if tree is not None: + yield tree + + def _add_constituents_spanning(self, span, constituents, tokens): + """ + Find any constituents that might cover ``span``, and add them + to the most likely constituents table. + + :rtype: None + :type span: tuple(int, int) + :param span: The section of the text for which we are + trying to find possible constituents. 
The span is + specified as a pair of integers, where the first integer + is the index of the first token that should be included in + the constituent; and the second integer is the index of + the first token that should not be included in the + constituent. I.e., the constituent should cover + ``text[span[0]:span[1]]``, where ``text`` is the text + that we are parsing. + + :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) + :param constituents: The most likely constituents table. This + table records the most probable tree representation for + any given span and node value. In particular, + ``constituents(s,e,nv)`` is the most likely + ``ProbabilisticTree`` that covers ``text[s:e]`` + and has a node value ``nv.symbol()``, where ``text`` + is the text that we are parsing. When + ``_add_constituents_spanning`` is called, ``constituents`` + should contain all possible constituents that are shorter + than ``span``. + + :type tokens: list of tokens + :param tokens: The text we are parsing. This is only used for + trace output. + """ + # Since some of the grammar productions may be unary, we need to + # repeatedly try all of the productions until none of them add any + # new constituents. + changed = True + while changed: + changed = False + + # Find all ways instantiations of the grammar productions that + # cover the span. + instantiations = self._find_instantiations(span, constituents) + + # For each production instantiation, add a new + # ProbabilisticTree whose probability is the product + # of the childrens' probabilities and the production's + # probability. + for (production, children) in instantiations: + subtrees = [c for c in children if isinstance(c, Tree)] + p = reduce(lambda pr, t: pr * t.prob(), subtrees, production.prob()) + node = production.lhs().symbol() + tree = ProbabilisticTree(node, children, prob=p) + + # If it's new a constituent, then add it to the + # constituents dictionary. + c = constituents.get((span[0], span[1], production.lhs())) + if self._trace > 1: + if c is None or c != tree: + if c is None or c.prob() < tree.prob(): + print(" Insert:", end=" ") + else: + print(" Discard:", end=" ") + self._trace_production(production, p, span, len(tokens)) + if c is None or c.prob() < tree.prob(): + constituents[span[0], span[1], production.lhs()] = tree + changed = True + + def _find_instantiations(self, span, constituents): + """ + :return: a list of the production instantiations that cover a + given span of the text. A "production instantiation" is + a tuple containing a production and a list of children, + where the production's right hand side matches the list of + children; and the children cover ``span``. :rtype: list + of ``pair`` of ``Production``, (list of + (``ProbabilisticTree`` or token. + + :type span: tuple(int, int) + :param span: The section of the text for which we are + trying to find production instantiations. The span is + specified as a pair of integers, where the first integer + is the index of the first token that should be covered by + the production instantiation; and the second integer is + the index of the first token that should not be covered by + the production instantiation. + :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) + :param constituents: The most likely constituents table. This + table records the most probable tree representation for + any given span and node value. See the module + documentation for more information. 
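+
+        For illustration (using the toy grammar from ``demo()`` below):
+        given the production ``NP -> Det N`` and a constituents table
+        that already holds a ``Det`` tree over ``text[0:1]`` and an
+        ``N`` tree over ``text[1:2]``, the span ``(0, 2)`` yields the
+        single instantiation ``(NP -> Det N, [<Det tree>, <N tree>])``.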
+ """ + rv = [] + for production in self._grammar.productions(): + childlists = self._match_rhs(production.rhs(), span, constituents) + + for childlist in childlists: + rv.append((production, childlist)) + return rv + + def _match_rhs(self, rhs, span, constituents): + """ + :return: a set of all the lists of children that cover ``span`` + and that match ``rhs``. + :rtype: list(list(ProbabilisticTree or token) + + :type rhs: list(Nonterminal or any) + :param rhs: The list specifying what kinds of children need to + cover ``span``. Each nonterminal in ``rhs`` specifies + that the corresponding child should be a tree whose node + value is that nonterminal's symbol. Each terminal in ``rhs`` + specifies that the corresponding child should be a token + whose type is that terminal. + :type span: tuple(int, int) + :param span: The section of the text for which we are + trying to find child lists. The span is specified as a + pair of integers, where the first integer is the index of + the first token that should be covered by the child list; + and the second integer is the index of the first token + that should not be covered by the child list. + :type constituents: dict(tuple(int,int,Nonterminal) -> ProbabilisticToken or ProbabilisticTree) + :param constituents: The most likely constituents table. This + table records the most probable tree representation for + any given span and node value. See the module + documentation for more information. + """ + (start, end) = span + + # Base case + if start >= end and rhs == (): + return [[]] + if start >= end or rhs == (): + return [] + + # Find everything that matches the 1st symbol of the RHS + childlists = [] + for split in range(start, end + 1): + l = constituents.get((start, split, rhs[0])) + if l is not None: + rights = self._match_rhs(rhs[1:], (split, end), constituents) + childlists += [[l] + r for r in rights] + + return childlists + + def _trace_production(self, production, p, span, width): + """ + Print trace output indicating that a given production has been + applied at a given location. + + :param production: The production that has been applied + :type production: Production + :param p: The probability of the tree produced by the production. + :type p: float + :param span: The span of the production + :type span: tuple + :rtype: None + """ + + str = "|" + "." * span[0] + str += "=" * (span[1] - span[0]) + str += "." * (width - span[1]) + "| " + str += "%s" % production + if self._trace > 2: + str = f"{str:<40} {p:12.10f} " + + print(str) + + def _trace_lexical_insertion(self, token, index, width): + str = " Insert: |" + "." * index + "=" + "." * (width - index - 1) + "| " + str += f"{token}" + print(str) + + def __repr__(self): + return "" % self._grammar + + +##////////////////////////////////////////////////////// +## Test Code +##////////////////////////////////////////////////////// + + +def demo(): + """ + A demonstration of the probabilistic parsers. The user is + prompted to select which demo to run, and how many parses should + be found; and then each parser is run on the same demo, and a + summary of the results are displayed. 
+ """ + import sys + import time + + from nltk import tokenize + from nltk.grammar import PCFG + from nltk.parse import ViterbiParser + + toy_pcfg1 = PCFG.fromstring( + """ + S -> NP VP [1.0] + NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] + Det -> 'the' [0.8] | 'my' [0.2] + N -> 'man' [0.5] | 'telescope' [0.5] + VP -> VP PP [0.1] | V NP [0.7] | V [0.2] + V -> 'ate' [0.35] | 'saw' [0.65] + PP -> P NP [1.0] + P -> 'with' [0.61] | 'under' [0.39] + """ + ) + + toy_pcfg2 = PCFG.fromstring( + """ + S -> NP VP [1.0] + VP -> V NP [.59] + VP -> V [.40] + VP -> VP PP [.01] + NP -> Det N [.41] + NP -> Name [.28] + NP -> NP PP [.31] + PP -> P NP [1.0] + V -> 'saw' [.21] + V -> 'ate' [.51] + V -> 'ran' [.28] + N -> 'boy' [.11] + N -> 'cookie' [.12] + N -> 'table' [.13] + N -> 'telescope' [.14] + N -> 'hill' [.5] + Name -> 'Jack' [.52] + Name -> 'Bob' [.48] + P -> 'with' [.61] + P -> 'under' [.39] + Det -> 'the' [.41] + Det -> 'a' [.31] + Det -> 'my' [.28] + """ + ) + + # Define two demos. Each demo has a sentence and a grammar. + demos = [ + ("I saw the man with my telescope", toy_pcfg1), + ("the boy saw Jack with Bob under the table with a telescope", toy_pcfg2), + ] + + # Ask the user which demo they want to use. + print() + for i in range(len(demos)): + print(f"{i + 1:>3}: {demos[i][0]}") + print(" %r" % demos[i][1]) + print() + print("Which demo (%d-%d)? " % (1, len(demos)), end=" ") + try: + snum = int(sys.stdin.readline().strip()) - 1 + sent, grammar = demos[snum] + except: + print("Bad sentence number") + return + + # Tokenize the sentence. + tokens = sent.split() + + parser = ViterbiParser(grammar) + all_parses = {} + + print(f"\nsent: {sent}\nparser: {parser}\ngrammar: {grammar}") + parser.trace(3) + t = time.time() + parses = parser.parse_all(tokens) + time = time.time() - t + average = ( + reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) if parses else 0 + ) + num_parses = len(parses) + for p in parses: + all_parses[p.freeze()] = 1 + + # Print some summary statistics + print() + print("Time (secs) # Parses Average P(parse)") + print("-----------------------------------------") + print("%11.4f%11d%19.14f" % (time, num_parses, average)) + parses = all_parses.keys() + if parses: + p = reduce(lambda a, b: a + b.prob(), parses, 0) / len(parses) + else: + p = 0 + print("------------------------------------------") + print("%11s%11d%19.14f" % ("n/a", len(parses), p)) + + # Ask the user if we should draw the parses. + print() + print("Draw parses (y/n)? ", end=" ") + if sys.stdin.readline().strip().lower().startswith("y"): + from nltk.draw.tree import draw_trees + + print(" please wait...") + draw_trees(*parses) + + # Ask the user if we should print the parses. + print() + print("Print parses (y/n)? 
", end=" ") + if sys.stdin.readline().strip().lower().startswith("y"): + for parse in parses: + print(parse) + + +if __name__ == "__main__": + demo() diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57d63afe91bb82d1b2b1679b3af22f2e5291ca6c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baea1cd0991a97e3cce9aa88a417639b5b1a2762 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/api.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/api.py new file mode 100644 index 0000000000000000000000000000000000000000..27e45026cabe6d747f4b4a7dc108b7c3cec1c6f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/api.py @@ -0,0 +1,296 @@ +# Natural Language Toolkit: Tagger Interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +""" +Interface for tagging each token in a sentence with supplementary +information, such as its part of speech. +""" +from abc import ABCMeta, abstractmethod +from functools import lru_cache +from itertools import chain +from typing import Dict + +from nltk.internals import deprecated, overridden +from nltk.metrics import ConfusionMatrix, accuracy +from nltk.tag.util import untag + + +class TaggerI(metaclass=ABCMeta): + """ + A processing interface for assigning a tag to each token in a list. + Tags are case sensitive strings that identify some property of each + token, such as its part of speech or its sense. + + Some taggers require specific types for their tokens. This is + generally indicated by the use of a sub-interface to ``TaggerI``. + For example, featureset taggers, which are subclassed from + ``FeaturesetTagger``, require that each token be a ``featureset``. + + Subclasses must define: + - either ``tag()`` or ``tag_sents()`` (or both) + """ + + @abstractmethod + def tag(self, tokens): + """ + Determine the most appropriate tag sequence for the given + token sequence, and return a corresponding list of tagged + tokens. A tagged token is encoded as a tuple ``(token, tag)``. + + :rtype: list(tuple(str, str)) + """ + if overridden(self.tag_sents): + return self.tag_sents([tokens])[0] + + def tag_sents(self, sentences): + """ + Apply ``self.tag()`` to each element of *sentences*. I.e.:: + + return [self.tag(sent) for sent in sentences] + """ + return [self.tag(sent) for sent in sentences] + + @deprecated("Use accuracy(gold) instead.") + def evaluate(self, gold): + return self.accuracy(gold) + + def accuracy(self, gold): + """ + Score the accuracy of the tagger against the gold standard. + Strip the tags from the gold standard text, retag it using + the tagger, then compute the accuracy score. + + :param gold: The list of tagged sentences to score the tagger on. 
+ :type gold: list(list(tuple(str, str))) + :rtype: float + """ + + tagged_sents = self.tag_sents(untag(sent) for sent in gold) + gold_tokens = list(chain.from_iterable(gold)) + test_tokens = list(chain.from_iterable(tagged_sents)) + return accuracy(gold_tokens, test_tokens) + + @lru_cache(maxsize=1) + def _confusion_cached(self, gold): + """ + Inner function used after ``gold`` is converted to a + ``tuple(tuple(tuple(str, str)))``. That way, we can use caching on + creating a ConfusionMatrix. + + :param gold: The list of tagged sentences to run the tagger with, + also used as the reference values in the generated confusion matrix. + :type gold: tuple(tuple(tuple(str, str))) + :rtype: ConfusionMatrix + """ + + tagged_sents = self.tag_sents(untag(sent) for sent in gold) + gold_tokens = [token for _word, token in chain.from_iterable(gold)] + test_tokens = [token for _word, token in chain.from_iterable(tagged_sents)] + return ConfusionMatrix(gold_tokens, test_tokens) + + def confusion(self, gold): + """ + Return a ConfusionMatrix with the tags from ``gold`` as the reference + values, with the predictions from ``tag_sents`` as the predicted values. + + >>> from nltk.tag import PerceptronTagger + >>> from nltk.corpus import treebank + >>> tagger = PerceptronTagger() + >>> gold_data = treebank.tagged_sents()[:10] + >>> print(tagger.confusion(gold_data)) + | - | + | N | + | O P | + | N J J N N P P R R V V V V V W | + | ' E C C D E I J J J M N N N O R P R B R T V B B B B B D ` | + | ' , - . C D T X N J R S D N P S S P $ B R P O B D G N P Z T ` | + -------+----------------------------------------------------------------------------------------------+ + '' | <1> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | + , | .<15> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | + -NONE- | . . <.> . . 2 . . . 2 . . . 5 1 . . . . 2 . . . . . . . . . . . | + . | . . .<10> . . . . . . . . . . . . . . . . . . . . . . . . . . . | + CC | . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . . . . | + CD | . . . . . <5> . . . . . . . . . . . . . . . . . . . . . . . . . | + DT | . . . . . .<20> . . . . . . . . . . . . . . . . . . . . . . . . | + EX | . . . . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . | + IN | . . . . . . . .<22> . . . . . . . . . . 3 . . . . . . . . . . . | + JJ | . . . . . . . . .<16> . . . . 1 . . . . 1 . . . . . . . . . . . | + JJR | . . . . . . . . . . <.> . . . . . . . . . . . . . . . . . . . . | + JJS | . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . . | + MD | . . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . | + NN | . . . . . . . . . . . . .<28> 1 1 . . . . . . . . . . . . . . . | + NNP | . . . . . . . . . . . . . .<25> . . . . . . . . . . . . . . . . | + NNS | . . . . . . . . . . . . . . .<19> . . . . . . . . . . . . . . . | + POS | . . . . . . . . . . . . . . . . <1> . . . . . . . . . . . . . . | + PRP | . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . . . | + PRP$ | . . . . . . . . . . . . . . . . . . <2> . . . . . . . . . . . . | + RB | . . . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . | + RBR | . . . . . . . . . . 1 . . . . . . . . . <1> . . . . . . . . . . | + RP | . . . . . . . . . . . . . . . . . . . . . <1> . . . . . . . . . | + TO | . . . . . . . . . . . . . . . . . . . . . . <5> . . . . . . . . | + VB | . . . . . . . . . . . . . . . . . . . . . . . <3> . . . . . . . | + VBD | . . . . . . . . . . . . . 1 . . . . . . . . . . <6> . . . . . . | + VBG | . . . . . . . . . . . . . 1 . . . . . . 
. . . . . <4> . . . . . | + VBN | . . . . . . . . . . . . . . . . . . . . . . . . 1 . <4> . . . . | + VBP | . . . . . . . . . . . . . . . . . . . . . . . . . . . <3> . . . | + VBZ | . . . . . . . . . . . . . . . . . . . . . . . . . . . . <7> . . | + WDT | . . . . . . . . 2 . . . . . . . . . . . . . . . . . . . . <.> . | + `` | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <1>| + -------+----------------------------------------------------------------------------------------------+ + (row = reference; col = test) + + + :param gold: The list of tagged sentences to run the tagger with, + also used as the reference values in the generated confusion matrix. + :type gold: list(list(tuple(str, str))) + :rtype: ConfusionMatrix + """ + + return self._confusion_cached(tuple(tuple(sent) for sent in gold)) + + def recall(self, gold) -> Dict[str, float]: + """ + Compute the recall for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to recall. The recall is defined as: + + - *r* = true positive / (true positive + false negative) + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :return: A mapping from tags to recall + :rtype: Dict[str, float] + """ + + cm = self.confusion(gold) + return {tag: cm.recall(tag) for tag in cm._values} + + def precision(self, gold): + """ + Compute the precision for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to precision. The precision is defined as: + + - *p* = true positive / (true positive + false positive) + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :return: A mapping from tags to precision + :rtype: Dict[str, float] + """ + + cm = self.confusion(gold) + return {tag: cm.precision(tag) for tag in cm._values} + + def f_measure(self, gold, alpha=0.5): + """ + Compute the f-measure for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to f-measure. The f-measure is the harmonic mean + of the ``precision`` and ``recall``, weighted by ``alpha``. + In particular, given the precision *p* and recall *r* defined by: + + - *p* = true positive / (true positive + false positive) + - *r* = true positive / (true positive + false negative) + + The f-measure is: + + - *1/(alpha/p + (1-alpha)/r)* + + With ``alpha = 0.5``, this reduces to: + + - *2pr / (p + r)* + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :param alpha: Ratio of the cost of false negative compared to false + positives. Defaults to 0.5, where the costs are equal. + :type alpha: float + :return: A mapping from tags to f-measure + :rtype: Dict[str, float] + """ + cm = self.confusion(gold) + return {tag: cm.f_measure(tag, alpha) for tag in cm._values} + + def evaluate_per_tag(self, gold, alpha=0.5, truncate=None, sort_by_count=False): + """Tabulate the **recall**, **precision** and **f-measure** + for each tag from ``gold`` or from running ``tag`` on the tokenized + sentences from ``gold``. + + >>> from nltk.tag import PerceptronTagger + >>> from nltk.corpus import treebank + >>> tagger = PerceptronTagger() + >>> gold_data = treebank.tagged_sents()[:10] + >>> print(tagger.evaluate_per_tag(gold_data)) + Tag | Prec.
| Recall | F-measure + -------+--------+--------+----------- + '' | 1.0000 | 1.0000 | 1.0000 + , | 1.0000 | 1.0000 | 1.0000 + -NONE- | 0.0000 | 0.0000 | 0.0000 + . | 1.0000 | 1.0000 | 1.0000 + CC | 1.0000 | 1.0000 | 1.0000 + CD | 0.7143 | 1.0000 | 0.8333 + DT | 1.0000 | 1.0000 | 1.0000 + EX | 1.0000 | 1.0000 | 1.0000 + IN | 0.9167 | 0.8800 | 0.8980 + JJ | 0.8889 | 0.8889 | 0.8889 + JJR | 0.0000 | 0.0000 | 0.0000 + JJS | 1.0000 | 1.0000 | 1.0000 + MD | 1.0000 | 1.0000 | 1.0000 + NN | 0.8000 | 0.9333 | 0.8615 + NNP | 0.8929 | 1.0000 | 0.9434 + NNS | 0.9500 | 1.0000 | 0.9744 + POS | 1.0000 | 1.0000 | 1.0000 + PRP | 1.0000 | 1.0000 | 1.0000 + PRP$ | 1.0000 | 1.0000 | 1.0000 + RB | 0.4000 | 1.0000 | 0.5714 + RBR | 1.0000 | 0.5000 | 0.6667 + RP | 1.0000 | 1.0000 | 1.0000 + TO | 1.0000 | 1.0000 | 1.0000 + VB | 1.0000 | 1.0000 | 1.0000 + VBD | 0.8571 | 0.8571 | 0.8571 + VBG | 1.0000 | 0.8000 | 0.8889 + VBN | 1.0000 | 0.8000 | 0.8889 + VBP | 1.0000 | 1.0000 | 1.0000 + VBZ | 1.0000 | 1.0000 | 1.0000 + WDT | 0.0000 | 0.0000 | 0.0000 + `` | 1.0000 | 1.0000 | 1.0000 + + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :param alpha: Ratio of the cost of false negative compared to false + positives, as used in the f-measure computation. Defaults to 0.5, + where the costs are equal. + :type alpha: float + :param truncate: If specified, then only show the specified + number of values. Any sorting (e.g., sort_by_count) + will be performed before truncation. Defaults to None + :type truncate: int, optional + :param sort_by_count: Whether to sort the outputs on number of + occurrences of that tag in the ``gold`` data, defaults to False + :type sort_by_count: bool, optional + :return: A tabulated recall, precision and f-measure string + :rtype: str + """ + cm = self.confusion(gold) + return cm.evaluate(alpha=alpha, truncate=truncate, sort_by_count=sort_by_count) + + def _check_params(self, train, model): + if (train and model) or (not train and not model): + raise ValueError("Must specify either training data or trained model.") + + +class FeaturesetTaggerI(TaggerI): + """ + A tagger that requires tokens to be ``featuresets``. A featureset + is a dictionary that maps from feature names to feature + values. See ``nltk.classify`` for more information about features + and featuresets. + """ diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/brill.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/brill.py new file mode 100644 index 0000000000000000000000000000000000000000..d3bd1cd3b6cb10c4b62b7d23910e2a8ba9568cd2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/brill.py @@ -0,0 +1,449 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from collections import Counter, defaultdict + +from nltk import jsontags +from nltk.tag import TaggerI +from nltk.tbl import Feature, Template + +###################################################################### +# Brill Templates +###################################################################### + + +@jsontags.register_tag +class Word(Feature): + """ + Feature which examines the text (word) of nearby tokens. 
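+
+    For example, with an arbitrary tagged sentence:
+
+    >>> Word.extract_property([("The", "DT"), ("cat", "NN")], 1)
+    'cat'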
+ """ + + json_tag = "nltk.tag.brill.Word" + + @staticmethod + def extract_property(tokens, index): + """@return: The given token's text.""" + return tokens[index][0] + + +@jsontags.register_tag +class Pos(Feature): + """ + Feature which examines the tags of nearby tokens. + """ + + json_tag = "nltk.tag.brill.Pos" + + @staticmethod + def extract_property(tokens, index): + """@return: The given token's tag.""" + return tokens[index][1] + + +def nltkdemo18(): + """ + Return 18 templates, from the original nltk demo, in multi-feature syntax + """ + return [ + Template(Pos([-1])), + Template(Pos([1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([-2, -1])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([1, 2, 3])), + Template(Pos([-1]), Pos([1])), + Template(Word([-1])), + Template(Word([1])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([-2, -1])), + Template(Word([1, 2])), + Template(Word([-3, -2, -1])), + Template(Word([1, 2, 3])), + Template(Word([-1]), Word([1])), + ] + + +def nltkdemo18plus(): + """ + Return 18 templates, from the original nltk demo, and additionally a few + multi-feature ones (the motivation is easy comparison with nltkdemo18) + """ + return nltkdemo18() + [ + Template(Word([-1]), Pos([1])), + Template(Pos([-1]), Word([1])), + Template(Word([-1]), Word([0]), Pos([1])), + Template(Pos([-1]), Word([0]), Word([1])), + Template(Pos([-1]), Word([0]), Pos([1])), + ] + + +def fntbl37(): + """ + Return 37 templates taken from the postagging task of the + fntbl distribution https://www.cs.jhu.edu/~rflorian/fntbl/ + (37 is after excluding a handful which do not condition on Pos[0]; + fntbl can do that but the current nltk implementation cannot.) + """ + return [ + Template(Word([0]), Word([1]), Word([2])), + Template(Word([-1]), Word([0]), Word([1])), + Template(Word([0]), Word([-1])), + Template(Word([0]), Word([1])), + Template(Word([0]), Word([2])), + Template(Word([0]), Word([-2])), + Template(Word([1, 2])), + Template(Word([-2, -1])), + Template(Word([1, 2, 3])), + Template(Word([-3, -2, -1])), + Template(Word([0]), Pos([2])), + Template(Word([0]), Pos([-2])), + Template(Word([0]), Pos([1])), + Template(Word([0]), Pos([-1])), + Template(Word([0])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([1])), + Template(Word([-1])), + Template(Pos([-1]), Pos([1])), + Template(Pos([1]), Pos([2])), + Template(Pos([-1]), Pos([-2])), + Template(Pos([1])), + Template(Pos([-1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([1, 2, 3])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([-2, -1])), + Template(Pos([1]), Word([0]), Word([1])), + Template(Pos([1]), Word([0]), Word([-1])), + Template(Pos([-1]), Word([-1]), Word([0])), + Template(Pos([-1]), Word([0]), Word([1])), + Template(Pos([-2]), Pos([-1])), + Template(Pos([1]), Pos([2])), + Template(Pos([1]), Pos([2]), Word([1])), + ] + + +def brill24(): + """ + Return 24 templates of the seminal TBL paper, Brill (1995) + """ + return [ + Template(Pos([-1])), + Template(Pos([1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([-2, -1])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([1, 2, 3])), + Template(Pos([-1]), Pos([1])), + Template(Pos([-2]), Pos([-1])), + Template(Pos([1]), Pos([2])), + Template(Word([-1])), + Template(Word([1])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([-2, -1])), + Template(Word([1, 2])), + Template(Word([-1, 0])), + Template(Word([0, 1])), + 
Template(Word([0])), + Template(Word([-1]), Pos([-1])), + Template(Word([1]), Pos([1])), + Template(Word([0]), Word([-1]), Pos([-1])), + Template(Word([0]), Word([1]), Pos([1])), + ] + + +def describe_template_sets(): + """ + Print the available template sets in this demo, with a short description" + """ + import inspect + import sys + + # a bit of magic to get all functions in this module + templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction) + for (name, obj) in templatesets: + if name == "describe_template_sets": + continue + print(name, obj.__doc__, "\n") + + +###################################################################### +# The Brill Tagger +###################################################################### + + +@jsontags.register_tag +class BrillTagger(TaggerI): + """ + Brill's transformational rule-based tagger. Brill taggers use an + initial tagger (such as ``tag.DefaultTagger``) to assign an initial + tag sequence to a text; and then apply an ordered list of + transformational rules to correct the tags of individual tokens. + These transformation rules are specified by the ``TagRule`` + interface. + + Brill taggers can be created directly, from an initial tagger and + a list of transformational rules; but more often, Brill taggers + are created by learning rules from a training corpus, using one + of the TaggerTrainers available. + """ + + json_tag = "nltk.tag.BrillTagger" + + def __init__(self, initial_tagger, rules, training_stats=None): + """ + :param initial_tagger: The initial tagger + :type initial_tagger: TaggerI + + :param rules: An ordered list of transformation rules that + should be used to correct the initial tagging. + :type rules: list(TagRule) + + :param training_stats: A dictionary of statistics collected + during training, for possible later use + :type training_stats: dict + + """ + self._initial_tagger = initial_tagger + self._rules = tuple(rules) + self._training_stats = training_stats + + def encode_json_obj(self): + return self._initial_tagger, self._rules, self._training_stats + + @classmethod + def decode_json_obj(cls, obj): + _initial_tagger, _rules, _training_stats = obj + return cls(_initial_tagger, _rules, _training_stats) + + def rules(self): + """ + Return the ordered list of transformation rules that this tagger has learnt + + :return: the ordered list of transformation rules that correct the initial tagging + :rtype: list of Rules + """ + return self._rules + + def train_stats(self, statistic=None): + """ + Return a named statistic collected during training, or a dictionary of all + available statistics if no name given + + :param statistic: name of statistic + :type statistic: str + :return: some statistic collected during training of this tagger + :rtype: any (but usually a number) + """ + if statistic is None: + return self._training_stats + else: + return self._training_stats.get(statistic) + + def tag(self, tokens): + # Inherit documentation from TaggerI + + # Run the initial tagger. + tagged_tokens = self._initial_tagger.tag(tokens) + + # Create a dictionary that maps each tag to a list of the + # indices of tokens that have that tag. + tag_to_positions = defaultdict(set) + for i, (token, tag) in enumerate(tagged_tokens): + tag_to_positions[tag].add(i) + + # Apply each rule, in order. Only try to apply rules at + # positions that have the desired original tag. 
+ for rule in self._rules: + # Find the positions where it might apply + positions = tag_to_positions.get(rule.original_tag, []) + # Apply the rule at those positions. + changed = rule.apply(tagged_tokens, positions) + # Update tag_to_positions with the positions of tags that + # were modified. + for i in changed: + tag_to_positions[rule.original_tag].remove(i) + tag_to_positions[rule.replacement_tag].add(i) + + return tagged_tokens + + def print_template_statistics(self, test_stats=None, printunused=True): + """ + Print a list of all templates, ranked according to efficiency. + + If test_stats is available, the templates are ranked according to their + relative contribution (summed for all rules created from a given template, + weighted by score) to the performance on the test set. If no test_stats, then + statistics collected during training are used instead. There is also + an unweighted measure (just counting the rules). This is less informative, + though, as many low-score rules will appear towards end of training. + + :param test_stats: dictionary of statistics collected during testing + :type test_stats: dict of str -> any (but usually numbers) + :param printunused: if True, print a list of all unused templates + :type printunused: bool + :return: None + :rtype: None + """ + tids = [r.templateid for r in self._rules] + train_stats = self.train_stats() + + trainscores = train_stats["rulescores"] + assert len(trainscores) == len( + tids + ), "corrupt statistics: " "{} train scores for {} rules".format( + trainscores, tids + ) + template_counts = Counter(tids) + weighted_traincounts = Counter() + for (tid, score) in zip(tids, trainscores): + weighted_traincounts[tid] += score + tottrainscores = sum(trainscores) + + # det_tplsort() is for deterministic sorting; + # the otherwise convenient Counter.most_common() unfortunately + # does not break ties deterministically + # between python versions and will break cross-version tests + def det_tplsort(tpl_value): + return (tpl_value[1], repr(tpl_value[0])) + + def print_train_stats(): + print( + "TEMPLATE STATISTICS (TRAIN) {} templates, {} rules)".format( + len(template_counts), len(tids) + ) + ) + print( + "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} " + "final: {finalerrors:5d} {finalacc:.4f}".format(**train_stats) + ) + head = "#ID | Score (train) | #Rules | Template" + print(head, "\n", "-" * len(head), sep="") + train_tplscores = sorted( + weighted_traincounts.items(), key=det_tplsort, reverse=True + ) + for (tid, trainscore) in train_tplscores: + s = "{} | {:5d} {:5.3f} |{:4d} {:.3f} | {}".format( + tid, + trainscore, + trainscore / tottrainscores, + template_counts[tid], + template_counts[tid] / len(tids), + Template.ALLTEMPLATES[int(tid)], + ) + print(s) + + def print_testtrain_stats(): + testscores = test_stats["rulescores"] + print( + "TEMPLATE STATISTICS (TEST AND TRAIN) ({} templates, {} rules)".format( + len(template_counts), len(tids) + ) + ) + print( + "TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} " + "final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats) + ) + print( + "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} " + "final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats) + ) + weighted_testcounts = Counter() + for (tid, score) in zip(tids, testscores): + weighted_testcounts[tid] += score + tottestscores = sum(testscores) + head = "#ID | Score (test) | Score (train) | #Rules | Template" + print(head, "\n", "-" * 
len(head), sep="") + test_tplscores = sorted( + weighted_testcounts.items(), key=det_tplsort, reverse=True + ) + for (tid, testscore) in test_tplscores: + s = "{:s} |{:5d} {:6.3f} | {:4d} {:.3f} |{:4d} {:.3f} | {:s}".format( + tid, + testscore, + testscore / tottestscores, + weighted_traincounts[tid], + weighted_traincounts[tid] / tottrainscores, + template_counts[tid], + template_counts[tid] / len(tids), + Template.ALLTEMPLATES[int(tid)], + ) + print(s) + + def print_unused_templates(): + usedtpls = {int(tid) for tid in tids} + unused = [ + (tid, tpl) + for (tid, tpl) in enumerate(Template.ALLTEMPLATES) + if tid not in usedtpls + ] + print(f"UNUSED TEMPLATES ({len(unused)})") + + for (tid, tpl) in unused: + print(f"{tid:03d} {str(tpl):s}") + + if test_stats is None: + print_train_stats() + else: + print_testtrain_stats() + print() + if printunused: + print_unused_templates() + print() + + def batch_tag_incremental(self, sequences, gold): + """ + Tags by applying each rule to the entire corpus (rather than all rules to a + single sequence). The point is to collect statistics on the test set for + individual rules. + + NOTE: This is inefficient (does not build any index, so will traverse the entire + corpus N times for N rules) -- usually you would not care about statistics for + individual rules and thus use batch_tag() instead + + :param sequences: lists of token sequences (sentences, in some applications) to be tagged + :type sequences: list of list of strings + :param gold: the gold standard + :type gold: list of list of strings + :returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule)) + """ + + def counterrors(xs): + return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair)) + + testing_stats = {} + testing_stats["tokencount"] = sum(len(t) for t in sequences) + testing_stats["sequencecount"] = len(sequences) + tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences] + testing_stats["initialerrors"] = counterrors(tagged_tokenses) + testing_stats["initialacc"] = ( + 1 - testing_stats["initialerrors"] / testing_stats["tokencount"] + ) + # Apply each rule to the entire corpus, in order + errors = [testing_stats["initialerrors"]] + for rule in self._rules: + for tagged_tokens in tagged_tokenses: + rule.apply(tagged_tokens) + errors.append(counterrors(tagged_tokenses)) + testing_stats["rulescores"] = [ + err0 - err1 for (err0, err1) in zip(errors, errors[1:]) + ] + testing_stats["finalerrors"] = errors[-1] + testing_stats["finalacc"] = ( + 1 - testing_stats["finalerrors"] / testing_stats["tokencount"] + ) + return (tagged_tokenses, testing_stats) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/hmm.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/hmm.py new file mode 100644 index 0000000000000000000000000000000000000000..6577789b883828ce01e84c0864de57eead81f12b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/hmm.py @@ -0,0 +1,1329 @@ +# Natural Language Toolkit: Hidden Markov Model +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Philip Blunsom +# Tiago Tresoldi (fixes) +# Steven Bird (fixes) +# Joseph Frazee (fixes) +# Steven Xu (fixes) +# URL: +# For license information, see LICENSE.TXT + +""" +Hidden Markov Models (HMMs) largely used to assign the correct label sequence +to sequential data or assess the probability of a given label and data +sequence. 
These models are finite state machines characterised by a number of +states, transitions between these states, and output symbols emitted while in +each state. The HMM is an extension to the Markov chain, where each state +corresponds deterministically to a given event. In the HMM the observation is +a probabilistic function of the state. HMMs share the Markov chain's +assumption, being that the probability of transition from one state to another +only depends on the current state - i.e. the series of states that led to the +current state are not used. They are also time invariant. + +The HMM is a directed graph, with probability weighted edges (representing the +probability of a transition between the source and sink states) where each +vertex emits an output symbol when entered. The symbol (or observation) is +non-deterministically generated. For this reason, knowing that a sequence of +output observations was generated by a given HMM does not mean that the +corresponding sequence of states (and what the current state is) is known. +This is the 'hidden' in the hidden markov model. + +Formally, a HMM can be characterised by: + +- the output observation alphabet. This is the set of symbols which may be + observed as output of the system. +- the set of states. +- the transition probabilities *a_{ij} = P(s_t = j | s_{t-1} = i)*. These + represent the probability of transition to each state from a given state. +- the output probability matrix *b_i(k) = P(X_t = o_k | s_t = i)*. These + represent the probability of observing each symbol in a given state. +- the initial state distribution. This gives the probability of starting + in each state. + +To ground this discussion, take a common NLP application, part-of-speech (POS) +tagging. An HMM is desirable for this task as the highest probability tag +sequence can be calculated for a given sequence of word forms. This differs +from other tagging techniques which often tag each word individually, seeking +to optimise each individual tagging greedily without regard to the optimal +combination of tags for a larger unit, such as a sentence. The HMM does this +with the Viterbi algorithm, which efficiently computes the optimal path +through the graph given the sequence of words forms. + +In POS tagging the states usually have a 1:1 correspondence with the tag +alphabet - i.e. each state represents a single tag. The output observation +alphabet is the set of word forms (the lexicon), and the remaining three +parameters are derived by a training regime. With this information the +probability of a given sentence can be easily derived, by simply summing the +probability of each distinct path through the model. Similarly, the highest +probability tagging sequence can be derived with the Viterbi algorithm, +yielding a state sequence which can be mapped into a tag sequence. + +This discussion assumes that the HMM has been trained. This is probably the +most difficult task with the model, and requires either MLE estimates of the +parameters or unsupervised learning using the Baum-Welch algorithm, a variant +of EM. + +For more information, please consult the source code for this module, +which includes extensive demonstration code. 
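+
+A minimal usage sketch (illustrative only; it assumes the treebank sample
+corpus has been downloaded, and the resulting tags are not shown here):
+
+    >>> from nltk.corpus import treebank
+    >>> from nltk.tag.hmm import HiddenMarkovModelTagger
+    >>> tagger = HiddenMarkovModelTagger.train(treebank.tagged_sents()[:100])
+    >>> tagged = tagger.tag("the old man saw the dog".split())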
+""" + +import itertools +import re + +try: + import numpy as np +except ImportError: + pass + +from nltk.metrics import accuracy +from nltk.probability import ( + ConditionalFreqDist, + ConditionalProbDist, + DictionaryConditionalProbDist, + DictionaryProbDist, + FreqDist, + LidstoneProbDist, + MLEProbDist, + MutableProbDist, + RandomProbDist, +) +from nltk.tag.api import TaggerI +from nltk.util import LazyMap, unique_list + +_TEXT = 0 # index of text in a tuple +_TAG = 1 # index of tag in a tuple + + +def _identity(labeled_symbols): + return labeled_symbols + + +class HiddenMarkovModelTagger(TaggerI): + """ + Hidden Markov model class, a generative model for labelling sequence data. + These models define the joint probability of a sequence of symbols and + their labels (state transitions) as the product of the starting state + probability, the probability of each state transition, and the probability + of each observation being generated from each state. This is described in + more detail in the module documentation. + + This implementation is based on the HMM description in Chapter 8, Huang, + Acero and Hon, Spoken Language Processing and includes an extension for + training shallow HMM parsers or specialized HMMs as in Molina et. + al, 2002. A specialized HMM modifies training data by applying a + specialization function to create a new training set that is more + appropriate for sequential tagging with an HMM. A typical use case is + chunking. + + :param symbols: the set of output symbols (alphabet) + :type symbols: seq of any + :param states: a set of states representing state space + :type states: seq of any + :param transitions: transition probabilities; Pr(s_i | s_j) is the + probability of transition from state i given the model is in + state_j + :type transitions: ConditionalProbDistI + :param outputs: output probabilities; Pr(o_k | s_i) is the probability + of emitting symbol k when entering state i + :type outputs: ConditionalProbDistI + :param priors: initial state distribution; Pr(s_i) is the probability + of starting in state i + :type priors: ProbDistI + :param transform: an optional function for transforming training + instances, defaults to the identity function. 
+ :type transform: callable + """ + + def __init__( + self, symbols, states, transitions, outputs, priors, transform=_identity + ): + self._symbols = unique_list(symbols) + self._states = unique_list(states) + self._transitions = transitions + self._outputs = outputs + self._priors = priors + self._cache = None + self._transform = transform + + @classmethod + def _train( + cls, + labeled_sequence, + test_sequence=None, + unlabeled_sequence=None, + transform=_identity, + estimator=None, + **kwargs, + ): + + if estimator is None: + + def estimator(fd, bins): + return LidstoneProbDist(fd, 0.1, bins) + + labeled_sequence = LazyMap(transform, labeled_sequence) + symbols = unique_list(word for sent in labeled_sequence for word, tag in sent) + tag_set = unique_list(tag for sent in labeled_sequence for word, tag in sent) + + trainer = HiddenMarkovModelTrainer(tag_set, symbols) + hmm = trainer.train_supervised(labeled_sequence, estimator=estimator) + hmm = cls( + hmm._symbols, + hmm._states, + hmm._transitions, + hmm._outputs, + hmm._priors, + transform=transform, + ) + + if test_sequence: + hmm.test(test_sequence, verbose=kwargs.get("verbose", False)) + + if unlabeled_sequence: + max_iterations = kwargs.get("max_iterations", 5) + hmm = trainer.train_unsupervised( + unlabeled_sequence, model=hmm, max_iterations=max_iterations + ) + if test_sequence: + hmm.test(test_sequence, verbose=kwargs.get("verbose", False)) + + return hmm + + @classmethod + def train( + cls, labeled_sequence, test_sequence=None, unlabeled_sequence=None, **kwargs + ): + """ + Train a new HiddenMarkovModelTagger using the given labeled and + unlabeled training instances. Testing will be performed if test + instances are provided. + + :return: a hidden markov model tagger + :rtype: HiddenMarkovModelTagger + :param labeled_sequence: a sequence of labeled training instances, + i.e. a list of sentences represented as tuples + :type labeled_sequence: list(list) + :param test_sequence: a sequence of labeled test instances + :type test_sequence: list(list) + :param unlabeled_sequence: a sequence of unlabeled training instances, + i.e. a list of sentences represented as words + :type unlabeled_sequence: list(list) + :param transform: an optional function for transforming training + instances, defaults to the identity function, see ``transform()`` + :type transform: function + :param estimator: an optional function or class that maps a + condition's frequency distribution to its probability + distribution, defaults to a Lidstone distribution with gamma = 0.1 + :type estimator: class or function + :param verbose: boolean flag indicating whether training should be + verbose or include printed output + :type verbose: bool + :param max_iterations: number of Baum-Welch iterations to perform + :type max_iterations: int + """ + return cls._train(labeled_sequence, test_sequence, unlabeled_sequence, **kwargs) + + def probability(self, sequence): + """ + Returns the probability of the given symbol sequence. If the sequence + is labelled, then returns the joint probability of the symbol, state + sequence. Otherwise, uses the forward algorithm to find the + probability over all label sequences. 
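+
+        For a labelled sequence ``[(o_0, s_0), ..., (o_T, s_T)]`` this
+        corresponds to::
+
+            P(s_0) P(o_0 | s_0) prod_{t>0} P(s_t | s_{t-1}) P(o_t | s_t)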
+ + :return: the probability of the sequence + :rtype: float + :param sequence: the sequence of symbols which must contain the TEXT + property, and optionally the TAG property + :type sequence: Token + """ + return 2 ** (self.log_probability(self._transform(sequence))) + + def log_probability(self, sequence): + """ + Returns the log-probability of the given symbol sequence. If the + sequence is labelled, then returns the joint log-probability of the + symbol, state sequence. Otherwise, uses the forward algorithm to find + the log-probability over all label sequences. + + :return: the log-probability of the sequence + :rtype: float + :param sequence: the sequence of symbols which must contain the TEXT + property, and optionally the TAG property + :type sequence: Token + """ + sequence = self._transform(sequence) + + T = len(sequence) + + if T > 0 and sequence[0][_TAG]: + last_state = sequence[0][_TAG] + p = self._priors.logprob(last_state) + self._output_logprob( + last_state, sequence[0][_TEXT] + ) + for t in range(1, T): + state = sequence[t][_TAG] + p += self._transitions[last_state].logprob( + state + ) + self._output_logprob(state, sequence[t][_TEXT]) + last_state = state + return p + else: + alpha = self._forward_probability(sequence) + p = logsumexp2(alpha[T - 1]) + return p + + def tag(self, unlabeled_sequence): + """ + Tags the sequence with the highest probability state sequence. This + uses the best_path method to find the Viterbi path. + + :return: a labelled sequence of symbols + :rtype: list + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._tag(unlabeled_sequence) + + def _tag(self, unlabeled_sequence): + path = self._best_path(unlabeled_sequence) + return list(zip(unlabeled_sequence, path)) + + def _output_logprob(self, state, symbol): + """ + :return: the log probability of the symbol being observed in the given + state + :rtype: float + """ + return self._outputs[state].logprob(symbol) + + def _create_cache(self): + """ + The cache is a tuple (P, O, X, S) where: + + - S maps symbols to integers. 
I.e., it is the inverse + mapping from self._symbols; for each symbol s in + self._symbols, the following is true:: + + self._symbols[S[s]] == s + + - O is the log output probabilities:: + + O[i,k] = log( P(token[t]=sym[k]|tag[t]=state[i]) ) + + - X is the log transition probabilities:: + + X[i,j] = log( P(tag[t]=state[j]|tag[t-1]=state[i]) ) + + - P is the log prior probabilities:: + + P[i] = log( P(tag[0]=state[i]) ) + """ + if not self._cache: + N = len(self._states) + M = len(self._symbols) + P = np.zeros(N, np.float32) + X = np.zeros((N, N), np.float32) + O = np.zeros((N, M), np.float32) + for i in range(N): + si = self._states[i] + P[i] = self._priors.logprob(si) + for j in range(N): + X[i, j] = self._transitions[si].logprob(self._states[j]) + for k in range(M): + O[i, k] = self._output_logprob(si, self._symbols[k]) + S = {} + for k in range(M): + S[self._symbols[k]] = k + self._cache = (P, O, X, S) + + def _update_cache(self, symbols): + # add new symbols to the symbol table and repopulate the output + # probabilities and symbol table mapping + if symbols: + self._create_cache() + P, O, X, S = self._cache + for symbol in symbols: + if symbol not in self._symbols: + self._cache = None + self._symbols.append(symbol) + # don't bother with the work if there aren't any new symbols + if not self._cache: + N = len(self._states) + M = len(self._symbols) + Q = O.shape[1] + # add new columns to the output probability table without + # destroying the old probabilities + O = np.hstack([O, np.zeros((N, M - Q), np.float32)]) + for i in range(N): + si = self._states[i] + # only calculate probabilities for new symbols + for k in range(Q, M): + O[i, k] = self._output_logprob(si, self._symbols[k]) + # only create symbol mappings for new symbols + for k in range(Q, M): + S[self._symbols[k]] = k + self._cache = (P, O, X, S) + + def reset_cache(self): + self._cache = None + + def best_path(self, unlabeled_sequence): + """ + Returns the state sequence of the optimal (most probable) path through + the HMM. Uses the Viterbi algorithm to calculate this part by dynamic + programming. + + :return: the state sequence + :rtype: sequence of any + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._best_path(unlabeled_sequence) + + def _best_path(self, unlabeled_sequence): + T = len(unlabeled_sequence) + N = len(self._states) + self._create_cache() + self._update_cache(unlabeled_sequence) + P, O, X, S = self._cache + + V = np.zeros((T, N), np.float32) + B = -np.ones((T, N), int) + + V[0] = P + O[:, S[unlabeled_sequence[0]]] + for t in range(1, T): + for j in range(N): + vs = V[t - 1, :] + X[:, j] + best = np.argmax(vs) + V[t, j] = vs[best] + O[j, S[unlabeled_sequence[t]]] + B[t, j] = best + + current = np.argmax(V[T - 1, :]) + sequence = [current] + for t in range(T - 1, 0, -1): + last = B[t, current] + sequence.append(last) + current = last + + sequence.reverse() + return list(map(self._states.__getitem__, sequence)) + + def best_path_simple(self, unlabeled_sequence): + """ + Returns the state sequence of the optimal (most probable) path through + the HMM. Uses the Viterbi algorithm to calculate this part by dynamic + programming. This uses a simple, direct method, and is included for + teaching purposes. 
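+
+        In log space, the recurrence computed below is::
+
+            V[0, i] = log P(s_i) + log P(o_0 | s_i)
+            V[t, j] = max_i ( V[t-1, i] + log P(s_j | s_i) ) + log P(o_t | s_j)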
+ + :return: the state sequence + :rtype: sequence of any + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._best_path_simple(unlabeled_sequence) + + def _best_path_simple(self, unlabeled_sequence): + T = len(unlabeled_sequence) + N = len(self._states) + V = np.zeros((T, N), np.float64) + B = {} + + # find the starting log probabilities for each state + symbol = unlabeled_sequence[0] + for i, state in enumerate(self._states): + V[0, i] = self._priors.logprob(state) + self._output_logprob(state, symbol) + B[0, state] = None + + # find the maximum log probabilities for reaching each state at time t + for t in range(1, T): + symbol = unlabeled_sequence[t] + for j in range(N): + sj = self._states[j] + best = None + for i in range(N): + si = self._states[i] + va = V[t - 1, i] + self._transitions[si].logprob(sj) + if not best or va > best[0]: + best = (va, si) + V[t, j] = best[0] + self._output_logprob(sj, symbol) + B[t, sj] = best[1] + + # find the highest probability final state + best = None + for i in range(N): + val = V[T - 1, i] + if not best or val > best[0]: + best = (val, self._states[i]) + + # traverse the back-pointers B to find the state sequence + current = best[1] + sequence = [current] + for t in range(T - 1, 0, -1): + last = B[t, current] + sequence.append(last) + current = last + + sequence.reverse() + return sequence + + def random_sample(self, rng, length): + """ + Randomly sample the HMM to generate a sentence of a given length. This + samples the prior distribution then the observation distribution and + transition distribution for each subsequent observation and state. + This will mostly generate unintelligible garbage, but can provide some + amusement. + + :return: the randomly created state/observation sequence, + generated according to the HMM's probability + distributions. The SUBTOKENS have TEXT and TAG + properties containing the observation and state + respectively. + :rtype: list + :param rng: random number generator + :type rng: Random (or any object with a random() method) + :param length: desired output length + :type length: int + """ + + # sample the starting state and symbol prob dists + tokens = [] + state = self._sample_probdist(self._priors, rng.random(), self._states) + symbol = self._sample_probdist( + self._outputs[state], rng.random(), self._symbols + ) + tokens.append((symbol, state)) + + for i in range(1, length): + # sample the state transition and symbol prob dists + state = self._sample_probdist( + self._transitions[state], rng.random(), self._states + ) + symbol = self._sample_probdist( + self._outputs[state], rng.random(), self._symbols + ) + tokens.append((symbol, state)) + + return tokens + + def _sample_probdist(self, probdist, p, samples): + cum_p = 0 + for sample in samples: + add_p = probdist.prob(sample) + if cum_p <= p <= cum_p + add_p: + return sample + cum_p += add_p + raise Exception("Invalid probability distribution - " "does not sum to one") + + def entropy(self, unlabeled_sequence): + """ + Returns the entropy over labellings of the given sequence. This is + given by:: + + H(O) = - sum_S Pr(S | O) log Pr(S | O) + + where the summation ranges over all state sequences, S. Let + *Z = Pr(O) = sum_S Pr(S, O)}* where the summation ranges over all state + sequences and O is the observation sequence. 
As such the entropy can + be re-expressed as:: + + H = - sum_S Pr(S | O) log [ Pr(S, O) / Z ] + = log Z - sum_S Pr(S | O) log Pr(S, 0) + = log Z - sum_S Pr(S | O) [ log Pr(S_0) + sum_t Pr(S_t | S_{t-1}) + sum_t Pr(O_t | S_t) ] + + The order of summation for the log terms can be flipped, allowing + dynamic programming to be used to calculate the entropy. Specifically, + we use the forward and backward probabilities (alpha, beta) giving:: + + H = log Z - sum_s0 alpha_0(s0) beta_0(s0) / Z * log Pr(s0) + + sum_t,si,sj alpha_t(si) Pr(sj | si) Pr(O_t+1 | sj) beta_t(sj) / Z * log Pr(sj | si) + + sum_t,st alpha_t(st) beta_t(st) / Z * log Pr(O_t | st) + + This simply uses alpha and beta to find the probabilities of partial + sequences, constrained to include the given state(s) at some point in + time. + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + alpha = self._forward_probability(unlabeled_sequence) + beta = self._backward_probability(unlabeled_sequence) + normalisation = logsumexp2(alpha[T - 1]) + + entropy = normalisation + + # starting state, t = 0 + for i, state in enumerate(self._states): + p = 2 ** (alpha[0, i] + beta[0, i] - normalisation) + entropy -= p * self._priors.logprob(state) + # print('p(s_0 = %s) =' % state, p) + + # state transitions + for t0 in range(T - 1): + t1 = t0 + 1 + for i0, s0 in enumerate(self._states): + for i1, s1 in enumerate(self._states): + p = 2 ** ( + alpha[t0, i0] + + self._transitions[s0].logprob(s1) + + self._outputs[s1].logprob(unlabeled_sequence[t1][_TEXT]) + + beta[t1, i1] + - normalisation + ) + entropy -= p * self._transitions[s0].logprob(s1) + # print('p(s_%d = %s, s_%d = %s) =' % (t0, s0, t1, s1), p) + + # symbol emissions + for t in range(T): + for i, state in enumerate(self._states): + p = 2 ** (alpha[t, i] + beta[t, i] - normalisation) + entropy -= p * self._outputs[state].logprob( + unlabeled_sequence[t][_TEXT] + ) + # print('p(s_%d = %s) =' % (t, state), p) + + return entropy + + def point_entropy(self, unlabeled_sequence): + """ + Returns the pointwise entropy over the possible states at each + position in the chain, given the observation sequence. 
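+ 
+ Each per-position value is ``H_t = -sum_s p(S_t = s | O) * log2 p(S_t = s | O)``,
+ computed from the normalised forward-backward products. As a small standalone
+ illustration of that formula (the posterior values here are made up):
+ 
+ >>> import numpy as np
+ >>> log_post = np.log2([0.5, 0.25, 0.25])  # hypothetical log2 posteriors for one position
+ >>> float(-np.sum(2 ** log_post * log_post))
+ 1.5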
+ """ + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + alpha = self._forward_probability(unlabeled_sequence) + beta = self._backward_probability(unlabeled_sequence) + normalisation = logsumexp2(alpha[T - 1]) + + entropies = np.zeros(T, np.float64) + probs = np.zeros(N, np.float64) + for t in range(T): + for s in range(N): + probs[s] = alpha[t, s] + beta[t, s] - normalisation + + for s in range(N): + entropies[t] -= 2 ** (probs[s]) * probs[s] + + return entropies + + def _exhaustive_entropy(self, unlabeled_sequence): + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + labellings = [[state] for state in self._states] + for t in range(T - 1): + current = labellings + labellings = [] + for labelling in current: + for state in self._states: + labellings.append(labelling + [state]) + + log_probs = [] + for labelling in labellings: + labeled_sequence = unlabeled_sequence[:] + for t, label in enumerate(labelling): + labeled_sequence[t] = (labeled_sequence[t][_TEXT], label) + lp = self.log_probability(labeled_sequence) + log_probs.append(lp) + normalisation = _log_add(*log_probs) + + entropy = 0 + for lp in log_probs: + lp -= normalisation + entropy -= 2 ** (lp) * lp + + return entropy + + def _exhaustive_point_entropy(self, unlabeled_sequence): + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + labellings = [[state] for state in self._states] + for t in range(T - 1): + current = labellings + labellings = [] + for labelling in current: + for state in self._states: + labellings.append(labelling + [state]) + + log_probs = [] + for labelling in labellings: + labelled_sequence = unlabeled_sequence[:] + for t, label in enumerate(labelling): + labelled_sequence[t] = (labelled_sequence[t][_TEXT], label) + lp = self.log_probability(labelled_sequence) + log_probs.append(lp) + + normalisation = _log_add(*log_probs) + + probabilities = _ninf_array((T, N)) + + for labelling, lp in zip(labellings, log_probs): + lp -= normalisation + for t, label in enumerate(labelling): + index = self._states.index(label) + probabilities[t, index] = _log_add(probabilities[t, index], lp) + + entropies = np.zeros(T, np.float64) + for t in range(T): + for s in range(N): + entropies[t] -= 2 ** (probabilities[t, s]) * probabilities[t, s] + + return entropies + + def _transitions_matrix(self): + """Return a matrix of transition log probabilities.""" + trans_iter = ( + self._transitions[sj].logprob(si) + for sj in self._states + for si in self._states + ) + + transitions_logprob = np.fromiter(trans_iter, dtype=np.float64) + N = len(self._states) + return transitions_logprob.reshape((N, N)).T + + def _outputs_vector(self, symbol): + """ + Return a vector with log probabilities of emitting a symbol + when entering states. + """ + out_iter = (self._output_logprob(sj, symbol) for sj in self._states) + return np.fromiter(out_iter, dtype=np.float64) + + def _forward_probability(self, unlabeled_sequence): + """ + Return the forward probability matrix, a T by N array of + log-probabilities, where T is the length of the sequence and N is the + number of states. Each entry (t, s) gives the probability of being in + state s at time t after observing the partial symbol sequence up to + and including t. 
+ + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + :return: the forward log probability matrix + :rtype: array + """ + T = len(unlabeled_sequence) + N = len(self._states) + alpha = _ninf_array((T, N)) + + transitions_logprob = self._transitions_matrix() + + # Initialization + symbol = unlabeled_sequence[0][_TEXT] + for i, state in enumerate(self._states): + alpha[0, i] = self._priors.logprob(state) + self._output_logprob( + state, symbol + ) + + # Induction + for t in range(1, T): + symbol = unlabeled_sequence[t][_TEXT] + output_logprob = self._outputs_vector(symbol) + + for i in range(N): + summand = alpha[t - 1] + transitions_logprob[i] + alpha[t, i] = logsumexp2(summand) + output_logprob[i] + + return alpha + + def _backward_probability(self, unlabeled_sequence): + """ + Return the backward probability matrix, a T by N array of + log-probabilities, where T is the length of the sequence and N is the + number of states. Each entry (t, s) gives the probability of being in + state s at time t after observing the partial symbol sequence from t + .. T. + + :return: the backward log probability matrix + :rtype: array + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + T = len(unlabeled_sequence) + N = len(self._states) + beta = _ninf_array((T, N)) + + transitions_logprob = self._transitions_matrix().T + + # initialise the backward values; + # "1" is an arbitrarily chosen value from Rabiner tutorial + beta[T - 1, :] = np.log2(1) + + # inductively calculate remaining backward values + for t in range(T - 2, -1, -1): + symbol = unlabeled_sequence[t + 1][_TEXT] + outputs = self._outputs_vector(symbol) + + for i in range(N): + summand = transitions_logprob[i] + beta[t + 1] + outputs + beta[t, i] = logsumexp2(summand) + + return beta + + def test(self, test_sequence, verbose=False, **kwargs): + """ + Tests the HiddenMarkovModelTagger instance. + + :param test_sequence: a sequence of labeled test instances + :type test_sequence: list(list) + :param verbose: boolean flag indicating whether training should be + verbose or include printed output + :type verbose: bool + """ + + def words(sent): + return [word for (word, tag) in sent] + + def tags(sent): + return [tag for (word, tag) in sent] + + def flatten(seq): + return list(itertools.chain(*seq)) + + test_sequence = self._transform(test_sequence) + predicted_sequence = list(map(self._tag, map(words, test_sequence))) + + if verbose: + for test_sent, predicted_sent in zip(test_sequence, predicted_sequence): + print( + "Test:", + " ".join(f"{token}/{tag}" for (token, tag) in test_sent), + ) + print() + print("Untagged:", " ".join("%s" % token for (token, tag) in test_sent)) + print() + print( + "HMM-tagged:", + " ".join(f"{token}/{tag}" for (token, tag) in predicted_sent), + ) + print() + print( + "Entropy:", + self.entropy([(token, None) for (token, tag) in predicted_sent]), + ) + print() + print("-" * 60) + + test_tags = flatten(map(tags, test_sequence)) + predicted_tags = flatten(map(tags, predicted_sequence)) + + acc = accuracy(test_tags, predicted_tags) + count = sum(len(sent) for sent in test_sequence) + print("accuracy over %d tokens: %.2f" % (count, acc * 100)) + + def __repr__(self): + return "" % ( + len(self._states), + len(self._symbols), + ) + + +class HiddenMarkovModelTrainer: + """ + Algorithms for learning HMM parameters from training data. These include + both supervised learning (MLE) and unsupervised learning (Baum-Welch). 
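+ 
+ An illustrative example of supervised training (the two-sentence corpus below
+ is made up; the default maximum likelihood estimator is used):
+ 
+ >>> trainer = HiddenMarkovModelTrainer()
+ >>> corpus = [[('the', 'DT'), ('cat', 'NN'), ('sat', 'VBD')],
+ ...           [('the', 'DT'), ('dog', 'NN'), ('ran', 'VBD')]]
+ >>> tagger = trainer.train_supervised(corpus)
+ >>> tagger.tag(['the', 'cat', 'ran'])
+ [('the', 'DT'), ('cat', 'NN'), ('ran', 'VBD')]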
+ + Creates an HMM trainer to induce an HMM with the given states and + output symbol alphabet. A supervised and unsupervised training + method may be used. If either of the states or symbols are not given, + these may be derived from supervised training. + + :param states: the set of state labels + :type states: sequence of any + :param symbols: the set of observation symbols + :type symbols: sequence of any + """ + + def __init__(self, states=None, symbols=None): + self._states = states if states else [] + self._symbols = symbols if symbols else [] + + def train(self, labeled_sequences=None, unlabeled_sequences=None, **kwargs): + """ + Trains the HMM using both (or either of) supervised and unsupervised + techniques. + + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param labelled_sequences: the supervised training data, a set of + labelled sequences of observations + ex: [ (word_1, tag_1),...,(word_n,tag_n) ] + :type labelled_sequences: list + :param unlabeled_sequences: the unsupervised training data, a set of + sequences of observations + ex: [ word_1, ..., word_n ] + :type unlabeled_sequences: list + :param kwargs: additional arguments to pass to the training methods + """ + assert labeled_sequences or unlabeled_sequences + model = None + if labeled_sequences: + model = self.train_supervised(labeled_sequences, **kwargs) + if unlabeled_sequences: + if model: + kwargs["model"] = model + model = self.train_unsupervised(unlabeled_sequences, **kwargs) + return model + + def _baum_welch_step(self, sequence, model, symbol_to_number): + + N = len(model._states) + M = len(model._symbols) + T = len(sequence) + + # compute forward and backward probabilities + alpha = model._forward_probability(sequence) + beta = model._backward_probability(sequence) + + # find the log probability of the sequence + lpk = logsumexp2(alpha[T - 1]) + + A_numer = _ninf_array((N, N)) + B_numer = _ninf_array((N, M)) + A_denom = _ninf_array(N) + B_denom = _ninf_array(N) + + transitions_logprob = model._transitions_matrix().T + + for t in range(T): + symbol = sequence[t][_TEXT] # not found? FIXME + next_symbol = None + if t < T - 1: + next_symbol = sequence[t + 1][_TEXT] # not found? FIXME + xi = symbol_to_number[symbol] + + next_outputs_logprob = model._outputs_vector(next_symbol) + alpha_plus_beta = alpha[t] + beta[t] + + if t < T - 1: + numer_add = ( + transitions_logprob + + next_outputs_logprob + + beta[t + 1] + + alpha[t].reshape(N, 1) + ) + A_numer = np.logaddexp2(A_numer, numer_add) + A_denom = np.logaddexp2(A_denom, alpha_plus_beta) + else: + B_denom = np.logaddexp2(A_denom, alpha_plus_beta) + + B_numer[:, xi] = np.logaddexp2(B_numer[:, xi], alpha_plus_beta) + + return lpk, A_numer, A_denom, B_numer, B_denom + + def train_unsupervised(self, unlabeled_sequences, update_outputs=True, **kwargs): + """ + Trains the HMM using the Baum-Welch algorithm to maximise the + probability of the data sequence. This is a variant of the EM + algorithm, and is unsupervised in that it doesn't need the state + sequences for the symbols. The code is based on 'A Tutorial on Hidden + Markov Models and Selected Applications in Speech Recognition', + Lawrence Rabiner, IEEE, 1989. 
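+ 
+ An illustrative usage sketch in the style of the ``demo_bw()`` function at the
+ bottom of this file; the observation sequences below are made up, and the
+ training call is skipped in doctests because it prints the log probability of
+ each iteration:
+ 
+ >>> states = ['bull', 'bear', 'static']
+ >>> symbols = ['up', 'down', 'unchanged']
+ >>> trainer = HiddenMarkovModelTrainer(states, symbols)
+ >>> data = [[('up', None), ('up', None), ('down', None)],
+ ...         [('down', None), ('unchanged', None), ('up', None)]]
+ >>> hmm = trainer.train_unsupervised(data, max_iterations=5)  # doctest: +SKIP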
+ + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param unlabeled_sequences: the training data, a set of + sequences of observations + :type unlabeled_sequences: list + + kwargs may include following parameters: + + :param model: a HiddenMarkovModelTagger instance used to begin + the Baum-Welch algorithm + :param max_iterations: the maximum number of EM iterations + :param convergence_logprob: the maximum change in log probability to + allow convergence + """ + + # create a uniform HMM, which will be iteratively refined, unless + # given an existing model + model = kwargs.get("model") + if not model: + priors = RandomProbDist(self._states) + transitions = DictionaryConditionalProbDist( + {state: RandomProbDist(self._states) for state in self._states} + ) + outputs = DictionaryConditionalProbDist( + {state: RandomProbDist(self._symbols) for state in self._states} + ) + model = HiddenMarkovModelTagger( + self._symbols, self._states, transitions, outputs, priors + ) + + self._states = model._states + self._symbols = model._symbols + + N = len(self._states) + M = len(self._symbols) + symbol_numbers = {sym: i for i, sym in enumerate(self._symbols)} + + # update model prob dists so that they can be modified + # model._priors = MutableProbDist(model._priors, self._states) + + model._transitions = DictionaryConditionalProbDist( + { + s: MutableProbDist(model._transitions[s], self._states) + for s in self._states + } + ) + + if update_outputs: + model._outputs = DictionaryConditionalProbDist( + { + s: MutableProbDist(model._outputs[s], self._symbols) + for s in self._states + } + ) + + model.reset_cache() + + # iterate until convergence + converged = False + last_logprob = None + iteration = 0 + max_iterations = kwargs.get("max_iterations", 1000) + epsilon = kwargs.get("convergence_logprob", 1e-6) + + while not converged and iteration < max_iterations: + A_numer = _ninf_array((N, N)) + B_numer = _ninf_array((N, M)) + A_denom = _ninf_array(N) + B_denom = _ninf_array(N) + + logprob = 0 + for sequence in unlabeled_sequences: + sequence = list(sequence) + if not sequence: + continue + + ( + lpk, + seq_A_numer, + seq_A_denom, + seq_B_numer, + seq_B_denom, + ) = self._baum_welch_step(sequence, model, symbol_numbers) + + # add these sums to the global A and B values + for i in range(N): + A_numer[i] = np.logaddexp2(A_numer[i], seq_A_numer[i] - lpk) + B_numer[i] = np.logaddexp2(B_numer[i], seq_B_numer[i] - lpk) + + A_denom = np.logaddexp2(A_denom, seq_A_denom - lpk) + B_denom = np.logaddexp2(B_denom, seq_B_denom - lpk) + + logprob += lpk + + # use the calculated values to update the transition and output + # probability values + for i in range(N): + logprob_Ai = A_numer[i] - A_denom[i] + logprob_Bi = B_numer[i] - B_denom[i] + + # We should normalize all probabilities (see p.391 Huang et al) + # Let sum(P) be K. + # We can divide each Pi by K to make sum(P) == 1. + # Pi' = Pi/K + # log2(Pi') = log2(Pi) - log2(K) + logprob_Ai -= logsumexp2(logprob_Ai) + logprob_Bi -= logsumexp2(logprob_Bi) + + # update output and transition probabilities + si = self._states[i] + + for j in range(N): + sj = self._states[j] + model._transitions[si].update(sj, logprob_Ai[j]) + + if update_outputs: + for k in range(M): + ok = self._symbols[k] + model._outputs[si].update(ok, logprob_Bi[k]) + + # Rabiner says the priors don't need to be updated. I don't + # believe him. 
FIXME + + # test for convergence + if iteration > 0 and abs(logprob - last_logprob) < epsilon: + converged = True + + print("iteration", iteration, "logprob", logprob) + iteration += 1 + last_logprob = logprob + + return model + + def train_supervised(self, labelled_sequences, estimator=None): + """ + Supervised training maximising the joint probability of the symbol and + state sequences. This is done via collecting frequencies of + transitions between states, symbol observations while within each + state and which states start a sentence. These frequency distributions + are then normalised into probability estimates, which can be + smoothed if desired. + + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param labelled_sequences: the training data, a set of + labelled sequences of observations + :type labelled_sequences: list + :param estimator: a function taking + a FreqDist and a number of bins and returning a CProbDistI; + otherwise a MLE estimate is used + """ + + # default to the MLE estimate + if estimator is None: + estimator = lambda fdist, bins: MLEProbDist(fdist) + + # count occurrences of starting states, transitions out of each state + # and output symbols observed in each state + known_symbols = set(self._symbols) + known_states = set(self._states) + + starting = FreqDist() + transitions = ConditionalFreqDist() + outputs = ConditionalFreqDist() + for sequence in labelled_sequences: + lasts = None + for token in sequence: + state = token[_TAG] + symbol = token[_TEXT] + if lasts is None: + starting[state] += 1 + else: + transitions[lasts][state] += 1 + outputs[state][symbol] += 1 + lasts = state + + # update the state and symbol lists + if state not in known_states: + self._states.append(state) + known_states.add(state) + + if symbol not in known_symbols: + self._symbols.append(symbol) + known_symbols.add(symbol) + + # create probability distributions (with smoothing) + N = len(self._states) + pi = estimator(starting, N) + A = ConditionalProbDist(transitions, estimator, N) + B = ConditionalProbDist(outputs, estimator, len(self._symbols)) + + return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi) + + +def _ninf_array(shape): + res = np.empty(shape, np.float64) + res.fill(-np.inf) + return res + + +def logsumexp2(arr): + max_ = arr.max() + return np.log2(np.sum(2 ** (arr - max_))) + max_ + + +def _log_add(*values): + """ + Adds the logged values, returning the logarithm of the addition. 
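+ 
+ For illustration, adding two probabilities of 0.25 in the log2 domain should
+ give log2(0.5):
+ 
+ >>> import numpy as np
+ >>> from nltk.tag.hmm import _log_add
+ >>> float(2 ** _log_add(np.log2(0.25), np.log2(0.25)))
+ 0.5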
+ """ + x = max(values) + if x > -np.inf: + sum_diffs = 0 + for value in values: + sum_diffs += 2 ** (value - x) + return x + np.log2(sum_diffs) + else: + return x + + +def _create_hmm_tagger(states, symbols, A, B, pi): + def pd(values, samples): + d = dict(zip(samples, values)) + return DictionaryProbDist(d) + + def cpd(array, conditions, samples): + d = {} + for values, condition in zip(array, conditions): + d[condition] = pd(values, samples) + return DictionaryConditionalProbDist(d) + + A = cpd(A, states, states) + B = cpd(B, states, symbols) + pi = pd(pi, states) + return HiddenMarkovModelTagger( + symbols=symbols, states=states, transitions=A, outputs=B, priors=pi + ) + + +def _market_hmm_example(): + """ + Return an example HMM (described at page 381, Huang et al) + """ + states = ["bull", "bear", "static"] + symbols = ["up", "down", "unchanged"] + A = np.array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], np.float64) + B = np.array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], np.float64) + pi = np.array([0.5, 0.2, 0.3], np.float64) + + model = _create_hmm_tagger(states, symbols, A, B, pi) + return model, states, symbols + + +def demo(): + # demonstrates HMM probability calculation + + print() + print("HMM probability calculation demo") + print() + + model, states, symbols = _market_hmm_example() + + print("Testing", model) + + for test in [ + ["up", "up"], + ["up", "down", "up"], + ["down"] * 5, + ["unchanged"] * 5 + ["up"], + ]: + + sequence = [(t, None) for t in test] + + print("Testing with state sequence", test) + print("probability =", model.probability(sequence)) + print("tagging = ", model.tag([word for (word, tag) in sequence])) + print("p(tagged) = ", model.probability(sequence)) + print("H = ", model.entropy(sequence)) + print("H_exh = ", model._exhaustive_entropy(sequence)) + print("H(point) = ", model.point_entropy(sequence)) + print("H_exh(point)=", model._exhaustive_point_entropy(sequence)) + print() + + +def load_pos(num_sents): + from nltk.corpus import brown + + sentences = brown.tagged_sents(categories="news")[:num_sents] + + tag_re = re.compile(r"[*]|--|[^+*-]+") + tag_set = set() + symbols = set() + + cleaned_sentences = [] + for sentence in sentences: + for i in range(len(sentence)): + word, tag = sentence[i] + word = word.lower() # normalize + symbols.add(word) # log this word + # Clean up the tag. + tag = tag_re.match(tag).group() + tag_set.add(tag) + sentence[i] = (word, tag) # store cleaned-up tagged token + cleaned_sentences += [sentence] + + return cleaned_sentences, list(tag_set), list(symbols) + + +def demo_pos(): + # demonstrates POS tagging using supervised training + + print() + print("HMM POS tagging demo") + print() + + print("Training HMM...") + labelled_sequences, tag_set, symbols = load_pos(20000) + trainer = HiddenMarkovModelTrainer(tag_set, symbols) + hmm = trainer.train_supervised( + labelled_sequences[10:], + estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins), + ) + + print("Testing...") + hmm.test(labelled_sequences[:10], verbose=True) + + +def _untag(sentences): + unlabeled = [] + for sentence in sentences: + unlabeled.append([(token[_TEXT], None) for token in sentence]) + return unlabeled + + +def demo_pos_bw( + test=10, supervised=20, unsupervised=10, verbose=True, max_iterations=5 +): + # demonstrates the Baum-Welch algorithm in POS tagging + + print() + print("Baum-Welch demo for POS tagging") + print() + + print("Training HMM (supervised, %d sentences)..." 
% supervised) + + sentences, tag_set, symbols = load_pos(test + supervised + unsupervised) + + symbols = set() + for sentence in sentences: + for token in sentence: + symbols.add(token[_TEXT]) + + trainer = HiddenMarkovModelTrainer(tag_set, list(symbols)) + hmm = trainer.train_supervised( + sentences[test : test + supervised], + estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins), + ) + + hmm.test(sentences[:test], verbose=verbose) + + print("Training (unsupervised, %d sentences)..." % unsupervised) + # it's rather slow - so only use 10 samples by default + unlabeled = _untag(sentences[test + supervised :]) + hmm = trainer.train_unsupervised( + unlabeled, model=hmm, max_iterations=max_iterations + ) + hmm.test(sentences[:test], verbose=verbose) + + +def demo_bw(): + # demo Baum Welch by generating some sequences and then performing + # unsupervised training on them + + print() + print("Baum-Welch demo for market example") + print() + + model, states, symbols = _market_hmm_example() + + # generate some random sequences + training = [] + import random + + rng = random.Random() + rng.seed(0) + for i in range(10): + item = model.random_sample(rng, 5) + training.append([(i[0], None) for i in item]) + + # train on those examples, starting with the model that generated them + trainer = HiddenMarkovModelTrainer(states, symbols) + hmm = trainer.train_unsupervised(training, model=model, max_iterations=1000) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/hunpos.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/hunpos.py new file mode 100644 index 0000000000000000000000000000000000000000..e001c6d6dbc1257515ed1149abe6bab06f1c7337 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/hunpos.py @@ -0,0 +1,142 @@ +# Natural Language Toolkit: Interface to the HunPos POS-tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Ljunglöf +# Dávid Márk Nemeskey (modifications) +# Attila Zséder (modifications) +# URL: +# For license information, see LICENSE.TXT + +""" +A module for interfacing with the HunPos open-source POS-tagger. +""" + +import os +from subprocess import PIPE, Popen + +from nltk.internals import find_binary, find_file +from nltk.tag.api import TaggerI + +_hunpos_url = "https://code.google.com/p/hunpos/" + +_hunpos_charset = "ISO-8859-1" +"""The default encoding used by hunpos: ISO-8859-1.""" + + +class HunposTagger(TaggerI): + """ + A class for pos tagging with HunPos. The input is the paths to: + - a model trained on training data + - (optionally) the path to the hunpos-tag binary + - (optionally) the encoding of the training data (default: ISO-8859-1) + + Check whether the required "hunpos-tag" binary is available: + + >>> from nltk.test.setup_fixt import check_binary + >>> check_binary('hunpos-tag') + + Example: + >>> from nltk.tag import HunposTagger + >>> ht = HunposTagger('en_wsj.model') + >>> ht.tag('What is the airspeed of an unladen swallow ?'.split()) + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'VB'), ('?', '.')] + >>> ht.close() + + This class communicates with the hunpos-tag binary via pipes. When the + tagger object is no longer needed, the close() method should be called to + free system resources. The class supports the context manager interface; if + used in a with statement, the close() method is invoked automatically: + + >>> with HunposTagger('en_wsj.model') as ht: + ... 
ht.tag('What is the airspeed of an unladen swallow ?'.split()) + ... + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'VB'), ('?', '.')] + """ + + def __init__( + self, path_to_model, path_to_bin=None, encoding=_hunpos_charset, verbose=False + ): + """ + Starts the hunpos-tag executable and establishes a connection with it. + + :param path_to_model: The model file. + :param path_to_bin: The hunpos-tag binary. + :param encoding: The encoding used by the model. Unicode tokens + passed to the tag() and tag_sents() methods are converted to + this charset when they are sent to hunpos-tag. + The default is ISO-8859-1 (Latin-1). + + This parameter is ignored for str tokens, which are sent as-is. + The caller must ensure that tokens are encoded in the right charset. + """ + self._closed = True + hunpos_paths = [ + ".", + "/usr/bin", + "/usr/local/bin", + "/opt/local/bin", + "/Applications/bin", + "~/bin", + "~/Applications/bin", + ] + hunpos_paths = list(map(os.path.expanduser, hunpos_paths)) + + self._hunpos_bin = find_binary( + "hunpos-tag", + path_to_bin, + env_vars=("HUNPOS_TAGGER",), + searchpath=hunpos_paths, + url=_hunpos_url, + verbose=verbose, + ) + + self._hunpos_model = find_file( + path_to_model, env_vars=("HUNPOS_TAGGER",), verbose=verbose + ) + self._encoding = encoding + self._hunpos = Popen( + [self._hunpos_bin, self._hunpos_model], + shell=False, + stdin=PIPE, + stdout=PIPE, + stderr=PIPE, + ) + self._closed = False + + def __del__(self): + self.close() + + def close(self): + """Closes the pipe to the hunpos executable.""" + if not self._closed: + self._hunpos.communicate() + self._closed = True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def tag(self, tokens): + """Tags a single sentence: a list of words. + The tokens should not contain any newline characters. + """ + for token in tokens: + assert "\n" not in token, "Tokens should not contain newlines" + if isinstance(token, str): + token = token.encode(self._encoding) + self._hunpos.stdin.write(token + b"\n") + # We write a final empty line to tell hunpos that the sentence is finished: + self._hunpos.stdin.write(b"\n") + self._hunpos.stdin.flush() + + tagged_tokens = [] + for token in tokens: + tagged = self._hunpos.stdout.readline().strip().split(b"\t") + tag = tagged[1] if len(tagged) > 1 else None + tagged_tokens.append((token, tag)) + # We have to read (and dismiss) the final empty line: + self._hunpos.stdout.readline() + + return tagged_tokens diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/mapping.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..0af1a0eef945b3cfb2bb3a5860b223a42dbaeae7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/mapping.py @@ -0,0 +1,136 @@ +# Natural Language Toolkit: Tagset Mapping +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nathan Schneider +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Interface for converting POS tags from various treebanks +to the universal tagset of Petrov, Das, & McDonald. 
+ +The tagset consists of the following 12 coarse tags: + +VERB - verbs (all tenses and modes) +NOUN - nouns (common and proper) +PRON - pronouns +ADJ - adjectives +ADV - adverbs +ADP - adpositions (prepositions and postpositions) +CONJ - conjunctions +DET - determiners +NUM - cardinal numbers +PRT - particles or other function words +X - other: foreign words, typos, abbreviations +. - punctuation + +@see: https://arxiv.org/abs/1104.2086 and https://code.google.com/p/universal-pos-tags/ + +""" + +from collections import defaultdict +from os.path import join + +from nltk.data import load + +_UNIVERSAL_DATA = "taggers/universal_tagset" +_UNIVERSAL_TAGS = ( + "VERB", + "NOUN", + "PRON", + "ADJ", + "ADV", + "ADP", + "CONJ", + "DET", + "NUM", + "PRT", + "X", + ".", +) + +# _MAPPINGS = defaultdict(lambda: defaultdict(dict)) +# the mapping between tagset T1 and T2 returns UNK if applied to an unrecognized tag +_MAPPINGS = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: "UNK"))) + + +def _load_universal_map(fileid): + contents = load(join(_UNIVERSAL_DATA, fileid + ".map"), format="text") + + # When mapping to the Universal Tagset, + # map unknown inputs to 'X' not 'UNK' + _MAPPINGS[fileid]["universal"].default_factory = lambda: "X" + + for line in contents.splitlines(): + line = line.strip() + if line == "": + continue + fine, coarse = line.split("\t") + + assert coarse in _UNIVERSAL_TAGS, f"Unexpected coarse tag: {coarse}" + assert ( + fine not in _MAPPINGS[fileid]["universal"] + ), f"Multiple entries for original tag: {fine}" + + _MAPPINGS[fileid]["universal"][fine] = coarse + + +def tagset_mapping(source, target): + """ + Retrieve the mapping dictionary between tagsets. + + >>> tagset_mapping('ru-rnc', 'universal') == {'!': '.', 'A': 'ADJ', 'C': 'CONJ', 'AD': 'ADV',\ + 'NN': 'NOUN', 'VG': 'VERB', 'COMP': 'CONJ', 'NC': 'NUM', 'VP': 'VERB', 'P': 'ADP',\ + 'IJ': 'X', 'V': 'VERB', 'Z': 'X', 'VI': 'VERB', 'YES_NO_SENT': 'X', 'PTCL': 'PRT'} + True + """ + + if source not in _MAPPINGS or target not in _MAPPINGS[source]: + if target == "universal": + _load_universal_map(source) + # Added the new Russian National Corpus mappings because the + # Russian model for nltk.pos_tag() uses it. + _MAPPINGS["ru-rnc-new"]["universal"] = { + "A": "ADJ", + "A-PRO": "PRON", + "ADV": "ADV", + "ADV-PRO": "PRON", + "ANUM": "ADJ", + "CONJ": "CONJ", + "INTJ": "X", + "NONLEX": ".", + "NUM": "NUM", + "PARENTH": "PRT", + "PART": "PRT", + "PR": "ADP", + "PRAEDIC": "PRT", + "PRAEDIC-PRO": "PRON", + "S": "NOUN", + "S-PRO": "PRON", + "V": "VERB", + } + + return _MAPPINGS[source][target] + + +def map_tag(source, target, source_tag): + """ + Maps the tag from the source tagset to the target tagset. + + >>> map_tag('en-ptb', 'universal', 'VBZ') + 'VERB' + >>> map_tag('en-ptb', 'universal', 'VBP') + 'VERB' + >>> map_tag('en-ptb', 'universal', '``') + '.' 
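+ 
+ As a further illustration, when the target is the universal tagset, tags that
+ are missing from the mapping (such as the made-up tag below) fall back to 'X'
+ rather than 'UNK':
+ 
+ >>> map_tag('en-ptb', 'universal', 'SOME-UNSEEN-TAG')
+ 'X'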
+ """ + + # we need a systematic approach to naming + if target == "universal": + if source == "wsj": + source = "en-ptb" + if source == "brown": + source = "en-brown" + + return tagset_mapping(source, target)[source_tag] diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/perceptron.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/perceptron.py new file mode 100644 index 0000000000000000000000000000000000000000..9afe08f0c8d6a9d5852a225e6c9569a291fb1e3d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/perceptron.py @@ -0,0 +1,371 @@ +# This module is a port of the Textblob Averaged Perceptron Tagger +# Author: Matthew Honnibal , +# Long Duong (NLTK port) +# URL: +# +# Copyright 2013 Matthew Honnibal +# NLTK modifications Copyright 2015 The NLTK Project +# +# This module is provided under the terms of the MIT License. + +import logging +import pickle +import random +from collections import defaultdict + +from nltk import jsontags +from nltk.data import find, load +from nltk.tag.api import TaggerI + +try: + import numpy as np +except ImportError: + pass + +PICKLE = "averaged_perceptron_tagger.pickle" + + +@jsontags.register_tag +class AveragedPerceptron: + + """An averaged perceptron, as implemented by Matthew Honnibal. + + See more implementation details here: + https://explosion.ai/blog/part-of-speech-pos-tagger-in-python + """ + + json_tag = "nltk.tag.perceptron.AveragedPerceptron" + + def __init__(self, weights=None): + # Each feature gets its own weight vector, so weights is a dict-of-dicts + self.weights = weights if weights else {} + self.classes = set() + # The accumulated values, for the averaging. These will be keyed by + # feature/clas tuples + self._totals = defaultdict(int) + # The last time the feature was changed, for the averaging. 
Also + # keyed by feature/clas tuples + # (tstamps is short for timestamps) + self._tstamps = defaultdict(int) + # Number of instances seen + self.i = 0 + + def _softmax(self, scores): + s = np.fromiter(scores.values(), dtype=float) + exps = np.exp(s) + return exps / np.sum(exps) + + def predict(self, features, return_conf=False): + """Dot-product the features and current weights and return the best label.""" + scores = defaultdict(float) + for feat, value in features.items(): + if feat not in self.weights or value == 0: + continue + weights = self.weights[feat] + for label, weight in weights.items(): + scores[label] += value * weight + + # Do a secondary alphabetic sort, for stability + best_label = max(self.classes, key=lambda label: (scores[label], label)) + # compute the confidence + conf = max(self._softmax(scores)) if return_conf == True else None + + return best_label, conf + + def update(self, truth, guess, features): + """Update the feature weights.""" + + def upd_feat(c, f, w, v): + param = (f, c) + self._totals[param] += (self.i - self._tstamps[param]) * w + self._tstamps[param] = self.i + self.weights[f][c] = w + v + + self.i += 1 + if truth == guess: + return None + for f in features: + weights = self.weights.setdefault(f, {}) + upd_feat(truth, f, weights.get(truth, 0.0), 1.0) + upd_feat(guess, f, weights.get(guess, 0.0), -1.0) + + def average_weights(self): + """Average weights from all iterations.""" + for feat, weights in self.weights.items(): + new_feat_weights = {} + for clas, weight in weights.items(): + param = (feat, clas) + total = self._totals[param] + total += (self.i - self._tstamps[param]) * weight + averaged = round(total / self.i, 3) + if averaged: + new_feat_weights[clas] = averaged + self.weights[feat] = new_feat_weights + + def save(self, path): + """Save the pickled model weights.""" + with open(path, "wb") as fout: + return pickle.dump(dict(self.weights), fout) + + def load(self, path): + """Load the pickled model weights.""" + self.weights = load(path) + + def encode_json_obj(self): + return self.weights + + @classmethod + def decode_json_obj(cls, obj): + return cls(obj) + + +@jsontags.register_tag +class PerceptronTagger(TaggerI): + + """ + Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal. + See more implementation details here: + https://explosion.ai/blog/part-of-speech-pos-tagger-in-python + + >>> from nltk.tag.perceptron import PerceptronTagger + + Train the model + + >>> tagger = PerceptronTagger(load=False) + + >>> tagger.train([[('today','NN'),('is','VBZ'),('good','JJ'),('day','NN')], + ... [('yes','NNS'),('it','PRP'),('beautiful','JJ')]]) + + >>> tagger.tag(['today','is','a','beautiful','day']) + [('today', 'NN'), ('is', 'PRP'), ('a', 'PRP'), ('beautiful', 'JJ'), ('day', 'NN')] + + Use the pretrain model (the default constructor) + + >>> pretrain = PerceptronTagger() + + >>> pretrain.tag('The quick brown fox jumps over the lazy dog'.split()) + [('The', 'DT'), ('quick', 'JJ'), ('brown', 'NN'), ('fox', 'NN'), ('jumps', 'VBZ'), ('over', 'IN'), ('the', 'DT'), ('lazy', 'JJ'), ('dog', 'NN')] + + >>> pretrain.tag("The red cat".split()) + [('The', 'DT'), ('red', 'JJ'), ('cat', 'NN')] + """ + + json_tag = "nltk.tag.sequential.PerceptronTagger" + + START = ["-START-", "-START2-"] + END = ["-END-", "-END2-"] + + def __init__(self, load=True): + """ + :param load: Load the pickled model upon instantiation. 
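+ 
+ An illustrative example of the token normalization applied before feature
+ extraction (see ``normalize()`` below); no pickled model is required when
+ ``load=False``:
+ 
+ >>> tagger = PerceptronTagger(load=False)
+ >>> tagger.normalize('1984'), tagger.normalize('Boeing-747'), tagger.normalize('NLTK')
+ ('!YEAR', '!HYPHEN', 'nltk')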
+ """ + self.model = AveragedPerceptron() + self.tagdict = {} + self.classes = set() + if load: + AP_MODEL_LOC = "file:" + str( + find("taggers/averaged_perceptron_tagger/" + PICKLE) + ) + self.load(AP_MODEL_LOC) + + def tag(self, tokens, return_conf=False, use_tagdict=True): + """ + Tag tokenized sentences. + :params tokens: list of word + :type tokens: list(str) + """ + prev, prev2 = self.START + output = [] + + context = self.START + [self.normalize(w) for w in tokens] + self.END + for i, word in enumerate(tokens): + tag, conf = ( + (self.tagdict.get(word), 1.0) if use_tagdict == True else (None, None) + ) + if not tag: + features = self._get_features(i, word, context, prev, prev2) + tag, conf = self.model.predict(features, return_conf) + output.append((word, tag, conf) if return_conf == True else (word, tag)) + + prev2 = prev + prev = tag + + return output + + def train(self, sentences, save_loc=None, nr_iter=5): + """Train a model from sentences, and save it at ``save_loc``. ``nr_iter`` + controls the number of Perceptron training iterations. + + :param sentences: A list or iterator of sentences, where each sentence + is a list of (words, tags) tuples. + :param save_loc: If not ``None``, saves a pickled model in this location. + :param nr_iter: Number of training iterations. + """ + # We'd like to allow ``sentences`` to be either a list or an iterator, + # the latter being especially important for a large training dataset. + # Because ``self._make_tagdict(sentences)`` runs regardless, we make + # it populate ``self._sentences`` (a list) with all the sentences. + # This saves the overheard of just iterating through ``sentences`` to + # get the list by ``sentences = list(sentences)``. + + self._sentences = list() # to be populated by self._make_tagdict... + self._make_tagdict(sentences) + self.model.classes = self.classes + for iter_ in range(nr_iter): + c = 0 + n = 0 + for sentence in self._sentences: + words, tags = zip(*sentence) + + prev, prev2 = self.START + context = self.START + [self.normalize(w) for w in words] + self.END + for i, word in enumerate(words): + guess = self.tagdict.get(word) + if not guess: + feats = self._get_features(i, word, context, prev, prev2) + guess, _ = self.model.predict(feats) + self.model.update(tags[i], guess, feats) + prev2 = prev + prev = guess + c += guess == tags[i] + n += 1 + random.shuffle(self._sentences) + logging.info(f"Iter {iter_}: {c}/{n}={_pc(c, n)}") + + # We don't need the training sentences anymore, and we don't want to + # waste space on them when we pickle the trained tagger. + self._sentences = None + + self.model.average_weights() + # Pickle as a binary file + if save_loc is not None: + with open(save_loc, "wb") as fout: + # changed protocol from -1 to 2 to make pickling Python 2 compatible + pickle.dump((self.model.weights, self.tagdict, self.classes), fout, 2) + + def load(self, loc): + """ + :param loc: Load a pickled model at location. + :type loc: str + """ + + self.model.weights, self.tagdict, self.classes = load(loc) + self.model.classes = self.classes + + def encode_json_obj(self): + return self.model.weights, self.tagdict, list(self.classes) + + @classmethod + def decode_json_obj(cls, obj): + tagger = cls(load=False) + tagger.model.weights, tagger.tagdict, tagger.classes = obj + tagger.classes = set(tagger.classes) + tagger.model.classes = tagger.classes + return tagger + + def normalize(self, word): + """ + Normalization used in pre-processing. 
+ - All words are lower cased + - Groups of digits of length 4 are represented as !YEAR; + - Other digits are represented as !DIGITS + + :rtype: str + """ + if "-" in word and word[0] != "-": + return "!HYPHEN" + if word.isdigit() and len(word) == 4: + return "!YEAR" + if word and word[0].isdigit(): + return "!DIGITS" + return word.lower() + + def _get_features(self, i, word, context, prev, prev2): + """Map tokens into a feature representation, implemented as a + {hashable: int} dict. If the features change, a new model must be + trained. + """ + + def add(name, *args): + features[" ".join((name,) + tuple(args))] += 1 + + i += len(self.START) + features = defaultdict(int) + # It's useful to have a constant feature, which acts sort of like a prior + add("bias") + add("i suffix", word[-3:]) + add("i pref1", word[0] if word else "") + add("i-1 tag", prev) + add("i-2 tag", prev2) + add("i tag+i-2 tag", prev, prev2) + add("i word", context[i]) + add("i-1 tag+i word", prev, context[i]) + add("i-1 word", context[i - 1]) + add("i-1 suffix", context[i - 1][-3:]) + add("i-2 word", context[i - 2]) + add("i+1 word", context[i + 1]) + add("i+1 suffix", context[i + 1][-3:]) + add("i+2 word", context[i + 2]) + return features + + def _make_tagdict(self, sentences): + """ + Make a tag dictionary for single-tag words. + :param sentences: A list of list of (word, tag) tuples. + """ + counts = defaultdict(lambda: defaultdict(int)) + for sentence in sentences: + self._sentences.append(sentence) + for word, tag in sentence: + counts[word][tag] += 1 + self.classes.add(tag) + freq_thresh = 20 + ambiguity_thresh = 0.97 + for word, tag_freqs in counts.items(): + tag, mode = max(tag_freqs.items(), key=lambda item: item[1]) + n = sum(tag_freqs.values()) + # Don't add rare words to the tag dictionary + # Only add quite unambiguous words + if n >= freq_thresh and (mode / n) >= ambiguity_thresh: + self.tagdict[word] = tag + + +def _pc(n, d): + return (n / d) * 100 + + +def _load_data_conll_format(filename): + print("Read from file: ", filename) + with open(filename, "rb") as fin: + sentences = [] + sentence = [] + for line in fin.readlines(): + line = line.strip() + # print line + if len(line) == 0: + sentences.append(sentence) + sentence = [] + continue + tokens = line.split("\t") + word = tokens[1] + tag = tokens[4] + sentence.append((word, tag)) + return sentences + + +def _get_pretrain_model(): + # Train and test on English part of ConLL data (WSJ part of Penn Treebank) + # Train: section 2-11 + # Test : section 23 + tagger = PerceptronTagger() + training = _load_data_conll_format("english_ptb_train.conll") + testing = _load_data_conll_format("english_ptb_test.conll") + print("Size of training and testing (sentence)", len(training), len(testing)) + # Train and save the model + tagger.train(training, PICKLE) + print("Accuracy : ", tagger.accuracy(testing)) + + +if __name__ == "__main__": + # _get_pretrain_model() + pass diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/senna.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/senna.py new file mode 100644 index 0000000000000000000000000000000000000000..7b52b7ee0a7bc01614c3a2a397a6ffce47835999 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/senna.py @@ -0,0 +1,134 @@ +# Natural Language Toolkit: Senna POS Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Rami Al-Rfou' +# URL: +# For license information, see LICENSE.TXT + +""" +Senna POS tagger, NER Tagger, Chunk Tagger + +The input is: + +- path to the directory that 
contains SENNA executables. If the path is incorrect, + SennaTagger will automatically search for executable file specified in SENNA environment variable +- (optionally) the encoding of the input data (default:utf-8) + +Note: Unit tests for this module can be found in test/unit/test_senna.py + +>>> from nltk.tag import SennaTagger +>>> tagger = SennaTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> tagger.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP +[('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), +('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'NN'), ('?', '.')] + +>>> from nltk.tag import SennaChunkTagger +>>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> chktagger.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP +[('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'), +('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'), +('?', 'O')] + +>>> from nltk.tag import SennaNERTagger +>>> nertagger = SennaNERTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> nertagger.tag('Shakespeare theatre was in London .'.split()) # doctest: +SKIP +[('Shakespeare', 'B-PER'), ('theatre', 'O'), ('was', 'O'), ('in', 'O'), +('London', 'B-LOC'), ('.', 'O')] +>>> nertagger.tag('UN headquarters are in NY , USA .'.split()) # doctest: +SKIP +[('UN', 'B-ORG'), ('headquarters', 'O'), ('are', 'O'), ('in', 'O'), +('NY', 'B-LOC'), (',', 'O'), ('USA', 'B-LOC'), ('.', 'O')] +""" + +from nltk.classify import Senna + + +class SennaTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["pos"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["pos"]) + return tagged_sents + + +class SennaChunkTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["chk"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["chk"]) + return tagged_sents + + def bio_to_chunks(self, tagged_sent, chunk_type): + """ + Extracts the chunks in a BIO chunk-tagged sentence. + + >>> from nltk.tag import SennaChunkTagger + >>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0') # doctest: +SKIP + >>> sent = 'What is the airspeed of an unladen swallow ?'.split() + >>> tagged_sent = chktagger.tag(sent) # doctest: +SKIP + >>> tagged_sent # doctest: +SKIP + [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'), + ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'), + ('?', 'O')] + >>> list(chktagger.bio_to_chunks(tagged_sent, chunk_type='NP')) # doctest: +SKIP + [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow', '5-6-7')] + + :param tagged_sent: A list of tuples of word and BIO chunk tag. + :type tagged_sent: list(tuple) + :param tagged_sent: The chunk tag that users want to extract, e.g. 
'NP' or 'VP' + :type tagged_sent: str + + :return: An iterable of tuples of chunks that users want to extract + and their corresponding indices. + :rtype: iter(tuple(str)) + """ + current_chunk = [] + current_chunk_position = [] + for idx, word_pos in enumerate(tagged_sent): + word, pos = word_pos + if "-" + chunk_type in pos: # Append the word to the current_chunk. + current_chunk.append(word) + current_chunk_position.append(idx) + else: + if current_chunk: # Flush the full chunk when out of an NP. + _chunk_str = " ".join(current_chunk) + _chunk_pos_str = "-".join(map(str, current_chunk_position)) + yield _chunk_str, _chunk_pos_str + current_chunk = [] + current_chunk_position = [] + if current_chunk: # Flush the last chunk. + yield " ".join(current_chunk), "-".join(map(str, current_chunk_position)) + + +class SennaNERTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["ner"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["ner"]) + return tagged_sents diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/tag/tnt.py b/llmeval-env/lib/python3.10/site-packages/nltk/tag/tnt.py new file mode 100644 index 0000000000000000000000000000000000000000..a505104d812532af561ee3d3d9d80611f78db2cd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/tag/tnt.py @@ -0,0 +1,579 @@ +# Natural Language Toolkit: TnT Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sam Huston +# +# URL: +# For license information, see LICENSE.TXT + +""" +Implementation of 'TnT - A Statisical Part of Speech Tagger' +by Thorsten Brants + +https://aclanthology.org/A00-1031.pdf +""" + +from math import log +from operator import itemgetter + +from nltk.probability import ConditionalFreqDist, FreqDist +from nltk.tag.api import TaggerI + + +class TnT(TaggerI): + """ + TnT - Statistical POS tagger + + IMPORTANT NOTES: + + * DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS + + - It is possible to provide an untrained POS tagger to + create tags for unknown words, see __init__ function + + * SHOULD BE USED WITH SENTENCE-DELIMITED INPUT + + - Due to the nature of this tagger, it works best when + trained over sentence delimited input. + - However it still produces good results if the training + data and testing data are separated on all punctuation eg: [,.?!] + - Input for training is expected to be a list of sentences + where each sentence is a list of (word, tag) tuples + - Input for tag function is a single sentence + Input for tagdata function is a list of sentences + Output is of a similar form + + * Function provided to process text that is unsegmented + + - Please see basic_sent_chop() + + + TnT uses a second order Markov model to produce tags for + a sequence of input, specifically: + + argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T) + + IE: the maximum projection of a set of probabilities + + The set of possible tags for a given word is derived + from the training data. It is the set of all tags + that exact word has been assigned. 
+ + To speed up and get more precision, we can use log addition + to instead multiplication, specifically: + + argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] + + log(P(t_T+1|t_T)) + + The probability of a tag for a given word is the linear + interpolation of 3 markov models; a zero-order, first-order, + and a second order model. + + P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) + + l3*P(t_i| t_i-1, t_i-2) + + A beam search is used to limit the memory usage of the algorithm. + The degree of the beam can be changed using N in the initialization. + N represents the maximum number of possible solutions to maintain + while tagging. + + It is possible to differentiate the tags which are assigned to + capitalized words. However this does not result in a significant + gain in the accuracy of the results. + """ + + def __init__(self, unk=None, Trained=False, N=1000, C=False): + """ + Construct a TnT statistical tagger. Tagger must be trained + before being used to tag input. + + :param unk: instance of a POS tagger, conforms to TaggerI + :type unk: TaggerI + :param Trained: Indication that the POS tagger is trained or not + :type Trained: bool + :param N: Beam search degree (see above) + :type N: int + :param C: Capitalization flag + :type C: bool + + Initializer, creates frequency distributions to be used + for tagging + + _lx values represent the portion of the tri/bi/uni taggers + to be used to calculate the probability + + N value is the number of possible solutions to maintain + while tagging. A good value for this is 1000 + + C is a boolean value which specifies to use or + not use the Capitalization of the word as additional + information for tagging. + NOTE: using capitalization may not increase the accuracy + of the tagger + """ + + self._uni = FreqDist() + self._bi = ConditionalFreqDist() + self._tri = ConditionalFreqDist() + self._wd = ConditionalFreqDist() + self._eos = ConditionalFreqDist() + self._l1 = 0.0 + self._l2 = 0.0 + self._l3 = 0.0 + self._N = N + self._C = C + self._T = Trained + + self._unk = unk + + # statistical tools (ignore or delete me) + self.unknown = 0 + self.known = 0 + + def train(self, data): + """ + Uses a set of tagged data to train the tagger. + If an unknown word tagger is specified, + it is trained on the same data. 
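+ 
+ An illustrative usage sketch in the style of the ``demo()`` function at the
+ bottom of this file; the corpus-dependent calls are skipped in doctests:
+ 
+ >>> from nltk.corpus import brown  # doctest: +SKIP
+ >>> sents = list(brown.tagged_sents())  # doctest: +SKIP
+ >>> tnt = TnT()
+ >>> tnt.train(sents[200:1000])  # doctest: +SKIP
+ >>> tnt.tag(['the', 'board', 'approved', 'the', 'merger'])  # doctest: +SKIP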
+ + :param data: List of lists of (word, tag) tuples + :type data: tuple(str) + """ + + # Ensure that local C flag is initialized before use + C = False + + if self._unk is not None and self._T == False: + self._unk.train(data) + + for sent in data: + history = [("BOS", False), ("BOS", False)] + for w, t in sent: + + # if capitalization is requested, + # and the word begins with a capital + # set local flag C to True + if self._C and w[0].isupper(): + C = True + + self._wd[w][t] += 1 + self._uni[(t, C)] += 1 + self._bi[history[1]][(t, C)] += 1 + self._tri[tuple(history)][(t, C)] += 1 + + history.append((t, C)) + history.pop(0) + + # set local flag C to false for the next word + C = False + + self._eos[t]["EOS"] += 1 + + # compute lambda values from the trained frequency distributions + self._compute_lambda() + + def _compute_lambda(self): + """ + creates lambda values based upon training data + + NOTE: no need to explicitly reference C, + it is contained within the tag variable :: tag == (tag,C) + + for each tag trigram (t1, t2, t3) + depending on the maximum value of + - f(t1,t2,t3)-1 / f(t1,t2)-1 + - f(t2,t3)-1 / f(t2)-1 + - f(t3)-1 / N-1 + + increment l3,l2, or l1 by f(t1,t2,t3) + + ISSUES -- Resolutions: + if 2 values are equal, increment both lambda values + by (f(t1,t2,t3) / 2) + """ + + # temporary lambda variables + tl1 = 0.0 + tl2 = 0.0 + tl3 = 0.0 + + # for each t1,t2 in system + for history in self._tri.conditions(): + (h1, h2) = history + + # for each t3 given t1,t2 in system + # (NOTE: tag actually represents (tag,C)) + # However no effect within this function + for tag in self._tri[history].keys(): + + # if there has only been 1 occurrence of this tag in the data + # then ignore this trigram. + if self._uni[tag] == 1: + continue + + # safe_div provides a safe floating point division + # it returns -1 if the denominator is 0 + c3 = self._safe_div( + (self._tri[history][tag] - 1), (self._tri[history].N() - 1) + ) + c2 = self._safe_div((self._bi[h2][tag] - 1), (self._bi[h2].N() - 1)) + c1 = self._safe_div((self._uni[tag] - 1), (self._uni.N() - 1)) + + # if c1 is the maximum value: + if (c1 > c3) and (c1 > c2): + tl1 += self._tri[history][tag] + + # if c2 is the maximum value + elif (c2 > c3) and (c2 > c1): + tl2 += self._tri[history][tag] + + # if c3 is the maximum value + elif (c3 > c2) and (c3 > c1): + tl3 += self._tri[history][tag] + + # if c3, and c2 are equal and larger than c1 + elif (c3 == c2) and (c3 > c1): + tl2 += self._tri[history][tag] / 2.0 + tl3 += self._tri[history][tag] / 2.0 + + # if c1, and c2 are equal and larger than c3 + # this might be a dumb thing to do....(not sure yet) + elif (c2 == c1) and (c1 > c3): + tl1 += self._tri[history][tag] / 2.0 + tl2 += self._tri[history][tag] / 2.0 + + # otherwise there might be a problem + # eg: all values = 0 + else: + pass + + # Lambda normalisation: + # ensures that l1+l2+l3 = 1 + self._l1 = tl1 / (tl1 + tl2 + tl3) + self._l2 = tl2 / (tl1 + tl2 + tl3) + self._l3 = tl3 / (tl1 + tl2 + tl3) + + def _safe_div(self, v1, v2): + """ + Safe floating point division function, does not allow division by 0 + returns -1 if the denominator is 0 + """ + if v2 == 0: + return -1 + else: + return v1 / v2 + + def tagdata(self, data): + """ + Tags each sentence in a list of sentences + + :param data:list of list of words + :type data: [[string,],] + :return: list of list of (word, tag) tuples + + Invokes tag(sent) function for each sentence + compiles the results into a list of tagged sentences + each tagged sentence is a list of (word, 
tag) tuples + """ + res = [] + for sent in data: + res1 = self.tag(sent) + res.append(res1) + return res + + def tag(self, data): + """ + Tags a single sentence + + :param data: list of words + :type data: [string,] + + :return: [(word, tag),] + + Calls recursive function '_tagword' + to produce a list of tags + + Associates the sequence of returned tags + with the correct words in the input sequence + + returns a list of (word, tag) tuples + """ + + current_state = [(["BOS", "BOS"], 0.0)] + + sent = list(data) + + tags = self._tagword(sent, current_state) + + res = [] + for i in range(len(sent)): + # unpack and discard the C flags + (t, C) = tags[i + 2] + res.append((sent[i], t)) + + return res + + def _tagword(self, sent, current_states): + """ + :param sent : List of words remaining in the sentence + :type sent : [word,] + :param current_states : List of possible tag combinations for + the sentence so far, and the log probability + associated with each tag combination + :type current_states : [([tag, ], logprob), ] + + Tags the first word in the sentence and + recursively tags the reminder of sentence + + Uses formula specified above to calculate the probability + of a particular tag + """ + + # if this word marks the end of the sentence, + # return the most probable tag + if sent == []: + (h, logp) = current_states[0] + return h + + # otherwise there are more words to be tagged + word = sent[0] + sent = sent[1:] + new_states = [] + + # if the Capitalisation is requested, + # initialise the flag for this word + C = False + if self._C and word[0].isupper(): + C = True + + # if word is known + # compute the set of possible tags + # and their associated log probabilities + if word in self._wd: + self.known += 1 + + for (history, curr_sent_logprob) in current_states: + logprobs = [] + + for t in self._wd[word].keys(): + tC = (t, C) + p_uni = self._uni.freq(tC) + p_bi = self._bi[history[-1]].freq(tC) + p_tri = self._tri[tuple(history[-2:])].freq(tC) + p_wd = self._wd[word][t] / self._uni[tC] + p = self._l1 * p_uni + self._l2 * p_bi + self._l3 * p_tri + p2 = log(p, 2) + log(p_wd, 2) + + # compute the result of appending each tag to this history + new_states.append((history + [tC], curr_sent_logprob + p2)) + + # otherwise a new word, set of possible tags is unknown + else: + self.unknown += 1 + + # since a set of possible tags, + # and the probability of each specific tag + # can not be returned from most classifiers: + # specify that any unknown words are tagged with certainty + p = 1 + + # if no unknown word tagger has been specified + # then use the tag 'Unk' + if self._unk is None: + tag = ("Unk", C) + + # otherwise apply the unknown word tagger + else: + [(_w, t)] = list(self._unk.tag([word])) + tag = (t, C) + + for (history, logprob) in current_states: + history.append(tag) + + new_states = current_states + + # now have computed a set of possible new_states + + # sort states by log prob + # set is now ordered greatest to least log probability + new_states.sort(reverse=True, key=itemgetter(1)) + + # del everything after N (threshold) + # this is the beam search cut + if len(new_states) > self._N: + new_states = new_states[: self._N] + + # compute the tags for the rest of the sentence + # return the best list of tags for the sentence + return self._tagword(sent, new_states) + + +######################################## +# helper function -- basic sentence tokenizer +######################################## + + +def basic_sent_chop(data, raw=True): + """ + Basic method for tokenizing input 
into sentences + for this tagger: + + :param data: list of tokens (words or (word, tag) tuples) + :type data: str or tuple(str, str) + :param raw: boolean flag marking the input data + as a list of words or a list of tagged words + :type raw: bool + :return: list of sentences + sentences are a list of tokens + tokens are the same as the input + + Function takes a list of tokens and separates the tokens into lists + where each list represents a sentence fragment + This function can separate both tagged and raw sequences into + basic sentences. + + Sentence markers are the set of [,.!?] + + This is a simple method which enhances the performance of the TnT + tagger. Better sentence tokenization will further enhance the results. + """ + + new_data = [] + curr_sent = [] + sent_mark = [",", ".", "?", "!"] + + if raw: + for word in data: + if word in sent_mark: + curr_sent.append(word) + new_data.append(curr_sent) + curr_sent = [] + else: + curr_sent.append(word) + + else: + for (word, tag) in data: + if word in sent_mark: + curr_sent.append((word, tag)) + new_data.append(curr_sent) + curr_sent = [] + else: + curr_sent.append((word, tag)) + return new_data + + +def demo(): + from nltk.corpus import brown + + sents = list(brown.tagged_sents()) + test = list(brown.sents()) + + tagger = TnT() + tagger.train(sents[200:1000]) + + tagged_data = tagger.tagdata(test[100:120]) + + for j in range(len(tagged_data)): + s = tagged_data[j] + t = sents[j + 100] + for i in range(len(s)): + print(s[i], "--", t[i]) + print() + + +def demo2(): + from nltk.corpus import treebank + + d = list(treebank.tagged_sents()) + + t = TnT(N=1000, C=False) + s = TnT(N=1000, C=True) + t.train(d[(11) * 100 :]) + s.train(d[(11) * 100 :]) + + for i in range(10): + tacc = t.accuracy(d[i * 100 : ((i + 1) * 100)]) + tp_un = t.unknown / (t.known + t.unknown) + tp_kn = t.known / (t.known + t.unknown) + t.unknown = 0 + t.known = 0 + + print("Capitalization off:") + print("Accuracy:", tacc) + print("Percentage known:", tp_kn) + print("Percentage unknown:", tp_un) + print("Accuracy over known words:", (tacc / tp_kn)) + + sacc = s.accuracy(d[i * 100 : ((i + 1) * 100)]) + sp_un = s.unknown / (s.known + s.unknown) + sp_kn = s.known / (s.known + s.unknown) + s.unknown = 0 + s.known = 0 + + print("Capitalization on:") + print("Accuracy:", sacc) + print("Percentage known:", sp_kn) + print("Percentage unknown:", sp_un) + print("Accuracy over known words:", (sacc / sp_kn)) + + +def demo3(): + from nltk.corpus import brown, treebank + + d = list(treebank.tagged_sents()) + e = list(brown.tagged_sents()) + + d = d[:1000] + e = e[:1000] + + d10 = int(len(d) * 0.1) + e10 = int(len(e) * 0.1) + + tknacc = 0 + sknacc = 0 + tallacc = 0 + sallacc = 0 + tknown = 0 + sknown = 0 + + for i in range(10): + + t = TnT(N=1000, C=False) + s = TnT(N=1000, C=False) + + dtest = d[(i * d10) : ((i + 1) * d10)] + etest = e[(i * e10) : ((i + 1) * e10)] + + dtrain = d[: (i * d10)] + d[((i + 1) * d10) :] + etrain = e[: (i * e10)] + e[((i + 1) * e10) :] + + t.train(dtrain) + s.train(etrain) + + tacc = t.accuracy(dtest) + tp_un = t.unknown / (t.known + t.unknown) + tp_kn = t.known / (t.known + t.unknown) + tknown += tp_kn + t.unknown = 0 + t.known = 0 + + sacc = s.accuracy(etest) + sp_un = s.unknown / (s.known + s.unknown) + sp_kn = s.known / (s.known + s.unknown) + sknown += sp_kn + s.unknown = 0 + s.known = 0 + + tknacc += tacc / tp_kn + sknacc += sacc / tp_kn + tallacc += tacc + sallacc += sacc + + # print(i+1, (tacc / tp_kn), i+1, (sacc / tp_kn), i+1, tacc, i+1, sacc) + + 
print("brown: acc over words known:", 10 * tknacc) + print(" : overall accuracy:", 10 * tallacc) + print(" : words known:", 10 * tknown) + print("treebank: acc over words known:", 10 * sknacc) + print(" : overall accuracy:", 10 * sallacc) + print(" : words known:", 10 * sknown) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm2.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm2.py new file mode 100644 index 0000000000000000000000000000000000000000..0b3ff375f045f4a809778ea8d3221e6b62e5e2ad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm2.py @@ -0,0 +1,319 @@ +# Natural Language Toolkit: IBM Model 2 +# +# Copyright (C) 2001-2013 NLTK Project +# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim +# URL: +# For license information, see LICENSE.TXT + +""" +Lexical translation model that considers word order. + +IBM Model 2 improves on Model 1 by accounting for word order. +An alignment probability is introduced, a(i | j,l,m), which predicts +a source word position, given its aligned target word's position. + +The EM algorithm used in Model 2 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) count how many times a particular position in the source + sentence is aligned to a particular position in the target + sentence + +:M step: Estimate new probabilities based on the counts from the E step + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. 
+""" + +import warnings +from collections import defaultdict + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel1 +from nltk.translate.ibm_model import Counts + + +class IBMModel2(IBMModel): + """ + Lexical translation model that considers word order + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + + >>> ibm2 = IBMModel2(bitext, 5) + + >>> print(round(ibm2.translation_table['buch']['book'], 3)) + 1.0 + >>> print(round(ibm2.translation_table['das']['book'], 3)) + 0.0 + >>> print(round(ibm2.translation_table['buch'][None], 3)) + 0.0 + >>> print(round(ibm2.translation_table['ja'][None], 3)) + 0.0 + + >>> print(round(ibm2.alignment_table[1][1][2][2], 3)) + 0.939 + >>> print(round(ibm2.alignment_table[1][2][2][2], 3)) + 0.0 + >>> print(round(ibm2.alignment_table[2][2][4][5], 3)) + 1.0 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)]) + + """ + + def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model and an alignment model. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. + If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``. + See ``IBMModel`` for the type and purpose of these tables. 
+ :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + + if probability_tables is None: + # Get translation probabilities from IBM Model 1 + # Run more iterations of training for Model 1, since it is + # faster than Model 2 + ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations) + self.translation_table = ibm1.translation_table + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + self.align_all(sentence_aligned_corpus) + + def set_uniform_probabilities(self, sentence_aligned_corpus): + # a(i | j,l,m) = 1 / (l+1) for all i, j, l, m + l_m_combinations = set() + for aligned_sentence in sentence_aligned_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + if (l, m) not in l_m_combinations: + l_m_combinations.add((l, m)) + initial_prob = 1 / (l + 1) + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "A source sentence is too long (" + + str(l) + + " words). Results may be less accurate." + ) + + for i in range(0, l + 1): + for j in range(1, m + 1): + self.alignment_table[i][j][l][m] = initial_prob + + def train(self, parallel_corpus): + counts = Model2Counts() + for aligned_sentence in parallel_corpus: + src_sentence = [None] + aligned_sentence.mots + trg_sentence = ["UNUSED"] + aligned_sentence.words # 1-indexed + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_all_alignments(src_sentence, trg_sentence) + + # E step (b): Collect counts + for j in range(1, m + 1): + t = trg_sentence[j] + for i in range(0, l + 1): + s = src_sentence[i] + count = self.prob_alignment_point(i, j, src_sentence, trg_sentence) + normalized_count = count / total_count[t] + + counts.update_lexical_translation(normalized_count, s, t) + counts.update_alignment(normalized_count, i, j, l, m) + + # M step: Update probabilities with maximum likelihood estimates + self.maximize_lexical_translation_probabilities(counts) + self.maximize_alignment_probabilities(counts) + + def maximize_alignment_probabilities(self, counts): + MIN_PROB = IBMModel.MIN_PROB + for i, j_s in counts.alignment.items(): + for j, src_sentence_lengths in j_s.items(): + for l, trg_sentence_lengths in src_sentence_lengths.items(): + for m in trg_sentence_lengths: + estimate = ( + counts.alignment[i][j][l][m] + / counts.alignment_for_any_i[j][l][m] + ) + self.alignment_table[i][j][l][m] = max(estimate, MIN_PROB) + + def prob_all_alignments(self, src_sentence, trg_sentence): + """ + Computes the probability of all possible word alignments, + expressed as a marginal distribution over target words t + + Each entry in the return value represents the contribution to + the total alignment probability by the target word t. + + To obtain probability(alignment | src_sentence, trg_sentence), + simply sum the entries in the return value. 
+ + :return: Probability of t for all s in ``src_sentence`` + :rtype: dict(str): float + """ + alignment_prob_for_t = defaultdict(lambda: 0.0) + for j in range(1, len(trg_sentence)): + t = trg_sentence[j] + for i in range(0, len(src_sentence)): + alignment_prob_for_t[t] += self.prob_alignment_point( + i, j, src_sentence, trg_sentence + ) + return alignment_prob_for_t + + def prob_alignment_point(self, i, j, src_sentence, trg_sentence): + """ + Probability that position j in ``trg_sentence`` is aligned to + position i in the ``src_sentence`` + """ + l = len(src_sentence) - 1 + m = len(trg_sentence) - 1 + s = src_sentence[i] + t = trg_sentence[j] + return self.translation_table[t][s] * self.alignment_table[i][j][l][m] + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + prob = 1.0 + l = len(alignment_info.src_sentence) - 1 + m = len(alignment_info.trg_sentence) - 1 + + for j, i in enumerate(alignment_info.alignment): + if j == 0: + continue # skip the dummy zeroeth element + trg_word = alignment_info.trg_sentence[j] + src_word = alignment_info.src_sentence[i] + prob *= ( + self.translation_table[trg_word][src_word] + * self.alignment_table[i][j][l][m] + ) + + return max(prob, IBMModel.MIN_PROB) + + def align_all(self, parallel_corpus): + for sentence_pair in parallel_corpus: + self.align(sentence_pair) + + def align(self, sentence_pair): + """ + Determines the best word alignment for one sentence pair from + the corpus that the model was trained on. + + The best alignment will be set in ``sentence_pair`` when the + method returns. In contrast with the internal implementation of + IBM models, the word indices in the ``Alignment`` are zero- + indexed, not one-indexed. + + :param sentence_pair: A sentence in the source language and its + counterpart sentence in the target language + :type sentence_pair: AlignedSent + """ + best_alignment = [] + + l = len(sentence_pair.mots) + m = len(sentence_pair.words) + + for j, trg_word in enumerate(sentence_pair.words): + # Initialize trg_word to align with the NULL token + best_prob = ( + self.translation_table[trg_word][None] + * self.alignment_table[0][j + 1][l][m] + ) + best_prob = max(best_prob, IBMModel.MIN_PROB) + best_alignment_point = None + for i, src_word in enumerate(sentence_pair.mots): + align_prob = ( + self.translation_table[trg_word][src_word] + * self.alignment_table[i + 1][j + 1][l][m] + ) + if align_prob >= best_prob: + best_prob = align_prob + best_alignment_point = i + + best_alignment.append((j, best_alignment_point)) + + sentence_pair.alignment = Alignment(best_alignment) + + +class Model2Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for alignment. 
+ """ + + def __init__(self): + super().__init__() + self.alignment = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0))) + ) + self.alignment_for_any_i = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + + def update_lexical_translation(self, count, s, t): + self.t_given_s[t][s] += count + self.any_t_given_s[s] += count + + def update_alignment(self, count, i, j, l, m): + self.alignment[i][j][l][m] += count + self.alignment_for_any_i[j][l][m] += count diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm3.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm3.py new file mode 100644 index 0000000000000000000000000000000000000000..f295dee0b563bbcb9a5b9557c8d1602942a75bc3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm3.py @@ -0,0 +1,346 @@ +# Natural Language Toolkit: IBM Model 3 +# +# Copyright (C) 2001-2013 NLTK Project +# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim +# URL: +# For license information, see LICENSE.TXT + +""" +Translation model that considers how a word can be aligned to +multiple words in another language. + +IBM Model 3 improves on Model 2 by directly modeling the phenomenon +where a word in one language may be translated into zero or more words +in another. This is expressed by the fertility probability, +n(phi | source word). + +If a source word translates into more than one word, it is possible to +generate sentences that have the same alignment in multiple ways. This +is modeled by a distortion step. The distortion probability, d(j|i,l,m), +predicts a target word position, given its aligned source word's +position. The distortion probability replaces the alignment probability +of Model 2. + +The fertility probability is not applicable for NULL. Target words that +align to NULL are assumed to be distributed uniformly in the target +sentence. The existence of these words is modeled by p1, the probability +that a target word produced by a real source word requires another +target word that is produced by NULL. + +The EM algorithm used in Model 3 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) count how many times a particular position in the target + sentence is aligned to a particular position in the source + sentence + - (c) count how many times a source word is aligned to phi number + of target words + - (d) count how many times NULL is aligned to a target word + +:M step: Estimate new probabilities based on the counts from the E step + +Because there are too many possible alignments, only the most probable +ones are considered. First, the best alignment is determined using prior +probabilities. Then, a hill climbing approach is used to find other good +candidates. 
+ +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language +:phi: Fertility, the number of target words produced by a source word +:p1: Probability that a target word produced by a source word is + accompanied by another target word that is aligned to NULL +:p0: 1 - p1 + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. +""" + +import warnings +from collections import defaultdict +from math import factorial + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel2 +from nltk.translate.ibm_model import Counts + + +class IBMModel3(IBMModel): + """ + Translation model that considers how a word can be aligned to + multiple words in another language + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book'])) + >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize'])) + + >>> ibm3 = IBMModel3(bitext, 5) + + >>> print(round(ibm3.translation_table['buch']['book'], 3)) + 1.0 + >>> print(round(ibm3.translation_table['das']['book'], 3)) + 0.0 + >>> print(round(ibm3.translation_table['ja'][None], 3)) + 1.0 + + >>> print(round(ibm3.distortion_table[1][1][2][2], 3)) + 1.0 + >>> print(round(ibm3.distortion_table[1][2][2][2], 3)) + 0.0 + >>> print(round(ibm3.distortion_table[2][2][4][5], 3)) + 0.75 + + >>> print(round(ibm3.fertility_table[2]['summarize'], 3)) + 1.0 + >>> print(round(ibm3.fertility_table[1]['book'], 3)) + 1.0 + + >>> print(round(ibm3.p1, 3)) + 0.054 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)]) + + """ + + def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model, a distortion model, a fertility model, and a + model for generating NULL-aligned words. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param probability_tables: Optional. 
Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. + If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``, + ``fertility_table``, ``p1``, ``distortion_table``. + See ``IBMModel`` for the type and purpose of these tables. + :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + self.reset_probabilities() + + if probability_tables is None: + # Get translation and alignment probabilities from IBM Model 2 + ibm2 = IBMModel2(sentence_aligned_corpus, iterations) + self.translation_table = ibm2.translation_table + self.alignment_table = ibm2.alignment_table + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + self.fertility_table = probability_tables["fertility_table"] + self.p1 = probability_tables["p1"] + self.distortion_table = probability_tables["distortion_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + def reset_probabilities(self): + super().reset_probabilities() + self.distortion_table = defaultdict( + lambda: defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + ) + """ + dict[int][int][int][int]: float. Probability(j | i,l,m). + Values accessed as ``distortion_table[j][i][l][m]``. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + # d(j | i,l,m) = 1 / m for all i, j, l, m + l_m_combinations = set() + for aligned_sentence in sentence_aligned_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + if (l, m) not in l_m_combinations: + l_m_combinations.add((l, m)) + initial_prob = 1 / m + if initial_prob < IBMModel.MIN_PROB: + warnings.warn( + "A target sentence is too long (" + + str(m) + + " words). Results may be less accurate." 
+ ) + for j in range(1, m + 1): + for i in range(0, l + 1): + self.distortion_table[j][i][l][m] = initial_prob + + # simple initialization, taken from GIZA++ + self.fertility_table[0] = defaultdict(lambda: 0.2) + self.fertility_table[1] = defaultdict(lambda: 0.65) + self.fertility_table[2] = defaultdict(lambda: 0.1) + self.fertility_table[3] = defaultdict(lambda: 0.04) + MAX_FERTILITY = 10 + initial_fert_prob = 0.01 / (MAX_FERTILITY - 4) + for phi in range(4, MAX_FERTILITY): + self.fertility_table[phi] = defaultdict(lambda: initial_fert_prob) + + self.p1 = 0.5 + + def train(self, parallel_corpus): + counts = Model3Counts() + for aligned_sentence in parallel_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + + # Sample the alignment space + sampled_alignments, best_alignment = self.sample(aligned_sentence) + # Record the most probable alignment + aligned_sentence.alignment = Alignment( + best_alignment.zero_indexed_alignment() + ) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_of_alignments(sampled_alignments) + + # E step (b): Collect counts + for alignment_info in sampled_alignments: + count = self.prob_t_a_given_s(alignment_info) + normalized_count = count / total_count + + for j in range(1, m + 1): + counts.update_lexical_translation( + normalized_count, alignment_info, j + ) + counts.update_distortion(normalized_count, alignment_info, j, l, m) + + counts.update_null_generation(normalized_count, alignment_info) + counts.update_fertility(normalized_count, alignment_info) + + # M step: Update probabilities with maximum likelihood estimates + # If any probability is less than MIN_PROB, clamp it to MIN_PROB + existing_alignment_table = self.alignment_table + self.reset_probabilities() + self.alignment_table = existing_alignment_table # don't retrain + + self.maximize_lexical_translation_probabilities(counts) + self.maximize_distortion_probabilities(counts) + self.maximize_fertility_probabilities(counts) + self.maximize_null_generation_probabilities(counts) + + def maximize_distortion_probabilities(self, counts): + MIN_PROB = IBMModel.MIN_PROB + for j, i_s in counts.distortion.items(): + for i, src_sentence_lengths in i_s.items(): + for l, trg_sentence_lengths in src_sentence_lengths.items(): + for m in trg_sentence_lengths: + estimate = ( + counts.distortion[j][i][l][m] + / counts.distortion_for_any_j[i][l][m] + ) + self.distortion_table[j][i][l][m] = max(estimate, MIN_PROB) + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + src_sentence = alignment_info.src_sentence + trg_sentence = alignment_info.trg_sentence + l = len(src_sentence) - 1 # exclude NULL + m = len(trg_sentence) - 1 + p1 = self.p1 + p0 = 1 - p1 + + probability = 1.0 + MIN_PROB = IBMModel.MIN_PROB + + # Combine NULL insertion probability + null_fertility = alignment_info.fertility_of_i(0) + probability *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) + if probability < MIN_PROB: + return MIN_PROB + + # Compute combination (m - null_fertility) choose null_fertility + for i in range(1, null_fertility + 1): + probability *= (m - null_fertility - i + 1) / i + if probability < MIN_PROB: + return MIN_PROB + + # Combine fertility probabilities + for i in range(1, l + 1): + fertility = alignment_info.fertility_of_i(i) + probability *= ( + factorial(fertility) * self.fertility_table[fertility][src_sentence[i]] + ) + if probability < MIN_PROB: + return MIN_PROB + + # 
Combine lexical and distortion probabilities + for j in range(1, m + 1): + t = trg_sentence[j] + i = alignment_info.alignment[j] + s = src_sentence[i] + + probability *= ( + self.translation_table[t][s] * self.distortion_table[j][i][l][m] + ) + if probability < MIN_PROB: + return MIN_PROB + + return probability + + +class Model3Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for distortion. + """ + + def __init__(self): + super().__init__() + self.distortion = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0))) + ) + self.distortion_for_any_j = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + + def update_distortion(self, count, alignment_info, j, l, m): + i = alignment_info.alignment[j] + self.distortion[j][i][l][m] += count + self.distortion_for_any_j[i][l][m] += count diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm5.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm5.py new file mode 100644 index 0000000000000000000000000000000000000000..98ed2ec0aec4535fd6b4e18abbf8ecd8f696a9e6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/ibm5.py @@ -0,0 +1,663 @@ +# Natural Language Toolkit: IBM Model 5 +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +Translation model that keeps track of vacant positions in the target +sentence to decide where to place translated words. + +Translation can be viewed as a process where each word in the source +sentence is stepped through sequentially, generating translated words +for each source word. The target sentence can be viewed as being made +up of ``m`` empty slots initially, which gradually fill up as generated +words are placed in them. + +Models 3 and 4 use distortion probabilities to decide how to place +translated words. For simplicity, these models ignore the history of +which slots have already been occupied with translated words. +Consider the placement of the last translated word: there is only one +empty slot left in the target sentence, so the distortion probability +should be 1.0 for that position and 0.0 everywhere else. However, the +distortion probabilities for Models 3 and 4 are set up such that all +positions are under consideration. + +IBM Model 5 fixes this deficiency by accounting for occupied slots +during translation. It introduces the vacancy function v(j), the number +of vacancies up to, and including, position j in the target sentence. + +Terminology +----------- + +:Maximum vacancy: + The number of valid slots that a word can be placed in. + This is not necessarily the same as the number of vacant slots. + For example, if a tablet contains more than one word, the head word + cannot be placed at the last vacant slot because there will be no + space for the other words in the tablet. The number of valid slots + has to take into account the length of the tablet. + Non-head words cannot be placed before the head word, so vacancies + to the left of the head word are ignored. +:Vacancy difference: + For a head word: (v(j) - v(center of previous cept)) + Can be positive or negative. + For a non-head word: (v(j) - v(position of previously placed word)) + Always positive, because successive words in a tablet are assumed to + appear to the right of the previous word. + +Positioning of target words fall under three cases: + +1. 
Words generated by NULL are distributed uniformly +2. For a head word t, its position is modeled by the probability + v_head(dv | max_v,word_class_t(t)) +3. For a non-head word t, its position is modeled by the probability + v_non_head(dv | max_v,word_class_t(t)) + +dv and max_v are defined differently for head and non-head words. + +The EM algorithm used in Model 5 is: + +:E step: In the training data, collect counts, weighted by prior + probabilities. + + - (a) count how many times a source language word is translated + into a target language word + - (b) for a particular word class and maximum vacancy, count how + many times a head word and the previous cept's center have + a particular difference in number of vacancies + - (b) for a particular word class and maximum vacancy, count how + many times a non-head word and the previous target word + have a particular difference in number of vacancies + - (d) count how many times a source word is aligned to phi number + of target words + - (e) count how many times NULL is aligned to a target word + +:M step: Estimate new probabilities based on the counts from the E step + +Like Model 4, there are too many possible alignments to consider. Thus, +a hill climbing approach is used to sample good candidates. In addition, +pruning is used to weed out unlikely alignments based on Model 4 scores. + +Notations +--------- + +:i: Position in the source sentence + Valid values are 0 (for NULL), 1, 2, ..., length of source sentence +:j: Position in the target sentence + Valid values are 1, 2, ..., length of target sentence +:l: Number of words in the source sentence, excluding NULL +:m: Number of words in the target sentence +:s: A word in the source language +:t: A word in the target language +:phi: Fertility, the number of target words produced by a source word +:p1: Probability that a target word produced by a source word is + accompanied by another target word that is aligned to NULL +:p0: 1 - p1 +:max_v: Maximum vacancy +:dv: Vacancy difference, Δv + +The definition of v_head here differs from GIZA++, section 4.7 of +[Brown et al., 1993], and [Koehn, 2010]. In the latter cases, v_head is +v_head(v(j) | v(center of previous cept),max_v,word_class(t)). + +Here, we follow appendix B of [Brown et al., 1993] and combine v(j) with +v(center of previous cept) to obtain dv: +v_head(v(j) - v(center of previous cept) | max_v,word_class(t)). + +References +---------- + +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. + +Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and +Robert L. Mercer. 1993. The Mathematics of Statistical Machine +Translation: Parameter Estimation. Computational Linguistics, 19 (2), +263-311. 
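The vacancy function v(j) described above is implemented by the Slots helper at the end of this module; the short sketch below (an editorial illustration, not part of the file) shows how occupying target positions changes v(j).

    slots = Slots(5)                      # target sentence of length m = 5, every position vacant
    assert slots.vacancies_at(5) == 5     # v(5): positions 1..5 are all vacant
    slots.occupy(2)                       # place a translated word in position 2
    slots.occupy(4)                       # ... and another in position 4
    assert slots.vacancies_at(3) == 2     # positions 1 and 3 are still vacant
    assert slots.vacancies_at(5) == 3     # positions 1, 3 and 5 remain vacant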
+""" + +import warnings +from collections import defaultdict +from math import factorial + +from nltk.translate import AlignedSent, Alignment, IBMModel, IBMModel4 +from nltk.translate.ibm_model import Counts, longest_target_sentence_length + + +class IBMModel5(IBMModel): + """ + Translation model that keeps track of vacant positions in the target + sentence to decide where to place translated words + + >>> bitext = [] + >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big'])) + >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small'])) + >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small'])) + >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house'])) + >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book'])) + >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book'])) + >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book'])) + >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize'])) + >>> src_classes = {'the': 0, 'a': 0, 'small': 1, 'big': 1, 'house': 2, 'book': 2, 'is': 3, 'was': 3, 'i': 4, 'summarize': 5 } + >>> trg_classes = {'das': 0, 'ein': 0, 'haus': 1, 'buch': 1, 'klein': 2, 'groß': 2, 'ist': 3, 'war': 3, 'ja': 4, 'ich': 5, 'fasse': 6, 'zusammen': 6 } + + >>> ibm5 = IBMModel5(bitext, 5, src_classes, trg_classes) + + >>> print(round(ibm5.head_vacancy_table[1][1][1], 3)) + 1.0 + >>> print(round(ibm5.head_vacancy_table[2][1][1], 3)) + 0.0 + >>> print(round(ibm5.non_head_vacancy_table[3][3][6], 3)) + 1.0 + + >>> print(round(ibm5.fertility_table[2]['summarize'], 3)) + 1.0 + >>> print(round(ibm5.fertility_table[1]['book'], 3)) + 1.0 + + >>> print(round(ibm5.p1, 3)) + 0.033 + + >>> test_sentence = bitext[2] + >>> test_sentence.words + ['das', 'buch', 'ist', 'ja', 'klein'] + >>> test_sentence.mots + ['the', 'book', 'is', 'small'] + >>> test_sentence.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)]) + + """ + + MIN_SCORE_FACTOR = 0.2 + """ + Alignments with scores below this factor are pruned during sampling + """ + + def __init__( + self, + sentence_aligned_corpus, + iterations, + source_word_classes, + target_word_classes, + probability_tables=None, + ): + """ + Train on ``sentence_aligned_corpus`` and create a lexical + translation model, vacancy models, a fertility model, and a + model for generating NULL-aligned words. + + Translation direction is from ``AlignedSent.mots`` to + ``AlignedSent.words``. + + :param sentence_aligned_corpus: Sentence-aligned parallel corpus + :type sentence_aligned_corpus: list(AlignedSent) + + :param iterations: Number of iterations to run training algorithm + :type iterations: int + + :param source_word_classes: Lookup table that maps a source word + to its word class, the latter represented by an integer id + :type source_word_classes: dict[str]: int + + :param target_word_classes: Lookup table that maps a target word + to its word class, the latter represented by an integer id + :type target_word_classes: dict[str]: int + + :param probability_tables: Optional. Use this to pass in custom + probability values. If not specified, probabilities will be + set to a uniform distribution, or some other sensible value. 
+ If specified, all the following entries must be present: + ``translation_table``, ``alignment_table``, + ``fertility_table``, ``p1``, ``head_distortion_table``, + ``non_head_distortion_table``, ``head_vacancy_table``, + ``non_head_vacancy_table``. See ``IBMModel``, ``IBMModel4``, + and ``IBMModel5`` for the type and purpose of these tables. + :type probability_tables: dict[str]: object + """ + super().__init__(sentence_aligned_corpus) + self.reset_probabilities() + self.src_classes = source_word_classes + self.trg_classes = target_word_classes + + if probability_tables is None: + # Get probabilities from IBM model 4 + ibm4 = IBMModel4( + sentence_aligned_corpus, + iterations, + source_word_classes, + target_word_classes, + ) + self.translation_table = ibm4.translation_table + self.alignment_table = ibm4.alignment_table + self.fertility_table = ibm4.fertility_table + self.p1 = ibm4.p1 + self.head_distortion_table = ibm4.head_distortion_table + self.non_head_distortion_table = ibm4.non_head_distortion_table + self.set_uniform_probabilities(sentence_aligned_corpus) + else: + # Set user-defined probabilities + self.translation_table = probability_tables["translation_table"] + self.alignment_table = probability_tables["alignment_table"] + self.fertility_table = probability_tables["fertility_table"] + self.p1 = probability_tables["p1"] + self.head_distortion_table = probability_tables["head_distortion_table"] + self.non_head_distortion_table = probability_tables[ + "non_head_distortion_table" + ] + self.head_vacancy_table = probability_tables["head_vacancy_table"] + self.non_head_vacancy_table = probability_tables["non_head_vacancy_table"] + + for n in range(0, iterations): + self.train(sentence_aligned_corpus) + + def reset_probabilities(self): + super().reset_probabilities() + self.head_vacancy_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + """ + dict[int][int][int]: float. Probability(vacancy difference | + number of remaining valid positions,target word class). + Values accessed as ``head_vacancy_table[dv][v_max][trg_class]``. + """ + + self.non_head_vacancy_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: self.MIN_PROB)) + ) + """ + dict[int][int][int]: float. Probability(vacancy difference | + number of remaining valid positions,target word class). + Values accessed as ``non_head_vacancy_table[dv][v_max][trg_class]``. + """ + + def set_uniform_probabilities(self, sentence_aligned_corpus): + """ + Set vacancy probabilities uniformly to + 1 / cardinality of vacancy difference values + """ + max_m = longest_target_sentence_length(sentence_aligned_corpus) + + # The maximum vacancy difference occurs when a word is placed in + # the last available position m of the target sentence and the + # previous word position has no vacancies. + # The minimum is 1-max_v, when a word is placed in the first + # available position and the previous word is placed beyond the + # last available position. + # Thus, the number of possible vacancy difference values is + # (max_v) - (1-max_v) + 1 = 2 * max_v. + if max_m > 0 and (1 / (2 * max_m)) < IBMModel.MIN_PROB: + warnings.warn( + "A target sentence is too long (" + + str(max_m) + + " words). Results may be less accurate." 
+ ) + + for max_v in range(1, max_m + 1): + for dv in range(1, max_m + 1): + initial_prob = 1 / (2 * max_v) + self.head_vacancy_table[dv][max_v] = defaultdict(lambda: initial_prob) + self.head_vacancy_table[-(dv - 1)][max_v] = defaultdict( + lambda: initial_prob + ) + self.non_head_vacancy_table[dv][max_v] = defaultdict( + lambda: initial_prob + ) + self.non_head_vacancy_table[-(dv - 1)][max_v] = defaultdict( + lambda: initial_prob + ) + + def train(self, parallel_corpus): + counts = Model5Counts() + for aligned_sentence in parallel_corpus: + l = len(aligned_sentence.mots) + m = len(aligned_sentence.words) + + # Sample the alignment space + sampled_alignments, best_alignment = self.sample(aligned_sentence) + # Record the most probable alignment + aligned_sentence.alignment = Alignment( + best_alignment.zero_indexed_alignment() + ) + + # E step (a): Compute normalization factors to weigh counts + total_count = self.prob_of_alignments(sampled_alignments) + + # E step (b): Collect counts + for alignment_info in sampled_alignments: + count = self.prob_t_a_given_s(alignment_info) + normalized_count = count / total_count + + for j in range(1, m + 1): + counts.update_lexical_translation( + normalized_count, alignment_info, j + ) + + slots = Slots(m) + for i in range(1, l + 1): + counts.update_vacancy( + normalized_count, alignment_info, i, self.trg_classes, slots + ) + + counts.update_null_generation(normalized_count, alignment_info) + counts.update_fertility(normalized_count, alignment_info) + + # M step: Update probabilities with maximum likelihood estimates + # If any probability is less than MIN_PROB, clamp it to MIN_PROB + existing_alignment_table = self.alignment_table + self.reset_probabilities() + self.alignment_table = existing_alignment_table # don't retrain + + self.maximize_lexical_translation_probabilities(counts) + self.maximize_vacancy_probabilities(counts) + self.maximize_fertility_probabilities(counts) + self.maximize_null_generation_probabilities(counts) + + def sample(self, sentence_pair): + """ + Sample the most probable alignments from the entire alignment + space according to Model 4 + + Note that Model 4 scoring is used instead of Model 5 because the + latter is too expensive to compute. + + First, determine the best alignment according to IBM Model 2. + With this initial alignment, use hill climbing to determine the + best alignment according to a IBM Model 4. Add this + alignment and its neighbors to the sample set. Repeat this + process with other initial alignments obtained by pegging an + alignment point. Finally, prune alignments that have + substantially lower Model 4 scores than the best alignment. 
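The pruning mentioned above keeps only alignments whose Model 4 score is within a fixed factor of the best score. The function below is an illustrative restatement of prune (defined next), with `model` standing in for the IBMModel5 instance; it is a sketch, not the library's API.

    def keep_good_alignments(model, alignment_infos):
        scored = [(a, IBMModel4.model4_prob_t_a_given_s(a, model)) for a in alignment_infos]
        best_score = max(score for _, score in scored)
        threshold = IBMModel5.MIN_SCORE_FACTOR * best_score
        return {a for a, score in scored if score > threshold}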
+ + :param sentence_pair: Source and target language sentence pair + to generate a sample of alignments from + :type sentence_pair: AlignedSent + + :return: A set of best alignments represented by their ``AlignmentInfo`` + and the best alignment of the set for convenience + :rtype: set(AlignmentInfo), AlignmentInfo + """ + sampled_alignments, best_alignment = super().sample(sentence_pair) + return self.prune(sampled_alignments), best_alignment + + def prune(self, alignment_infos): + """ + Removes alignments from ``alignment_infos`` that have + substantially lower Model 4 scores than the best alignment + + :return: Pruned alignments + :rtype: set(AlignmentInfo) + """ + alignments = [] + best_score = 0 + + for alignment_info in alignment_infos: + score = IBMModel4.model4_prob_t_a_given_s(alignment_info, self) + best_score = max(score, best_score) + alignments.append((alignment_info, score)) + + threshold = IBMModel5.MIN_SCORE_FACTOR * best_score + alignments = [a[0] for a in alignments if a[1] > threshold] + return set(alignments) + + def hillclimb(self, alignment_info, j_pegged=None): + """ + Starting from the alignment in ``alignment_info``, look at + neighboring alignments iteratively for the best one, according + to Model 4 + + Note that Model 4 scoring is used instead of Model 5 because the + latter is too expensive to compute. + + There is no guarantee that the best alignment in the alignment + space will be found, because the algorithm might be stuck in a + local maximum. + + :param j_pegged: If specified, the search will be constrained to + alignments where ``j_pegged`` remains unchanged + :type j_pegged: int + + :return: The best alignment found from hill climbing + :rtype: AlignmentInfo + """ + alignment = alignment_info # alias with shorter name + max_probability = IBMModel4.model4_prob_t_a_given_s(alignment, self) + + while True: + old_alignment = alignment + for neighbor_alignment in self.neighboring(alignment, j_pegged): + neighbor_probability = IBMModel4.model4_prob_t_a_given_s( + neighbor_alignment, self + ) + + if neighbor_probability > max_probability: + alignment = neighbor_alignment + max_probability = neighbor_probability + + if alignment == old_alignment: + # Until there are no better alignments + break + + alignment.score = max_probability + return alignment + + def prob_t_a_given_s(self, alignment_info): + """ + Probability of target sentence and an alignment given the + source sentence + """ + probability = 1.0 + MIN_PROB = IBMModel.MIN_PROB + slots = Slots(len(alignment_info.trg_sentence) - 1) + + def null_generation_term(): + # Binomial distribution: B(m - null_fertility, p1) + value = 1.0 + p1 = self.p1 + p0 = 1 - p1 + null_fertility = alignment_info.fertility_of_i(0) + m = len(alignment_info.trg_sentence) - 1 + value *= pow(p1, null_fertility) * pow(p0, m - 2 * null_fertility) + if value < MIN_PROB: + return MIN_PROB + + # Combination: (m - null_fertility) choose null_fertility + for i in range(1, null_fertility + 1): + value *= (m - null_fertility - i + 1) / i + return value + + def fertility_term(): + value = 1.0 + src_sentence = alignment_info.src_sentence + for i in range(1, len(src_sentence)): + fertility = alignment_info.fertility_of_i(i) + value *= ( + factorial(fertility) + * self.fertility_table[fertility][src_sentence[i]] + ) + if value < MIN_PROB: + return MIN_PROB + return value + + def lexical_translation_term(j): + t = alignment_info.trg_sentence[j] + i = alignment_info.alignment[j] + s = alignment_info.src_sentence[i] + return 
self.translation_table[t][s] + + def vacancy_term(i): + value = 1.0 + tablet = alignment_info.cepts[i] + tablet_length = len(tablet) + total_vacancies = slots.vacancies_at(len(slots)) + + # case 1: NULL-aligned words + if tablet_length == 0: + return value + + # case 2: head word + j = tablet[0] + previous_cept = alignment_info.previous_cept(j) + previous_center = alignment_info.center_of_cept(previous_cept) + dv = slots.vacancies_at(j) - slots.vacancies_at(previous_center) + max_v = total_vacancies - tablet_length + 1 + trg_class = self.trg_classes[alignment_info.trg_sentence[j]] + value *= self.head_vacancy_table[dv][max_v][trg_class] + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + if value < MIN_PROB: + return MIN_PROB + + # case 3: non-head words + for k in range(1, tablet_length): + previous_position = tablet[k - 1] + previous_vacancies = slots.vacancies_at(previous_position) + j = tablet[k] + dv = slots.vacancies_at(j) - previous_vacancies + max_v = total_vacancies - tablet_length + k + 1 - previous_vacancies + trg_class = self.trg_classes[alignment_info.trg_sentence[j]] + value *= self.non_head_vacancy_table[dv][max_v][trg_class] + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + if value < MIN_PROB: + return MIN_PROB + + return value + + # end nested functions + + # Abort computation whenever probability falls below MIN_PROB at + # any point, since MIN_PROB can be considered as zero + probability *= null_generation_term() + if probability < MIN_PROB: + return MIN_PROB + + probability *= fertility_term() + if probability < MIN_PROB: + return MIN_PROB + + for j in range(1, len(alignment_info.trg_sentence)): + probability *= lexical_translation_term(j) + if probability < MIN_PROB: + return MIN_PROB + + for i in range(1, len(alignment_info.src_sentence)): + probability *= vacancy_term(i) + if probability < MIN_PROB: + return MIN_PROB + + return probability + + def maximize_vacancy_probabilities(self, counts): + MIN_PROB = IBMModel.MIN_PROB + head_vacancy_table = self.head_vacancy_table + for dv, max_vs in counts.head_vacancy.items(): + for max_v, trg_classes in max_vs.items(): + for t_cls in trg_classes: + estimate = ( + counts.head_vacancy[dv][max_v][t_cls] + / counts.head_vacancy_for_any_dv[max_v][t_cls] + ) + head_vacancy_table[dv][max_v][t_cls] = max(estimate, MIN_PROB) + + non_head_vacancy_table = self.non_head_vacancy_table + for dv, max_vs in counts.non_head_vacancy.items(): + for max_v, trg_classes in max_vs.items(): + for t_cls in trg_classes: + estimate = ( + counts.non_head_vacancy[dv][max_v][t_cls] + / counts.non_head_vacancy_for_any_dv[max_v][t_cls] + ) + non_head_vacancy_table[dv][max_v][t_cls] = max(estimate, MIN_PROB) + + +class Model5Counts(Counts): + """ + Data object to store counts of various parameters during training. + Includes counts for vacancies. 
+ """ + + def __init__(self): + super().__init__() + self.head_vacancy = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + self.head_vacancy_for_any_dv = defaultdict(lambda: defaultdict(lambda: 0.0)) + self.non_head_vacancy = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: 0.0)) + ) + self.non_head_vacancy_for_any_dv = defaultdict(lambda: defaultdict(lambda: 0.0)) + + def update_vacancy(self, count, alignment_info, i, trg_classes, slots): + """ + :param count: Value to add to the vacancy counts + :param alignment_info: Alignment under consideration + :param i: Source word position under consideration + :param trg_classes: Target word classes + :param slots: Vacancy states of the slots in the target sentence. + Output parameter that will be modified as new words are placed + in the target sentence. + """ + tablet = alignment_info.cepts[i] + tablet_length = len(tablet) + total_vacancies = slots.vacancies_at(len(slots)) + + # case 1: NULL aligned words + if tablet_length == 0: + return # ignore zero fertility words + + # case 2: head word + j = tablet[0] + previous_cept = alignment_info.previous_cept(j) + previous_center = alignment_info.center_of_cept(previous_cept) + dv = slots.vacancies_at(j) - slots.vacancies_at(previous_center) + max_v = total_vacancies - tablet_length + 1 + trg_class = trg_classes[alignment_info.trg_sentence[j]] + self.head_vacancy[dv][max_v][trg_class] += count + self.head_vacancy_for_any_dv[max_v][trg_class] += count + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + + # case 3: non-head words + for k in range(1, tablet_length): + previous_position = tablet[k - 1] + previous_vacancies = slots.vacancies_at(previous_position) + j = tablet[k] + dv = slots.vacancies_at(j) - previous_vacancies + max_v = total_vacancies - tablet_length + k + 1 - previous_vacancies + trg_class = trg_classes[alignment_info.trg_sentence[j]] + self.non_head_vacancy[dv][max_v][trg_class] += count + self.non_head_vacancy_for_any_dv[max_v][trg_class] += count + slots.occupy(j) # mark position as occupied + total_vacancies -= 1 + + +class Slots: + """ + Represents positions in a target sentence. Used to keep track of + which slot (position) is occupied. 
+ """ + + def __init__(self, target_sentence_length): + self._slots = [False] * (target_sentence_length + 1) # 1-indexed + + def occupy(self, position): + """ + :return: Mark slot at ``position`` as occupied + """ + self._slots[position] = True + + def vacancies_at(self, position): + """ + :return: Number of vacant slots up to, and including, ``position`` + """ + vacancies = 0 + for k in range(1, position + 1): + if not self._slots[k]: + vacancies += 1 + return vacancies + + def __len__(self): + return len(self._slots) - 1 # exclude dummy zeroeth element diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/meteor_score.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/meteor_score.py new file mode 100644 index 0000000000000000000000000000000000000000..847f2ad19205816f71caff5623b1d992ef2dbfda --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/meteor_score.py @@ -0,0 +1,409 @@ +# Natural Language Toolkit: Machine Translation +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Uday Krishna +# Contributor: Tom Aarsen +# URL: +# For license information, see LICENSE.TXT + + +from itertools import chain, product +from typing import Callable, Iterable, List, Tuple + +from nltk.corpus import WordNetCorpusReader, wordnet +from nltk.stem.api import StemmerI +from nltk.stem.porter import PorterStemmer + + +def _generate_enums( + hypothesis: Iterable[str], + reference: Iterable[str], + preprocess: Callable[[str], str] = str.lower, +) -> Tuple[List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Takes in pre-tokenized inputs for hypothesis and reference and returns + enumerated word lists for each of them + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :preprocess: preprocessing method (default str.lower) + :return: enumerated words list + """ + if isinstance(hypothesis, str): + raise TypeError( + f'"hypothesis" expects pre-tokenized hypothesis (Iterable[str]): {hypothesis}' + ) + + if isinstance(reference, str): + raise TypeError( + f'"reference" expects pre-tokenized reference (Iterable[str]): {reference}' + ) + + enum_hypothesis_list = list(enumerate(map(preprocess, hypothesis))) + enum_reference_list = list(enumerate(map(preprocess, reference))) + return enum_hypothesis_list, enum_reference_list + + +def exact_match( + hypothesis: Iterable[str], reference: Iterable[str] +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + matches exact words in hypothesis and reference + and returns a word mapping based on the enumerated + word id between hypothesis and reference + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _match_enums(enum_hypothesis_list, enum_reference_list) + + +def _match_enums( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + matches exact words in hypothesis and reference and returns + a word mapping between enum_hypothesis_list and enum_reference_list + based on the enumerated word id. 
+ + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + word_match = [] + for i in range(len(enum_hypothesis_list))[::-1]: + for j in range(len(enum_reference_list))[::-1]: + if enum_hypothesis_list[i][1] == enum_reference_list[j][1]: + word_match.append( + (enum_hypothesis_list[i][0], enum_reference_list[j][0]) + ) + enum_hypothesis_list.pop(i) + enum_reference_list.pop(j) + break + return word_match, enum_hypothesis_list, enum_reference_list + + +def _enum_stem_match( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], + stemmer: StemmerI = PorterStemmer(), +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Stems each word and matches them in hypothesis and reference + and returns a word mapping between enum_hypothesis_list and + enum_reference_list based on the enumerated word id. The function also + returns a enumerated list of unmatched words for hypothesis and reference. + + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + stemmed_enum_hypothesis_list = [ + (word_pair[0], stemmer.stem(word_pair[1])) for word_pair in enum_hypothesis_list + ] + + stemmed_enum_reference_list = [ + (word_pair[0], stemmer.stem(word_pair[1])) for word_pair in enum_reference_list + ] + + return _match_enums(stemmed_enum_hypothesis_list, stemmed_enum_reference_list) + + +def stem_match( + hypothesis: Iterable[str], + reference: Iterable[str], + stemmer: StemmerI = PorterStemmer(), +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Stems each word and matches them in hypothesis and reference + and returns a word mapping between hypothesis and reference + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :return: enumerated matched tuples, enumerated unmatched hypothesis tuples, + enumerated unmatched reference tuples + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _enum_stem_match(enum_hypothesis_list, enum_reference_list, stemmer=stemmer) + + +def _enum_wordnetsyn_match( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Matches each word in reference to a word in hypothesis + if any synonym of a hypothesis word is the exact match + to the reference word. 
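The synonym set used for this matching step is built from WordNet lemma names, as in the loop below. The following sketch shows that construction for a single, hypothetical hypothesis word and assumes the WordNet corpus is available.

    from itertools import chain
    from nltk.corpus import wordnet

    word = 'guide'                                   # hypothetical hypothesis word
    synonyms = set(
        chain.from_iterable(
            (lemma.name() for lemma in synset.lemmas() if '_' not in lemma.name())
            for synset in wordnet.synsets(word)
        )
    ).union({word})
    # a reference word counts as a match if it appears in `synonyms`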
+ + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + """ + word_match = [] + for i in range(len(enum_hypothesis_list))[::-1]: + hypothesis_syns = set( + chain.from_iterable( + ( + lemma.name() + for lemma in synset.lemmas() + if lemma.name().find("_") < 0 + ) + for synset in wordnet.synsets(enum_hypothesis_list[i][1]) + ) + ).union({enum_hypothesis_list[i][1]}) + for j in range(len(enum_reference_list))[::-1]: + if enum_reference_list[j][1] in hypothesis_syns: + word_match.append( + (enum_hypothesis_list[i][0], enum_reference_list[j][0]) + ) + enum_hypothesis_list.pop(i) + enum_reference_list.pop(j) + break + return word_match, enum_hypothesis_list, enum_reference_list + + +def wordnetsyn_match( + hypothesis: Iterable[str], + reference: Iterable[str], + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Matches each word in reference to a word in hypothesis if any synonym + of a hypothesis word is the exact match to the reference word. + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :return: list of mapped tuples + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _enum_wordnetsyn_match( + enum_hypothesis_list, enum_reference_list, wordnet=wordnet + ) + + +def _enum_align_words( + enum_hypothesis_list: List[Tuple[int, str]], + enum_reference_list: List[Tuple[int, str]], + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Aligns/matches words in the hypothesis to reference by sequentially + applying exact match, stemmed match and wordnet based synonym match. + in case there are multiple matches the match which has the least number + of crossing is chosen. Takes enumerated list as input instead of + string input + + :param enum_hypothesis_list: enumerated hypothesis list + :param enum_reference_list: enumerated reference list + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :return: sorted list of matched tuples, unmatched hypothesis list, + unmatched reference list + """ + exact_matches, enum_hypothesis_list, enum_reference_list = _match_enums( + enum_hypothesis_list, enum_reference_list + ) + + stem_matches, enum_hypothesis_list, enum_reference_list = _enum_stem_match( + enum_hypothesis_list, enum_reference_list, stemmer=stemmer + ) + + wns_matches, enum_hypothesis_list, enum_reference_list = _enum_wordnetsyn_match( + enum_hypothesis_list, enum_reference_list, wordnet=wordnet + ) + + return ( + sorted( + exact_matches + stem_matches + wns_matches, key=lambda wordpair: wordpair[0] + ), + enum_hypothesis_list, + enum_reference_list, + ) + + +def align_words( + hypothesis: Iterable[str], + reference: Iterable[str], + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, +) -> Tuple[List[Tuple[int, int]], List[Tuple[int, str]], List[Tuple[int, str]]]: + """ + Aligns/matches words in the hypothesis to reference by sequentially + applying exact match, stemmed match and wordnet based synonym match. 
+ In case there are multiple matches the match which has the least number + of crossing is chosen. + + :param hypothesis: pre-tokenized hypothesis + :param reference: pre-tokenized reference + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :return: sorted list of matched tuples, unmatched hypothesis list, unmatched reference list + """ + enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference) + return _enum_align_words( + enum_hypothesis_list, enum_reference_list, stemmer=stemmer, wordnet=wordnet + ) + + +def _count_chunks(matches: List[Tuple[int, int]]) -> int: + """ + Counts the fewest possible number of chunks such that matched unigrams + of each chunk are adjacent to each other. This is used to calculate the + fragmentation part of the metric. + + :param matches: list containing a mapping of matched words (output of align_words) + :return: Number of chunks a sentence is divided into post alignment + """ + i = 0 + chunks = 1 + while i < len(matches) - 1: + if (matches[i + 1][0] == matches[i][0] + 1) and ( + matches[i + 1][1] == matches[i][1] + 1 + ): + i += 1 + continue + i += 1 + chunks += 1 + return chunks + + +def single_meteor_score( + reference: Iterable[str], + hypothesis: Iterable[str], + preprocess: Callable[[str], str] = str.lower, + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, + alpha: float = 0.9, + beta: float = 3.0, + gamma: float = 0.5, +) -> float: + """ + Calculates METEOR score for single hypothesis and reference as per + "Meteor: An Automatic Metric for MT Evaluation with HighLevels of + Correlation with Human Judgments" by Alon Lavie and Abhaya Agarwal, + in Proceedings of ACL. + https://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf + + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', 'that', 'the', 'military', 'always', 'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', 'that', 'the', 'military', 'will', 'forever', 'heed', 'Party', 'commands'] + + + >>> round(single_meteor_score(reference1, hypothesis1),4) + 0.6944 + + If there is no words match during the alignment the method returns the + score as 0. We can safely return a zero instead of raising a + division by zero error as no match usually implies a bad translation. + + >>> round(single_meteor_score(['this', 'is', 'a', 'cat'], ['non', 'matching', 'hypothesis']),4) + 0.0 + + :param reference: pre-tokenized reference + :param hypothesis: pre-tokenized hypothesis + :param preprocess: preprocessing function (default str.lower) + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :param alpha: parameter for controlling relative weights of precision and recall. + :param beta: parameter for controlling shape of penalty as a + function of as a function of fragmentation. + :param gamma: relative weight assigned to fragmentation penalty. + :return: The sentence-level METEOR score. 
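+
+ The score is computed from the alignment as ``fmean * (1 - penalty)``, where
+ ``fmean = P * R / (alpha * P + (1 - alpha) * R)`` for unigram precision ``P`` and
+ recall ``R``, and ``penalty = gamma * (chunks / matches) ** beta``. As an
+ illustrative consequence, even a hypothesis identical to its reference scores
+ slightly below 1.0 because of the fragmentation penalty:
+
+ >>> round(single_meteor_score(['this', 'is', 'a', 'cat'], ['this', 'is', 'a', 'cat']), 4)
+ 0.9922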
+ """ + enum_hypothesis, enum_reference = _generate_enums( + hypothesis, reference, preprocess=preprocess + ) + translation_length = len(enum_hypothesis) + reference_length = len(enum_reference) + matches, _, _ = _enum_align_words( + enum_hypothesis, enum_reference, stemmer=stemmer, wordnet=wordnet + ) + matches_count = len(matches) + try: + precision = float(matches_count) / translation_length + recall = float(matches_count) / reference_length + fmean = (precision * recall) / (alpha * precision + (1 - alpha) * recall) + chunk_count = float(_count_chunks(matches)) + frag_frac = chunk_count / matches_count + except ZeroDivisionError: + return 0.0 + penalty = gamma * frag_frac**beta + return (1 - penalty) * fmean + + +def meteor_score( + references: Iterable[Iterable[str]], + hypothesis: Iterable[str], + preprocess: Callable[[str], str] = str.lower, + stemmer: StemmerI = PorterStemmer(), + wordnet: WordNetCorpusReader = wordnet, + alpha: float = 0.9, + beta: float = 3.0, + gamma: float = 0.5, +) -> float: + """ + Calculates METEOR score for hypothesis with multiple references as + described in "Meteor: An Automatic Metric for MT Evaluation with + HighLevels of Correlation with Human Judgments" by Alon Lavie and + Abhaya Agarwal, in Proceedings of ACL. + https://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf + + + In case of multiple references the best score is chosen. This method + iterates over single_meteor_score and picks the best pair among all + the references for a given hypothesis + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures', 'that', 'the', 'military', 'always', 'obeys', 'the', 'commands', 'of', 'the', 'party'] + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', 'forever', 'hearing', 'the', 'activity', 'guidebook', 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures', 'that', 'the', 'military', 'will', 'forever', 'heed', 'Party', 'commands'] + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', 'guarantees', 'the', 'military', 'forces', 'always', 'being', 'under', 'the', 'command', 'of', 'the', 'Party'] + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', 'army', 'always', 'to', 'heed', 'the', 'directions', 'of', 'the', 'party'] + + >>> round(meteor_score([reference1, reference2, reference3], hypothesis1),4) + 0.6944 + + If there is no words match during the alignment the method returns the + score as 0. We can safely return a zero instead of raising a + division by zero error as no match usually implies a bad translation. + + >>> round(meteor_score([['this', 'is', 'a', 'cat']], ['non', 'matching', 'hypothesis']),4) + 0.0 + + :param references: pre-tokenized reference sentences + :param hypothesis: a pre-tokenized hypothesis sentence + :param preprocess: preprocessing function (default str.lower) + :param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer()) + :param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet) + :param alpha: parameter for controlling relative weights of precision and recall. + :param beta: parameter for controlling shape of penalty as a function + of as a function of fragmentation. + :param gamma: relative weight assigned to fragmentation penalty. + :return: The sentence-level METEOR score. 
+ """ + return max( + single_meteor_score( + reference, + hypothesis, + preprocess=preprocess, + stemmer=stemmer, + wordnet=wordnet, + alpha=alpha, + beta=beta, + gamma=gamma, + ) + for reference in references + ) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/nist_score.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/nist_score.py new file mode 100644 index 0000000000000000000000000000000000000000..0035a9dcdae5f1acf703c2c957353f880db22615 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/nist_score.py @@ -0,0 +1,195 @@ +# Natural Language Toolkit: NIST Score +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: +# Contributors: +# URL: +# For license information, see LICENSE.TXT + +"""NIST score implementation.""" + +import fractions +import math +from collections import Counter + +from nltk.util import ngrams + + +def sentence_nist(references, hypothesis, n=5): + """ + Calculate NIST score from + George Doddington. 2002. "Automatic evaluation of machine translation quality + using n-gram co-occurrence statistics." Proceedings of HLT. + Morgan Kaufmann Publishers Inc. https://dl.acm.org/citation.cfm?id=1289189.1289273 + + DARPA commissioned NIST to develop an MT evaluation facility based on the BLEU + score. The official script used by NIST to compute BLEU and NIST score is + mteval-14.pl. The main differences are: + + - BLEU uses geometric mean of the ngram overlaps, NIST uses arithmetic mean. + - NIST has a different brevity penalty + - NIST score from mteval-14.pl has a self-contained tokenizer + + Note: The mteval-14.pl includes a smoothing function for BLEU score that is NOT + used in the NIST score computation. + + >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', + ... 'ensures', 'that', 'the', 'military', 'always', + ... 'obeys', 'the', 'commands', 'of', 'the', 'party'] + + >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops', + ... 'forever', 'hearing', 'the', 'activity', 'guidebook', + ... 'that', 'party', 'direct'] + + >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', + ... 'ensures', 'that', 'the', 'military', 'will', 'forever', + ... 'heed', 'Party', 'commands'] + + >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which', + ... 'guarantees', 'the', 'military', 'forces', 'always', + ... 'being', 'under', 'the', 'command', 'of', 'the', + ... 'Party'] + + >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', + ... 'army', 'always', 'to', 'heed', 'the', 'directions', + ... 'of', 'the', 'party'] + + >>> sentence_nist([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS + 3.3709... + + >>> sentence_nist([reference1, reference2, reference3], hypothesis2) # doctest: +ELLIPSIS + 1.4619... + + :param references: reference sentences + :type references: list(list(str)) + :param hypothesis: a hypothesis sentence + :type hypothesis: list(str) + :param n: highest n-gram order + :type n: int + """ + return corpus_nist([references], [hypothesis], n) + + +def corpus_nist(list_of_references, hypotheses, n=5): + """ + Calculate a single corpus-level NIST score (aka. system-level BLEU) for all + the hypotheses and their respective references. + + :param references: a corpus of lists of reference sentences, w.r.t. 
hypotheses + :type references: list(list(list(str))) + :param hypotheses: a list of hypothesis sentences + :type hypotheses: list(list(str)) + :param n: highest n-gram order + :type n: int + """ + # Before proceeding to compute NIST, perform sanity checks. + assert len(list_of_references) == len( + hypotheses + ), "The number of hypotheses and their reference(s) should be the same" + + # Collect the ngram coounts from the reference sentences. + ngram_freq = Counter() + total_reference_words = 0 + for ( + references + ) in list_of_references: # For each source sent, there's a list of reference sents. + for reference in references: + # For each order of ngram, count the ngram occurrences. + for i in range(1, n + 1): + ngram_freq.update(ngrams(reference, i)) + total_reference_words += len(reference) + + # Compute the information weights based on the reference sentences. + # Eqn 2 in Doddington (2002): + # Info(w_1 ... w_n) = log_2 [ (# of occurrences of w_1 ... w_n-1) / (# of occurrences of w_1 ... w_n) ] + information_weights = {} + for _ngram in ngram_freq: # w_1 ... w_n + _mgram = _ngram[:-1] # w_1 ... w_n-1 + # From https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v13a.pl#L546 + # it's computed as such: + # denominator = ngram_freq[_mgram] if _mgram and _mgram in ngram_freq else denominator = total_reference_words + # information_weights[_ngram] = -1 * math.log(ngram_freq[_ngram]/denominator) / math.log(2) + # + # Mathematically, it's equivalent to the our implementation: + if _mgram and _mgram in ngram_freq: + numerator = ngram_freq[_mgram] + else: + numerator = total_reference_words + information_weights[_ngram] = math.log(numerator / ngram_freq[_ngram], 2) + + # Micro-average. + nist_precision_numerator_per_ngram = Counter() + nist_precision_denominator_per_ngram = Counter() + l_ref, l_sys = 0, 0 + # For each order of ngram. + for i in range(1, n + 1): + # Iterate through each hypothesis and their corresponding references. + for references, hypothesis in zip(list_of_references, hypotheses): + hyp_len = len(hypothesis) + + # Find reference with the best NIST score. + nist_score_per_ref = [] + for reference in references: + _ref_len = len(reference) + # Counter of ngrams in hypothesis. + hyp_ngrams = ( + Counter(ngrams(hypothesis, i)) + if len(hypothesis) >= i + else Counter() + ) + ref_ngrams = ( + Counter(ngrams(reference, i)) if len(reference) >= i else Counter() + ) + ngram_overlaps = hyp_ngrams & ref_ngrams + # Precision part of the score in Eqn 3 + _numerator = sum( + information_weights[_ngram] * count + for _ngram, count in ngram_overlaps.items() + ) + _denominator = sum(hyp_ngrams.values()) + _precision = 0 if _denominator == 0 else _numerator / _denominator + nist_score_per_ref.append( + (_precision, _numerator, _denominator, _ref_len) + ) + # Best reference. + precision, numerator, denominator, ref_len = max(nist_score_per_ref) + nist_precision_numerator_per_ngram[i] += numerator + nist_precision_denominator_per_ngram[i] += denominator + l_ref += ref_len + l_sys += hyp_len + + # Final NIST micro-average mean aggregation. + nist_precision = 0 + for i in nist_precision_numerator_per_ngram: + precision = ( + nist_precision_numerator_per_ngram[i] + / nist_precision_denominator_per_ngram[i] + ) + nist_precision += precision + # Eqn 3 in Doddington(2002) + return nist_precision * nist_length_penalty(l_ref, l_sys) + + +def nist_length_penalty(ref_len, hyp_len): + """ + Calculates the NIST length penalty, from Eq. 
3 in Doddington (2002) + + penalty = exp( beta * log( min( len(hyp)/len(ref) , 1.0 ))) + + where, + + `beta` is chosen to make the brevity penalty factor = 0.5 when the + no. of words in the system output (hyp) is 2/3 of the average + no. of words in the reference translation (ref) + + The NIST penalty is different from BLEU's such that it minimize the impact + of the score of small variations in the length of a translation. + See Fig. 4 in Doddington (2002) + """ + ratio = hyp_len / ref_len + if 0 < ratio < 1: + ratio_x, score_x = 1.5, 0.5 + beta = math.log(score_x) / math.log(ratio_x) ** 2 + return math.exp(beta * math.log(ratio) ** 2) + else: # ratio <= 0 or ratio >= 1 + return max(min(ratio, 1.0), 0.0) diff --git a/llmeval-env/lib/python3.10/site-packages/nltk/translate/stack_decoder.py b/llmeval-env/lib/python3.10/site-packages/nltk/translate/stack_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..29c6c99ff8d39848e3e17d413e9b40296bd5dc71 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nltk/translate/stack_decoder.py @@ -0,0 +1,515 @@ +# Natural Language Toolkit: Stack decoder +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tah Wei Hoon +# URL: +# For license information, see LICENSE.TXT + +""" +A decoder that uses stacks to implement phrase-based translation. + +In phrase-based translation, the source sentence is segmented into +phrases of one or more words, and translations for those phrases are +used to build the target sentence. + +Hypothesis data structures are used to keep track of the source words +translated so far and the partial output. A hypothesis can be expanded +by selecting an untranslated phrase, looking up its translation in a +phrase table, and appending that translation to the partial output. +Translation is complete when a hypothesis covers all source words. + +The search space is huge because the source sentence can be segmented +in different ways, the source phrases can be selected in any order, +and there could be multiple translations for the same source phrase in +the phrase table. To make decoding tractable, stacks are used to limit +the number of candidate hypotheses by doing histogram and/or threshold +pruning. + +Hypotheses with the same number of words translated are placed in the +same stack. In histogram pruning, each stack has a size limit, and +the hypothesis with the lowest score is removed when the stack is full. +In threshold pruning, hypotheses that score below a certain threshold +of the best hypothesis in that stack are removed. + +Hypothesis scoring can include various factors such as phrase +translation probability, language model probability, length of +translation, cost of remaining words to be translated, and so on. + + +References: +Philipp Koehn. 2010. Statistical Machine Translation. +Cambridge University Press, New York. 
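+
+Concretely, both pruning strategies are applied by the ``_Stack`` class below:
+histogram pruning drops the lowest-scoring hypothesis once a stack holds more
+than ``stack_size`` hypotheses, and threshold pruning drops any hypothesis
+whose log score falls below ``best_score + log(beam_threshold)``. For example,
+a ``beam_threshold`` of 0.5 discards hypotheses scoring more than about 0.69
+below the best hypothesis in the same stack.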
+""" + +import warnings +from collections import defaultdict +from math import log + + +class StackDecoder: + """ + Phrase-based stack decoder for machine translation + + >>> from nltk.translate import PhraseTable + >>> phrase_table = PhraseTable() + >>> phrase_table.add(('niemand',), ('nobody',), log(0.8)) + >>> phrase_table.add(('niemand',), ('no', 'one'), log(0.2)) + >>> phrase_table.add(('erwartet',), ('expects',), log(0.8)) + >>> phrase_table.add(('erwartet',), ('expecting',), log(0.2)) + >>> phrase_table.add(('niemand', 'erwartet'), ('one', 'does', 'not', 'expect'), log(0.1)) + >>> phrase_table.add(('die', 'spanische', 'inquisition'), ('the', 'spanish', 'inquisition'), log(0.8)) + >>> phrase_table.add(('!',), ('!',), log(0.8)) + + >>> # nltk.model should be used here once it is implemented + >>> from collections import defaultdict + >>> language_prob = defaultdict(lambda: -999.0) + >>> language_prob[('nobody',)] = log(0.5) + >>> language_prob[('expects',)] = log(0.4) + >>> language_prob[('the', 'spanish', 'inquisition')] = log(0.2) + >>> language_prob[('!',)] = log(0.1) + >>> language_model = type('',(object,),{'probability_change': lambda self, context, phrase: language_prob[phrase], 'probability': lambda self, phrase: language_prob[phrase]})() + + >>> stack_decoder = StackDecoder(phrase_table, language_model) + + >>> stack_decoder.translate(['niemand', 'erwartet', 'die', 'spanische', 'inquisition', '!']) + ['nobody', 'expects', 'the', 'spanish', 'inquisition', '!'] + + """ + + def __init__(self, phrase_table, language_model): + """ + :param phrase_table: Table of translations for source language + phrases and the log probabilities for those translations. + :type phrase_table: PhraseTable + + :param language_model: Target language model. Must define a + ``probability_change`` method that calculates the change in + log probability of a sentence, if a given string is appended + to it. + This interface is experimental and will likely be replaced + with nltk.model once it is implemented. + :type language_model: object + """ + self.phrase_table = phrase_table + self.language_model = language_model + + self.word_penalty = 0.0 + """ + float: Influences the translation length exponentially. + If positive, shorter translations are preferred. + If negative, longer translations are preferred. + If zero, no penalty is applied. + """ + + self.beam_threshold = 0.0 + """ + float: Hypotheses that score below this factor of the best + hypothesis in a stack are dropped from consideration. + Value between 0.0 and 1.0. + """ + + self.stack_size = 100 + """ + int: Maximum number of hypotheses to consider in a stack. + Higher values increase the likelihood of a good translation, + but increases processing time. + """ + + self.__distortion_factor = 0.5 + self.__compute_log_distortion() + + @property + def distortion_factor(self): + """ + float: Amount of reordering of source phrases. + Lower values favour monotone translation, suitable when + word order is similar for both source and target languages. + Value between 0.0 and 1.0. Default 0.5. 
+ """ + return self.__distortion_factor + + @distortion_factor.setter + def distortion_factor(self, d): + self.__distortion_factor = d + self.__compute_log_distortion() + + def __compute_log_distortion(self): + # cache log(distortion_factor) so we don't have to recompute it + # when scoring hypotheses + if self.__distortion_factor == 0.0: + self.__log_distortion_factor = log(1e-9) # 1e-9 is almost zero + else: + self.__log_distortion_factor = log(self.__distortion_factor) + + def translate(self, src_sentence): + """ + :param src_sentence: Sentence to be translated + :type src_sentence: list(str) + + :return: Translated sentence + :rtype: list(str) + """ + sentence = tuple(src_sentence) # prevent accidental modification + sentence_length = len(sentence) + stacks = [ + _Stack(self.stack_size, self.beam_threshold) + for _ in range(0, sentence_length + 1) + ] + empty_hypothesis = _Hypothesis() + stacks[0].push(empty_hypothesis) + + all_phrases = self.find_all_src_phrases(sentence) + future_score_table = self.compute_future_scores(sentence) + for stack in stacks: + for hypothesis in stack: + possible_expansions = StackDecoder.valid_phrases( + all_phrases, hypothesis + ) + for src_phrase_span in possible_expansions: + src_phrase = sentence[src_phrase_span[0] : src_phrase_span[1]] + for translation_option in self.phrase_table.translations_for( + src_phrase + ): + raw_score = self.expansion_score( + hypothesis, translation_option, src_phrase_span + ) + new_hypothesis = _Hypothesis( + raw_score=raw_score, + src_phrase_span=src_phrase_span, + trg_phrase=translation_option.trg_phrase, + previous=hypothesis, + ) + new_hypothesis.future_score = self.future_score( + new_hypothesis, future_score_table, sentence_length + ) + total_words = new_hypothesis.total_translated_words() + stacks[total_words].push(new_hypothesis) + + if not stacks[sentence_length]: + warnings.warn( + "Unable to translate all words. " + "The source sentence contains words not in " + "the phrase table" + ) + # Instead of returning empty output, perhaps a partial + # translation could be returned + return [] + + best_hypothesis = stacks[sentence_length].best() + return best_hypothesis.translation_so_far() + + def find_all_src_phrases(self, src_sentence): + """ + Finds all subsequences in src_sentence that have a phrase + translation in the translation table + + :type src_sentence: tuple(str) + + :return: Subsequences that have a phrase translation, + represented as a table of lists of end positions. + For example, if result[2] is [5, 6, 9], then there are + three phrases starting from position 2 in ``src_sentence``, + ending at positions 5, 6, and 9 exclusive. The list of + ending positions are in ascending order. + :rtype: list(list(int)) + """ + sentence_length = len(src_sentence) + phrase_indices = [[] for _ in src_sentence] + for start in range(0, sentence_length): + for end in range(start + 1, sentence_length + 1): + potential_phrase = src_sentence[start:end] + if potential_phrase in self.phrase_table: + phrase_indices[start].append(end) + return phrase_indices + + def compute_future_scores(self, src_sentence): + """ + Determines the approximate scores for translating every + subsequence in ``src_sentence`` + + Future scores can be used a look-ahead to determine the + difficulty of translating the remaining parts of a src_sentence. + + :type src_sentence: tuple(str) + + :return: Scores of subsequences referenced by their start and + end positions. 
For example, result[2][5] is the score of the + subsequence covering positions 2, 3, and 4. + :rtype: dict(int: (dict(int): float)) + """ + scores = defaultdict(lambda: defaultdict(lambda: float("-inf"))) + for seq_length in range(1, len(src_sentence) + 1): + for start in range(0, len(src_sentence) - seq_length + 1): + end = start + seq_length + phrase = src_sentence[start:end] + if phrase in self.phrase_table: + score = self.phrase_table.translations_for(phrase)[ + 0 + ].log_prob # pick best (first) translation + # Warning: API of language_model is subject to change + score += self.language_model.probability(phrase) + scores[start][end] = score + + # check if a better score can be obtained by combining + # two child subsequences + for mid in range(start + 1, end): + combined_score = scores[start][mid] + scores[mid][end] + if combined_score > scores[start][end]: + scores[start][end] = combined_score + return scores + + def future_score(self, hypothesis, future_score_table, sentence_length): + """ + Determines the approximate score for translating the + untranslated words in ``hypothesis`` + """ + score = 0.0 + for span in hypothesis.untranslated_spans(sentence_length): + score += future_score_table[span[0]][span[1]] + return score + + def expansion_score(self, hypothesis, translation_option, src_phrase_span): + """ + Calculate the score of expanding ``hypothesis`` with + ``translation_option`` + + :param hypothesis: Hypothesis being expanded + :type hypothesis: _Hypothesis + + :param translation_option: Information about the proposed expansion + :type translation_option: PhraseTableEntry + + :param src_phrase_span: Word position span of the source phrase + :type src_phrase_span: tuple(int, int) + """ + score = hypothesis.raw_score + score += translation_option.log_prob + # The API of language_model is subject to change; it could accept + # a string, a list of words, and/or some other type + score += self.language_model.probability_change( + hypothesis, translation_option.trg_phrase + ) + score += self.distortion_score(hypothesis, src_phrase_span) + score -= self.word_penalty * len(translation_option.trg_phrase) + return score + + def distortion_score(self, hypothesis, next_src_phrase_span): + if not hypothesis.src_phrase_span: + return 0.0 + next_src_phrase_start = next_src_phrase_span[0] + prev_src_phrase_end = hypothesis.src_phrase_span[1] + distortion_distance = next_src_phrase_start - prev_src_phrase_end + return abs(distortion_distance) * self.__log_distortion_factor + + @staticmethod + def valid_phrases(all_phrases_from, hypothesis): + """ + Extract phrases from ``all_phrases_from`` that contains words + that have not been translated by ``hypothesis`` + + :param all_phrases_from: Phrases represented by their spans, in + the same format as the return value of + ``find_all_src_phrases`` + :type all_phrases_from: list(list(int)) + + :type hypothesis: _Hypothesis + + :return: A list of phrases, represented by their spans, that + cover untranslated positions. 
+ :rtype: list(tuple(int, int)) + """ + untranslated_spans = hypothesis.untranslated_spans(len(all_phrases_from)) + valid_phrases = [] + for available_span in untranslated_spans: + start = available_span[0] + available_end = available_span[1] + while start < available_end: + for phrase_end in all_phrases_from[start]: + if phrase_end > available_end: + # Subsequent elements in all_phrases_from[start] + # will also be > available_end, since the + # elements are in ascending order + break + valid_phrases.append((start, phrase_end)) + start += 1 + return valid_phrases + + +class _Hypothesis: + """ + Partial solution to a translation. + + Records the word positions of the phrase being translated, its + translation, raw score, and the cost of the untranslated parts of + the sentence. When the next phrase is selected to build upon the + partial solution, a new _Hypothesis object is created, with a back + pointer to the previous hypothesis. + + To find out which words have been translated so far, look at the + ``src_phrase_span`` in the hypothesis chain. Similarly, the + translation output can be found by traversing up the chain. + """ + + def __init__( + self, + raw_score=0.0, + src_phrase_span=(), + trg_phrase=(), + previous=None, + future_score=0.0, + ): + """ + :param raw_score: Likelihood of hypothesis so far. + Higher is better. Does not account for untranslated words. + :type raw_score: float + + :param src_phrase_span: Span of word positions covered by the + source phrase in this hypothesis expansion. For example, + (2, 5) means that the phrase is from the second word up to, + but not including the fifth word in the source sentence. + :type src_phrase_span: tuple(int) + + :param trg_phrase: Translation of the source phrase in this + hypothesis expansion + :type trg_phrase: tuple(str) + + :param previous: Previous hypothesis before expansion to this one + :type previous: _Hypothesis + + :param future_score: Approximate score for translating the + remaining words not covered by this hypothesis. Higher means + that the remaining words are easier to translate. + :type future_score: float + """ + self.raw_score = raw_score + self.src_phrase_span = src_phrase_span + self.trg_phrase = trg_phrase + self.previous = previous + self.future_score = future_score + + def score(self): + """ + Overall score of hypothesis after accounting for local and + global features + """ + return self.raw_score + self.future_score + + def untranslated_spans(self, sentence_length): + """ + Starting from each untranslated word, find the longest + continuous span of untranslated positions + + :param sentence_length: Length of source sentence being + translated by the hypothesis + :type sentence_length: int + + :rtype: list(tuple(int, int)) + """ + translated_positions = self.translated_positions() + translated_positions.sort() + translated_positions.append(sentence_length) # add sentinel position + + untranslated_spans = [] + start = 0 + # each untranslated span must end in one of the translated_positions + for end in translated_positions: + if start < end: + untranslated_spans.append((start, end)) + start = end + 1 + + return untranslated_spans + + def translated_positions(self): + """ + List of positions in the source sentence of words already + translated. The list is not sorted. 
+ + :rtype: list(int) + """ + translated_positions = [] + current_hypothesis = self + while current_hypothesis.previous is not None: + translated_span = current_hypothesis.src_phrase_span + translated_positions.extend(range(translated_span[0], translated_span[1])) + current_hypothesis = current_hypothesis.previous + return translated_positions + + def total_translated_words(self): + return len(self.translated_positions()) + + def translation_so_far(self): + translation = [] + self.__build_translation(self, translation) + return translation + + def __build_translation(self, hypothesis, output): + if hypothesis.previous is None: + return + self.__build_translation(hypothesis.previous, output) + output.extend(hypothesis.trg_phrase) + + +class _Stack: + """ + Collection of _Hypothesis objects + """ + + def __init__(self, max_size=100, beam_threshold=0.0): + """ + :param beam_threshold: Hypotheses that score less than this + factor of the best hypothesis are discarded from the stack. + Value must be between 0.0 and 1.0. + :type beam_threshold: float + """ + self.max_size = max_size + self.items = [] + + if beam_threshold == 0.0: + self.__log_beam_threshold = float("-inf") + else: + self.__log_beam_threshold = log(beam_threshold) + + def push(self, hypothesis): + """ + Add ``hypothesis`` to the stack. + Removes lowest scoring hypothesis if the stack is full. + After insertion, hypotheses that score less than + ``beam_threshold`` times the score of the best hypothesis + are removed. + """ + self.items.append(hypothesis) + self.items.sort(key=lambda h: h.score(), reverse=True) + while len(self.items) > self.max_size: + self.items.pop() + self.threshold_prune() + + def threshold_prune(self): + if not self.items: + return + # log(score * beam_threshold) = log(score) + log(beam_threshold) + threshold = self.items[0].score() + self.__log_beam_threshold + for hypothesis in reversed(self.items): + if hypothesis.score() < threshold: + self.items.pop() + else: + break + + def best(self): + """ + :return: Hypothesis with the highest score in the stack + :rtype: _Hypothesis + """ + if self.items: + return self.items[0] + return None + + def __iter__(self): + return iter(self.items) + + def __contains__(self, hypothesis): + return hypothesis in self.items + + def __bool__(self): + return len(self.items) != 0 + + __nonzero__ = __bool__