diff --git a/ckpts/universal/global_step40/zero/6.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/6.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..05e41f057e48fe181ab1250684b67a245d8aafd2
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/6.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6ac93b799da018f9a779731cd8210117adf4acb0090f06ea7041b7d5236fd28
+size 9372
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__init__.py b/venv/lib/python3.10/site-packages/nltk/app/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4bbf1831e714c40514313293ae9027e181b8a77
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nltk/app/__init__.py
@@ -0,0 +1,47 @@
+# Natural Language Toolkit: Applications package
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Edward Loper
+# Steven Bird
+# URL:
+# For license information, see LICENSE.TXT
+
+"""
+Interactive NLTK Applications:
+
+chartparser: Chart Parser
+chunkparser: Regular-Expression Chunk Parser
+collocations: Find collocations in text
+concordance: Part-of-speech concordancer
+nemo: Finding (and Replacing) Nemo regular expression tool
+rdparser: Recursive Descent Parser
+srparser: Shift-Reduce Parser
+wordnet: WordNet Browser
+"""
+
+
+# Import Tkinter-based modules if Tkinter is installed
+try:
+    import tkinter
+except ImportError:
+    import warnings
+
+    warnings.warn("nltk.app package not loaded (please install Tkinter library).")
+else:
+    from nltk.app.chartparser_app import app as chartparser
+    from nltk.app.chunkparser_app import app as chunkparser
+    from nltk.app.collocations_app import app as collocations
+    from nltk.app.concordance_app import app as concordance
+    from nltk.app.nemo_app import app as nemo
+    from nltk.app.rdparser_app import app as rdparser
+    from nltk.app.srparser_app import app as srparser
+    from nltk.app.wordnet_app import app as wordnet
+
+    try:
+        from matplotlib import pylab
+    except ImportError:
+        import warnings
+
+        warnings.warn("nltk.app.wordfreq not loaded (requires the matplotlib library).")
+    else:
+        from nltk.app.wordfreq_app import app as wordfreq
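For orientation (editorial example, not part of the patch): each name imported in `nltk/app/__init__.py` above is a launcher for the corresponding Tkinter demo, so the vendored chart parser GUI can be opened with a single call, assuming Tkinter is installed.

```python
# Illustrative only, not part of this patch: launch the chart parser demo
# that the rest of this diff vendors in. Requires a working Tkinter install;
# the call blocks in the Tk mainloop until the window is closed.
import nltk.app

nltk.app.chartparser()  # opens the interactive Chart Parser window
```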
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d82ac3b4aeb5148da352d46a72e152bb1be9fdd
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__pycache__/chartparser_app.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/chartparser_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b355cd9211c2eab02acd85c36acc7615968d2a69
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/chartparser_app.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__pycache__/chunkparser_app.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/chunkparser_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5612cca9da426ca74929812e06165d9fd4419c0b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/chunkparser_app.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__pycache__/collocations_app.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/collocations_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4eb9acc925a06d2c5e289f6641db7177d0d60f30
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/collocations_app.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__pycache__/concordance_app.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/concordance_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b85e9bbe17c2d78d40cba3b832bfa5410c085de
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/concordance_app.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__pycache__/nemo_app.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/nemo_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f736f8702d02a97c0b8061ed2e7d362fae18de53
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/nemo_app.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__pycache__/rdparser_app.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/rdparser_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5addb15d622830746bc885b233d56601a34dca9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/rdparser_app.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__pycache__/srparser_app.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/srparser_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9c6a93f469bab55b281df7bbafbbddd14e895d0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/srparser_app.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__pycache__/wordfreq_app.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/wordfreq_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88e18995b53d362704fb229230e9dee662a1e42c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/wordfreq_app.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/nltk/app/__pycache__/wordnet_app.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/wordnet_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b6582d804408b97d16ff85259f6ee2a84ad8c40c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/app/__pycache__/wordnet_app.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/nltk/app/chartparser_app.py b/venv/lib/python3.10/site-packages/nltk/app/chartparser_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..53a938c642c6dcfe23fc085205cac3a541821207
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nltk/app/chartparser_app.py
@@ -0,0 +1,2569 @@
+# Natural Language Toolkit: Chart Parser Application
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Edward Loper
+# Jean Mark Gawron
+# Steven Bird
+# URL:
+# For license information, see LICENSE.TXT
+
+"""
+A graphical tool for exploring chart parsing.
+
+Chart parsing is a flexible parsing algorithm that uses a data
+structure called a "chart" to record hypotheses about syntactic
+constituents. Each hypothesis is represented by a single "edge" on
+the chart. A set of "chart rules" determine when new edges can be
+added to the chart. This set of rules controls the overall behavior
+of the parser (e.g. whether it parses top-down or bottom-up).
+
+The chart parsing tool demonstrates the process of parsing a single
+sentence, with a given grammar and lexicon. Its display is divided
+into three sections: the bottom section displays the chart; the middle
+section displays the sentence; and the top section displays the
+partial syntax tree corresponding to the selected edge. Buttons along
+the bottom of the window are used to control the execution of the
+algorithm.
+
+The chart parsing tool allows for flexible control of the parsing
+algorithm. At each step of the algorithm, you can select which rule
+or strategy you wish to apply. This allows you to experiment with
+mixing different strategies (e.g. top-down and bottom-up). You can
+exercise fine-grained control over the algorithm by selecting which
+edge you wish to apply a rule to.
+"""
+
+# At some point, we should rewrite this tool to use the new canvas
+# widget system.
+
+
+import os.path
+import pickle
+from tkinter import (
+    Button,
+    Canvas,
+    Checkbutton,
+    Frame,
+    IntVar,
+    Label,
+    Menu,
+    Scrollbar,
+    Tk,
+    Toplevel,
+)
+from tkinter.filedialog import askopenfilename, asksaveasfilename
+from tkinter.font import Font
+from tkinter.messagebox import showerror, showinfo
+
+from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment
+from nltk.draw.util import (
+    CanvasFrame,
+    ColorizedList,
+    EntryDialog,
+    MutableOptionMenu,
+    ShowText,
+    SymbolWidget,
+)
+from nltk.grammar import CFG, Nonterminal
+from nltk.parse.chart import (
+    BottomUpPredictCombineRule,
+    BottomUpPredictRule,
+    Chart,
+    LeafEdge,
+    LeafInitRule,
+    SingleEdgeFundamentalRule,
+    SteppingChartParser,
+    TopDownInitRule,
+    TopDownPredictRule,
+    TreeEdge,
+)
+from nltk.tree import Tree
+from nltk.util import in_idle
+
+# Known bug: ChartView doesn't handle edges generated by epsilon
+# productions (e.g., [Production: PP -> ]) very well.
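The module docstring above describes charts, edges, and rule strategies in GUI terms; the same machinery can be driven headlessly through nltk.parse.chart, which this module imports. A minimal sketch (not part of the patch; the toy grammar and sentence are invented):

```python
# Illustrative sketch only: a headless look at the chart/edge machinery that
# the GUI below visualizes. The toy grammar and sentence are made up.
from nltk.grammar import CFG
from nltk.parse.chart import ChartParser

toy_grammar = CFG.fromstring(
    """
    S -> NP VP
    NP -> Det N
    VP -> V NP
    Det -> 'the'
    N -> 'dog' | 'cat'
    V -> 'saw'
    """
)
tokens = "the dog saw the cat".split()

parser = ChartParser(toy_grammar)   # default bottom-up left-corner strategy
chart = parser.chart_parse(tokens)  # returns the populated Chart
print(chart.num_edges(), "edges")   # each hypothesis is recorded as one edge
for tree in chart.parses(toy_grammar.start()):
    print(tree)                     # complete parses spanning the whole input
```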
+ +####################################################################### +# Edge List +####################################################################### + + +class EdgeList(ColorizedList): + ARROW = SymbolWidget.SYMBOLS["rightarrow"] + + def _init_colortags(self, textwidget, options): + textwidget.tag_config("terminal", foreground="#006000") + textwidget.tag_config("arrow", font="symbol", underline="0") + textwidget.tag_config("dot", foreground="#000000") + textwidget.tag_config( + "nonterminal", foreground="blue", font=("helvetica", -12, "bold") + ) + + def _item_repr(self, item): + contents = [] + contents.append(("%s\t" % item.lhs(), "nonterminal")) + contents.append((self.ARROW, "arrow")) + for i, elt in enumerate(item.rhs()): + if i == item.dot(): + contents.append((" *", "dot")) + if isinstance(elt, Nonterminal): + contents.append((" %s" % elt.symbol(), "nonterminal")) + else: + contents.append((" %r" % elt, "terminal")) + if item.is_complete(): + contents.append((" *", "dot")) + return contents + + +####################################################################### +# Chart Matrix View +####################################################################### + + +class ChartMatrixView: + """ + A view of a chart that displays the contents of the corresponding matrix. + """ + + def __init__( + self, parent, chart, toplevel=True, title="Chart Matrix", show_numedges=False + ): + self._chart = chart + self._cells = [] + self._marks = [] + + self._selected_cell = None + + if toplevel: + self._root = Toplevel(parent) + self._root.title(title) + self._root.bind("", self.destroy) + self._init_quit(self._root) + else: + self._root = Frame(parent) + + self._init_matrix(self._root) + self._init_list(self._root) + if show_numedges: + self._init_numedges(self._root) + else: + self._numedges_label = None + + self._callbacks = {} + + self._num_edges = 0 + + self.draw() + + def _init_quit(self, root): + quit = Button(root, text="Quit", command=self.destroy) + quit.pack(side="bottom", expand=0, fill="none") + + def _init_matrix(self, root): + cframe = Frame(root, border=2, relief="sunken") + cframe.pack(expand=0, fill="none", padx=1, pady=3, side="top") + self._canvas = Canvas(cframe, width=200, height=200, background="white") + self._canvas.pack(expand=0, fill="none") + + def _init_numedges(self, root): + self._numedges_label = Label(root, text="0 edges") + self._numedges_label.pack(expand=0, fill="none", side="top") + + def _init_list(self, root): + self._list = EdgeList(root, [], width=20, height=5) + self._list.pack(side="top", expand=1, fill="both", pady=3) + + def cb(edge, self=self): + self._fire_callbacks("select", edge) + + self._list.add_callback("select", cb) + self._list.focus() + + def destroy(self, *e): + if self._root is None: + return + try: + self._root.destroy() + except: + pass + self._root = None + + def set_chart(self, chart): + if chart is not self._chart: + self._chart = chart + self._num_edges = 0 + self.draw() + + def update(self): + if self._root is None: + return + + # Count the edges in each cell + N = len(self._cells) + cell_edges = [[0 for i in range(N)] for j in range(N)] + for edge in self._chart: + cell_edges[edge.start()][edge.end()] += 1 + + # Color the cells correspondingly. 
+ for i in range(N): + for j in range(i, N): + if cell_edges[i][j] == 0: + color = "gray20" + else: + color = "#00{:02x}{:02x}".format( + min(255, 50 + 128 * cell_edges[i][j] / 10), + max(0, 128 - 128 * cell_edges[i][j] / 10), + ) + cell_tag = self._cells[i][j] + self._canvas.itemconfig(cell_tag, fill=color) + if (i, j) == self._selected_cell: + self._canvas.itemconfig(cell_tag, outline="#00ffff", width=3) + self._canvas.tag_raise(cell_tag) + else: + self._canvas.itemconfig(cell_tag, outline="black", width=1) + + # Update the edge list. + edges = list(self._chart.select(span=self._selected_cell)) + self._list.set(edges) + + # Update our edge count. + self._num_edges = self._chart.num_edges() + if self._numedges_label is not None: + self._numedges_label["text"] = "%d edges" % self._num_edges + + def activate(self): + self._canvas.itemconfig("inactivebox", state="hidden") + self.update() + + def inactivate(self): + self._canvas.itemconfig("inactivebox", state="normal") + self.update() + + def add_callback(self, event, func): + self._callbacks.setdefault(event, {})[func] = 1 + + def remove_callback(self, event, func=None): + if func is None: + del self._callbacks[event] + else: + try: + del self._callbacks[event][func] + except: + pass + + def _fire_callbacks(self, event, *args): + if event not in self._callbacks: + return + for cb_func in list(self._callbacks[event].keys()): + cb_func(*args) + + def select_cell(self, i, j): + if self._root is None: + return + + # If the cell is already selected (and the chart contents + # haven't changed), then do nothing. + if (i, j) == self._selected_cell and self._chart.num_edges() == self._num_edges: + return + + self._selected_cell = (i, j) + self.update() + + # Fire the callback. + self._fire_callbacks("select_cell", i, j) + + def deselect_cell(self): + if self._root is None: + return + self._selected_cell = None + self._list.set([]) + self.update() + + def _click_cell(self, i, j): + if self._selected_cell == (i, j): + self.deselect_cell() + else: + self.select_cell(i, j) + + def view_edge(self, edge): + self.select_cell(*edge.span()) + self._list.view(edge) + + def mark_edge(self, edge): + if self._root is None: + return + self.select_cell(*edge.span()) + self._list.mark(edge) + + def unmark_edge(self, edge=None): + if self._root is None: + return + self._list.unmark(edge) + + def markonly_edge(self, edge): + if self._root is None: + return + self.select_cell(*edge.span()) + self._list.markonly(edge) + + def draw(self): + if self._root is None: + return + LEFT_MARGIN = BOT_MARGIN = 15 + TOP_MARGIN = 5 + c = self._canvas + c.delete("all") + N = self._chart.num_leaves() + 1 + dx = (int(c["width"]) - LEFT_MARGIN) / N + dy = (int(c["height"]) - TOP_MARGIN - BOT_MARGIN) / N + + c.delete("all") + + # Labels and dotted lines + for i in range(N): + c.create_text( + LEFT_MARGIN - 2, i * dy + dy / 2 + TOP_MARGIN, text=repr(i), anchor="e" + ) + c.create_text( + i * dx + dx / 2 + LEFT_MARGIN, + N * dy + TOP_MARGIN + 1, + text=repr(i), + anchor="n", + ) + c.create_line( + LEFT_MARGIN, + dy * (i + 1) + TOP_MARGIN, + dx * N + LEFT_MARGIN, + dy * (i + 1) + TOP_MARGIN, + dash=".", + ) + c.create_line( + dx * i + LEFT_MARGIN, + TOP_MARGIN, + dx * i + LEFT_MARGIN, + dy * N + TOP_MARGIN, + dash=".", + ) + + # A box around the whole thing + c.create_rectangle( + LEFT_MARGIN, TOP_MARGIN, LEFT_MARGIN + dx * N, dy * N + TOP_MARGIN, width=2 + ) + + # Cells + self._cells = [[None for i in range(N)] for j in range(N)] + for i in range(N): + for j in range(i, N): + t = 
c.create_rectangle( + j * dx + LEFT_MARGIN, + i * dy + TOP_MARGIN, + (j + 1) * dx + LEFT_MARGIN, + (i + 1) * dy + TOP_MARGIN, + fill="gray20", + ) + self._cells[i][j] = t + + def cb(event, self=self, i=i, j=j): + self._click_cell(i, j) + + c.tag_bind(t, "", cb) + + # Inactive box + xmax, ymax = int(c["width"]), int(c["height"]) + t = c.create_rectangle( + -100, + -100, + xmax + 100, + ymax + 100, + fill="gray50", + state="hidden", + tag="inactivebox", + ) + c.tag_lower(t) + + # Update the cells. + self.update() + + def pack(self, *args, **kwargs): + self._root.pack(*args, **kwargs) + + +####################################################################### +# Chart Results View +####################################################################### + + +class ChartResultsView: + def __init__(self, parent, chart, grammar, toplevel=True): + self._chart = chart + self._grammar = grammar + self._trees = [] + self._y = 10 + self._treewidgets = [] + self._selection = None + self._selectbox = None + + if toplevel: + self._root = Toplevel(parent) + self._root.title("Chart Parser Application: Results") + self._root.bind("", self.destroy) + else: + self._root = Frame(parent) + + # Buttons + if toplevel: + buttons = Frame(self._root) + buttons.pack(side="bottom", expand=0, fill="x") + Button(buttons, text="Quit", command=self.destroy).pack(side="right") + Button(buttons, text="Print All", command=self.print_all).pack(side="left") + Button(buttons, text="Print Selection", command=self.print_selection).pack( + side="left" + ) + + # Canvas frame. + self._cframe = CanvasFrame(self._root, closeenough=20) + self._cframe.pack(side="top", expand=1, fill="both") + + # Initial update + self.update() + + def update(self, edge=None): + if self._root is None: + return + # If the edge isn't a parse edge, do nothing. + if edge is not None: + if edge.lhs() != self._grammar.start(): + return + if edge.span() != (0, self._chart.num_leaves()): + return + + for parse in self._chart.parses(self._grammar.start()): + if parse not in self._trees: + self._add(parse) + + def _add(self, parse): + # Add it to self._trees. + self._trees.append(parse) + + # Create a widget for it. + c = self._cframe.canvas() + treewidget = tree_to_treesegment(c, parse) + + # Add it to the canvas frame. + self._treewidgets.append(treewidget) + self._cframe.add_widget(treewidget, 10, self._y) + + # Register callbacks. + treewidget.bind_click(self._click) + + # Update y. 
+ self._y = treewidget.bbox()[3] + 10 + + def _click(self, widget): + c = self._cframe.canvas() + if self._selection is not None: + c.delete(self._selectbox) + self._selection = widget + (x1, y1, x2, y2) = widget.bbox() + self._selectbox = c.create_rectangle(x1, y1, x2, y2, width=2, outline="#088") + + def _color(self, treewidget, color): + treewidget.label()["color"] = color + for child in treewidget.subtrees(): + if isinstance(child, TreeSegmentWidget): + self._color(child, color) + else: + child["color"] = color + + def print_all(self, *e): + if self._root is None: + return + self._cframe.print_to_file() + + def print_selection(self, *e): + if self._root is None: + return + if self._selection is None: + showerror("Print Error", "No tree selected") + else: + c = self._cframe.canvas() + for widget in self._treewidgets: + if widget is not self._selection: + self._cframe.destroy_widget(widget) + c.delete(self._selectbox) + (x1, y1, x2, y2) = self._selection.bbox() + self._selection.move(10 - x1, 10 - y1) + c["scrollregion"] = f"0 0 {x2 - x1 + 20} {y2 - y1 + 20}" + self._cframe.print_to_file() + + # Restore our state. + self._treewidgets = [self._selection] + self.clear() + self.update() + + def clear(self): + if self._root is None: + return + for treewidget in self._treewidgets: + self._cframe.destroy_widget(treewidget) + self._trees = [] + self._treewidgets = [] + if self._selection is not None: + self._cframe.canvas().delete(self._selectbox) + self._selection = None + self._y = 10 + + def set_chart(self, chart): + self.clear() + self._chart = chart + self.update() + + def set_grammar(self, grammar): + self.clear() + self._grammar = grammar + self.update() + + def destroy(self, *e): + if self._root is None: + return + try: + self._root.destroy() + except: + pass + self._root = None + + def pack(self, *args, **kwargs): + self._root.pack(*args, **kwargs) + + +####################################################################### +# Chart Comparer +####################################################################### + + +class ChartComparer: + """ + + :ivar _root: The root window + + :ivar _charts: A dictionary mapping names to charts. When + charts are loaded, they are added to this dictionary. + + :ivar _left_chart: The left ``Chart``. + :ivar _left_name: The name ``_left_chart`` (derived from filename) + :ivar _left_matrix: The ``ChartMatrixView`` for ``_left_chart`` + :ivar _left_selector: The drop-down ``MutableOptionsMenu`` used + to select ``_left_chart``. + + :ivar _right_chart: The right ``Chart``. + :ivar _right_name: The name ``_right_chart`` (derived from filename) + :ivar _right_matrix: The ``ChartMatrixView`` for ``_right_chart`` + :ivar _right_selector: The drop-down ``MutableOptionsMenu`` used + to select ``_right_chart``. + + :ivar _out_chart: The out ``Chart``. + :ivar _out_name: The name ``_out_chart`` (derived from filename) + :ivar _out_matrix: The ``ChartMatrixView`` for ``_out_chart`` + :ivar _out_label: The label for ``_out_chart``. + + :ivar _op_label: A Label containing the most recent operation. + """ + + _OPSYMBOL = { + "-": "-", + "and": SymbolWidget.SYMBOLS["intersection"], + "or": SymbolWidget.SYMBOLS["union"], + } + + def __init__(self, *chart_filenames): + # This chart is displayed when we don't have a value (eg + # before any chart is loaded). + faketok = [""] * 8 + self._emptychart = Chart(faketok) + + # The left & right charts start out empty. 
+ self._left_name = "None" + self._right_name = "None" + self._left_chart = self._emptychart + self._right_chart = self._emptychart + + # The charts that have been loaded. + self._charts = {"None": self._emptychart} + + # The output chart. + self._out_chart = self._emptychart + + # The most recent operation + self._operator = None + + # Set up the root window. + self._root = Tk() + self._root.title("Chart Comparison") + self._root.bind("", self.destroy) + self._root.bind("", self.destroy) + + # Initialize all widgets, etc. + self._init_menubar(self._root) + self._init_chartviews(self._root) + self._init_divider(self._root) + self._init_buttons(self._root) + self._init_bindings(self._root) + + # Load any specified charts. + for filename in chart_filenames: + self.load_chart(filename) + + def destroy(self, *e): + if self._root is None: + return + try: + self._root.destroy() + except: + pass + self._root = None + + def mainloop(self, *args, **kwargs): + return + self._root.mainloop(*args, **kwargs) + + # //////////////////////////////////////////////////////////// + # Initialization + # //////////////////////////////////////////////////////////// + + def _init_menubar(self, root): + menubar = Menu(root) + + # File menu + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Load Chart", + accelerator="Ctrl-o", + underline=0, + command=self.load_chart_dialog, + ) + filemenu.add_command( + label="Save Output", + accelerator="Ctrl-s", + underline=0, + command=self.save_chart_dialog, + ) + filemenu.add_separator() + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + # Compare menu + opmenu = Menu(menubar, tearoff=0) + opmenu.add_command( + label="Intersection", command=self._intersection, accelerator="+" + ) + opmenu.add_command(label="Union", command=self._union, accelerator="*") + opmenu.add_command( + label="Difference", command=self._difference, accelerator="-" + ) + opmenu.add_separator() + opmenu.add_command(label="Swap Charts", command=self._swapcharts) + menubar.add_cascade(label="Compare", underline=0, menu=opmenu) + + # Add the menu + self._root.config(menu=menubar) + + def _init_divider(self, root): + divider = Frame(root, border=2, relief="sunken") + divider.pack(side="top", fill="x", ipady=2) + + def _init_chartviews(self, root): + opfont = ("symbol", -36) # Font for operator. + eqfont = ("helvetica", -36) # Font for equals sign. + + frame = Frame(root, background="#c0c0c0") + frame.pack(side="top", expand=1, fill="both") + + # The left matrix. + cv1_frame = Frame(frame, border=3, relief="groove") + cv1_frame.pack(side="left", padx=8, pady=7, expand=1, fill="both") + self._left_selector = MutableOptionMenu( + cv1_frame, list(self._charts.keys()), command=self._select_left + ) + self._left_selector.pack(side="top", pady=5, fill="x") + self._left_matrix = ChartMatrixView( + cv1_frame, self._emptychart, toplevel=False, show_numedges=True + ) + self._left_matrix.pack(side="bottom", padx=5, pady=5, expand=1, fill="both") + self._left_matrix.add_callback("select", self.select_edge) + self._left_matrix.add_callback("select_cell", self.select_cell) + self._left_matrix.inactivate() + + # The operator. + self._op_label = Label( + frame, text=" ", width=3, background="#c0c0c0", font=opfont + ) + self._op_label.pack(side="left", padx=5, pady=5) + + # The right matrix. 
+ cv2_frame = Frame(frame, border=3, relief="groove") + cv2_frame.pack(side="left", padx=8, pady=7, expand=1, fill="both") + self._right_selector = MutableOptionMenu( + cv2_frame, list(self._charts.keys()), command=self._select_right + ) + self._right_selector.pack(side="top", pady=5, fill="x") + self._right_matrix = ChartMatrixView( + cv2_frame, self._emptychart, toplevel=False, show_numedges=True + ) + self._right_matrix.pack(side="bottom", padx=5, pady=5, expand=1, fill="both") + self._right_matrix.add_callback("select", self.select_edge) + self._right_matrix.add_callback("select_cell", self.select_cell) + self._right_matrix.inactivate() + + # The equals sign + Label(frame, text="=", width=3, background="#c0c0c0", font=eqfont).pack( + side="left", padx=5, pady=5 + ) + + # The output matrix. + out_frame = Frame(frame, border=3, relief="groove") + out_frame.pack(side="left", padx=8, pady=7, expand=1, fill="both") + self._out_label = Label(out_frame, text="Output") + self._out_label.pack(side="top", pady=9) + self._out_matrix = ChartMatrixView( + out_frame, self._emptychart, toplevel=False, show_numedges=True + ) + self._out_matrix.pack(side="bottom", padx=5, pady=5, expand=1, fill="both") + self._out_matrix.add_callback("select", self.select_edge) + self._out_matrix.add_callback("select_cell", self.select_cell) + self._out_matrix.inactivate() + + def _init_buttons(self, root): + buttons = Frame(root) + buttons.pack(side="bottom", pady=5, fill="x", expand=0) + Button(buttons, text="Intersection", command=self._intersection).pack( + side="left" + ) + Button(buttons, text="Union", command=self._union).pack(side="left") + Button(buttons, text="Difference", command=self._difference).pack(side="left") + Frame(buttons, width=20).pack(side="left") + Button(buttons, text="Swap Charts", command=self._swapcharts).pack(side="left") + + Button(buttons, text="Detach Output", command=self._detach_out).pack( + side="right" + ) + + def _init_bindings(self, root): + # root.bind('', self.save_chart) + root.bind("", self.load_chart_dialog) + # root.bind('', self.reset) + + # //////////////////////////////////////////////////////////// + # Input Handling + # //////////////////////////////////////////////////////////// + + def _select_left(self, name): + self._left_name = name + self._left_chart = self._charts[name] + self._left_matrix.set_chart(self._left_chart) + if name == "None": + self._left_matrix.inactivate() + self._apply_op() + + def _select_right(self, name): + self._right_name = name + self._right_chart = self._charts[name] + self._right_matrix.set_chart(self._right_chart) + if name == "None": + self._right_matrix.inactivate() + self._apply_op() + + def _apply_op(self): + if self._operator == "-": + self._difference() + elif self._operator == "or": + self._union() + elif self._operator == "and": + self._intersection() + + # //////////////////////////////////////////////////////////// + # File + # //////////////////////////////////////////////////////////// + CHART_FILE_TYPES = [("Pickle file", ".pickle"), ("All files", "*")] + + def save_chart_dialog(self, *args): + filename = asksaveasfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + with open(filename, "wb") as outfile: + pickle.dump(self._out_chart, outfile) + except Exception as e: + showerror("Error Saving Chart", f"Unable to open file: {filename!r}\n{e}") + + def load_chart_dialog(self, *args): + filename = askopenfilename( + filetypes=self.CHART_FILE_TYPES, 
defaultextension=".pickle" + ) + if not filename: + return + try: + self.load_chart(filename) + except Exception as e: + showerror("Error Loading Chart", f"Unable to open file: {filename!r}\n{e}") + + def load_chart(self, filename): + with open(filename, "rb") as infile: + chart = pickle.load(infile) + name = os.path.basename(filename) + if name.endswith(".pickle"): + name = name[:-7] + if name.endswith(".chart"): + name = name[:-6] + self._charts[name] = chart + self._left_selector.add(name) + self._right_selector.add(name) + + # If either left_matrix or right_matrix is empty, then + # display the new chart. + if self._left_chart is self._emptychart: + self._left_selector.set(name) + elif self._right_chart is self._emptychart: + self._right_selector.set(name) + + def _update_chartviews(self): + self._left_matrix.update() + self._right_matrix.update() + self._out_matrix.update() + + # //////////////////////////////////////////////////////////// + # Selection + # //////////////////////////////////////////////////////////// + + def select_edge(self, edge): + if edge in self._left_chart: + self._left_matrix.markonly_edge(edge) + else: + self._left_matrix.unmark_edge() + if edge in self._right_chart: + self._right_matrix.markonly_edge(edge) + else: + self._right_matrix.unmark_edge() + if edge in self._out_chart: + self._out_matrix.markonly_edge(edge) + else: + self._out_matrix.unmark_edge() + + def select_cell(self, i, j): + self._left_matrix.select_cell(i, j) + self._right_matrix.select_cell(i, j) + self._out_matrix.select_cell(i, j) + + # //////////////////////////////////////////////////////////// + # Operations + # //////////////////////////////////////////////////////////// + + def _difference(self): + if not self._checkcompat(): + return + + out_chart = Chart(self._left_chart.tokens()) + for edge in self._left_chart: + if edge not in self._right_chart: + out_chart.insert(edge, []) + + self._update("-", out_chart) + + def _intersection(self): + if not self._checkcompat(): + return + + out_chart = Chart(self._left_chart.tokens()) + for edge in self._left_chart: + if edge in self._right_chart: + out_chart.insert(edge, []) + + self._update("and", out_chart) + + def _union(self): + if not self._checkcompat(): + return + + out_chart = Chart(self._left_chart.tokens()) + for edge in self._left_chart: + out_chart.insert(edge, []) + for edge in self._right_chart: + out_chart.insert(edge, []) + + self._update("or", out_chart) + + def _swapcharts(self): + left, right = self._left_name, self._right_name + self._left_selector.set(right) + self._right_selector.set(left) + + def _checkcompat(self): + if ( + self._left_chart.tokens() != self._right_chart.tokens() + or self._left_chart.property_names() != self._right_chart.property_names() + or self._left_chart == self._emptychart + or self._right_chart == self._emptychart + ): + # Clear & inactivate the output chart. + self._out_chart = self._emptychart + self._out_matrix.set_chart(self._out_chart) + self._out_matrix.inactivate() + self._out_label["text"] = "Output" + # Issue some other warning? 
+ return False + else: + return True + + def _update(self, operator, out_chart): + self._operator = operator + self._op_label["text"] = self._OPSYMBOL[operator] + self._out_chart = out_chart + self._out_matrix.set_chart(out_chart) + self._out_label["text"] = "{} {} {}".format( + self._left_name, + self._operator, + self._right_name, + ) + + def _clear_out_chart(self): + self._out_chart = self._emptychart + self._out_matrix.set_chart(self._out_chart) + self._op_label["text"] = " " + self._out_matrix.inactivate() + + def _detach_out(self): + ChartMatrixView(self._root, self._out_chart, title=self._out_label["text"]) + + +####################################################################### +# Chart View +####################################################################### + + +class ChartView: + """ + A component for viewing charts. This is used by ``ChartParserApp`` to + allow students to interactively experiment with various chart + parsing techniques. It is also used by ``Chart.draw()``. + + :ivar _chart: The chart that we are giving a view of. This chart + may be modified; after it is modified, you should call + ``update``. + :ivar _sentence: The list of tokens that the chart spans. + + :ivar _root: The root window. + :ivar _chart_canvas: The canvas we're using to display the chart + itself. + :ivar _tree_canvas: The canvas we're using to display the tree + that each edge spans. May be None, if we're not displaying + trees. + :ivar _sentence_canvas: The canvas we're using to display the sentence + text. May be None, if we're not displaying the sentence text. + :ivar _edgetags: A dictionary mapping from edges to the tags of + the canvas elements (lines, etc) used to display that edge. + The values of this dictionary have the form + ``(linetag, rhstag1, dottag, rhstag2, lhstag)``. + :ivar _treetags: A list of all the tags that make up the tree; + used to erase the tree (without erasing the loclines). + :ivar _chart_height: The height of the chart canvas. + :ivar _sentence_height: The height of the sentence canvas. + :ivar _tree_height: The height of the tree + + :ivar _text_height: The height of a text string (in the normal + font). + + :ivar _edgelevels: A list of edges at each level of the chart (the + top level is the 0th element). This list is used to remember + where edges should be drawn; and to make sure that no edges + are overlapping on the chart view. + + :ivar _unitsize: Pixel size of one unit (from the location). This + is determined by the span of the chart's location, and the + width of the chart display canvas. + + :ivar _fontsize: The current font size + + :ivar _marks: A dictionary from edges to marks. Marks are + strings, specifying colors (e.g. 'green'). + """ + + _LEAF_SPACING = 10 + _MARGIN = 10 + _TREE_LEVEL_SIZE = 12 + _CHART_LEVEL_SIZE = 40 + + def __init__(self, chart, root=None, **kw): + """ + Construct a new ``Chart`` display. + """ + # Process keyword args. + draw_tree = kw.get("draw_tree", 0) + draw_sentence = kw.get("draw_sentence", 1) + self._fontsize = kw.get("fontsize", -12) + + # The chart! + self._chart = chart + + # Callback functions + self._callbacks = {} + + # Keep track of drawn edges + self._edgelevels = [] + self._edgetags = {} + + # Keep track of which edges are marked. + self._marks = {} + + # These are used to keep track of the set of tree tokens + # currently displayed in the tree canvas. 
+ self._treetoks = [] + self._treetoks_edge = None + self._treetoks_index = 0 + + # Keep track of the tags used to draw the tree + self._tree_tags = [] + + # Put multiple edges on each level? + self._compact = 0 + + # If they didn't provide a main window, then set one up. + if root is None: + top = Tk() + top.title("Chart View") + + def destroy1(e, top=top): + top.destroy() + + def destroy2(top=top): + top.destroy() + + top.bind("q", destroy1) + b = Button(top, text="Done", command=destroy2) + b.pack(side="bottom") + self._root = top + else: + self._root = root + + # Create some fonts. + self._init_fonts(root) + + # Create the chart canvas. + (self._chart_sb, self._chart_canvas) = self._sb_canvas(self._root) + self._chart_canvas["height"] = 300 + self._chart_canvas["closeenough"] = 15 + + # Create the sentence canvas. + if draw_sentence: + cframe = Frame(self._root, relief="sunk", border=2) + cframe.pack(fill="both", side="bottom") + self._sentence_canvas = Canvas(cframe, height=50) + self._sentence_canvas["background"] = "#e0e0e0" + self._sentence_canvas.pack(fill="both") + # self._sentence_canvas['height'] = self._sentence_height + else: + self._sentence_canvas = None + + # Create the tree canvas. + if draw_tree: + (sb, canvas) = self._sb_canvas(self._root, "n", "x") + (self._tree_sb, self._tree_canvas) = (sb, canvas) + self._tree_canvas["height"] = 200 + else: + self._tree_canvas = None + + # Do some analysis to figure out how big the window should be + self._analyze() + self.draw() + self._resize() + self._grow() + + # Set up the configure callback, which will be called whenever + # the window is resized. + self._chart_canvas.bind("", self._configure) + + def _init_fonts(self, root): + self._boldfont = Font(family="helvetica", weight="bold", size=self._fontsize) + self._font = Font(family="helvetica", size=self._fontsize) + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + def _sb_canvas(self, root, expand="y", fill="both", side="bottom"): + """ + Helper for __init__: construct a canvas with a scrollbar. + """ + cframe = Frame(root, relief="sunk", border=2) + cframe.pack(fill=fill, expand=expand, side=side) + canvas = Canvas(cframe, background="#e0e0e0") + + # Give the canvas a scrollbar. + sb = Scrollbar(cframe, orient="vertical") + sb.pack(side="right", fill="y") + canvas.pack(side="left", fill=fill, expand="yes") + + # Connect the scrollbars to the canvas. + sb["command"] = canvas.yview + canvas["yscrollcommand"] = sb.set + + return (sb, canvas) + + def scroll_up(self, *e): + self._chart_canvas.yview("scroll", -1, "units") + + def scroll_down(self, *e): + self._chart_canvas.yview("scroll", 1, "units") + + def page_up(self, *e): + self._chart_canvas.yview("scroll", -1, "pages") + + def page_down(self, *e): + self._chart_canvas.yview("scroll", 1, "pages") + + def _grow(self): + """ + Grow the window, if necessary + """ + # Grow, if need-be + N = self._chart.num_leaves() + width = max( + int(self._chart_canvas["width"]), N * self._unitsize + ChartView._MARGIN * 2 + ) + + # It won't resize without the second (height) line, but I + # don't understand why not. + self._chart_canvas.configure(width=width) + self._chart_canvas.configure(height=self._chart_canvas["height"]) + + self._unitsize = (width - 2 * ChartView._MARGIN) / N + + # Reset the height for the sentence window. 
+ if self._sentence_canvas is not None: + self._sentence_canvas["height"] = self._sentence_height + + def set_font_size(self, size): + self._font.configure(size=-abs(size)) + self._boldfont.configure(size=-abs(size)) + self._sysfont.configure(size=-abs(size)) + self._analyze() + self._grow() + self.draw() + + def get_font_size(self): + return abs(self._fontsize) + + def _configure(self, e): + """ + The configure callback. This is called whenever the window is + resized. It is also called when the window is first mapped. + It figures out the unit size, and redraws the contents of each + canvas. + """ + N = self._chart.num_leaves() + self._unitsize = (e.width - 2 * ChartView._MARGIN) / N + self.draw() + + def update(self, chart=None): + """ + Draw any edges that have not been drawn. This is typically + called when a after modifies the canvas that a CanvasView is + displaying. ``update`` will cause any edges that have been + added to the chart to be drawn. + + If update is given a ``chart`` argument, then it will replace + the current chart with the given chart. + """ + if chart is not None: + self._chart = chart + self._edgelevels = [] + self._marks = {} + self._analyze() + self._grow() + self.draw() + self.erase_tree() + self._resize() + else: + for edge in self._chart: + if edge not in self._edgetags: + self._add_edge(edge) + self._resize() + + def _edge_conflict(self, edge, lvl): + """ + Return True if the given edge overlaps with any edge on the given + level. This is used by _add_edge to figure out what level a + new edge should be added to. + """ + (s1, e1) = edge.span() + for otheredge in self._edgelevels[lvl]: + (s2, e2) = otheredge.span() + if (s1 <= s2 < e1) or (s2 <= s1 < e2) or (s1 == s2 == e1 == e2): + return True + return False + + def _analyze_edge(self, edge): + """ + Given a new edge, recalculate: + + - _text_height + - _unitsize (if the edge text is too big for the current + _unitsize, then increase _unitsize) + """ + c = self._chart_canvas + + if isinstance(edge, TreeEdge): + lhs = edge.lhs() + rhselts = [] + for elt in edge.rhs(): + if isinstance(elt, Nonterminal): + rhselts.append(str(elt.symbol())) + else: + rhselts.append(repr(elt)) + rhs = " ".join(rhselts) + else: + lhs = edge.lhs() + rhs = "" + + for s in (lhs, rhs): + tag = c.create_text( + 0, 0, text=s, font=self._boldfont, anchor="nw", justify="left" + ) + bbox = c.bbox(tag) + c.delete(tag) + width = bbox[2] # + ChartView._LEAF_SPACING + edgelen = max(edge.length(), 1) + self._unitsize = max(self._unitsize, width / edgelen) + self._text_height = max(self._text_height, bbox[3] - bbox[1]) + + def _add_edge(self, edge, minlvl=0): + """ + Add a single edge to the ChartView: + + - Call analyze_edge to recalculate display parameters + - Find an available level + - Call _draw_edge + """ + # Do NOT show leaf edges in the chart. + if isinstance(edge, LeafEdge): + return + + if edge in self._edgetags: + return + self._analyze_edge(edge) + self._grow() + + if not self._compact: + self._edgelevels.append([edge]) + lvl = len(self._edgelevels) - 1 + self._draw_edge(edge, lvl) + self._resize() + return + + # Figure out what level to draw the edge on. + lvl = 0 + while True: + # If this level doesn't exist yet, create it. + while lvl >= len(self._edgelevels): + self._edgelevels.append([]) + self._resize() + + # Check if we can fit the edge in this level. + if lvl >= minlvl and not self._edge_conflict(edge, lvl): + # Go ahead and draw it. + self._edgelevels[lvl].append(edge) + break + + # Try the next level. 
+ lvl += 1 + + self._draw_edge(edge, lvl) + + def view_edge(self, edge): + level = None + for i in range(len(self._edgelevels)): + if edge in self._edgelevels[i]: + level = i + break + if level is None: + return + # Try to view the new edge.. + y = (level + 1) * self._chart_level_size + dy = self._text_height + 10 + self._chart_canvas.yview("moveto", 1.0) + if self._chart_height != 0: + self._chart_canvas.yview("moveto", (y - dy) / self._chart_height) + + def _draw_edge(self, edge, lvl): + """ + Draw a single edge on the ChartView. + """ + c = self._chart_canvas + + # Draw the arrow. + x1 = edge.start() * self._unitsize + ChartView._MARGIN + x2 = edge.end() * self._unitsize + ChartView._MARGIN + if x2 == x1: + x2 += max(4, self._unitsize / 5) + y = (lvl + 1) * self._chart_level_size + linetag = c.create_line(x1, y, x2, y, arrow="last", width=3) + + # Draw a label for the edge. + if isinstance(edge, TreeEdge): + rhs = [] + for elt in edge.rhs(): + if isinstance(elt, Nonterminal): + rhs.append(str(elt.symbol())) + else: + rhs.append(repr(elt)) + pos = edge.dot() + else: + rhs = [] + pos = 0 + + rhs1 = " ".join(rhs[:pos]) + rhs2 = " ".join(rhs[pos:]) + rhstag1 = c.create_text(x1 + 3, y, text=rhs1, font=self._font, anchor="nw") + dotx = c.bbox(rhstag1)[2] + 6 + doty = (c.bbox(rhstag1)[1] + c.bbox(rhstag1)[3]) / 2 + dottag = c.create_oval(dotx - 2, doty - 2, dotx + 2, doty + 2) + rhstag2 = c.create_text(dotx + 6, y, text=rhs2, font=self._font, anchor="nw") + lhstag = c.create_text( + (x1 + x2) / 2, y, text=str(edge.lhs()), anchor="s", font=self._boldfont + ) + + # Keep track of the edge's tags. + self._edgetags[edge] = (linetag, rhstag1, dottag, rhstag2, lhstag) + + # Register a callback for clicking on the edge. + def cb(event, self=self, edge=edge): + self._fire_callbacks("select", edge) + + c.tag_bind(rhstag1, "", cb) + c.tag_bind(rhstag2, "", cb) + c.tag_bind(linetag, "", cb) + c.tag_bind(dottag, "", cb) + c.tag_bind(lhstag, "", cb) + + self._color_edge(edge) + + def _color_edge(self, edge, linecolor=None, textcolor=None): + """ + Color in an edge with the given colors. + If no colors are specified, use intelligent defaults + (dependent on selection, etc.) 
+ """ + if edge not in self._edgetags: + return + c = self._chart_canvas + + if linecolor is not None and textcolor is not None: + if edge in self._marks: + linecolor = self._marks[edge] + tags = self._edgetags[edge] + c.itemconfig(tags[0], fill=linecolor) + c.itemconfig(tags[1], fill=textcolor) + c.itemconfig(tags[2], fill=textcolor, outline=textcolor) + c.itemconfig(tags[3], fill=textcolor) + c.itemconfig(tags[4], fill=textcolor) + return + else: + N = self._chart.num_leaves() + if edge in self._marks: + self._color_edge(self._marks[edge]) + if edge.is_complete() and edge.span() == (0, N): + self._color_edge(edge, "#084", "#042") + elif isinstance(edge, LeafEdge): + self._color_edge(edge, "#48c", "#246") + else: + self._color_edge(edge, "#00f", "#008") + + def mark_edge(self, edge, mark="#0df"): + """ + Mark an edge + """ + self._marks[edge] = mark + self._color_edge(edge) + + def unmark_edge(self, edge=None): + """ + Unmark an edge (or all edges) + """ + if edge is None: + old_marked_edges = list(self._marks.keys()) + self._marks = {} + for edge in old_marked_edges: + self._color_edge(edge) + else: + del self._marks[edge] + self._color_edge(edge) + + def markonly_edge(self, edge, mark="#0df"): + self.unmark_edge() + self.mark_edge(edge, mark) + + def _analyze(self): + """ + Analyze the sentence string, to figure out how big a unit needs + to be, How big the tree should be, etc. + """ + # Figure out the text height and the unit size. + unitsize = 70 # min unitsize + text_height = 0 + c = self._chart_canvas + + # Check against all tokens + for leaf in self._chart.leaves(): + tag = c.create_text( + 0, 0, text=repr(leaf), font=self._font, anchor="nw", justify="left" + ) + bbox = c.bbox(tag) + c.delete(tag) + width = bbox[2] + ChartView._LEAF_SPACING + unitsize = max(width, unitsize) + text_height = max(text_height, bbox[3] - bbox[1]) + + self._unitsize = unitsize + self._text_height = text_height + self._sentence_height = self._text_height + 2 * ChartView._MARGIN + + # Check against edges. + for edge in self._chart.edges(): + self._analyze_edge(edge) + + # Size of chart levels + self._chart_level_size = self._text_height * 2 + + # Default tree size.. + self._tree_height = 3 * (ChartView._TREE_LEVEL_SIZE + self._text_height) + + # Resize the scrollregions. + self._resize() + + def _resize(self): + """ + Update the scroll-regions for each canvas. This ensures that + everything is within a scroll-region, so the user can use the + scrollbars to view the entire display. This does *not* + resize the window. + """ + c = self._chart_canvas + + # Reset the chart scroll region + width = self._chart.num_leaves() * self._unitsize + ChartView._MARGIN * 2 + + levels = len(self._edgelevels) + self._chart_height = (levels + 2) * self._chart_level_size + c["scrollregion"] = (0, 0, width, self._chart_height) + + # Reset the tree scroll region + if self._tree_canvas: + self._tree_canvas["scrollregion"] = (0, 0, width, self._tree_height) + + def _draw_loclines(self): + """ + Draw location lines. These are vertical gridlines used to + show where each location unit is. 
+ """ + BOTTOM = 50000 + c1 = self._tree_canvas + c2 = self._sentence_canvas + c3 = self._chart_canvas + margin = ChartView._MARGIN + self._loclines = [] + for i in range(0, self._chart.num_leaves() + 1): + x = i * self._unitsize + margin + + if c1: + t1 = c1.create_line(x, 0, x, BOTTOM) + c1.tag_lower(t1) + if c2: + t2 = c2.create_line(x, 0, x, self._sentence_height) + c2.tag_lower(t2) + t3 = c3.create_line(x, 0, x, BOTTOM) + c3.tag_lower(t3) + t4 = c3.create_text(x + 2, 0, text=repr(i), anchor="nw", font=self._font) + c3.tag_lower(t4) + # if i % 4 == 0: + # if c1: c1.itemconfig(t1, width=2, fill='gray60') + # if c2: c2.itemconfig(t2, width=2, fill='gray60') + # c3.itemconfig(t3, width=2, fill='gray60') + if i % 2 == 0: + if c1: + c1.itemconfig(t1, fill="gray60") + if c2: + c2.itemconfig(t2, fill="gray60") + c3.itemconfig(t3, fill="gray60") + else: + if c1: + c1.itemconfig(t1, fill="gray80") + if c2: + c2.itemconfig(t2, fill="gray80") + c3.itemconfig(t3, fill="gray80") + + def _draw_sentence(self): + """Draw the sentence string.""" + if self._chart.num_leaves() == 0: + return + c = self._sentence_canvas + margin = ChartView._MARGIN + y = ChartView._MARGIN + + for i, leaf in enumerate(self._chart.leaves()): + x1 = i * self._unitsize + margin + x2 = x1 + self._unitsize + x = (x1 + x2) / 2 + tag = c.create_text( + x, y, text=repr(leaf), font=self._font, anchor="n", justify="left" + ) + bbox = c.bbox(tag) + rt = c.create_rectangle( + x1 + 2, + bbox[1] - (ChartView._LEAF_SPACING / 2), + x2 - 2, + bbox[3] + (ChartView._LEAF_SPACING / 2), + fill="#f0f0f0", + outline="#f0f0f0", + ) + c.tag_lower(rt) + + def erase_tree(self): + for tag in self._tree_tags: + self._tree_canvas.delete(tag) + self._treetoks = [] + self._treetoks_edge = None + self._treetoks_index = 0 + + def draw_tree(self, edge=None): + if edge is None and self._treetoks_edge is None: + return + if edge is None: + edge = self._treetoks_edge + + # If it's a new edge, then get a new list of treetoks. + if self._treetoks_edge != edge: + self._treetoks = [t for t in self._chart.trees(edge) if isinstance(t, Tree)] + self._treetoks_edge = edge + self._treetoks_index = 0 + + # Make sure there's something to draw. + if len(self._treetoks) == 0: + return + + # Erase the old tree. + for tag in self._tree_tags: + self._tree_canvas.delete(tag) + + # Draw the new tree. + tree = self._treetoks[self._treetoks_index] + self._draw_treetok(tree, edge.start()) + + # Show how many trees are available for the edge. + self._draw_treecycle() + + # Update the scroll region. + w = self._chart.num_leaves() * self._unitsize + 2 * ChartView._MARGIN + h = tree.height() * (ChartView._TREE_LEVEL_SIZE + self._text_height) + self._tree_canvas["scrollregion"] = (0, 0, w, h) + + def cycle_tree(self): + self._treetoks_index = (self._treetoks_index + 1) % len(self._treetoks) + self.draw_tree(self._treetoks_edge) + + def _draw_treecycle(self): + if len(self._treetoks) <= 1: + return + + # Draw the label. + label = "%d Trees" % len(self._treetoks) + c = self._tree_canvas + margin = ChartView._MARGIN + right = self._chart.num_leaves() * self._unitsize + margin - 2 + tag = c.create_text(right, 2, anchor="ne", text=label, font=self._boldfont) + self._tree_tags.append(tag) + _, _, _, y = c.bbox(tag) + + # Draw the triangles. 
+ for i in range(len(self._treetoks)): + x = right - 20 * (len(self._treetoks) - i - 1) + if i == self._treetoks_index: + fill = "#084" + else: + fill = "#fff" + tag = c.create_polygon( + x, y + 10, x - 5, y, x - 10, y + 10, fill=fill, outline="black" + ) + self._tree_tags.append(tag) + + # Set up a callback: show the tree if they click on its + # triangle. + def cb(event, self=self, i=i): + self._treetoks_index = i + self.draw_tree() + + c.tag_bind(tag, "", cb) + + def _draw_treetok(self, treetok, index, depth=0): + """ + :param index: The index of the first leaf in the tree. + :return: The index of the first leaf after the tree. + """ + c = self._tree_canvas + margin = ChartView._MARGIN + + # Draw the children + child_xs = [] + for child in treetok: + if isinstance(child, Tree): + child_x, index = self._draw_treetok(child, index, depth + 1) + child_xs.append(child_x) + else: + child_xs.append((2 * index + 1) * self._unitsize / 2 + margin) + index += 1 + + # If we have children, then get the node's x by averaging their + # node x's. Otherwise, make room for ourselves. + if child_xs: + nodex = sum(child_xs) / len(child_xs) + else: + # [XX] breaks for null productions. + nodex = (2 * index + 1) * self._unitsize / 2 + margin + index += 1 + + # Draw the node + nodey = depth * (ChartView._TREE_LEVEL_SIZE + self._text_height) + tag = c.create_text( + nodex, + nodey, + anchor="n", + justify="center", + text=str(treetok.label()), + fill="#042", + font=self._boldfont, + ) + self._tree_tags.append(tag) + + # Draw lines to the children. + childy = nodey + ChartView._TREE_LEVEL_SIZE + self._text_height + for childx, child in zip(child_xs, treetok): + if isinstance(child, Tree) and child: + # A "real" tree token: + tag = c.create_line( + nodex, + nodey + self._text_height, + childx, + childy, + width=2, + fill="#084", + ) + self._tree_tags.append(tag) + if isinstance(child, Tree) and not child: + # An unexpanded tree token: + tag = c.create_line( + nodex, + nodey + self._text_height, + childx, + childy, + width=2, + fill="#048", + dash="2 3", + ) + self._tree_tags.append(tag) + if not isinstance(child, Tree): + # A leaf: + tag = c.create_line( + nodex, + nodey + self._text_height, + childx, + 10000, + width=2, + fill="#084", + ) + self._tree_tags.append(tag) + + return nodex, index + + def draw(self): + """ + Draw everything (from scratch). + """ + if self._tree_canvas: + self._tree_canvas.delete("all") + self.draw_tree() + + if self._sentence_canvas: + self._sentence_canvas.delete("all") + self._draw_sentence() + + self._chart_canvas.delete("all") + self._edgetags = {} + + # Redraw any edges we erased. + for lvl in range(len(self._edgelevels)): + for edge in self._edgelevels[lvl]: + self._draw_edge(edge, lvl) + + for edge in self._chart: + self._add_edge(edge) + + self._draw_loclines() + + def add_callback(self, event, func): + self._callbacks.setdefault(event, {})[func] = 1 + + def remove_callback(self, event, func=None): + if func is None: + del self._callbacks[event] + else: + try: + del self._callbacks[event][func] + except: + pass + + def _fire_callbacks(self, event, *args): + if event not in self._callbacks: + return + for cb_func in list(self._callbacks[event].keys()): + cb_func(*args) + + +####################################################################### +# Edge Rules +####################################################################### +# These version of the chart rules only apply to a specific edge. +# This lets the user select an edge, and then apply a rule. 
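The comment above introduces edge-restricted variants of the chart rules: each wrapper binds one pre-selected edge and then defers to the ordinary rule. A small hypothetical sketch of applying such a wrapper outside the GUI (not part of the patch; the grammar, input, and edge choice are invented):

```python
# Illustrative sketch only: apply an edge-restricted rule to one chosen edge,
# as the GUI does when the user clicks an edge and then a rule button.
from nltk.grammar import CFG
from nltk.parse.chart import ChartParser
from nltk.app.chartparser_app import TopDownPredictEdgeRule

grammar = CFG.fromstring("S -> 'a' S | 'a'")
chart = ChartParser(grammar).chart_parse(["a", "a"])

edge = next(e for e in chart if not e.is_complete())  # stand-in for a clicked edge
rule = TopDownPredictEdgeRule(edge)                   # EdgeRule binds that edge
for new_edge in rule.apply(chart, grammar):           # rule only considers `edge`
    print(new_edge)
```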
+ + +class EdgeRule: + """ + To create an edge rule, make an empty base class that uses + EdgeRule as the first base class, and the basic rule as the + second base class. (Order matters!) + """ + + def __init__(self, edge): + super = self.__class__.__bases__[1] + self._edge = edge + self.NUM_EDGES = super.NUM_EDGES - 1 + + def apply(self, chart, grammar, *edges): + super = self.__class__.__bases__[1] + edges += (self._edge,) + yield from super.apply(self, chart, grammar, *edges) + + def __str__(self): + super = self.__class__.__bases__[1] + return super.__str__(self) + + +class TopDownPredictEdgeRule(EdgeRule, TopDownPredictRule): + pass + + +class BottomUpEdgeRule(EdgeRule, BottomUpPredictRule): + pass + + +class BottomUpLeftCornerEdgeRule(EdgeRule, BottomUpPredictCombineRule): + pass + + +class FundamentalEdgeRule(EdgeRule, SingleEdgeFundamentalRule): + pass + + +####################################################################### +# Chart Parser Application +####################################################################### + + +class ChartParserApp: + def __init__(self, grammar, tokens, title="Chart Parser Application"): + # Initialize the parser + self._init_parser(grammar, tokens) + + self._root = None + try: + # Create the root window. + self._root = Tk() + self._root.title(title) + self._root.bind("", self.destroy) + + # Set up some frames. + frame3 = Frame(self._root) + frame2 = Frame(self._root) + frame1 = Frame(self._root) + frame3.pack(side="bottom", fill="none") + frame2.pack(side="bottom", fill="x") + frame1.pack(side="bottom", fill="both", expand=1) + + self._init_fonts(self._root) + self._init_animation() + self._init_chartview(frame1) + self._init_rulelabel(frame2) + self._init_buttons(frame3) + self._init_menubar() + + self._matrix = None + self._results = None + + # Set up keyboard bindings. + self._init_bindings() + + except: + print("Error creating Tree View") + self.destroy() + raise + + def destroy(self, *args): + if self._root is None: + return + self._root.destroy() + self._root = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self._root.mainloop(*args, **kwargs) + + # //////////////////////////////////////////////////////////// + # Initialization Helpers + # //////////////////////////////////////////////////////////// + + def _init_parser(self, grammar, tokens): + self._grammar = grammar + self._tokens = tokens + self._reset_parser() + + def _reset_parser(self): + self._cp = SteppingChartParser(self._grammar) + self._cp.initialize(self._tokens) + self._chart = self._cp.chart() + + # Insert LeafEdges before the parsing starts. + for _new_edge in LeafInitRule().apply(self._chart, self._grammar): + pass + + # The step iterator -- use this to generate new edges + self._cpstep = self._cp.step() + + # The currently selected edge + self._selection = None + + def _init_fonts(self, root): + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(root) + self._size.set(self._sysfont.cget("size")) + + self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) + self._font = Font(family="helvetica", size=self._size.get()) + + def _init_animation(self): + # Are we stepping? 
(default=yes) + self._step = IntVar(self._root) + self._step.set(1) + + # What's our animation speed (default=fast) + self._animate = IntVar(self._root) + self._animate.set(3) # Default speed = fast + + # Are we currently animating? + self._animating = 0 + + def _init_chartview(self, parent): + self._cv = ChartView(self._chart, parent, draw_tree=1, draw_sentence=1) + self._cv.add_callback("select", self._click_cv_edge) + + def _init_rulelabel(self, parent): + ruletxt = "Last edge generated by:" + + self._rulelabel1 = Label(parent, text=ruletxt, font=self._boldfont) + self._rulelabel2 = Label( + parent, width=40, relief="groove", anchor="w", font=self._boldfont + ) + self._rulelabel1.pack(side="left") + self._rulelabel2.pack(side="left") + step = Checkbutton(parent, variable=self._step, text="Step") + step.pack(side="right") + + def _init_buttons(self, parent): + frame1 = Frame(parent) + frame2 = Frame(parent) + frame1.pack(side="bottom", fill="x") + frame2.pack(side="top", fill="none") + + Button( + frame1, + text="Reset\nParser", + background="#90c0d0", + foreground="black", + command=self.reset, + ).pack(side="right") + # Button(frame1, text='Pause', + # background='#90c0d0', foreground='black', + # command=self.pause).pack(side='left') + + Button( + frame1, + text="Top Down\nStrategy", + background="#90c0d0", + foreground="black", + command=self.top_down_strategy, + ).pack(side="left") + Button( + frame1, + text="Bottom Up\nStrategy", + background="#90c0d0", + foreground="black", + command=self.bottom_up_strategy, + ).pack(side="left") + Button( + frame1, + text="Bottom Up\nLeft-Corner Strategy", + background="#90c0d0", + foreground="black", + command=self.bottom_up_leftcorner_strategy, + ).pack(side="left") + + Button( + frame2, + text="Top Down Init\nRule", + background="#90f090", + foreground="black", + command=self.top_down_init, + ).pack(side="left") + Button( + frame2, + text="Top Down Predict\nRule", + background="#90f090", + foreground="black", + command=self.top_down_predict, + ).pack(side="left") + Frame(frame2, width=20).pack(side="left") + + Button( + frame2, + text="Bottom Up Predict\nRule", + background="#90f090", + foreground="black", + command=self.bottom_up, + ).pack(side="left") + Frame(frame2, width=20).pack(side="left") + + Button( + frame2, + text="Bottom Up Left-Corner\nPredict Rule", + background="#90f090", + foreground="black", + command=self.bottom_up_leftcorner, + ).pack(side="left") + Frame(frame2, width=20).pack(side="left") + + Button( + frame2, + text="Fundamental\nRule", + background="#90f090", + foreground="black", + command=self.fundamental, + ).pack(side="left") + + def _init_bindings(self): + self._root.bind("", self._cv.scroll_up) + self._root.bind("", self._cv.scroll_down) + self._root.bind("", self._cv.page_up) + self._root.bind("", self._cv.page_down) + self._root.bind("", self.destroy) + self._root.bind("", self.destroy) + self._root.bind("", self.help) + + self._root.bind("", self.save_chart) + self._root.bind("", self.load_chart) + self._root.bind("", self.reset) + + self._root.bind("t", self.top_down_strategy) + self._root.bind("b", self.bottom_up_strategy) + self._root.bind("c", self.bottom_up_leftcorner_strategy) + self._root.bind("", self._stop_animation) + + self._root.bind("", self.edit_grammar) + self._root.bind("", self.edit_sentence) + + # Animation speed control + self._root.bind("-", lambda e, a=self._animate: a.set(1)) + self._root.bind("=", lambda e, a=self._animate: a.set(2)) + self._root.bind("+", lambda e, a=self._animate: 
a.set(3)) + + # Step control + self._root.bind("s", lambda e, s=self._step: s.set(not s.get())) + + def _init_menubar(self): + menubar = Menu(self._root) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Save Chart", + underline=0, + command=self.save_chart, + accelerator="Ctrl-s", + ) + filemenu.add_command( + label="Load Chart", + underline=0, + command=self.load_chart, + accelerator="Ctrl-o", + ) + filemenu.add_command( + label="Reset Chart", underline=0, command=self.reset, accelerator="Ctrl-r" + ) + filemenu.add_separator() + filemenu.add_command(label="Save Grammar", command=self.save_grammar) + filemenu.add_command(label="Load Grammar", command=self.load_grammar) + filemenu.add_separator() + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + editmenu.add_command( + label="Edit Grammar", + underline=5, + command=self.edit_grammar, + accelerator="Ctrl-g", + ) + editmenu.add_command( + label="Edit Text", + underline=5, + command=self.edit_sentence, + accelerator="Ctrl-t", + ) + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_command( + label="Chart Matrix", underline=6, command=self.view_matrix + ) + viewmenu.add_command(label="Results", underline=0, command=self.view_results) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + rulemenu = Menu(menubar, tearoff=0) + rulemenu.add_command( + label="Top Down Strategy", + underline=0, + command=self.top_down_strategy, + accelerator="t", + ) + rulemenu.add_command( + label="Bottom Up Strategy", + underline=0, + command=self.bottom_up_strategy, + accelerator="b", + ) + rulemenu.add_command( + label="Bottom Up Left-Corner Strategy", + underline=0, + command=self.bottom_up_leftcorner_strategy, + accelerator="c", + ) + rulemenu.add_separator() + rulemenu.add_command(label="Bottom Up Rule", command=self.bottom_up) + rulemenu.add_command( + label="Bottom Up Left-Corner Rule", command=self.bottom_up_leftcorner + ) + rulemenu.add_command(label="Top Down Init Rule", command=self.top_down_init) + rulemenu.add_command( + label="Top Down Predict Rule", command=self.top_down_predict + ) + rulemenu.add_command(label="Fundamental Rule", command=self.fundamental) + menubar.add_cascade(label="Apply", underline=0, menu=rulemenu) + + animatemenu = Menu(menubar, tearoff=0) + animatemenu.add_checkbutton( + label="Step", underline=0, variable=self._step, accelerator="s" + ) + animatemenu.add_separator() + animatemenu.add_radiobutton( + label="No Animation", underline=0, variable=self._animate, value=0 + ) + animatemenu.add_radiobutton( + label="Slow Animation", + underline=0, + variable=self._animate, + value=1, + accelerator="-", + ) + animatemenu.add_radiobutton( + label="Normal Animation", + underline=0, + variable=self._animate, + value=2, + accelerator="=", + ) + animatemenu.add_radiobutton( + label="Fast Animation", + underline=0, + variable=self._animate, + value=3, + accelerator="+", + ) + menubar.add_cascade(label="Animate", underline=1, menu=animatemenu) + + zoommenu = Menu(menubar, tearoff=0) + zoommenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Medium", + 
variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=18, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="Zoom", underline=0, menu=zoommenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + self._root.config(menu=menubar) + + # //////////////////////////////////////////////////////////// + # Selection Handling + # //////////////////////////////////////////////////////////// + + def _click_cv_edge(self, edge): + if edge != self._selection: + # Clicking on a new edge selects it. + self._select_edge(edge) + else: + # Repeated clicks on one edge cycle its trees. + self._cv.cycle_tree() + # [XX] this can get confused if animation is running + # faster than the callbacks... + + def _select_matrix_edge(self, edge): + self._select_edge(edge) + self._cv.view_edge(edge) + + def _select_edge(self, edge): + self._selection = edge + # Update the chart view. + self._cv.markonly_edge(edge, "#f00") + self._cv.draw_tree(edge) + # Update the matrix view. + if self._matrix: + self._matrix.markonly_edge(edge) + if self._matrix: + self._matrix.view_edge(edge) + + def _deselect_edge(self): + self._selection = None + # Update the chart view. + self._cv.unmark_edge() + self._cv.erase_tree() + # Update the matrix view + if self._matrix: + self._matrix.unmark_edge() + + def _show_new_edge(self, edge): + self._display_rule(self._cp.current_chartrule()) + # Update the chart view. + self._cv.update() + self._cv.draw_tree(edge) + self._cv.markonly_edge(edge, "#0df") + self._cv.view_edge(edge) + # Update the matrix view. + if self._matrix: + self._matrix.update() + if self._matrix: + self._matrix.markonly_edge(edge) + if self._matrix: + self._matrix.view_edge(edge) + # Update the results view. + if self._results: + self._results.update(edge) + + # //////////////////////////////////////////////////////////// + # Help/usage + # //////////////////////////////////////////////////////////// + + def help(self, *e): + self._animating = 0 + # The default font's not very legible; try using 'fixed' instead. 
+ try: + ShowText( + self._root, + "Help: Chart Parser Application", + (__doc__ or "").strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._root, + "Help: Chart Parser Application", + (__doc__ or "").strip(), + width=75, + ) + + def about(self, *e): + ABOUT = "NLTK Chart Parser Application\n" + "Written by Edward Loper" + showinfo("About: Chart Parser Application", ABOUT) + + # //////////////////////////////////////////////////////////// + # File Menu + # //////////////////////////////////////////////////////////// + + CHART_FILE_TYPES = [("Pickle file", ".pickle"), ("All files", "*")] + GRAMMAR_FILE_TYPES = [ + ("Plaintext grammar file", ".cfg"), + ("Pickle file", ".pickle"), + ("All files", "*"), + ] + + def load_chart(self, *args): + "Load a chart from a pickle file" + filename = askopenfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + with open(filename, "rb") as infile: + chart = pickle.load(infile) + self._chart = chart + self._cv.update(chart) + if self._matrix: + self._matrix.set_chart(chart) + if self._matrix: + self._matrix.deselect_cell() + if self._results: + self._results.set_chart(chart) + self._cp.set_chart(chart) + except Exception as e: + raise + showerror("Error Loading Chart", "Unable to open file: %r" % filename) + + def save_chart(self, *args): + "Save a chart to a pickle file" + filename = asksaveasfilename( + filetypes=self.CHART_FILE_TYPES, defaultextension=".pickle" + ) + if not filename: + return + try: + with open(filename, "wb") as outfile: + pickle.dump(self._chart, outfile) + except Exception as e: + raise + showerror("Error Saving Chart", "Unable to open file: %r" % filename) + + def load_grammar(self, *args): + "Load a grammar from a pickle file" + filename = askopenfilename( + filetypes=self.GRAMMAR_FILE_TYPES, defaultextension=".cfg" + ) + if not filename: + return + try: + if filename.endswith(".pickle"): + with open(filename, "rb") as infile: + grammar = pickle.load(infile) + else: + with open(filename) as infile: + grammar = CFG.fromstring(infile.read()) + self.set_grammar(grammar) + except Exception as e: + showerror("Error Loading Grammar", "Unable to open file: %r" % filename) + + def save_grammar(self, *args): + filename = asksaveasfilename( + filetypes=self.GRAMMAR_FILE_TYPES, defaultextension=".cfg" + ) + if not filename: + return + try: + if filename.endswith(".pickle"): + with open(filename, "wb") as outfile: + pickle.dump((self._chart, self._tokens), outfile) + else: + with open(filename, "w") as outfile: + prods = self._grammar.productions() + start = [p for p in prods if p.lhs() == self._grammar.start()] + rest = [p for p in prods if p.lhs() != self._grammar.start()] + for prod in start: + outfile.write("%s\n" % prod) + for prod in rest: + outfile.write("%s\n" % prod) + except Exception as e: + showerror("Error Saving Grammar", "Unable to open file: %r" % filename) + + def reset(self, *args): + self._animating = 0 + self._reset_parser() + self._cv.update(self._chart) + if self._matrix: + self._matrix.set_chart(self._chart) + if self._matrix: + self._matrix.deselect_cell() + if self._results: + self._results.set_chart(self._chart) + + # //////////////////////////////////////////////////////////// + # Edit + # //////////////////////////////////////////////////////////// + + def edit_grammar(self, *e): + CFGEditor(self._root, self._grammar, self.set_grammar) + + def set_grammar(self, grammar): + self._grammar = grammar + self._cp.set_grammar(grammar) + if 
self._results: + self._results.set_grammar(grammar) + + def edit_sentence(self, *e): + sentence = " ".join(self._tokens) + title = "Edit Text" + instr = "Enter a new sentence to parse." + EntryDialog(self._root, sentence, instr, self.set_sentence, title) + + def set_sentence(self, sentence): + self._tokens = list(sentence.split()) + self.reset() + + # //////////////////////////////////////////////////////////// + # View Menu + # //////////////////////////////////////////////////////////// + + def view_matrix(self, *e): + if self._matrix is not None: + self._matrix.destroy() + self._matrix = ChartMatrixView(self._root, self._chart) + self._matrix.add_callback("select", self._select_matrix_edge) + + def view_results(self, *e): + if self._results is not None: + self._results.destroy() + self._results = ChartResultsView(self._root, self._chart, self._grammar) + + # //////////////////////////////////////////////////////////// + # Zoom Menu + # //////////////////////////////////////////////////////////// + + def resize(self): + self._animating = 0 + self.set_font_size(self._size.get()) + + def set_font_size(self, size): + self._cv.set_font_size(size) + self._font.configure(size=-abs(size)) + self._boldfont.configure(size=-abs(size)) + self._sysfont.configure(size=-abs(size)) + + def get_font_size(self): + return abs(self._size.get()) + + # //////////////////////////////////////////////////////////// + # Parsing + # //////////////////////////////////////////////////////////// + + def apply_strategy(self, strategy, edge_strategy=None): + # If we're animating, then stop. + if self._animating: + self._animating = 0 + return + + # Clear the rule display & mark. + self._display_rule(None) + # self._cv.unmark_edge() + + if self._step.get(): + selection = self._selection + if (selection is not None) and (edge_strategy is not None): + # Apply the given strategy to the selected edge. + self._cp.set_strategy([edge_strategy(selection)]) + newedge = self._apply_strategy() + + # If it failed, then clear the selection. 
+ if newedge is None: + self._cv.unmark_edge() + self._selection = None + else: + self._cp.set_strategy(strategy) + self._apply_strategy() + + else: + self._cp.set_strategy(strategy) + if self._animate.get(): + self._animating = 1 + self._animate_strategy() + else: + for edge in self._cpstep: + if edge is None: + break + self._cv.update() + if self._matrix: + self._matrix.update() + if self._results: + self._results.update() + + def _stop_animation(self, *e): + self._animating = 0 + + def _animate_strategy(self, speed=1): + if self._animating == 0: + return + if self._apply_strategy() is not None: + if self._animate.get() == 0 or self._step.get() == 1: + return + if self._animate.get() == 1: + self._root.after(3000, self._animate_strategy) + elif self._animate.get() == 2: + self._root.after(1000, self._animate_strategy) + else: + self._root.after(20, self._animate_strategy) + + def _apply_strategy(self): + new_edge = next(self._cpstep) + + if new_edge is not None: + self._show_new_edge(new_edge) + return new_edge + + def _display_rule(self, rule): + if rule is None: + self._rulelabel2["text"] = "" + else: + name = str(rule) + self._rulelabel2["text"] = name + size = self._cv.get_font_size() + + # //////////////////////////////////////////////////////////// + # Parsing Strategies + # //////////////////////////////////////////////////////////// + + # Basic rules: + _TD_INIT = [TopDownInitRule()] + _TD_PREDICT = [TopDownPredictRule()] + _BU_RULE = [BottomUpPredictRule()] + _BU_LC_RULE = [BottomUpPredictCombineRule()] + _FUNDAMENTAL = [SingleEdgeFundamentalRule()] + + # Complete strategies: + _TD_STRATEGY = _TD_INIT + _TD_PREDICT + _FUNDAMENTAL + _BU_STRATEGY = _BU_RULE + _FUNDAMENTAL + _BU_LC_STRATEGY = _BU_LC_RULE + _FUNDAMENTAL + + # Button callback functions: + def top_down_init(self, *e): + self.apply_strategy(self._TD_INIT, None) + + def top_down_predict(self, *e): + self.apply_strategy(self._TD_PREDICT, TopDownPredictEdgeRule) + + def bottom_up(self, *e): + self.apply_strategy(self._BU_RULE, BottomUpEdgeRule) + + def bottom_up_leftcorner(self, *e): + self.apply_strategy(self._BU_LC_RULE, BottomUpLeftCornerEdgeRule) + + def fundamental(self, *e): + self.apply_strategy(self._FUNDAMENTAL, FundamentalEdgeRule) + + def bottom_up_strategy(self, *e): + self.apply_strategy(self._BU_STRATEGY, BottomUpEdgeRule) + + def bottom_up_leftcorner_strategy(self, *e): + self.apply_strategy(self._BU_LC_STRATEGY, BottomUpLeftCornerEdgeRule) + + def top_down_strategy(self, *e): + self.apply_strategy(self._TD_STRATEGY, TopDownPredictEdgeRule) + + +def app(): + grammar = CFG.fromstring( + """ + # Grammatical productions. + S -> NP VP + VP -> VP PP | V NP | V + NP -> Det N | NP PP + PP -> P NP + # Lexical productions. 
+ NP -> 'John' | 'I' + Det -> 'the' | 'my' | 'a' + N -> 'dog' | 'cookie' | 'table' | 'cake' | 'fork' + V -> 'ate' | 'saw' + P -> 'on' | 'under' | 'with' + """ + ) + + sent = "John ate the cake on the table with a fork" + sent = "John ate the cake on the table" + tokens = list(sent.split()) + + print("grammar= (") + for rule in grammar.productions(): + print((" ", repr(rule) + ",")) + print(")") + print("tokens = %r" % tokens) + print('Calling "ChartParserApp(grammar, tokens)"...') + ChartParserApp(grammar, tokens).mainloop() + + +if __name__ == "__main__": + app() + + # Chart comparer: + # charts = ['/tmp/earley.pickle', + # '/tmp/topdown.pickle', + # '/tmp/bottomup.pickle'] + # ChartComparer(*charts).mainloop() + + # import profile + # profile.run('demo2()', '/tmp/profile.out') + # import pstats + # p = pstats.Stats('/tmp/profile.out') + # p.strip_dirs().sort_stats('time', 'cum').print_stats(60) + # p.strip_dirs().sort_stats('cum', 'time').print_stats(60) + +__all__ = ["app"] diff --git a/venv/lib/python3.10/site-packages/nltk/app/chunkparser_app.py b/venv/lib/python3.10/site-packages/nltk/app/chunkparser_app.py new file mode 100644 index 0000000000000000000000000000000000000000..54a10a1e7db3dde0f3a18447575130e658ba3c51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/app/chunkparser_app.py @@ -0,0 +1,1500 @@ +# Natural Language Toolkit: Regexp Chunk Parser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A graphical tool for exploring the regular expression based chunk +parser ``nltk.chunk.RegexpChunkParser``. +""" + +# Todo: Add a way to select the development set from the menubar. This +# might just need to be a selection box (conll vs treebank etc) plus +# configuration parameters to select what's being chunked (eg VP vs NP) +# and what part of the data is being used as the development set. + +import random +import re +import textwrap +import time +from tkinter import ( + Button, + Canvas, + Checkbutton, + Frame, + IntVar, + Label, + Menu, + Scrollbar, + Text, + Tk, +) +from tkinter.filedialog import askopenfilename, asksaveasfilename +from tkinter.font import Font + +from nltk.chunk import ChunkScore, RegexpChunkParser +from nltk.chunk.regexp import RegexpChunkRule +from nltk.corpus import conll2000, treebank_chunk +from nltk.draw.util import ShowText +from nltk.tree import Tree +from nltk.util import in_idle + + +class RegexpChunkApp: + """ + A graphical tool for exploring the regular expression based chunk + parser ``nltk.chunk.RegexpChunkParser``. + + See ``HELP`` for instructional text. + """ + + ##///////////////////////////////////////////////////////////////// + ## Help Text + ##///////////////////////////////////////////////////////////////// + + #: A dictionary mapping from part of speech tags to descriptions, + #: which is used in the help text. (This should probably live with + #: the conll and/or treebank corpus instead.) 
+ TAGSET = { + "CC": "Coordinating conjunction", + "PRP$": "Possessive pronoun", + "CD": "Cardinal number", + "RB": "Adverb", + "DT": "Determiner", + "RBR": "Adverb, comparative", + "EX": "Existential there", + "RBS": "Adverb, superlative", + "FW": "Foreign word", + "RP": "Particle", + "JJ": "Adjective", + "TO": "to", + "JJR": "Adjective, comparative", + "UH": "Interjection", + "JJS": "Adjective, superlative", + "VB": "Verb, base form", + "LS": "List item marker", + "VBD": "Verb, past tense", + "MD": "Modal", + "NNS": "Noun, plural", + "NN": "Noun, singular or masps", + "VBN": "Verb, past participle", + "VBZ": "Verb,3rd ps. sing. present", + "NNP": "Proper noun, singular", + "NNPS": "Proper noun plural", + "WDT": "wh-determiner", + "PDT": "Predeterminer", + "WP": "wh-pronoun", + "POS": "Possessive ending", + "WP$": "Possessive wh-pronoun", + "PRP": "Personal pronoun", + "WRB": "wh-adverb", + "(": "open parenthesis", + ")": "close parenthesis", + "``": "open quote", + ",": "comma", + "''": "close quote", + ".": "period", + "#": "pound sign (currency marker)", + "$": "dollar sign (currency marker)", + "IN": "Preposition/subord. conjunction", + "SYM": "Symbol (mathematical or scientific)", + "VBG": "Verb, gerund/present participle", + "VBP": "Verb, non-3rd ps. sing. present", + ":": "colon", + } + + #: Contents for the help box. This is a list of tuples, one for + #: each help page, where each tuple has four elements: + #: - A title (displayed as a tab) + #: - A string description of tabstops (see Tkinter.Text for details) + #: - The text contents for the help page. You can use expressions + #: like ... to colorize the text; see ``HELP_AUTOTAG`` + #: for a list of tags you can use for colorizing. + HELP = [ + ( + "Help", + "20", + "Welcome to the regular expression chunk-parser grammar editor. " + "You can use this editor to develop and test chunk parser grammars " + "based on NLTK's RegexpChunkParser class.\n\n" + # Help box. + "Use this box ('Help') to learn more about the editor; click on the " + "tabs for help on specific topics:" + "\n" + "Rules: grammar rule types\n" + "Regexps: regular expression syntax\n" + "Tags: part of speech tags\n\n" + # Grammar. + "Use the upper-left box ('Grammar') to edit your grammar. " + "Each line of your grammar specifies a single 'rule', " + "which performs an action such as creating a chunk or merging " + "two chunks.\n\n" + # Dev set. + "The lower-left box ('Development Set') runs your grammar on the " + "development set, and displays the results. " + "Your grammar's chunks are highlighted, and " + "the correct (gold standard) chunks are " + "underlined. If they " + "match, they are displayed in green; otherwise, " + "they are displayed in red. The box displays a single " + "sentence from the development set at a time; use the scrollbar or " + "the next/previous buttons view additional sentences.\n\n" + # Performance + "The lower-right box ('Evaluation') tracks the performance of " + "your grammar on the development set. The 'precision' axis " + "indicates how many of your grammar's chunks are correct; and " + "the 'recall' axis indicates how many of the gold standard " + "chunks your system generated. Typically, you should try to " + "design a grammar that scores high on both metrics. The " + "exact precision and recall of the current grammar, as well " + "as their harmonic mean (the 'f-score'), are displayed in " + "the status bar at the bottom of the window.", + ), + ( + "Rules", + "10", + "

<h1>{...regexp...}</h1>"
+ "\nChunk rule: creates new chunks from words matching "
+ "regexp.\n\n"
+ "<h1>}...regexp...{</h1>"
+ "\nStrip rule: removes words matching regexp from existing "
+ "chunks.\n\n"
+ "<h1>...regexp1...}{...regexp2...</h1>"
+ "\nSplit rule: splits chunks that match regexp1 followed by "
+ "regexp2 in two.\n\n"
+ "<h1>...regexp...{}...regexp...</h1>"
+ "\nMerge rule: joins consecutive chunks that match regexp1 "
+ "and regexp2\n",
+ ),
+ (
+ "Regexps",
+ "10 60",
+ # "Regular Expression Syntax Summary:\n\n"
+ "<h1>Pattern\t\tMatches...</h1>\n"
+ "<hangindent>"
+ "\t<<var>T</var>>\ta word with tag <var>T</var> "
+ "(where <var>T</var> may be a regexp).\n"
+ "\t<var>x</var>?\tan optional <var>x</var>\n"
+ "\t<var>x</var>+\ta sequence of 1 or more <var>x</var>'s\n"
+ "\t<var>x</var>*\ta sequence of 0 or more <var>x</var>'s\n"
+ "\t<var>x</var>|<var>y</var>\t<var>x</var> or <var>y</var>\n"
+ "\t.\tmatches any character\n"
+ "\t(<var>x</var>)\tTreats <var>x</var> as a group\n"
+ "\t# <var>x...</var>\tTreats <var>x...</var> "
+ "(to the end of the line) as a comment\n"
+ "\t\\<var>C</var>\tmatches character <var>C</var> "
+ "(useful when <var>C</var> is a special character "
+ "like + or #)\n"
+ "</hangindent>"
+ "\n<h1>Examples:</h1>\n"
+ "<hangindent>"
+ "\t<regexp><NN></regexp>\n"
+ '\t\tMatches <match>"cow/NN"</match>\n'
+ '\t\tMatches <match>"green/NN"</match>\n'
+ "\t<regexp><VB.*></regexp>\n"
+ '\t\tMatches <match>"eating/VBG"</match>\n'
+ '\t\tMatches <match>"ate/VBD"</match>\n'
+ "\t<regexp><IN><DT><NN></regexp>\n"
+ '\t\tMatches <match>"on/IN the/DT car/NN"</match>\n'
+ "\t<regexp><RB>?<VBD></regexp>\n"
+ '\t\tMatches <match>"ran/VBD"</match>\n'
+ '\t\tMatches <match>"slowly/RB ate/VBD"</match>\n'
+ r"\t<regexp><\#><CD></regexp> # This is a comment...\n"
+ '\t\tMatches <match>"#/# 100/CD"</match>\n'
+ "</hangindent>",
+ ),
+ (
+ "Tags",
+ "10 60",
+ "<h1>Part of Speech Tags:</h1>

\n" + + "" + + "<>" + + "\n", # this gets auto-substituted w/ self.TAGSET + ), + ] + + HELP_AUTOTAG = [ + ("red", dict(foreground="#a00")), + ("green", dict(foreground="#080")), + ("highlight", dict(background="#ddd")), + ("underline", dict(underline=True)), + ("h1", dict(underline=True)), + ("indent", dict(lmargin1=20, lmargin2=20)), + ("hangindent", dict(lmargin1=0, lmargin2=60)), + ("var", dict(foreground="#88f")), + ("regexp", dict(foreground="#ba7")), + ("match", dict(foreground="#6a6")), + ] + + ##///////////////////////////////////////////////////////////////// + ## Config Parameters + ##///////////////////////////////////////////////////////////////// + + _EVAL_DELAY = 1 + """If the user has not pressed any key for this amount of time (in + seconds), and the current grammar has not been evaluated, then + the eval demon will evaluate it.""" + + _EVAL_CHUNK = 15 + """The number of sentences that should be evaluated by the eval + demon each time it runs.""" + _EVAL_FREQ = 0.2 + """The frequency (in seconds) at which the eval demon is run""" + _EVAL_DEMON_MIN = 0.02 + """The minimum amount of time that the eval demon should take each time + it runs -- if it takes less than this time, _EVAL_CHUNK will be + modified upwards.""" + _EVAL_DEMON_MAX = 0.04 + """The maximum amount of time that the eval demon should take each time + it runs -- if it takes more than this time, _EVAL_CHUNK will be + modified downwards.""" + + _GRAMMARBOX_PARAMS = dict( + width=40, + height=12, + background="#efe", + highlightbackground="#efe", + highlightthickness=1, + relief="groove", + border=2, + wrap="word", + ) + _HELPBOX_PARAMS = dict( + width=15, + height=15, + background="#efe", + highlightbackground="#efe", + foreground="#555", + highlightthickness=1, + relief="groove", + border=2, + wrap="word", + ) + _DEVSETBOX_PARAMS = dict( + width=70, + height=10, + background="#eef", + highlightbackground="#eef", + highlightthickness=1, + relief="groove", + border=2, + wrap="word", + tabs=(30,), + ) + _STATUS_PARAMS = dict(background="#9bb", relief="groove", border=2) + _FONT_PARAMS = dict(family="helvetica", size=-20) + _FRAME_PARAMS = dict(background="#777", padx=2, pady=2, border=3) + _EVALBOX_PARAMS = dict( + background="#eef", + highlightbackground="#eef", + highlightthickness=1, + relief="groove", + border=2, + width=300, + height=280, + ) + _BUTTON_PARAMS = dict( + background="#777", activebackground="#777", highlightbackground="#777" + ) + _HELPTAB_BG_COLOR = "#aba" + _HELPTAB_FG_COLOR = "#efe" + + _HELPTAB_FG_PARAMS = dict(background="#efe") + _HELPTAB_BG_PARAMS = dict(background="#aba") + _HELPTAB_SPACER = 6 + + def normalize_grammar(self, grammar): + # Strip comments + grammar = re.sub(r"((\\.|[^#])*)(#.*)?", r"\1", grammar) + # Normalize whitespace + grammar = re.sub(" +", " ", grammar) + grammar = re.sub(r"\n\s+", r"\n", grammar) + grammar = grammar.strip() + # [xx] Hack: automatically backslash $! + grammar = re.sub(r"([^\\])\$", r"\1\\$", grammar) + return grammar + + def __init__( + self, + devset_name="conll2000", + devset=None, + grammar="", + chunk_label="NP", + tagset=None, + ): + """ + :param devset_name: The name of the development set; used for + display & for save files. If either the name 'treebank' + or the name 'conll2000' is used, and devset is None, then + devset will be set automatically. + :param devset: A list of chunked sentences + :param grammar: The initial grammar to display. + :param tagset: Dictionary from tags to string descriptions, used + for the help page. 
Defaults to ``self.TAGSET``. + """ + self._chunk_label = chunk_label + + if tagset is None: + tagset = self.TAGSET + self.tagset = tagset + + # Named development sets: + if devset is None: + if devset_name == "conll2000": + devset = conll2000.chunked_sents("train.txt") # [:100] + elif devset == "treebank": + devset = treebank_chunk.chunked_sents() # [:100] + else: + raise ValueError("Unknown development set %s" % devset_name) + + self.chunker = None + """The chunker built from the grammar string""" + + self.grammar = grammar + """The unparsed grammar string""" + + self.normalized_grammar = None + """A normalized version of ``self.grammar``.""" + + self.grammar_changed = 0 + """The last time() that the grammar was changed.""" + + self.devset = devset + """The development set -- a list of chunked sentences.""" + + self.devset_name = devset_name + """The name of the development set (for save files).""" + + self.devset_index = -1 + """The index into the development set of the first instance + that's currently being viewed.""" + + self._last_keypress = 0 + """The time() when a key was most recently pressed""" + + self._history = [] + """A list of (grammar, precision, recall, fscore) tuples for + grammars that the user has already tried.""" + + self._history_index = 0 + """When the user is scrolling through previous grammars, this + is used to keep track of which grammar they're looking at.""" + + self._eval_grammar = None + """The grammar that is being currently evaluated by the eval + demon.""" + + self._eval_normalized_grammar = None + """A normalized copy of ``_eval_grammar``.""" + + self._eval_index = 0 + """The index of the next sentence in the development set that + should be looked at by the eval demon.""" + + self._eval_score = ChunkScore(chunk_label=chunk_label) + """The ``ChunkScore`` object that's used to keep track of the score + of the current grammar on the development set.""" + + # Set up the main window. + top = self.top = Tk() + top.geometry("+50+50") + top.title("Regexp Chunk Parser App") + top.bind("", self.destroy) + + # Variable that restricts how much of the devset we look at. + self._devset_size = IntVar(top) + self._devset_size.set(100) + + # Set up all the tkinter widgets + self._init_fonts(top) + self._init_widgets(top) + self._init_bindings(top) + self._init_menubar(top) + self.grammarbox.focus() + + # If a grammar was given, then display it. 
+ if grammar: + self.grammarbox.insert("end", grammar + "\n") + self.grammarbox.mark_set("insert", "1.0") + + # Display the first item in the development set + self.show_devset(0) + self.update() + + def _init_bindings(self, top): + top.bind("", self._devset_next) + top.bind("", self._devset_prev) + top.bind("", self.toggle_show_trace) + top.bind("", self.update) + top.bind("", lambda e: self.save_grammar()) + top.bind("", lambda e: self.load_grammar()) + self.grammarbox.bind("", self.toggle_show_trace) + self.grammarbox.bind("", self._devset_next) + self.grammarbox.bind("", self._devset_prev) + + # Redraw the eval graph when the window size changes + self.evalbox.bind("", self._eval_plot) + + def _init_fonts(self, top): + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(top) + self._size.set(20) + self._font = Font(family="helvetica", size=-self._size.get()) + self._smallfont = Font( + family="helvetica", size=-(int(self._size.get() * 14 // 20)) + ) + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command(label="Reset Application", underline=0, command=self.reset) + filemenu.add_command( + label="Save Current Grammar", + underline=0, + accelerator="Ctrl-s", + command=self.save_grammar, + ) + filemenu.add_command( + label="Load Grammar", + underline=0, + accelerator="Ctrl-o", + command=self.load_grammar, + ) + + filemenu.add_command( + label="Save Grammar History", underline=13, command=self.save_history + ) + + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=16, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=20, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=34, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + devsetmenu = Menu(menubar, tearoff=0) + devsetmenu.add_radiobutton( + label="50 sentences", + variable=self._devset_size, + value=50, + command=self.set_devset_size, + ) + devsetmenu.add_radiobutton( + label="100 sentences", + variable=self._devset_size, + value=100, + command=self.set_devset_size, + ) + devsetmenu.add_radiobutton( + label="200 sentences", + variable=self._devset_size, + value=200, + command=self.set_devset_size, + ) + devsetmenu.add_radiobutton( + label="500 sentences", + variable=self._devset_size, + value=500, + command=self.set_devset_size, + ) + menubar.add_cascade(label="Development-Set", underline=0, menu=devsetmenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + def toggle_show_trace(self, *e): + if self._showing_trace: + self.show_devset() + else: + self.show_trace() + return "break" + + _SCALE_N = 5 # center on the last 5 examples. 
+ _DRAW_LINES = False + + def _eval_plot(self, *e, **config): + width = config.get("width", self.evalbox.winfo_width()) + height = config.get("height", self.evalbox.winfo_height()) + + # Clear the canvas + self.evalbox.delete("all") + + # Draw the precision & recall labels. + tag = self.evalbox.create_text( + 10, height // 2 - 10, justify="left", anchor="w", text="Precision" + ) + left, right = self.evalbox.bbox(tag)[2] + 5, width - 10 + tag = self.evalbox.create_text( + left + (width - left) // 2, + height - 10, + anchor="s", + text="Recall", + justify="center", + ) + top, bot = 10, self.evalbox.bbox(tag)[1] - 10 + + # Draw masks for clipping the plot. + bg = self._EVALBOX_PARAMS["background"] + self.evalbox.lower( + self.evalbox.create_rectangle(0, 0, left - 1, 5000, fill=bg, outline=bg) + ) + self.evalbox.lower( + self.evalbox.create_rectangle(0, bot + 1, 5000, 5000, fill=bg, outline=bg) + ) + + # Calculate the plot's scale. + if self._autoscale.get() and len(self._history) > 1: + max_precision = max_recall = 0 + min_precision = min_recall = 1 + for i in range(1, min(len(self._history), self._SCALE_N + 1)): + grammar, precision, recall, fmeasure = self._history[-i] + min_precision = min(precision, min_precision) + min_recall = min(recall, min_recall) + max_precision = max(precision, max_precision) + max_recall = max(recall, max_recall) + # if max_precision-min_precision > max_recall-min_recall: + # min_recall -= (max_precision-min_precision)/2 + # max_recall += (max_precision-min_precision)/2 + # else: + # min_precision -= (max_recall-min_recall)/2 + # max_precision += (max_recall-min_recall)/2 + # if min_recall < 0: + # max_recall -= min_recall + # min_recall = 0 + # if min_precision < 0: + # max_precision -= min_precision + # min_precision = 0 + min_precision = max(min_precision - 0.01, 0) + min_recall = max(min_recall - 0.01, 0) + max_precision = min(max_precision + 0.01, 1) + max_recall = min(max_recall + 0.01, 1) + else: + min_precision = min_recall = 0 + max_precision = max_recall = 1 + + # Draw the axis lines & grid lines + for i in range(11): + x = left + (right - left) * ( + (i / 10.0 - min_recall) / (max_recall - min_recall) + ) + y = bot - (bot - top) * ( + (i / 10.0 - min_precision) / (max_precision - min_precision) + ) + if left < x < right: + self.evalbox.create_line(x, top, x, bot, fill="#888") + if top < y < bot: + self.evalbox.create_line(left, y, right, y, fill="#888") + self.evalbox.create_line(left, top, left, bot) + self.evalbox.create_line(left, bot, right, bot) + + # Display the plot's scale + self.evalbox.create_text( + left - 3, + bot, + justify="right", + anchor="se", + text="%d%%" % (100 * min_precision), + ) + self.evalbox.create_text( + left - 3, + top, + justify="right", + anchor="ne", + text="%d%%" % (100 * max_precision), + ) + self.evalbox.create_text( + left, + bot + 3, + justify="center", + anchor="nw", + text="%d%%" % (100 * min_recall), + ) + self.evalbox.create_text( + right, + bot + 3, + justify="center", + anchor="ne", + text="%d%%" % (100 * max_recall), + ) + + # Display the scores. 
+ prev_x = prev_y = None + for i, (_, precision, recall, fscore) in enumerate(self._history): + x = left + (right - left) * ( + (recall - min_recall) / (max_recall - min_recall) + ) + y = bot - (bot - top) * ( + (precision - min_precision) / (max_precision - min_precision) + ) + if i == self._history_index: + self.evalbox.create_oval( + x - 2, y - 2, x + 2, y + 2, fill="#0f0", outline="#000" + ) + self.status["text"] = ( + "Precision: %.2f%%\t" % (precision * 100) + + "Recall: %.2f%%\t" % (recall * 100) + + "F-score: %.2f%%" % (fscore * 100) + ) + else: + self.evalbox.lower( + self.evalbox.create_oval( + x - 2, y - 2, x + 2, y + 2, fill="#afa", outline="#8c8" + ) + ) + if prev_x is not None and self._eval_lines.get(): + self.evalbox.lower( + self.evalbox.create_line(prev_x, prev_y, x, y, fill="#8c8") + ) + prev_x, prev_y = x, y + + _eval_demon_running = False + + def _eval_demon(self): + if self.top is None: + return + if self.chunker is None: + self._eval_demon_running = False + return + + # Note our starting time. + t0 = time.time() + + # If are still typing, then wait for them to finish. + if ( + time.time() - self._last_keypress < self._EVAL_DELAY + and self.normalized_grammar != self._eval_normalized_grammar + ): + self._eval_demon_running = True + return self.top.after(int(self._EVAL_FREQ * 1000), self._eval_demon) + + # If the grammar changed, restart the evaluation. + if self.normalized_grammar != self._eval_normalized_grammar: + # Check if we've seen this grammar already. If so, then + # just use the old evaluation values. + for (g, p, r, f) in self._history: + if self.normalized_grammar == self.normalize_grammar(g): + self._history.append((g, p, r, f)) + self._history_index = len(self._history) - 1 + self._eval_plot() + self._eval_demon_running = False + self._eval_normalized_grammar = None + return + self._eval_index = 0 + self._eval_score = ChunkScore(chunk_label=self._chunk_label) + self._eval_grammar = self.grammar + self._eval_normalized_grammar = self.normalized_grammar + + # If the grammar is empty, the don't bother evaluating it, or + # recording it in history -- the score will just be 0. + if self.normalized_grammar.strip() == "": + # self._eval_index = self._devset_size.get() + self._eval_demon_running = False + return + + # Score the next set of examples + for gold in self.devset[ + self._eval_index : min( + self._eval_index + self._EVAL_CHUNK, self._devset_size.get() + ) + ]: + guess = self._chunkparse(gold.leaves()) + self._eval_score.score(gold, guess) + + # update our index in the devset. + self._eval_index += self._EVAL_CHUNK + + # Check if we're done + if self._eval_index >= self._devset_size.get(): + self._history.append( + ( + self._eval_grammar, + self._eval_score.precision(), + self._eval_score.recall(), + self._eval_score.f_measure(), + ) + ) + self._history_index = len(self._history) - 1 + self._eval_plot() + self._eval_demon_running = False + self._eval_normalized_grammar = None + else: + progress = 100 * self._eval_index / self._devset_size.get() + self.status["text"] = "Evaluating on Development Set (%d%%)" % progress + self._eval_demon_running = True + self._adaptively_modify_eval_chunk(time.time() - t0) + self.top.after(int(self._EVAL_FREQ * 1000), self._eval_demon) + + def _adaptively_modify_eval_chunk(self, t): + """ + Modify _EVAL_CHUNK to try to keep the amount of time that the + eval demon takes between _EVAL_DEMON_MIN and _EVAL_DEMON_MAX. + + :param t: The amount of time that the eval demon took. 
+ """ + if t > self._EVAL_DEMON_MAX and self._EVAL_CHUNK > 5: + self._EVAL_CHUNK = min( + self._EVAL_CHUNK - 1, + max( + int(self._EVAL_CHUNK * (self._EVAL_DEMON_MAX / t)), + self._EVAL_CHUNK - 10, + ), + ) + elif t < self._EVAL_DEMON_MIN: + self._EVAL_CHUNK = max( + self._EVAL_CHUNK + 1, + min( + int(self._EVAL_CHUNK * (self._EVAL_DEMON_MIN / t)), + self._EVAL_CHUNK + 10, + ), + ) + + def _init_widgets(self, top): + frame0 = Frame(top, **self._FRAME_PARAMS) + frame0.grid_columnconfigure(0, weight=4) + frame0.grid_columnconfigure(3, weight=2) + frame0.grid_rowconfigure(1, weight=1) + frame0.grid_rowconfigure(5, weight=1) + + # The grammar + self.grammarbox = Text(frame0, font=self._font, **self._GRAMMARBOX_PARAMS) + self.grammarlabel = Label( + frame0, + font=self._font, + text="Grammar:", + highlightcolor="black", + background=self._GRAMMARBOX_PARAMS["background"], + ) + self.grammarlabel.grid(column=0, row=0, sticky="SW") + self.grammarbox.grid(column=0, row=1, sticky="NEWS") + + # Scroll bar for grammar + grammar_scrollbar = Scrollbar(frame0, command=self.grammarbox.yview) + grammar_scrollbar.grid(column=1, row=1, sticky="NWS") + self.grammarbox.config(yscrollcommand=grammar_scrollbar.set) + + # grammar buttons + bg = self._FRAME_PARAMS["background"] + frame3 = Frame(frame0, background=bg) + frame3.grid(column=0, row=2, sticky="EW") + Button( + frame3, + text="Prev Grammar", + command=self._history_prev, + **self._BUTTON_PARAMS, + ).pack(side="left") + Button( + frame3, + text="Next Grammar", + command=self._history_next, + **self._BUTTON_PARAMS, + ).pack(side="left") + + # Help box + self.helpbox = Text(frame0, font=self._smallfont, **self._HELPBOX_PARAMS) + self.helpbox.grid(column=3, row=1, sticky="NEWS") + self.helptabs = {} + bg = self._FRAME_PARAMS["background"] + helptab_frame = Frame(frame0, background=bg) + helptab_frame.grid(column=3, row=0, sticky="SW") + for i, (tab, tabstops, text) in enumerate(self.HELP): + label = Label(helptab_frame, text=tab, font=self._smallfont) + label.grid(column=i * 2, row=0, sticky="S") + # help_frame.grid_columnconfigure(i, weight=1) + # label.pack(side='left') + label.bind("", lambda e, tab=tab: self.show_help(tab)) + self.helptabs[tab] = label + Frame( + helptab_frame, height=1, width=self._HELPTAB_SPACER, background=bg + ).grid(column=i * 2 + 1, row=0) + self.helptabs[self.HELP[0][0]].configure(font=self._font) + self.helpbox.tag_config("elide", elide=True) + for (tag, params) in self.HELP_AUTOTAG: + self.helpbox.tag_config("tag-%s" % tag, **params) + self.show_help(self.HELP[0][0]) + + # Scroll bar for helpbox + help_scrollbar = Scrollbar(frame0, command=self.helpbox.yview) + self.helpbox.config(yscrollcommand=help_scrollbar.set) + help_scrollbar.grid(column=4, row=1, sticky="NWS") + + # The dev set + frame4 = Frame(frame0, background=self._FRAME_PARAMS["background"]) + self.devsetbox = Text(frame4, font=self._font, **self._DEVSETBOX_PARAMS) + self.devsetbox.pack(expand=True, fill="both") + self.devsetlabel = Label( + frame0, + font=self._font, + text="Development Set:", + justify="right", + background=self._DEVSETBOX_PARAMS["background"], + ) + self.devsetlabel.grid(column=0, row=4, sticky="SW") + frame4.grid(column=0, row=5, sticky="NEWS") + + # dev set scrollbars + self.devset_scroll = Scrollbar(frame0, command=self._devset_scroll) + self.devset_scroll.grid(column=1, row=5, sticky="NWS") + self.devset_xscroll = Scrollbar( + frame4, command=self.devsetbox.xview, orient="horiz" + ) + self.devsetbox["xscrollcommand"] = self.devset_xscroll.set 
+ self.devset_xscroll.pack(side="bottom", fill="x") + + # dev set buttons + bg = self._FRAME_PARAMS["background"] + frame1 = Frame(frame0, background=bg) + frame1.grid(column=0, row=7, sticky="EW") + Button( + frame1, + text="Prev Example (Ctrl-p)", + command=self._devset_prev, + **self._BUTTON_PARAMS, + ).pack(side="left") + Button( + frame1, + text="Next Example (Ctrl-n)", + command=self._devset_next, + **self._BUTTON_PARAMS, + ).pack(side="left") + self.devset_button = Button( + frame1, + text="Show example", + command=self.show_devset, + state="disabled", + **self._BUTTON_PARAMS, + ) + self.devset_button.pack(side="right") + self.trace_button = Button( + frame1, text="Show trace", command=self.show_trace, **self._BUTTON_PARAMS + ) + self.trace_button.pack(side="right") + + # evaluation box + self.evalbox = Canvas(frame0, **self._EVALBOX_PARAMS) + label = Label( + frame0, + font=self._font, + text="Evaluation:", + justify="right", + background=self._EVALBOX_PARAMS["background"], + ) + label.grid(column=3, row=4, sticky="SW") + self.evalbox.grid(column=3, row=5, sticky="NEWS", columnspan=2) + + # evaluation box buttons + bg = self._FRAME_PARAMS["background"] + frame2 = Frame(frame0, background=bg) + frame2.grid(column=3, row=7, sticky="EW") + self._autoscale = IntVar(self.top) + self._autoscale.set(False) + Checkbutton( + frame2, + variable=self._autoscale, + command=self._eval_plot, + text="Zoom", + **self._BUTTON_PARAMS, + ).pack(side="left") + self._eval_lines = IntVar(self.top) + self._eval_lines.set(False) + Checkbutton( + frame2, + variable=self._eval_lines, + command=self._eval_plot, + text="Lines", + **self._BUTTON_PARAMS, + ).pack(side="left") + Button(frame2, text="History", **self._BUTTON_PARAMS).pack(side="right") + + # The status label + self.status = Label(frame0, font=self._font, **self._STATUS_PARAMS) + self.status.grid(column=0, row=9, sticky="NEW", padx=3, pady=2, columnspan=5) + + # Help box & devset box can't be edited. + self.helpbox["state"] = "disabled" + self.devsetbox["state"] = "disabled" + + # Spacers + bg = self._FRAME_PARAMS["background"] + Frame(frame0, height=10, width=0, background=bg).grid(column=0, row=3) + Frame(frame0, height=0, width=10, background=bg).grid(column=2, row=0) + Frame(frame0, height=6, width=0, background=bg).grid(column=0, row=8) + + # pack the frame. 
+ frame0.pack(fill="both", expand=True) + + # Set up colors for the devset box + self.devsetbox.tag_config("true-pos", background="#afa", underline="True") + self.devsetbox.tag_config("false-neg", underline="True", foreground="#800") + self.devsetbox.tag_config("false-pos", background="#faa") + self.devsetbox.tag_config("trace", foreground="#666", wrap="none") + self.devsetbox.tag_config("wrapindent", lmargin2=30, wrap="none") + self.devsetbox.tag_config("error", foreground="#800") + + # And for the grammarbox + self.grammarbox.tag_config("error", background="#fec") + self.grammarbox.tag_config("comment", foreground="#840") + self.grammarbox.tag_config("angle", foreground="#00f") + self.grammarbox.tag_config("brace", foreground="#0a0") + self.grammarbox.tag_config("hangindent", lmargin1=0, lmargin2=40) + + _showing_trace = False + + def show_trace(self, *e): + self._showing_trace = True + self.trace_button["state"] = "disabled" + self.devset_button["state"] = "normal" + + self.devsetbox["state"] = "normal" + # self.devsetbox['wrap'] = 'none' + self.devsetbox.delete("1.0", "end") + self.devsetlabel["text"] = "Development Set (%d/%d)" % ( + (self.devset_index + 1, self._devset_size.get()) + ) + + if self.chunker is None: + self.devsetbox.insert("1.0", "Trace: waiting for a valid grammar.") + self.devsetbox.tag_add("error", "1.0", "end") + return # can't do anything more + + gold_tree = self.devset[self.devset_index] + rules = self.chunker.rules() + + # Calculate the tag sequence + tagseq = "\t" + charnum = [1] + for wordnum, (word, pos) in enumerate(gold_tree.leaves()): + tagseq += "%s " % pos + charnum.append(len(tagseq)) + self.charnum = { + (i, j): charnum[j] + for i in range(len(rules) + 1) + for j in range(len(charnum)) + } + self.linenum = {i: i * 2 + 2 for i in range(len(rules) + 1)} + + for i in range(len(rules) + 1): + if i == 0: + self.devsetbox.insert("end", "Start:\n") + self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c") + else: + self.devsetbox.insert("end", "Apply %s:\n" % rules[i - 1]) + self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c") + # Display the tag sequence. + self.devsetbox.insert("end", tagseq + "\n") + self.devsetbox.tag_add("wrapindent", "end -2c linestart", "end -2c") + # Run a partial parser, and extract gold & test chunks + chunker = RegexpChunkParser(rules[:i]) + test_tree = self._chunkparse(gold_tree.leaves()) + gold_chunks = self._chunks(gold_tree) + test_chunks = self._chunks(test_tree) + # Compare them. + for chunk in gold_chunks.intersection(test_chunks): + self._color_chunk(i, chunk, "true-pos") + for chunk in gold_chunks - test_chunks: + self._color_chunk(i, chunk, "false-neg") + for chunk in test_chunks - gold_chunks: + self._color_chunk(i, chunk, "false-pos") + self.devsetbox.insert("end", "Finished.\n") + self.devsetbox.tag_add("trace", "end -2c linestart", "end -2c") + + # This is a hack, because the x-scrollbar isn't updating its + # position right -- I'm not sure what the underlying cause is + # though. 
(This is on OS X w/ python 2.5) + self.top.after(100, self.devset_xscroll.set, 0, 0.3) + + def show_help(self, tab): + self.helpbox["state"] = "normal" + self.helpbox.delete("1.0", "end") + for (name, tabstops, text) in self.HELP: + if name == tab: + text = text.replace( + "<>", + "\n".join( + "\t%s\t%s" % item + for item in sorted( + list(self.tagset.items()), + key=lambda t_w: re.match(r"\w+", t_w[0]) + and (0, t_w[0]) + or (1, t_w[0]), + ) + ), + ) + + self.helptabs[name].config(**self._HELPTAB_FG_PARAMS) + self.helpbox.config(tabs=tabstops) + self.helpbox.insert("1.0", text + "\n" * 20) + C = "1.0 + %d chars" + for (tag, params) in self.HELP_AUTOTAG: + pattern = f"(?s)(<{tag}>)(.*?)()" + for m in re.finditer(pattern, text): + self.helpbox.tag_add("elide", C % m.start(1), C % m.end(1)) + self.helpbox.tag_add( + "tag-%s" % tag, C % m.start(2), C % m.end(2) + ) + self.helpbox.tag_add("elide", C % m.start(3), C % m.end(3)) + else: + self.helptabs[name].config(**self._HELPTAB_BG_PARAMS) + self.helpbox["state"] = "disabled" + + def _history_prev(self, *e): + self._view_history(self._history_index - 1) + return "break" + + def _history_next(self, *e): + self._view_history(self._history_index + 1) + return "break" + + def _view_history(self, index): + # Bounds & sanity checking: + index = max(0, min(len(self._history) - 1, index)) + if not self._history: + return + # Already viewing the requested history item? + if index == self._history_index: + return + # Show the requested grammar. It will get added to _history + # only if they edit it (causing self.update() to get run.) + self.grammarbox["state"] = "normal" + self.grammarbox.delete("1.0", "end") + self.grammarbox.insert("end", self._history[index][0]) + self.grammarbox.mark_set("insert", "1.0") + self._history_index = index + self._syntax_highlight_grammar(self._history[index][0]) + # Record the normalized grammar & regenerate the chunker. + self.normalized_grammar = self.normalize_grammar(self._history[index][0]) + if self.normalized_grammar: + rules = [ + RegexpChunkRule.fromstring(line) + for line in self.normalized_grammar.split("\n") + ] + else: + rules = [] + self.chunker = RegexpChunkParser(rules) + # Show the score. + self._eval_plot() + # Update the devset box + self._highlight_devset() + if self._showing_trace: + self.show_trace() + # Update the grammar label + if self._history_index < len(self._history) - 1: + self.grammarlabel["text"] = "Grammar {}/{}:".format( + self._history_index + 1, + len(self._history), + ) + else: + self.grammarlabel["text"] = "Grammar:" + + def _devset_next(self, *e): + self._devset_scroll("scroll", 1, "page") + return "break" + + def _devset_prev(self, *e): + self._devset_scroll("scroll", -1, "page") + return "break" + + def destroy(self, *e): + if self.top is None: + return + self.top.destroy() + self.top = None + + def _devset_scroll(self, command, *args): + N = 1 # size of a page -- one sentence. 
+ showing_trace = self._showing_trace + if command == "scroll" and args[1].startswith("unit"): + self.show_devset(self.devset_index + int(args[0])) + elif command == "scroll" and args[1].startswith("page"): + self.show_devset(self.devset_index + N * int(args[0])) + elif command == "moveto": + self.show_devset(int(float(args[0]) * self._devset_size.get())) + else: + assert 0, f"bad scroll command {command} {args}" + if showing_trace: + self.show_trace() + + def show_devset(self, index=None): + if index is None: + index = self.devset_index + + # Bounds checking + index = min(max(0, index), self._devset_size.get() - 1) + + if index == self.devset_index and not self._showing_trace: + return + self.devset_index = index + + self._showing_trace = False + self.trace_button["state"] = "normal" + self.devset_button["state"] = "disabled" + + # Clear the text box. + self.devsetbox["state"] = "normal" + self.devsetbox["wrap"] = "word" + self.devsetbox.delete("1.0", "end") + self.devsetlabel["text"] = "Development Set (%d/%d)" % ( + (self.devset_index + 1, self._devset_size.get()) + ) + + # Add the sentences + sample = self.devset[self.devset_index : self.devset_index + 1] + self.charnum = {} + self.linenum = {0: 1} + for sentnum, sent in enumerate(sample): + linestr = "" + for wordnum, (word, pos) in enumerate(sent.leaves()): + self.charnum[sentnum, wordnum] = len(linestr) + linestr += f"{word}/{pos} " + self.charnum[sentnum, wordnum + 1] = len(linestr) + self.devsetbox.insert("end", linestr[:-1] + "\n\n") + + # Highlight chunks in the dev set + if self.chunker is not None: + self._highlight_devset() + self.devsetbox["state"] = "disabled" + + # Update the scrollbar + first = self.devset_index / self._devset_size.get() + last = (self.devset_index + 2) / self._devset_size.get() + self.devset_scroll.set(first, last) + + def _chunks(self, tree): + chunks = set() + wordnum = 0 + for child in tree: + if isinstance(child, Tree): + if child.label() == self._chunk_label: + chunks.add((wordnum, wordnum + len(child))) + wordnum += len(child) + else: + wordnum += 1 + return chunks + + def _syntax_highlight_grammar(self, grammar): + if self.top is None: + return + self.grammarbox.tag_remove("comment", "1.0", "end") + self.grammarbox.tag_remove("angle", "1.0", "end") + self.grammarbox.tag_remove("brace", "1.0", "end") + self.grammarbox.tag_add("hangindent", "1.0", "end") + for lineno, line in enumerate(grammar.split("\n")): + if not line.strip(): + continue + m = re.match(r"(\\.|[^#])*(#.*)?", line) + comment_start = None + if m.group(2): + comment_start = m.start(2) + s = "%d.%d" % (lineno + 1, m.start(2)) + e = "%d.%d" % (lineno + 1, m.end(2)) + self.grammarbox.tag_add("comment", s, e) + for m in re.finditer("[<>{}]", line): + if comment_start is not None and m.start() >= comment_start: + break + s = "%d.%d" % (lineno + 1, m.start()) + e = "%d.%d" % (lineno + 1, m.end()) + if m.group() in "<>": + self.grammarbox.tag_add("angle", s, e) + else: + self.grammarbox.tag_add("brace", s, e) + + def _grammarcheck(self, grammar): + if self.top is None: + return + self.grammarbox.tag_remove("error", "1.0", "end") + self._grammarcheck_errs = [] + for lineno, line in enumerate(grammar.split("\n")): + line = re.sub(r"((\\.|[^#])*)(#.*)?", r"\1", line) + line = line.strip() + if line: + try: + RegexpChunkRule.fromstring(line) + except ValueError as e: + self.grammarbox.tag_add( + "error", "%s.0" % (lineno + 1), "%s.0 lineend" % (lineno + 1) + ) + self.status["text"] = "" + + def update(self, *event): + # Record when update was 
called (for grammarcheck) + if event: + self._last_keypress = time.time() + + # Read the grammar from the Text box. + self.grammar = grammar = self.grammarbox.get("1.0", "end") + + # If the grammar hasn't changed, do nothing: + normalized_grammar = self.normalize_grammar(grammar) + if normalized_grammar == self.normalized_grammar: + return + else: + self.normalized_grammar = normalized_grammar + + # If the grammar has changed, and we're looking at history, + # then stop looking at history. + if self._history_index < len(self._history) - 1: + self.grammarlabel["text"] = "Grammar:" + + self._syntax_highlight_grammar(grammar) + + # The grammar has changed; try parsing it. If it doesn't + # parse, do nothing. (flag error location?) + try: + # Note: the normalized grammar has no blank lines. + if normalized_grammar: + rules = [ + RegexpChunkRule.fromstring(line) + for line in normalized_grammar.split("\n") + ] + else: + rules = [] + except ValueError as e: + # Use the un-normalized grammar for error highlighting. + self._grammarcheck(grammar) + self.chunker = None + return + + self.chunker = RegexpChunkParser(rules) + self.grammarbox.tag_remove("error", "1.0", "end") + self.grammar_changed = time.time() + # Display the results + if self._showing_trace: + self.show_trace() + else: + self._highlight_devset() + # Start the eval demon + if not self._eval_demon_running: + self._eval_demon() + + def _highlight_devset(self, sample=None): + if sample is None: + sample = self.devset[self.devset_index : self.devset_index + 1] + + self.devsetbox.tag_remove("true-pos", "1.0", "end") + self.devsetbox.tag_remove("false-neg", "1.0", "end") + self.devsetbox.tag_remove("false-pos", "1.0", "end") + + # Run the grammar on the test cases. + for sentnum, gold_tree in enumerate(sample): + # Run the chunk parser + test_tree = self._chunkparse(gold_tree.leaves()) + # Extract gold & test chunks + gold_chunks = self._chunks(gold_tree) + test_chunks = self._chunks(test_tree) + # Compare them. + for chunk in gold_chunks.intersection(test_chunks): + self._color_chunk(sentnum, chunk, "true-pos") + for chunk in gold_chunks - test_chunks: + self._color_chunk(sentnum, chunk, "false-neg") + for chunk in test_chunks - gold_chunks: + self._color_chunk(sentnum, chunk, "false-pos") + + def _chunkparse(self, words): + try: + return self.chunker.parse(words) + except (ValueError, IndexError) as e: + # There's an error somewhere in the grammar, but we're not sure + # exactly where, so just mark the whole grammar as bad. + # E.g., this is caused by: "({})" + self.grammarbox.tag_add("error", "1.0", "end") + # Treat it as tagging nothing: + return words + + def _color_chunk(self, sentnum, chunk, tag): + start, end = chunk + self.devsetbox.tag_add( + tag, + f"{self.linenum[sentnum]}.{self.charnum[sentnum, start]}", + f"{self.linenum[sentnum]}.{self.charnum[sentnum, end] - 1}", + ) + + def reset(self): + # Clear various variables + self.chunker = None + self.grammar = None + self.normalized_grammar = None + self.grammar_changed = 0 + self._history = [] + self._history_index = 0 + # Update the on-screen display. 
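# --- illustrative sketch (not part of the vendored NLTK source) -------------
# The devset colouring in _highlight_devset reduces each chunk to a
# (start_token, end_token) span (see _chunks above) and then uses plain set
# arithmetic: the intersection gives true positives, gold-only spans are false
# negatives, and test-only spans are false positives.  The two trees below are
# made-up examples.
from nltk.tree import Tree

def chunk_spans(tree, chunk_label="NP"):
    """Return the (start, end) token spans of chunk_label subtrees."""
    spans, wordnum = set(), 0
    for child in tree:
        if isinstance(child, Tree):
            if child.label() == chunk_label:
                spans.add((wordnum, wordnum + len(child)))
            wordnum += len(child)
        else:
            wordnum += 1
    return spans

gold = Tree("S", [Tree("NP", [("the", "DT"), ("cat", "NN")]), ("sat", "VBD")])
test = Tree("S", [("the", "DT"), Tree("NP", [("cat", "NN")]), ("sat", "VBD")])

g, t = chunk_spans(gold), chunk_spans(test)
print("true-pos :", g & t)       # set()    -- no exactly matching span
print("false-neg:", g - t)       # {(0, 2)}
print("false-pos:", t - g)       # {(1, 2)}
# -----------------------------------------------------------------------------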
+ self.grammarbox.delete("1.0", "end") + self.show_devset(0) + self.update() + # self._eval_plot() + + SAVE_GRAMMAR_TEMPLATE = ( + "# Regexp Chunk Parsing Grammar\n" + "# Saved %(date)s\n" + "#\n" + "# Development set: %(devset)s\n" + "# Precision: %(precision)s\n" + "# Recall: %(recall)s\n" + "# F-score: %(fscore)s\n\n" + "%(grammar)s\n" + ) + + def save_grammar(self, filename=None): + if not filename: + ftypes = [("Chunk Gramamr", ".chunk"), ("All files", "*")] + filename = asksaveasfilename(filetypes=ftypes, defaultextension=".chunk") + if not filename: + return + if self._history and self.normalized_grammar == self.normalize_grammar( + self._history[-1][0] + ): + precision, recall, fscore = ( + "%.2f%%" % (100 * v) for v in self._history[-1][1:] + ) + elif self.chunker is None: + precision = recall = fscore = "Grammar not well formed" + else: + precision = recall = fscore = "Not finished evaluation yet" + + with open(filename, "w") as outfile: + outfile.write( + self.SAVE_GRAMMAR_TEMPLATE + % dict( + date=time.ctime(), + devset=self.devset_name, + precision=precision, + recall=recall, + fscore=fscore, + grammar=self.grammar.strip(), + ) + ) + + def load_grammar(self, filename=None): + if not filename: + ftypes = [("Chunk Gramamr", ".chunk"), ("All files", "*")] + filename = askopenfilename(filetypes=ftypes, defaultextension=".chunk") + if not filename: + return + self.grammarbox.delete("1.0", "end") + self.update() + with open(filename) as infile: + grammar = infile.read() + grammar = re.sub( + r"^\# Regexp Chunk Parsing Grammar[\s\S]*" "F-score:.*\n", "", grammar + ).lstrip() + self.grammarbox.insert("1.0", grammar) + self.update() + + def save_history(self, filename=None): + if not filename: + ftypes = [("Chunk Gramamr History", ".txt"), ("All files", "*")] + filename = asksaveasfilename(filetypes=ftypes, defaultextension=".txt") + if not filename: + return + + with open(filename, "w") as outfile: + outfile.write("# Regexp Chunk Parsing Grammar History\n") + outfile.write("# Saved %s\n" % time.ctime()) + outfile.write("# Development set: %s\n" % self.devset_name) + for i, (g, p, r, f) in enumerate(self._history): + hdr = ( + "Grammar %d/%d (precision=%.2f%%, recall=%.2f%%, " + "fscore=%.2f%%)" + % (i + 1, len(self._history), p * 100, r * 100, f * 100) + ) + outfile.write("\n%s\n" % hdr) + outfile.write("".join(" %s\n" % line for line in g.strip().split())) + + if not ( + self._history + and self.normalized_grammar + == self.normalize_grammar(self._history[-1][0]) + ): + if self.chunker is None: + outfile.write("\nCurrent Grammar (not well-formed)\n") + else: + outfile.write("\nCurrent Grammar (not evaluated)\n") + outfile.write( + "".join(" %s\n" % line for line in self.grammar.strip().split()) + ) + + def about(self, *e): + ABOUT = "NLTK RegExp Chunk Parser Application\n" + "Written by Edward Loper" + TITLE = "About: Regular Expression Chunk Parser Application" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self.top, TITLE, ABOUT) + + def set_devset_size(self, size=None): + if size is not None: + self._devset_size.set(size) + self._devset_size.set(min(len(self.devset), self._devset_size.get())) + self.show_devset(1) + self.show_devset(0) + # what about history? Evaluated at diff dev set sizes! 
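# --- illustrative sketch (not part of the vendored NLTK source) -------------
# save_grammar above writes a commented header via %-dict formatting, and
# load_grammar strips everything up to the "F-score:" line before re-inserting
# the grammar into the text box.  The scores, devset name, and grammar below
# are made-up values used only to show the round trip.
import re
import time

TEMPLATE = (
    "# Regexp Chunk Parsing Grammar\n"
    "# Saved %(date)s\n"
    "#\n"
    "# Development set: %(devset)s\n"
    "# Precision: %(precision)s\n"
    "# Recall: %(recall)s\n"
    "# F-score: %(fscore)s\n\n"
    "%(grammar)s\n"
)

saved = TEMPLATE % dict(
    date=time.ctime(),
    devset="conll2000",
    precision="81.50%",
    recall="78.20%",
    fscore="79.80%",
    grammar="{<DT>?<JJ>*<NN>}",
)
restored = re.sub(
    r"^# Regexp Chunk Parsing Grammar[\s\S]*F-score:.*\n", "", saved
).lstrip()
assert restored.strip() == "{<DT>?<JJ>*<NN>}"
# -----------------------------------------------------------------------------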
+ + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._smallfont.configure(size=min(-10, -(abs(size)) * 14 // 20)) + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self.top.mainloop(*args, **kwargs) + + +def app(): + RegexpChunkApp().mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/venv/lib/python3.10/site-packages/nltk/app/collocations_app.py b/venv/lib/python3.10/site-packages/nltk/app/collocations_app.py new file mode 100644 index 0000000000000000000000000000000000000000..19c661368fd9e96d1a4bf1a47ebfbd07a4bb3d80 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/app/collocations_app.py @@ -0,0 +1,438 @@ +# Natural Language Toolkit: Collocations Application +# Much of the GUI code is imported from concordance.py; We intend to merge these tools together +# Copyright (C) 2001-2023 NLTK Project +# Author: Sumukh Ghodke +# URL: +# For license information, see LICENSE.TXT +# + + +import queue as q +import threading +from tkinter import ( + END, + LEFT, + SUNKEN, + Button, + Frame, + IntVar, + Label, + Menu, + OptionMenu, + Scrollbar, + StringVar, + Text, + Tk, +) +from tkinter.font import Font + +from nltk.corpus import ( + alpino, + brown, + cess_cat, + cess_esp, + floresta, + indian, + mac_morpho, + machado, + nps_chat, + sinica_treebank, + treebank, +) +from nltk.probability import FreqDist +from nltk.util import in_idle + +CORPUS_LOADED_EVENT = "<>" +ERROR_LOADING_CORPUS_EVENT = "<>" +POLL_INTERVAL = 100 + +_DEFAULT = "English: Brown Corpus (Humor)" +_CORPORA = { + "Catalan: CESS-CAT Corpus": lambda: cess_cat.words(), + "English: Brown Corpus": lambda: brown.words(), + "English: Brown Corpus (Press)": lambda: brown.words( + categories=["news", "editorial", "reviews"] + ), + "English: Brown Corpus (Religion)": lambda: brown.words(categories="religion"), + "English: Brown Corpus (Learned)": lambda: brown.words(categories="learned"), + "English: Brown Corpus (Science Fiction)": lambda: brown.words( + categories="science_fiction" + ), + "English: Brown Corpus (Romance)": lambda: brown.words(categories="romance"), + "English: Brown Corpus (Humor)": lambda: brown.words(categories="humor"), + "English: NPS Chat Corpus": lambda: nps_chat.words(), + "English: Wall Street Journal Corpus": lambda: treebank.words(), + "Chinese: Sinica Corpus": lambda: sinica_treebank.words(), + "Dutch: Alpino Corpus": lambda: alpino.words(), + "Hindi: Indian Languages Corpus": lambda: indian.words(files="hindi.pos"), + "Portuguese: Floresta Corpus (Portugal)": lambda: floresta.words(), + "Portuguese: MAC-MORPHO Corpus (Brazil)": lambda: mac_morpho.words(), + "Portuguese: Machado Corpus (Brazil)": lambda: machado.words(), + "Spanish: CESS-ESP Corpus": lambda: cess_esp.words(), +} + + +class CollocationsView: + _BACKGROUND_COLOUR = "#FFF" # white + + def __init__(self): + self.queue = q.Queue() + self.model = CollocationsModel(self.queue) + self.top = Tk() + self._init_top(self.top) + self._init_menubar() + self._init_widgets(self.top) + self.load_corpus(self.model.DEFAULT_CORPUS) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def _init_top(self, top): + top.geometry("550x650+50+50") + top.title("NLTK Collocations List") + 
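# --- illustrative sketch (not part of the vendored NLTK source) -------------
# The _CORPORA registry below maps display names to zero-argument lambdas, so
# no corpus is touched at import time; the words are only read when the
# selected entry is actually called.  Minimal stand-alone version (assumes the
# Brown corpus has been fetched with nltk.download("brown")).
from nltk.corpus import brown

CORPORA = {
    "English: Brown Corpus (Humor)": lambda: brown.words(categories="humor"),
    "English: Brown Corpus (Press)": lambda: brown.words(
        categories=["news", "editorial", "reviews"]
    ),
}

def load(name):
    return CORPORA[name]()      # the corpus is actually read only here

words = load("English: Brown Corpus (Humor)")
print(len(words), list(words[:5]))
# -----------------------------------------------------------------------------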
top.bind("", self.destroy) + top.protocol("WM_DELETE_WINDOW", self.destroy) + top.minsize(550, 650) + + def _init_widgets(self, parent): + self.main_frame = Frame( + parent, dict(background=self._BACKGROUND_COLOUR, padx=1, pady=1, border=1) + ) + self._init_corpus_select(self.main_frame) + self._init_results_box(self.main_frame) + self._init_paging(self.main_frame) + self._init_status(self.main_frame) + self.main_frame.pack(fill="both", expand=True) + + def _init_corpus_select(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.var = StringVar(innerframe) + self.var.set(self.model.DEFAULT_CORPUS) + Label( + innerframe, + justify=LEFT, + text=" Corpus: ", + background=self._BACKGROUND_COLOUR, + padx=2, + pady=1, + border=0, + ).pack(side="left") + + other_corpora = list(self.model.CORPORA.keys()).remove( + self.model.DEFAULT_CORPUS + ) + om = OptionMenu( + innerframe, + self.var, + self.model.DEFAULT_CORPUS, + command=self.corpus_selected, + *self.model.non_default_corpora() + ) + om["borderwidth"] = 0 + om["highlightthickness"] = 1 + om.pack(side="left") + innerframe.pack(side="top", fill="x", anchor="n") + + def _init_status(self, parent): + self.status = Label( + parent, + justify=LEFT, + relief=SUNKEN, + background=self._BACKGROUND_COLOUR, + border=0, + padx=1, + pady=0, + ) + self.status.pack(side="top", anchor="sw") + + def _init_menubar(self): + self._result_size = IntVar(self.top) + menubar = Menu(self.top) + + filemenu = Menu(menubar, tearoff=0, borderwidth=0) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + rescntmenu = Menu(editmenu, tearoff=0) + rescntmenu.add_radiobutton( + label="20", + variable=self._result_size, + underline=0, + value=20, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="50", + variable=self._result_size, + underline=0, + value=50, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="100", + variable=self._result_size, + underline=0, + value=100, + command=self.set_result_size, + ) + rescntmenu.invoke(1) + editmenu.add_cascade(label="Result Count", underline=0, menu=rescntmenu) + + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + self.top.config(menu=menubar) + + def set_result_size(self, **kwargs): + self.model.result_count = self._result_size.get() + + def _init_results_box(self, parent): + innerframe = Frame(parent) + i1 = Frame(innerframe) + i2 = Frame(innerframe) + vscrollbar = Scrollbar(i1, borderwidth=1) + hscrollbar = Scrollbar(i2, borderwidth=1, orient="horiz") + self.results_box = Text( + i1, + font=Font(family="courier", size="16"), + state="disabled", + borderwidth=1, + yscrollcommand=vscrollbar.set, + xscrollcommand=hscrollbar.set, + wrap="none", + width="40", + height="20", + exportselection=1, + ) + self.results_box.pack(side="left", fill="both", expand=True) + vscrollbar.pack(side="left", fill="y", anchor="e") + vscrollbar.config(command=self.results_box.yview) + hscrollbar.pack(side="left", fill="x", expand=True, anchor="w") + hscrollbar.config(command=self.results_box.xview) + # there is no other way of avoiding the overlap of scrollbars while using pack layout manager!!! 
+ Label(i2, text=" ", background=self._BACKGROUND_COLOUR).pack( + side="left", anchor="e" + ) + i1.pack(side="top", fill="both", expand=True, anchor="n") + i2.pack(side="bottom", fill="x", anchor="s") + innerframe.pack(side="top", fill="both", expand=True) + + def _init_paging(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.prev = prev = Button( + innerframe, + text="Previous", + command=self.previous, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + prev.pack(side="left", anchor="center") + self.next = next = Button( + innerframe, + text="Next", + command=self.__next__, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + next.pack(side="right", anchor="center") + innerframe.pack(side="top", fill="y") + self.reset_current_page() + + def reset_current_page(self): + self.current_page = -1 + + def _poll(self): + try: + event = self.queue.get(block=False) + except q.Empty: + pass + else: + if event == CORPUS_LOADED_EVENT: + self.handle_corpus_loaded(event) + elif event == ERROR_LOADING_CORPUS_EVENT: + self.handle_error_loading_corpus(event) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def handle_error_loading_corpus(self, event): + self.status["text"] = "Error in loading " + self.var.get() + self.unfreeze_editable() + self.clear_results_box() + self.freeze_editable() + self.reset_current_page() + + def handle_corpus_loaded(self, event): + self.status["text"] = self.var.get() + " is loaded" + self.unfreeze_editable() + self.clear_results_box() + self.reset_current_page() + # self.next() + collocations = self.model.next(self.current_page + 1) + self.write_results(collocations) + self.current_page += 1 + + def corpus_selected(self, *args): + new_selection = self.var.get() + self.load_corpus(new_selection) + + def previous(self): + self.freeze_editable() + collocations = self.model.prev(self.current_page - 1) + self.current_page = self.current_page - 1 + self.clear_results_box() + self.write_results(collocations) + self.unfreeze_editable() + + def __next__(self): + self.freeze_editable() + collocations = self.model.next(self.current_page + 1) + self.clear_results_box() + self.write_results(collocations) + self.current_page += 1 + self.unfreeze_editable() + + def load_corpus(self, selection): + if self.model.selected_corpus != selection: + self.status["text"] = "Loading " + selection + "..." 
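# --- illustrative sketch (not part of the vendored NLTK source) -------------
# The loader thread never touches Tk widgets; it only puts a token on a Queue,
# and the Tk thread polls that queue every POLL_INTERVAL milliseconds via
# top.after(POLL_INTERVAL, self._poll), as in _poll above.  Headless version of
# the same handshake, with a sleep loop standing in for the Tk timer and a
# stand-in token name (the app defines its own event constants near the top of
# this file).
import queue as q
import threading
import time

CORPUS_LOADED_EVENT = "corpus_loaded"
events = q.Queue()

def load_corpus_in_background():
    time.sleep(0.2)                        # pretend to load a corpus
    events.put(CORPUS_LOADED_EVENT)

threading.Thread(target=load_corpus_in_background).start()

while True:                                # the GUI does this inside _poll()
    try:
        event = events.get(block=False)
    except q.Empty:
        time.sleep(0.05)                   # ~POLL_INTERVAL between polls
        continue
    if event == CORPUS_LOADED_EVENT:
        print("corpus loaded -> safe to update widgets from this thread")
        break
# -----------------------------------------------------------------------------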
+ self.freeze_editable() + self.model.load_corpus(selection) + + def freeze_editable(self): + self.prev["state"] = "disabled" + self.next["state"] = "disabled" + + def clear_results_box(self): + self.results_box["state"] = "normal" + self.results_box.delete("1.0", END) + self.results_box["state"] = "disabled" + + def fire_event(self, event): + # Firing an event so that rendering of widgets happen in the mainloop thread + self.top.event_generate(event, when="tail") + + def destroy(self, *e): + if self.top is None: + return + self.top.after_cancel(self.after) + self.top.destroy() + self.top = None + + def mainloop(self, *args, **kwargs): + if in_idle(): + return + self.top.mainloop(*args, **kwargs) + + def unfreeze_editable(self): + self.set_paging_button_states() + + def set_paging_button_states(self): + if self.current_page == -1 or self.current_page == 0: + self.prev["state"] = "disabled" + else: + self.prev["state"] = "normal" + if self.model.is_last_page(self.current_page): + self.next["state"] = "disabled" + else: + self.next["state"] = "normal" + + def write_results(self, results): + self.results_box["state"] = "normal" + row = 1 + for each in results: + self.results_box.insert(str(row) + ".0", each[0] + " " + each[1] + "\n") + row += 1 + self.results_box["state"] = "disabled" + + +class CollocationsModel: + def __init__(self, queue): + self.result_count = None + self.selected_corpus = None + self.collocations = None + self.CORPORA = _CORPORA + self.DEFAULT_CORPUS = _DEFAULT + self.queue = queue + self.reset_results() + + def reset_results(self): + self.result_pages = [] + self.results_returned = 0 + + def load_corpus(self, name): + self.selected_corpus = name + self.collocations = None + runner_thread = self.LoadCorpus(name, self) + runner_thread.start() + self.reset_results() + + def non_default_corpora(self): + copy = [] + copy.extend(list(self.CORPORA.keys())) + copy.remove(self.DEFAULT_CORPUS) + copy.sort() + return copy + + def is_last_page(self, number): + if number < len(self.result_pages): + return False + return self.results_returned + ( + number - len(self.result_pages) + ) * self.result_count >= len(self.collocations) + + def next(self, page): + if (len(self.result_pages) - 1) < page: + for i in range(page - (len(self.result_pages) - 1)): + self.result_pages.append( + self.collocations[ + self.results_returned : self.results_returned + + self.result_count + ] + ) + self.results_returned += self.result_count + return self.result_pages[page] + + def prev(self, page): + if page == -1: + return [] + return self.result_pages[page] + + class LoadCorpus(threading.Thread): + def __init__(self, name, model): + threading.Thread.__init__(self) + self.model, self.name = model, name + + def run(self): + try: + words = self.model.CORPORA[self.name]() + from operator import itemgetter + + text = [w for w in words if len(w) > 2] + fd = FreqDist(tuple(text[i : i + 2]) for i in range(len(text) - 1)) + vocab = FreqDist(text) + scored = [ + ((w1, w2), fd[(w1, w2)] ** 3 / (vocab[w1] * vocab[w2])) + for w1, w2 in fd + ] + scored.sort(key=itemgetter(1), reverse=True) + self.model.collocations = list(map(itemgetter(0), scored)) + self.model.queue.put(CORPUS_LOADED_EVENT) + except Exception as e: + print(e) + self.model.queue.put(ERROR_LOADING_CORPUS_EVENT) + + +# def collocations(): +# colloc_strings = [w1 + ' ' + w2 for w1, w2 in self._collocations[:num]] + + +def app(): + c = CollocationsView() + c.mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git 
a/venv/lib/python3.10/site-packages/nltk/app/concordance_app.py b/venv/lib/python3.10/site-packages/nltk/app/concordance_app.py new file mode 100644 index 0000000000000000000000000000000000000000..8bd9a991a0a969f87bf03986a915a0af18cd9b5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/app/concordance_app.py @@ -0,0 +1,709 @@ +# Natural Language Toolkit: Concordance Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sumukh Ghodke +# URL: +# For license information, see LICENSE.TXT + +import queue as q +import re +import threading +from tkinter import ( + END, + LEFT, + SUNKEN, + Button, + Entry, + Frame, + IntVar, + Label, + Menu, + OptionMenu, + Scrollbar, + StringVar, + Text, + Tk, +) +from tkinter.font import Font + +from nltk.corpus import ( + alpino, + brown, + cess_cat, + cess_esp, + floresta, + indian, + mac_morpho, + nps_chat, + sinica_treebank, + treebank, +) +from nltk.draw.util import ShowText +from nltk.util import in_idle + +WORD_OR_TAG = "[^/ ]+" +BOUNDARY = r"\b" + +CORPUS_LOADED_EVENT = "<>" +SEARCH_TERMINATED_EVENT = "<>" +SEARCH_ERROR_EVENT = "<>" +ERROR_LOADING_CORPUS_EVENT = "<>" + +POLL_INTERVAL = 50 + +# NB All corpora must be specified in a lambda expression so as not to be +# loaded when the module is imported. + +_DEFAULT = "English: Brown Corpus (Humor, simplified)" +_CORPORA = { + "Catalan: CESS-CAT Corpus (simplified)": lambda: cess_cat.tagged_sents( + tagset="universal" + ), + "English: Brown Corpus": lambda: brown.tagged_sents(), + "English: Brown Corpus (simplified)": lambda: brown.tagged_sents( + tagset="universal" + ), + "English: Brown Corpus (Press, simplified)": lambda: brown.tagged_sents( + categories=["news", "editorial", "reviews"], tagset="universal" + ), + "English: Brown Corpus (Religion, simplified)": lambda: brown.tagged_sents( + categories="religion", tagset="universal" + ), + "English: Brown Corpus (Learned, simplified)": lambda: brown.tagged_sents( + categories="learned", tagset="universal" + ), + "English: Brown Corpus (Science Fiction, simplified)": lambda: brown.tagged_sents( + categories="science_fiction", tagset="universal" + ), + "English: Brown Corpus (Romance, simplified)": lambda: brown.tagged_sents( + categories="romance", tagset="universal" + ), + "English: Brown Corpus (Humor, simplified)": lambda: brown.tagged_sents( + categories="humor", tagset="universal" + ), + "English: NPS Chat Corpus": lambda: nps_chat.tagged_posts(), + "English: NPS Chat Corpus (simplified)": lambda: nps_chat.tagged_posts( + tagset="universal" + ), + "English: Wall Street Journal Corpus": lambda: treebank.tagged_sents(), + "English: Wall Street Journal Corpus (simplified)": lambda: treebank.tagged_sents( + tagset="universal" + ), + "Chinese: Sinica Corpus": lambda: sinica_treebank.tagged_sents(), + "Chinese: Sinica Corpus (simplified)": lambda: sinica_treebank.tagged_sents( + tagset="universal" + ), + "Dutch: Alpino Corpus": lambda: alpino.tagged_sents(), + "Dutch: Alpino Corpus (simplified)": lambda: alpino.tagged_sents( + tagset="universal" + ), + "Hindi: Indian Languages Corpus": lambda: indian.tagged_sents(files="hindi.pos"), + "Hindi: Indian Languages Corpus (simplified)": lambda: indian.tagged_sents( + files="hindi.pos", tagset="universal" + ), + "Portuguese: Floresta Corpus (Portugal)": lambda: floresta.tagged_sents(), + "Portuguese: Floresta Corpus (Portugal, simplified)": lambda: floresta.tagged_sents( + tagset="universal" + ), + "Portuguese: MAC-MORPHO Corpus (Brazil)": lambda: mac_morpho.tagged_sents(), + "Portuguese: 
MAC-MORPHO Corpus (Brazil, simplified)": lambda: mac_morpho.tagged_sents( + tagset="universal" + ), + "Spanish: CESS-ESP Corpus (simplified)": lambda: cess_esp.tagged_sents( + tagset="universal" + ), +} + + +class ConcordanceSearchView: + _BACKGROUND_COLOUR = "#FFF" # white + + # Colour of highlighted results + _HIGHLIGHT_WORD_COLOUR = "#F00" # red + _HIGHLIGHT_WORD_TAG = "HL_WRD_TAG" + + _HIGHLIGHT_LABEL_COLOUR = "#C0C0C0" # dark grey + _HIGHLIGHT_LABEL_TAG = "HL_LBL_TAG" + + # Percentage of text left of the scrollbar position + _FRACTION_LEFT_TEXT = 0.30 + + def __init__(self): + self.queue = q.Queue() + self.model = ConcordanceSearchModel(self.queue) + self.top = Tk() + self._init_top(self.top) + self._init_menubar() + self._init_widgets(self.top) + self.load_corpus(self.model.DEFAULT_CORPUS) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def _init_top(self, top): + top.geometry("950x680+50+50") + top.title("NLTK Concordance Search") + top.bind("", self.destroy) + top.protocol("WM_DELETE_WINDOW", self.destroy) + top.minsize(950, 680) + + def _init_widgets(self, parent): + self.main_frame = Frame( + parent, dict(background=self._BACKGROUND_COLOUR, padx=1, pady=1, border=1) + ) + self._init_corpus_select(self.main_frame) + self._init_query_box(self.main_frame) + self._init_results_box(self.main_frame) + self._init_paging(self.main_frame) + self._init_status(self.main_frame) + self.main_frame.pack(fill="both", expand=True) + + def _init_menubar(self): + self._result_size = IntVar(self.top) + self._cntx_bf_len = IntVar(self.top) + self._cntx_af_len = IntVar(self.top) + menubar = Menu(self.top) + + filemenu = Menu(menubar, tearoff=0, borderwidth=0) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-q" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + rescntmenu = Menu(editmenu, tearoff=0) + rescntmenu.add_radiobutton( + label="20", + variable=self._result_size, + underline=0, + value=20, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="50", + variable=self._result_size, + underline=0, + value=50, + command=self.set_result_size, + ) + rescntmenu.add_radiobutton( + label="100", + variable=self._result_size, + underline=0, + value=100, + command=self.set_result_size, + ) + rescntmenu.invoke(1) + editmenu.add_cascade(label="Result Count", underline=0, menu=rescntmenu) + + cntxmenu = Menu(editmenu, tearoff=0) + cntxbfmenu = Menu(cntxmenu, tearoff=0) + cntxbfmenu.add_radiobutton( + label="60 characters", + variable=self._cntx_bf_len, + underline=0, + value=60, + command=self.set_cntx_bf_len, + ) + cntxbfmenu.add_radiobutton( + label="80 characters", + variable=self._cntx_bf_len, + underline=0, + value=80, + command=self.set_cntx_bf_len, + ) + cntxbfmenu.add_radiobutton( + label="100 characters", + variable=self._cntx_bf_len, + underline=0, + value=100, + command=self.set_cntx_bf_len, + ) + cntxbfmenu.invoke(1) + cntxmenu.add_cascade(label="Before", underline=0, menu=cntxbfmenu) + + cntxafmenu = Menu(cntxmenu, tearoff=0) + cntxafmenu.add_radiobutton( + label="70 characters", + variable=self._cntx_af_len, + underline=0, + value=70, + command=self.set_cntx_af_len, + ) + cntxafmenu.add_radiobutton( + label="90 characters", + variable=self._cntx_af_len, + underline=0, + value=90, + command=self.set_cntx_af_len, + ) + cntxafmenu.add_radiobutton( + label="110 characters", + variable=self._cntx_af_len, + underline=0, + value=110, + command=self.set_cntx_af_len, + ) 
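# --- illustrative sketch (not part of the vendored NLTK source) -------------
# The "simplified" corpus entries above read each corpus with
# tagset="universal", mapping every tag to the coarse universal set (NOUN,
# VERB, ...).  Later in this file the loader thread flattens each sentence into
# a single "word/TAG ..." string, which is what the concordance regexes search
# over.  Assumes the brown and universal_tagset resources have been downloaded.
from nltk.corpus import brown

sents = brown.tagged_sents(categories="humor", tagset="universal")
first = sents[0]
print(first[:4])                                     # list of (word, universal_tag) pairs
print(" ".join(w + "/" + t for w, t in first)[:60])  # the flattened form searched by the app
# -----------------------------------------------------------------------------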
+ cntxafmenu.invoke(1) + cntxmenu.add_cascade(label="After", underline=0, menu=cntxafmenu) + + editmenu.add_cascade(label="Context", underline=0, menu=cntxmenu) + + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + self.top.config(menu=menubar) + + def set_result_size(self, **kwargs): + self.model.result_count = self._result_size.get() + + def set_cntx_af_len(self, **kwargs): + self._char_after = self._cntx_af_len.get() + + def set_cntx_bf_len(self, **kwargs): + self._char_before = self._cntx_bf_len.get() + + def _init_corpus_select(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.var = StringVar(innerframe) + self.var.set(self.model.DEFAULT_CORPUS) + Label( + innerframe, + justify=LEFT, + text=" Corpus: ", + background=self._BACKGROUND_COLOUR, + padx=2, + pady=1, + border=0, + ).pack(side="left") + + other_corpora = list(self.model.CORPORA.keys()).remove( + self.model.DEFAULT_CORPUS + ) + om = OptionMenu( + innerframe, + self.var, + self.model.DEFAULT_CORPUS, + command=self.corpus_selected, + *self.model.non_default_corpora() + ) + om["borderwidth"] = 0 + om["highlightthickness"] = 1 + om.pack(side="left") + innerframe.pack(side="top", fill="x", anchor="n") + + def _init_status(self, parent): + self.status = Label( + parent, + justify=LEFT, + relief=SUNKEN, + background=self._BACKGROUND_COLOUR, + border=0, + padx=1, + pady=0, + ) + self.status.pack(side="top", anchor="sw") + + def _init_query_box(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + another = Frame(innerframe, background=self._BACKGROUND_COLOUR) + self.query_box = Entry(another, width=60) + self.query_box.pack(side="left", fill="x", pady=25, anchor="center") + self.search_button = Button( + another, + text="Search", + command=self.search, + borderwidth=1, + highlightthickness=1, + ) + self.search_button.pack(side="left", fill="x", pady=25, anchor="center") + self.query_box.bind("", self.search_enter_keypress_handler) + another.pack() + innerframe.pack(side="top", fill="x", anchor="n") + + def search_enter_keypress_handler(self, *event): + self.search() + + def _init_results_box(self, parent): + innerframe = Frame(parent) + i1 = Frame(innerframe) + i2 = Frame(innerframe) + vscrollbar = Scrollbar(i1, borderwidth=1) + hscrollbar = Scrollbar(i2, borderwidth=1, orient="horiz") + self.results_box = Text( + i1, + font=Font(family="courier", size="16"), + state="disabled", + borderwidth=1, + yscrollcommand=vscrollbar.set, + xscrollcommand=hscrollbar.set, + wrap="none", + width="40", + height="20", + exportselection=1, + ) + self.results_box.pack(side="left", fill="both", expand=True) + self.results_box.tag_config( + self._HIGHLIGHT_WORD_TAG, foreground=self._HIGHLIGHT_WORD_COLOUR + ) + self.results_box.tag_config( + self._HIGHLIGHT_LABEL_TAG, foreground=self._HIGHLIGHT_LABEL_COLOUR + ) + vscrollbar.pack(side="left", fill="y", anchor="e") + vscrollbar.config(command=self.results_box.yview) + hscrollbar.pack(side="left", fill="x", expand=True, anchor="w") + hscrollbar.config(command=self.results_box.xview) + # there is no other way of avoiding the overlap of scrollbars while using pack layout manager!!! 
+ Label(i2, text=" ", background=self._BACKGROUND_COLOUR).pack( + side="left", anchor="e" + ) + i1.pack(side="top", fill="both", expand=True, anchor="n") + i2.pack(side="bottom", fill="x", anchor="s") + innerframe.pack(side="top", fill="both", expand=True) + + def _init_paging(self, parent): + innerframe = Frame(parent, background=self._BACKGROUND_COLOUR) + self.prev = prev = Button( + innerframe, + text="Previous", + command=self.previous, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + prev.pack(side="left", anchor="center") + self.next = next = Button( + innerframe, + text="Next", + command=self.__next__, + width="10", + borderwidth=1, + highlightthickness=1, + state="disabled", + ) + next.pack(side="right", anchor="center") + innerframe.pack(side="top", fill="y") + self.current_page = 0 + + def previous(self): + self.clear_results_box() + self.freeze_editable() + self.model.prev(self.current_page - 1) + + def __next__(self): + self.clear_results_box() + self.freeze_editable() + self.model.next(self.current_page + 1) + + def about(self, *e): + ABOUT = "NLTK Concordance Search Demo\n" + TITLE = "About: NLTK Concordance Search Demo" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE, parent=self.main_frame).show() + except: + ShowText(self.top, TITLE, ABOUT) + + def _bind_event_handlers(self): + self.top.bind(CORPUS_LOADED_EVENT, self.handle_corpus_loaded) + self.top.bind(SEARCH_TERMINATED_EVENT, self.handle_search_terminated) + self.top.bind(SEARCH_ERROR_EVENT, self.handle_search_error) + self.top.bind(ERROR_LOADING_CORPUS_EVENT, self.handle_error_loading_corpus) + + def _poll(self): + try: + event = self.queue.get(block=False) + except q.Empty: + pass + else: + if event == CORPUS_LOADED_EVENT: + self.handle_corpus_loaded(event) + elif event == SEARCH_TERMINATED_EVENT: + self.handle_search_terminated(event) + elif event == SEARCH_ERROR_EVENT: + self.handle_search_error(event) + elif event == ERROR_LOADING_CORPUS_EVENT: + self.handle_error_loading_corpus(event) + self.after = self.top.after(POLL_INTERVAL, self._poll) + + def handle_error_loading_corpus(self, event): + self.status["text"] = "Error in loading " + self.var.get() + self.unfreeze_editable() + self.clear_all() + self.freeze_editable() + + def handle_corpus_loaded(self, event): + self.status["text"] = self.var.get() + " is loaded" + self.unfreeze_editable() + self.clear_all() + self.query_box.focus_set() + + def handle_search_terminated(self, event): + # todo: refactor the model such that it is less state sensitive + results = self.model.get_results() + self.write_results(results) + self.status["text"] = "" + if len(results) == 0: + self.status["text"] = "No results found for " + self.model.query + else: + self.current_page = self.model.last_requested_page + self.unfreeze_editable() + self.results_box.xview_moveto(self._FRACTION_LEFT_TEXT) + + def handle_search_error(self, event): + self.status["text"] = "Error in query " + self.model.query + self.unfreeze_editable() + + def corpus_selected(self, *args): + new_selection = self.var.get() + self.load_corpus(new_selection) + + def load_corpus(self, selection): + if self.model.selected_corpus != selection: + self.status["text"] = "Loading " + selection + "..." 
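# --- illustrative sketch (not part of the vendored NLTK source) -------------
# SearchCorpus.processed_query (defined further down in this file) turns each
# query term into a regex over the flattened "word/TAG" sentences: an all-caps
# term is treated as a bare tag, a term containing "/" as an explicit word/tag
# pair, and anything else as a word with any tag.  Stand-alone version of that
# rule with a made-up sentence:
import re

WORD_OR_TAG = "[^/ ]+"
BOUNDARY = r"\b"

def term_to_regex(term):
    term = re.sub(r"\.", r"[^/ ]", term)             # "." matches any single character
    if re.match("[A-Z]+$", term):                    # bare tag, e.g. "NN"
        return BOUNDARY + WORD_OR_TAG + "/" + term + BOUNDARY
    elif "/" in term:                                # explicit word/tag pair
        return BOUNDARY + term + BOUNDARY
    else:                                            # bare word, any tag
        return BOUNDARY + term + "/" + WORD_OR_TAG + BOUNDARY

query = " ".join(term_to_regex(t) for t in "the NN".split())
sent = "the/DT dog/NN barked/VBD"
m = re.search(query, sent)
print(query)
print(m.group() if m else None)                      # "the/DT dog/NN"
# -----------------------------------------------------------------------------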
+ self.freeze_editable() + self.model.load_corpus(selection) + + def search(self): + self.current_page = 0 + self.clear_results_box() + self.model.reset_results() + query = self.query_box.get() + if len(query.strip()) == 0: + return + self.status["text"] = "Searching for " + query + self.freeze_editable() + self.model.search(query, self.current_page + 1) + + def write_results(self, results): + self.results_box["state"] = "normal" + row = 1 + for each in results: + sent, pos1, pos2 = each[0].strip(), each[1], each[2] + if len(sent) != 0: + if pos1 < self._char_before: + sent, pos1, pos2 = self.pad(sent, pos1, pos2) + sentence = sent[pos1 - self._char_before : pos1 + self._char_after] + if not row == len(results): + sentence += "\n" + self.results_box.insert(str(row) + ".0", sentence) + word_markers, label_markers = self.words_and_labels(sent, pos1, pos2) + for marker in word_markers: + self.results_box.tag_add( + self._HIGHLIGHT_WORD_TAG, + str(row) + "." + str(marker[0]), + str(row) + "." + str(marker[1]), + ) + for marker in label_markers: + self.results_box.tag_add( + self._HIGHLIGHT_LABEL_TAG, + str(row) + "." + str(marker[0]), + str(row) + "." + str(marker[1]), + ) + row += 1 + self.results_box["state"] = "disabled" + + def words_and_labels(self, sentence, pos1, pos2): + search_exp = sentence[pos1:pos2] + words, labels = [], [] + labeled_words = search_exp.split(" ") + index = 0 + for each in labeled_words: + if each == "": + index += 1 + else: + word, label = each.split("/") + words.append( + (self._char_before + index, self._char_before + index + len(word)) + ) + index += len(word) + 1 + labels.append( + (self._char_before + index, self._char_before + index + len(label)) + ) + index += len(label) + index += 1 + return words, labels + + def pad(self, sent, hstart, hend): + if hstart >= self._char_before: + return sent, hstart, hend + d = self._char_before - hstart + sent = "".join([" "] * d) + sent + return sent, hstart + d, hend + d + + def destroy(self, *e): + if self.top is None: + return + self.top.after_cancel(self.after) + self.top.destroy() + self.top = None + + def clear_all(self): + self.query_box.delete(0, END) + self.model.reset_query() + self.clear_results_box() + + def clear_results_box(self): + self.results_box["state"] = "normal" + self.results_box.delete("1.0", END) + self.results_box["state"] = "disabled" + + def freeze_editable(self): + self.query_box["state"] = "disabled" + self.search_button["state"] = "disabled" + self.prev["state"] = "disabled" + self.next["state"] = "disabled" + + def unfreeze_editable(self): + self.query_box["state"] = "normal" + self.search_button["state"] = "normal" + self.set_paging_button_states() + + def set_paging_button_states(self): + if self.current_page == 0 or self.current_page == 1: + self.prev["state"] = "disabled" + else: + self.prev["state"] = "normal" + if self.model.has_more_pages(self.current_page): + self.next["state"] = "normal" + else: + self.next["state"] = "disabled" + + def fire_event(self, event): + # Firing an event so that rendering of widgets happen in the mainloop thread + self.top.event_generate(event, when="tail") + + def mainloop(self, *args, **kwargs): + if in_idle(): + return + self.top.mainloop(*args, **kwargs) + + +class ConcordanceSearchModel: + def __init__(self, queue): + self.queue = queue + self.CORPORA = _CORPORA + self.DEFAULT_CORPUS = _DEFAULT + self.selected_corpus = None + self.reset_query() + self.reset_results() + self.result_count = None + self.last_sent_searched = 0 + + def 
non_default_corpora(self): + copy = [] + copy.extend(list(self.CORPORA.keys())) + copy.remove(self.DEFAULT_CORPUS) + copy.sort() + return copy + + def load_corpus(self, name): + self.selected_corpus = name + self.tagged_sents = [] + runner_thread = self.LoadCorpus(name, self) + runner_thread.start() + + def search(self, query, page): + self.query = query + self.last_requested_page = page + self.SearchCorpus(self, page, self.result_count).start() + + def next(self, page): + self.last_requested_page = page + if len(self.results) < page: + self.search(self.query, page) + else: + self.queue.put(SEARCH_TERMINATED_EVENT) + + def prev(self, page): + self.last_requested_page = page + self.queue.put(SEARCH_TERMINATED_EVENT) + + def reset_results(self): + self.last_sent_searched = 0 + self.results = [] + self.last_page = None + + def reset_query(self): + self.query = None + + def set_results(self, page, resultset): + self.results.insert(page - 1, resultset) + + def get_results(self): + return self.results[self.last_requested_page - 1] + + def has_more_pages(self, page): + if self.results == [] or self.results[0] == []: + return False + if self.last_page is None: + return True + return page < self.last_page + + class LoadCorpus(threading.Thread): + def __init__(self, name, model): + threading.Thread.__init__(self) + self.model, self.name = model, name + + def run(self): + try: + ts = self.model.CORPORA[self.name]() + self.model.tagged_sents = [ + " ".join(w + "/" + t for (w, t) in sent) for sent in ts + ] + self.model.queue.put(CORPUS_LOADED_EVENT) + except Exception as e: + print(e) + self.model.queue.put(ERROR_LOADING_CORPUS_EVENT) + + class SearchCorpus(threading.Thread): + def __init__(self, model, page, count): + self.model, self.count, self.page = model, count, page + threading.Thread.__init__(self) + + def run(self): + q = self.processed_query() + sent_pos, i, sent_count = [], 0, 0 + for sent in self.model.tagged_sents[self.model.last_sent_searched :]: + try: + m = re.search(q, sent) + except re.error: + self.model.reset_results() + self.model.queue.put(SEARCH_ERROR_EVENT) + return + if m: + sent_pos.append((sent, m.start(), m.end())) + i += 1 + if i > self.count: + self.model.last_sent_searched += sent_count - 1 + break + sent_count += 1 + if self.count >= len(sent_pos): + self.model.last_sent_searched += sent_count - 1 + self.model.last_page = self.page + self.model.set_results(self.page, sent_pos) + else: + self.model.set_results(self.page, sent_pos[:-1]) + self.model.queue.put(SEARCH_TERMINATED_EVENT) + + def processed_query(self): + new = [] + for term in self.model.query.split(): + term = re.sub(r"\.", r"[^/ ]", term) + if re.match("[A-Z]+$", term): + new.append(BOUNDARY + WORD_OR_TAG + "/" + term + BOUNDARY) + elif "/" in term: + new.append(BOUNDARY + term + BOUNDARY) + else: + new.append(BOUNDARY + term + "/" + WORD_OR_TAG + BOUNDARY) + return " ".join(new) + + +def app(): + d = ConcordanceSearchView() + d.mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/venv/lib/python3.10/site-packages/nltk/app/nemo_app.py b/venv/lib/python3.10/site-packages/nltk/app/nemo_app.py new file mode 100644 index 0000000000000000000000000000000000000000..df0ceb1be59e40bb48289f4f1411653789ca7a17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/app/nemo_app.py @@ -0,0 +1,163 @@ +# Finding (and Replacing) Nemo, Version 1.1, Aristide Grange 2006/06/06 +# https://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496783 + +""" +Finding (and Replacing) Nemo + +Instant 
Regular Expressions +Created by Aristide Grange +""" +import itertools +import re +from tkinter import SEL_FIRST, SEL_LAST, Frame, Label, PhotoImage, Scrollbar, Text, Tk + +windowTitle = "Finding (and Replacing) Nemo" +initialFind = r"n(.*?)e(.*?)m(.*?)o" +initialRepl = r"M\1A\2K\3I" +initialText = """\ +Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. +Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. +Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. +Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. +""" +images = { + "FIND": "R0lGODlhMAAiAPcAMf/////37//35//n1v97Off///f/9/f37/fexvfOvfeEQvd7QvdrQvdrKfdaKfdSMfdSIe/v9+/v7+/v5+/n3u/e1u/Wxu/Gre+1lO+tnO+thO+Ua+97Y+97Oe97Me9rOe9rMe9jOe9jMe9jIe9aMefe5+fe3ufezuece+eEWudzQudaIedSIedKMedKIedCKedCId7e1t7Wzt7Oxt7Gvd69vd69rd61pd6ljN6UjN6Ue96EY95zY95rUt5rQt5jMd5SId5KIdbn59be3tbGztbGvda1rdaEa9Z7a9Z7WtZzQtZzOdZzMdZjMdZaQtZSOdZSMdZKMdZCKdZCGNY5Ic7W1s7Oxs7Gtc69xs69tc69rc6tpc6llM6clM6cjM6Ue86EY85zWs5rSs5SKc5KKc5KGMa1tcatrcalvcalnMaUpcZ7c8ZzMcZrUsZrOcZrMcZaQsZSOcZSMcZKMcZCKcZCGMYxIcYxGL3Gxr21tb21rb2lpb2crb2cjL2UnL2UlL2UhL2Ec717Wr17Ur1zWr1rMb1jUr1KMb1KIb1CIb0xGLWlrbWlpbWcnLWEe7V7c7VzY7VzUrVSKbVKMbVCMbVCIbU5KbUxIbUxEK2lta2lpa2clK2UjK2MnK2MlK2Ea617e61za61rY61rMa1jSq1aUq1aSq1SQq1KKa0xEKWlnKWcnKWUnKWUhKWMjKWEa6Vza6VrWqVjMaVaUqVaKaVSMaVCMaU5KaUxIaUxGJyclJyMe5yElJyEhJx7e5x7c5xrOZxaQpxSOZxKQpw5IZSMhJSEjJR7c5Rre5RrY5RrUpRSQpRSKZRCOZRCKZQxKZQxIYyEhIx7hIxza4xzY4xrc4xjUoxaa4xaUoxSSoxKQoxCMYw5GIR7c4Rzc4Rre4RjY4RjWoRaa4RSWoRSUoRSMYRKQoRCOYQ5KYQxIXtra3taY3taSntKOXtCMXtCKXNCMXM5MXMxIWtSUmtKSmtKQmtCOWs5MWs5KWs5IWNCKWMxIVIxKUIQCDkhGAAAACH+AS4ALAAAAAAwACIAAAj/AAEIHEiwoMGDCBMqXMiwoUOHMqxIeEiRoZVp7cpZ29WrF4WKIAd208dGAQEVbiTVChUjZMU9+pYQmPmBZpxgvVw+nDdKwQICNVcIXQEkTgKdDdUJ+/nggVAXK1xI3TEA6UIr2uJ8iBqka1cXXTlkqGoVYRZ7iLyqBSs0iiEtZQVKiDGxBI1u3NR6lUpGDKg8MSgEQCphU7Z22vhg0dILXRCpYLuSCcYJT4wqXASBQaBzU7klHxC127OHD7ZDJFpERqRt0x5OnwQpmZmCLEhrbgg4WIHO1RY+nbQ9WRGEDJlmnXwJ+9FBgXMCIzYMVijBBgYMFxIMqJBMSc0Ht7qh/+Gjpte2rnYsYeNlasWIBgQ6yCewIoPCCp/cyP/wgUGbXVu0QcADZNBDnh98gHMLGXYQUw02w61QU3wdbNWDbQVVIIhMMwFF1DaZiPLBAy7E04kafrjSizaK3LFNNc0AAYRQDsAHHQlJ2IDQJ2zE1+EKDjiAijShkECCC8Qgw4cr7ZgyzC2WaHPNLWWoNeNWPiRAw0QFWQFMhz8C+QQ20yAiVSrY+MGOJCsccsst2GCzoHFxxEGGC+8hgs0MB2kyCpgzrUDCbs1Es41UdtATHFFkWELMOtsoQsYcgvRRQw5RSDgGOjZMR1AvPQIq6KCo9AKOJWDd48owQlHR4DXEKP9iyRrK+DNNBTu4RwIPFeTAGUG7hAomkA84gEg1m6ADljy9PBKGGJY4ig0xlsTBRSn98FOFDUC8pwQOPkgHbCGAzhTkA850s0c7j6Hjix9+gBIrMXLeAccWXUCyiRBcBEECdEJ98KtAqtBCYQc/OvDENnl4gYpUxISCIjjzylkGGV9okYUVNogRhAOBuuAEhjG08wOgDYzAgA5bCjIoCe5uwUk80RKTTSppPREGGGCIISOQ9AXBg6cC6WIywvCpoMHAocRBwhP4bHLFLujYkV42xNxBRhAyGrc113EgYtRBerDDDHMoDCyQEL5sE083EkgwQyBhxGFHMM206DUixGxmE0wssbQjCQ4JCaFKFwgQTVAVVhQUwAVPIFJKrHfYYRwi6OCDzzuIJIFhXAD0EccPsYRiSyqKSDpFcWSMIcZRoBMkQyA2BGZDIKSYcggih8TRRg4VxM5QABVYYLxgwiev/PLMCxQQADs=", + "find": 
"R0lGODlhMAAiAPQAMf////f39+/v7+fn597e3tbW1s7OzsbGxr29vbW1ta2traWlpZycnJSUlIyMjISEhHt7e3Nzc2tra2NjY1paWlJSUkpKSkJCQjk5OSkpKRgYGAAAAAAAAAAAAAAAAAAAACH+AS4ALAAAAAAwACIAAAX/ICCOZGmeaKquY2AGLiuvMCAUBuHWc48Kh0iFInEYCb4kSQCxPBiMxkMigRQEgJiSFVBYHNGG0RiZOHjblWAiiY4fkDhEYoBp06dAWfyAQyKAgAwDaHgnB0RwgYASgQ0IhDuGJDAIFhMRVFSLEX8QCJJ4AQM5AgQHTZqqjBAOCQQEkWkCDRMUFQsICQ4Vm5maEwwHOAsPDTpKMAsUDlO4CssTcb+2DAp8YGCyNFoCEsZwFQ3QDRTTVBRS0g1QbgsCd5QAAwgIBwYFAwStzQ8UEdCKVchky0yVBw7YuXkAKt4IAg74vXHVagqFBRgXSCAyYWAVCH0SNhDTitCJfSL5/4RbAPKPhQYYjVCYYAvCP0BxEDaD8CheAAHNwqh8MMGPSwgLeJWhwHSjqkYI+xg4MMCEgQjtRvZ7UAYCpghMF7CxONOWJkYR+rCpY4JlVpVxKDwYWEactKW9mhYRtqCTgwgWEMArERSK1j5q//6T8KXonFsShpiJkAECgQYVjykooCVA0JGHEWNiYCHThTFeb3UkoiCCBgwGEKQ1kuAJlhFwhA71h5SukwUM5qqeCSGBgicEWkfNiWSERtBad4JNIBaQBaQah1ToyGZBAnsIuIJs1qnqiAIVjIE2gnAB1T5x0icgzXT79ipgMOOEH6HBbREBMJCeGEY08IoLAkzB1YYFwjxwSUGSNULQJnNUwRYlCcyEkALIxECAP9cNMMABYpRhy3ZsSLDaR70oUAiABGCkAxowCGCAAfDYIQACXoElGRsdXWDBdg2Y90IWktDYGYAB9PWHP0PMdFZaF07SQgAFNDAMAQg0QA1UC8xoZQl22JGFPgWkOUCOL1pZQyhjxinnnCWEAAA7", + "REPL": "R0lGODlhMAAjAPcAMf/////3//+lOf+UKf+MEPf///f39/f35/fv7/ecQvecOfecKfeUIfeUGPeUEPeUCPeMAO/37+/v9+/v3u/n3u/n1u+9jO+9c++1hO+ta++tY++tWu+tUu+tSu+lUu+lQu+lMe+UMe+UKe+UGO+UEO+UAO+MCOfv5+fvxufn7+fn5+fnzue9lOe9c+e1jOe1e+e1c+e1a+etWuetUuelQuecOeeUUueUCN7e597e3t7e1t7ezt7evd7Wzt7Oxt7Ovd7Otd7Opd7OnN7Gtd7Gpd69lN61hN6ta96lStbextberdbW3tbWztbWxtbOvdbOrda1hNalUtaECM7W1s7Ozs7Oxs7Otc7Gxs7Gvc69tc69rc69pc61jM6lc8bWlMbOvcbGxsbGpca9tca9pca1nMaMAL3OhL3Gtb21vb21tb2tpb2tnL2tlLW9tbW9pbW9e7W1pbWtjLWcKa21nK2tra2tnK2tlK2lpa2llK2ljK2le6WlnKWljKWUe6WUc6WUY5y1QpyclJycjJychJyUc5yMY5StY5SUe5SMhJSMe5SMc5SMWpSEa5SESoyUe4yMhIyEY4SlKYScWoSMe4SEe4SEa4R7c4R7Y3uMY3uEe3t7e3t7c3tza3tzY3trKXtjIXOcAHOUMXOEY3Nzc3NzWnNrSmulCGuUMWuMGGtzWmtrY2taMWtaGGOUOWOMAGNzUmNjWmNjSmNaUmNaQmNaOWNaIWNSCFqcAFpjUlpSMVpSIVpSEFpKKVKMAFJSUlJSSlJSMVJKMVJKGFJKAFI5CEqUAEqEAEpzQkpKIUpCQkpCGEpCAEo5EEoxAEJjOUJCOUJCAEI5IUIxADl7ADlaITlCOTkxMTkxKTkxEDkhADFzADFrGDE5OTExADEpEClrCCkxKSkpKSkpISkpACkhCCkhACkYACFzACFrACEhCCEYGBhjEBhjABghABgYCBgYABgQEBgQABAQABAIAAhjAAhSAAhKAAgIEAgICABaAABCAAAhAAAQAAAIAAAAAAAAACH+AS4ALAAAAAAwACMAAAj/AAEIHEiwoMGDCBMqXMiwocOHAA4cgEixIIIJO3JMmAjADIqKFU/8MHIkg5EgYXx4iaTkI0iHE6wE2TCggYILQayEAgXIy8uGCKz8sDCAQAMRG3iEcXULlJkJPwli3OFjh9UdYYLE6NBhA04UXHoVA2XoTZgfPKBWlOBDphAWOdfMcfMDLloeO3hIMjbWVCQ5Fn6E2UFxgpsgFjYIEBADrZU6luqEEfqjTqpt54z1uuWqTIcgWAk7PECGzIUQDRosDmxlUrVJkwQJkqVuX71v06YZcyUlROAdbnLAJKPFyAYFAhoMwFlnEh0rWkpz8raPHm7dqKKc/KFFkBUrVn1M/ziBcEIeLUEQI8/AYk0i9Be4sqjsrN66c9/OnbobhpR3HkIUoZ0WVnBE0AGLFKKFD0HAFUQe77HQgQI1hRBDEHMcY0899bBzihZuCPILJD8EccEGGzwAQhFaUHHQH82sUkgeNHISDBk8WCCCcsqFUEQWmOyzjz3sUGNNOO5Y48YOEgowAAQhnBScQV00k82V47jzjy9CXZBcjziFoco//4CDiSOyhPMPLkJZkEBqJmRQxA9uZGEQD8Ncmc044/zzDF2IZQBCCDYE8QMZz/iiCSx0neHGI7BIhhhNn+1gxRpokEcQAp7seWU7/PwTyxqG/iCEEVzQmUombnDRxRExzP9nBR2PCKLFD3UJwcMPa/SRqUGNWJmNOVn+M44ukMRB4KGcWDNLVhuUMEIJAlzwA3DJBHMJIXm4sQYhqyxCRQQGLSIsn1qac2UzysQSyzX/hLMGD0F0IMCODYAQBA9W/PKPOcRiw0wzwxTiokF9dLMnuv/Mo+fCZF7jBr0xbDDCACWEYKgb1vzjDp/jZNOMLX0IZxAKq2TZTjtaOjwOsXyG+s8sZJTIQsUdIGHoJPf8w487QI/TDSt5mGwQFZxc406o8HiDJchk/ltLHpSlJwSvz5DpTjvmuGNOM57koelBOaAhiCaaPBLL0wwbm003peRBnBZqJMJL1ECz/HXYYx/NdAIOOVCxQyLorswymU93o0wuwfAiTDNR/xz0MLXU0XdCE+UwSTRZAq2lsSATu+4wkGvt+TjNzPLrQyegAUku2Hij5cd8LhxyM8QIg4w18HgcdC6BTBFSDmfQqsovttveDcG7lFLHI75cE841sARCxeWsnxC4G9HADPK6ywzDCRqBo0EHHWhMgT1IJzziNci1N7PMKnSYfML96/90AiJKey/0KtbLX1QK0rrNnQ541xugQ7SHhkXBghN0SKACWRc4KlAhBwKcIOYymJCAAAA7", + "repl": 
"R0lGODlhMAAjAPQAMf////f39+/v7+fn597e3tbW1s7OzsbGxr29vbW1ta2traWlpZycnJSUlIyMjISEhHt7e3Nzc2tra2NjY1paWlJSUkpKSkJCQjk5OTExMSkpKSEhIRgYGBAQEAgICAAAACH+AS4ALAAAAAAwACMAAAX/ICCOZGmeaKqubOu+gCDANBkIQ1EMQhAghFptYEAkEgjEwXBo7ISvweGgWCwUysPjwTgEoCafTySYIhYMxgLBjEQgCULvCw0QdAZdoVhUIJUFChISEAxYeQM1N1OMTAp+UwZ5eA4TEhFbDWYFdC4ECVMJjwl5BwsQa0umEhUVlhESDgqlBp0rAn5nVpBMDxeZDRQbHBgWFBSWDgtLBnFjKwRYCI9VqQsPs0YKEcMXFq0UEalFDWx4BAO2IwPjppAKDkrTWKYUGd7fEJJFEZpM00cOzCgh4EE8SaoWxKNixQooBRMyZMBwAYIRBhUgLDGS4MoBJeoANMhAgQsaCRZm/5lqaCUJhA4cNHjDoKEDBlJUHqkBlYBTiQUZNGjYMMxDhY3VWk6R4MEDBoMUak5AqoYBqANIBo4wcGGDUKIeLlzVZmWJggsVIkwAZaQSA3kdZzlKkIiEAAlDvW5oOkEBs488JTw44oeUIwdvVTFTUK7uiAAPgubt8GFDhQepqETAQCFU1UMGzlqAgFhUsAcCS0AO6lUDhw8xNRSbENGDhgWSHjWUe6ACbKITizmopZoBa6KvOwj9uuHDhwxyj3xekgDDhw5EvWKo0IB4iQLCOCC/njc7ZQ8UeGvza+ABZZgcxJNc4FO1gc0cOsCUrHevc8tdIMTIAhc4F198G2Qwwd8CBIQUAwEINABBBJUwR9R5wElgVRLwWODBBx4cGB8GEzDQIAo33CGJA8gh+JoH/clUgQU0YvDhdfmJdwEFC6Sjgg8yEPAABsPkh2F22cl2AQbn6QdTghTQ5eAJAQyQAAQV0MSBB9gRVZ4GE1mw5JZOAmiAVi1UWcAZDrDyZXYTeaOhA/bIVuIBPtKQ4h7ViYekUPdcEAEbzTzCRp5CADmAAwj+ORGPBcgwAAHo9ABGCYtm0ChwFHShlRiXhmHlkAcCiOeUodqQw5W0oXLAiamy4MOkjOyAaqxUymApDCEAADs=", +} +colors = ["#FF7B39", "#80F121"] +emphColors = ["#DAFC33", "#F42548"] +fieldParams = { + "height": 3, + "width": 70, + "font": ("monaco", 14), + "highlightthickness": 0, + "borderwidth": 0, + "background": "white", +} +textParams = { + "bg": "#F7E0D4", + "fg": "#2321F1", + "highlightthickness": 0, + "width": 1, + "height": 10, + "font": ("verdana", 16), + "wrap": "word", +} + + +class Zone: + def __init__(self, image, initialField, initialText): + frm = Frame(root) + frm.config(background="white") + self.image = PhotoImage(format="gif", data=images[image.upper()]) + self.imageDimmed = PhotoImage(format="gif", data=images[image]) + self.img = Label(frm) + self.img.config(borderwidth=0) + self.img.pack(side="left") + self.fld = Text(frm, **fieldParams) + self.initScrollText(frm, self.fld, initialField) + frm = Frame(root) + self.txt = Text(frm, **textParams) + self.initScrollText(frm, self.txt, initialText) + for i in range(2): + self.txt.tag_config(colors[i], background=colors[i]) + self.txt.tag_config("emph" + colors[i], foreground=emphColors[i]) + + def initScrollText(self, frm, txt, contents): + scl = Scrollbar(frm) + scl.config(command=txt.yview) + scl.pack(side="right", fill="y") + txt.pack(side="left", expand=True, fill="x") + txt.config(yscrollcommand=scl.set) + txt.insert("1.0", contents) + frm.pack(fill="x") + Frame(height=2, bd=1, relief="ridge").pack(fill="x") + + def refresh(self): + self.colorCycle = itertools.cycle(colors) + try: + self.substitute() + self.img.config(image=self.image) + except re.error: + self.img.config(image=self.imageDimmed) + + +class FindZone(Zone): + def addTags(self, m): + color = next(self.colorCycle) + self.txt.tag_add(color, "1.0+%sc" % m.start(), "1.0+%sc" % m.end()) + try: + self.txt.tag_add( + "emph" + color, "1.0+%sc" % m.start("emph"), "1.0+%sc" % m.end("emph") + ) + except: + pass + + def substitute(self, *args): + for color in colors: + self.txt.tag_remove(color, "1.0", "end") + self.txt.tag_remove("emph" + color, "1.0", "end") + self.rex = re.compile("") # default value in case of malformed regexp + self.rex = re.compile(self.fld.get("1.0", "end")[:-1], re.MULTILINE) + try: + re.compile("(?P%s)" % self.fld.get(SEL_FIRST, SEL_LAST)) + self.rexSel = re.compile( + "%s(?P%s)%s" + % ( + self.fld.get("1.0", SEL_FIRST), + self.fld.get(SEL_FIRST, SEL_LAST), + self.fld.get(SEL_LAST, "end")[:-1], + ), + 
re.MULTILINE, + ) + except: + self.rexSel = self.rex + self.rexSel.sub(self.addTags, self.txt.get("1.0", "end")) + + +class ReplaceZone(Zone): + def addTags(self, m): + s = sz.rex.sub(self.repl, m.group()) + self.txt.delete( + "1.0+%sc" % (m.start() + self.diff), "1.0+%sc" % (m.end() + self.diff) + ) + self.txt.insert("1.0+%sc" % (m.start() + self.diff), s, next(self.colorCycle)) + self.diff += len(s) - (m.end() - m.start()) + + def substitute(self): + self.txt.delete("1.0", "end") + self.txt.insert("1.0", sz.txt.get("1.0", "end")[:-1]) + self.diff = 0 + self.repl = rex0.sub(r"\\g<\1>", self.fld.get("1.0", "end")[:-1]) + sz.rex.sub(self.addTags, sz.txt.get("1.0", "end")[:-1]) + + +def launchRefresh(_): + sz.fld.after_idle(sz.refresh) + rz.fld.after_idle(rz.refresh) + + +def app(): + global root, sz, rz, rex0 + root = Tk() + root.resizable(height=False, width=True) + root.title(windowTitle) + root.minsize(width=250, height=0) + sz = FindZone("find", initialFind, initialText) + sz.fld.bind("", launchRefresh) + sz.fld.bind("", launchRefresh) + sz.fld.bind("", launchRefresh) + sz.rexSel = re.compile("") + rz = ReplaceZone("repl", initialRepl, "") + rex0 = re.compile(r"(?", launchRefresh) + launchRefresh(None) + root.mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/venv/lib/python3.10/site-packages/nltk/app/rdparser_app.py b/venv/lib/python3.10/site-packages/nltk/app/rdparser_app.py new file mode 100644 index 0000000000000000000000000000000000000000..16de5a442659171763da4b4d19e9f56ef9db6277 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/app/rdparser_app.py @@ -0,0 +1,1052 @@ +# Natural Language Toolkit: Recursive Descent Parser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A graphical tool for exploring the recursive descent parser. + +The recursive descent parser maintains a tree, which records the +structure of the portion of the text that has been parsed. It uses +CFG productions to expand the fringe of the tree, and matches its +leaves against the text. Initially, the tree contains the start +symbol ("S"). It is shown in the main canvas, to the right of the +list of available expansions. + +The parser builds up a tree structure for the text using three +operations: + + - "expand" uses a CFG production to add children to a node on the + fringe of the tree. + - "match" compares a leaf in the tree to a text token. + - "backtrack" returns the tree to its state before the most recent + expand or match operation. + +The parser maintains a list of tree locations called a "frontier" to +remember which nodes have not yet been expanded and which leaves have +not yet been matched against the text. The leftmost frontier node is +shown in green, and the other frontier nodes are shown in blue. The +parser always performs expand and match operations on the leftmost +element of the frontier. + +You can control the parser's operation by using the "expand," "match," +and "backtrack" buttons; or you can use the "step" button to let the +parser automatically decide which operation to apply. The parser uses +the following rules to decide which operation to apply: + + - If the leftmost frontier element is a token, try matching it. + - If the leftmost frontier element is a node, try expanding it with + the first untried expansion. + - Otherwise, backtrack. + +The "expand" button applies the untried expansion whose CFG production +is listed earliest in the grammar. 
To manually choose which expansion +to apply, click on a CFG production from the list of available +expansions, on the left side of the main window. + +The "autostep" button will let the parser continue applying +applications to the tree until it reaches a complete parse. You can +cancel an autostep in progress at any time by clicking on the +"autostep" button again. + +Keyboard Shortcuts:: + [Space]\t Perform the next expand, match, or backtrack operation + [a]\t Step through operations until the next complete parse + [e]\t Perform an expand operation + [m]\t Perform a match operation + [b]\t Perform a backtrack operation + [Delete]\t Reset the parser + [g]\t Show/hide available expansions list + [h]\t Help + [Ctrl-p]\t Print + [q]\t Quit +""" + +from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk +from tkinter.font import Font + +from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment +from nltk.draw.util import CanvasFrame, EntryDialog, ShowText, TextWidget +from nltk.parse import SteppingRecursiveDescentParser +from nltk.tree import Tree +from nltk.util import in_idle + + +class RecursiveDescentApp: + """ + A graphical tool for exploring the recursive descent parser. The tool + displays the parser's tree and the remaining text, and allows the + user to control the parser's operation. In particular, the user + can expand subtrees on the frontier, match tokens on the frontier + against the text, and backtrack. A "step" button simply steps + through the parsing process, performing the operations that + ``RecursiveDescentParser`` would use. + """ + + def __init__(self, grammar, sent, trace=0): + self._sent = sent + self._parser = SteppingRecursiveDescentParser(grammar, trace) + + # Set up the main window. + self._top = Tk() + self._top.title("Recursive Descent Parser Application") + + # Set up key bindings. + self._init_bindings() + + # Initialize the fonts. + self._init_fonts(self._top) + + # Animations. animating_lock is a lock to prevent the demo + # from performing new operations while it's animating. + self._animation_frames = IntVar(self._top) + self._animation_frames.set(5) + self._animating_lock = 0 + self._autostep = 0 + + # The user can hide the grammar. + self._show_grammar = IntVar(self._top) + self._show_grammar.set(1) + + # Create the basic frames. + self._init_menubar(self._top) + self._init_buttons(self._top) + self._init_feedback(self._top) + self._init_grammar(self._top) + self._init_canvas(self._top) + + # Initialize the parser. + self._parser.initialize(self._sent) + + # Resize callback + self._canvas.bind("", self._configure) + + ######################################### + ## Initialization Helpers + ######################################### + + def _init_fonts(self, root): + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(root) + self._size.set(self._sysfont.cget("size")) + + self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) + self._font = Font(family="helvetica", size=self._size.get()) + if self._size.get() < 0: + big = self._size.get() - 2 + else: + big = self._size.get() + 2 + self._bigfont = Font(family="helvetica", weight="bold", size=big) + + def _init_grammar(self, parent): + # Grammar view. 
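# --- illustrative sketch (not part of the vendored NLTK source) -------------
# RecursiveDescentApp drives a SteppingRecursiveDescentParser (created in
# __init__ above); the same expand and match operations can be exercised
# headlessly.  The toy grammar and sentence are assumptions for the example,
# and the commented results are what the standard NLTK API is expected to
# return.
from nltk import CFG
from nltk.parse import SteppingRecursiveDescentParser

grammar = CFG.fromstring("""
    S -> NP VP
    NP -> 'the' N
    VP -> V NP
    N -> 'dog' | 'cat'
    V -> 'saw'
""")
parser = SteppingRecursiveDescentParser(grammar)
parser.initialize("the dog saw the cat".split())

print(parser.frontier())        # [()]           -- only the root S is unexpanded
print(parser.expand())          # S -> NP VP     -- first untried expansion
print(parser.expand())          # NP -> 'the' N
print(parser.match())           # 'the'          -- leaf matched against the text
print(parser.remaining_text())  # ['dog', 'saw', 'the', 'cat']
# -----------------------------------------------------------------------------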
+ self._prodframe = listframe = Frame(parent) + self._prodframe.pack(fill="both", side="left", padx=2) + self._prodlist_label = Label( + self._prodframe, font=self._boldfont, text="Available Expansions" + ) + self._prodlist_label.pack() + self._prodlist = Listbox( + self._prodframe, + selectmode="single", + relief="groove", + background="white", + foreground="#909090", + font=self._font, + selectforeground="#004040", + selectbackground="#c0f0c0", + ) + + self._prodlist.pack(side="right", fill="both", expand=1) + + self._productions = list(self._parser.grammar().productions()) + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + self._prodlist.config(height=min(len(self._productions), 25)) + + # Add a scrollbar if there are more than 25 productions. + if len(self._productions) > 25: + listscroll = Scrollbar(self._prodframe, orient="vertical") + self._prodlist.config(yscrollcommand=listscroll.set) + listscroll.config(command=self._prodlist.yview) + listscroll.pack(side="left", fill="y") + + # If they select a production, apply it. + self._prodlist.bind("<>", self._prodlist_select) + + def _init_bindings(self): + # Key bindings are a good thing. + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("e", self.expand) + # self._top.bind('', self.expand) + # self._top.bind('', self.expand) + self._top.bind("m", self.match) + self._top.bind("", self.match) + self._top.bind("", self.match) + self._top.bind("b", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("", self.backtrack) + self._top.bind("a", self.autostep) + # self._top.bind('', self.autostep) + self._top.bind("", self.autostep) + self._top.bind("", self.cancel_autostep) + self._top.bind("", self.step) + self._top.bind("", self.reset) + self._top.bind("", self.postscript) + # self._top.bind('', self.help) + # self._top.bind('', self.help) + self._top.bind("", self.help) + self._top.bind("", self.help) + # self._top.bind('', self.toggle_grammar) + # self._top.bind('', self.toggle_grammar) + # self._top.bind('', self.toggle_grammar) + self._top.bind("", self.edit_grammar) + self._top.bind("", self.edit_sentence) + + def _init_buttons(self, parent): + # Set up the frames. + self._buttonframe = buttonframe = Frame(parent) + buttonframe.pack(fill="none", side="bottom", padx=3, pady=2) + Button( + buttonframe, + text="Step", + background="#90c0d0", + foreground="black", + command=self.step, + ).pack(side="left") + Button( + buttonframe, + text="Autostep", + background="#90c0d0", + foreground="black", + command=self.autostep, + ).pack(side="left") + Button( + buttonframe, + text="Expand", + underline=0, + background="#90f090", + foreground="black", + command=self.expand, + ).pack(side="left") + Button( + buttonframe, + text="Match", + underline=0, + background="#90f090", + foreground="black", + command=self.match, + ).pack(side="left") + Button( + buttonframe, + text="Backtrack", + underline=0, + background="#f0a0a0", + foreground="black", + command=self.backtrack, + ).pack(side="left") + # Replace autostep... 
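The bind() calls above are shown without their Tk event sequences; the sketch below illustrates the general shape of such bindings, using representative sequences for the documented [e], [Space], [Ctrl-p] and help shortcuts rather than the verbatim ones from the NLTK source.

from tkinter import Tk

def report(event):
    print("key:", event.keysym)

top = Tk()
top.bind("e", report)             # plain key, e.g. "expand"
top.bind("<space>", report)       # space bar, e.g. "step"
top.bind("<Control-p>", report)   # modifier combination, e.g. print
top.bind("<F1>", report)          # function key, e.g. help
top.mainloop()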
+ + # self._autostep_button = Button(buttonframe, text='Autostep', + # underline=0, command=self.autostep) + # self._autostep_button.pack(side='left') + + def _configure(self, event): + self._autostep = 0 + (x1, y1, x2, y2) = self._cframe.scrollregion() + y2 = event.height - 6 + self._canvas["scrollregion"] = "%d %d %d %d" % (x1, y1, x2, y2) + self._redraw() + + def _init_feedback(self, parent): + self._feedbackframe = feedbackframe = Frame(parent) + feedbackframe.pack(fill="x", side="bottom", padx=3, pady=3) + self._lastoper_label = Label( + feedbackframe, text="Last Operation:", font=self._font + ) + self._lastoper_label.pack(side="left") + lastoperframe = Frame(feedbackframe, relief="sunken", border=1) + lastoperframe.pack(fill="x", side="right", expand=1, padx=5) + self._lastoper1 = Label( + lastoperframe, foreground="#007070", background="#f0f0f0", font=self._font + ) + self._lastoper2 = Label( + lastoperframe, + anchor="w", + width=30, + foreground="#004040", + background="#f0f0f0", + font=self._font, + ) + self._lastoper1.pack(side="left") + self._lastoper2.pack(side="left", fill="x", expand=1) + + def _init_canvas(self, parent): + self._cframe = CanvasFrame( + parent, + background="white", + # width=525, height=250, + closeenough=10, + border=2, + relief="sunken", + ) + self._cframe.pack(expand=1, fill="both", side="top", pady=2) + canvas = self._canvas = self._cframe.canvas() + + # Initially, there's no tree or text + self._tree = None + self._textwidgets = [] + self._textline = None + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Reset Parser", underline=0, command=self.reset, accelerator="Del" + ) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self.postscript, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + editmenu.add_command( + label="Edit Grammar", + underline=5, + command=self.edit_grammar, + accelerator="Ctrl-g", + ) + editmenu.add_command( + label="Edit Text", + underline=5, + command=self.edit_sentence, + accelerator="Ctrl-t", + ) + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + rulemenu = Menu(menubar, tearoff=0) + rulemenu.add_command( + label="Step", underline=1, command=self.step, accelerator="Space" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Match", underline=0, command=self.match, accelerator="Ctrl-m" + ) + rulemenu.add_command( + label="Expand", underline=0, command=self.expand, accelerator="Ctrl-e" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Backtrack", underline=0, command=self.backtrack, accelerator="Ctrl-b" + ) + menubar.add_cascade(label="Apply", underline=0, menu=rulemenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_checkbutton( + label="Show Grammar", + underline=0, + variable=self._show_grammar, + command=self._toggle_grammar, + ) + viewmenu.add_separator() + viewmenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + 
underline=0, + value=18, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + animatemenu = Menu(menubar, tearoff=0) + animatemenu.add_radiobutton( + label="No Animation", underline=0, variable=self._animation_frames, value=0 + ) + animatemenu.add_radiobutton( + label="Slow Animation", + underline=0, + variable=self._animation_frames, + value=10, + accelerator="-", + ) + animatemenu.add_radiobutton( + label="Normal Animation", + underline=0, + variable=self._animation_frames, + value=5, + accelerator="=", + ) + animatemenu.add_radiobutton( + label="Fast Animation", + underline=0, + variable=self._animation_frames, + value=2, + accelerator="+", + ) + menubar.add_cascade(label="Animate", underline=1, menu=animatemenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + ######################################### + ## Helper + ######################################### + + def _get(self, widget, treeloc): + for i in treeloc: + widget = widget.subtrees()[i] + if isinstance(widget, TreeSegmentWidget): + widget = widget.label() + return widget + + ######################################### + ## Main draw procedure + ######################################### + + def _redraw(self): + canvas = self._canvas + + # Delete the old tree, widgets, etc. + if self._tree is not None: + self._cframe.destroy_widget(self._tree) + for twidget in self._textwidgets: + self._cframe.destroy_widget(twidget) + if self._textline is not None: + self._canvas.delete(self._textline) + + # Draw the tree. + helv = ("helvetica", -self._size.get()) + bold = ("helvetica", -self._size.get(), "bold") + attribs = { + "tree_color": "#000000", + "tree_width": 2, + "node_font": bold, + "leaf_font": helv, + } + tree = self._parser.tree() + self._tree = tree_to_treesegment(canvas, tree, **attribs) + self._cframe.add_widget(self._tree, 30, 5) + + # Draw the text. + helv = ("helvetica", -self._size.get()) + bottom = y = self._cframe.scrollregion()[3] + self._textwidgets = [ + TextWidget(canvas, word, font=self._font) for word in self._sent + ] + for twidget in self._textwidgets: + self._cframe.add_widget(twidget, 0, 0) + twidget.move(0, bottom - twidget.bbox()[3] - 5) + y = min(y, twidget.bbox()[1]) + + # Draw a line over the text, to separate it from the tree. + self._textline = canvas.create_line(-5000, y - 5, 5000, y - 5, dash=".") + + # Highlight appropriate nodes. + self._highlight_nodes() + self._highlight_prodlist() + + # Make sure the text lines up. + self._position_text() + + def _redraw_quick(self): + # This should be more-or-less sufficient after an animation. + self._highlight_nodes() + self._highlight_prodlist() + self._position_text() + + def _highlight_nodes(self): + # Highlight the list of nodes to be checked. + bold = ("helvetica", -self._size.get(), "bold") + for treeloc in self._parser.frontier()[:1]: + self._get(self._tree, treeloc)["color"] = "#20a050" + self._get(self._tree, treeloc)["font"] = bold + for treeloc in self._parser.frontier()[1:]: + self._get(self._tree, treeloc)["color"] = "#008080" + + def _highlight_prodlist(self): + # Highlight the productions that can be expanded. 
+ # Boy, too bad tkinter doesn't implement Listbox.itemconfig; + # that would be pretty useful here. + self._prodlist.delete(0, "end") + expandable = self._parser.expandable_productions() + untried = self._parser.untried_expandable_productions() + productions = self._productions + for index in range(len(productions)): + if productions[index] in expandable: + if productions[index] in untried: + self._prodlist.insert(index, " %s" % productions[index]) + else: + self._prodlist.insert(index, " %s (TRIED)" % productions[index]) + self._prodlist.selection_set(index) + else: + self._prodlist.insert(index, " %s" % productions[index]) + + def _position_text(self): + # Line up the text widgets that are matched against the tree + numwords = len(self._sent) + num_matched = numwords - len(self._parser.remaining_text()) + leaves = self._tree_leaves()[:num_matched] + xmax = self._tree.bbox()[0] + for i in range(0, len(leaves)): + widget = self._textwidgets[i] + leaf = leaves[i] + widget["color"] = "#006040" + leaf["color"] = "#006040" + widget.move(leaf.bbox()[0] - widget.bbox()[0], 0) + xmax = widget.bbox()[2] + 10 + + # Line up the text widgets that are not matched against the tree. + for i in range(len(leaves), numwords): + widget = self._textwidgets[i] + widget["color"] = "#a0a0a0" + widget.move(xmax - widget.bbox()[0], 0) + xmax = widget.bbox()[2] + 10 + + # If we have a complete parse, make everything green :) + if self._parser.currently_complete(): + for twidget in self._textwidgets: + twidget["color"] = "#00a000" + + # Move the matched leaves down to the text. + for i in range(0, len(leaves)): + widget = self._textwidgets[i] + leaf = leaves[i] + dy = widget.bbox()[1] - leaf.bbox()[3] - 10.0 + dy = max(dy, leaf.parent().label().bbox()[3] - leaf.bbox()[3] + 10) + leaf.move(0, dy) + + def _tree_leaves(self, tree=None): + if tree is None: + tree = self._tree + if isinstance(tree, TreeSegmentWidget): + leaves = [] + for child in tree.subtrees(): + leaves += self._tree_leaves(child) + return leaves + else: + return [tree] + + ######################################### + ## Button Callbacks + ######################################### + + def destroy(self, *e): + self._autostep = 0 + if self._top is None: + return + self._top.destroy() + self._top = None + + def reset(self, *e): + self._autostep = 0 + self._parser.initialize(self._sent) + self._lastoper1["text"] = "Reset Application" + self._lastoper2["text"] = "" + self._redraw() + + def autostep(self, *e): + if self._animation_frames.get() == 0: + self._animation_frames.set(2) + if self._autostep: + self._autostep = 0 + else: + self._autostep = 1 + self._step() + + def cancel_autostep(self, *e): + # self._autostep_button['text'] = 'Autostep' + self._autostep = 0 + + # Make sure to stop auto-stepping if we get any user input. + def step(self, *e): + self._autostep = 0 + self._step() + + def match(self, *e): + self._autostep = 0 + self._match() + + def expand(self, *e): + self._autostep = 0 + self._expand() + + def backtrack(self, *e): + self._autostep = 0 + self._backtrack() + + def _step(self): + if self._animating_lock: + return + + # Try expanding, matching, and backtracking (in that order) + if self._expand(): + pass + elif self._parser.untried_match() and self._match(): + pass + elif self._backtrack(): + pass + else: + self._lastoper1["text"] = "Finished" + self._lastoper2["text"] = "" + self._autostep = 0 + + # Check if we just completed a parse. 
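Throughout this class a frontier entry is a tree location, that is, a tuple of child indices; _get(), _highlight_nodes() and the backtrack handling all walk them the same way. A tiny stand-alone illustration with a plain Tree (the tree itself is made up):

from nltk.tree import Tree

t = Tree.fromstring("(S (NP (Det the) (N dog)) (VP (V barked)))")
treeloc = (0, 1)      # first child of S, then that node's second child
node = t
for i in treeloc:
    node = node[i]
print(node)           # (N dog)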
+ if self._parser.currently_complete(): + self._autostep = 0 + self._lastoper2["text"] += " [COMPLETE PARSE]" + + def _expand(self, *e): + if self._animating_lock: + return + old_frontier = self._parser.frontier() + rv = self._parser.expand() + if rv is not None: + self._lastoper1["text"] = "Expand:" + self._lastoper2["text"] = rv + self._prodlist.selection_clear(0, "end") + index = self._productions.index(rv) + self._prodlist.selection_set(index) + self._animate_expand(old_frontier[0]) + return True + else: + self._lastoper1["text"] = "Expand:" + self._lastoper2["text"] = "(all expansions tried)" + return False + + def _match(self, *e): + if self._animating_lock: + return + old_frontier = self._parser.frontier() + rv = self._parser.match() + if rv is not None: + self._lastoper1["text"] = "Match:" + self._lastoper2["text"] = rv + self._animate_match(old_frontier[0]) + return True + else: + self._lastoper1["text"] = "Match:" + self._lastoper2["text"] = "(failed)" + return False + + def _backtrack(self, *e): + if self._animating_lock: + return + if self._parser.backtrack(): + elt = self._parser.tree() + for i in self._parser.frontier()[0]: + elt = elt[i] + self._lastoper1["text"] = "Backtrack" + self._lastoper2["text"] = "" + if isinstance(elt, Tree): + self._animate_backtrack(self._parser.frontier()[0]) + else: + self._animate_match_backtrack(self._parser.frontier()[0]) + return True + else: + self._autostep = 0 + self._lastoper1["text"] = "Finished" + self._lastoper2["text"] = "" + return False + + def about(self, *e): + ABOUT = ( + "NLTK Recursive Descent Parser Application\n" + "Written by Edward Loper" + ) + TITLE = "About: Recursive Descent Parser Application" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self._top, TITLE, ABOUT) + + def help(self, *e): + self._autostep = 0 + # The default font's not very legible; try using 'fixed' instead. + try: + ShowText( + self._top, + "Help: Recursive Descent Parser Application", + (__doc__ or "").strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._top, + "Help: Recursive Descent Parser Application", + (__doc__ or "").strip(), + width=75, + ) + + def postscript(self, *e): + self._autostep = 0 + self._cframe.print_to_file() + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. 
+ """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._boldfont.configure(size=-(abs(size))) + self._sysfont.configure(size=-(abs(size))) + self._bigfont.configure(size=-(abs(size + 2))) + self._redraw() + + ######################################### + ## Expand Production Selection + ######################################### + + def _toggle_grammar(self, *e): + if self._show_grammar.get(): + self._prodframe.pack( + fill="both", side="left", padx=2, after=self._feedbackframe + ) + self._lastoper1["text"] = "Show Grammar" + else: + self._prodframe.pack_forget() + self._lastoper1["text"] = "Hide Grammar" + self._lastoper2["text"] = "" + + # def toggle_grammar(self, *e): + # self._show_grammar = not self._show_grammar + # if self._show_grammar: + # self._prodframe.pack(fill='both', expand='y', side='left', + # after=self._feedbackframe) + # self._lastoper1['text'] = 'Show Grammar' + # else: + # self._prodframe.pack_forget() + # self._lastoper1['text'] = 'Hide Grammar' + # self._lastoper2['text'] = '' + + def _prodlist_select(self, event): + selection = self._prodlist.curselection() + if len(selection) != 1: + return + index = int(selection[0]) + old_frontier = self._parser.frontier() + production = self._parser.expand(self._productions[index]) + + if production: + self._lastoper1["text"] = "Expand:" + self._lastoper2["text"] = production + self._prodlist.selection_clear(0, "end") + self._prodlist.selection_set(index) + self._animate_expand(old_frontier[0]) + else: + # Reset the production selections. + self._prodlist.selection_clear(0, "end") + for prod in self._parser.expandable_productions(): + index = self._productions.index(prod) + self._prodlist.selection_set(index) + + ######################################### + ## Animation + ######################################### + + def _animate_expand(self, treeloc): + oldwidget = self._get(self._tree, treeloc) + oldtree = oldwidget.parent() + top = not isinstance(oldtree.parent(), TreeSegmentWidget) + + tree = self._parser.tree() + for i in treeloc: + tree = tree[i] + + widget = tree_to_treesegment( + self._canvas, + tree, + node_font=self._boldfont, + leaf_color="white", + tree_width=2, + tree_color="white", + node_color="white", + leaf_font=self._font, + ) + widget.label()["color"] = "#20a050" + + (oldx, oldy) = oldtree.label().bbox()[:2] + (newx, newy) = widget.label().bbox()[:2] + widget.move(oldx - newx, oldy - newy) + + if top: + self._cframe.add_widget(widget, 0, 5) + widget.move(30 - widget.label().bbox()[0], 0) + self._tree = widget + else: + oldtree.parent().replace_child(oldtree, widget) + + # Move the children over so they don't overlap. + # Line the children up in a strange way. + if widget.subtrees(): + dx = ( + oldx + + widget.label().width() / 2 + - widget.subtrees()[0].bbox()[0] / 2 + - widget.subtrees()[0].bbox()[2] / 2 + ) + for subtree in widget.subtrees(): + subtree.move(dx, 0) + + self._makeroom(widget) + + if top: + self._cframe.destroy_widget(oldtree) + else: + oldtree.destroy() + + colors = [ + "gray%d" % (10 * int(10 * x / self._animation_frames.get())) + for x in range(self._animation_frames.get(), 0, -1) + ] + + # Move the text string down, if necessary. 
+ dy = widget.bbox()[3] + 30 - self._canvas.coords(self._textline)[1] + if dy > 0: + for twidget in self._textwidgets: + twidget.move(0, dy) + self._canvas.move(self._textline, 0, dy) + + self._animate_expand_frame(widget, colors) + + def _makeroom(self, treeseg): + """ + Make sure that no sibling tree bbox's overlap. + """ + parent = treeseg.parent() + if not isinstance(parent, TreeSegmentWidget): + return + + index = parent.subtrees().index(treeseg) + + # Handle siblings to the right + rsiblings = parent.subtrees()[index + 1 :] + if rsiblings: + dx = treeseg.bbox()[2] - rsiblings[0].bbox()[0] + 10 + for sibling in rsiblings: + sibling.move(dx, 0) + + # Handle siblings to the left + if index > 0: + lsibling = parent.subtrees()[index - 1] + dx = max(0, lsibling.bbox()[2] - treeseg.bbox()[0] + 10) + treeseg.move(dx, 0) + + # Keep working up the tree. + self._makeroom(parent) + + def _animate_expand_frame(self, widget, colors): + if len(colors) > 0: + self._animating_lock = 1 + widget["color"] = colors[0] + for subtree in widget.subtrees(): + if isinstance(subtree, TreeSegmentWidget): + subtree.label()["color"] = colors[0] + else: + subtree["color"] = colors[0] + self._top.after(50, self._animate_expand_frame, widget, colors[1:]) + else: + widget["color"] = "black" + for subtree in widget.subtrees(): + if isinstance(subtree, TreeSegmentWidget): + subtree.label()["color"] = "black" + else: + subtree["color"] = "black" + self._redraw_quick() + widget.label()["color"] = "black" + self._animating_lock = 0 + if self._autostep: + self._step() + + def _animate_backtrack(self, treeloc): + # Flash red first, if we're animating. + if self._animation_frames.get() == 0: + colors = [] + else: + colors = ["#a00000", "#000000", "#a00000"] + colors += [ + "gray%d" % (10 * int(10 * x / (self._animation_frames.get()))) + for x in range(1, self._animation_frames.get() + 1) + ] + + widgets = [self._get(self._tree, treeloc).parent()] + for subtree in widgets[0].subtrees(): + if isinstance(subtree, TreeSegmentWidget): + widgets.append(subtree.label()) + else: + widgets.append(subtree) + + self._animate_backtrack_frame(widgets, colors) + + def _animate_backtrack_frame(self, widgets, colors): + if len(colors) > 0: + self._animating_lock = 1 + for widget in widgets: + widget["color"] = colors[0] + self._top.after(50, self._animate_backtrack_frame, widgets, colors[1:]) + else: + for widget in widgets[0].subtrees(): + widgets[0].remove_child(widget) + widget.destroy() + self._redraw_quick() + self._animating_lock = 0 + if self._autostep: + self._step() + + def _animate_match_backtrack(self, treeloc): + widget = self._get(self._tree, treeloc) + node = widget.parent().label() + dy = (node.bbox()[3] - widget.bbox()[1] + 14) / max( + 1, self._animation_frames.get() + ) + self._animate_match_backtrack_frame(self._animation_frames.get(), widget, dy) + + def _animate_match(self, treeloc): + widget = self._get(self._tree, treeloc) + + dy = (self._textwidgets[0].bbox()[1] - widget.bbox()[3] - 10.0) / max( + 1, self._animation_frames.get() + ) + self._animate_match_frame(self._animation_frames.get(), widget, dy) + + def _animate_match_frame(self, frame, widget, dy): + if frame > 0: + self._animating_lock = 1 + widget.move(0, dy) + self._top.after(10, self._animate_match_frame, frame - 1, widget, dy) + else: + widget["color"] = "#006040" + self._redraw_quick() + self._animating_lock = 0 + if self._autostep: + self._step() + + def _animate_match_backtrack_frame(self, frame, widget, dy): + if frame > 0: + self._animating_lock = 1 
+ widget.move(0, dy) + self._top.after( + 10, self._animate_match_backtrack_frame, frame - 1, widget, dy + ) + else: + widget.parent().remove_child(widget) + widget.destroy() + self._animating_lock = 0 + if self._autostep: + self._step() + + def edit_grammar(self, *e): + CFGEditor(self._top, self._parser.grammar(), self.set_grammar) + + def set_grammar(self, grammar): + self._parser.set_grammar(grammar) + self._productions = list(grammar.productions()) + self._prodlist.delete(0, "end") + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + + def edit_sentence(self, *e): + sentence = " ".join(self._sent) + title = "Edit Text" + instr = "Enter a new sentence to parse." + EntryDialog(self._top, sentence, instr, self.set_sentence, title) + + def set_sentence(self, sentence): + self._sent = sentence.split() # [XX] use tagged? + self.reset() + + +def app(): + """ + Create a recursive descent parser demo, using a simple grammar and + text. + """ + from nltk.grammar import CFG + + grammar = CFG.fromstring( + """ + # Grammatical productions. + S -> NP VP + NP -> Det N PP | Det N + VP -> V NP PP | V NP | V + PP -> P NP + # Lexical productions. + NP -> 'I' + Det -> 'the' | 'a' + N -> 'man' | 'park' | 'dog' | 'telescope' + V -> 'ate' | 'saw' + P -> 'in' | 'under' | 'with' + """ + ) + + sent = "the dog saw a man in the park".split() + + RecursiveDescentApp(grammar, sent).mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/venv/lib/python3.10/site-packages/nltk/app/srparser_app.py b/venv/lib/python3.10/site-packages/nltk/app/srparser_app.py new file mode 100644 index 0000000000000000000000000000000000000000..cca5cb2de2149cc573b6d471cd5fef2a57cbbb7d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/app/srparser_app.py @@ -0,0 +1,937 @@ +# Natural Language Toolkit: Shift-Reduce Parser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +A graphical tool for exploring the shift-reduce parser. + +The shift-reduce parser maintains a stack, which records the structure +of the portion of the text that has been parsed. The stack is +initially empty. Its contents are shown on the left side of the main +canvas. + +On the right side of the main canvas is the remaining text. This is +the portion of the text which has not yet been considered by the +parser. + +The parser builds up a tree structure for the text using two +operations: + + - "shift" moves the first token from the remaining text to the top + of the stack. In the demo, the top of the stack is its right-hand + side. + - "reduce" uses a grammar production to combine the rightmost stack + elements into a single tree token. + +You can control the parser's operation by using the "shift" and +"reduce" buttons; or you can use the "step" button to let the parser +automatically decide which operation to apply. The parser uses the +following rules to decide which operation to apply: + + - Only shift if no reductions are available. + - If multiple reductions are available, then apply the reduction + whose CFG production is listed earliest in the grammar. + +The "reduce" button applies the reduction whose CFG production is +listed earliest in the grammar. There are two ways to manually choose +which reduction to apply: + + - Click on a CFG production from the list of available reductions, + on the left side of the main window. 
The reduction based on that + production will be applied to the top of the stack. + - Click on one of the stack elements. A popup window will appear, + containing all available reductions. Select one, and it will be + applied to the top of the stack. + +Note that reductions can only be applied to the top of the stack. + +Keyboard Shortcuts:: + [Space]\t Perform the next shift or reduce operation + [s]\t Perform a shift operation + [r]\t Perform a reduction operation + [Ctrl-z]\t Undo most recent operation + [Delete]\t Reset the parser + [g]\t Show/hide available production list + [Ctrl-a]\t Toggle animations + [h]\t Help + [Ctrl-p]\t Print + [q]\t Quit + +""" + +from tkinter import Button, Frame, IntVar, Label, Listbox, Menu, Scrollbar, Tk +from tkinter.font import Font + +from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment +from nltk.draw.util import CanvasFrame, EntryDialog, ShowText, TextWidget +from nltk.parse import SteppingShiftReduceParser +from nltk.tree import Tree +from nltk.util import in_idle + +""" +Possible future improvements: + - button/window to change and/or select text. Just pop up a window + with an entry, and let them modify the text; and then retokenize + it? Maybe give a warning if it contains tokens whose types are + not in the grammar. + - button/window to change and/or select grammar. Select from + several alternative grammars? Or actually change the grammar? If + the later, then I'd want to define nltk.draw.cfg, which would be + responsible for that. +""" + + +class ShiftReduceApp: + """ + A graphical tool for exploring the shift-reduce parser. The tool + displays the parser's stack and the remaining text, and allows the + user to control the parser's operation. In particular, the user + can shift tokens onto the stack, and can perform reductions on the + top elements of the stack. A "step" button simply steps through + the parsing process, performing the operations that + ``nltk.parse.ShiftReduceParser`` would use. + """ + + def __init__(self, grammar, sent, trace=0): + self._sent = sent + self._parser = SteppingShiftReduceParser(grammar, trace) + + # Set up the main window. + self._top = Tk() + self._top.title("Shift Reduce Parser Application") + + # Animations. animating_lock is a lock to prevent the demo + # from performing new operations while it's animating. + self._animating_lock = 0 + self._animate = IntVar(self._top) + self._animate.set(10) # = medium + + # The user can hide the grammar. + self._show_grammar = IntVar(self._top) + self._show_grammar.set(1) + + # Initialize fonts. + self._init_fonts(self._top) + + # Set up key bindings. + self._init_bindings() + + # Create the basic frames. + self._init_menubar(self._top) + self._init_buttons(self._top) + self._init_feedback(self._top) + self._init_grammar(self._top) + self._init_canvas(self._top) + + # A popup menu for reducing. + self._reduce_menu = Menu(self._canvas, tearoff=0) + + # Reset the demo, and set the feedback frame to empty. 
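As with the recursive descent tool, the stepping parser can be driven without the GUI. This is a sketch only, using the same SteppingShiftReduceParser calls the class below makes, with an invented grammar and sentence; the loop mirrors the step policy from the module docstring (only shift when no reduction applies).

from nltk.grammar import CFG
from nltk.parse import SteppingShiftReduceParser

grammar = CFG.fromstring("""
    S -> NP VP
    NP -> 'the' N
    VP -> V NP
    N -> 'dog' | 'cat'
    V -> 'saw'
""")
parser = SteppingShiftReduceParser(grammar)
parser.initialize("the dog saw the cat".split())

# reduce() returns the production used (or None); shift() returns True/False.
while parser.reduce() or parser.shift():
    pass

print(parser.stack())   # a single S tree if the parse succeeded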
+ self.reset() + self._lastoper1["text"] = "" + + ######################################### + ## Initialization Helpers + ######################################### + + def _init_fonts(self, root): + # See: + self._sysfont = Font(font=Button()["font"]) + root.option_add("*Font", self._sysfont) + + # TWhat's our font size (default=same as sysfont) + self._size = IntVar(root) + self._size.set(self._sysfont.cget("size")) + + self._boldfont = Font(family="helvetica", weight="bold", size=self._size.get()) + self._font = Font(family="helvetica", size=self._size.get()) + + def _init_grammar(self, parent): + # Grammar view. + self._prodframe = listframe = Frame(parent) + self._prodframe.pack(fill="both", side="left", padx=2) + self._prodlist_label = Label( + self._prodframe, font=self._boldfont, text="Available Reductions" + ) + self._prodlist_label.pack() + self._prodlist = Listbox( + self._prodframe, + selectmode="single", + relief="groove", + background="white", + foreground="#909090", + font=self._font, + selectforeground="#004040", + selectbackground="#c0f0c0", + ) + + self._prodlist.pack(side="right", fill="both", expand=1) + + self._productions = list(self._parser.grammar().productions()) + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + self._prodlist.config(height=min(len(self._productions), 25)) + + # Add a scrollbar if there are more than 25 productions. + if 1: # len(self._productions) > 25: + listscroll = Scrollbar(self._prodframe, orient="vertical") + self._prodlist.config(yscrollcommand=listscroll.set) + listscroll.config(command=self._prodlist.yview) + listscroll.pack(side="left", fill="y") + + # If they select a production, apply it. + self._prodlist.bind("<>", self._prodlist_select) + + # When they hover over a production, highlight it. + self._hover = -1 + self._prodlist.bind("", self._highlight_hover) + self._prodlist.bind("", self._clear_hover) + + def _init_bindings(self): + # Quit + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + + # Ops (step, shift, reduce, undo) + self._top.bind("", self.step) + self._top.bind("", self.shift) + self._top.bind("", self.shift) + self._top.bind("", self.shift) + self._top.bind("", self.reduce) + self._top.bind("", self.reduce) + self._top.bind("", self.reduce) + self._top.bind("", self.reset) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + self._top.bind("", self.undo) + + # Misc + self._top.bind("", self.postscript) + self._top.bind("", self.help) + self._top.bind("", self.help) + self._top.bind("", self.edit_grammar) + self._top.bind("", self.edit_sentence) + + # Animation speed control + self._top.bind("-", lambda e, a=self._animate: a.set(20)) + self._top.bind("=", lambda e, a=self._animate: a.set(10)) + self._top.bind("+", lambda e, a=self._animate: a.set(4)) + + def _init_buttons(self, parent): + # Set up the frames. 
+ self._buttonframe = buttonframe = Frame(parent) + buttonframe.pack(fill="none", side="bottom") + Button( + buttonframe, + text="Step", + background="#90c0d0", + foreground="black", + command=self.step, + ).pack(side="left") + Button( + buttonframe, + text="Shift", + underline=0, + background="#90f090", + foreground="black", + command=self.shift, + ).pack(side="left") + Button( + buttonframe, + text="Reduce", + underline=0, + background="#90f090", + foreground="black", + command=self.reduce, + ).pack(side="left") + Button( + buttonframe, + text="Undo", + underline=0, + background="#f0a0a0", + foreground="black", + command=self.undo, + ).pack(side="left") + + def _init_menubar(self, parent): + menubar = Menu(parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Reset Parser", underline=0, command=self.reset, accelerator="Del" + ) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self.postscript, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + editmenu = Menu(menubar, tearoff=0) + editmenu.add_command( + label="Edit Grammar", + underline=5, + command=self.edit_grammar, + accelerator="Ctrl-g", + ) + editmenu.add_command( + label="Edit Text", + underline=5, + command=self.edit_sentence, + accelerator="Ctrl-t", + ) + menubar.add_cascade(label="Edit", underline=0, menu=editmenu) + + rulemenu = Menu(menubar, tearoff=0) + rulemenu.add_command( + label="Step", underline=1, command=self.step, accelerator="Space" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Shift", underline=0, command=self.shift, accelerator="Ctrl-s" + ) + rulemenu.add_command( + label="Reduce", underline=0, command=self.reduce, accelerator="Ctrl-r" + ) + rulemenu.add_separator() + rulemenu.add_command( + label="Undo", underline=0, command=self.undo, accelerator="Ctrl-u" + ) + menubar.add_cascade(label="Apply", underline=0, menu=rulemenu) + + viewmenu = Menu(menubar, tearoff=0) + viewmenu.add_checkbutton( + label="Show Grammar", + underline=0, + variable=self._show_grammar, + command=self._toggle_grammar, + ) + viewmenu.add_separator() + viewmenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=18, + command=self.resize, + ) + viewmenu.add_radiobutton( + label="Huge", + variable=self._size, + underline=0, + value=24, + command=self.resize, + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + animatemenu = Menu(menubar, tearoff=0) + animatemenu.add_radiobutton( + label="No Animation", underline=0, variable=self._animate, value=0 + ) + animatemenu.add_radiobutton( + label="Slow Animation", + underline=0, + variable=self._animate, + value=20, + accelerator="-", + ) + animatemenu.add_radiobutton( + label="Normal Animation", + underline=0, + variable=self._animate, + value=10, + accelerator="=", + ) + animatemenu.add_radiobutton( + label="Fast Animation", + underline=0, + variable=self._animate, + value=4, + accelerator="+", + ) + menubar.add_cascade(label="Animate", underline=1, menu=animatemenu) + + 
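The View and Animate menus above follow one Tk pattern: every radiobutton shares a single IntVar, so choosing an entry simply stores its value, and the accelerator text is only a label (the matching keys are bound separately in _init_bindings). A compact sketch of that pattern with illustrative values:

from tkinter import IntVar, Menu, Tk

top = Tk()
size = IntVar(top)
size.set(12)

def resize():
    print("font size is now", size.get())

menubar = Menu(top)
viewmenu = Menu(menubar, tearoff=0)
for label, value in [("Tiny", 10), ("Small", 12), ("Medium", 14),
                     ("Large", 18), ("Huge", 24)]:
    viewmenu.add_radiobutton(label=label, variable=size, value=value, command=resize)
menubar.add_cascade(label="View", menu=viewmenu)
top.config(menu=menubar)
top.mainloop()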
helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + + parent.config(menu=menubar) + + def _init_feedback(self, parent): + self._feedbackframe = feedbackframe = Frame(parent) + feedbackframe.pack(fill="x", side="bottom", padx=3, pady=3) + self._lastoper_label = Label( + feedbackframe, text="Last Operation:", font=self._font + ) + self._lastoper_label.pack(side="left") + lastoperframe = Frame(feedbackframe, relief="sunken", border=1) + lastoperframe.pack(fill="x", side="right", expand=1, padx=5) + self._lastoper1 = Label( + lastoperframe, foreground="#007070", background="#f0f0f0", font=self._font + ) + self._lastoper2 = Label( + lastoperframe, + anchor="w", + width=30, + foreground="#004040", + background="#f0f0f0", + font=self._font, + ) + self._lastoper1.pack(side="left") + self._lastoper2.pack(side="left", fill="x", expand=1) + + def _init_canvas(self, parent): + self._cframe = CanvasFrame( + parent, + background="white", + width=525, + closeenough=10, + border=2, + relief="sunken", + ) + self._cframe.pack(expand=1, fill="both", side="top", pady=2) + canvas = self._canvas = self._cframe.canvas() + + self._stackwidgets = [] + self._rtextwidgets = [] + self._titlebar = canvas.create_rectangle( + 0, 0, 0, 0, fill="#c0f0f0", outline="black" + ) + self._exprline = canvas.create_line(0, 0, 0, 0, dash=".") + self._stacktop = canvas.create_line(0, 0, 0, 0, fill="#408080") + size = self._size.get() + 4 + self._stacklabel = TextWidget( + canvas, "Stack", color="#004040", font=self._boldfont + ) + self._rtextlabel = TextWidget( + canvas, "Remaining Text", color="#004040", font=self._boldfont + ) + self._cframe.add_widget(self._stacklabel) + self._cframe.add_widget(self._rtextlabel) + + ######################################### + ## Main draw procedure + ######################################### + + def _redraw(self): + scrollregion = self._canvas["scrollregion"].split() + (cx1, cy1, cx2, cy2) = (int(c) for c in scrollregion) + + # Delete the old stack & rtext widgets. + for stackwidget in self._stackwidgets: + self._cframe.destroy_widget(stackwidget) + self._stackwidgets = [] + for rtextwidget in self._rtextwidgets: + self._cframe.destroy_widget(rtextwidget) + self._rtextwidgets = [] + + # Position the titlebar & exprline + (x1, y1, x2, y2) = self._stacklabel.bbox() + y = y2 - y1 + 10 + self._canvas.coords(self._titlebar, -5000, 0, 5000, y - 4) + self._canvas.coords(self._exprline, 0, y * 2 - 10, 5000, y * 2 - 10) + + # Position the titlebar labels.. + (x1, y1, x2, y2) = self._stacklabel.bbox() + self._stacklabel.move(5 - x1, 3 - y1) + (x1, y1, x2, y2) = self._rtextlabel.bbox() + self._rtextlabel.move(cx2 - x2 - 5, 3 - y1) + + # Draw the stack. + stackx = 5 + for tok in self._parser.stack(): + if isinstance(tok, Tree): + attribs = { + "tree_color": "#4080a0", + "tree_width": 2, + "node_font": self._boldfont, + "node_color": "#006060", + "leaf_color": "#006060", + "leaf_font": self._font, + } + widget = tree_to_treesegment(self._canvas, tok, **attribs) + widget.label()["color"] = "#000000" + else: + widget = TextWidget(self._canvas, tok, color="#000000", font=self._font) + widget.bind_click(self._popup_reduce) + self._stackwidgets.append(widget) + self._cframe.add_widget(widget, stackx, y) + stackx = widget.bbox()[2] + 10 + + # Draw the remaining text. 
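The stack drawn just above mixes bare tokens with partially built Tree objects; the same isinstance() dispatch can be seen without any canvas (the stack contents here are invented):

from nltk.tree import Tree

stack = ["saw", Tree("NP", [Tree("Det", ["the"]), Tree("N", ["dog"])])]
for item in stack:
    if isinstance(item, Tree):
        print("subtree rooted at", item.label())   # drawn as a TreeSegmentWidget
    else:
        print("token:", item)                      # drawn as a plain TextWidget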
+ rtextwidth = 0 + for tok in self._parser.remaining_text(): + widget = TextWidget(self._canvas, tok, color="#000000", font=self._font) + self._rtextwidgets.append(widget) + self._cframe.add_widget(widget, rtextwidth, y) + rtextwidth = widget.bbox()[2] + 4 + + # Allow enough room to shift the next token (for animations) + if len(self._rtextwidgets) > 0: + stackx += self._rtextwidgets[0].width() + + # Move the remaining text to the correct location (keep it + # right-justified, when possible); and move the remaining text + # label, if necessary. + stackx = max(stackx, self._stacklabel.width() + 25) + rlabelwidth = self._rtextlabel.width() + 10 + if stackx >= cx2 - max(rtextwidth, rlabelwidth): + cx2 = stackx + max(rtextwidth, rlabelwidth) + for rtextwidget in self._rtextwidgets: + rtextwidget.move(4 + cx2 - rtextwidth, 0) + self._rtextlabel.move(cx2 - self._rtextlabel.bbox()[2] - 5, 0) + + midx = (stackx + cx2 - max(rtextwidth, rlabelwidth)) / 2 + self._canvas.coords(self._stacktop, midx, 0, midx, 5000) + (x1, y1, x2, y2) = self._stacklabel.bbox() + + # Set up binding to allow them to shift a token by dragging it. + if len(self._rtextwidgets) > 0: + + def drag_shift(widget, midx=midx, self=self): + if widget.bbox()[0] < midx: + self.shift() + else: + self._redraw() + + self._rtextwidgets[0].bind_drag(drag_shift) + self._rtextwidgets[0].bind_click(self.shift) + + # Draw the stack top. + self._highlight_productions() + + def _draw_stack_top(self, widget): + # hack.. + midx = widget.bbox()[2] + 50 + self._canvas.coords(self._stacktop, midx, 0, midx, 5000) + + def _highlight_productions(self): + # Highlight the productions that can be reduced. + self._prodlist.selection_clear(0, "end") + for prod in self._parser.reducible_productions(): + index = self._productions.index(prod) + self._prodlist.selection_set(index) + + ######################################### + ## Button Callbacks + ######################################### + + def destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def reset(self, *e): + self._parser.initialize(self._sent) + self._lastoper1["text"] = "Reset App" + self._lastoper2["text"] = "" + self._redraw() + + def step(self, *e): + if self.reduce(): + return True + elif self.shift(): + return True + else: + if list(self._parser.parses()): + self._lastoper1["text"] = "Finished:" + self._lastoper2["text"] = "Success" + else: + self._lastoper1["text"] = "Finished:" + self._lastoper2["text"] = "Failure" + + def shift(self, *e): + if self._animating_lock: + return + if self._parser.shift(): + tok = self._parser.stack()[-1] + self._lastoper1["text"] = "Shift:" + self._lastoper2["text"] = "%r" % tok + if self._animate.get(): + self._animate_shift() + else: + self._redraw() + return True + return False + + def reduce(self, *e): + if self._animating_lock: + return + production = self._parser.reduce() + if production: + self._lastoper1["text"] = "Reduce:" + self._lastoper2["text"] = "%s" % production + if self._animate.get(): + self._animate_reduce() + else: + self._redraw() + return production + + def undo(self, *e): + if self._animating_lock: + return + if self._parser.undo(): + self._redraw() + + def postscript(self, *e): + self._cframe.print_to_file() + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. 
+ """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + ######################################### + ## Menubar callbacks + ######################################### + + def resize(self, size=None): + if size is not None: + self._size.set(size) + size = self._size.get() + self._font.configure(size=-(abs(size))) + self._boldfont.configure(size=-(abs(size))) + self._sysfont.configure(size=-(abs(size))) + + # self._stacklabel['font'] = ('helvetica', -size-4, 'bold') + # self._rtextlabel['font'] = ('helvetica', -size-4, 'bold') + # self._lastoper_label['font'] = ('helvetica', -size) + # self._lastoper1['font'] = ('helvetica', -size) + # self._lastoper2['font'] = ('helvetica', -size) + # self._prodlist['font'] = ('helvetica', -size) + # self._prodlist_label['font'] = ('helvetica', -size-2, 'bold') + self._redraw() + + def help(self, *e): + # The default font's not very legible; try using 'fixed' instead. + try: + ShowText( + self._top, + "Help: Shift-Reduce Parser Application", + (__doc__ or "").strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._top, + "Help: Shift-Reduce Parser Application", + (__doc__ or "").strip(), + width=75, + ) + + def about(self, *e): + ABOUT = "NLTK Shift-Reduce Parser Application\n" + "Written by Edward Loper" + TITLE = "About: Shift-Reduce Parser Application" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except: + ShowText(self._top, TITLE, ABOUT) + + def edit_grammar(self, *e): + CFGEditor(self._top, self._parser.grammar(), self.set_grammar) + + def set_grammar(self, grammar): + self._parser.set_grammar(grammar) + self._productions = list(grammar.productions()) + self._prodlist.delete(0, "end") + for production in self._productions: + self._prodlist.insert("end", (" %s" % production)) + + def edit_sentence(self, *e): + sentence = " ".join(self._sent) + title = "Edit Text" + instr = "Enter a new sentence to parse." + EntryDialog(self._top, sentence, instr, self.set_sentence, title) + + def set_sentence(self, sent): + self._sent = sent.split() # [XX] use tagged? + self.reset() + + ######################################### + ## Reduce Production Selection + ######################################### + + def _toggle_grammar(self, *e): + if self._show_grammar.get(): + self._prodframe.pack( + fill="both", side="left", padx=2, after=self._feedbackframe + ) + self._lastoper1["text"] = "Show Grammar" + else: + self._prodframe.pack_forget() + self._lastoper1["text"] = "Hide Grammar" + self._lastoper2["text"] = "" + + def _prodlist_select(self, event): + selection = self._prodlist.curselection() + if len(selection) != 1: + return + index = int(selection[0]) + production = self._parser.reduce(self._productions[index]) + if production: + self._lastoper1["text"] = "Reduce:" + self._lastoper2["text"] = "%s" % production + if self._animate.get(): + self._animate_reduce() + else: + self._redraw() + else: + # Reset the production selections. + self._prodlist.selection_clear(0, "end") + for prod in self._parser.reducible_productions(): + index = self._productions.index(prod) + self._prodlist.selection_set(index) + + def _popup_reduce(self, widget): + # Remove old commands. 
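Both the production-list highlighting and the reduce popup are driven by reducible_productions(), which reports the productions whose right-hand side currently matches the top of the stack. A stand-alone sketch with a toy grammar:

from nltk.grammar import CFG
from nltk.parse import SteppingShiftReduceParser

grammar = CFG.fromstring("""
    S -> NP VP
    NP -> 'the' N
    VP -> V
    N -> 'dog'
    V -> 'barked'
""")
parser = SteppingShiftReduceParser(grammar)
parser.initialize("the dog barked".split())
parser.shift()
parser.shift()                            # stack is now: 'the', 'dog'
print(parser.reducible_productions())     # expected: [N -> 'dog']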
+ productions = self._parser.reducible_productions() + if len(productions) == 0: + return + + self._reduce_menu.delete(0, "end") + for production in productions: + self._reduce_menu.add_command(label=str(production), command=self.reduce) + self._reduce_menu.post( + self._canvas.winfo_pointerx(), self._canvas.winfo_pointery() + ) + + ######################################### + ## Animations + ######################################### + + def _animate_shift(self): + # What widget are we shifting? + widget = self._rtextwidgets[0] + + # Where are we shifting from & to? + right = widget.bbox()[0] + if len(self._stackwidgets) == 0: + left = 5 + else: + left = self._stackwidgets[-1].bbox()[2] + 10 + + # Start animating. + dt = self._animate.get() + dx = (left - right) * 1.0 / dt + self._animate_shift_frame(dt, widget, dx) + + def _animate_shift_frame(self, frame, widget, dx): + if frame > 0: + self._animating_lock = 1 + widget.move(dx, 0) + self._top.after(10, self._animate_shift_frame, frame - 1, widget, dx) + else: + # but: stacktop?? + + # Shift the widget to the stack. + del self._rtextwidgets[0] + self._stackwidgets.append(widget) + self._animating_lock = 0 + + # Display the available productions. + self._draw_stack_top(widget) + self._highlight_productions() + + def _animate_reduce(self): + # What widgets are we shifting? + numwidgets = len(self._parser.stack()[-1]) # number of children + widgets = self._stackwidgets[-numwidgets:] + + # How far are we moving? + if isinstance(widgets[0], TreeSegmentWidget): + ydist = 15 + widgets[0].label().height() + else: + ydist = 15 + widgets[0].height() + + # Start animating. + dt = self._animate.get() + dy = ydist * 2.0 / dt + self._animate_reduce_frame(dt / 2, widgets, dy) + + def _animate_reduce_frame(self, frame, widgets, dy): + if frame > 0: + self._animating_lock = 1 + for widget in widgets: + widget.move(0, dy) + self._top.after(10, self._animate_reduce_frame, frame - 1, widgets, dy) + else: + del self._stackwidgets[-len(widgets) :] + for widget in widgets: + self._cframe.remove_widget(widget) + tok = self._parser.stack()[-1] + if not isinstance(tok, Tree): + raise ValueError() + label = TextWidget( + self._canvas, str(tok.label()), color="#006060", font=self._boldfont + ) + widget = TreeSegmentWidget(self._canvas, label, widgets, width=2) + (x1, y1, x2, y2) = self._stacklabel.bbox() + y = y2 - y1 + 10 + if not self._stackwidgets: + x = 5 + else: + x = self._stackwidgets[-1].bbox()[2] + 10 + self._cframe.add_widget(widget, x, y) + self._stackwidgets.append(widget) + + # Display the available productions. + self._draw_stack_top(widget) + self._highlight_productions() + + # # Delete the old widgets.. + # del self._stackwidgets[-len(widgets):] + # for widget in widgets: + # self._cframe.destroy_widget(widget) + # + # # Make a new one. 
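The timing arithmetic in _animate_reduce above is worth spelling out: the widgets travel ydist pixels over dt/2 frames of roughly 10 ms each, where dt is the current Animate setting. The numbers below are just an example.

dt = 10                  # the "Normal Animation" setting
ydist = 40               # example distance in pixels
dy = ydist * 2.0 / dt    # per-frame movement, as computed above
frames = dt / 2
print(dy, frames, dy * frames)   # 8.0 5.0 40.0, so the total movement equals ydist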
+ # tok = self._parser.stack()[-1] + # if isinstance(tok, Tree): + # attribs = {'tree_color': '#4080a0', 'tree_width': 2, + # 'node_font': bold, 'node_color': '#006060', + # 'leaf_color': '#006060', 'leaf_font':self._font} + # widget = tree_to_treesegment(self._canvas, tok.type(), + # **attribs) + # widget.node()['color'] = '#000000' + # else: + # widget = TextWidget(self._canvas, tok.type(), + # color='#000000', font=self._font) + # widget.bind_click(self._popup_reduce) + # (x1, y1, x2, y2) = self._stacklabel.bbox() + # y = y2-y1+10 + # if not self._stackwidgets: x = 5 + # else: x = self._stackwidgets[-1].bbox()[2] + 10 + # self._cframe.add_widget(widget, x, y) + # self._stackwidgets.append(widget) + + # self._redraw() + self._animating_lock = 0 + + ######################################### + ## Hovering. + ######################################### + + def _highlight_hover(self, event): + # What production are we hovering over? + index = self._prodlist.nearest(event.y) + if self._hover == index: + return + + # Clear any previous hover highlighting. + self._clear_hover() + + # If the production corresponds to an available reduction, + # highlight the stack. + selection = [int(s) for s in self._prodlist.curselection()] + if index in selection: + rhslen = len(self._productions[index].rhs()) + for stackwidget in self._stackwidgets[-rhslen:]: + if isinstance(stackwidget, TreeSegmentWidget): + stackwidget.label()["color"] = "#00a000" + else: + stackwidget["color"] = "#00a000" + + # Remember what production we're hovering over. + self._hover = index + + def _clear_hover(self, *event): + # Clear any previous hover highlighting. + if self._hover == -1: + return + self._hover = -1 + for stackwidget in self._stackwidgets: + if isinstance(stackwidget, TreeSegmentWidget): + stackwidget.label()["color"] = "black" + else: + stackwidget["color"] = "black" + + +def app(): + """ + Create a shift reduce parser app, using a simple grammar and + text. 
+ """ + + from nltk.grammar import CFG, Nonterminal, Production + + nonterminals = "S VP NP PP P N Name V Det" + (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split()) + + productions = ( + # Syntactic Productions + Production(S, [NP, VP]), + Production(NP, [Det, N]), + Production(NP, [NP, PP]), + Production(VP, [VP, PP]), + Production(VP, [V, NP, PP]), + Production(VP, [V, NP]), + Production(PP, [P, NP]), + # Lexical Productions + Production(NP, ["I"]), + Production(Det, ["the"]), + Production(Det, ["a"]), + Production(N, ["man"]), + Production(V, ["saw"]), + Production(P, ["in"]), + Production(P, ["with"]), + Production(N, ["park"]), + Production(N, ["dog"]), + Production(N, ["statue"]), + Production(Det, ["my"]), + ) + + grammar = CFG(S, productions) + + # tokenize the sentence + sent = "my dog saw a man in the park with a statue".split() + + ShiftReduceApp(grammar, sent).mainloop() + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/venv/lib/python3.10/site-packages/nltk/app/wordfreq_app.py b/venv/lib/python3.10/site-packages/nltk/app/wordfreq_app.py new file mode 100644 index 0000000000000000000000000000000000000000..2846b31216be4611aeabb539782137f2f0decac7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/app/wordfreq_app.py @@ -0,0 +1,36 @@ +# Natural Language Toolkit: Wordfreq Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sumukh Ghodke +# URL: +# For license information, see LICENSE.TXT + +from matplotlib import pylab + +from nltk.corpus import gutenberg +from nltk.text import Text + + +def plot_word_freq_dist(text): + fd = text.vocab() + + samples = [item for item, _ in fd.most_common(50)] + values = [fd[sample] for sample in samples] + values = [sum(values[: i + 1]) * 100.0 / fd.N() for i in range(len(values))] + pylab.title(text.name) + pylab.xlabel("Samples") + pylab.ylabel("Cumulative Percentage") + pylab.plot(values) + pylab.xticks(range(len(samples)), [str(s) for s in samples], rotation=90) + pylab.show() + + +def app(): + t1 = Text(gutenberg.words("melville-moby_dick.txt")) + plot_word_freq_dist(t1) + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/venv/lib/python3.10/site-packages/nltk/app/wordnet_app.py b/venv/lib/python3.10/site-packages/nltk/app/wordnet_app.py new file mode 100644 index 0000000000000000000000000000000000000000..afed38b947d0ec231fe4d6f2f56614358d98c7b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/app/wordnet_app.py @@ -0,0 +1,1005 @@ +# Natural Language Toolkit: WordNet Browser Application +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Jussi Salmela +# Paul Bone +# URL: +# For license information, see LICENSE.TXT + +""" +A WordNet Browser application which launches the default browser +(if it is not already running) and opens a new tab with a connection +to http://localhost:port/ . It also starts an HTTP server on the +specified port and begins serving browser requests. The default +port is 8000. (For command-line help, run "python wordnet -h") +This application requires that the user's web browser supports +Javascript. + +BrowServer is a server for browsing the NLTK Wordnet database It first +launches a browser client to be used for browsing and then starts +serving the requests of that and maybe other clients + +Usage:: + + browserver.py -h + browserver.py [-s] [-p ] + +Options:: + + -h or --help + Display this help message. 
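The cumulative percentages plotted by wordfreq_app above are easy to verify by hand on a tiny invented distribution:

from nltk.probability import FreqDist

fd = FreqDist({"the": 5, "dog": 3, "barked": 2})   # invented counts
samples = [item for item, _ in fd.most_common(50)]
values = [fd[sample] for sample in samples]
values = [sum(values[: i + 1]) * 100.0 / fd.N() for i in range(len(values))]
print(list(zip(samples, values)))   # [('the', 50.0), ('dog', 80.0), ('barked', 100.0)]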
+ + -l or --log-file + Logs messages to the given file, If this option is not specified + messages are silently dropped. + + -p or --port + Run the web server on this TCP port, defaults to 8000. + + -s or --server-mode + Do not start a web browser, and do not allow a user to + shutdown the server through the web interface. +""" +# TODO: throughout this package variable names and docstrings need +# modifying to be compliant with NLTK's coding standards. Tests also +# need to be develop to ensure this continues to work in the face of +# changes to other NLTK packages. + +import base64 +import copy +import getopt +import io +import os +import pickle +import sys +import threading +import time +import webbrowser +from collections import defaultdict +from http.server import BaseHTTPRequestHandler, HTTPServer + +# Allow this program to run inside the NLTK source tree. +from sys import argv +from urllib.parse import unquote_plus + +from nltk.corpus import wordnet as wn +from nltk.corpus.reader.wordnet import Lemma, Synset + +firstClient = True + +# True if we're not also running a web browser. The value f server_mode +# gets set by demo(). +server_mode = None + +# If set this is a file object for writing log messages. +logfile = None + + +class MyServerHandler(BaseHTTPRequestHandler): + def do_HEAD(self): + self.send_head() + + def do_GET(self): + global firstClient + sp = self.path[1:] + if unquote_plus(sp) == "SHUTDOWN THE SERVER": + if server_mode: + page = "Server must be killed with SIGTERM." + type = "text/plain" + else: + print("Server shutting down!") + os._exit(0) + + elif sp == "": # First request. + type = "text/html" + if not server_mode and firstClient: + firstClient = False + page = get_static_index_page(True) + else: + page = get_static_index_page(False) + word = "green" + + elif sp.endswith(".html"): # Trying to fetch a HTML file TODO: + type = "text/html" + usp = unquote_plus(sp) + if usp == "NLTK Wordnet Browser Database Info.html": + word = "* Database Info *" + if os.path.isfile(usp): + with open(usp) as infile: + page = infile.read() + else: + page = ( + (html_header % word) + "

<p>The database info file:"
                        "<p><b>"
                        + usp
                        + "</b>"
                        + "<p>was not found. Run this:"
                        + "<p><b>python dbinfo_html.py</b>"
                        + "<p>
to produce it." + + html_trailer + ) + else: + # Handle files here. + word = sp + try: + page = get_static_page_by_path(usp) + except FileNotFoundError: + page = "Internal error: Path for static page '%s' is unknown" % usp + # Set type to plain to prevent XSS by printing the path as HTML + type = "text/plain" + elif sp.startswith("search"): + # This doesn't seem to work with MWEs. + type = "text/html" + parts = (sp.split("?")[1]).split("&") + word = [ + p.split("=")[1].replace("+", " ") + for p in parts + if p.startswith("nextWord") + ][0] + page, word = page_from_word(word) + elif sp.startswith("lookup_"): + # TODO add a variation of this that takes a non ecoded word or MWE. + type = "text/html" + sp = sp[len("lookup_") :] + page, word = page_from_href(sp) + elif sp == "start_page": + # if this is the first request we should display help + # information, and possibly set a default word. + type = "text/html" + page, word = page_from_word("wordnet") + else: + type = "text/plain" + page = "Could not parse request: '%s'" % sp + + # Send result. + self.send_head(type) + self.wfile.write(page.encode("utf8")) + + def send_head(self, type=None): + self.send_response(200) + self.send_header("Content-type", type) + self.end_headers() + + def log_message(self, format, *args): + global logfile + + if logfile: + logfile.write( + "%s - - [%s] %s\n" + % (self.address_string(), self.log_date_time_string(), format % args) + ) + + +def get_unique_counter_from_url(sp): + """ + Extract the unique counter from the URL if it has one. Otherwise return + null. + """ + pos = sp.rfind("%23") + if pos != -1: + return int(sp[(pos + 3) :]) + else: + return None + + +def wnb(port=8000, runBrowser=True, logfilename=None): + """ + Run NLTK Wordnet Browser Server. + + :param port: The port number for the server to listen on, defaults to + 8000 + :type port: int + + :param runBrowser: True to start a web browser and point it at the web + server. + :type runBrowser: bool + """ + # The webbrowser module is unpredictable, typically it blocks if it uses + # a console web browser, and doesn't block if it uses a GUI webbrowser, + # so we need to force it to have a clear correct behaviour. + # + # Normally the server should run for as long as the user wants. they + # should idealy be able to control this from the UI by closing the + # window or tab. Second best would be clicking a button to say + # 'Shutdown' that first shutsdown the server and closes the window or + # tab, or exits the text-mode browser. Both of these are unfreasable. + # + # The next best alternative is to start the server, have it close when + # it receives SIGTERM (default), and run the browser as well. The user + # may have to shutdown both programs. + # + # Since webbrowser may block, and the webserver will block, we must run + # them in separate threads. + # + global server_mode, logfile + server_mode = not runBrowser + + # Setup logging. + if logfilename: + try: + logfile = open(logfilename, "a", 1) # 1 means 'line buffering' + except OSError as e: + sys.stderr.write("Couldn't open %s for writing: %s", logfilename, e) + sys.exit(1) + else: + logfile = None + + # Compute URL and start web browser + url = "http://localhost:" + str(port) + + server_ready = None + browser_thread = None + + if runBrowser: + server_ready = threading.Event() + browser_thread = startBrowser(url, server_ready) + + # Start the server. 
+ server = HTTPServer(("", port), MyServerHandler) + if logfile: + logfile.write("NLTK Wordnet browser server running serving: %s\n" % url) + if runBrowser: + server_ready.set() + + try: + server.serve_forever() + except KeyboardInterrupt: + pass + + if runBrowser: + browser_thread.join() + + if logfile: + logfile.close() + + +def startBrowser(url, server_ready): + def run(): + server_ready.wait() + time.sleep(1) # Wait a little bit more, there's still the chance of + # a race condition. + webbrowser.open(url, new=2, autoraise=1) + + t = threading.Thread(target=run) + t.start() + return t + + +##################################################################### +# Utilities +##################################################################### + + +""" +WordNet Browser Utilities. + +This provides a backend to both wxbrowse and browserver.py. +""" + +################################################################################ +# +# Main logic for wordnet browser. +# + +# This is wrapped inside a function since wn is only available if the +# WordNet corpus is installed. +def _pos_tuples(): + return [ + (wn.NOUN, "N", "noun"), + (wn.VERB, "V", "verb"), + (wn.ADJ, "J", "adj"), + (wn.ADV, "R", "adv"), + ] + + +def _pos_match(pos_tuple): + """ + This function returns the complete pos tuple for the partial pos + tuple given to it. It attempts to match it against the first + non-null component of the given pos tuple. + """ + if pos_tuple[0] == "s": + pos_tuple = ("a", pos_tuple[1], pos_tuple[2]) + for n, x in enumerate(pos_tuple): + if x is not None: + break + for pt in _pos_tuples(): + if pt[n] == pos_tuple[n]: + return pt + return None + + +HYPONYM = 0 +HYPERNYM = 1 +CLASS_REGIONAL = 2 +PART_HOLONYM = 3 +PART_MERONYM = 4 +ATTRIBUTE = 5 +SUBSTANCE_HOLONYM = 6 +SUBSTANCE_MERONYM = 7 +MEMBER_HOLONYM = 8 +MEMBER_MERONYM = 9 +VERB_GROUP = 10 +INSTANCE_HYPONYM = 12 +INSTANCE_HYPERNYM = 13 +CAUSE = 14 +ALSO_SEE = 15 +SIMILAR = 16 +ENTAILMENT = 17 +ANTONYM = 18 +FRAMES = 19 +PERTAINYM = 20 + +CLASS_CATEGORY = 21 +CLASS_USAGE = 22 +CLASS_REGIONAL = 23 +CLASS_USAGE = 24 +CLASS_CATEGORY = 11 + +DERIVATIONALLY_RELATED_FORM = 25 + +INDIRECT_HYPERNYMS = 26 + + +def lemma_property(word, synset, func): + def flattern(l): + if l == []: + return [] + else: + return l[0] + flattern(l[1:]) + + return flattern([func(l) for l in synset.lemmas() if l.name == word]) + + +def rebuild_tree(orig_tree): + node = orig_tree[0] + children = orig_tree[1:] + return (node, [rebuild_tree(t) for t in children]) + + +def get_relations_data(word, synset): + """ + Get synset relations data for a synset. Note that this doesn't + yet support things such as full hyponym vs direct hyponym. 
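
    A hedged usage sketch (editor's addition, not from the original source;
    it assumes the WordNet corpus has been installed via ``nltk.download``)::

        >>> from nltk.corpus import wordnet as wn
        >>> from nltk.app.wordnet_app import get_relations_data
        >>> dog = wn.synsets("dog", pos=wn.NOUN)[0]
        >>> [name for _, name, values in get_relations_data("dog", dog) if values]  # doctest: +SKIP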
+ """ + if synset.pos() == wn.NOUN: + return ( + (HYPONYM, "Hyponyms", synset.hyponyms()), + (INSTANCE_HYPONYM, "Instance hyponyms", synset.instance_hyponyms()), + (HYPERNYM, "Direct hypernyms", synset.hypernyms()), + ( + INDIRECT_HYPERNYMS, + "Indirect hypernyms", + rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1], + ), + # hypernyms', 'Sister terms', + (INSTANCE_HYPERNYM, "Instance hypernyms", synset.instance_hypernyms()), + # (CLASS_REGIONAL, ['domain term region'], ), + (PART_HOLONYM, "Part holonyms", synset.part_holonyms()), + (PART_MERONYM, "Part meronyms", synset.part_meronyms()), + (SUBSTANCE_HOLONYM, "Substance holonyms", synset.substance_holonyms()), + (SUBSTANCE_MERONYM, "Substance meronyms", synset.substance_meronyms()), + (MEMBER_HOLONYM, "Member holonyms", synset.member_holonyms()), + (MEMBER_MERONYM, "Member meronyms", synset.member_meronyms()), + (ATTRIBUTE, "Attributes", synset.attributes()), + (ANTONYM, "Antonyms", lemma_property(word, synset, lambda l: l.antonyms())), + ( + DERIVATIONALLY_RELATED_FORM, + "Derivationally related form", + lemma_property( + word, synset, lambda l: l.derivationally_related_forms() + ), + ), + ) + elif synset.pos() == wn.VERB: + return ( + (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())), + (HYPONYM, "Hyponym", synset.hyponyms()), + (HYPERNYM, "Direct hypernyms", synset.hypernyms()), + ( + INDIRECT_HYPERNYMS, + "Indirect hypernyms", + rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1], + ), + (ENTAILMENT, "Entailments", synset.entailments()), + (CAUSE, "Causes", synset.causes()), + (ALSO_SEE, "Also see", synset.also_sees()), + (VERB_GROUP, "Verb Groups", synset.verb_groups()), + ( + DERIVATIONALLY_RELATED_FORM, + "Derivationally related form", + lemma_property( + word, synset, lambda l: l.derivationally_related_forms() + ), + ), + ) + elif synset.pos() == wn.ADJ or synset.pos == wn.ADJ_SAT: + return ( + (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())), + (SIMILAR, "Similar to", synset.similar_tos()), + # Participle of verb - not supported by corpus + ( + PERTAINYM, + "Pertainyms", + lemma_property(word, synset, lambda l: l.pertainyms()), + ), + (ATTRIBUTE, "Attributes", synset.attributes()), + (ALSO_SEE, "Also see", synset.also_sees()), + ) + elif synset.pos() == wn.ADV: + # This is weird. adverbs such as 'quick' and 'fast' don't seem + # to have antonyms returned by the corpus.a + return ( + (ANTONYM, "Antonym", lemma_property(word, synset, lambda l: l.antonyms())), + ) + # Derived from adjective - not supported by corpus + else: + raise TypeError("Unhandles synset POS type: " + str(synset.pos())) + + +html_header = """ + + + + + +NLTK Wordnet Browser display of: %s + +""" +html_trailer = """ + + +""" + +explanation = """ +

<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <b>S:</b> opens a section showing the relations for that synset.</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li></ul>
"""


# HTML oriented functions


def _bold(txt):
    return "<b>%s</b>" % txt


def _center(txt):
    return "<center>%s</center>" % txt


def _hlev(n, txt):
    return "<h%d>%s</h%d>" % (n, txt, n)


def _italic(txt):
    return "<i>%s</i>" % txt


def _li(txt):
    return "<li>%s</li>" % txt


def pg(word, body):
    """
    Return a HTML page of NLTK Browser format constructed from the
    word and body

    :param word: The word that the body corresponds to
    :type word: str
    :param body: The HTML body corresponding to the word
    :type body: str
    :return: a HTML page for the word-body combination
    :rtype: str
    """
    return (html_header % word) + body + html_trailer


def _ul(txt):
    return "<ul>" + txt + "</ul>"


def _abbc(txt):
    """
    abbc = asterisks, breaks, bold, center
    """
    return _center(_bold("<br>
    " * 10 + "*" * 10 + " " + txt + " " + "*" * 10)) + + +full_hyponym_cont_text = _ul(_li(_italic("(has full hyponym continuation)"))) + "\n" + + +def _get_synset(synset_key): + """ + The synset key is the unique name of the synset, this can be + retrieved via synset.name() + """ + return wn.synset(synset_key) + + +def _collect_one_synset(word, synset, synset_relations): + """ + Returns the HTML string for one synset or word + + :param word: the current word + :type word: str + :param synset: a synset + :type synset: synset + :param synset_relations: information about which synset relations + to display. + :type synset_relations: dict(synset_key, set(relation_id)) + :return: The HTML string built for this synset + :rtype: str + """ + if isinstance(synset, tuple): # It's a word + raise NotImplementedError("word not supported by _collect_one_synset") + + typ = "S" + pos_tuple = _pos_match((synset.pos(), None, None)) + assert pos_tuple is not None, "pos_tuple is null: synset.pos(): %s" % synset.pos() + descr = pos_tuple[2] + ref = copy.deepcopy(Reference(word, synset_relations)) + ref.toggle_synset(synset) + synset_label = typ + ";" + if synset.name() in synset_relations: + synset_label = _bold(synset_label) + s = f"
<li>{make_lookup_link(ref, synset_label)} ({descr}) "

    def format_lemma(w):
        w = w.replace("_", " ")
        if w.lower() == word:
            return _bold(w)
        else:
            ref = Reference(w)
            return make_lookup_link(ref, w)

    s += ", ".join(format_lemma(l.name()) for l in synset.lemmas())

    gl = " ({}) <i>{}</i> ".format(
        synset.definition(),
        "; ".join('"%s"' % e for e in synset.examples()),
    )
    return s + gl + _synset_relations(word, synset, synset_relations) + "</li>\n"


def _collect_all_synsets(word, pos, synset_relations=dict()):
    """
    Return a HTML unordered list of synsets for the given word and
    part of speech.
    """
    return "<ul>%s\n</ul>
    \n" % "".join( + _collect_one_synset(word, synset, synset_relations) + for synset in wn.synsets(word, pos) + ) + + +def _synset_relations(word, synset, synset_relations): + """ + Builds the HTML string for the relations of a synset + + :param word: The current word + :type word: str + :param synset: The synset for which we're building the relations. + :type synset: Synset + :param synset_relations: synset keys and relation types for which to display relations. + :type synset_relations: dict(synset_key, set(relation_type)) + :return: The HTML for a synset's relations + :rtype: str + """ + + if not synset.name() in synset_relations: + return "" + ref = Reference(word, synset_relations) + + def relation_html(r): + if isinstance(r, Synset): + return make_lookup_link(Reference(r.lemma_names()[0]), r.lemma_names()[0]) + elif isinstance(r, Lemma): + return relation_html(r.synset()) + elif isinstance(r, tuple): + # It's probably a tuple containing a Synset and a list of + # similar tuples. This forms a tree of synsets. + return "{}\n
<ul>{}</ul>\n".format(
                relation_html(r[0]),
                "".join("<li>%s</li>\n" % relation_html(sr) for sr in r[1]),
            )
        else:
            raise TypeError(
                "r must be a synset, lemma or list, it was: type(r) = %s, r = %s"
                % (type(r), r)
            )

    def make_synset_html(db_name, disp_name, rels):
        synset_html = "<i>%s</i>\n" % make_lookup_link(
            copy.deepcopy(ref).toggle_synset_relation(synset, db_name),
            disp_name,
        )

        if db_name in ref.synset_relations[synset.name()]:
            synset_html += "<ul>%s</ul>\n" % "".join(
                "<li>%s</li>\n" % relation_html(r) for r in rels
            )

        return synset_html

    html = (
        "<ul>"
        + "\n".join(
            "<li>%s</li>" % make_synset_html(*rel_data)
            for rel_data in get_relations_data(word, synset)
            if rel_data[2] != []
        )
        + "</ul>
    " + ) + + return html + + +class RestrictedUnpickler(pickle.Unpickler): + """ + Unpickler that prevents any class or function from being used during loading. + """ + + def find_class(self, module, name): + # Forbid every function + raise pickle.UnpicklingError(f"global '{module}.{name}' is forbidden") + + +class Reference: + """ + A reference to a page that may be generated by page_word + """ + + def __init__(self, word, synset_relations=dict()): + """ + Build a reference to a new page. + + word is the word or words (separated by commas) for which to + search for synsets of + + synset_relations is a dictionary of synset keys to sets of + synset relation identifaiers to unfold a list of synset + relations for. + """ + self.word = word + self.synset_relations = synset_relations + + def encode(self): + """ + Encode this reference into a string to be used in a URL. + """ + # This uses a tuple rather than an object since the python + # pickle representation is much smaller and there is no need + # to represent the complete object. + string = pickle.dumps((self.word, self.synset_relations), -1) + return base64.urlsafe_b64encode(string).decode() + + @staticmethod + def decode(string): + """ + Decode a reference encoded with Reference.encode + """ + string = base64.urlsafe_b64decode(string.encode()) + word, synset_relations = RestrictedUnpickler(io.BytesIO(string)).load() + return Reference(word, synset_relations) + + def toggle_synset_relation(self, synset, relation): + """ + Toggle the display of the relations for the given synset and + relation type. + + This function will throw a KeyError if the synset is currently + not being displayed. + """ + if relation in self.synset_relations[synset.name()]: + self.synset_relations[synset.name()].remove(relation) + else: + self.synset_relations[synset.name()].add(relation) + + return self + + def toggle_synset(self, synset): + """ + Toggle displaying of the relation types for the given synset + """ + if synset.name() in self.synset_relations: + del self.synset_relations[synset.name()] + else: + self.synset_relations[synset.name()] = set() + + return self + + +def make_lookup_link(ref, label): + return f'{label}' + + +def page_from_word(word): + """ + Return a HTML page for the given word. + + :type word: str + :param word: The currently active word + :return: A tuple (page,word), where page is the new current HTML page + to be sent to the browser and + word is the new current word + :rtype: A tuple (str,str) + """ + return page_from_reference(Reference(word)) + + +def page_from_href(href): + """ + Returns a tuple of the HTML page built and the new current word + + :param href: The hypertext reference to be solved + :type href: str + :return: A tuple (page,word), where page is the new current HTML page + to be sent to the browser and + word is the new current word + :rtype: A tuple (str,str) + """ + return page_from_reference(Reference.decode(href)) + + +def page_from_reference(href): + """ + Returns a tuple of the HTML page built and the new current word + + :param href: The hypertext reference to be solved + :type href: str + :return: A tuple (page,word), where page is the new current HTML page + to be sent to the browser and + word is the new current word + :rtype: A tuple (str,str) + """ + word = href.word + pos_forms = defaultdict(list) + words = word.split(",") + words = [w for w in [w.strip().lower().replace(" ", "_") for w in words] if w != ""] + if len(words) == 0: + # No words were found. + return "", "Please specify a word to search for." 
+ + # This looks up multiple words at once. This is probably not + # necessary and may lead to problems. + for w in words: + for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]: + form = wn.morphy(w, pos) + if form and form not in pos_forms[pos]: + pos_forms[pos].append(form) + body = "" + for pos, pos_str, name in _pos_tuples(): + if pos in pos_forms: + body += _hlev(3, name) + "\n" + for w in pos_forms[pos]: + # Not all words of exc files are in the database, skip + # to the next word if a KeyError is raised. + try: + body += _collect_all_synsets(w, pos, href.synset_relations) + except KeyError: + pass + if not body: + body = "The word or words '%s' were not found in the dictionary." % word + return body, word + + +##################################################################### +# Static pages +##################################################################### + + +def get_static_page_by_path(path): + """ + Return a static HTML page from the path given. + """ + if path == "index_2.html": + return get_static_index_page(False) + elif path == "index.html": + return get_static_index_page(True) + elif path == "NLTK Wordnet Browser Database Info.html": + return "Display of Wordnet Database Statistics is not supported" + elif path == "upper_2.html": + return get_static_upper_page(False) + elif path == "upper.html": + return get_static_upper_page(True) + elif path == "web_help.html": + return get_static_web_help_page() + elif path == "wx_help.html": + return get_static_wx_help_page() + raise FileNotFoundError() + + +def get_static_web_help_page(): + """ + Return the static web help page. + """ + return """ + + + + + + NLTK Wordnet Browser display of: * Help * + + +

    NLTK Wordnet Browser Help

    +

    The NLTK Wordnet Browser is a tool for browsing the Wordnet database. It tries to behave like the Wordnet project's web browser, except that the NLTK Wordnet Browser uses a local Wordnet database.

    You are using the Javascript client part of the NLTK Wordnet BrowServer. We assume that your browser has tabbed browsing enabled.

    +

    For background information on Wordnet, see the Wordnet project home page: https://wordnet.princeton.edu/. For more information on the NLTK project, see the project home: https://www.nltk.org/. To get an idea of what the Wordnet version used by this browser includes, choose Show Database Info from the View submenu.

    +

    Word search

    +

    The word to be searched is typed into the New Word field, and the search is started with Enter or by clicking the Search button. There is no uppercase/lowercase distinction: the search word is transformed to lowercase before the search.

    +

    In addition, the word does not have to be in base form. The browser tries to find the possible base form(s) by making certain morphological substitutions. Typing fLIeS as an obscure example gives one this. Click the previous link to see what this kind of search looks like and then come back to this page by using the Alt+LeftArrow key combination.

    +

    The result of a search is a display of one or more +synsets for every part of speech in which a form of the +search word was found to occur. A synset is a set of words +having the same sense or meaning. Each word in a synset that is +underlined is a hyperlink which can be clicked to trigger an +automatic search for that word.

    +

    Every synset has a hyperlink S: at the start of its +display line. Clicking that symbol shows you the name of every +relation that this synset is part of. Every relation name is a hyperlink that opens up a display for that relation. Clicking it another time closes the display again. Clicking another relation name on a line that has an opened relation closes the open relation and opens the clicked relation.

    +

    It is also possible to give two or more words or collocations to be searched at the same time, separating them with a comma, for example: cheer up,clear up. Click the previous link to see what this kind of search looks like and then come back to this page by using the Alt+LeftArrow key combination. As you can see, the search result includes the synsets found in the same order as the forms were given in the search field.

    +

    +There are also word level (lexical) relations recorded in the Wordnet database. Opening this kind of relation displays lines with a hyperlink W: at their beginning. Clicking this link shows more info on the word in question.

    +

    The Buttons

    +

    The Search and Help buttons need no more explanation.

    +

    The Show Database Info button shows a collection of Wordnet database statistics.

    +

    The Shutdown the Server button is shown for the first client of the BrowServer program, i.e. for the client that is automatically launched when the BrowServer is started, but not for the succeeding clients, in order to protect the server from accidental shutdowns.

    + +""" + + +def get_static_welcome_message(): + """ + Get the static welcome page. + """ + return """ +

<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <b>S:</b> opens a section showing the relations for that synset.</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Next Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li></ul>
    +""" + + +def get_static_index_page(with_shutdown): + """ + Get the static index page. + """ + template = """ + + + + + NLTK Wordnet Browser + + + + + + + +""" + if with_shutdown: + upper_link = "upper.html" + else: + upper_link = "upper_2.html" + + return template % upper_link + + +def get_static_upper_page(with_shutdown): + """ + Return the upper frame page, + + If with_shutdown is True then a 'shutdown' button is also provided + to shutdown the server. + """ + template = """ + + + + + + Untitled Document + + +
    + Current Word:  + Next Word:  + +
    + Help + %s + + + +""" + if with_shutdown: + shutdown_link = 'Shutdown' + else: + shutdown_link = "" + + return template % shutdown_link + + +def usage(): + """ + Display the command line help message. + """ + print(__doc__) + + +def app(): + # Parse and interpret options. + (opts, _) = getopt.getopt( + argv[1:], "l:p:sh", ["logfile=", "port=", "server-mode", "help"] + ) + port = 8000 + server_mode = False + help_mode = False + logfilename = None + for (opt, value) in opts: + if (opt == "-l") or (opt == "--logfile"): + logfilename = str(value) + elif (opt == "-p") or (opt == "--port"): + port = int(value) + elif (opt == "-s") or (opt == "--server-mode"): + server_mode = True + elif (opt == "-h") or (opt == "--help"): + help_mode = True + + if help_mode: + usage() + else: + wnb(port, not server_mode, logfilename) + + +if __name__ == "__main__": + app() + +__all__ = ["app"] diff --git a/venv/lib/python3.10/site-packages/nltk/chunk/__init__.py b/venv/lib/python3.10/site-packages/nltk/chunk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..208da9f5678f4b79282d5e6886502627ab9161ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chunk/__init__.py @@ -0,0 +1,197 @@ +# Natural Language Toolkit: Chunkers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +# + +""" +Classes and interfaces for identifying non-overlapping linguistic +groups (such as base noun phrases) in unrestricted text. This task is +called "chunk parsing" or "chunking", and the identified groups are +called "chunks". The chunked text is represented using a shallow +tree called a "chunk structure." A chunk structure is a tree +containing tokens and chunks, where each chunk is a subtree containing +only tokens. For example, the chunk structure for base noun phrase +chunks in the sentence "I saw the big dog on the hill" is:: + + (SENTENCE: + (NP: ) + + (NP: ) + + (NP: )) + +To convert a chunk structure back to a list of tokens, simply use the +chunk structure's ``leaves()`` method. + +This module defines ``ChunkParserI``, a standard interface for +chunking texts; and ``RegexpChunkParser``, a regular-expression based +implementation of that interface. It also defines ``ChunkScore``, a +utility class for scoring chunk parsers. + +RegexpChunkParser +================= + +``RegexpChunkParser`` is an implementation of the chunk parser interface +that uses regular-expressions over tags to chunk a text. Its +``parse()`` method first constructs a ``ChunkString``, which encodes a +particular chunking of the input text. Initially, nothing is +chunked. ``parse.RegexpChunkParser`` then applies a sequence of +``RegexpChunkRule`` rules to the ``ChunkString``, each of which modifies +the chunking that it encodes. Finally, the ``ChunkString`` is +transformed back into a chunk structure, which is returned. + +``RegexpChunkParser`` can only be used to chunk a single kind of phrase. +For example, you can use an ``RegexpChunkParser`` to chunk the noun +phrases in a text, or the verb phrases in a text; but you can not +use it to simultaneously chunk both noun phrases and verb phrases in +the same text. (This is a limitation of ``RegexpChunkParser``, not of +chunk parsers in general.) + +RegexpChunkRules +---------------- + +A ``RegexpChunkRule`` is a transformational rule that updates the +chunking of a text by modifying its ``ChunkString``. 
Each +``RegexpChunkRule`` defines the ``apply()`` method, which modifies +the chunking encoded by a ``ChunkString``. The +``RegexpChunkRule`` class itself can be used to implement any +transformational rule based on regular expressions. There are +also a number of subclasses, which can be used to implement +simpler types of rules: + + - ``ChunkRule`` chunks anything that matches a given regular + expression. + - ``StripRule`` strips anything that matches a given regular + expression. + - ``UnChunkRule`` will un-chunk any chunk that matches a given + regular expression. + - ``MergeRule`` can be used to merge two contiguous chunks. + - ``SplitRule`` can be used to split a single chunk into two + smaller chunks. + - ``ExpandLeftRule`` will expand a chunk to incorporate new + unchunked material on the left. + - ``ExpandRightRule`` will expand a chunk to incorporate new + unchunked material on the right. + +Tag Patterns +~~~~~~~~~~~~ + +A ``RegexpChunkRule`` uses a modified version of regular +expression patterns, called "tag patterns". Tag patterns are +used to match sequences of tags. Examples of tag patterns are:: + + r'(
<DT>|<JJ>|<NN>)+'
       r'<NN>+'
       r'<NN.*>'

The differences between regular expression patterns and tag
patterns are:

    - In tag patterns, ``'<'`` and ``'>'`` act as parentheses; so
      ``'<NN>+'`` matches one or more repetitions of ``'<NN>'``, not
      ``'<NN'`` followed by one or more repetitions of ``'>'``.
    - Whitespace in tag patterns is ignored.  So
      ``'<DT> | <NN>'`` is equivalent to ``'<DT>
    |'`` + - In tag patterns, ``'.'`` is equivalent to ``'[^{}<>]'``; so + ``''`` matches any single tag starting with ``'NN'``. + +The function ``tag_pattern2re_pattern`` can be used to transform +a tag pattern to an equivalent regular expression pattern. + +Efficiency +---------- + +Preliminary tests indicate that ``RegexpChunkParser`` can chunk at a +rate of about 300 tokens/second, with a moderately complex rule set. + +There may be problems if ``RegexpChunkParser`` is used with more than +5,000 tokens at a time. In particular, evaluation of some regular +expressions may cause the Python regular expression engine to +exceed its maximum recursion depth. We have attempted to minimize +these problems, but it is impossible to avoid them completely. We +therefore recommend that you apply the chunk parser to a single +sentence at a time. + +Emacs Tip +--------- + +If you evaluate the following elisp expression in emacs, it will +colorize a ``ChunkString`` when you use an interactive python shell +with emacs or xemacs ("C-c !"):: + + (let () + (defconst comint-mode-font-lock-keywords + '(("<[^>]+>" 0 'font-lock-reference-face) + ("[{}]" 0 'font-lock-function-name-face))) + (add-hook 'comint-mode-hook (lambda () (turn-on-font-lock)))) + +You can evaluate this code by copying it to a temporary buffer, +placing the cursor after the last close parenthesis, and typing +"``C-x C-e``". You should evaluate it before running the interactive +session. The change will last until you close emacs. + +Unresolved Issues +----------------- + +If we use the ``re`` module for regular expressions, Python's +regular expression engine generates "maximum recursion depth +exceeded" errors when processing very large texts, even for +regular expressions that should not require any recursion. We +therefore use the ``pre`` module instead. But note that ``pre`` +does not include Unicode support, so this module will not work +with unicode strings. Note also that ``pre`` regular expressions +are not quite as advanced as ``re`` ones (e.g., no leftward +zero-length assertions). + +:type CHUNK_TAG_PATTERN: regexp +:var CHUNK_TAG_PATTERN: A regular expression to test whether a tag + pattern is valid. +""" + +from nltk.chunk.api import ChunkParserI +from nltk.chunk.regexp import RegexpChunkParser, RegexpParser +from nltk.chunk.util import ( + ChunkScore, + accuracy, + conllstr2tree, + conlltags2tree, + ieerstr2tree, + tagstr2tree, + tree2conllstr, + tree2conlltags, +) +from nltk.data import load + +# Standard treebank POS tagger +_BINARY_NE_CHUNKER = "chunkers/maxent_ne_chunker/english_ace_binary.pickle" +_MULTICLASS_NE_CHUNKER = "chunkers/maxent_ne_chunker/english_ace_multiclass.pickle" + + +def ne_chunk(tagged_tokens, binary=False): + """ + Use NLTK's currently recommended named entity chunker to + chunk the given list of tagged tokens. + """ + if binary: + chunker_pickle = _BINARY_NE_CHUNKER + else: + chunker_pickle = _MULTICLASS_NE_CHUNKER + chunker = load(chunker_pickle) + return chunker.parse(tagged_tokens) + + +def ne_chunk_sents(tagged_sentences, binary=False): + """ + Use NLTK's currently recommended named entity chunker to chunk the + given list of tagged sentences, each consisting of a list of tagged tokens. 
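
    A hedged usage sketch (editor's addition; it assumes the ``punkt``,
    ``averaged_perceptron_tagger``, ``maxent_ne_chunker`` and ``words``
    resources have been fetched with ``nltk.download``)::

        >>> from nltk import pos_tag, word_tokenize
        >>> sents = [pos_tag(word_tokenize("John works for NLTK in London."))]
        >>> for tree in ne_chunk_sents(sents, binary=True):
        ...     print(tree)  # doctest: +SKIP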
+ """ + if binary: + chunker_pickle = _BINARY_NE_CHUNKER + else: + chunker_pickle = _MULTICLASS_NE_CHUNKER + chunker = load(chunker_pickle) + return chunker.parse_sents(tagged_sentences) diff --git a/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f6971ff0cae51c15b965ccd62600912b2aef708 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..801aaa3a0462bc03ab3f104a3c87d33654741f3d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/named_entity.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/named_entity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c987d19c8eff02ec7dd9a7471ae576dbd2c44dde Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/named_entity.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/regexp.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/regexp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17a0800f69511a48392231b167161bd1c4293b4c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/regexp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42deb93813c5dee1f82ace2d276d06eb2b17d39e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/chunk/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/chunk/api.py b/venv/lib/python3.10/site-packages/nltk/chunk/api.py new file mode 100644 index 0000000000000000000000000000000000000000..858490a7abb82375fba271d98037e53da6a17129 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chunk/api.py @@ -0,0 +1,56 @@ +# Natural Language Toolkit: Chunk parsing API +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT + +##////////////////////////////////////////////////////// +## Chunk Parser Interface +##////////////////////////////////////////////////////// + +from nltk.chunk.util import ChunkScore +from nltk.internals import deprecated +from nltk.parse import ParserI + + +class ChunkParserI(ParserI): + """ + A processing interface for identifying non-overlapping groups in + unrestricted text. Typically, chunk parsers are used to find base + syntactic constituents, such as base noun phrases. Unlike + ``ParserI``, ``ChunkParserI`` guarantees that the ``parse()`` method + will always generate a parse. + """ + + def parse(self, tokens): + """ + Return the best chunk structure for the given tokens + and return a tree. + + :param tokens: The list of (word, tag) tokens to be chunked. 
+ :type tokens: list(tuple) + :rtype: Tree + """ + raise NotImplementedError() + + @deprecated("Use accuracy(gold) instead.") + def evaluate(self, gold): + return self.accuracy(gold) + + def accuracy(self, gold): + """ + Score the accuracy of the chunker against the gold standard. + Remove the chunking the gold standard text, rechunk it using + the chunker, and return a ``ChunkScore`` object + reflecting the performance of this chunk parser. + + :type gold: list(Tree) + :param gold: The list of chunked sentences to score the chunker on. + :rtype: ChunkScore + """ + chunkscore = ChunkScore() + for correct in gold: + chunkscore.score(correct, self.parse(correct.leaves())) + return chunkscore diff --git a/venv/lib/python3.10/site-packages/nltk/chunk/named_entity.py b/venv/lib/python3.10/site-packages/nltk/chunk/named_entity.py new file mode 100644 index 0000000000000000000000000000000000000000..b8ab97742c9f0721a0bc1744703871ea278aba07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chunk/named_entity.py @@ -0,0 +1,352 @@ +# Natural Language Toolkit: Chunk parsing API +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Named entity chunker +""" + +import os +import pickle +import re +from xml.etree import ElementTree as ET + +from nltk.tag import ClassifierBasedTagger, pos_tag + +try: + from nltk.classify import MaxentClassifier +except ImportError: + pass + +from nltk.chunk.api import ChunkParserI +from nltk.chunk.util import ChunkScore +from nltk.data import find +from nltk.tokenize import word_tokenize +from nltk.tree import Tree + + +class NEChunkParserTagger(ClassifierBasedTagger): + """ + The IOB tagger used by the chunk parser. + """ + + def __init__(self, train): + ClassifierBasedTagger.__init__( + self, train=train, classifier_builder=self._classifier_builder + ) + + def _classifier_builder(self, train): + return MaxentClassifier.train( + train, algorithm="megam", gaussian_prior_sigma=1, trace=2 + ) + + def _english_wordlist(self): + try: + wl = self._en_wordlist + except AttributeError: + from nltk.corpus import words + + self._en_wordlist = set(words.words("en-basic")) + wl = self._en_wordlist + return wl + + def _feature_detector(self, tokens, index, history): + word = tokens[index][0] + pos = simplify_pos(tokens[index][1]) + if index == 0: + prevword = prevprevword = None + prevpos = prevprevpos = None + prevshape = prevtag = prevprevtag = None + elif index == 1: + prevword = tokens[index - 1][0].lower() + prevprevword = None + prevpos = simplify_pos(tokens[index - 1][1]) + prevprevpos = None + prevtag = history[index - 1][0] + prevshape = prevprevtag = None + else: + prevword = tokens[index - 1][0].lower() + prevprevword = tokens[index - 2][0].lower() + prevpos = simplify_pos(tokens[index - 1][1]) + prevprevpos = simplify_pos(tokens[index - 2][1]) + prevtag = history[index - 1] + prevprevtag = history[index - 2] + prevshape = shape(prevword) + if index == len(tokens) - 1: + nextword = nextnextword = None + nextpos = nextnextpos = None + elif index == len(tokens) - 2: + nextword = tokens[index + 1][0].lower() + nextpos = tokens[index + 1][1].lower() + nextnextword = None + nextnextpos = None + else: + nextword = tokens[index + 1][0].lower() + nextpos = tokens[index + 1][1].lower() + nextnextword = tokens[index + 2][0].lower() + nextnextpos = tokens[index + 2][1].lower() + + # 89.6 + features = { + "bias": True, + "shape": shape(word), + "wordlen": len(word), + "prefix3": word[:3].lower(), + 
"suffix3": word[-3:].lower(), + "pos": pos, + "word": word, + "en-wordlist": (word in self._english_wordlist()), + "prevtag": prevtag, + "prevpos": prevpos, + "nextpos": nextpos, + "prevword": prevword, + "nextword": nextword, + "word+nextpos": f"{word.lower()}+{nextpos}", + "pos+prevtag": f"{pos}+{prevtag}", + "shape+prevtag": f"{prevshape}+{prevtag}", + } + + return features + + +class NEChunkParser(ChunkParserI): + """ + Expected input: list of pos-tagged words + """ + + def __init__(self, train): + self._train(train) + + def parse(self, tokens): + """ + Each token should be a pos-tagged word + """ + tagged = self._tagger.tag(tokens) + tree = self._tagged_to_parse(tagged) + return tree + + def _train(self, corpus): + # Convert to tagged sequence + corpus = [self._parse_to_tagged(s) for s in corpus] + + self._tagger = NEChunkParserTagger(train=corpus) + + def _tagged_to_parse(self, tagged_tokens): + """ + Convert a list of tagged tokens to a chunk-parse tree. + """ + sent = Tree("S", []) + + for (tok, tag) in tagged_tokens: + if tag == "O": + sent.append(tok) + elif tag.startswith("B-"): + sent.append(Tree(tag[2:], [tok])) + elif tag.startswith("I-"): + if sent and isinstance(sent[-1], Tree) and sent[-1].label() == tag[2:]: + sent[-1].append(tok) + else: + sent.append(Tree(tag[2:], [tok])) + return sent + + @staticmethod + def _parse_to_tagged(sent): + """ + Convert a chunk-parse tree to a list of tagged tokens. + """ + toks = [] + for child in sent: + if isinstance(child, Tree): + if len(child) == 0: + print("Warning -- empty chunk in sentence") + continue + toks.append((child[0], f"B-{child.label()}")) + for tok in child[1:]: + toks.append((tok, f"I-{child.label()}")) + else: + toks.append((child, "O")) + return toks + + +def shape(word): + if re.match(r"[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$", word, re.UNICODE): + return "number" + elif re.match(r"\W+$", word, re.UNICODE): + return "punct" + elif re.match(r"\w+$", word, re.UNICODE): + if word.istitle(): + return "upcase" + elif word.islower(): + return "downcase" + else: + return "mixedcase" + else: + return "other" + + +def simplify_pos(s): + if s.startswith("V"): + return "V" + else: + return s.split("-")[0] + + +def postag_tree(tree): + # Part-of-speech tagging. + words = tree.leaves() + tag_iter = (pos for (word, pos) in pos_tag(words)) + newtree = Tree("S", []) + for child in tree: + if isinstance(child, Tree): + newtree.append(Tree(child.label(), [])) + for subchild in child: + newtree[-1].append((subchild, next(tag_iter))) + else: + newtree.append((child, next(tag_iter))) + return newtree + + +def load_ace_data(roots, fmt="binary", skip_bnews=True): + for root in roots: + for root, dirs, files in os.walk(root): + if root.endswith("bnews") and skip_bnews: + continue + for f in files: + if f.endswith(".sgm"): + yield from load_ace_file(os.path.join(root, f), fmt) + + +def load_ace_file(textfile, fmt): + print(f" - {os.path.split(textfile)[1]}") + annfile = textfile + ".tmx.rdc.xml" + + # Read the xml file, and get a list of entities + entities = [] + with open(annfile) as infile: + xml = ET.parse(infile).getroot() + for entity in xml.findall("document/entity"): + typ = entity.find("entity_type").text + for mention in entity.findall("entity_mention"): + if mention.get("TYPE") != "NAME": + continue # only NEs + s = int(mention.find("head/charseq/start").text) + e = int(mention.find("head/charseq/end").text) + 1 + entities.append((s, e, typ)) + + # Read the text file, and mark the entities. 
+ with open(textfile) as infile: + text = infile.read() + + # Strip XML tags, since they don't count towards the indices + text = re.sub("<(?!/?TEXT)[^>]+>", "", text) + + # Blank out anything before/after + def subfunc(m): + return " " * (m.end() - m.start() - 6) + + text = re.sub(r"[\s\S]*", subfunc, text) + text = re.sub(r"[\s\S]*", "", text) + + # Simplify quotes + text = re.sub("``", ' "', text) + text = re.sub("''", '" ', text) + + entity_types = {typ for (s, e, typ) in entities} + + # Binary distinction (NE or not NE) + if fmt == "binary": + i = 0 + toks = Tree("S", []) + for (s, e, typ) in sorted(entities): + if s < i: + s = i # Overlapping! Deal with this better? + if e <= s: + continue + toks.extend(word_tokenize(text[i:s])) + toks.append(Tree("NE", text[s:e].split())) + i = e + toks.extend(word_tokenize(text[i:])) + yield toks + + # Multiclass distinction (NE type) + elif fmt == "multiclass": + i = 0 + toks = Tree("S", []) + for (s, e, typ) in sorted(entities): + if s < i: + s = i # Overlapping! Deal with this better? + if e <= s: + continue + toks.extend(word_tokenize(text[i:s])) + toks.append(Tree(typ, text[s:e].split())) + i = e + toks.extend(word_tokenize(text[i:])) + yield toks + + else: + raise ValueError("bad fmt value") + + +# This probably belongs in a more general-purpose location (as does +# the parse_to_tagged function). +def cmp_chunks(correct, guessed): + correct = NEChunkParser._parse_to_tagged(correct) + guessed = NEChunkParser._parse_to_tagged(guessed) + ellipsis = False + for (w, ct), (w, gt) in zip(correct, guessed): + if ct == gt == "O": + if not ellipsis: + print(f" {ct:15} {gt:15} {w}") + print(" {:15} {:15} {2}".format("...", "...", "...")) + ellipsis = True + else: + ellipsis = False + print(f" {ct:15} {gt:15} {w}") + + +def build_model(fmt="binary"): + print("Loading training data...") + train_paths = [ + find("corpora/ace_data/ace.dev"), + find("corpora/ace_data/ace.heldout"), + find("corpora/ace_data/bbn.dev"), + find("corpora/ace_data/muc.dev"), + ] + train_trees = load_ace_data(train_paths, fmt) + train_data = [postag_tree(t) for t in train_trees] + print("Training...") + cp = NEChunkParser(train_data) + del train_data + + print("Loading eval data...") + eval_paths = [find("corpora/ace_data/ace.eval")] + eval_trees = load_ace_data(eval_paths, fmt) + eval_data = [postag_tree(t) for t in eval_trees] + + print("Evaluating...") + chunkscore = ChunkScore() + for i, correct in enumerate(eval_data): + guess = cp.parse(correct.leaves()) + chunkscore.score(correct, guess) + if i < 3: + cmp_chunks(correct, guess) + print(chunkscore) + + outfilename = f"/tmp/ne_chunker_{fmt}.pickle" + print(f"Saving chunker to {outfilename}...") + + with open(outfilename, "wb") as outfile: + pickle.dump(cp, outfile, -1) + + return cp + + +if __name__ == "__main__": + # Make sure that the pickled object has the right class name: + from nltk.chunk.named_entity import build_model + + build_model("binary") + build_model("multiclass") diff --git a/venv/lib/python3.10/site-packages/nltk/chunk/regexp.py b/venv/lib/python3.10/site-packages/nltk/chunk/regexp.py new file mode 100644 index 0000000000000000000000000000000000000000..4369119706106db2892e47675ffd039e85db888d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chunk/regexp.py @@ -0,0 +1,1475 @@ +# Natural Language Toolkit: Regular Expression Chunkers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT + +import re + 
+import regex + +from nltk.chunk.api import ChunkParserI +from nltk.tree import Tree + +# ////////////////////////////////////////////////////// +# ChunkString +# ////////////////////////////////////////////////////// + + +class ChunkString: + """ + A string-based encoding of a particular chunking of a text. + Internally, the ``ChunkString`` class uses a single string to + encode the chunking of the input text. This string contains a + sequence of angle-bracket delimited tags, with chunking indicated + by braces. An example of this encoding is:: + + {
<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<.>

    ``ChunkString`` are created from tagged texts (i.e., lists of
    ``tokens`` whose type is ``TaggedType``).  Initially, nothing is
    chunked.

    The chunking of a ``ChunkString`` can be modified with the ``xform()``
    method, which uses a regular expression to transform the string
    representation.  These transformations should only add and remove
    braces; they should *not* modify the sequence of angle-bracket
    delimited tags.

    :type _str: str
    :ivar _str: The internal string representation of the text's
        encoding.  This string representation contains a sequence of
        angle-bracket delimited tags, with chunking indicated by
        braces.  An example of this encoding is::

            {<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>
    }<.> + + :type _pieces: list(tagged tokens and chunks) + :ivar _pieces: The tagged tokens and chunks encoded by this ``ChunkString``. + :ivar _debug: The debug level. See the constructor docs. + + :cvar IN_CHUNK_PATTERN: A zero-width regexp pattern string that + will only match positions that are in chunks. + :cvar IN_STRIP_PATTERN: A zero-width regexp pattern string that + will only match positions that are in strips. + """ + + CHUNK_TAG_CHAR = r"[^\{\}<>]" + CHUNK_TAG = r"(<%s+?>)" % CHUNK_TAG_CHAR + + IN_CHUNK_PATTERN = r"(?=[^\{]*\})" + IN_STRIP_PATTERN = r"(?=[^\}]*(\{|$))" + + # These are used by _verify + _CHUNK = r"(\{%s+?\})+?" % CHUNK_TAG + _STRIP = r"(%s+?)+?" % CHUNK_TAG + _VALID = re.compile(r"^(\{?%s\}?)*?$" % CHUNK_TAG) + _BRACKETS = re.compile(r"[^\{\}]+") + _BALANCED_BRACKETS = re.compile(r"(\{\})*$") + + def __init__(self, chunk_struct, debug_level=1): + """ + Construct a new ``ChunkString`` that encodes the chunking of + the text ``tagged_tokens``. + + :type chunk_struct: Tree + :param chunk_struct: The chunk structure to be further chunked. + :type debug_level: int + :param debug_level: The level of debugging which should be + applied to transformations on the ``ChunkString``. The + valid levels are: + + - 0: no checks + - 1: full check on to_chunkstruct + - 2: full check on to_chunkstruct and cursory check after + each transformation. + - 3: full check on to_chunkstruct and full check after + each transformation. + + We recommend you use at least level 1. You should + probably use level 3 if you use any non-standard + subclasses of ``RegexpChunkRule``. + """ + self._root_label = chunk_struct.label() + self._pieces = chunk_struct[:] + tags = [self._tag(tok) for tok in self._pieces] + self._str = "<" + "><".join(tags) + ">" + self._debug = debug_level + + def _tag(self, tok): + if isinstance(tok, tuple): + return tok[1] + elif isinstance(tok, Tree): + return tok.label() + else: + raise ValueError("chunk structures must contain tagged " "tokens or trees") + + def _verify(self, s, verify_tags): + """ + Check to make sure that ``s`` still corresponds to some chunked + version of ``_pieces``. + + :type verify_tags: bool + :param verify_tags: Whether the individual tags should be + checked. If this is false, ``_verify`` will check to make + sure that ``_str`` encodes a chunked version of *some* + list of tokens. If this is true, then ``_verify`` will + check to make sure that the tags in ``_str`` match those in + ``_pieces``. + + :raise ValueError: if the internal string representation of + this ``ChunkString`` is invalid or not consistent with _pieces. + """ + # Check overall form + if not ChunkString._VALID.match(s): + raise ValueError( + "Transformation generated invalid " "chunkstring:\n %s" % s + ) + + # Check that parens are balanced. If the string is long, we + # have to do this in pieces, to avoid a maximum recursion + # depth limit for regular expressions. 
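        # Editor's illustration (hypothetical input, not part of the patch):
        # _BRACKETS.sub strips the tag text so that only the brace skeleton is
        # checked for balance, e.g. "{<DT><NN>}<VBD>" reduces to "{}".
        assert ChunkString._BRACKETS.sub("", "{<DT><NN>}<VBD>") == "{}"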
+ brackets = ChunkString._BRACKETS.sub("", s) + for i in range(1 + len(brackets) // 5000): + substr = brackets[i * 5000 : i * 5000 + 5000] + if not ChunkString._BALANCED_BRACKETS.match(substr): + raise ValueError( + "Transformation generated invalid " "chunkstring:\n %s" % s + ) + + if verify_tags <= 0: + return + + tags1 = (re.split(r"[\{\}<>]+", s))[1:-1] + tags2 = [self._tag(piece) for piece in self._pieces] + if tags1 != tags2: + raise ValueError( + "Transformation generated invalid " "chunkstring: tag changed" + ) + + def to_chunkstruct(self, chunk_label="CHUNK"): + """ + Return the chunk structure encoded by this ``ChunkString``. + + :rtype: Tree + :raise ValueError: If a transformation has generated an + invalid chunkstring. + """ + if self._debug > 0: + self._verify(self._str, 1) + + # Use this alternating list to create the chunkstruct. + pieces = [] + index = 0 + piece_in_chunk = 0 + for piece in re.split("[{}]", self._str): + + # Find the list of tokens contained in this piece. + length = piece.count("<") + subsequence = self._pieces[index : index + length] + + # Add this list of tokens to our pieces. + if piece_in_chunk: + pieces.append(Tree(chunk_label, subsequence)) + else: + pieces += subsequence + + # Update index, piece_in_chunk + index += length + piece_in_chunk = not piece_in_chunk + + return Tree(self._root_label, pieces) + + def xform(self, regexp, repl): + """ + Apply the given transformation to the string encoding of this + ``ChunkString``. In particular, find all occurrences that match + ``regexp``, and replace them using ``repl`` (as done by + ``re.sub``). + + This transformation should only add and remove braces; it + should *not* modify the sequence of angle-bracket delimited + tags. Furthermore, this transformation may not result in + improper bracketing. Note, in particular, that bracketing may + not be nested. + + :type regexp: str or regexp + :param regexp: A regular expression matching the substring + that should be replaced. This will typically include a + named group, which can be used by ``repl``. + :type repl: str + :param repl: An expression specifying what should replace the + matched substring. Typically, this will include a named + replacement group, specified by ``regexp``. + :rtype: None + :raise ValueError: If this transformation generated an + invalid chunkstring. + """ + # Do the actual substitution + s = re.sub(regexp, repl, self._str) + + # The substitution might have generated "empty chunks" + # (substrings of the form "{}"). Remove them, so they don't + # interfere with other transformations. + s = re.sub(r"\{\}", "", s) + + # Make sure that the transformation was legal. + if self._debug > 1: + self._verify(s, self._debug - 2) + + # Commit the transformation. + self._str = s + + def __repr__(self): + """ + Return a string representation of this ``ChunkString``. + It has the form:: + + }{
    }'> + + :rtype: str + """ + return "" % repr(self._str) + + def __str__(self): + """ + Return a formatted representation of this ``ChunkString``. + This representation will include extra spaces to ensure that + tags will line up with the representation of other + ``ChunkStrings`` for the same text, regardless of the chunking. + + :rtype: str + """ + # Add spaces to make everything line up. + str = re.sub(r">(?!\})", r"> ", self._str) + str = re.sub(r"([^\{])<", r"\1 <", str) + if str[0] == "<": + str = " " + str + return str + + +# ////////////////////////////////////////////////////// +# Chunking Rules +# ////////////////////////////////////////////////////// + + +class RegexpChunkRule: + """ + A rule specifying how to modify the chunking in a ``ChunkString``, + using a transformational regular expression. The + ``RegexpChunkRule`` class itself can be used to implement any + transformational rule based on regular expressions. There are + also a number of subclasses, which can be used to implement + simpler types of rules, based on matching regular expressions. + + Each ``RegexpChunkRule`` has a regular expression and a + replacement expression. When a ``RegexpChunkRule`` is "applied" + to a ``ChunkString``, it searches the ``ChunkString`` for any + substring that matches the regular expression, and replaces it + using the replacement expression. This search/replace operation + has the same semantics as ``re.sub``. + + Each ``RegexpChunkRule`` also has a description string, which + gives a short (typically less than 75 characters) description of + the purpose of the rule. + + This transformation defined by this ``RegexpChunkRule`` should + only add and remove braces; it should *not* modify the sequence + of angle-bracket delimited tags. Furthermore, this transformation + may not result in nested or mismatched bracketing. + """ + + def __init__(self, regexp, repl, descr): + """ + Construct a new RegexpChunkRule. + + :type regexp: regexp or str + :param regexp: The regular expression for this ``RegexpChunkRule``. + When this rule is applied to a ``ChunkString``, any + substring that matches ``regexp`` will be replaced using + the replacement string ``repl``. Note that this must be a + normal regular expression, not a tag pattern. + :type repl: str + :param repl: The replacement expression for this ``RegexpChunkRule``. + When this rule is applied to a ``ChunkString``, any substring + that matches ``regexp`` will be replaced using ``repl``. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + if isinstance(regexp, str): + regexp = re.compile(regexp) + self._repl = repl + self._descr = descr + self._regexp = regexp + + def apply(self, chunkstr): + # Keep docstring generic so we can inherit it. + """ + Apply this rule to the given ``ChunkString``. See the + class reference documentation for a description of what it + means to apply a rule. + + :type chunkstr: ChunkString + :param chunkstr: The chunkstring to which this rule is applied. + :rtype: None + :raise ValueError: If this transformation generated an + invalid chunkstring. + """ + chunkstr.xform(self._regexp, self._repl) + + def descr(self): + """ + Return a short description of the purpose and/or effect of + this rule. + + :rtype: str + """ + return self._descr + + def __repr__(self): + """ + Return a string representation of this rule. 
It has the form:: + + }'->''> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return ( + "" + + repr(self._repl) + + ">" + ) + + @staticmethod + def fromstring(s): + """ + Create a RegexpChunkRule from a string description. + Currently, the following formats are supported:: + + {regexp} # chunk rule + }regexp{ # strip rule + regexp}{regexp # split rule + regexp{}regexp # merge rule + + Where ``regexp`` is a regular expression for the rule. Any + text following the comment marker (``#``) will be used as + the rule's description: + + >>> from nltk.chunk.regexp import RegexpChunkRule + >>> RegexpChunkRule.fromstring('{
    ?+}') + ?+'> + """ + # Split off the comment (but don't split on '\#') + m = re.match(r"(?P(\\.|[^#])*)(?P#.*)?", s) + rule = m.group("rule").strip() + comment = (m.group("comment") or "")[1:].strip() + + # Pattern bodies: chunk, strip, split, merge + try: + if not rule: + raise ValueError("Empty chunk pattern") + if rule[0] == "{" and rule[-1] == "}": + return ChunkRule(rule[1:-1], comment) + elif rule[0] == "}" and rule[-1] == "{": + return StripRule(rule[1:-1], comment) + elif "}{" in rule: + left, right = rule.split("}{") + return SplitRule(left, right, comment) + elif "{}" in rule: + left, right = rule.split("{}") + return MergeRule(left, right, comment) + elif re.match("[^{}]*{[^{}]*}[^{}]*", rule): + left, chunk, right = re.split("[{}]", rule) + return ChunkRuleWithContext(left, chunk, right, comment) + else: + raise ValueError("Illegal chunk pattern: %s" % rule) + except (ValueError, re.error) as e: + raise ValueError("Illegal chunk pattern: %s" % rule) from e + + +class ChunkRule(RegexpChunkRule): + """ + A rule specifying how to add chunks to a ``ChunkString``, using a + matching tag pattern. When applied to a ``ChunkString``, it will + find any substring that matches this tag pattern and that is not + already part of a chunk, and create a new chunk containing that + substring. + """ + + def __init__(self, tag_pattern, descr): + """ + Construct a new ``ChunkRule``. + + :type tag_pattern: str + :param tag_pattern: This rule's tag pattern. When + applied to a ``ChunkString``, this rule will + chunk any substring that matches this tag pattern and that + is not already part of a chunk. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + self._pattern = tag_pattern + regexp = re.compile( + "(?P%s)%s" + % (tag_pattern2re_pattern(tag_pattern), ChunkString.IN_STRIP_PATTERN) + ) + RegexpChunkRule.__init__(self, regexp, r"{\g}", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + '> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return "" + + +class StripRule(RegexpChunkRule): + """ + A rule specifying how to remove strips to a ``ChunkString``, + using a matching tag pattern. When applied to a + ``ChunkString``, it will find any substring that matches this + tag pattern and that is contained in a chunk, and remove it + from that chunk, thus creating two new chunks. + """ + + def __init__(self, tag_pattern, descr): + """ + Construct a new ``StripRule``. + + :type tag_pattern: str + :param tag_pattern: This rule's tag pattern. When + applied to a ``ChunkString``, this rule will + find any substring that matches this tag pattern and that + is contained in a chunk, and remove it from that chunk, + thus creating two new chunks. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + self._pattern = tag_pattern + regexp = re.compile( + "(?P%s)%s" + % (tag_pattern2re_pattern(tag_pattern), ChunkString.IN_CHUNK_PATTERN) + ) + RegexpChunkRule.__init__(self, regexp, r"}\g{", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + '> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. 
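+# A minimal sketch of the fromstring() notation documented above (standard
+# nltk.chunk.regexp behaviour is assumed; the tag patterns are only examples).
+from nltk.chunk.regexp import ChunkRule, RegexpChunkRule, StripRule
+chunk_rule = RegexpChunkRule.fromstring(r"{<DT>?<JJ>*<NN>}  # chunk det + adjectives + noun")
+strip_rule = RegexpChunkRule.fromstring(r"}<VB.*>{          # strip verbs out of chunks")
+assert isinstance(chunk_rule, ChunkRule) and isinstance(strip_rule, StripRule)
+print(chunk_rule.descr(), "/", strip_rule.descr())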
+ + :rtype: str + """ + return "" + + +class UnChunkRule(RegexpChunkRule): + """ + A rule specifying how to remove chunks to a ``ChunkString``, + using a matching tag pattern. When applied to a + ``ChunkString``, it will find any complete chunk that matches this + tag pattern, and un-chunk it. + """ + + def __init__(self, tag_pattern, descr): + """ + Construct a new ``UnChunkRule``. + + :type tag_pattern: str + :param tag_pattern: This rule's tag pattern. When + applied to a ``ChunkString``, this rule will + find any complete chunk that matches this tag pattern, + and un-chunk it. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + self._pattern = tag_pattern + regexp = re.compile(r"\{(?P%s)\}" % tag_pattern2re_pattern(tag_pattern)) + RegexpChunkRule.__init__(self, regexp, r"\g", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + '> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return "" + + +class MergeRule(RegexpChunkRule): + """ + A rule specifying how to merge chunks in a ``ChunkString``, using + two matching tag patterns: a left pattern, and a right pattern. + When applied to a ``ChunkString``, it will find any chunk whose end + matches left pattern, and immediately followed by a chunk whose + beginning matches right pattern. It will then merge those two + chunks into a single chunk. + """ + + def __init__(self, left_tag_pattern, right_tag_pattern, descr): + """ + Construct a new ``MergeRule``. + + :type right_tag_pattern: str + :param right_tag_pattern: This rule's right tag + pattern. When applied to a ``ChunkString``, this + rule will find any chunk whose end matches + ``left_tag_pattern``, and immediately followed by a chunk + whose beginning matches this pattern. It will + then merge those two chunks into a single chunk. + :type left_tag_pattern: str + :param left_tag_pattern: This rule's left tag + pattern. When applied to a ``ChunkString``, this + rule will find any chunk whose end matches + this pattern, and immediately followed by a chunk + whose beginning matches ``right_tag_pattern``. It will + then merge those two chunks into a single chunk. + + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + # Ensure that the individual patterns are coherent. E.g., if + # left='(' and right=')', then this will raise an exception: + re.compile(tag_pattern2re_pattern(left_tag_pattern)) + re.compile(tag_pattern2re_pattern(right_tag_pattern)) + + self._left_tag_pattern = left_tag_pattern + self._right_tag_pattern = right_tag_pattern + regexp = re.compile( + "(?P%s)}{(?=%s)" + % ( + tag_pattern2re_pattern(left_tag_pattern), + tag_pattern2re_pattern(right_tag_pattern), + ) + ) + RegexpChunkRule.__init__(self, regexp, r"\g", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + ', ''> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return ( + "" + ) + + +class SplitRule(RegexpChunkRule): + """ + A rule specifying how to split chunks in a ``ChunkString``, using + two matching tag patterns: a left pattern, and a right pattern. 
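+# Sketch of split and merge rules in the same fromstring() notation (assumed
+# API as above; the patterns themselves are illustrative).
+from nltk.chunk.regexp import MergeRule, RegexpChunkRule, SplitRule
+split_rule = RegexpChunkRule.fromstring(r"<NN.*>}{<DT>     # split a chunk before a determiner")
+merge_rule = RegexpChunkRule.fromstring(r"<DT|JJ>{}<NN.*>  # merge a det/adj chunk with a noun chunk")
+assert isinstance(split_rule, SplitRule) and isinstance(merge_rule, MergeRule)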
+ When applied to a ``ChunkString``, it will find any chunk that + matches the left pattern followed by the right pattern. It will + then split the chunk into two new chunks, at the point between the + two pattern matches. + """ + + def __init__(self, left_tag_pattern, right_tag_pattern, descr): + """ + Construct a new ``SplitRule``. + + :type right_tag_pattern: str + :param right_tag_pattern: This rule's right tag + pattern. When applied to a ``ChunkString``, this rule will + find any chunk containing a substring that matches + ``left_tag_pattern`` followed by this pattern. It will + then split the chunk into two new chunks at the point + between these two matching patterns. + :type left_tag_pattern: str + :param left_tag_pattern: This rule's left tag + pattern. When applied to a ``ChunkString``, this rule will + find any chunk containing a substring that matches this + pattern followed by ``right_tag_pattern``. It will then + split the chunk into two new chunks at the point between + these two matching patterns. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + # Ensure that the individual patterns are coherent. E.g., if + # left='(' and right=')', then this will raise an exception: + re.compile(tag_pattern2re_pattern(left_tag_pattern)) + re.compile(tag_pattern2re_pattern(right_tag_pattern)) + + self._left_tag_pattern = left_tag_pattern + self._right_tag_pattern = right_tag_pattern + regexp = re.compile( + "(?P%s)(?=%s)" + % ( + tag_pattern2re_pattern(left_tag_pattern), + tag_pattern2re_pattern(right_tag_pattern), + ) + ) + RegexpChunkRule.__init__(self, regexp, r"\g}{", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + ', '
    '> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return ( + "" + ) + + +class ExpandLeftRule(RegexpChunkRule): + """ + A rule specifying how to expand chunks in a ``ChunkString`` to the left, + using two matching tag patterns: a left pattern, and a right pattern. + When applied to a ``ChunkString``, it will find any chunk whose beginning + matches right pattern, and immediately preceded by a strip whose + end matches left pattern. It will then expand the chunk to incorporate + the new material on the left. + """ + + def __init__(self, left_tag_pattern, right_tag_pattern, descr): + """ + Construct a new ``ExpandRightRule``. + + :type right_tag_pattern: str + :param right_tag_pattern: This rule's right tag + pattern. When applied to a ``ChunkString``, this + rule will find any chunk whose beginning matches + ``right_tag_pattern``, and immediately preceded by a strip + whose end matches this pattern. It will + then merge those two chunks into a single chunk. + :type left_tag_pattern: str + :param left_tag_pattern: This rule's left tag + pattern. When applied to a ``ChunkString``, this + rule will find any chunk whose beginning matches + this pattern, and immediately preceded by a strip + whose end matches ``left_tag_pattern``. It will + then expand the chunk to incorporate the new material on the left. + + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + # Ensure that the individual patterns are coherent. E.g., if + # left='(' and right=')', then this will raise an exception: + re.compile(tag_pattern2re_pattern(left_tag_pattern)) + re.compile(tag_pattern2re_pattern(right_tag_pattern)) + + self._left_tag_pattern = left_tag_pattern + self._right_tag_pattern = right_tag_pattern + regexp = re.compile( + r"(?P%s)\{(?P%s)" + % ( + tag_pattern2re_pattern(left_tag_pattern), + tag_pattern2re_pattern(right_tag_pattern), + ) + ) + RegexpChunkRule.__init__(self, regexp, r"{\g\g", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + ', ''> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return ( + "" + ) + + +class ExpandRightRule(RegexpChunkRule): + """ + A rule specifying how to expand chunks in a ``ChunkString`` to the + right, using two matching tag patterns: a left pattern, and a + right pattern. When applied to a ``ChunkString``, it will find any + chunk whose end matches left pattern, and immediately followed by + a strip whose beginning matches right pattern. It will then + expand the chunk to incorporate the new material on the right. + """ + + def __init__(self, left_tag_pattern, right_tag_pattern, descr): + """ + Construct a new ``ExpandRightRule``. + + :type right_tag_pattern: str + :param right_tag_pattern: This rule's right tag + pattern. When applied to a ``ChunkString``, this + rule will find any chunk whose end matches + ``left_tag_pattern``, and immediately followed by a strip + whose beginning matches this pattern. It will + then merge those two chunks into a single chunk. + :type left_tag_pattern: str + :param left_tag_pattern: This rule's left tag + pattern. 
When applied to a ``ChunkString``, this + rule will find any chunk whose end matches + this pattern, and immediately followed by a strip + whose beginning matches ``right_tag_pattern``. It will + then expand the chunk to incorporate the new material on the right. + + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + # Ensure that the individual patterns are coherent. E.g., if + # left='(' and right=')', then this will raise an exception: + re.compile(tag_pattern2re_pattern(left_tag_pattern)) + re.compile(tag_pattern2re_pattern(right_tag_pattern)) + + self._left_tag_pattern = left_tag_pattern + self._right_tag_pattern = right_tag_pattern + regexp = re.compile( + r"(?P%s)\}(?P%s)" + % ( + tag_pattern2re_pattern(left_tag_pattern), + tag_pattern2re_pattern(right_tag_pattern), + ) + ) + RegexpChunkRule.__init__(self, regexp, r"\g\g}", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + ', ''> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return ( + "" + ) + + +class ChunkRuleWithContext(RegexpChunkRule): + """ + A rule specifying how to add chunks to a ``ChunkString``, using + three matching tag patterns: one for the left context, one for the + chunk, and one for the right context. When applied to a + ``ChunkString``, it will find any substring that matches the chunk + tag pattern, is surrounded by substrings that match the two + context patterns, and is not already part of a chunk; and create a + new chunk containing the substring that matched the chunk tag + pattern. + + Caveat: Both the left and right context are consumed when this + rule matches; therefore, if you need to find overlapping matches, + you will need to apply your rule more than once. + """ + + def __init__( + self, + left_context_tag_pattern, + chunk_tag_pattern, + right_context_tag_pattern, + descr, + ): + """ + Construct a new ``ChunkRuleWithContext``. + + :type left_context_tag_pattern: str + :param left_context_tag_pattern: A tag pattern that must match + the left context of ``chunk_tag_pattern`` for this rule to + apply. + :type chunk_tag_pattern: str + :param chunk_tag_pattern: A tag pattern that must match for this + rule to apply. If the rule does apply, then this pattern + also identifies the substring that will be made into a chunk. + :type right_context_tag_pattern: str + :param right_context_tag_pattern: A tag pattern that must match + the right context of ``chunk_tag_pattern`` for this rule to + apply. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + # Ensure that the individual patterns are coherent. 
E.g., if + # left='(' and right=')', then this will raise an exception: + re.compile(tag_pattern2re_pattern(left_context_tag_pattern)) + re.compile(tag_pattern2re_pattern(chunk_tag_pattern)) + re.compile(tag_pattern2re_pattern(right_context_tag_pattern)) + + self._left_context_tag_pattern = left_context_tag_pattern + self._chunk_tag_pattern = chunk_tag_pattern + self._right_context_tag_pattern = right_context_tag_pattern + regexp = re.compile( + "(?P%s)(?P%s)(?P%s)%s" + % ( + tag_pattern2re_pattern(left_context_tag_pattern), + tag_pattern2re_pattern(chunk_tag_pattern), + tag_pattern2re_pattern(right_context_tag_pattern), + ChunkString.IN_STRIP_PATTERN, + ) + ) + replacement = r"\g{\g}\g" + RegexpChunkRule.__init__(self, regexp, replacement, descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + ', '', '
    '> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return "".format( + self._left_context_tag_pattern, + self._chunk_tag_pattern, + self._right_context_tag_pattern, + ) + + +# ////////////////////////////////////////////////////// +# Tag Pattern Format Conversion +# ////////////////////////////////////////////////////// + +# this should probably be made more strict than it is -- e.g., it +# currently accepts 'foo'. +CHUNK_TAG_PATTERN = re.compile( + r"^(({}|<{}>)*)$".format(r"([^\{\}<>]|\{\d+,?\}|\{\d*,\d+\})+", r"[^\{\}<>]+") +) + + +def tag_pattern2re_pattern(tag_pattern): + """ + Convert a tag pattern to a regular expression pattern. A "tag + pattern" is a modified version of a regular expression, designed + for matching sequences of tags. The differences between regular + expression patterns and tag patterns are: + + - In tag patterns, ``'<'`` and ``'>'`` act as parentheses; so + ``'+'`` matches one or more repetitions of ``''``, not + ``''``. + - Whitespace in tag patterns is ignored. So + ``'
    | '`` is equivalent to ``'
    |'`` + - In tag patterns, ``'.'`` is equivalent to ``'[^{}<>]'``; so + ``''`` matches any single tag starting with ``'NN'``. + + In particular, ``tag_pattern2re_pattern`` performs the following + transformations on the given pattern: + + - Replace '.' with '[^<>{}]' + - Remove any whitespace + - Add extra parens around '<' and '>', to make '<' and '>' act + like parentheses. E.g., so that in '+', the '+' has scope + over the entire ''; and so that in '', the '|' has + scope over 'NN' and 'IN', but not '<' or '>'. + - Check to make sure the resulting pattern is valid. + + :type tag_pattern: str + :param tag_pattern: The tag pattern to convert to a regular + expression pattern. + :raise ValueError: If ``tag_pattern`` is not a valid tag pattern. + In particular, ``tag_pattern`` should not include braces; and it + should not contain nested or mismatched angle-brackets. + :rtype: str + :return: A regular expression pattern corresponding to + ``tag_pattern``. + """ + # Clean up the regular expression + tag_pattern = re.sub(r"\s", "", tag_pattern) + tag_pattern = re.sub(r"<", "(<(", tag_pattern) + tag_pattern = re.sub(r">", ")>)", tag_pattern) + + # Check the regular expression + if not CHUNK_TAG_PATTERN.match(tag_pattern): + raise ValueError("Bad tag pattern: %r" % tag_pattern) + + # Replace "." with CHUNK_TAG_CHAR. + # We have to do this after, since it adds {}[]<>s, which would + # confuse CHUNK_TAG_PATTERN. + # PRE doesn't have lookback assertions, so reverse twice, and do + # the pattern backwards (with lookahead assertions). This can be + # made much cleaner once we can switch back to SRE. + def reverse_str(str): + lst = list(str) + lst.reverse() + return "".join(lst) + + tc_rev = reverse_str(ChunkString.CHUNK_TAG_CHAR) + reversed = reverse_str(tag_pattern) + reversed = re.sub(r"\.(?!\\(\\\\)*($|[^\\]))", tc_rev, reversed) + tag_pattern = reverse_str(reversed) + + return tag_pattern + + +# ////////////////////////////////////////////////////// +# RegexpChunkParser +# ////////////////////////////////////////////////////// + + +class RegexpChunkParser(ChunkParserI): + """ + A regular expression based chunk parser. ``RegexpChunkParser`` uses a + sequence of "rules" to find chunks of a single type within a + text. The chunking of the text is encoded using a ``ChunkString``, + and each rule acts by modifying the chunking in the + ``ChunkString``. The rules are all implemented using regular + expression matching and substitution. + + The ``RegexpChunkRule`` class and its subclasses (``ChunkRule``, + ``StripRule``, ``UnChunkRule``, ``MergeRule``, and ``SplitRule``) + define the rules that are used by ``RegexpChunkParser``. Each rule + defines an ``apply()`` method, which modifies the chunking encoded + by a given ``ChunkString``. + + :type _rules: list(RegexpChunkRule) + :ivar _rules: The list of rules that should be applied to a text. + :type _trace: int + :ivar _trace: The default level of tracing. + + """ + + def __init__(self, rules, chunk_label="NP", root_label="S", trace=0): + """ + Construct a new ``RegexpChunkParser``. + + :type rules: list(RegexpChunkRule) + :param rules: The sequence of rules that should be used to + generate the chunking for a tagged text. + :type chunk_label: str + :param chunk_label: The node value that should be used for + chunk subtrees. This is typically a short string + describing the type of information contained by the chunk, + such as ``"NP"`` for base noun phrases. 
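+# Sketch (behaviour assumed from the transformation steps listed above): what a
+# tag pattern expands to as an ordinary regular expression.
+from nltk.chunk.regexp import tag_pattern2re_pattern
+print(tag_pattern2re_pattern("<DT>?<JJ>*<NN.*>"))
+# roughly: (<(DT)>)?(<(JJ)>)*(<(NN[^\{\}<>]*)>)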
+ :type root_label: str + :param root_label: The node value that should be used for the + top node of the chunk structure. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + ``1`` will generate normal tracing output; and ``2`` or + higher will generate verbose tracing output. + """ + self._rules = rules + self._trace = trace + self._chunk_label = chunk_label + self._root_label = root_label + + def _trace_apply(self, chunkstr, verbose): + """ + Apply each rule of this ``RegexpChunkParser`` to ``chunkstr``, in + turn. Generate trace output between each rule. If ``verbose`` + is true, then generate verbose output. + + :type chunkstr: ChunkString + :param chunkstr: The chunk string to which each rule should be + applied. + :type verbose: bool + :param verbose: Whether output should be verbose. + :rtype: None + """ + print("# Input:") + print(chunkstr) + for rule in self._rules: + rule.apply(chunkstr) + if verbose: + print("#", rule.descr() + " (" + repr(rule) + "):") + else: + print("#", rule.descr() + ":") + print(chunkstr) + + def _notrace_apply(self, chunkstr): + """ + Apply each rule of this ``RegexpChunkParser`` to ``chunkstr``, in + turn. + + :param chunkstr: The chunk string to which each rule should be + applied. + :type chunkstr: ChunkString + :rtype: None + """ + + for rule in self._rules: + rule.apply(chunkstr) + + def parse(self, chunk_struct, trace=None): + """ + :type chunk_struct: Tree + :param chunk_struct: the chunk structure to be (further) chunked + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + ``1`` will generate normal tracing output; and ``2`` or + higher will generate verbose tracing output. This value + overrides the trace level value that was given to the + constructor. + :rtype: Tree + :return: a chunk structure that encodes the chunks in a given + tagged sentence. A chunk is a non-overlapping linguistic + group, such as a noun phrase. The set of chunks + identified in the chunk structure depends on the rules + used to define this ``RegexpChunkParser``. + """ + if len(chunk_struct) == 0: + print("Warning: parsing empty text") + return Tree(self._root_label, []) + + try: + chunk_struct.label() + except AttributeError: + chunk_struct = Tree(self._root_label, chunk_struct) + + # Use the default trace value? + if trace is None: + trace = self._trace + + chunkstr = ChunkString(chunk_struct) + + # Apply the sequence of rules to the chunkstring. + if trace: + verbose = trace > 1 + self._trace_apply(chunkstr, verbose) + else: + self._notrace_apply(chunkstr) + + # Use the chunkstring to create a chunk structure. + return chunkstr.to_chunkstruct(self._chunk_label) + + def rules(self): + """ + :return: the sequence of rules used by ``RegexpChunkParser``. + :rtype: list(RegexpChunkRule) + """ + return self._rules + + def __repr__(self): + """ + :return: a concise string representation of this + ``RegexpChunkParser``. + :rtype: str + """ + return "" % len(self._rules) + + def __str__(self): + """ + :return: a verbose string representation of this ``RegexpChunkParser``. 
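+# Sketch mirroring the parse() contract described above (assumed API; the rule
+# and sentence are invented): drive RegexpChunkParser with an explicit rule list.
+from nltk.chunk.regexp import ChunkRule, RegexpChunkParser
+from nltk.tree import Tree
+rule = ChunkRule("<DT>?<JJ>*<NN>", "chunk optional det, adjectives and a noun")
+parser = RegexpChunkParser([rule], chunk_label="NP", root_label="S")
+tagged = [("the", "DT"), ("little", "JJ"), ("cat", "NN"), ("sat", "VBD")]
+print(parser.parse(Tree("S", tagged)))  # (S (NP the/DT little/JJ cat/NN) sat/VBD)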
+ :rtype: str + """ + s = "RegexpChunkParser with %d rules:\n" % len(self._rules) + margin = 0 + for rule in self._rules: + margin = max(margin, len(rule.descr())) + if margin < 35: + format = " %" + repr(-(margin + 3)) + "s%s\n" + else: + format = " %s\n %s\n" + for rule in self._rules: + s += format % (rule.descr(), repr(rule)) + return s[:-1] + + +# ////////////////////////////////////////////////////// +# Chunk Grammar +# ////////////////////////////////////////////////////// + + +class RegexpParser(ChunkParserI): + r""" + A grammar based chunk parser. ``chunk.RegexpParser`` uses a set of + regular expression patterns to specify the behavior of the parser. + The chunking of the text is encoded using a ``ChunkString``, and + each rule acts by modifying the chunking in the ``ChunkString``. + The rules are all implemented using regular expression matching + and substitution. + + A grammar contains one or more clauses in the following form:: + + NP: + {} # chunk determiners and adjectives + }<[\.VI].*>+{ # strip any tag beginning with V, I, or . + <.*>}{
    # split a chunk at a determiner + {} # merge chunk ending with det/adj + # with one starting with a noun + + The patterns of a clause are executed in order. An earlier + pattern may introduce a chunk boundary that prevents a later + pattern from executing. Sometimes an individual pattern will + match on multiple, overlapping extents of the input. As with + regular expression substitution more generally, the chunker will + identify the first match possible, then continue looking for matches + after this one has ended. + + The clauses of a grammar are also executed in order. A cascaded + chunk parser is one having more than one clause. The maximum depth + of a parse tree created by this chunk parser is the same as the + number of clauses in the grammar. + + When tracing is turned on, the comment portion of a line is displayed + each time the corresponding pattern is applied. + + :type _start: str + :ivar _start: The start symbol of the grammar (the root node of + resulting trees) + :type _stages: int + :ivar _stages: The list of parsing stages corresponding to the grammar + + """ + + def __init__(self, grammar, root_label="S", loop=1, trace=0): + """ + Create a new chunk parser, from the given start state + and set of chunk patterns. + + :param grammar: The grammar, or a list of RegexpChunkParser objects + :type grammar: str or list(RegexpChunkParser) + :param root_label: The top node of the tree being created + :type root_label: str or Nonterminal + :param loop: The number of times to run through the patterns + :type loop: int + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + ``1`` will generate normal tracing output; and ``2`` or + higher will generate verbose tracing output. + """ + self._trace = trace + self._stages = [] + self._grammar = grammar + self._loop = loop + + if isinstance(grammar, str): + self._read_grammar(grammar, root_label, trace) + else: + # Make sur the grammar looks like it has the right type: + type_err = ( + "Expected string or list of RegexpChunkParsers " "for the grammar." + ) + try: + grammar = list(grammar) + except BaseException as e: + raise TypeError(type_err) from e + for elt in grammar: + if not isinstance(elt, RegexpChunkParser): + raise TypeError(type_err) + self._stages = grammar + + def _read_grammar(self, grammar, root_label, trace): + """ + Helper function for __init__: read the grammar if it is a + string. + """ + rules = [] + lhs = None + pattern = regex.compile("(?P(\\.|[^:])*)(:(?P.*))") + for line in grammar.split("\n"): + line = line.strip() + + # New stage begins if there's an unescaped ':' + m = pattern.match(line) + if m: + # Record the stage that we just completed. + self._add_stage(rules, lhs, root_label, trace) + # Start a new stage. + lhs = m.group("nonterminal").strip() + rules = [] + line = m.group("rule").strip() + + # Skip blank & comment-only lines + if line == "" or line.startswith("#"): + continue + + # Add the rule + rules.append(RegexpChunkRule.fromstring(line)) + + # Record the final stage + self._add_stage(rules, lhs, root_label, trace) + + def _add_stage(self, rules, lhs, root_label, trace): + """ + Helper function for __init__: add a new stage to the parser. 
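+# Sketch (assumed API; grammar and sentence are invented): a two-stage cascaded
+# grammar in the clause format described above.
+import nltk
+grammar = r"""
+  NP: {<DT>?<JJ>*<NN.*>}   # chunk determiners, adjectives and nouns
+  PP: {<IN><NP>}           # then group preposition + NP chunks into PPs
+"""
+cp = nltk.RegexpParser(grammar)
+sent = [("the", "DT"), ("cat", "NN"), ("on", "IN"), ("the", "DT"), ("mat", "NN")]
+print(cp.parse(sent))  # roughly: (S (NP the/DT cat/NN) (PP on/IN (NP the/DT mat/NN)))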
+ """ + if rules != []: + if not lhs: + raise ValueError("Expected stage marker (eg NP:)") + parser = RegexpChunkParser( + rules, chunk_label=lhs, root_label=root_label, trace=trace + ) + self._stages.append(parser) + + def parse(self, chunk_struct, trace=None): + """ + Apply the chunk parser to this input. + + :type chunk_struct: Tree + :param chunk_struct: the chunk structure to be (further) chunked + (this tree is modified, and is also returned) + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + ``1`` will generate normal tracing output; and ``2`` or + higher will generate verbose tracing output. This value + overrides the trace level value that was given to the + constructor. + :return: the chunked output. + :rtype: Tree + """ + if trace is None: + trace = self._trace + for i in range(self._loop): + for parser in self._stages: + chunk_struct = parser.parse(chunk_struct, trace=trace) + return chunk_struct + + def __repr__(self): + """ + :return: a concise string representation of this ``chunk.RegexpParser``. + :rtype: str + """ + return "" % len(self._stages) + + def __str__(self): + """ + :return: a verbose string representation of this + ``RegexpParser``. + :rtype: str + """ + s = "chunk.RegexpParser with %d stages:\n" % len(self._stages) + margin = 0 + for parser in self._stages: + s += "%s\n" % parser + return s[:-1] + + +# ////////////////////////////////////////////////////// +# Demonstration code +# ////////////////////////////////////////////////////// + + +def demo_eval(chunkparser, text): + """ + Demonstration code for evaluating a chunk parser, using a + ``ChunkScore``. This function assumes that ``text`` contains one + sentence per line, and that each sentence has the form expected by + ``tree.chunk``. It runs the given chunk parser on each sentence in + the text, and scores the result. It prints the final score + (precision, recall, and f-measure); and reports the set of chunks + that were missed and the set of chunks that were incorrect. (At + most 10 missing chunks and 10 incorrect chunks are reported). + + :param chunkparser: The chunkparser to be tested + :type chunkparser: ChunkParserI + :param text: The chunked tagged text that should be used for + evaluation. + :type text: str + """ + from nltk import chunk + from nltk.tree import Tree + + # Evaluate our chunk parser. + chunkscore = chunk.ChunkScore() + + for sentence in text.split("\n"): + print(sentence) + sentence = sentence.strip() + if not sentence: + continue + gold = chunk.tagstr2tree(sentence) + tokens = gold.leaves() + test = chunkparser.parse(Tree("S", tokens), trace=1) + chunkscore.score(gold, test) + print() + + print("/" + ("=" * 75) + "\\") + print("Scoring", chunkparser) + print("-" * 77) + print("Precision: %5.1f%%" % (chunkscore.precision() * 100), " " * 4, end=" ") + print("Recall: %5.1f%%" % (chunkscore.recall() * 100), " " * 6, end=" ") + print("F-Measure: %5.1f%%" % (chunkscore.f_measure() * 100)) + + # Missed chunks. + if chunkscore.missed(): + print("Missed:") + missed = chunkscore.missed() + for chunk in missed[:10]: + print(" ", " ".join(map(str, chunk))) + if len(chunkscore.missed()) > 10: + print(" ...") + + # Incorrect chunks. 
+ if chunkscore.incorrect(): + print("Incorrect:") + incorrect = chunkscore.incorrect() + for chunk in incorrect[:10]: + print(" ", " ".join(map(str, chunk))) + if len(chunkscore.incorrect()) > 10: + print(" ...") + + print("\\" + ("=" * 75) + "/") + print() + + +def demo(): + """ + A demonstration for the ``RegexpChunkParser`` class. A single text is + parsed with four different chunk parsers, using a variety of rules + and strategies. + """ + + from nltk import Tree, chunk + + text = """\ + [ the/DT little/JJ cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] ./. + [ John/NNP ] saw/VBD [the/DT cats/NNS] [the/DT dog/NN] chased/VBD ./. + [ John/NNP ] thinks/VBZ [ Mary/NN ] saw/VBD [ the/DT cat/NN ] sit/VB on/IN [ the/DT mat/NN ]./. + """ + + print("*" * 75) + print("Evaluation text:") + print(text) + print("*" * 75) + print() + + grammar = r""" + NP: # NP stage + {
<DT>?<JJ>*<NN>} # chunk determiners, adjectives and nouns + {<NNP>+} # chunk proper nouns + """ + cp = chunk.RegexpParser(grammar) + demo_eval(cp, text) + + grammar = r""" + NP: + {<.*>} # start by chunking each tag + }<[\.VI].*>+{ # unchunk any verbs, prepositions or periods + <DT|JJ>{}<NN.*> # merge det/adj with nouns + """ + cp = chunk.RegexpParser(grammar) + demo_eval(cp, text) + + grammar = r""" + NP: {
<DT>?<JJ>*<NN>} # chunk determiners, adjectives and nouns + VP: {<TO>?<VB.*>} # VP = verb words + """ + cp = chunk.RegexpParser(grammar) + demo_eval(cp, text) + + grammar = r""" + NP: {<.*>*} # start by chunking everything + }<[\.VI].*>+{ # strip any verbs, prepositions or periods + <.*>}{<DT>
# separate on determiners + PP: {<IN><NP>} # PP = preposition + noun phrase + VP: {<VB.*><NP|PP>*} # VP = verb words + NPs and PPs + """ + cp = chunk.RegexpParser(grammar) + demo_eval(cp, text) + + # Evaluation + + from nltk.corpus import conll2000 + + print() + print("Demonstration of empty grammar:") + + cp = chunk.RegexpParser("") + print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt", chunk_types=("NP",)))) + + print() + print("Demonstration of accuracy evaluation using CoNLL tags:") + + grammar = r""" + NP: + {<.*>} # start by chunking each tag + }<[\.VI].*>+{ # unchunk any verbs, prepositions or periods + <DT|JJ>{}<NN.*> # merge det/adj with nouns + """ + cp = chunk.RegexpParser(grammar) + print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt")[:5])) + + print() + print("Demonstration of tagged token input") + + grammar = r""" + NP: {<.*>*} # start by chunking everything + }<[\.VI].*>+{ # strip any verbs, prepositions or periods + <.*>}{<DT>
    # separate on determiners + PP: {} # PP = preposition + noun phrase + VP: {*} # VP = verb words + NPs and PPs + """ + cp = chunk.RegexpParser(grammar) + print( + cp.parse( + [ + ("the", "DT"), + ("little", "JJ"), + ("cat", "NN"), + ("sat", "VBD"), + ("on", "IN"), + ("the", "DT"), + ("mat", "NN"), + (".", "."), + ] + ) + ) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/chunk/util.py b/venv/lib/python3.10/site-packages/nltk/chunk/util.py new file mode 100644 index 0000000000000000000000000000000000000000..64ab90f52d1cecd133d3c6511c71e10e44b7bbf1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/chunk/util.py @@ -0,0 +1,643 @@ +# Natural Language Toolkit: Chunk format conversions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT + +import re + +from nltk.metrics import accuracy as _accuracy +from nltk.tag.mapping import map_tag +from nltk.tag.util import str2tuple +from nltk.tree import Tree + +##////////////////////////////////////////////////////// +## EVALUATION +##////////////////////////////////////////////////////// + + +def accuracy(chunker, gold): + """ + Score the accuracy of the chunker against the gold standard. + Strip the chunk information from the gold standard and rechunk it using + the chunker, then compute the accuracy score. + + :type chunker: ChunkParserI + :param chunker: The chunker being evaluated. + :type gold: tree + :param gold: The chunk structures to score the chunker on. + :rtype: float + """ + + gold_tags = [] + test_tags = [] + for gold_tree in gold: + test_tree = chunker.parse(gold_tree.flatten()) + gold_tags += tree2conlltags(gold_tree) + test_tags += tree2conlltags(test_tree) + + # print 'GOLD:', gold_tags[:50] + # print 'TEST:', test_tags[:50] + return _accuracy(gold_tags, test_tags) + + +# Patched for increased performance by Yoav Goldberg , 2006-01-13 +# -- statistics are evaluated only on demand, instead of at every sentence evaluation +# +# SB: use nltk.metrics for precision/recall scoring? +# +class ChunkScore: + """ + A utility class for scoring chunk parsers. ``ChunkScore`` can + evaluate a chunk parser's output, based on a number of statistics + (precision, recall, f-measure, misssed chunks, incorrect chunks). + It can also combine the scores from the parsing of multiple texts; + this makes it significantly easier to evaluate a chunk parser that + operates one sentence at a time. + + Texts are evaluated with the ``score`` method. The results of + evaluation can be accessed via a number of accessor methods, such + as ``precision`` and ``f_measure``. A typical use of the + ``ChunkScore`` class is:: + + >>> chunkscore = ChunkScore() # doctest: +SKIP + >>> for correct in correct_sentences: # doctest: +SKIP + ... guess = chunkparser.parse(correct.leaves()) # doctest: +SKIP + ... chunkscore.score(correct, guess) # doctest: +SKIP + >>> print('F Measure:', chunkscore.f_measure()) # doctest: +SKIP + F Measure: 0.823 + + :ivar kwargs: Keyword arguments: + + - max_tp_examples: The maximum number actual examples of true + positives to record. This affects the ``correct`` member + function: ``correct`` will not return more than this number + of true positive examples. This does *not* affect any of + the numerical metrics (precision, recall, or f-measure) + + - max_fp_examples: The maximum number actual examples of false + positives to record. 
This affects the ``incorrect`` member + function and the ``guessed`` member function: ``incorrect`` + will not return more than this number of examples, and + ``guessed`` will not return more than this number of true + positive examples. This does *not* affect any of the + numerical metrics (precision, recall, or f-measure) + + - max_fn_examples: The maximum number actual examples of false + negatives to record. This affects the ``missed`` member + function and the ``correct`` member function: ``missed`` + will not return more than this number of examples, and + ``correct`` will not return more than this number of true + negative examples. This does *not* affect any of the + numerical metrics (precision, recall, or f-measure) + + - chunk_label: A regular expression indicating which chunks + should be compared. Defaults to ``'.*'`` (i.e., all chunks). + + :type _tp: list(Token) + :ivar _tp: List of true positives + :type _fp: list(Token) + :ivar _fp: List of false positives + :type _fn: list(Token) + :ivar _fn: List of false negatives + + :type _tp_num: int + :ivar _tp_num: Number of true positives + :type _fp_num: int + :ivar _fp_num: Number of false positives + :type _fn_num: int + :ivar _fn_num: Number of false negatives. + """ + + def __init__(self, **kwargs): + self._correct = set() + self._guessed = set() + self._tp = set() + self._fp = set() + self._fn = set() + self._max_tp = kwargs.get("max_tp_examples", 100) + self._max_fp = kwargs.get("max_fp_examples", 100) + self._max_fn = kwargs.get("max_fn_examples", 100) + self._chunk_label = kwargs.get("chunk_label", ".*") + self._tp_num = 0 + self._fp_num = 0 + self._fn_num = 0 + self._count = 0 + self._tags_correct = 0.0 + self._tags_total = 0.0 + + self._measuresNeedUpdate = False + + def _updateMeasures(self): + if self._measuresNeedUpdate: + self._tp = self._guessed & self._correct + self._fn = self._correct - self._guessed + self._fp = self._guessed - self._correct + self._tp_num = len(self._tp) + self._fp_num = len(self._fp) + self._fn_num = len(self._fn) + self._measuresNeedUpdate = False + + def score(self, correct, guessed): + """ + Given a correctly chunked sentence, score another chunked + version of the same sentence. + + :type correct: chunk structure + :param correct: The known-correct ("gold standard") chunked + sentence. + :type guessed: chunk structure + :param guessed: The chunked sentence to be scored. + """ + self._correct |= _chunksets(correct, self._count, self._chunk_label) + self._guessed |= _chunksets(guessed, self._count, self._chunk_label) + self._count += 1 + self._measuresNeedUpdate = True + # Keep track of per-tag accuracy (if possible) + try: + correct_tags = tree2conlltags(correct) + guessed_tags = tree2conlltags(guessed) + except ValueError: + # This exception case is for nested chunk structures, + # where tree2conlltags will fail with a ValueError: "Tree + # is too deeply nested to be printed in CoNLL format." + correct_tags = guessed_tags = () + self._tags_total += len(correct_tags) + self._tags_correct += sum( + 1 for (t, g) in zip(guessed_tags, correct_tags) if t == g + ) + + def accuracy(self): + """ + Return the overall tag-based accuracy for all text that have + been scored by this ``ChunkScore``, using the IOB (conll2000) + tag encoding. + + :rtype: float + """ + if self._tags_total == 0: + return 1 + return self._tags_correct / self._tags_total + + def precision(self): + """ + Return the overall precision for all texts that have been + scored by this ``ChunkScore``. 
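+# Sketch following the usage pattern in the class docstring above (assumed API;
+# gold sentence and grammar are invented): score one guess against one gold tree.
+from nltk.chunk import RegexpParser
+from nltk.chunk.util import ChunkScore, tagstr2tree
+gold = tagstr2tree("[ the/DT cat/NN ] sat/VBD on/IN [ the/DT mat/NN ]")
+cp = RegexpParser("NP: {<DT>?<JJ>*<NN>}")
+chunkscore = ChunkScore()
+chunkscore.score(gold, cp.parse(gold.flatten()))
+print(chunkscore.precision(), chunkscore.recall(), chunkscore.f_measure())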
+ + :rtype: float + """ + self._updateMeasures() + div = self._tp_num + self._fp_num + if div == 0: + return 0 + else: + return self._tp_num / div + + def recall(self): + """ + Return the overall recall for all texts that have been + scored by this ``ChunkScore``. + + :rtype: float + """ + self._updateMeasures() + div = self._tp_num + self._fn_num + if div == 0: + return 0 + else: + return self._tp_num / div + + def f_measure(self, alpha=0.5): + """ + Return the overall F measure for all texts that have been + scored by this ``ChunkScore``. + + :param alpha: the relative weighting of precision and recall. + Larger alpha biases the score towards the precision value, + while smaller alpha biases the score towards the recall + value. ``alpha`` should have a value in the range [0,1]. + :type alpha: float + :rtype: float + """ + self._updateMeasures() + p = self.precision() + r = self.recall() + if p == 0 or r == 0: # what if alpha is 0 or 1? + return 0 + return 1 / (alpha / p + (1 - alpha) / r) + + def missed(self): + """ + Return the chunks which were included in the + correct chunk structures, but not in the guessed chunk + structures, listed in input order. + + :rtype: list of chunks + """ + self._updateMeasures() + chunks = list(self._fn) + return [c[1] for c in chunks] # discard position information + + def incorrect(self): + """ + Return the chunks which were included in the guessed chunk structures, + but not in the correct chunk structures, listed in input order. + + :rtype: list of chunks + """ + self._updateMeasures() + chunks = list(self._fp) + return [c[1] for c in chunks] # discard position information + + def correct(self): + """ + Return the chunks which were included in the correct + chunk structures, listed in input order. + + :rtype: list of chunks + """ + chunks = list(self._correct) + return [c[1] for c in chunks] # discard position information + + def guessed(self): + """ + Return the chunks which were included in the guessed + chunk structures, listed in input order. + + :rtype: list of chunks + """ + chunks = list(self._guessed) + return [c[1] for c in chunks] # discard position information + + def __len__(self): + self._updateMeasures() + return self._tp_num + self._fn_num + + def __repr__(self): + """ + Return a concise representation of this ``ChunkScoring``. + + :rtype: str + """ + return "" + + def __str__(self): + """ + Return a verbose representation of this ``ChunkScoring``. + This representation includes the precision, recall, and + f-measure scores. For other information about the score, + use the accessor methods (e.g., ``missed()`` and ``incorrect()``). + + :rtype: str + """ + return ( + "ChunkParse score:\n" + + (f" IOB Accuracy: {self.accuracy() * 100:5.1f}%%\n") + + (f" Precision: {self.precision() * 100:5.1f}%%\n") + + (f" Recall: {self.recall() * 100:5.1f}%%\n") + + (f" F-Measure: {self.f_measure() * 100:5.1f}%%") + ) + + +# extract chunks, and assign unique id, the absolute position of +# the first word of the chunk +def _chunksets(t, count, chunk_label): + pos = 0 + chunks = [] + for child in t: + if isinstance(child, Tree): + if re.match(chunk_label, child.label()): + chunks.append(((count, pos), child.freeze())) + pos += len(child.leaves()) + else: + pos += 1 + return set(chunks) + + +def tagstr2tree( + s, chunk_label="NP", root_label="S", sep="/", source_tagset=None, target_tagset=None +): + """ + Divide a string of bracketted tagged text into + chunks and unchunked tokens, and produce a Tree. + Chunks are marked by square brackets (``[...]``). 
Words are + delimited by whitespace, and each word should have the form + ``text/tag``. Words that do not contain a slash are + assigned a ``tag`` of None. + + :param s: The string to be converted + :type s: str + :param chunk_label: The label to use for chunk nodes + :type chunk_label: str + :param root_label: The label to use for the root of the tree + :type root_label: str + :rtype: Tree + """ + + WORD_OR_BRACKET = re.compile(r"\[|\]|[^\[\]\s]+") + + stack = [Tree(root_label, [])] + for match in WORD_OR_BRACKET.finditer(s): + text = match.group() + if text[0] == "[": + if len(stack) != 1: + raise ValueError(f"Unexpected [ at char {match.start():d}") + chunk = Tree(chunk_label, []) + stack[-1].append(chunk) + stack.append(chunk) + elif text[0] == "]": + if len(stack) != 2: + raise ValueError(f"Unexpected ] at char {match.start():d}") + stack.pop() + else: + if sep is None: + stack[-1].append(text) + else: + word, tag = str2tuple(text, sep) + if source_tagset and target_tagset: + tag = map_tag(source_tagset, target_tagset, tag) + stack[-1].append((word, tag)) + + if len(stack) != 1: + raise ValueError(f"Expected ] at char {len(s):d}") + return stack[0] + + +### CONLL + +_LINE_RE = re.compile(r"(\S+)\s+(\S+)\s+([IOB])-?(\S+)?") + + +def conllstr2tree(s, chunk_types=("NP", "PP", "VP"), root_label="S"): + """ + Return a chunk structure for a single sentence + encoded in the given CONLL 2000 style string. + This function converts a CoNLL IOB string into a tree. + It uses the specified chunk types + (defaults to NP, PP and VP), and creates a tree rooted at a node + labeled S (by default). + + :param s: The CoNLL string to be converted. + :type s: str + :param chunk_types: The chunk types to be converted. + :type chunk_types: tuple + :param root_label: The node label to use for the root. + :type root_label: str + :rtype: Tree + """ + + stack = [Tree(root_label, [])] + + for lineno, line in enumerate(s.split("\n")): + if not line.strip(): + continue + + # Decode the line. + match = _LINE_RE.match(line) + if match is None: + raise ValueError(f"Error on line {lineno:d}") + (word, tag, state, chunk_type) = match.groups() + + # If it's a chunk type we don't care about, treat it as O. + if chunk_types is not None and chunk_type not in chunk_types: + state = "O" + + # For "Begin"/"Outside", finish any completed chunks - + # also do so for "Inside" which don't match the previous token. + mismatch_I = state == "I" and chunk_type != stack[-1].label() + if state in "BO" or mismatch_I: + if len(stack) == 2: + stack.pop() + + # For "Begin", start a new chunk. + if state == "B" or mismatch_I: + chunk = Tree(chunk_type, []) + stack[-1].append(chunk) + stack.append(chunk) + + # Add the new word token. + stack[-1].append((word, tag)) + + return stack[0] + + +def tree2conlltags(t): + """ + Return a list of 3-tuples containing ``(word, tag, IOB-tag)``. + Convert a tree to the CoNLL IOB tag format. + + :param t: The tree to be converted. + :type t: Tree + :rtype: list(tuple) + """ + + tags = [] + for child in t: + try: + category = child.label() + prefix = "B-" + for contents in child: + if isinstance(contents, Tree): + raise ValueError( + "Tree is too deeply nested to be printed in CoNLL format" + ) + tags.append((contents[0], contents[1], prefix + category)) + prefix = "I-" + except AttributeError: + tags.append((child[0], child[1], "O")) + return tags + + +def conlltags2tree( + sentence, chunk_types=("NP", "PP", "VP"), root_label="S", strict=False +): + """ + Convert the CoNLL IOB format to a tree. 
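+# Sketch (assumed API; sentence invented): round-trip a chunk tree through the
+# CoNLL IOB triples produced by tree2conlltags and consumed by conlltags2tree.
+from nltk.chunk.util import conlltags2tree, tagstr2tree, tree2conlltags
+t = tagstr2tree("[ the/DT cat/NN ] sat/VBD")
+iob = tree2conlltags(t)  # [('the', 'DT', 'B-NP'), ('cat', 'NN', 'I-NP'), ('sat', 'VBD', 'O')]
+assert conlltags2tree(iob) == t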
+ """ + tree = Tree(root_label, []) + for (word, postag, chunktag) in sentence: + if chunktag is None: + if strict: + raise ValueError("Bad conll tag sequence") + else: + # Treat as O + tree.append((word, postag)) + elif chunktag.startswith("B-"): + tree.append(Tree(chunktag[2:], [(word, postag)])) + elif chunktag.startswith("I-"): + if ( + len(tree) == 0 + or not isinstance(tree[-1], Tree) + or tree[-1].label() != chunktag[2:] + ): + if strict: + raise ValueError("Bad conll tag sequence") + else: + # Treat as B-* + tree.append(Tree(chunktag[2:], [(word, postag)])) + else: + tree[-1].append((word, postag)) + elif chunktag == "O": + tree.append((word, postag)) + else: + raise ValueError(f"Bad conll tag {chunktag!r}") + return tree + + +def tree2conllstr(t): + """ + Return a multiline string where each line contains a word, tag and IOB tag. + Convert a tree to the CoNLL IOB string format + + :param t: The tree to be converted. + :type t: Tree + :rtype: str + """ + lines = [" ".join(token) for token in tree2conlltags(t)] + return "\n".join(lines) + + +### IEER + +_IEER_DOC_RE = re.compile( + r"\s*" + r"(\s*(?P.+?)\s*\s*)?" + r"(\s*(?P.+?)\s*\s*)?" + r"(\s*(?P.+?)\s*\s*)?" + r"\s*" + r"(\s*(?P.+?)\s*\s*)?" + r"(?P.*?)\s*" + r"\s*\s*", + re.DOTALL, +) + +_IEER_TYPE_RE = re.compile(r']*?type="(?P\w+)"') + + +def _ieer_read_text(s, root_label): + stack = [Tree(root_label, [])] + # s will be None if there is no headline in the text + # return the empty list in place of a Tree + if s is None: + return [] + for piece_m in re.finditer(r"<[^>]+>|[^\s<]+", s): + piece = piece_m.group() + try: + if piece.startswith(".... + m = _IEER_DOC_RE.match(s) + if m: + return { + "text": _ieer_read_text(m.group("text"), root_label), + "docno": m.group("docno"), + "doctype": m.group("doctype"), + "date_time": m.group("date_time"), + #'headline': m.group('headline') + # we want to capture NEs in the headline too! + "headline": _ieer_read_text(m.group("headline"), root_label), + } + else: + return _ieer_read_text(s, root_label) + + +def demo(): + + s = "[ Pierre/NNP Vinken/NNP ] ,/, [ 61/CD years/NNS ] old/JJ ,/, will/MD join/VB [ the/DT board/NN ] ./." + import nltk + + t = nltk.chunk.tagstr2tree(s, chunk_label="NP") + t.pprint() + print() + + s = """ +These DT B-NP +research NN I-NP +protocols NNS I-NP +offer VBP B-VP +to TO B-PP +the DT B-NP +patient NN I-NP +not RB O +only RB O +the DT B-NP +very RB I-NP +best JJS I-NP +therapy NN I-NP +which WDT B-NP +we PRP B-NP +have VBP B-VP +established VBN I-VP +today NN B-NP +but CC B-NP +also RB I-NP +the DT B-NP +hope NN I-NP +of IN B-PP +something NN B-NP +still RB B-ADJP +better JJR I-ADJP +. . 
O +""" + + conll_tree = conllstr2tree(s, chunk_types=("NP", "PP")) + conll_tree.pprint() + + # Demonstrate CoNLL output + print("CoNLL output:") + print(nltk.chunk.tree2conllstr(conll_tree)) + print() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bcp47.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bcp47.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..678548faedd82bc1e00f7c018e3ca677ae970de9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bcp47.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bnc.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bnc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df35f1dab114179d097d775a112bf9394cd0bd4c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bnc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/cmudict.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/cmudict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..443740251d0ed3f6c4998e103eb5b64b2e67b9c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/cmudict.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/dependency.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/dependency.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7e3d7e1d92761becac0a2943d84b6ba0a7ad189 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/dependency.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nkjp.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nkjp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d1fc0496b050a0ebf23b84f11da6e26fb00d063 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/nkjp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9b4c2362ae05f81714321deeed1dc7d1530d0ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8385b4b0e4005cabc4b1d1e818926986a26511e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/semcor.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/semcor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1005c1d54ec5c2482f8790443bdd04cc3c5aecc8 Binary files 
/dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/semcor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/verbnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/verbnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5685995f6e7fb3191d8f5c633935e54ba31772a8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/verbnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ycoe.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ycoe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e609cf9a83b3196b58af219a3fe29f2900a679fd Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ycoe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/draw/__init__.py b/venv/lib/python3.10/site-packages/nltk/draw/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4e3d4308b5e4be658f94a175631eadc62c84008b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/draw/__init__.py @@ -0,0 +1,27 @@ +# Natural Language Toolkit: graphical representations package +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# Import Tkinter-based modules if Tkinter is installed +try: + import tkinter +except ImportError: + import warnings + + warnings.warn("nltk.draw package not loaded (please install Tkinter library).") +else: + from nltk.draw.cfg import ProductionList, CFGEditor, CFGDemo + from nltk.draw.tree import ( + TreeSegmentWidget, + tree_to_treesegment, + TreeWidget, + TreeView, + draw_trees, + ) + from nltk.draw.table import Table + +from nltk.draw.dispersion import dispersion_plot diff --git a/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a426d846c3b64ebc0760be8aad3b0b941e4fd595 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/cfg.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/cfg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d774a90debbbbda4eac865c4d2ccf0aba3731ff Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/cfg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/dispersion.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/dispersion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3044d1137d87a21c38ae238c4b4a589eb9265ca3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/dispersion.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/table.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/table.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ca77afb1a44e6a576f9c2ffa8b5d654c76d2580 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/table.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/tree.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c40fea1702ab2d1f0c9304683d781f85bc72b79 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/tree.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2e0accf938b920e475e24d97ef515211a566ce8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/draw/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/draw/cfg.py b/venv/lib/python3.10/site-packages/nltk/draw/cfg.py new file mode 100644 index 0000000000000000000000000000000000000000..650162abf095d439cf7ca2ba3f0f36c81f0ed041 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/draw/cfg.py @@ -0,0 +1,859 @@ +# Natural Language Toolkit: CFG visualization +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Visualization tools for CFGs. +""" + +# Idea for a nice demo: +# - 3 panes: grammar, treelet, working area +# - grammar is a list of productions +# - when you select a production, the treelet that it licenses appears +# in the treelet area +# - the working area has the text on the bottom, and S at top. When +# you select a production, it shows (ghosted) the locations where +# that production's treelet could be attached to either the text +# or the tree rooted at S. +# - the user can drag the treelet onto one of those (or click on them?) +# - the user can delete pieces of the tree from the working area +# (right click?) +# - connecting top to bottom? drag one NP onto another? +# +# +-------------------------------------------------------------+ +# | S -> NP VP | S | +# |[NP -> Det N ]| / \ | +# | ... | NP VP | +# | N -> 'dog' | | +# | N -> 'cat' | | +# | ... 
| | +# +--------------+ | +# | NP | Det N | +# | / \ | | | | +# | Det N | the cat saw the dog | +# | | | +# +--------------+----------------------------------------------+ +# +# Operations: +# - connect a new treelet -- drag or click shadow +# - delete a treelet -- right click +# - if only connected to top, delete everything below +# - if only connected to bottom, delete everything above +# - connect top & bottom -- drag a leaf to a root or a root to a leaf +# - disconnect top & bottom -- right click +# - if connected to top & bottom, then disconnect + +import re +from tkinter import ( + Button, + Canvas, + Entry, + Frame, + IntVar, + Label, + Scrollbar, + Text, + Tk, + Toplevel, +) + +from nltk.draw.tree import TreeSegmentWidget, tree_to_treesegment +from nltk.draw.util import ( + CanvasFrame, + ColorizedList, + ShowText, + SymbolWidget, + TextWidget, +) +from nltk.grammar import CFG, Nonterminal, _read_cfg_production, nonterminals +from nltk.tree import Tree + +###################################################################### +# Production List +###################################################################### + + +class ProductionList(ColorizedList): + ARROW = SymbolWidget.SYMBOLS["rightarrow"] + + def _init_colortags(self, textwidget, options): + textwidget.tag_config("terminal", foreground="#006000") + textwidget.tag_config("arrow", font="symbol", underline="0") + textwidget.tag_config( + "nonterminal", foreground="blue", font=("helvetica", -12, "bold") + ) + + def _item_repr(self, item): + contents = [] + contents.append(("%s\t" % item.lhs(), "nonterminal")) + contents.append((self.ARROW, "arrow")) + for elt in item.rhs(): + if isinstance(elt, Nonterminal): + contents.append((" %s" % elt.symbol(), "nonterminal")) + else: + contents.append((" %r" % elt, "terminal")) + return contents + + +###################################################################### +# CFG Editor +###################################################################### + +_CFGEditor_HELP = """ + +The CFG Editor can be used to create or modify context free grammars. +A context free grammar consists of a start symbol and a list of +productions. The start symbol is specified by the text entry field in +the upper right hand corner of the editor; and the list of productions +are specified in the main text editing box. + +Every non-blank line specifies a single production. Each production +has the form "LHS -> RHS," where LHS is a single nonterminal, and RHS +is a list of nonterminals and terminals. + +Nonterminals must be a single word, such as S or NP or NP_subj. +Currently, nonterminals must consists of alphanumeric characters and +underscores (_). Nonterminals are colored blue. If you place the +mouse over any nonterminal, then all occurrences of that nonterminal +will be highlighted. + +Terminals must be surrounded by single quotes (') or double +quotes(\"). For example, "dog" and "New York" are terminals. +Currently, the string within the quotes must consist of alphanumeric +characters, underscores, and spaces. + +To enter a new production, go to a blank line, and type a nonterminal, +followed by an arrow (->), followed by a sequence of terminals and +nonterminals. Note that "->" (dash + greater-than) is automatically +converted to an arrow symbol. When you move your cursor to a +different line, your production will automatically be colorized. If +there are any errors, they will be highlighted in red. + +Note that the order of the productions is significant for some +algorithms. 
To re-order the productions, use cut and paste to move +them. + +Use the buttons at the bottom of the window when you are done editing +the CFG: + - Ok: apply the new CFG, and exit the editor. + - Apply: apply the new CFG, and do not exit the editor. + - Reset: revert to the original CFG, and do not exit the editor. + - Cancel: revert to the original CFG, and exit the editor. + +""" + + +class CFGEditor: + """ + A dialog window for creating and editing context free grammars. + ``CFGEditor`` imposes the following restrictions: + + - All nonterminals must be strings consisting of word + characters. + - All terminals must be strings consisting of word characters + and space characters. + """ + + # Regular expressions used by _analyze_line. Precompile them, so + # we can process the text faster. + ARROW = SymbolWidget.SYMBOLS["rightarrow"] + _LHS_RE = re.compile(r"(^\s*\w+\s*)(->|(" + ARROW + "))") + _ARROW_RE = re.compile(r"\s*(->|(" + ARROW + r"))\s*") + _PRODUCTION_RE = re.compile( + r"(^\s*\w+\s*)" + + "(->|(" # LHS + + ARROW + + r"))\s*" + + r"((\w+|'[\w ]*'|\"[\w ]*\"|\|)\s*)*$" # arrow + ) # RHS + _TOKEN_RE = re.compile("\\w+|->|'[\\w ]+'|\"[\\w ]+\"|(" + ARROW + ")") + _BOLD = ("helvetica", -12, "bold") + + def __init__(self, parent, cfg=None, set_cfg_callback=None): + self._parent = parent + if cfg is not None: + self._cfg = cfg + else: + self._cfg = CFG(Nonterminal("S"), []) + self._set_cfg_callback = set_cfg_callback + + self._highlight_matching_nonterminals = 1 + + # Create the top-level window. + self._top = Toplevel(parent) + self._init_bindings() + + self._init_startframe() + self._startframe.pack(side="top", fill="x", expand=0) + self._init_prodframe() + self._prodframe.pack(side="top", fill="both", expand=1) + self._init_buttons() + self._buttonframe.pack(side="bottom", fill="x", expand=0) + + self._textwidget.focus() + + def _init_startframe(self): + frame = self._startframe = Frame(self._top) + self._start = Entry(frame) + self._start.pack(side="right") + Label(frame, text="Start Symbol:").pack(side="right") + Label(frame, text="Productions:").pack(side="left") + self._start.insert(0, self._cfg.start().symbol()) + + def _init_buttons(self): + frame = self._buttonframe = Frame(self._top) + Button(frame, text="Ok", command=self._ok, underline=0, takefocus=0).pack( + side="left" + ) + Button(frame, text="Apply", command=self._apply, underline=0, takefocus=0).pack( + side="left" + ) + Button(frame, text="Reset", command=self._reset, underline=0, takefocus=0).pack( + side="left" + ) + Button( + frame, text="Cancel", command=self._cancel, underline=0, takefocus=0 + ).pack(side="left") + Button(frame, text="Help", command=self._help, underline=0, takefocus=0).pack( + side="right" + ) + + def _init_bindings(self): + self._top.title("CFG Editor") + self._top.bind("", self._cancel) + self._top.bind("", self._cancel) + self._top.bind("", self._cancel) + # self._top.bind('', self._cancel) + self._top.bind("", self._cancel) + self._top.bind("", self._cancel) + # self._top.bind('', self._cancel) + self._top.bind("", self._cancel) + + self._top.bind("", self._ok) + self._top.bind("", self._ok) + self._top.bind("", self._apply) + self._top.bind("", self._apply) + self._top.bind("", self._reset) + self._top.bind("", self._reset) + self._top.bind("", self._help) + self._top.bind("", self._help) + self._top.bind("", self._help) + + def _init_prodframe(self): + self._prodframe = Frame(self._top) + + # Create the basic Text widget & scrollbar. 
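+        # The Text widget created below is the main editing area: one
+        # production per non-blank line.  The tag configurations and key
+        # bindings that follow drive the colorization of terminals, arrows
+        # and nonterminals, and trigger re-analysis of a line when the
+        # cursor leaves it.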
+ self._textwidget = Text( + self._prodframe, background="#e0e0e0", exportselection=1 + ) + self._textscroll = Scrollbar(self._prodframe, takefocus=0, orient="vertical") + self._textwidget.config(yscrollcommand=self._textscroll.set) + self._textscroll.config(command=self._textwidget.yview) + self._textscroll.pack(side="right", fill="y") + self._textwidget.pack(expand=1, fill="both", side="left") + + # Initialize the colorization tags. Each nonterminal gets its + # own tag, so they aren't listed here. + self._textwidget.tag_config("terminal", foreground="#006000") + self._textwidget.tag_config("arrow", font="symbol") + self._textwidget.tag_config("error", background="red") + + # Keep track of what line they're on. We use that to remember + # to re-analyze a line whenever they leave it. + self._linenum = 0 + + # Expand "->" to an arrow. + self._top.bind(">", self._replace_arrows) + + # Re-colorize lines when appropriate. + self._top.bind("<>", self._analyze) + self._top.bind("", self._check_analyze) + self._top.bind("", self._check_analyze) + + # Tab cycles focus. (why doesn't this work??) + def cycle(e, textwidget=self._textwidget): + textwidget.tk_focusNext().focus() + + self._textwidget.bind("", cycle) + + prod_tuples = [(p.lhs(), [p.rhs()]) for p in self._cfg.productions()] + for i in range(len(prod_tuples) - 1, 0, -1): + if prod_tuples[i][0] == prod_tuples[i - 1][0]: + if () in prod_tuples[i][1]: + continue + if () in prod_tuples[i - 1][1]: + continue + print(prod_tuples[i - 1][1]) + print(prod_tuples[i][1]) + prod_tuples[i - 1][1].extend(prod_tuples[i][1]) + del prod_tuples[i] + + for lhs, rhss in prod_tuples: + print(lhs, rhss) + s = "%s ->" % lhs + for rhs in rhss: + for elt in rhs: + if isinstance(elt, Nonterminal): + s += " %s" % elt + else: + s += " %r" % elt + s += " |" + s = s[:-2] + "\n" + self._textwidget.insert("end", s) + + self._analyze() + + # # Add the producitons to the text widget, and colorize them. + # prod_by_lhs = {} + # for prod in self._cfg.productions(): + # if len(prod.rhs()) > 0: + # prod_by_lhs.setdefault(prod.lhs(),[]).append(prod) + # for (lhs, prods) in prod_by_lhs.items(): + # self._textwidget.insert('end', '%s ->' % lhs) + # self._textwidget.insert('end', self._rhs(prods[0])) + # for prod in prods[1:]: + # print '\t|'+self._rhs(prod), + # self._textwidget.insert('end', '\t|'+self._rhs(prod)) + # print + # self._textwidget.insert('end', '\n') + # for prod in self._cfg.productions(): + # if len(prod.rhs()) == 0: + # self._textwidget.insert('end', '%s' % prod) + # self._analyze() + + # def _rhs(self, prod): + # s = '' + # for elt in prod.rhs(): + # if isinstance(elt, Nonterminal): s += ' %s' % elt.symbol() + # else: s += ' %r' % elt + # return s + + def _clear_tags(self, linenum): + """ + Remove all tags (except ``arrow`` and ``sel``) from the given + line of the text widget used for editing the productions. + """ + start = "%d.0" % linenum + end = "%d.end" % linenum + for tag in self._textwidget.tag_names(): + if tag not in ("arrow", "sel"): + self._textwidget.tag_remove(tag, start, end) + + def _check_analyze(self, *e): + """ + Check if we've moved to a new line. If we have, then remove + all colorization from the line we moved to, and re-colorize + the line that we moved from. 
+ """ + linenum = int(self._textwidget.index("insert").split(".")[0]) + if linenum != self._linenum: + self._clear_tags(linenum) + self._analyze_line(self._linenum) + self._linenum = linenum + + def _replace_arrows(self, *e): + """ + Replace any ``'->'`` text strings with arrows (char \\256, in + symbol font). This searches the whole buffer, but is fast + enough to be done anytime they press '>'. + """ + arrow = "1.0" + while True: + arrow = self._textwidget.search("->", arrow, "end+1char") + if arrow == "": + break + self._textwidget.delete(arrow, arrow + "+2char") + self._textwidget.insert(arrow, self.ARROW, "arrow") + self._textwidget.insert(arrow, "\t") + + arrow = "1.0" + while True: + arrow = self._textwidget.search(self.ARROW, arrow + "+1char", "end+1char") + if arrow == "": + break + self._textwidget.tag_add("arrow", arrow, arrow + "+1char") + + def _analyze_token(self, match, linenum): + """ + Given a line number and a regexp match for a token on that + line, colorize the token. Note that the regexp match gives us + the token's text, start index (on the line), and end index (on + the line). + """ + # What type of token is it? + if match.group()[0] in "'\"": + tag = "terminal" + elif match.group() in ("->", self.ARROW): + tag = "arrow" + else: + # If it's a nonterminal, then set up new bindings, so we + # can highlight all instances of that nonterminal when we + # put the mouse over it. + tag = "nonterminal_" + match.group() + if tag not in self._textwidget.tag_names(): + self._init_nonterminal_tag(tag) + + start = "%d.%d" % (linenum, match.start()) + end = "%d.%d" % (linenum, match.end()) + self._textwidget.tag_add(tag, start, end) + + def _init_nonterminal_tag(self, tag, foreground="blue"): + self._textwidget.tag_config(tag, foreground=foreground, font=CFGEditor._BOLD) + if not self._highlight_matching_nonterminals: + return + + def enter(e, textwidget=self._textwidget, tag=tag): + textwidget.tag_config(tag, background="#80ff80") + + def leave(e, textwidget=self._textwidget, tag=tag): + textwidget.tag_config(tag, background="") + + self._textwidget.tag_bind(tag, "", enter) + self._textwidget.tag_bind(tag, "", leave) + + def _analyze_line(self, linenum): + """ + Colorize a given line. + """ + # Get rid of any tags that were previously on the line. + self._clear_tags(linenum) + + # Get the line line's text string. + line = self._textwidget.get(repr(linenum) + ".0", repr(linenum) + ".end") + + # If it's a valid production, then colorize each token. + if CFGEditor._PRODUCTION_RE.match(line): + # It's valid; Use _TOKEN_RE to tokenize the production, + # and call analyze_token on each token. + def analyze_token(match, self=self, linenum=linenum): + self._analyze_token(match, linenum) + return "" + + CFGEditor._TOKEN_RE.sub(analyze_token, line) + elif line.strip() != "": + # It's invalid; show the user where the error is. + self._mark_error(linenum, line) + + def _mark_error(self, linenum, line): + """ + Mark the location of an error in a line. + """ + arrowmatch = CFGEditor._ARROW_RE.search(line) + if not arrowmatch: + # If there's no arrow at all, highlight the whole line. + start = "%d.0" % linenum + end = "%d.end" % linenum + elif not CFGEditor._LHS_RE.match(line): + # Otherwise, if the LHS is bad, highlight it. + start = "%d.0" % linenum + end = "%d.%d" % (linenum, arrowmatch.start()) + else: + # Otherwise, highlight the RHS. + start = "%d.%d" % (linenum, arrowmatch.end()) + end = "%d.end" % linenum + + # If we're highlighting 0 chars, highlight the whole line. 
+ if self._textwidget.compare(start, "==", end): + start = "%d.0" % linenum + end = "%d.end" % linenum + self._textwidget.tag_add("error", start, end) + + def _analyze(self, *e): + """ + Replace ``->`` with arrows, and colorize the entire buffer. + """ + self._replace_arrows() + numlines = int(self._textwidget.index("end").split(".")[0]) + for linenum in range(1, numlines + 1): # line numbers start at 1. + self._analyze_line(linenum) + + def _parse_productions(self): + """ + Parse the current contents of the textwidget buffer, to create + a list of productions. + """ + productions = [] + + # Get the text, normalize it, and split it into lines. + text = self._textwidget.get("1.0", "end") + text = re.sub(self.ARROW, "->", text) + text = re.sub("\t", " ", text) + lines = text.split("\n") + + # Convert each line to a CFG production + for line in lines: + line = line.strip() + if line == "": + continue + productions += _read_cfg_production(line) + # if line.strip() == '': continue + # if not CFGEditor._PRODUCTION_RE.match(line): + # raise ValueError('Bad production string %r' % line) + # + # (lhs_str, rhs_str) = line.split('->') + # lhs = Nonterminal(lhs_str.strip()) + # rhs = [] + # def parse_token(match, rhs=rhs): + # token = match.group() + # if token[0] in "'\"": rhs.append(token[1:-1]) + # else: rhs.append(Nonterminal(token)) + # return '' + # CFGEditor._TOKEN_RE.sub(parse_token, rhs_str) + # + # productions.append(Production(lhs, *rhs)) + + return productions + + def _destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def _ok(self, *e): + self._apply() + self._destroy() + + def _apply(self, *e): + productions = self._parse_productions() + start = Nonterminal(self._start.get()) + cfg = CFG(start, productions) + if self._set_cfg_callback is not None: + self._set_cfg_callback(cfg) + + def _reset(self, *e): + self._textwidget.delete("1.0", "end") + for production in self._cfg.productions(): + self._textwidget.insert("end", "%s\n" % production) + self._analyze() + if self._set_cfg_callback is not None: + self._set_cfg_callback(self._cfg) + + def _cancel(self, *e): + try: + self._reset() + except: + pass + self._destroy() + + def _help(self, *e): + # The default font's not very legible; try using 'fixed' instead. + try: + ShowText( + self._parent, + "Help: Chart Parser Demo", + (_CFGEditor_HELP).strip(), + width=75, + font="fixed", + ) + except: + ShowText( + self._parent, + "Help: Chart Parser Demo", + (_CFGEditor_HELP).strip(), + width=75, + ) + + +###################################################################### +# New Demo (built tree based on cfg) +###################################################################### + + +class CFGDemo: + def __init__(self, grammar, text): + self._grammar = grammar + self._text = text + + # Set up the main window. 
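+        # The demo window follows the three-pane sketch at the top of this
+        # module: a production list and a treelet canvas on the left
+        # (frame1), and the workspace canvas on the right.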
+ self._top = Tk() + self._top.title("Context Free Grammar Demo") + + # Base font size + self._size = IntVar(self._top) + self._size.set(12) # = medium + + # Set up the key bindings + self._init_bindings(self._top) + + # Create the basic frames + frame1 = Frame(self._top) + frame1.pack(side="left", fill="y", expand=0) + self._init_menubar(self._top) + self._init_buttons(self._top) + self._init_grammar(frame1) + self._init_treelet(frame1) + self._init_workspace(self._top) + + # ////////////////////////////////////////////////// + # Initialization + # ////////////////////////////////////////////////// + + def _init_bindings(self, top): + top.bind("", self.destroy) + + def _init_menubar(self, parent): + pass + + def _init_buttons(self, parent): + pass + + def _init_grammar(self, parent): + self._prodlist = ProductionList(parent, self._grammar, width=20) + self._prodlist.pack(side="top", fill="both", expand=1) + self._prodlist.focus() + self._prodlist.add_callback("select", self._selectprod_cb) + self._prodlist.add_callback("move", self._selectprod_cb) + + def _init_treelet(self, parent): + self._treelet_canvas = Canvas(parent, background="white") + self._treelet_canvas.pack(side="bottom", fill="x") + self._treelet = None + + def _init_workspace(self, parent): + self._workspace = CanvasFrame(parent, background="white") + self._workspace.pack(side="right", fill="both", expand=1) + self._tree = None + self.reset_workspace() + + # ////////////////////////////////////////////////// + # Workspace + # ////////////////////////////////////////////////// + + def reset_workspace(self): + c = self._workspace.canvas() + fontsize = int(self._size.get()) + node_font = ("helvetica", -(fontsize + 4), "bold") + leaf_font = ("helvetica", -(fontsize + 2)) + + # Remove the old tree + if self._tree is not None: + self._workspace.remove_widget(self._tree) + + # The root of the tree. + start = self._grammar.start().symbol() + rootnode = TextWidget(c, start, font=node_font, draggable=1) + + # The leaves of the tree. + leaves = [] + for word in self._text: + leaves.append(TextWidget(c, word, font=leaf_font, draggable=1)) + + # Put it all together into one tree + self._tree = TreeSegmentWidget(c, rootnode, leaves, color="white") + + # Add it to the workspace. + self._workspace.add_widget(self._tree) + + # Move the leaves to the bottom of the workspace. + for leaf in leaves: + leaf.move(0, 100) + + # self._nodes = {start:1} + # self._leaves = dict([(l,1) for l in leaves]) + + def workspace_markprod(self, production): + pass + + def _markproduction(self, prod, tree=None): + if tree is None: + tree = self._tree + for i in range(len(tree.subtrees()) - len(prod.rhs())): + if tree["color", i] == "white": + self._markproduction # FIXME: Is this necessary at all? + + for j, node in enumerate(prod.rhs()): + widget = tree.subtrees()[i + j] + if ( + isinstance(node, Nonterminal) + and isinstance(widget, TreeSegmentWidget) + and node.symbol == widget.label().text() + ): + pass # matching nonterminal + elif ( + isinstance(node, str) + and isinstance(widget, TextWidget) + and node == widget.text() + ): + pass # matching nonterminal + else: + break + else: + # Everything matched! + print("MATCH AT", i) + + # ////////////////////////////////////////////////// + # Grammar + # ////////////////////////////////////////////////// + + def _selectprod_cb(self, production): + canvas = self._treelet_canvas + + self._prodlist.highlight(production) + if self._treelet is not None: + self._treelet.destroy() + + # Convert the production to a tree. 
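+        # Build a depth-one Tree rooted at the production's LHS, with the RHS
+        # symbols as children, so that tree_to_treesegment() can render it on
+        # the treelet canvas below.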
+ rhs = production.rhs() + for (i, elt) in enumerate(rhs): + if isinstance(elt, Nonterminal): + elt = Tree(elt) + tree = Tree(production.lhs().symbol(), *rhs) + + # Draw the tree in the treelet area. + fontsize = int(self._size.get()) + node_font = ("helvetica", -(fontsize + 4), "bold") + leaf_font = ("helvetica", -(fontsize + 2)) + self._treelet = tree_to_treesegment( + canvas, tree, node_font=node_font, leaf_font=leaf_font + ) + self._treelet["draggable"] = 1 + + # Center the treelet. + (x1, y1, x2, y2) = self._treelet.bbox() + w, h = int(canvas["width"]), int(canvas["height"]) + self._treelet.move((w - x1 - x2) / 2, (h - y1 - y2) / 2) + + # Mark the places where we can add it to the workspace. + self._markproduction(production) + + def destroy(self, *args): + self._top.destroy() + + def mainloop(self, *args, **kwargs): + self._top.mainloop(*args, **kwargs) + + +def demo2(): + from nltk import CFG, Nonterminal, Production + + nonterminals = "S VP NP PP P N Name V Det" + (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split()) + productions = ( + # Syntactic Productions + Production(S, [NP, VP]), + Production(NP, [Det, N]), + Production(NP, [NP, PP]), + Production(VP, [VP, PP]), + Production(VP, [V, NP, PP]), + Production(VP, [V, NP]), + Production(PP, [P, NP]), + Production(PP, []), + Production(PP, ["up", "over", NP]), + # Lexical Productions + Production(NP, ["I"]), + Production(Det, ["the"]), + Production(Det, ["a"]), + Production(N, ["man"]), + Production(V, ["saw"]), + Production(P, ["in"]), + Production(P, ["with"]), + Production(N, ["park"]), + Production(N, ["dog"]), + Production(N, ["statue"]), + Production(Det, ["my"]), + ) + grammar = CFG(S, productions) + + text = "I saw a man in the park".split() + d = CFGDemo(grammar, text) + d.mainloop() + + +###################################################################### +# Old Demo +###################################################################### + + +def demo(): + from nltk import CFG, Nonterminal + + nonterminals = "S VP NP PP P N Name V Det" + (S, VP, NP, PP, P, N, Name, V, Det) = (Nonterminal(s) for s in nonterminals.split()) + + grammar = CFG.fromstring( + """ + S -> NP VP + PP -> P NP + NP -> Det N + NP -> NP PP + VP -> V NP + VP -> VP PP + Det -> 'a' + Det -> 'the' + Det -> 'my' + NP -> 'I' + N -> 'dog' + N -> 'man' + N -> 'park' + N -> 'statue' + V -> 'saw' + P -> 'in' + P -> 'up' + P -> 'over' + P -> 'with' + """ + ) + + def cb(grammar): + print(grammar) + + top = Tk() + editor = CFGEditor(top, grammar, cb) + Label(top, text="\nTesting CFG Editor\n").pack() + Button(top, text="Quit", command=top.destroy).pack() + top.mainloop() + + +def demo3(): + from nltk import Production + + (S, VP, NP, PP, P, N, Name, V, Det) = nonterminals( + "S, VP, NP, PP, P, N, Name, V, Det" + ) + + productions = ( + # Syntactic Productions + Production(S, [NP, VP]), + Production(NP, [Det, N]), + Production(NP, [NP, PP]), + Production(VP, [VP, PP]), + Production(VP, [V, NP, PP]), + Production(VP, [V, NP]), + Production(PP, [P, NP]), + Production(PP, []), + Production(PP, ["up", "over", NP]), + # Lexical Productions + Production(NP, ["I"]), + Production(Det, ["the"]), + Production(Det, ["a"]), + Production(N, ["man"]), + Production(V, ["saw"]), + Production(P, ["in"]), + Production(P, ["with"]), + Production(N, ["park"]), + Production(N, ["dog"]), + Production(N, ["statue"]), + Production(Det, ["my"]), + ) + + t = Tk() + + def destroy(e, t=t): + t.destroy() + + t.bind("q", destroy) + p = ProductionList(t, 
productions) + p.pack(expand=1, fill="both") + p.add_callback("select", p.markonly) + p.add_callback("move", p.markonly) + p.focus() + p.mark(productions[2]) + p.mark(productions[8]) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/draw/dispersion.py b/venv/lib/python3.10/site-packages/nltk/draw/dispersion.py new file mode 100644 index 0000000000000000000000000000000000000000..0991194dc42e1c258b6e62c3e8dfb71d44bb3ce6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/draw/dispersion.py @@ -0,0 +1,63 @@ +# Natural Language Toolkit: Dispersion Plots +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +A utility for displaying lexical dispersion. +""" + + +def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"): + """ + Generate a lexical dispersion plot. + + :param text: The source text + :type text: list(str) or iter(str) + :param words: The target words + :type words: list of str + :param ignore_case: flag to set if case should be ignored when searching text + :type ignore_case: bool + :return: a matplotlib Axes object that may still be modified before plotting + :rtype: Axes + """ + + try: + import matplotlib.pyplot as plt + except ImportError as e: + raise ImportError( + "The plot function requires matplotlib to be installed. " + "See https://matplotlib.org/" + ) from e + + word2y = { + word.casefold() if ignore_case else word: y + for y, word in enumerate(reversed(words)) + } + xs, ys = [], [] + for x, token in enumerate(text): + token = token.casefold() if ignore_case else token + y = word2y.get(token) + if y is not None: + xs.append(x) + ys.append(y) + + _, ax = plt.subplots() + ax.plot(xs, ys, "|") + ax.set_yticks(list(range(len(words))), words, color="C0") + ax.set_ylim(-1, len(words)) + ax.set_title(title) + ax.set_xlabel("Word Offset") + return ax + + +if __name__ == "__main__": + import matplotlib.pyplot as plt + + from nltk.corpus import gutenberg + + words = ["Elinor", "Marianne", "Edward", "Willoughby"] + dispersion_plot(gutenberg.words("austen-sense.txt"), words) + plt.show() diff --git a/venv/lib/python3.10/site-packages/nltk/draw/table.py b/venv/lib/python3.10/site-packages/nltk/draw/table.py new file mode 100644 index 0000000000000000000000000000000000000000..0d3526d5f1bf223684a1293dd5ff32ef6cbbbf55 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/draw/table.py @@ -0,0 +1,1177 @@ +# Natural Language Toolkit: Table widget +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Tkinter widgets for displaying multi-column listboxes and tables. +""" + +import operator +from tkinter import Frame, Label, Listbox, Scrollbar, Tk + +###################################################################### +# Multi-Column Listbox +###################################################################### + + +class MultiListbox(Frame): + """ + A multi-column listbox, where the current selection applies to an + entire row. Based on the MultiListbox Tkinter widget + recipe from the Python Cookbook (https://code.activestate.com/recipes/52266/) + + For the most part, ``MultiListbox`` methods delegate to its + contained listboxes. For any methods that do not have docstrings, + see ``Tkinter.Listbox`` for a description of what that method does. 
+ """ + + # ///////////////////////////////////////////////////////////////// + # Configuration + # ///////////////////////////////////////////////////////////////// + + #: Default configuration values for the frame. + FRAME_CONFIG = dict(background="#888", takefocus=True, highlightthickness=1) + + #: Default configurations for the column labels. + LABEL_CONFIG = dict( + borderwidth=1, + relief="raised", + font="helvetica -16 bold", + background="#444", + foreground="white", + ) + + #: Default configuration for the column listboxes. + LISTBOX_CONFIG = dict( + borderwidth=1, + selectborderwidth=0, + highlightthickness=0, + exportselection=False, + selectbackground="#888", + activestyle="none", + takefocus=False, + ) + + # ///////////////////////////////////////////////////////////////// + # Constructor + # ///////////////////////////////////////////////////////////////// + + def __init__(self, master, columns, column_weights=None, cnf={}, **kw): + """ + Construct a new multi-column listbox widget. + + :param master: The widget that should contain the new + multi-column listbox. + + :param columns: Specifies what columns should be included in + the new multi-column listbox. If ``columns`` is an integer, + then it is the number of columns to include. If it is + a list, then its length indicates the number of columns + to include; and each element of the list will be used as + a label for the corresponding column. + + :param cnf, kw: Configuration parameters for this widget. + Use ``label_*`` to configure all labels; and ``listbox_*`` + to configure all listboxes. E.g.: + >>> root = Tk() # doctest: +SKIP + >>> MultiListbox(root, ["Subject", "Sender", "Date"], label_foreground='red').pack() # doctest: +SKIP + """ + # If columns was specified as an int, convert it to a list. + if isinstance(columns, int): + columns = list(range(columns)) + include_labels = False + else: + include_labels = True + + if len(columns) == 0: + raise ValueError("Expected at least one column") + + # Instance variables + self._column_names = tuple(columns) + self._listboxes = [] + self._labels = [] + + # Pick a default value for column_weights, if none was specified. + if column_weights is None: + column_weights = [1] * len(columns) + elif len(column_weights) != len(columns): + raise ValueError("Expected one column_weight for each column") + self._column_weights = column_weights + + # Configure our widgets. 
+ Frame.__init__(self, master, **self.FRAME_CONFIG) + self.grid_rowconfigure(1, weight=1) + for i, label in enumerate(self._column_names): + self.grid_columnconfigure(i, weight=column_weights[i]) + + # Create a label for the column + if include_labels: + l = Label(self, text=label, **self.LABEL_CONFIG) + self._labels.append(l) + l.grid(column=i, row=0, sticky="news", padx=0, pady=0) + l.column_index = i + + # Create a listbox for the column + lb = Listbox(self, **self.LISTBOX_CONFIG) + self._listboxes.append(lb) + lb.grid(column=i, row=1, sticky="news", padx=0, pady=0) + lb.column_index = i + + # Clicking or dragging selects: + lb.bind("", self._select) + lb.bind("", self._select) + # Scroll wheel scrolls: + lb.bind("", lambda e: self._scroll(-1)) + lb.bind("", lambda e: self._scroll(+1)) + lb.bind("", lambda e: self._scroll(e.delta)) + # Button 2 can be used to scan: + lb.bind("", lambda e: self.scan_mark(e.x, e.y)) + lb.bind("", lambda e: self.scan_dragto(e.x, e.y)) + # Dragging outside the window has no effect (disable + # the default listbox behavior, which scrolls): + lb.bind("", lambda e: "break") + # Columns can be resized by dragging them: + lb.bind("", self._resize_column) + + # Columns can be resized by dragging them. (This binding is + # used if they click on the grid between columns:) + self.bind("", self._resize_column) + + # Set up key bindings for the widget: + self.bind("", lambda e: self.select(delta=-1)) + self.bind("", lambda e: self.select(delta=1)) + self.bind("", lambda e: self.select(delta=-self._pagesize())) + self.bind("", lambda e: self.select(delta=self._pagesize())) + + # Configuration customizations + self.configure(cnf, **kw) + + # ///////////////////////////////////////////////////////////////// + # Column Resizing + # ///////////////////////////////////////////////////////////////// + + def _resize_column(self, event): + """ + Callback used to resize a column of the table. Return ``True`` + if the column is actually getting resized (if the user clicked + on the far left or far right 5 pixels of a label); and + ``False`` otherwies. + """ + # If we're already waiting for a button release, then ignore + # the new button press. + if event.widget.bind(""): + return False + + # Decide which column (if any) to resize. + self._resize_column_index = None + if event.widget is self: + for i, lb in enumerate(self._listboxes): + if abs(event.x - (lb.winfo_x() + lb.winfo_width())) < 10: + self._resize_column_index = i + elif event.x > (event.widget.winfo_width() - 5): + self._resize_column_index = event.widget.column_index + elif event.x < 5 and event.widget.column_index != 0: + self._resize_column_index = event.widget.column_index - 1 + + # Bind callbacks that are used to resize it. 
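+        # The motion callback widens or narrows the chosen listbox as the
+        # pointer moves; the button-release callback then removes both
+        # temporary bindings (see the two *_cb methods below).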
+ if self._resize_column_index is not None: + event.widget.bind("", self._resize_column_motion_cb) + event.widget.bind( + "" % event.num, self._resize_column_buttonrelease_cb + ) + return True + else: + return False + + def _resize_column_motion_cb(self, event): + lb = self._listboxes[self._resize_column_index] + charwidth = lb.winfo_width() / lb["width"] + + x1 = event.x + event.widget.winfo_x() + x2 = lb.winfo_x() + lb.winfo_width() + + lb["width"] = max(3, lb["width"] + (x1 - x2) // charwidth) + + def _resize_column_buttonrelease_cb(self, event): + event.widget.unbind("" % event.num) + event.widget.unbind("") + + # ///////////////////////////////////////////////////////////////// + # Properties + # ///////////////////////////////////////////////////////////////// + + @property + def column_names(self): + """ + A tuple containing the names of the columns used by this + multi-column listbox. + """ + return self._column_names + + @property + def column_labels(self): + """ + A tuple containing the ``Tkinter.Label`` widgets used to + display the label of each column. If this multi-column + listbox was created without labels, then this will be an empty + tuple. These widgets will all be augmented with a + ``column_index`` attribute, which can be used to determine + which column they correspond to. This can be convenient, + e.g., when defining callbacks for bound events. + """ + return tuple(self._labels) + + @property + def listboxes(self): + """ + A tuple containing the ``Tkinter.Listbox`` widgets used to + display individual columns. These widgets will all be + augmented with a ``column_index`` attribute, which can be used + to determine which column they correspond to. This can be + convenient, e.g., when defining callbacks for bound events. + """ + return tuple(self._listboxes) + + # ///////////////////////////////////////////////////////////////// + # Mouse & Keyboard Callback Functions + # ///////////////////////////////////////////////////////////////// + + def _select(self, e): + i = e.widget.nearest(e.y) + self.selection_clear(0, "end") + self.selection_set(i) + self.activate(i) + self.focus() + + def _scroll(self, delta): + for lb in self._listboxes: + lb.yview_scroll(delta, "unit") + return "break" + + def _pagesize(self): + """:return: The number of rows that makes up one page""" + return int(self.index("@0,1000000")) - int(self.index("@0,0")) + + # ///////////////////////////////////////////////////////////////// + # Row selection + # ///////////////////////////////////////////////////////////////// + + def select(self, index=None, delta=None, see=True): + """ + Set the selected row. If ``index`` is specified, then select + row ``index``. Otherwise, if ``delta`` is specified, then move + the current selection by ``delta`` (negative numbers for up, + positive numbers for down). This will not move the selection + past the top or the bottom of the list. + + :param see: If true, then call ``self.see()`` with the newly + selected index, to ensure that it is visible. + """ + if (index is not None) and (delta is not None): + raise ValueError("specify index or delta, but not both") + + # If delta was given, then calculate index. + if delta is not None: + if len(self.curselection()) == 0: + index = -1 + delta + else: + index = int(self.curselection()[0]) + delta + + # Clear all selected rows. 
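+        # selection_clear() and selection_set() are delegated to every
+        # underlying listbox, so the entire row is deselected or selected
+        # in one step.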
+ self.selection_clear(0, "end") + + # Select the specified index + if index is not None: + index = min(max(index, 0), self.size() - 1) + # self.activate(index) + self.selection_set(index) + if see: + self.see(index) + + # ///////////////////////////////////////////////////////////////// + # Configuration + # ///////////////////////////////////////////////////////////////// + + def configure(self, cnf={}, **kw): + """ + Configure this widget. Use ``label_*`` to configure all + labels; and ``listbox_*`` to configure all listboxes. E.g.: + + >>> master = Tk() # doctest: +SKIP + >>> mlb = MultiListbox(master, 5) # doctest: +SKIP + >>> mlb.configure(label_foreground='red') # doctest: +SKIP + >>> mlb.configure(listbox_foreground='red') # doctest: +SKIP + """ + cnf = dict(list(cnf.items()) + list(kw.items())) + for (key, val) in list(cnf.items()): + if key.startswith("label_") or key.startswith("label-"): + for label in self._labels: + label.configure({key[6:]: val}) + elif key.startswith("listbox_") or key.startswith("listbox-"): + for listbox in self._listboxes: + listbox.configure({key[8:]: val}) + else: + Frame.configure(self, {key: val}) + + def __setitem__(self, key, val): + """ + Configure this widget. This is equivalent to + ``self.configure({key,val``)}. See ``configure()``. + """ + self.configure({key: val}) + + def rowconfigure(self, row_index, cnf={}, **kw): + """ + Configure all table cells in the given row. Valid keyword + arguments are: ``background``, ``bg``, ``foreground``, ``fg``, + ``selectbackground``, ``selectforeground``. + """ + for lb in self._listboxes: + lb.itemconfigure(row_index, cnf, **kw) + + def columnconfigure(self, col_index, cnf={}, **kw): + """ + Configure all table cells in the given column. Valid keyword + arguments are: ``background``, ``bg``, ``foreground``, ``fg``, + ``selectbackground``, ``selectforeground``. + """ + lb = self._listboxes[col_index] + + cnf = dict(list(cnf.items()) + list(kw.items())) + for (key, val) in list(cnf.items()): + if key in ( + "background", + "bg", + "foreground", + "fg", + "selectbackground", + "selectforeground", + ): + for i in range(lb.size()): + lb.itemconfigure(i, {key: val}) + else: + lb.configure({key: val}) + + def itemconfigure(self, row_index, col_index, cnf=None, **kw): + """ + Configure the table cell at the given row and column. Valid + keyword arguments are: ``background``, ``bg``, ``foreground``, + ``fg``, ``selectbackground``, ``selectforeground``. + """ + lb = self._listboxes[col_index] + return lb.itemconfigure(row_index, cnf, **kw) + + # ///////////////////////////////////////////////////////////////// + # Value Access + # ///////////////////////////////////////////////////////////////// + + def insert(self, index, *rows): + """ + Insert the given row or rows into the table, at the given + index. Each row value should be a tuple of cell values, one + for each column in the row. Index may be an integer or any of + the special strings (such as ``'end'``) accepted by + ``Tkinter.Listbox``. + """ + for elt in rows: + if len(elt) != len(self._column_names): + raise ValueError( + "rows should be tuples whose length " + "is equal to the number of columns" + ) + for (lb, elts) in zip(self._listboxes, list(zip(*rows))): + lb.insert(index, *elts) + + def get(self, first, last=None): + """ + Return the value(s) of the specified row(s). If ``last`` is + not specified, then return a single row value; otherwise, + return a list of row values. Each row value is a tuple of + cell values, one for each column in the row. 
+ """ + values = [lb.get(first, last) for lb in self._listboxes] + if last: + return [tuple(row) for row in zip(*values)] + else: + return tuple(values) + + def bbox(self, row, col): + """ + Return the bounding box for the given table cell, relative to + this widget's top-left corner. The bounding box is a tuple + of integers ``(left, top, width, height)``. + """ + dx, dy, _, _ = self.grid_bbox(row=0, column=col) + x, y, w, h = self._listboxes[col].bbox(row) + return int(x) + int(dx), int(y) + int(dy), int(w), int(h) + + # ///////////////////////////////////////////////////////////////// + # Hide/Show Columns + # ///////////////////////////////////////////////////////////////// + + def hide_column(self, col_index): + """ + Hide the given column. The column's state is still + maintained: its values will still be returned by ``get()``, and + you must supply its values when calling ``insert()``. It is + safe to call this on a column that is already hidden. + + :see: ``show_column()`` + """ + if self._labels: + self._labels[col_index].grid_forget() + self.listboxes[col_index].grid_forget() + self.grid_columnconfigure(col_index, weight=0) + + def show_column(self, col_index): + """ + Display a column that has been hidden using ``hide_column()``. + It is safe to call this on a column that is not hidden. + """ + weight = self._column_weights[col_index] + if self._labels: + self._labels[col_index].grid( + column=col_index, row=0, sticky="news", padx=0, pady=0 + ) + self._listboxes[col_index].grid( + column=col_index, row=1, sticky="news", padx=0, pady=0 + ) + self.grid_columnconfigure(col_index, weight=weight) + + # ///////////////////////////////////////////////////////////////// + # Binding Methods + # ///////////////////////////////////////////////////////////////// + + def bind_to_labels(self, sequence=None, func=None, add=None): + """ + Add a binding to each ``Tkinter.Label`` widget in this + mult-column listbox that will call ``func`` in response to the + event sequence. + + :return: A list of the identifiers of replaced binding + functions (if any), allowing for their deletion (to + prevent a memory leak). + """ + return [label.bind(sequence, func, add) for label in self.column_labels] + + def bind_to_listboxes(self, sequence=None, func=None, add=None): + """ + Add a binding to each ``Tkinter.Listbox`` widget in this + mult-column listbox that will call ``func`` in response to the + event sequence. + + :return: A list of the identifiers of replaced binding + functions (if any), allowing for their deletion (to + prevent a memory leak). + """ + for listbox in self.listboxes: + listbox.bind(sequence, func, add) + + def bind_to_columns(self, sequence=None, func=None, add=None): + """ + Add a binding to each ``Tkinter.Label`` and ``Tkinter.Listbox`` + widget in this mult-column listbox that will call ``func`` in + response to the event sequence. + + :return: A list of the identifiers of replaced binding + functions (if any), allowing for their deletion (to + prevent a memory leak). 
+ """ + return self.bind_to_labels(sequence, func, add) + self.bind_to_listboxes( + sequence, func, add + ) + + # ///////////////////////////////////////////////////////////////// + # Simple Delegation + # ///////////////////////////////////////////////////////////////// + + # These methods delegate to the first listbox: + def curselection(self, *args, **kwargs): + return self._listboxes[0].curselection(*args, **kwargs) + + def selection_includes(self, *args, **kwargs): + return self._listboxes[0].selection_includes(*args, **kwargs) + + def itemcget(self, *args, **kwargs): + return self._listboxes[0].itemcget(*args, **kwargs) + + def size(self, *args, **kwargs): + return self._listboxes[0].size(*args, **kwargs) + + def index(self, *args, **kwargs): + return self._listboxes[0].index(*args, **kwargs) + + def nearest(self, *args, **kwargs): + return self._listboxes[0].nearest(*args, **kwargs) + + # These methods delegate to each listbox (and return None): + def activate(self, *args, **kwargs): + for lb in self._listboxes: + lb.activate(*args, **kwargs) + + def delete(self, *args, **kwargs): + for lb in self._listboxes: + lb.delete(*args, **kwargs) + + def scan_mark(self, *args, **kwargs): + for lb in self._listboxes: + lb.scan_mark(*args, **kwargs) + + def scan_dragto(self, *args, **kwargs): + for lb in self._listboxes: + lb.scan_dragto(*args, **kwargs) + + def see(self, *args, **kwargs): + for lb in self._listboxes: + lb.see(*args, **kwargs) + + def selection_anchor(self, *args, **kwargs): + for lb in self._listboxes: + lb.selection_anchor(*args, **kwargs) + + def selection_clear(self, *args, **kwargs): + for lb in self._listboxes: + lb.selection_clear(*args, **kwargs) + + def selection_set(self, *args, **kwargs): + for lb in self._listboxes: + lb.selection_set(*args, **kwargs) + + def yview(self, *args, **kwargs): + for lb in self._listboxes: + v = lb.yview(*args, **kwargs) + return v # if called with no arguments + + def yview_moveto(self, *args, **kwargs): + for lb in self._listboxes: + lb.yview_moveto(*args, **kwargs) + + def yview_scroll(self, *args, **kwargs): + for lb in self._listboxes: + lb.yview_scroll(*args, **kwargs) + + # ///////////////////////////////////////////////////////////////// + # Aliases + # ///////////////////////////////////////////////////////////////// + + itemconfig = itemconfigure + rowconfig = rowconfigure + columnconfig = columnconfigure + select_anchor = selection_anchor + select_clear = selection_clear + select_includes = selection_includes + select_set = selection_set + + # ///////////////////////////////////////////////////////////////// + # These listbox methods are not defined for multi-listbox + # ///////////////////////////////////////////////////////////////// + # def xview(self, *what): pass + # def xview_moveto(self, fraction): pass + # def xview_scroll(self, number, what): pass + + +###################################################################### +# Table +###################################################################### + + +class Table: + """ + A display widget for a table of values, based on a ``MultiListbox`` + widget. For many purposes, ``Table`` can be treated as a + list-of-lists. E.g., table[i] is a list of the values for row i; + and table.append(row) adds a new row with the given list of + values. Individual cells can be accessed using table[i,j], which + refers to the j-th column of the i-th row. This can be used to + both read and write values from the table. 
E.g.: + + >>> table[i,j] = 'hello' # doctest: +SKIP + + The column (j) can be given either as an index number, or as a + column name. E.g., the following prints the value in the 3rd row + for the 'First Name' column: + + >>> print(table[3, 'First Name']) # doctest: +SKIP + John + + You can configure the colors for individual rows, columns, or + cells using ``rowconfig()``, ``columnconfig()``, and ``itemconfig()``. + The color configuration for each row will be preserved if the + table is modified; however, when new rows are added, any color + configurations that have been made for *columns* will not be + applied to the new row. + + Note: Although ``Table`` acts like a widget in some ways (e.g., it + defines ``grid()``, ``pack()``, and ``bind()``), it is not itself a + widget; it just contains one. This is because widgets need to + define ``__getitem__()``, ``__setitem__()``, and ``__nonzero__()`` in + a way that's incompatible with the fact that ``Table`` behaves as a + list-of-lists. + + :ivar _mlb: The multi-column listbox used to display this table's data. + :ivar _rows: A list-of-lists used to hold the cell values of this + table. Each element of _rows is a row value, i.e., a list of + cell values, one for each column in the row. + """ + + def __init__( + self, + master, + column_names, + rows=None, + column_weights=None, + scrollbar=True, + click_to_sort=True, + reprfunc=None, + cnf={}, + **kw + ): + """ + Construct a new Table widget. + + :type master: Tkinter.Widget + :param master: The widget that should contain the new table. + :type column_names: list(str) + :param column_names: A list of names for the columns; these + names will be used to create labels for each column; + and can be used as an index when reading or writing + cell values from the table. + :type rows: list(list) + :param rows: A list of row values used to initialize the table. + Each row value should be a tuple of cell values, one for + each column in the row. + :type scrollbar: bool + :param scrollbar: If true, then create a scrollbar for the + new table widget. + :type click_to_sort: bool + :param click_to_sort: If true, then create bindings that will + sort the table's rows by a given column's values if the + user clicks on that colum's label. + :type reprfunc: function + :param reprfunc: If specified, then use this function to + convert each table cell value to a string suitable for + display. ``reprfunc`` has the following signature: + reprfunc(row_index, col_index, cell_value) -> str + (Note that the column is specified by index, not by name.) + :param cnf, kw: Configuration parameters for this widget's + contained ``MultiListbox``. See ``MultiListbox.__init__()`` + for details. + """ + self._num_columns = len(column_names) + self._reprfunc = reprfunc + self._frame = Frame(master) + + self._column_name_to_index = {c: i for (i, c) in enumerate(column_names)} + + # Make a copy of the rows & check that it's valid. + if rows is None: + self._rows = [] + else: + self._rows = [[v for v in row] for row in rows] + for row in self._rows: + self._checkrow(row) + + # Create our multi-list box. 
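+        # The MultiListbox does the actual drawing; the real cell values live
+        # in the parallel list-of-lists self._rows, and reprfunc (if given) is
+        # only applied when values are pushed into the listboxes.
+        # Illustrative construction (column names and rows are made up):
+        #   Table(master, ['Word', 'Count'], rows=[('the', 42), ('dog', 7)])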
+ self._mlb = MultiListbox(self._frame, column_names, column_weights, cnf, **kw) + self._mlb.pack(side="left", expand=True, fill="both") + + # Optional scrollbar + if scrollbar: + sb = Scrollbar(self._frame, orient="vertical", command=self._mlb.yview) + self._mlb.listboxes[0]["yscrollcommand"] = sb.set + # for listbox in self._mlb.listboxes: + # listbox['yscrollcommand'] = sb.set + sb.pack(side="right", fill="y") + self._scrollbar = sb + + # Set up sorting + self._sortkey = None + if click_to_sort: + for i, l in enumerate(self._mlb.column_labels): + l.bind("", self._sort) + + # Fill in our multi-list box. + self._fill_table() + + # ///////////////////////////////////////////////////////////////// + # { Widget-like Methods + # ///////////////////////////////////////////////////////////////// + # These all just delegate to either our frame or our MLB. + + def pack(self, *args, **kwargs): + """Position this table's main frame widget in its parent + widget. See ``Tkinter.Frame.pack()`` for more info.""" + self._frame.pack(*args, **kwargs) + + def grid(self, *args, **kwargs): + """Position this table's main frame widget in its parent + widget. See ``Tkinter.Frame.grid()`` for more info.""" + self._frame.grid(*args, **kwargs) + + def focus(self): + """Direct (keyboard) input foxus to this widget.""" + self._mlb.focus() + + def bind(self, sequence=None, func=None, add=None): + """Add a binding to this table's main frame that will call + ``func`` in response to the event sequence.""" + self._mlb.bind(sequence, func, add) + + def rowconfigure(self, row_index, cnf={}, **kw): + """:see: ``MultiListbox.rowconfigure()``""" + self._mlb.rowconfigure(row_index, cnf, **kw) + + def columnconfigure(self, col_index, cnf={}, **kw): + """:see: ``MultiListbox.columnconfigure()``""" + col_index = self.column_index(col_index) + self._mlb.columnconfigure(col_index, cnf, **kw) + + def itemconfigure(self, row_index, col_index, cnf=None, **kw): + """:see: ``MultiListbox.itemconfigure()``""" + col_index = self.column_index(col_index) + return self._mlb.itemconfigure(row_index, col_index, cnf, **kw) + + def bind_to_labels(self, sequence=None, func=None, add=None): + """:see: ``MultiListbox.bind_to_labels()``""" + return self._mlb.bind_to_labels(sequence, func, add) + + def bind_to_listboxes(self, sequence=None, func=None, add=None): + """:see: ``MultiListbox.bind_to_listboxes()``""" + return self._mlb.bind_to_listboxes(sequence, func, add) + + def bind_to_columns(self, sequence=None, func=None, add=None): + """:see: ``MultiListbox.bind_to_columns()``""" + return self._mlb.bind_to_columns(sequence, func, add) + + rowconfig = rowconfigure + columnconfig = columnconfigure + itemconfig = itemconfigure + + # ///////////////////////////////////////////////////////////////// + # { Table as list-of-lists + # ///////////////////////////////////////////////////////////////// + + def insert(self, row_index, rowvalue): + """ + Insert a new row into the table, so that its row index will be + ``row_index``. If the table contains any rows whose row index + is greater than or equal to ``row_index``, then they will be + shifted down. + + :param rowvalue: A tuple of cell values, one for each column + in the new row. 
+ """ + self._checkrow(rowvalue) + self._rows.insert(row_index, rowvalue) + if self._reprfunc is not None: + rowvalue = [ + self._reprfunc(row_index, j, v) for (j, v) in enumerate(rowvalue) + ] + self._mlb.insert(row_index, rowvalue) + if self._DEBUG: + self._check_table_vs_mlb() + + def extend(self, rowvalues): + """ + Add new rows at the end of the table. + + :param rowvalues: A list of row values used to initialize the + table. Each row value should be a tuple of cell values, + one for each column in the row. + """ + for rowvalue in rowvalues: + self.append(rowvalue) + if self._DEBUG: + self._check_table_vs_mlb() + + def append(self, rowvalue): + """ + Add a new row to the end of the table. + + :param rowvalue: A tuple of cell values, one for each column + in the new row. + """ + self.insert(len(self._rows), rowvalue) + if self._DEBUG: + self._check_table_vs_mlb() + + def clear(self): + """ + Delete all rows in this table. + """ + self._rows = [] + self._mlb.delete(0, "end") + if self._DEBUG: + self._check_table_vs_mlb() + + def __getitem__(self, index): + """ + Return the value of a row or a cell in this table. If + ``index`` is an integer, then the row value for the ``index``th + row. This row value consists of a tuple of cell values, one + for each column in the row. If ``index`` is a tuple of two + integers, ``(i,j)``, then return the value of the cell in the + ``i``th row and the ``j``th column. + """ + if isinstance(index, slice): + raise ValueError("Slicing not supported") + elif isinstance(index, tuple) and len(index) == 2: + return self._rows[index[0]][self.column_index(index[1])] + else: + return tuple(self._rows[index]) + + def __setitem__(self, index, val): + """ + Replace the value of a row or a cell in this table with + ``val``. + + If ``index`` is an integer, then ``val`` should be a row value + (i.e., a tuple of cell values, one for each column). In this + case, the values of the ``index``th row of the table will be + replaced with the values in ``val``. + + If ``index`` is a tuple of integers, ``(i,j)``, then replace the + value of the cell in the ``i``th row and ``j``th column with + ``val``. + """ + if isinstance(index, slice): + raise ValueError("Slicing not supported") + + # table[i,j] = val + elif isinstance(index, tuple) and len(index) == 2: + i, j = index[0], self.column_index(index[1]) + config_cookie = self._save_config_info([i]) + self._rows[i][j] = val + if self._reprfunc is not None: + val = self._reprfunc(i, j, val) + self._mlb.listboxes[j].insert(i, val) + self._mlb.listboxes[j].delete(i + 1) + self._restore_config_info(config_cookie) + + # table[i] = val + else: + config_cookie = self._save_config_info([index]) + self._checkrow(val) + self._rows[index] = list(val) + if self._reprfunc is not None: + val = [self._reprfunc(index, j, v) for (j, v) in enumerate(val)] + self._mlb.insert(index, val) + self._mlb.delete(index + 1) + self._restore_config_info(config_cookie) + + def __delitem__(self, row_index): + """ + Delete the ``row_index``th row from this table. + """ + if isinstance(row_index, slice): + raise ValueError("Slicing not supported") + if isinstance(row_index, tuple) and len(row_index) == 2: + raise ValueError("Cannot delete a single cell!") + del self._rows[row_index] + self._mlb.delete(row_index) + if self._DEBUG: + self._check_table_vs_mlb() + + def __len__(self): + """ + :return: the number of rows in this table. 
+ """ + return len(self._rows) + + def _checkrow(self, rowvalue): + """ + Helper function: check that a given row value has the correct + number of elements; and if not, raise an exception. + """ + if len(rowvalue) != self._num_columns: + raise ValueError( + "Row %r has %d columns; expected %d" + % (rowvalue, len(rowvalue), self._num_columns) + ) + + # ///////////////////////////////////////////////////////////////// + # Columns + # ///////////////////////////////////////////////////////////////// + + @property + def column_names(self): + """A list of the names of the columns in this table.""" + return self._mlb.column_names + + def column_index(self, i): + """ + If ``i`` is a valid column index integer, then return it as is. + Otherwise, check if ``i`` is used as the name for any column; + if so, return that column's index. Otherwise, raise a + ``KeyError`` exception. + """ + if isinstance(i, int) and 0 <= i < self._num_columns: + return i + else: + # This raises a key error if the column is not found. + return self._column_name_to_index[i] + + def hide_column(self, column_index): + """:see: ``MultiListbox.hide_column()``""" + self._mlb.hide_column(self.column_index(column_index)) + + def show_column(self, column_index): + """:see: ``MultiListbox.show_column()``""" + self._mlb.show_column(self.column_index(column_index)) + + # ///////////////////////////////////////////////////////////////// + # Selection + # ///////////////////////////////////////////////////////////////// + + def selected_row(self): + """ + Return the index of the currently selected row, or None if + no row is selected. To get the row value itself, use + ``table[table.selected_row()]``. + """ + sel = self._mlb.curselection() + if sel: + return int(sel[0]) + else: + return None + + def select(self, index=None, delta=None, see=True): + """:see: ``MultiListbox.select()``""" + self._mlb.select(index, delta, see) + + # ///////////////////////////////////////////////////////////////// + # Sorting + # ///////////////////////////////////////////////////////////////// + + def sort_by(self, column_index, order="toggle"): + """ + Sort the rows in this table, using the specified column's + values as a sort key. + + :param column_index: Specifies which column to sort, using + either a column index (int) or a column's label name + (str). + + :param order: Specifies whether to sort the values in + ascending or descending order: + + - ``'ascending'``: Sort from least to greatest. + - ``'descending'``: Sort from greatest to least. + - ``'toggle'``: If the most recent call to ``sort_by()`` + sorted the table by the same column (``column_index``), + then reverse the rows; otherwise sort in ascending + order. + """ + if order not in ("ascending", "descending", "toggle"): + raise ValueError( + 'sort_by(): order should be "ascending", ' '"descending", or "toggle".' + ) + column_index = self.column_index(column_index) + config_cookie = self._save_config_info(index_by_id=True) + + # Sort the rows. + if order == "toggle" and column_index == self._sortkey: + self._rows.reverse() + else: + self._rows.sort( + key=operator.itemgetter(column_index), reverse=(order == "descending") + ) + self._sortkey = column_index + + # Redraw the table. 
+ self._fill_table() + self._restore_config_info(config_cookie, index_by_id=True, see=True) + if self._DEBUG: + self._check_table_vs_mlb() + + def _sort(self, event): + """Event handler for clicking on a column label -- sort by + that column.""" + column_index = event.widget.column_index + + # If they click on the far-left of far-right of a column's + # label, then resize rather than sorting. + if self._mlb._resize_column(event): + return "continue" + + # Otherwise, sort. + else: + self.sort_by(column_index) + return "continue" + + # ///////////////////////////////////////////////////////////////// + # { Table Drawing Helpers + # ///////////////////////////////////////////////////////////////// + + def _fill_table(self, save_config=True): + """ + Re-draw the table from scratch, by clearing out the table's + multi-column listbox; and then filling it in with values from + ``self._rows``. Note that any cell-, row-, or column-specific + color configuration that has been done will be lost. The + selection will also be lost -- i.e., no row will be selected + after this call completes. + """ + self._mlb.delete(0, "end") + for i, row in enumerate(self._rows): + if self._reprfunc is not None: + row = [self._reprfunc(i, j, v) for (j, v) in enumerate(row)] + self._mlb.insert("end", row) + + def _get_itemconfig(self, r, c): + return { + k: self._mlb.itemconfig(r, c, k)[-1] + for k in ( + "foreground", + "selectforeground", + "background", + "selectbackground", + ) + } + + def _save_config_info(self, row_indices=None, index_by_id=False): + """ + Return a 'cookie' containing information about which row is + selected, and what color configurations have been applied. + this information can the be re-applied to the table (after + making modifications) using ``_restore_config_info()``. Color + configuration information will be saved for any rows in + ``row_indices``, or in the entire table, if + ``row_indices=None``. If ``index_by_id=True``, the the cookie + will associate rows with their configuration information based + on the rows' python id. This is useful when performing + operations that re-arrange the rows (e.g. ``sort``). If + ``index_by_id=False``, then it is assumed that all rows will be + in the same order when ``_restore_config_info()`` is called. + """ + # Default value for row_indices is all rows. + if row_indices is None: + row_indices = list(range(len(self._rows))) + + # Look up our current selection. + selection = self.selected_row() + if index_by_id and selection is not None: + selection = id(self._rows[selection]) + + # Look up the color configuration info for each row. + if index_by_id: + config = { + id(self._rows[r]): [ + self._get_itemconfig(r, c) for c in range(self._num_columns) + ] + for r in row_indices + } + else: + config = { + r: [self._get_itemconfig(r, c) for c in range(self._num_columns)] + for r in row_indices + } + + return selection, config + + def _restore_config_info(self, cookie, index_by_id=False, see=False): + """ + Restore selection & color configuration information that was + saved using ``_save_config_info``. + """ + selection, config = cookie + + # Clear the selection. 
+ if selection is None: + self._mlb.selection_clear(0, "end") + + # Restore selection & color config + if index_by_id: + for r, row in enumerate(self._rows): + if id(row) in config: + for c in range(self._num_columns): + self._mlb.itemconfigure(r, c, config[id(row)][c]) + if id(row) == selection: + self._mlb.select(r, see=see) + else: + if selection is not None: + self._mlb.select(selection, see=see) + for r in config: + for c in range(self._num_columns): + self._mlb.itemconfigure(r, c, config[r][c]) + + # ///////////////////////////////////////////////////////////////// + # Debugging (Invariant Checker) + # ///////////////////////////////////////////////////////////////// + + _DEBUG = False + """If true, then run ``_check_table_vs_mlb()`` after any operation + that modifies the table.""" + + def _check_table_vs_mlb(self): + """ + Verify that the contents of the table's ``_rows`` variable match + the contents of its multi-listbox (``_mlb``). This is just + included for debugging purposes, to make sure that the + list-modifying operations are working correctly. + """ + for col in self._mlb.listboxes: + assert len(self) == col.size() + for row in self: + assert len(row) == self._num_columns + assert self._num_columns == len(self._mlb.column_names) + # assert self._column_names == self._mlb.column_names + for i, row in enumerate(self): + for j, cell in enumerate(row): + if self._reprfunc is not None: + cell = self._reprfunc(i, j, cell) + assert self._mlb.get(i)[j] == cell + + +###################################################################### +# Demo/Test Function +###################################################################### + +# update this to use new WordNet API +def demo(): + root = Tk() + root.bind("", lambda e: root.destroy()) + + table = Table( + root, + "Word Synset Hypernym Hyponym".split(), + column_weights=[0, 1, 1, 1], + reprfunc=(lambda i, j, s: " %s" % s), + ) + table.pack(expand=True, fill="both") + + from nltk.corpus import brown, wordnet + + for word, pos in sorted(set(brown.tagged_words()[:500])): + if pos[0] != "N": + continue + word = word.lower() + for synset in wordnet.synsets(word): + try: + hyper_def = synset.hypernyms()[0].definition() + except: + hyper_def = "*none*" + try: + hypo_def = synset.hypernyms()[0].definition() + except: + hypo_def = "*none*" + table.append([word, synset.definition(), hyper_def, hypo_def]) + + table.columnconfig("Word", background="#afa") + table.columnconfig("Synset", background="#efe") + table.columnconfig("Hypernym", background="#fee") + table.columnconfig("Hyponym", background="#ffe") + for row in range(len(table)): + for column in ("Hypernym", "Hyponym"): + if table[row, column] == "*none*": + table.itemconfig( + row, column, foreground="#666", selectforeground="#666" + ) + root.mainloop() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/draw/tree.py b/venv/lib/python3.10/site-packages/nltk/draw/tree.py new file mode 100644 index 0000000000000000000000000000000000000000..6a2791428fcab5a47dd6d88561971d6907f74084 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/draw/tree.py @@ -0,0 +1,1129 @@ +# Natural Language Toolkit: Graphical Representations for Trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Graphically display a Tree. 
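+
+A minimal usage sketch (the sentence is invented for the example, and a
+Tk display is assumed):
+
+    >>> from nltk.tree import Tree
+    >>> from nltk.draw.tree import draw_trees
+    >>> t = Tree.fromstring('(S (NP the dog) (VP barked))')
+    >>> draw_trees(t)    # doctest: +SKIP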
+""" + +from tkinter import IntVar, Menu, Tk + +from nltk.draw.util import ( + BoxWidget, + CanvasFrame, + CanvasWidget, + OvalWidget, + ParenWidget, + TextWidget, +) +from nltk.tree import Tree +from nltk.util import in_idle + +##////////////////////////////////////////////////////// +## Tree Segment +##////////////////////////////////////////////////////// + + +class TreeSegmentWidget(CanvasWidget): + """ + A canvas widget that displays a single segment of a hierarchical + tree. Each ``TreeSegmentWidget`` connects a single "node widget" + to a sequence of zero or more "subtree widgets". By default, the + bottom of the node is connected to the top of each subtree by a + single line. However, if the ``roof`` attribute is set, then a + single triangular "roof" will connect the node to all of its + children. + + Attributes: + - ``roof``: What sort of connection to draw between the node and + its subtrees. If ``roof`` is true, draw a single triangular + "roof" over the subtrees. If ``roof`` is false, draw a line + between each subtree and the node. Default value is false. + - ``xspace``: The amount of horizontal space to leave between + subtrees when managing this widget. Default value is 10. + - ``yspace``: The amount of space to place between the node and + its children when managing this widget. Default value is 15. + - ``color``: The color of the lines connecting the node to its + subtrees; and of the outline of the triangular roof. Default + value is ``'#006060'``. + - ``fill``: The fill color for the triangular roof. Default + value is ``''`` (no fill). + - ``width``: The width of the lines connecting the node to its + subtrees; and of the outline of the triangular roof. Default + value is 1. + - ``orientation``: Determines whether the tree branches downwards + or rightwards. Possible values are ``'horizontal'`` and + ``'vertical'``. The default value is ``'vertical'`` (i.e., + branch downwards). + - ``draggable``: whether the widget can be dragged by the user. + """ + + def __init__(self, canvas, label, subtrees, **attribs): + """ + :type node: + :type subtrees: list(CanvasWidgetI) + """ + self._label = label + self._subtrees = subtrees + + # Attributes + self._horizontal = 0 + self._roof = 0 + self._xspace = 10 + self._yspace = 15 + self._ordered = False + + # Create canvas objects. + self._lines = [canvas.create_line(0, 0, 0, 0, fill="#006060") for c in subtrees] + self._polygon = canvas.create_polygon( + 0, 0, fill="", state="hidden", outline="#006060" + ) + + # Register child widgets (label + subtrees) + self._add_child_widget(label) + for subtree in subtrees: + self._add_child_widget(subtree) + + # Are we currently managing? + self._managing = False + + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + canvas = self.canvas() + if attr == "roof": + self._roof = value + if self._roof: + for l in self._lines: + canvas.itemconfig(l, state="hidden") + canvas.itemconfig(self._polygon, state="normal") + else: + for l in self._lines: + canvas.itemconfig(l, state="normal") + canvas.itemconfig(self._polygon, state="hidden") + elif attr == "orientation": + if value == "horizontal": + self._horizontal = 1 + elif value == "vertical": + self._horizontal = 0 + else: + raise ValueError("orientation must be horizontal or vertical") + elif attr == "color": + for l in self._lines: + canvas.itemconfig(l, fill=value) + canvas.itemconfig(self._polygon, outline=value) + elif isinstance(attr, tuple) and attr[0] == "color": + # Set the color of an individual line. 
+ l = self._lines[int(attr[1])] + canvas.itemconfig(l, fill=value) + elif attr == "fill": + canvas.itemconfig(self._polygon, fill=value) + elif attr == "width": + canvas.itemconfig(self._polygon, {attr: value}) + for l in self._lines: + canvas.itemconfig(l, {attr: value}) + elif attr in ("xspace", "yspace"): + if attr == "xspace": + self._xspace = value + elif attr == "yspace": + self._yspace = value + self.update(self._label) + elif attr == "ordered": + self._ordered = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "roof": + return self._roof + elif attr == "width": + return self.canvas().itemcget(self._polygon, attr) + elif attr == "color": + return self.canvas().itemcget(self._polygon, "outline") + elif isinstance(attr, tuple) and attr[0] == "color": + l = self._lines[int(attr[1])] + return self.canvas().itemcget(l, "fill") + elif attr == "xspace": + return self._xspace + elif attr == "yspace": + return self._yspace + elif attr == "orientation": + if self._horizontal: + return "horizontal" + else: + return "vertical" + elif attr == "ordered": + return self._ordered + else: + return CanvasWidget.__getitem__(self, attr) + + def label(self): + return self._label + + def subtrees(self): + return self._subtrees[:] + + def set_label(self, label): + """ + Set the node label to ``label``. + """ + self._remove_child_widget(self._label) + self._add_child_widget(label) + self._label = label + self.update(self._label) + + def replace_child(self, oldchild, newchild): + """ + Replace the child ``oldchild`` with ``newchild``. + """ + index = self._subtrees.index(oldchild) + self._subtrees[index] = newchild + self._remove_child_widget(oldchild) + self._add_child_widget(newchild) + self.update(newchild) + + def remove_child(self, child): + index = self._subtrees.index(child) + del self._subtrees[index] + self._remove_child_widget(child) + self.canvas().delete(self._lines.pop()) + self.update(self._label) + + def insert_child(self, index, child): + canvas = self.canvas() + self._subtrees.insert(index, child) + self._add_child_widget(child) + self._lines.append(canvas.create_line(0, 0, 0, 0, fill="#006060")) + self.update(self._label) + + # but.. lines??? + + def _tags(self): + if self._roof: + return [self._polygon] + else: + return self._lines + + def _subtree_top(self, child): + if isinstance(child, TreeSegmentWidget): + bbox = child.label().bbox() + else: + bbox = child.bbox() + if self._horizontal: + return (bbox[0], (bbox[1] + bbox[3]) / 2.0) + else: + return ((bbox[0] + bbox[2]) / 2.0, bbox[1]) + + def _node_bottom(self): + bbox = self._label.bbox() + if self._horizontal: + return (bbox[2], (bbox[1] + bbox[3]) / 2.0) + else: + return ((bbox[0] + bbox[2]) / 2.0, bbox[3]) + + def _update(self, child): + if len(self._subtrees) == 0: + return + if self._label.bbox() is None: + return # [XX] ??? + + # Which lines need to be redrawn? + if child is self._label: + need_update = self._subtrees + else: + need_update = [child] + + if self._ordered and not self._managing: + need_update = self._maintain_order(child) + + # Update the polygon. 
+ (nodex, nodey) = self._node_bottom() + (xmin, ymin, xmax, ymax) = self._subtrees[0].bbox() + for subtree in self._subtrees[1:]: + bbox = subtree.bbox() + xmin = min(xmin, bbox[0]) + ymin = min(ymin, bbox[1]) + xmax = max(xmax, bbox[2]) + ymax = max(ymax, bbox[3]) + + if self._horizontal: + self.canvas().coords( + self._polygon, nodex, nodey, xmin, ymin, xmin, ymax, nodex, nodey + ) + else: + self.canvas().coords( + self._polygon, nodex, nodey, xmin, ymin, xmax, ymin, nodex, nodey + ) + + # Redraw all lines that need it. + for subtree in need_update: + (nodex, nodey) = self._node_bottom() + line = self._lines[self._subtrees.index(subtree)] + (subtreex, subtreey) = self._subtree_top(subtree) + self.canvas().coords(line, nodex, nodey, subtreex, subtreey) + + def _maintain_order(self, child): + if self._horizontal: + return self._maintain_order_horizontal(child) + else: + return self._maintain_order_vertical(child) + + def _maintain_order_vertical(self, child): + (left, top, right, bot) = child.bbox() + + if child is self._label: + # Check all the leaves + for subtree in self._subtrees: + (x1, y1, x2, y2) = subtree.bbox() + if bot + self._yspace > y1: + subtree.move(0, bot + self._yspace - y1) + + return self._subtrees + else: + moved = [child] + index = self._subtrees.index(child) + + # Check leaves to our right. + x = right + self._xspace + for i in range(index + 1, len(self._subtrees)): + (x1, y1, x2, y2) = self._subtrees[i].bbox() + if x > x1: + self._subtrees[i].move(x - x1, 0) + x += x2 - x1 + self._xspace + moved.append(self._subtrees[i]) + + # Check leaves to our left. + x = left - self._xspace + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._subtrees[i].bbox() + if x < x2: + self._subtrees[i].move(x - x2, 0) + x -= x2 - x1 + self._xspace + moved.append(self._subtrees[i]) + + # Check the node + (x1, y1, x2, y2) = self._label.bbox() + if y2 > top - self._yspace: + self._label.move(0, top - self._yspace - y2) + moved = self._subtrees + + # Return a list of the nodes we moved + return moved + + def _maintain_order_horizontal(self, child): + (left, top, right, bot) = child.bbox() + + if child is self._label: + # Check all the leaves + for subtree in self._subtrees: + (x1, y1, x2, y2) = subtree.bbox() + if right + self._xspace > x1: + subtree.move(right + self._xspace - x1) + + return self._subtrees + else: + moved = [child] + index = self._subtrees.index(child) + + # Check leaves below us. + y = bot + self._yspace + for i in range(index + 1, len(self._subtrees)): + (x1, y1, x2, y2) = self._subtrees[i].bbox() + if y > y1: + self._subtrees[i].move(0, y - y1) + y += y2 - y1 + self._yspace + moved.append(self._subtrees[i]) + + # Check leaves above us + y = top - self._yspace + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._subtrees[i].bbox() + if y < y2: + self._subtrees[i].move(0, y - y2) + y -= y2 - y1 + self._yspace + moved.append(self._subtrees[i]) + + # Check the node + (x1, y1, x2, y2) = self._label.bbox() + if x2 > left - self._xspace: + self._label.move(left - self._xspace - x2, 0) + moved = self._subtrees + + # Return a list of the nodes we moved + return moved + + def _manage_horizontal(self): + (nodex, nodey) = self._node_bottom() + + # Put the subtrees in a line. + y = 20 + for subtree in self._subtrees: + subtree_bbox = subtree.bbox() + dx = nodex - subtree_bbox[0] + self._xspace + dy = y - subtree_bbox[1] + subtree.move(dx, dy) + y += subtree_bbox[3] - subtree_bbox[1] + self._yspace + + # Find the center of their tops. 
+ center = 0.0 + for subtree in self._subtrees: + center += self._subtree_top(subtree)[1] + center /= len(self._subtrees) + + # Center the subtrees with the node. + for subtree in self._subtrees: + subtree.move(0, nodey - center) + + def _manage_vertical(self): + (nodex, nodey) = self._node_bottom() + + # Put the subtrees in a line. + x = 0 + for subtree in self._subtrees: + subtree_bbox = subtree.bbox() + dy = nodey - subtree_bbox[1] + self._yspace + dx = x - subtree_bbox[0] + subtree.move(dx, dy) + x += subtree_bbox[2] - subtree_bbox[0] + self._xspace + + # Find the center of their tops. + center = 0.0 + for subtree in self._subtrees: + center += self._subtree_top(subtree)[0] / len(self._subtrees) + + # Center the subtrees with the node. + for subtree in self._subtrees: + subtree.move(nodex - center, 0) + + def _manage(self): + self._managing = True + (nodex, nodey) = self._node_bottom() + if len(self._subtrees) == 0: + return + + if self._horizontal: + self._manage_horizontal() + else: + self._manage_vertical() + + # Update lines to subtrees. + for subtree in self._subtrees: + self._update(subtree) + + self._managing = False + + def __repr__(self): + return f"[TreeSeg {self._label}: {self._subtrees}]" + + +def _tree_to_treeseg( + canvas, + t, + make_node, + make_leaf, + tree_attribs, + node_attribs, + leaf_attribs, + loc_attribs, +): + if isinstance(t, Tree): + label = make_node(canvas, t.label(), **node_attribs) + subtrees = [ + _tree_to_treeseg( + canvas, + child, + make_node, + make_leaf, + tree_attribs, + node_attribs, + leaf_attribs, + loc_attribs, + ) + for child in t + ] + return TreeSegmentWidget(canvas, label, subtrees, **tree_attribs) + else: + return make_leaf(canvas, t, **leaf_attribs) + + +def tree_to_treesegment( + canvas, t, make_node=TextWidget, make_leaf=TextWidget, **attribs +): + """ + Convert a Tree into a ``TreeSegmentWidget``. + + :param make_node: A ``CanvasWidget`` constructor or a function that + creates ``CanvasWidgets``. ``make_node`` is used to convert + the Tree's nodes into ``CanvasWidgets``. If no constructor + is specified, then ``TextWidget`` will be used. + :param make_leaf: A ``CanvasWidget`` constructor or a function that + creates ``CanvasWidgets``. ``make_leaf`` is used to convert + the Tree's leafs into ``CanvasWidgets``. If no constructor + is specified, then ``TextWidget`` will be used. + :param attribs: Attributes for the canvas widgets that make up the + returned ``TreeSegmentWidget``. Any attribute beginning with + ``'tree_'`` will be passed to all ``TreeSegmentWidgets`` (with + the ``'tree_'`` prefix removed. Any attribute beginning with + ``'node_'`` will be passed to all nodes. Any attribute + beginning with ``'leaf_'`` will be passed to all leaves. And + any attribute beginning with ``'loc_'`` will be passed to all + text locations (for Trees). + """ + # Process attribs. 
+ tree_attribs = {} + node_attribs = {} + leaf_attribs = {} + loc_attribs = {} + + for (key, value) in list(attribs.items()): + if key[:5] == "tree_": + tree_attribs[key[5:]] = value + elif key[:5] == "node_": + node_attribs[key[5:]] = value + elif key[:5] == "leaf_": + leaf_attribs[key[5:]] = value + elif key[:4] == "loc_": + loc_attribs[key[4:]] = value + else: + raise ValueError("Bad attribute: %s" % key) + return _tree_to_treeseg( + canvas, + t, + make_node, + make_leaf, + tree_attribs, + node_attribs, + leaf_attribs, + loc_attribs, + ) + + +##////////////////////////////////////////////////////// +## Tree Widget +##////////////////////////////////////////////////////// + + +class TreeWidget(CanvasWidget): + """ + A canvas widget that displays a single Tree. + ``TreeWidget`` manages a group of ``TreeSegmentWidgets`` that are + used to display a Tree. + + Attributes: + + - ``node_attr``: Sets the attribute ``attr`` on all of the + node widgets for this ``TreeWidget``. + - ``node_attr``: Sets the attribute ``attr`` on all of the + leaf widgets for this ``TreeWidget``. + - ``loc_attr``: Sets the attribute ``attr`` on all of the + location widgets for this ``TreeWidget`` (if it was built from + a Tree). Note that a location widget is a ``TextWidget``. + + - ``xspace``: The amount of horizontal space to leave between + subtrees when managing this widget. Default value is 10. + - ``yspace``: The amount of space to place between the node and + its children when managing this widget. Default value is 15. + + - ``line_color``: The color of the lines connecting each expanded + node to its subtrees. + - ``roof_color``: The color of the outline of the triangular roof + for collapsed trees. + - ``roof_fill``: The fill color for the triangular roof for + collapsed trees. + - ``width`` + + - ``orientation``: Determines whether the tree branches downwards + or rightwards. Possible values are ``'horizontal'`` and + ``'vertical'``. The default value is ``'vertical'`` (i.e., + branch downwards). + + - ``shapeable``: whether the subtrees can be independently + dragged by the user. THIS property simply sets the + ``DRAGGABLE`` property on all of the ``TreeWidget``'s tree + segments. + - ``draggable``: whether the widget can be dragged by the user. + """ + + def __init__( + self, canvas, t, make_node=TextWidget, make_leaf=TextWidget, **attribs + ): + # Node & leaf canvas widget constructors + self._make_node = make_node + self._make_leaf = make_leaf + self._tree = t + + # Attributes. + self._nodeattribs = {} + self._leafattribs = {} + self._locattribs = {"color": "#008000"} + self._line_color = "#008080" + self._line_width = 1 + self._roof_color = "#008080" + self._roof_fill = "#c0c0c0" + self._shapeable = False + self._xspace = 10 + self._yspace = 10 + self._orientation = "vertical" + self._ordered = False + + # Build trees. + self._keys = {} # treeseg -> key + self._expanded_trees = {} + self._collapsed_trees = {} + self._nodes = [] + self._leaves = [] + # self._locs = [] + self._make_collapsed_trees(canvas, t, ()) + self._treeseg = self._make_expanded_tree(canvas, t, ()) + self._add_child_widget(self._treeseg) + + CanvasWidget.__init__(self, canvas, **attribs) + + def expanded_tree(self, *path_to_tree): + """ + Return the ``TreeSegmentWidget`` for the specified subtree. + + :param path_to_tree: A list of indices i1, i2, ..., in, where + the desired widget is the widget corresponding to + ``tree.children()[i1].children()[i2]....children()[in]``. + For the root, the path is ``()``. 
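+
+        A sketch of how the path indices are interpreted, assuming the
+        widget below (``expanded_tree()`` returns the root segment, and
+        ``expanded_tree(0)`` the segment for the first child, ``NP``):
+
+            >>> from nltk.tree import Tree
+            >>> from nltk.draw.util import CanvasFrame
+            >>> t = Tree.fromstring('(S (NP the cat) (VP sat))')
+            >>> widget = TreeWidget(CanvasFrame().canvas(), t)    # doctest: +SKIP
+            >>> root_seg = widget.expanded_tree()                 # doctest: +SKIP
+            >>> np_seg = widget.expanded_tree(0)                  # doctest: +SKIP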
+ """ + return self._expanded_trees[path_to_tree] + + def collapsed_tree(self, *path_to_tree): + """ + Return the ``TreeSegmentWidget`` for the specified subtree. + + :param path_to_tree: A list of indices i1, i2, ..., in, where + the desired widget is the widget corresponding to + ``tree.children()[i1].children()[i2]....children()[in]``. + For the root, the path is ``()``. + """ + return self._collapsed_trees[path_to_tree] + + def bind_click_trees(self, callback, button=1): + """ + Add a binding to all tree segments. + """ + for tseg in list(self._expanded_trees.values()): + tseg.bind_click(callback, button) + for tseg in list(self._collapsed_trees.values()): + tseg.bind_click(callback, button) + + def bind_drag_trees(self, callback, button=1): + """ + Add a binding to all tree segments. + """ + for tseg in list(self._expanded_trees.values()): + tseg.bind_drag(callback, button) + for tseg in list(self._collapsed_trees.values()): + tseg.bind_drag(callback, button) + + def bind_click_leaves(self, callback, button=1): + """ + Add a binding to all leaves. + """ + for leaf in self._leaves: + leaf.bind_click(callback, button) + for leaf in self._leaves: + leaf.bind_click(callback, button) + + def bind_drag_leaves(self, callback, button=1): + """ + Add a binding to all leaves. + """ + for leaf in self._leaves: + leaf.bind_drag(callback, button) + for leaf in self._leaves: + leaf.bind_drag(callback, button) + + def bind_click_nodes(self, callback, button=1): + """ + Add a binding to all nodes. + """ + for node in self._nodes: + node.bind_click(callback, button) + for node in self._nodes: + node.bind_click(callback, button) + + def bind_drag_nodes(self, callback, button=1): + """ + Add a binding to all nodes. + """ + for node in self._nodes: + node.bind_drag(callback, button) + for node in self._nodes: + node.bind_drag(callback, button) + + def _make_collapsed_trees(self, canvas, t, key): + if not isinstance(t, Tree): + return + make_node = self._make_node + make_leaf = self._make_leaf + + node = make_node(canvas, t.label(), **self._nodeattribs) + self._nodes.append(node) + leaves = [make_leaf(canvas, l, **self._leafattribs) for l in t.leaves()] + self._leaves += leaves + treeseg = TreeSegmentWidget( + canvas, + node, + leaves, + roof=1, + color=self._roof_color, + fill=self._roof_fill, + width=self._line_width, + ) + + self._collapsed_trees[key] = treeseg + self._keys[treeseg] = key + # self._add_child_widget(treeseg) + treeseg.hide() + + # Build trees for children. 
+ for i in range(len(t)): + child = t[i] + self._make_collapsed_trees(canvas, child, key + (i,)) + + def _make_expanded_tree(self, canvas, t, key): + make_node = self._make_node + make_leaf = self._make_leaf + + if isinstance(t, Tree): + node = make_node(canvas, t.label(), **self._nodeattribs) + self._nodes.append(node) + children = t + subtrees = [ + self._make_expanded_tree(canvas, children[i], key + (i,)) + for i in range(len(children)) + ] + treeseg = TreeSegmentWidget( + canvas, node, subtrees, color=self._line_color, width=self._line_width + ) + self._expanded_trees[key] = treeseg + self._keys[treeseg] = key + return treeseg + else: + leaf = make_leaf(canvas, t, **self._leafattribs) + self._leaves.append(leaf) + return leaf + + def __setitem__(self, attr, value): + if attr[:5] == "node_": + for node in self._nodes: + node[attr[5:]] = value + elif attr[:5] == "leaf_": + for leaf in self._leaves: + leaf[attr[5:]] = value + elif attr == "line_color": + self._line_color = value + for tseg in list(self._expanded_trees.values()): + tseg["color"] = value + elif attr == "line_width": + self._line_width = value + for tseg in list(self._expanded_trees.values()): + tseg["width"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["width"] = value + elif attr == "roof_color": + self._roof_color = value + for tseg in list(self._collapsed_trees.values()): + tseg["color"] = value + elif attr == "roof_fill": + self._roof_fill = value + for tseg in list(self._collapsed_trees.values()): + tseg["fill"] = value + elif attr == "shapeable": + self._shapeable = value + for tseg in list(self._expanded_trees.values()): + tseg["draggable"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["draggable"] = value + for leaf in self._leaves: + leaf["draggable"] = value + elif attr == "xspace": + self._xspace = value + for tseg in list(self._expanded_trees.values()): + tseg["xspace"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["xspace"] = value + self.manage() + elif attr == "yspace": + self._yspace = value + for tseg in list(self._expanded_trees.values()): + tseg["yspace"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["yspace"] = value + self.manage() + elif attr == "orientation": + self._orientation = value + for tseg in list(self._expanded_trees.values()): + tseg["orientation"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["orientation"] = value + self.manage() + elif attr == "ordered": + self._ordered = value + for tseg in list(self._expanded_trees.values()): + tseg["ordered"] = value + for tseg in list(self._collapsed_trees.values()): + tseg["ordered"] = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr[:5] == "node_": + return self._nodeattribs.get(attr[5:], None) + elif attr[:5] == "leaf_": + return self._leafattribs.get(attr[5:], None) + elif attr[:4] == "loc_": + return self._locattribs.get(attr[4:], None) + elif attr == "line_color": + return self._line_color + elif attr == "line_width": + return self._line_width + elif attr == "roof_color": + return self._roof_color + elif attr == "roof_fill": + return self._roof_fill + elif attr == "shapeable": + return self._shapeable + elif attr == "xspace": + return self._xspace + elif attr == "yspace": + return self._yspace + elif attr == "orientation": + return self._orientation + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [] + + def _manage(self): + segs = 
list(self._expanded_trees.values()) + list( + self._collapsed_trees.values() + ) + for tseg in segs: + if tseg.hidden(): + tseg.show() + tseg.manage() + tseg.hide() + + def toggle_collapsed(self, treeseg): + """ + Collapse/expand a tree. + """ + old_treeseg = treeseg + if old_treeseg["roof"]: + new_treeseg = self._expanded_trees[self._keys[old_treeseg]] + else: + new_treeseg = self._collapsed_trees[self._keys[old_treeseg]] + + # Replace the old tree with the new tree. + if old_treeseg.parent() is self: + self._remove_child_widget(old_treeseg) + self._add_child_widget(new_treeseg) + self._treeseg = new_treeseg + else: + old_treeseg.parent().replace_child(old_treeseg, new_treeseg) + + # Move the new tree to where the old tree was. Show it first, + # so we can find its bounding box. + new_treeseg.show() + (newx, newy) = new_treeseg.label().bbox()[:2] + (oldx, oldy) = old_treeseg.label().bbox()[:2] + new_treeseg.move(oldx - newx, oldy - newy) + + # Hide the old tree + old_treeseg.hide() + + # We could do parent.manage() here instead, if we wanted. + new_treeseg.parent().update(new_treeseg) + + +##////////////////////////////////////////////////////// +## draw_trees +##////////////////////////////////////////////////////// + + +class TreeView: + def __init__(self, *trees): + from math import ceil, sqrt + + self._trees = trees + + self._top = Tk() + self._top.title("NLTK") + self._top.bind("", self.destroy) + self._top.bind("", self.destroy) + + cf = self._cframe = CanvasFrame(self._top) + self._top.bind("", self._cframe.print_to_file) + + # Size is variable. + self._size = IntVar(self._top) + self._size.set(12) + bold = ("helvetica", -self._size.get(), "bold") + helv = ("helvetica", -self._size.get()) + + # Lay the trees out in a square. + self._width = int(ceil(sqrt(len(trees)))) + self._widgets = [] + for i in range(len(trees)): + widget = TreeWidget( + cf.canvas(), + trees[i], + node_font=bold, + leaf_color="#008040", + node_color="#004080", + roof_color="#004040", + roof_fill="white", + line_color="#004040", + draggable=1, + leaf_font=helv, + ) + widget.bind_click_trees(widget.toggle_collapsed) + self._widgets.append(widget) + cf.add_widget(widget, 0, 0) + + self._layout() + self._cframe.pack(expand=1, fill="both") + self._init_menubar() + + def _layout(self): + i = x = y = ymax = 0 + width = self._width + for i in range(len(self._widgets)): + widget = self._widgets[i] + (oldx, oldy) = widget.bbox()[:2] + if i % width == 0: + y = ymax + x = 0 + widget.move(x - oldx, y - oldy) + x = widget.bbox()[2] + 10 + ymax = max(ymax, widget.bbox()[3] + 10) + + def _init_menubar(self): + menubar = Menu(self._top) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self._cframe.print_to_file, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + zoommenu = Menu(menubar, tearoff=0) + zoommenu.add_radiobutton( + label="Tiny", + variable=self._size, + underline=0, + value=10, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Small", + variable=self._size, + underline=0, + value=12, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Medium", + variable=self._size, + underline=0, + value=14, + command=self.resize, + ) + zoommenu.add_radiobutton( + label="Large", + variable=self._size, + underline=0, + value=28, + command=self.resize, + ) + zoommenu.add_radiobutton( + 
label="Huge", + variable=self._size, + underline=0, + value=50, + command=self.resize, + ) + menubar.add_cascade(label="Zoom", underline=0, menu=zoommenu) + + self._top.config(menu=menubar) + + def resize(self, *e): + bold = ("helvetica", -self._size.get(), "bold") + helv = ("helvetica", -self._size.get()) + xspace = self._size.get() + yspace = self._size.get() + for widget in self._widgets: + widget["node_font"] = bold + widget["leaf_font"] = helv + widget["xspace"] = xspace + widget["yspace"] = yspace + if self._size.get() < 20: + widget["line_width"] = 1 + elif self._size.get() < 30: + widget["line_width"] = 2 + else: + widget["line_width"] = 3 + self._layout() + + def destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this demo is created from a non-interactive program (e.g. + from a secript); otherwise, the demo will close as soon as + the script completes. + """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + +def draw_trees(*trees): + """ + Open a new window containing a graphical diagram of the given + trees. + + :rtype: None + """ + TreeView(*trees).mainloop() + return + + +##////////////////////////////////////////////////////// +## Demo Code +##////////////////////////////////////////////////////// + + +def demo(): + import random + + def fill(cw): + cw["fill"] = "#%06d" % random.randint(0, 999999) + + cf = CanvasFrame(width=550, height=450, closeenough=2) + + t = Tree.fromstring( + """ + (S (NP the very big cat) + (VP (Adv sorta) (V saw) (NP (Det the) (N dog))))""" + ) + + tc = TreeWidget( + cf.canvas(), + t, + draggable=1, + node_font=("helvetica", -14, "bold"), + leaf_font=("helvetica", -12, "italic"), + roof_fill="white", + roof_color="black", + leaf_color="green4", + node_color="blue2", + ) + cf.add_widget(tc, 10, 10) + + def boxit(canvas, text): + big = ("helvetica", -16, "bold") + return BoxWidget(canvas, TextWidget(canvas, text, font=big), fill="green") + + def ovalit(canvas, text): + return OvalWidget(canvas, TextWidget(canvas, text), fill="cyan") + + treetok = Tree.fromstring("(S (NP this tree) (VP (V is) (AdjP shapeable)))") + tc2 = TreeWidget(cf.canvas(), treetok, boxit, ovalit, shapeable=1) + + def color(node): + node["color"] = "#%04d00" % random.randint(0, 9999) + + def color2(treeseg): + treeseg.label()["fill"] = "#%06d" % random.randint(0, 9999) + treeseg.label().child()["color"] = "white" + + tc.bind_click_trees(tc.toggle_collapsed) + tc2.bind_click_trees(tc2.toggle_collapsed) + tc.bind_click_nodes(color, 3) + tc2.expanded_tree(1).bind_click(color2, 3) + tc2.expanded_tree().bind_click(color2, 3) + + paren = ParenWidget(cf.canvas(), tc2) + cf.add_widget(paren, tc.bbox()[2] + 10, 10) + + tree3 = Tree.fromstring( + """ + (S (NP this tree) (AUX was) + (VP (V built) (PP (P with) (NP (N tree_to_treesegment)))))""" + ) + tc3 = tree_to_treesegment( + cf.canvas(), tree3, tree_color="green4", tree_xspace=2, tree_width=2 + ) + tc3["draggable"] = 1 + cf.add_widget(tc3, 10, tc.bbox()[3] + 10) + + def orientswitch(treewidget): + if treewidget["orientation"] == "horizontal": + treewidget.expanded_tree(1, 1).subtrees()[0].set_text("vertical") + treewidget.collapsed_tree(1, 1).subtrees()[0].set_text("vertical") + treewidget.collapsed_tree(1).subtrees()[1].set_text("vertical") + treewidget.collapsed_tree().subtrees()[3].set_text("vertical") + treewidget["orientation"] = "vertical" + else: + 
treewidget.expanded_tree(1, 1).subtrees()[0].set_text("horizontal") + treewidget.collapsed_tree(1, 1).subtrees()[0].set_text("horizontal") + treewidget.collapsed_tree(1).subtrees()[1].set_text("horizontal") + treewidget.collapsed_tree().subtrees()[3].set_text("horizontal") + treewidget["orientation"] = "horizontal" + + text = """ +Try clicking, right clicking, and dragging +different elements of each of the trees. +The top-left tree is a TreeWidget built from +a Tree. The top-right is a TreeWidget built +from a Tree, using non-default widget +constructors for the nodes & leaves (BoxWidget +and OvalWidget). The bottom-left tree is +built from tree_to_treesegment.""" + twidget = TextWidget(cf.canvas(), text.strip()) + textbox = BoxWidget(cf.canvas(), twidget, fill="white", draggable=1) + cf.add_widget(textbox, tc3.bbox()[2] + 10, tc2.bbox()[3] + 10) + + tree4 = Tree.fromstring("(S (NP this tree) (VP (V is) (Adj horizontal)))") + tc4 = TreeWidget( + cf.canvas(), + tree4, + draggable=1, + line_color="brown2", + roof_color="brown2", + node_font=("helvetica", -12, "bold"), + node_color="brown4", + orientation="horizontal", + ) + tc4.manage() + cf.add_widget(tc4, tc3.bbox()[2] + 10, textbox.bbox()[3] + 10) + tc4.bind_click(orientswitch) + tc4.bind_click_trees(tc4.toggle_collapsed, 3) + + # Run mainloop + cf.mainloop() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/draw/util.py b/venv/lib/python3.10/site-packages/nltk/draw/util.py new file mode 100644 index 0000000000000000000000000000000000000000..31ae442099a892a6e84a0dbf3ff284d7aa184b3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/draw/util.py @@ -0,0 +1,2575 @@ +# Natural Language Toolkit: Drawing utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Tools for graphically displaying and interacting with the objects and +processing classes defined by the Toolkit. These tools are primarily +intended to help students visualize the objects that they create. + +The graphical tools are typically built using "canvas widgets", each +of which encapsulates the graphical elements and bindings used to +display a complex object on a Tkinter ``Canvas``. For example, NLTK +defines canvas widgets for displaying trees and directed graphs, as +well as a number of simpler widgets. These canvas widgets make it +easier to build new graphical tools and demos. See the class +documentation for ``CanvasWidget`` for more information. + +The ``nltk.draw`` module defines the abstract ``CanvasWidget`` base +class, and a number of simple canvas widgets. The remaining canvas +widgets are defined by submodules, such as ``nltk.draw.tree``. + +The ``nltk.draw`` module also defines ``CanvasFrame``, which +encapsulates a ``Canvas`` and its scrollbars. It uses a +``ScrollWatcherWidget`` to ensure that all canvas widgets contained on +its canvas are within the scroll region. + +Acknowledgements: Many of the ideas behind the canvas widget system +are derived from ``CLIG``, a Tk-based grapher for linguistic data +structures. For more information, see the CLIG +homepage (http://www.ags.uni-sb.de/~konrad/clig.html). 
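+
+A small usage sketch (the widget position and size are arbitrary, and a
+Tk display is assumed):
+
+    >>> from nltk.draw.util import CanvasFrame, TextWidget
+    >>> cf = CanvasFrame(width=300, height=200)                 # doctest: +SKIP
+    >>> hello = TextWidget(cf.canvas(), 'hello', draggable=1)   # doctest: +SKIP
+    >>> cf.add_widget(hello, 10, 10)                            # doctest: +SKIP
+    >>> cf.mainloop()                                           # doctest: +SKIP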
+ +""" +from abc import ABCMeta, abstractmethod +from tkinter import ( + RAISED, + Button, + Canvas, + Entry, + Frame, + Label, + Menu, + Menubutton, + Scrollbar, + StringVar, + Text, + Tk, + Toplevel, + Widget, +) +from tkinter.filedialog import asksaveasfilename + +from nltk.util import in_idle + +##////////////////////////////////////////////////////// +## CanvasWidget +##////////////////////////////////////////////////////// + + +class CanvasWidget(metaclass=ABCMeta): + """ + A collection of graphical elements and bindings used to display a + complex object on a Tkinter ``Canvas``. A canvas widget is + responsible for managing the ``Canvas`` tags and callback bindings + necessary to display and interact with the object. Canvas widgets + are often organized into hierarchies, where parent canvas widgets + control aspects of their child widgets. + + Each canvas widget is bound to a single ``Canvas``. This ``Canvas`` + is specified as the first argument to the ``CanvasWidget``'s + constructor. + + Attributes. Each canvas widget can support a variety of + "attributes", which control how the canvas widget is displayed. + Some typical examples attributes are ``color``, ``font``, and + ``radius``. Each attribute has a default value. This default + value can be overridden in the constructor, using keyword + arguments of the form ``attribute=value``: + + >>> from nltk.draw.util import TextWidget + >>> cn = TextWidget(Canvas(), 'test', color='red') # doctest: +SKIP + + Attribute values can also be changed after a canvas widget has + been constructed, using the ``__setitem__`` operator: + + >>> cn['font'] = 'times' # doctest: +SKIP + + The current value of an attribute value can be queried using the + ``__getitem__`` operator: + + >>> cn['color'] # doctest: +SKIP + 'red' + + For a list of the attributes supported by a type of canvas widget, + see its class documentation. + + Interaction. The attribute ``'draggable'`` controls whether the + user can drag a canvas widget around the canvas. By default, + canvas widgets are not draggable. + + ``CanvasWidget`` provides callback support for two types of user + interaction: clicking and dragging. The method ``bind_click`` + registers a callback function that is called whenever the canvas + widget is clicked. The method ``bind_drag`` registers a callback + function that is called after the canvas widget is dragged. If + the user clicks or drags a canvas widget with no registered + callback function, then the interaction event will propagate to + its parent. For each canvas widget, only one callback function + may be registered for an interaction event. Callback functions + can be deregistered with the ``unbind_click`` and ``unbind_drag`` + methods. + + Subclassing. ``CanvasWidget`` is an abstract class. Subclasses + are required to implement the following methods: + + - ``__init__``: Builds a new canvas widget. It must perform the + following three tasks (in order): + + - Create any new graphical elements. + - Call ``_add_child_widget`` on each child widget. + - Call the ``CanvasWidget`` constructor. + - ``_tags``: Returns a list of the canvas tags for all graphical + elements managed by this canvas widget, not including + graphical elements managed by its child widgets. + - ``_manage``: Arranges the child widgets of this canvas widget. + This is typically only called when the canvas widget is + created. + - ``_update``: Update this canvas widget in response to a + change in a single child. 
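+
+    A minimal sketch of such a subclass (the widget is invented for
+    illustration; it draws a small dot, has no child widgets, and so
+    only defines ``__init__`` and ``_tags``):
+
+        >>> class DotWidget(CanvasWidget):                       # doctest: +SKIP
+        ...     def __init__(self, canvas, x, y, **attribs):
+        ...         self._oval = canvas.create_oval(x, y, x + 6, y + 6)
+        ...         CanvasWidget.__init__(self, canvas, **attribs)
+        ...     def _tags(self):
+        ...         return [self._oval]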
+ + For a ``CanvasWidget`` with no child widgets, the default + definitions for ``_manage`` and ``_update`` may be used. + + If a subclass defines any attributes, then it should implement + ``__getitem__`` and ``__setitem__``. If either of these methods is + called with an unknown attribute, then they should propagate the + request to ``CanvasWidget``. + + Most subclasses implement a number of additional methods that + modify the ``CanvasWidget`` in some way. These methods must call + ``parent.update(self)`` after making any changes to the canvas + widget's graphical elements. The canvas widget must also call + ``parent.update(self)`` after changing any attribute value that + affects the shape or position of the canvas widget's graphical + elements. + + :type __canvas: Tkinter.Canvas + :ivar __canvas: This ``CanvasWidget``'s canvas. + + :type __parent: CanvasWidget or None + :ivar __parent: This ``CanvasWidget``'s hierarchical parent widget. + :type __children: list(CanvasWidget) + :ivar __children: This ``CanvasWidget``'s hierarchical child widgets. + + :type __updating: bool + :ivar __updating: Is this canvas widget currently performing an + update? If it is, then it will ignore any new update requests + from child widgets. + + :type __draggable: bool + :ivar __draggable: Is this canvas widget draggable? + :type __press: event + :ivar __press: The ButtonPress event that we're currently handling. + :type __drag_x: int + :ivar __drag_x: Where it's been moved to (to find dx) + :type __drag_y: int + :ivar __drag_y: Where it's been moved to (to find dy) + :type __callbacks: dictionary + :ivar __callbacks: Registered callbacks. Currently, four keys are + used: ``1``, ``2``, ``3``, and ``'drag'``. The values are + callback functions. Each callback function takes a single + argument, which is the ``CanvasWidget`` that triggered the + callback. + """ + + def __init__(self, canvas, parent=None, **attribs): + """ + Create a new canvas widget. This constructor should only be + called by subclass constructors; and it should be called only + "after" the subclass has constructed all graphical canvas + objects and registered all child widgets. + + :param canvas: This canvas widget's canvas. + :type canvas: Tkinter.Canvas + :param parent: This canvas widget's hierarchical parent. + :type parent: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + if self.__class__ == CanvasWidget: + raise TypeError("CanvasWidget is an abstract base class") + + if not isinstance(canvas, Canvas): + raise TypeError("Expected a canvas!") + + self.__canvas = canvas + self.__parent = parent + + # If the subclass constructor called _add_child_widget, then + # self.__children will already exist. + if not hasattr(self, "_CanvasWidget__children"): + self.__children = [] + + # Is this widget hidden? + self.__hidden = 0 + + # Update control (prevents infinite loops) + self.__updating = 0 + + # Button-press and drag callback handling. + self.__press = None + self.__drag_x = self.__drag_y = 0 + self.__callbacks = {} + self.__draggable = 0 + + # Set up attributes. + for (attr, value) in list(attribs.items()): + self[attr] = value + + # Manage this canvas widget + self._manage() + + # Register any new bindings + for tag in self._tags(): + self.__canvas.tag_bind(tag, "", self.__press_cb) + self.__canvas.tag_bind(tag, "", self.__press_cb) + self.__canvas.tag_bind(tag, "", self.__press_cb) + + ##////////////////////////////////////////////////////// + ## Inherited methods. 
+ ##////////////////////////////////////////////////////// + + def bbox(self): + """ + :return: A bounding box for this ``CanvasWidget``. The bounding + box is a tuple of four coordinates, *(xmin, ymin, xmax, ymax)*, + for a rectangle which encloses all of the canvas + widget's graphical elements. Bounding box coordinates are + specified with respect to the coordinate space of the ``Canvas``. + :rtype: tuple(int, int, int, int) + """ + if self.__hidden: + return (0, 0, 0, 0) + if len(self.tags()) == 0: + raise ValueError("No tags") + return self.__canvas.bbox(*self.tags()) + + def width(self): + """ + :return: The width of this canvas widget's bounding box, in + its ``Canvas``'s coordinate space. + :rtype: int + """ + if len(self.tags()) == 0: + raise ValueError("No tags") + bbox = self.__canvas.bbox(*self.tags()) + return bbox[2] - bbox[0] + + def height(self): + """ + :return: The height of this canvas widget's bounding box, in + its ``Canvas``'s coordinate space. + :rtype: int + """ + if len(self.tags()) == 0: + raise ValueError("No tags") + bbox = self.__canvas.bbox(*self.tags()) + return bbox[3] - bbox[1] + + def parent(self): + """ + :return: The hierarchical parent of this canvas widget. + ``self`` is considered a subpart of its parent for + purposes of user interaction. + :rtype: CanvasWidget or None + """ + return self.__parent + + def child_widgets(self): + """ + :return: A list of the hierarchical children of this canvas + widget. These children are considered part of ``self`` + for purposes of user interaction. + :rtype: list of CanvasWidget + """ + return self.__children + + def canvas(self): + """ + :return: The canvas that this canvas widget is bound to. + :rtype: Tkinter.Canvas + """ + return self.__canvas + + def move(self, dx, dy): + """ + Move this canvas widget by a given distance. In particular, + shift the canvas widget right by ``dx`` pixels, and down by + ``dy`` pixels. Both ``dx`` and ``dy`` may be negative, resulting + in leftward or upward movement. + + :type dx: int + :param dx: The number of pixels to move this canvas widget + rightwards. + :type dy: int + :param dy: The number of pixels to move this canvas widget + downwards. + :rtype: None + """ + if dx == dy == 0: + return + for tag in self.tags(): + self.__canvas.move(tag, dx, dy) + if self.__parent: + self.__parent.update(self) + + def moveto(self, x, y, anchor="NW"): + """ + Move this canvas widget to the given location. In particular, + shift the canvas widget such that the corner or side of the + bounding box specified by ``anchor`` is at location (``x``, + ``y``). + + :param x,y: The location that the canvas widget should be moved + to. + :param anchor: The corner or side of the canvas widget that + should be moved to the specified location. ``'N'`` + specifies the top center; ``'NE'`` specifies the top right + corner; etc. + """ + x1, y1, x2, y2 = self.bbox() + if anchor == "NW": + self.move(x - x1, y - y1) + if anchor == "N": + self.move(x - x1 / 2 - x2 / 2, y - y1) + if anchor == "NE": + self.move(x - x2, y - y1) + if anchor == "E": + self.move(x - x2, y - y1 / 2 - y2 / 2) + if anchor == "SE": + self.move(x - x2, y - y2) + if anchor == "S": + self.move(x - x1 / 2 - x2 / 2, y - y2) + if anchor == "SW": + self.move(x - x1, y - y2) + if anchor == "W": + self.move(x - x1, y - y1 / 2 - y2 / 2) + + def destroy(self): + """ + Remove this ``CanvasWidget`` from its ``Canvas``. After a + ``CanvasWidget`` has been destroyed, it should not be accessed. 
+ + Note that you only need to destroy a top-level + ``CanvasWidget``; its child widgets will be destroyed + automatically. If you destroy a non-top-level + ``CanvasWidget``, then the entire top-level widget will be + destroyed. + + :raise ValueError: if this ``CanvasWidget`` has a parent. + :rtype: None + """ + if self.__parent is not None: + self.__parent.destroy() + return + + for tag in self.tags(): + self.__canvas.tag_unbind(tag, "") + self.__canvas.tag_unbind(tag, "") + self.__canvas.tag_unbind(tag, "") + self.__canvas.delete(*self.tags()) + self.__canvas = None + + def update(self, child): + """ + Update the graphical display of this canvas widget, and all of + its ancestors, in response to a change in one of this canvas + widget's children. + + :param child: The child widget that changed. + :type child: CanvasWidget + """ + if self.__hidden or child.__hidden: + return + # If we're already updating, then do nothing. This prevents + # infinite loops when _update modifies its children. + if self.__updating: + return + self.__updating = 1 + + # Update this CanvasWidget. + self._update(child) + + # Propagate update request to the parent. + if self.__parent: + self.__parent.update(self) + + # We're done updating. + self.__updating = 0 + + def manage(self): + """ + Arrange this canvas widget and all of its descendants. + + :rtype: None + """ + if self.__hidden: + return + for child in self.__children: + child.manage() + self._manage() + + def tags(self): + """ + :return: a list of the canvas tags for all graphical + elements managed by this canvas widget, including + graphical elements managed by its child widgets. + :rtype: list of int + """ + if self.__canvas is None: + raise ValueError("Attempt to access a destroyed canvas widget") + tags = [] + tags += self._tags() + for child in self.__children: + tags += child.tags() + return tags + + def __setitem__(self, attr, value): + """ + Set the value of the attribute ``attr`` to ``value``. See the + class documentation for a list of attributes supported by this + canvas widget. + + :rtype: None + """ + if attr == "draggable": + self.__draggable = value + else: + raise ValueError("Unknown attribute %r" % attr) + + def __getitem__(self, attr): + """ + :return: the value of the attribute ``attr``. See the class + documentation for a list of attributes supported by this + canvas widget. + :rtype: (any) + """ + if attr == "draggable": + return self.__draggable + else: + raise ValueError("Unknown attribute %r" % attr) + + def __repr__(self): + """ + :return: a string representation of this canvas widget. + :rtype: str + """ + return "<%s>" % self.__class__.__name__ + + def hide(self): + """ + Temporarily hide this canvas widget. + + :rtype: None + """ + self.__hidden = 1 + for tag in self.tags(): + self.__canvas.itemconfig(tag, state="hidden") + + def show(self): + """ + Show a hidden canvas widget. + + :rtype: None + """ + self.__hidden = 0 + for tag in self.tags(): + self.__canvas.itemconfig(tag, state="normal") + + def hidden(self): + """ + :return: True if this canvas widget is hidden. + :rtype: bool + """ + return self.__hidden + + ##////////////////////////////////////////////////////// + ## Callback interface + ##////////////////////////////////////////////////////// + + def bind_click(self, callback, button=1): + """ + Register a new callback that will be called whenever this + ``CanvasWidget`` is clicked on. 
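+
+        For example (a sketch only; ``cw`` stands for any constructed
+        canvas widget):
+
+            >>> def on_click(widget): print('clicked', widget)
+            >>> cw.bind_click(on_click, button=1)    # doctest: +SKIP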
+ + :type callback: function + :param callback: The callback function that will be called + whenever this ``CanvasWidget`` is clicked. This function + will be called with this ``CanvasWidget`` as its argument. + :type button: int + :param button: Which button the user should use to click on + this ``CanvasWidget``. Typically, this should be 1 (left + button), 3 (right button), or 2 (middle button). + """ + self.__callbacks[button] = callback + + def bind_drag(self, callback): + """ + Register a new callback that will be called after this + ``CanvasWidget`` is dragged. This implicitly makes this + ``CanvasWidget`` draggable. + + :type callback: function + :param callback: The callback function that will be called + whenever this ``CanvasWidget`` is clicked. This function + will be called with this ``CanvasWidget`` as its argument. + """ + self.__draggable = 1 + self.__callbacks["drag"] = callback + + def unbind_click(self, button=1): + """ + Remove a callback that was registered with ``bind_click``. + + :type button: int + :param button: Which button the user should use to click on + this ``CanvasWidget``. Typically, this should be 1 (left + button), 3 (right button), or 2 (middle button). + """ + try: + del self.__callbacks[button] + except: + pass + + def unbind_drag(self): + """ + Remove a callback that was registered with ``bind_drag``. + """ + try: + del self.__callbacks["drag"] + except: + pass + + ##////////////////////////////////////////////////////// + ## Callback internals + ##////////////////////////////////////////////////////// + + def __press_cb(self, event): + """ + Handle a button-press event: + - record the button press event in ``self.__press`` + - register a button-release callback. + - if this CanvasWidget or any of its ancestors are + draggable, then register the appropriate motion callback. + """ + # If we're already waiting for a button release, then ignore + # this new button press. + if ( + self.__canvas.bind("") + or self.__canvas.bind("") + or self.__canvas.bind("") + ): + return + + # Unbind motion (just in case; this shouldn't be necessary) + self.__canvas.unbind("") + + # Record the button press event. + self.__press = event + + # If any ancestor is draggable, set up a motion callback. + # (Only if they pressed button number 1) + if event.num == 1: + widget = self + while widget is not None: + if widget["draggable"]: + widget.__start_drag(event) + break + widget = widget.parent() + + # Set up the button release callback. + self.__canvas.bind("" % event.num, self.__release_cb) + + def __start_drag(self, event): + """ + Begin dragging this object: + - register a motion callback + - record the drag coordinates + """ + self.__canvas.bind("", self.__motion_cb) + self.__drag_x = event.x + self.__drag_y = event.y + + def __motion_cb(self, event): + """ + Handle a motion event: + - move this object to the new location + - record the new drag coordinates + """ + self.move(event.x - self.__drag_x, event.y - self.__drag_y) + self.__drag_x = event.x + self.__drag_y = event.y + + def __release_cb(self, event): + """ + Handle a release callback: + - unregister motion & button release callbacks. + - decide whether they clicked, dragged, or cancelled + - call the appropriate handler. + """ + # Unbind the button release & motion callbacks. + self.__canvas.unbind("" % event.num) + self.__canvas.unbind("") + + # Is it a click or a drag? 
+ if ( + event.time - self.__press.time < 100 + and abs(event.x - self.__press.x) + abs(event.y - self.__press.y) < 5 + ): + # Move it back, if we were dragging. + if self.__draggable and event.num == 1: + self.move( + self.__press.x - self.__drag_x, self.__press.y - self.__drag_y + ) + self.__click(event.num) + elif event.num == 1: + self.__drag() + + self.__press = None + + def __drag(self): + """ + If this ``CanvasWidget`` has a drag callback, then call it; + otherwise, find the closest ancestor with a drag callback, and + call it. If no ancestors have a drag callback, do nothing. + """ + if self.__draggable: + if "drag" in self.__callbacks: + cb = self.__callbacks["drag"] + try: + cb(self) + except: + print("Error in drag callback for %r" % self) + elif self.__parent is not None: + self.__parent.__drag() + + def __click(self, button): + """ + If this ``CanvasWidget`` has a drag callback, then call it; + otherwise, find the closest ancestor with a click callback, and + call it. If no ancestors have a click callback, do nothing. + """ + if button in self.__callbacks: + cb = self.__callbacks[button] + # try: + cb(self) + # except: + # print('Error in click callback for %r' % self) + # raise + elif self.__parent is not None: + self.__parent.__click(button) + + ##////////////////////////////////////////////////////// + ## Child/parent Handling + ##////////////////////////////////////////////////////// + + def _add_child_widget(self, child): + """ + Register a hierarchical child widget. The child will be + considered part of this canvas widget for purposes of user + interaction. ``_add_child_widget`` has two direct effects: + - It sets ``child``'s parent to this canvas widget. + - It adds ``child`` to the list of canvas widgets returned by + the ``child_widgets`` member function. + + :param child: The new child widget. ``child`` must not already + have a parent. + :type child: CanvasWidget + """ + if not hasattr(self, "_CanvasWidget__children"): + self.__children = [] + if child.__parent is not None: + raise ValueError(f"{child} already has a parent") + child.__parent = self + self.__children.append(child) + + def _remove_child_widget(self, child): + """ + Remove a hierarchical child widget. This child will no longer + be considered part of this canvas widget for purposes of user + interaction. ``_add_child_widget`` has two direct effects: + - It sets ``child``'s parent to None. + - It removes ``child`` from the list of canvas widgets + returned by the ``child_widgets`` member function. + + :param child: The child widget to remove. ``child`` must be a + child of this canvas widget. + :type child: CanvasWidget + """ + self.__children.remove(child) + child.__parent = None + + ##////////////////////////////////////////////////////// + ## Defined by subclass + ##////////////////////////////////////////////////////// + + @abstractmethod + def _tags(self): + """ + :return: a list of canvas tags for all graphical elements + managed by this canvas widget, not including graphical + elements managed by its child widgets. + :rtype: list of int + """ + + def _manage(self): + """ + Arrange the child widgets of this canvas widget. This method + is called when the canvas widget is initially created. It is + also called if the user calls the ``manage`` method on this + canvas widget or any of its ancestors. + + :rtype: None + """ + + def _update(self, child): + """ + Update this canvas widget in response to a change in one of + its children. + + :param child: The child that changed. 
+ :type child: CanvasWidget + :rtype: None + """ + + +##////////////////////////////////////////////////////// +## Basic widgets. +##////////////////////////////////////////////////////// + + +class TextWidget(CanvasWidget): + """ + A canvas widget that displays a single string of text. + + Attributes: + - ``color``: the color of the text. + - ``font``: the font used to display the text. + - ``justify``: justification for multi-line texts. Valid values + are ``left``, ``center``, and ``right``. + - ``width``: the width of the text. If the text is wider than + this width, it will be line-wrapped at whitespace. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, text, **attribs): + """ + Create a new text widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type text: str + :param text: The string of text to display. + :param attribs: The new canvas widget's attributes. + """ + self._text = text + self._tag = canvas.create_text(1, 1, text=text) + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + if attr in ("color", "font", "justify", "width"): + if attr == "color": + attr = "fill" + self.canvas().itemconfig(self._tag, {attr: value}) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "width": + return int(self.canvas().itemcget(self._tag, attr)) + elif attr in ("color", "font", "justify"): + if attr == "color": + attr = "fill" + return self.canvas().itemcget(self._tag, attr) + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [self._tag] + + def text(self): + """ + :return: The text displayed by this text widget. + :rtype: str + """ + return self.canvas().itemcget(self._tag, "TEXT") + + def set_text(self, text): + """ + Change the text that is displayed by this text widget. + + :type text: str + :param text: The string of text to display. + :rtype: None + """ + self.canvas().itemconfig(self._tag, text=text) + if self.parent() is not None: + self.parent().update(self) + + def __repr__(self): + return "[Text: %r]" % self._text + + +class SymbolWidget(TextWidget): + """ + A canvas widget that displays special symbols, such as the + negation sign and the exists operator. Symbols are specified by + name. Currently, the following symbol names are defined: ``neg``, + ``disj``, ``conj``, ``lambda``, ``merge``, ``forall``, ``exists``, + ``subseteq``, ``subset``, ``notsubset``, ``emptyset``, ``imp``, + ``rightarrow``, ``equal``, ``notequal``, ``epsilon``. + + Attributes: + + - ``color``: the color of the text. + - ``draggable``: whether the text can be dragged by the user. + + :cvar SYMBOLS: A dictionary mapping from symbols to the character + in the ``symbol`` font used to render them. + """ + + SYMBOLS = { + "neg": "\330", + "disj": "\332", + "conj": "\331", + "lambda": "\154", + "merge": "\304", + "forall": "\042", + "exists": "\044", + "subseteq": "\315", + "subset": "\314", + "notsubset": "\313", + "emptyset": "\306", + "imp": "\336", + "rightarrow": chr(222), #'\256', + "equal": "\75", + "notequal": "\271", + "intersection": "\307", + "union": "\310", + "epsilon": "e", + } + + def __init__(self, canvas, symbol, **attribs): + """ + Create a new symbol widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type symbol: str + :param symbol: The name of the symbol to display. + :param attribs: The new canvas widget's attributes. 
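+
+        A short sketch (assumes an existing Tkinter ``Canvas`` named ``c``)::
+
+            >>> sym = SymbolWidget(c, 'forall', color='blue')   # doctest: +SKIP
+            >>> sym.set_symbol('exists')                        # doctest: +SKIP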
+ """ + attribs["font"] = "symbol" + TextWidget.__init__(self, canvas, "", **attribs) + self.set_symbol(symbol) + + def symbol(self): + """ + :return: the name of the symbol that is displayed by this + symbol widget. + :rtype: str + """ + return self._symbol + + def set_symbol(self, symbol): + """ + Change the symbol that is displayed by this symbol widget. + + :type symbol: str + :param symbol: The name of the symbol to display. + """ + if symbol not in SymbolWidget.SYMBOLS: + raise ValueError("Unknown symbol: %s" % symbol) + self._symbol = symbol + self.set_text(SymbolWidget.SYMBOLS[symbol]) + + def __repr__(self): + return "[Symbol: %r]" % self._symbol + + @staticmethod + def symbolsheet(size=20): + """ + Open a new Tkinter window that displays the entire alphabet + for the symbol font. This is useful for constructing the + ``SymbolWidget.SYMBOLS`` dictionary. + """ + top = Tk() + + def destroy(e, top=top): + top.destroy() + + top.bind("q", destroy) + Button(top, text="Quit", command=top.destroy).pack(side="bottom") + text = Text(top, font=("helvetica", -size), width=20, height=30) + text.pack(side="left") + sb = Scrollbar(top, command=text.yview) + text["yscrollcommand"] = sb.set + sb.pack(side="right", fill="y") + text.tag_config("symbol", font=("symbol", -size)) + for i in range(256): + if i in (0, 10): + continue # null and newline + for k, v in list(SymbolWidget.SYMBOLS.items()): + if v == chr(i): + text.insert("end", "%-10s\t" % k) + break + else: + text.insert("end", "%-10d \t" % i) + text.insert("end", "[%s]\n" % chr(i), "symbol") + top.mainloop() + + +class AbstractContainerWidget(CanvasWidget): + """ + An abstract class for canvas widgets that contain a single child, + such as ``BoxWidget`` and ``OvalWidget``. Subclasses must define + a constructor, which should create any new graphical elements and + then call the ``AbstractCanvasContainer`` constructor. Subclasses + must also define the ``_update`` method and the ``_tags`` method; + and any subclasses that define attributes should define + ``__setitem__`` and ``__getitem__``. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new container widget. This constructor should only + be called by subclass constructors. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The container's child widget. ``child`` must not + have a parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + self._child = child + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def _manage(self): + self._update(self._child) + + def child(self): + """ + :return: The child widget contained by this container widget. + :rtype: CanvasWidget + """ + return self._child + + def set_child(self, child): + """ + Change the child widget contained by this container widget. + + :param child: The new child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :rtype: None + """ + self._remove_child_widget(self._child) + self._add_child_widget(child) + self._child = child + self.update(child) + + def __repr__(self): + name = self.__class__.__name__ + if name[-6:] == "Widget": + name = name[:-6] + return f"[{name}: {self._child!r}]" + + +class BoxWidget(AbstractContainerWidget): + """ + A canvas widget that places a box around a child widget. + + Attributes: + - ``fill``: The color used to fill the interior of the box. + - ``outline``: The color used to draw the outline of the box. 
+ - ``width``: The width of the outline of the box. + - ``margin``: The number of pixels space left between the child + and the box. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new box widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + self._child = child + self._margin = 1 + self._box = canvas.create_rectangle(1, 1, 1, 1) + canvas.tag_lower(self._box) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + if attr == "margin": + self._margin = value + elif attr in ("outline", "fill", "width"): + self.canvas().itemconfig(self._box, {attr: value}) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "margin": + return self._margin + elif attr == "width": + return float(self.canvas().itemcget(self._box, attr)) + elif attr in ("outline", "fill", "width"): + return self.canvas().itemcget(self._box, attr) + else: + return CanvasWidget.__getitem__(self, attr) + + def _update(self, child): + (x1, y1, x2, y2) = child.bbox() + margin = self._margin + self["width"] / 2 + self.canvas().coords( + self._box, x1 - margin, y1 - margin, x2 + margin, y2 + margin + ) + + def _tags(self): + return [self._box] + + +class OvalWidget(AbstractContainerWidget): + """ + A canvas widget that places a oval around a child widget. + + Attributes: + - ``fill``: The color used to fill the interior of the oval. + - ``outline``: The color used to draw the outline of the oval. + - ``width``: The width of the outline of the oval. + - ``margin``: The number of pixels space left between the child + and the oval. + - ``draggable``: whether the text can be dragged by the user. + - ``double``: If true, then a double-oval is drawn. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new oval widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + self._child = child + self._margin = 1 + self._oval = canvas.create_oval(1, 1, 1, 1) + self._circle = attribs.pop("circle", False) + self._double = attribs.pop("double", False) + if self._double: + self._oval2 = canvas.create_oval(1, 1, 1, 1) + else: + self._oval2 = None + canvas.tag_lower(self._oval) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + c = self.canvas() + if attr == "margin": + self._margin = value + elif attr == "double": + if value == True and self._oval2 is None: + # Copy attributes & position from self._oval. 
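+                # The second oval copies the first oval's outline color and
+                # width, but its bounding box is padded by twice the outline
+                # width and it is lowered beneath the first, giving the
+                # "double oval" appearance.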
+ x1, y1, x2, y2 = c.bbox(self._oval) + w = self["width"] * 2 + self._oval2 = c.create_oval( + x1 - w, + y1 - w, + x2 + w, + y2 + w, + outline=c.itemcget(self._oval, "outline"), + width=c.itemcget(self._oval, "width"), + ) + c.tag_lower(self._oval2) + if value == False and self._oval2 is not None: + c.delete(self._oval2) + self._oval2 = None + elif attr in ("outline", "fill", "width"): + c.itemconfig(self._oval, {attr: value}) + if self._oval2 is not None and attr != "fill": + c.itemconfig(self._oval2, {attr: value}) + if self._oval2 is not None and attr != "fill": + self.canvas().itemconfig(self._oval2, {attr: value}) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "margin": + return self._margin + elif attr == "double": + return self._double is not None + elif attr == "width": + return float(self.canvas().itemcget(self._oval, attr)) + elif attr in ("outline", "fill", "width"): + return self.canvas().itemcget(self._oval, attr) + else: + return CanvasWidget.__getitem__(self, attr) + + # The ratio between inscribed & circumscribed ovals + RATIO = 1.4142135623730949 + + def _update(self, child): + R = OvalWidget.RATIO + (x1, y1, x2, y2) = child.bbox() + margin = self._margin + + # If we're a circle, pretend our contents are square. + if self._circle: + dx, dy = abs(x1 - x2), abs(y1 - y2) + if dx > dy: + y = (y1 + y2) / 2 + y1, y2 = y - dx / 2, y + dx / 2 + elif dy > dx: + x = (x1 + x2) / 2 + x1, x2 = x - dy / 2, x + dy / 2 + + # Find the four corners. + left = int((x1 * (1 + R) + x2 * (1 - R)) / 2) + right = left + int((x2 - x1) * R) + top = int((y1 * (1 + R) + y2 * (1 - R)) / 2) + bot = top + int((y2 - y1) * R) + self.canvas().coords( + self._oval, left - margin, top - margin, right + margin, bot + margin + ) + if self._oval2 is not None: + self.canvas().coords( + self._oval2, + left - margin + 2, + top - margin + 2, + right + margin - 2, + bot + margin - 2, + ) + + def _tags(self): + if self._oval2 is None: + return [self._oval] + else: + return [self._oval, self._oval2] + + +class ParenWidget(AbstractContainerWidget): + """ + A canvas widget that places a pair of parenthases around a child + widget. + + Attributes: + - ``color``: The color used to draw the parenthases. + - ``width``: The width of the parenthases. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new parenthasis widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. 
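+
+        An illustrative sketch (assumes an existing canvas ``c``)::
+
+            >>> txt = TextWidget(c, 'hello')                  # doctest: +SKIP
+            >>> paren = ParenWidget(c, txt, color='red')      # doctest: +SKIP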
+ """ + self._child = child + self._oparen = canvas.create_arc(1, 1, 1, 1, style="arc", start=90, extent=180) + self._cparen = canvas.create_arc(1, 1, 1, 1, style="arc", start=-90, extent=180) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + if attr == "color": + self.canvas().itemconfig(self._oparen, outline=value) + self.canvas().itemconfig(self._cparen, outline=value) + elif attr == "width": + self.canvas().itemconfig(self._oparen, width=value) + self.canvas().itemconfig(self._cparen, width=value) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "color": + return self.canvas().itemcget(self._oparen, "outline") + elif attr == "width": + return self.canvas().itemcget(self._oparen, "width") + else: + return CanvasWidget.__getitem__(self, attr) + + def _update(self, child): + (x1, y1, x2, y2) = child.bbox() + width = max((y2 - y1) / 6, 4) + self.canvas().coords(self._oparen, x1 - width, y1, x1 + width, y2) + self.canvas().coords(self._cparen, x2 - width, y1, x2 + width, y2) + + def _tags(self): + return [self._oparen, self._cparen] + + +class BracketWidget(AbstractContainerWidget): + """ + A canvas widget that places a pair of brackets around a child + widget. + + Attributes: + - ``color``: The color used to draw the brackets. + - ``width``: The width of the brackets. + - ``draggable``: whether the text can be dragged by the user. + """ + + def __init__(self, canvas, child, **attribs): + """ + Create a new bracket widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param child: The child widget. ``child`` must not have a + parent. + :type child: CanvasWidget + :param attribs: The new canvas widget's attributes. + """ + self._child = child + self._obrack = canvas.create_line(1, 1, 1, 1, 1, 1, 1, 1) + self._cbrack = canvas.create_line(1, 1, 1, 1, 1, 1, 1, 1) + AbstractContainerWidget.__init__(self, canvas, child, **attribs) + + def __setitem__(self, attr, value): + if attr == "color": + self.canvas().itemconfig(self._obrack, fill=value) + self.canvas().itemconfig(self._cbrack, fill=value) + elif attr == "width": + self.canvas().itemconfig(self._obrack, width=value) + self.canvas().itemconfig(self._cbrack, width=value) + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "color": + return self.canvas().itemcget(self._obrack, "outline") + elif attr == "width": + return self.canvas().itemcget(self._obrack, "width") + else: + return CanvasWidget.__getitem__(self, attr) + + def _update(self, child): + (x1, y1, x2, y2) = child.bbox() + width = max((y2 - y1) / 8, 2) + self.canvas().coords( + self._obrack, x1, y1, x1 - width, y1, x1 - width, y2, x1, y2 + ) + self.canvas().coords( + self._cbrack, x2, y1, x2 + width, y1, x2 + width, y2, x2, y2 + ) + + def _tags(self): + return [self._obrack, self._cbrack] + + +class SequenceWidget(CanvasWidget): + """ + A canvas widget that keeps a list of canvas widgets in a + horizontal line. + + Attributes: + - ``align``: The vertical alignment of the children. Possible + values are ``'top'``, ``'center'``, and ``'bottom'``. By + default, children are center-aligned. + - ``space``: The amount of horizontal space to place between + children. By default, one pixel of space is used. + - ``ordered``: If true, then keep the children in their + original order. + """ + + def __init__(self, canvas, *children, **attribs): + """ + Create a new sequence widget. 
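+
+        For example, to lay three text widgets out in a row (a sketch;
+        ``c`` is an existing canvas)::
+
+            >>> seq = SequenceWidget(c, TextWidget(c, 'a'),
+            ...                      TextWidget(c, 'b'),
+            ...                      TextWidget(c, 'c'), space=5)   # doctest: +SKIP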
+ + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param children: The widgets that should be aligned + horizontally. Each child must not have a parent. + :type children: list(CanvasWidget) + :param attribs: The new canvas widget's attributes. + """ + self._align = "center" + self._space = 1 + self._ordered = False + self._children = list(children) + for child in children: + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + if attr == "align": + if value not in ("top", "bottom", "center"): + raise ValueError("Bad alignment: %r" % value) + self._align = value + elif attr == "space": + self._space = value + elif attr == "ordered": + self._ordered = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "align": + return self._align + elif attr == "space": + return self._space + elif attr == "ordered": + return self._ordered + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [] + + def _yalign(self, top, bot): + if self._align == "top": + return top + if self._align == "bottom": + return bot + if self._align == "center": + return (top + bot) / 2 + + def _update(self, child): + # Align all children with child. + (left, top, right, bot) = child.bbox() + y = self._yalign(top, bot) + for c in self._children: + (x1, y1, x2, y2) = c.bbox() + c.move(0, y - self._yalign(y1, y2)) + + if self._ordered and len(self._children) > 1: + index = self._children.index(child) + + x = right + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + if x > x1: + self._children[i].move(x - x1, 0) + x += x2 - x1 + self._space + + x = left - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + if x < x2: + self._children[i].move(x - x2, 0) + x -= x2 - x1 + self._space + + def _manage(self): + if len(self._children) == 0: + return + child = self._children[0] + + # Align all children with child. + (left, top, right, bot) = child.bbox() + y = self._yalign(top, bot) + + index = self._children.index(child) + + # Line up children to the right of child. + x = right + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + self._children[i].move(x - x1, y - self._yalign(y1, y2)) + x += x2 - x1 + self._space + + # Line up children to the left of child. + x = left - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + self._children[i].move(x - x2, y - self._yalign(y1, y2)) + x -= x2 - x1 + self._space + + def __repr__(self): + return "[Sequence: " + repr(self._children)[1:-1] + "]" + + # Provide an alias for the child_widgets() member. + children = CanvasWidget.child_widgets + + def replace_child(self, oldchild, newchild): + """ + Replace the child canvas widget ``oldchild`` with ``newchild``. + ``newchild`` must not have a parent. ``oldchild``'s parent will + be set to None. + + :type oldchild: CanvasWidget + :param oldchild: The child canvas widget to remove. + :type newchild: CanvasWidget + :param newchild: The canvas widget that should replace + ``oldchild``. + """ + index = self._children.index(oldchild) + self._children[index] = newchild + self._remove_child_widget(oldchild) + self._add_child_widget(newchild) + self.update(newchild) + + def remove_child(self, child): + """ + Remove the given child canvas widget. 
``child``'s parent will + be set to None. + + :type child: CanvasWidget + :param child: The child canvas widget to remove. + """ + index = self._children.index(child) + del self._children[index] + self._remove_child_widget(child) + if len(self._children) > 0: + self.update(self._children[0]) + + def insert_child(self, index, child): + """ + Insert a child canvas widget before a given index. + + :type child: CanvasWidget + :param child: The canvas widget that should be inserted. + :type index: int + :param index: The index where the child widget should be + inserted. In particular, the index of ``child`` will be + ``index``; and the index of any children whose indices were + greater than equal to ``index`` before ``child`` was + inserted will be incremented by one. + """ + self._children.insert(index, child) + self._add_child_widget(child) + + +class StackWidget(CanvasWidget): + """ + A canvas widget that keeps a list of canvas widgets in a vertical + line. + + Attributes: + - ``align``: The horizontal alignment of the children. Possible + values are ``'left'``, ``'center'``, and ``'right'``. By + default, children are center-aligned. + - ``space``: The amount of vertical space to place between + children. By default, one pixel of space is used. + - ``ordered``: If true, then keep the children in their + original order. + """ + + def __init__(self, canvas, *children, **attribs): + """ + Create a new stack widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :param children: The widgets that should be aligned + vertically. Each child must not have a parent. + :type children: list(CanvasWidget) + :param attribs: The new canvas widget's attributes. + """ + self._align = "center" + self._space = 1 + self._ordered = False + self._children = list(children) + for child in children: + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def __setitem__(self, attr, value): + if attr == "align": + if value not in ("left", "right", "center"): + raise ValueError("Bad alignment: %r" % value) + self._align = value + elif attr == "space": + self._space = value + elif attr == "ordered": + self._ordered = value + else: + CanvasWidget.__setitem__(self, attr, value) + + def __getitem__(self, attr): + if attr == "align": + return self._align + elif attr == "space": + return self._space + elif attr == "ordered": + return self._ordered + else: + return CanvasWidget.__getitem__(self, attr) + + def _tags(self): + return [] + + def _xalign(self, left, right): + if self._align == "left": + return left + if self._align == "right": + return right + if self._align == "center": + return (left + right) / 2 + + def _update(self, child): + # Align all children with child. + (left, top, right, bot) = child.bbox() + x = self._xalign(left, right) + for c in self._children: + (x1, y1, x2, y2) = c.bbox() + c.move(x - self._xalign(x1, x2), 0) + + if self._ordered and len(self._children) > 1: + index = self._children.index(child) + + y = bot + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + if y > y1: + self._children[i].move(0, y - y1) + y += y2 - y1 + self._space + + y = top - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + if y < y2: + self._children[i].move(0, y - y2) + y -= y2 - y1 + self._space + + def _manage(self): + if len(self._children) == 0: + return + child = self._children[0] + + # Align all children with child. 
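+        # The first child serves as the anchor: every other child is aligned
+        # horizontally with it (according to ``align``) and then stacked
+        # below or above it, separated by ``space`` pixels.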
+ (left, top, right, bot) = child.bbox() + x = self._xalign(left, right) + + index = self._children.index(child) + + # Line up children below the child. + y = bot + self._space + for i in range(index + 1, len(self._children)): + (x1, y1, x2, y2) = self._children[i].bbox() + self._children[i].move(x - self._xalign(x1, x2), y - y1) + y += y2 - y1 + self._space + + # Line up children above the child. + y = top - self._space + for i in range(index - 1, -1, -1): + (x1, y1, x2, y2) = self._children[i].bbox() + self._children[i].move(x - self._xalign(x1, x2), y - y2) + y -= y2 - y1 + self._space + + def __repr__(self): + return "[Stack: " + repr(self._children)[1:-1] + "]" + + # Provide an alias for the child_widgets() member. + children = CanvasWidget.child_widgets + + def replace_child(self, oldchild, newchild): + """ + Replace the child canvas widget ``oldchild`` with ``newchild``. + ``newchild`` must not have a parent. ``oldchild``'s parent will + be set to None. + + :type oldchild: CanvasWidget + :param oldchild: The child canvas widget to remove. + :type newchild: CanvasWidget + :param newchild: The canvas widget that should replace + ``oldchild``. + """ + index = self._children.index(oldchild) + self._children[index] = newchild + self._remove_child_widget(oldchild) + self._add_child_widget(newchild) + self.update(newchild) + + def remove_child(self, child): + """ + Remove the given child canvas widget. ``child``'s parent will + be set to None. + + :type child: CanvasWidget + :param child: The child canvas widget to remove. + """ + index = self._children.index(child) + del self._children[index] + self._remove_child_widget(child) + if len(self._children) > 0: + self.update(self._children[0]) + + def insert_child(self, index, child): + """ + Insert a child canvas widget before a given index. + + :type child: CanvasWidget + :param child: The canvas widget that should be inserted. + :type index: int + :param index: The index where the child widget should be + inserted. In particular, the index of ``child`` will be + ``index``; and the index of any children whose indices were + greater than equal to ``index`` before ``child`` was + inserted will be incremented by one. + """ + self._children.insert(index, child) + self._add_child_widget(child) + + +class SpaceWidget(CanvasWidget): + """ + A canvas widget that takes up space but does not display + anything. A ``SpaceWidget`` can be used to add space between + elements. Each space widget is characterized by a width and a + height. If you wish to only create horizontal space, then use a + height of zero; and if you wish to only create vertical space, use + a width of zero. + """ + + def __init__(self, canvas, width, height, **attribs): + """ + Create a new space widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type width: int + :param width: The width of the new space widget. + :type height: int + :param height: The height of the new space widget. + :param attribs: The new canvas widget's attributes. + """ + # For some reason, + if width > 4: + width -= 4 + if height > 4: + height -= 4 + self._tag = canvas.create_line(1, 1, width, height, fill="") + CanvasWidget.__init__(self, canvas, **attribs) + + # note: width() and height() are already defined by CanvasWidget. + def set_width(self, width): + """ + Change the width of this space widget. + + :param width: The new width. 
+ :type width: int + :rtype: None + """ + [x1, y1, x2, y2] = self.bbox() + self.canvas().coords(self._tag, x1, y1, x1 + width, y2) + + def set_height(self, height): + """ + Change the height of this space widget. + + :param height: The new height. + :type height: int + :rtype: None + """ + [x1, y1, x2, y2] = self.bbox() + self.canvas().coords(self._tag, x1, y1, x2, y1 + height) + + def _tags(self): + return [self._tag] + + def __repr__(self): + return "[Space]" + + +class ScrollWatcherWidget(CanvasWidget): + """ + A special canvas widget that adjusts its ``Canvas``'s scrollregion + to always include the bounding boxes of all of its children. The + scroll-watcher widget will only increase the size of the + ``Canvas``'s scrollregion; it will never decrease it. + """ + + def __init__(self, canvas, *children, **attribs): + """ + Create a new scroll-watcher widget. + + :type canvas: Tkinter.Canvas + :param canvas: This canvas widget's canvas. + :type children: list(CanvasWidget) + :param children: The canvas widgets watched by the + scroll-watcher. The scroll-watcher will ensure that these + canvas widgets are always contained in their canvas's + scrollregion. + :param attribs: The new canvas widget's attributes. + """ + for child in children: + self._add_child_widget(child) + CanvasWidget.__init__(self, canvas, **attribs) + + def add_child(self, canvaswidget): + """ + Add a new canvas widget to the scroll-watcher. The + scroll-watcher will ensure that the new canvas widget is + always contained in its canvas's scrollregion. + + :param canvaswidget: The new canvas widget. + :type canvaswidget: CanvasWidget + :rtype: None + """ + self._add_child_widget(canvaswidget) + self.update(canvaswidget) + + def remove_child(self, canvaswidget): + """ + Remove a canvas widget from the scroll-watcher. The + scroll-watcher will no longer ensure that the new canvas + widget is always contained in its canvas's scrollregion. + + :param canvaswidget: The canvas widget to remove. + :type canvaswidget: CanvasWidget + :rtype: None + """ + self._remove_child_widget(canvaswidget) + + def _tags(self): + return [] + + def _update(self, child): + self._adjust_scrollregion() + + def _adjust_scrollregion(self): + """ + Adjust the scrollregion of this scroll-watcher's ``Canvas`` to + include the bounding boxes of all of its children. + """ + bbox = self.bbox() + canvas = self.canvas() + scrollregion = [int(n) for n in canvas["scrollregion"].split()] + if len(scrollregion) != 4: + return + if ( + bbox[0] < scrollregion[0] + or bbox[1] < scrollregion[1] + or bbox[2] > scrollregion[2] + or bbox[3] > scrollregion[3] + ): + scrollregion = "%d %d %d %d" % ( + min(bbox[0], scrollregion[0]), + min(bbox[1], scrollregion[1]), + max(bbox[2], scrollregion[2]), + max(bbox[3], scrollregion[3]), + ) + canvas["scrollregion"] = scrollregion + + +##////////////////////////////////////////////////////// +## Canvas Frame +##////////////////////////////////////////////////////// + + +class CanvasFrame: + """ + A ``Tkinter`` frame containing a canvas and scrollbars. + ``CanvasFrame`` uses a ``ScrollWatcherWidget`` to ensure that all of + the canvas widgets contained on its canvas are within its + scrollregion. In order for ``CanvasFrame`` to make these checks, + all canvas widgets must be registered with ``add_widget`` when they + are added to the canvas; and destroyed with ``destroy_widget`` when + they are no longer needed. 
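+
+    A typical usage sketch (the widget names are illustrative)::
+
+        >>> cf = CanvasFrame(width=300, height=300)              # doctest: +SKIP
+        >>> tw = TextWidget(cf.canvas(), 'hello', draggable=1)   # doctest: +SKIP
+        >>> cf.add_widget(tw, 10, 10)                            # doctest: +SKIP
+        >>> cf.mainloop()                                        # doctest: +SKIP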
+ + If a ``CanvasFrame`` is created with no parent, then it will create + its own main window, including a "Done" button and a "Print" + button. + """ + + def __init__(self, parent=None, **kw): + """ + Create a new ``CanvasFrame``. + + :type parent: Tkinter.BaseWidget or Tkinter.Tk + :param parent: The parent ``Tkinter`` widget. If no parent is + specified, then ``CanvasFrame`` will create a new main + window. + :param kw: Keyword arguments for the new ``Canvas``. See the + documentation for ``Tkinter.Canvas`` for more information. + """ + # If no parent was given, set up a top-level window. + if parent is None: + self._parent = Tk() + self._parent.title("NLTK") + self._parent.bind("", lambda e: self.print_to_file()) + self._parent.bind("", self.destroy) + self._parent.bind("", self.destroy) + else: + self._parent = parent + + # Create a frame for the canvas & scrollbars + self._frame = frame = Frame(self._parent) + self._canvas = canvas = Canvas(frame, **kw) + xscrollbar = Scrollbar(self._frame, orient="horizontal") + yscrollbar = Scrollbar(self._frame, orient="vertical") + xscrollbar["command"] = canvas.xview + yscrollbar["command"] = canvas.yview + canvas["xscrollcommand"] = xscrollbar.set + canvas["yscrollcommand"] = yscrollbar.set + yscrollbar.pack(fill="y", side="right") + xscrollbar.pack(fill="x", side="bottom") + canvas.pack(expand=1, fill="both", side="left") + + # Set initial scroll region. + scrollregion = "0 0 {} {}".format(canvas["width"], canvas["height"]) + canvas["scrollregion"] = scrollregion + + self._scrollwatcher = ScrollWatcherWidget(canvas) + + # If no parent was given, pack the frame, and add a menu. + if parent is None: + self.pack(expand=1, fill="both") + self._init_menubar() + + def _init_menubar(self): + menubar = Menu(self._parent) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Print to Postscript", + underline=0, + command=self.print_to_file, + accelerator="Ctrl-p", + ) + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + self._parent.config(menu=menubar) + + def print_to_file(self, filename=None): + """ + Print the contents of this ``CanvasFrame`` to a postscript + file. If no filename is given, then prompt the user for one. + + :param filename: The name of the file to print the tree to. + :type filename: str + :rtype: None + """ + if filename is None: + ftypes = [("Postscript files", ".ps"), ("All files", "*")] + filename = asksaveasfilename(filetypes=ftypes, defaultextension=".ps") + if not filename: + return + (x0, y0, w, h) = self.scrollregion() + postscript = self._canvas.postscript( + x=x0, + y=y0, + width=w + 2, + height=h + 2, + pagewidth=w + 2, # points = 1/72 inch + pageheight=h + 2, # points = 1/72 inch + pagex=0, + pagey=0, + ) + # workaround for bug in Tk font handling + postscript = postscript.replace(" 0 scalefont ", " 9 scalefont ") + with open(filename, "wb") as f: + f.write(postscript.encode("utf8")) + + def scrollregion(self): + """ + :return: The current scroll region for the canvas managed by + this ``CanvasFrame``. + :rtype: 4-tuple of int + """ + (x1, y1, x2, y2) = self._canvas["scrollregion"].split() + return (int(x1), int(y1), int(x2), int(y2)) + + def canvas(self): + """ + :return: The canvas managed by this ``CanvasFrame``. + :rtype: Tkinter.Canvas + """ + return self._canvas + + def add_widget(self, canvaswidget, x=None, y=None): + """ + Register a canvas widget with this ``CanvasFrame``. 
The + ``CanvasFrame`` will ensure that this canvas widget is always + within the ``Canvas``'s scrollregion. If no coordinates are + given for the canvas widget, then the ``CanvasFrame`` will + attempt to find a clear area of the canvas for it. + + :type canvaswidget: CanvasWidget + :param canvaswidget: The new canvas widget. ``canvaswidget`` + must have been created on this ``CanvasFrame``'s canvas. + :type x: int + :param x: The initial x coordinate for the upper left hand + corner of ``canvaswidget``, in the canvas's coordinate + space. + :type y: int + :param y: The initial y coordinate for the upper left hand + corner of ``canvaswidget``, in the canvas's coordinate + space. + """ + if x is None or y is None: + (x, y) = self._find_room(canvaswidget, x, y) + + # Move to (x,y) + (x1, y1, x2, y2) = canvaswidget.bbox() + canvaswidget.move(x - x1, y - y1) + + # Register with scrollwatcher. + self._scrollwatcher.add_child(canvaswidget) + + def _find_room(self, widget, desired_x, desired_y): + """ + Try to find a space for a given widget. + """ + (left, top, right, bot) = self.scrollregion() + w = widget.width() + h = widget.height() + + if w >= (right - left): + return (0, 0) + if h >= (bot - top): + return (0, 0) + + # Move the widget out of the way, for now. + (x1, y1, x2, y2) = widget.bbox() + widget.move(left - x2 - 50, top - y2 - 50) + + if desired_x is not None: + x = desired_x + for y in range(top, bot - h, int((bot - top - h) / 10)): + if not self._canvas.find_overlapping( + x - 5, y - 5, x + w + 5, y + h + 5 + ): + return (x, y) + + if desired_y is not None: + y = desired_y + for x in range(left, right - w, int((right - left - w) / 10)): + if not self._canvas.find_overlapping( + x - 5, y - 5, x + w + 5, y + h + 5 + ): + return (x, y) + + for y in range(top, bot - h, int((bot - top - h) / 10)): + for x in range(left, right - w, int((right - left - w) / 10)): + if not self._canvas.find_overlapping( + x - 5, y - 5, x + w + 5, y + h + 5 + ): + return (x, y) + return (0, 0) + + def destroy_widget(self, canvaswidget): + """ + Remove a canvas widget from this ``CanvasFrame``. This + deregisters the canvas widget, and destroys it. + """ + self.remove_widget(canvaswidget) + canvaswidget.destroy() + + def remove_widget(self, canvaswidget): + # Deregister with scrollwatcher. + self._scrollwatcher.remove_child(canvaswidget) + + def pack(self, cnf={}, **kw): + """ + Pack this ``CanvasFrame``. See the documentation for + ``Tkinter.Pack`` for more information. + """ + self._frame.pack(cnf, **kw) + # Adjust to be big enough for kids? + + def destroy(self, *e): + """ + Destroy this ``CanvasFrame``. If this ``CanvasFrame`` created a + top-level window, then this will close that window. + """ + if self._parent is None: + return + self._parent.destroy() + self._parent = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this frame is created from a non-interactive program (e.g. + from a secript); otherwise, the frame will close as soon as + the script completes. + """ + if in_idle(): + return + self._parent.mainloop(*args, **kwargs) + + +##////////////////////////////////////////////////////// +## Text display +##////////////////////////////////////////////////////// + + +class ShowText: + """ + A ``Tkinter`` window used to display a text. ``ShowText`` is + typically used by graphical tools to display help text, or similar + information. 
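+
+    Example sketch::
+
+        >>> ShowText(None, 'Help', 'Some help text ...')   # doctest: +SKIP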
+ """ + + def __init__(self, root, title, text, width=None, height=None, **textbox_options): + if width is None or height is None: + (width, height) = self.find_dimentions(text, width, height) + + # Create the main window. + if root is None: + self._top = top = Tk() + else: + self._top = top = Toplevel(root) + top.title(title) + + b = Button(top, text="Ok", command=self.destroy) + b.pack(side="bottom") + + tbf = Frame(top) + tbf.pack(expand=1, fill="both") + scrollbar = Scrollbar(tbf, orient="vertical") + scrollbar.pack(side="right", fill="y") + textbox = Text(tbf, wrap="word", width=width, height=height, **textbox_options) + textbox.insert("end", text) + textbox["state"] = "disabled" + textbox.pack(side="left", expand=1, fill="both") + scrollbar["command"] = textbox.yview + textbox["yscrollcommand"] = scrollbar.set + + # Make it easy to close the window. + top.bind("q", self.destroy) + top.bind("x", self.destroy) + top.bind("c", self.destroy) + top.bind("", self.destroy) + top.bind("", self.destroy) + + # Focus the scrollbar, so they can use up/down, etc. + scrollbar.focus() + + def find_dimentions(self, text, width, height): + lines = text.split("\n") + if width is None: + maxwidth = max(len(line) for line in lines) + width = min(maxwidth, 80) + + # Now, find height. + height = 0 + for line in lines: + while len(line) > width: + brk = line[:width].rfind(" ") + line = line[brk:] + height += 1 + height += 1 + height = min(height, 25) + + return (width, height) + + def destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + def mainloop(self, *args, **kwargs): + """ + Enter the Tkinter mainloop. This function must be called if + this window is created from a non-interactive program (e.g. + from a secript); otherwise, the window will close as soon as + the script completes. + """ + if in_idle(): + return + self._top.mainloop(*args, **kwargs) + + +##////////////////////////////////////////////////////// +## Entry dialog +##////////////////////////////////////////////////////// + + +class EntryDialog: + """ + A dialog box for entering + """ + + def __init__( + self, parent, original_text="", instructions="", set_callback=None, title=None + ): + self._parent = parent + self._original_text = original_text + self._set_callback = set_callback + + width = int(max(30, len(original_text) * 3 / 2)) + self._top = Toplevel(parent) + + if title: + self._top.title(title) + + # The text entry box. + entryframe = Frame(self._top) + entryframe.pack(expand=1, fill="both", padx=5, pady=5, ipady=10) + if instructions: + l = Label(entryframe, text=instructions) + l.pack(side="top", anchor="w", padx=30) + self._entry = Entry(entryframe, width=width) + self._entry.pack(expand=1, fill="x", padx=30) + self._entry.insert(0, original_text) + + # A divider + divider = Frame(self._top, borderwidth=1, relief="sunken") + divider.pack(fill="x", ipady=1, padx=10) + + # The buttons. 
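+        # Three buttons: "Ok" applies the current entry text via the callback
+        # and closes the dialog, "Apply" invokes the callback without
+        # closing, and "Cancel" restores the original text before closing.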
+ buttons = Frame(self._top) + buttons.pack(expand=0, fill="x", padx=5, pady=5) + b = Button(buttons, text="Cancel", command=self._cancel, width=8) + b.pack(side="right", padx=5) + b = Button(buttons, text="Ok", command=self._ok, width=8, default="active") + b.pack(side="left", padx=5) + b = Button(buttons, text="Apply", command=self._apply, width=8) + b.pack(side="left") + + self._top.bind("", self._ok) + self._top.bind("", self._cancel) + self._top.bind("", self._cancel) + + self._entry.focus() + + def _reset(self, *e): + self._entry.delete(0, "end") + self._entry.insert(0, self._original_text) + if self._set_callback: + self._set_callback(self._original_text) + + def _cancel(self, *e): + try: + self._reset() + except: + pass + self._destroy() + + def _ok(self, *e): + self._apply() + self._destroy() + + def _apply(self, *e): + if self._set_callback: + self._set_callback(self._entry.get()) + + def _destroy(self, *e): + if self._top is None: + return + self._top.destroy() + self._top = None + + +##////////////////////////////////////////////////////// +## Colorized List +##////////////////////////////////////////////////////// + + +class ColorizedList: + """ + An abstract base class for displaying a colorized list of items. + Subclasses should define: + + - ``_init_colortags``, which sets up Text color tags that + will be used by the list. + - ``_item_repr``, which returns a list of (text,colortag) + tuples that make up the colorized representation of the + item. + + :note: Typically, you will want to register a callback for + ``'select'`` that calls ``mark`` on the given item. + """ + + def __init__(self, parent, items=[], **options): + """ + Construct a new list. + + :param parent: The Tk widget that contains the colorized list + :param items: The initial contents of the colorized list. + :param options: + """ + self._parent = parent + self._callbacks = {} + + # Which items are marked? + self._marks = {} + + # Initialize the Tkinter frames. + self._init_itemframe(options.copy()) + + # Set up key & mouse bindings. + self._textwidget.bind("", self._keypress) + self._textwidget.bind("", self._buttonpress) + + # Fill in the given CFG's items. + self._items = None + self.set(items) + + # //////////////////////////////////////////////////////////// + # Abstract methods + # //////////////////////////////////////////////////////////// + @abstractmethod + def _init_colortags(self, textwidget, options): + """ + Set up any colortags that will be used by this colorized list. + E.g.: + textwidget.tag_config('terminal', foreground='black') + """ + + @abstractmethod + def _item_repr(self, item): + """ + Return a list of (text, colortag) tuples that make up the + colorized representation of the item. Colorized + representations may not span multiple lines. I.e., the text + strings returned may not contain newline characters. + """ + + # //////////////////////////////////////////////////////////// + # Item Access + # //////////////////////////////////////////////////////////// + + def get(self, index=None): + """ + :return: A list of the items contained by this list. + """ + if index is None: + return self._items[:] + else: + return self._items[index] + + def set(self, items): + """ + Modify the list of items contained by this list. 
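+        The text widget is briefly re-enabled, repopulated from
+        ``_item_repr``, and then disabled again; any existing marks are
+        cleared.  If ``items`` is identical to the current contents,
+        nothing is redrawn.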
+ """ + items = list(items) + if self._items == items: + return + self._items = list(items) + + self._textwidget["state"] = "normal" + self._textwidget.delete("1.0", "end") + for item in items: + for (text, colortag) in self._item_repr(item): + assert "\n" not in text, "item repr may not contain newline" + self._textwidget.insert("end", text, colortag) + self._textwidget.insert("end", "\n") + # Remove the final newline + self._textwidget.delete("end-1char", "end") + self._textwidget.mark_set("insert", "1.0") + self._textwidget["state"] = "disabled" + # Clear all marks + self._marks.clear() + + def unmark(self, item=None): + """ + Remove highlighting from the given item; or from every item, + if no item is given. + :raise ValueError: If ``item`` is not contained in the list. + :raise KeyError: If ``item`` is not marked. + """ + if item is None: + self._marks.clear() + self._textwidget.tag_remove("highlight", "1.0", "end+1char") + else: + index = self._items.index(item) + del self._marks[item] + (start, end) = ("%d.0" % (index + 1), "%d.0" % (index + 2)) + self._textwidget.tag_remove("highlight", start, end) + + def mark(self, item): + """ + Highlight the given item. + :raise ValueError: If ``item`` is not contained in the list. + """ + self._marks[item] = 1 + index = self._items.index(item) + (start, end) = ("%d.0" % (index + 1), "%d.0" % (index + 2)) + self._textwidget.tag_add("highlight", start, end) + + def markonly(self, item): + """ + Remove any current highlighting, and mark the given item. + :raise ValueError: If ``item`` is not contained in the list. + """ + self.unmark() + self.mark(item) + + def view(self, item): + """ + Adjust the view such that the given item is visible. If + the item is already visible, then do nothing. + """ + index = self._items.index(item) + self._textwidget.see("%d.0" % (index + 1)) + + # //////////////////////////////////////////////////////////// + # Callbacks + # //////////////////////////////////////////////////////////// + + def add_callback(self, event, func): + """ + Register a callback function with the list. This function + will be called whenever the given event occurs. + + :param event: The event that will trigger the callback + function. Valid events are: click1, click2, click3, + space, return, select, up, down, next, prior, move + :param func: The function that should be called when + the event occurs. ``func`` will be called with a + single item as its argument. (The item selected + or the item moved to). + """ + if event == "select": + events = ["click1", "space", "return"] + elif event == "move": + events = ["up", "down", "next", "prior"] + else: + events = [event] + + for e in events: + self._callbacks.setdefault(e, {})[func] = 1 + + def remove_callback(self, event, func=None): + """ + Deregister a callback function. If ``func`` is none, then + all callbacks are removed for the given event. 
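+
+        For example (a sketch; ``lst`` is an instance of a ``ColorizedList``
+        subclass and ``on_select`` a previously registered function)::
+
+            >>> lst.remove_callback('select', on_select)   # doctest: +SKIP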
+ """ + if event is None: + events = list(self._callbacks.keys()) + elif event == "select": + events = ["click1", "space", "return"] + elif event == "move": + events = ["up", "down", "next", "prior"] + else: + events = [event] + + for e in events: + if func is None: + del self._callbacks[e] + else: + try: + del self._callbacks[e][func] + except: + pass + + # //////////////////////////////////////////////////////////// + # Tkinter Methods + # //////////////////////////////////////////////////////////// + + def pack(self, cnf={}, **kw): + # "@include: Tkinter.Pack.pack" + self._itemframe.pack(cnf, **kw) + + def grid(self, cnf={}, **kw): + # "@include: Tkinter.Grid.grid" + self._itemframe.grid(cnf, *kw) + + def focus(self): + # "@include: Tkinter.Widget.focus" + self._textwidget.focus() + + # //////////////////////////////////////////////////////////// + # Internal Methods + # //////////////////////////////////////////////////////////// + + def _init_itemframe(self, options): + self._itemframe = Frame(self._parent) + + # Create the basic Text widget & scrollbar. + options.setdefault("background", "#e0e0e0") + self._textwidget = Text(self._itemframe, **options) + self._textscroll = Scrollbar(self._itemframe, takefocus=0, orient="vertical") + self._textwidget.config(yscrollcommand=self._textscroll.set) + self._textscroll.config(command=self._textwidget.yview) + self._textscroll.pack(side="right", fill="y") + self._textwidget.pack(expand=1, fill="both", side="left") + + # Initialize the colorization tags + self._textwidget.tag_config( + "highlight", background="#e0ffff", border="1", relief="raised" + ) + self._init_colortags(self._textwidget, options) + + # How do I want to mark keyboard selection? + self._textwidget.tag_config("sel", foreground="") + self._textwidget.tag_config( + "sel", foreground="", background="", border="", underline=1 + ) + self._textwidget.tag_lower("highlight", "sel") + + def _fire_callback(self, event, itemnum): + if event not in self._callbacks: + return + if 0 <= itemnum < len(self._items): + item = self._items[itemnum] + else: + item = None + for cb_func in list(self._callbacks[event].keys()): + cb_func(item) + + def _buttonpress(self, event): + clickloc = "@%d,%d" % (event.x, event.y) + insert_point = self._textwidget.index(clickloc) + itemnum = int(insert_point.split(".")[0]) - 1 + self._fire_callback("click%d" % event.num, itemnum) + + def _keypress(self, event): + if event.keysym == "Return" or event.keysym == "space": + insert_point = self._textwidget.index("insert") + itemnum = int(insert_point.split(".")[0]) - 1 + self._fire_callback(event.keysym.lower(), itemnum) + return + elif event.keysym == "Down": + delta = "+1line" + elif event.keysym == "Up": + delta = "-1line" + elif event.keysym == "Next": + delta = "+10lines" + elif event.keysym == "Prior": + delta = "-10lines" + else: + return "continue" + + self._textwidget.mark_set("insert", "insert" + delta) + self._textwidget.see("insert") + self._textwidget.tag_remove("sel", "1.0", "end+1char") + self._textwidget.tag_add("sel", "insert linestart", "insert lineend") + + insert_point = self._textwidget.index("insert") + itemnum = int(insert_point.split(".")[0]) - 1 + self._fire_callback(event.keysym.lower(), itemnum) + + return "break" + + +##////////////////////////////////////////////////////// +## Improved OptionMenu +##////////////////////////////////////////////////////// + + +class MutableOptionMenu(Menubutton): + def __init__(self, master, values, **options): + self._callback = options.get("command") + if 
"command" in options: + del options["command"] + + # Create a variable + self._variable = variable = StringVar() + if len(values) > 0: + variable.set(values[0]) + + kw = { + "borderwidth": 2, + "textvariable": variable, + "indicatoron": 1, + "relief": RAISED, + "anchor": "c", + "highlightthickness": 2, + } + kw.update(options) + Widget.__init__(self, master, "menubutton", kw) + self.widgetName = "tk_optionMenu" + self._menu = Menu(self, name="menu", tearoff=0) + self.menuname = self._menu._w + + self._values = [] + for value in values: + self.add(value) + + self["menu"] = self._menu + + def add(self, value): + if value in self._values: + return + + def set(value=value): + self.set(value) + + self._menu.add_command(label=value, command=set) + self._values.append(value) + + def set(self, value): + self._variable.set(value) + if self._callback: + self._callback(value) + + def remove(self, value): + # Might raise indexerror: pass to parent. + i = self._values.index(value) + del self._values[i] + self._menu.delete(i, i) + + def __getitem__(self, name): + if name == "menu": + return self.__menu + return Widget.__getitem__(self, name) + + def destroy(self): + """Destroy this widget and the associated menu.""" + Menubutton.destroy(self) + self._menu = None + + +##////////////////////////////////////////////////////// +## Test code. +##////////////////////////////////////////////////////// + + +def demo(): + """ + A simple demonstration showing how to use canvas widgets. + """ + + def fill(cw): + from random import randint + + cw["fill"] = "#00%04d" % randint(0, 9999) + + def color(cw): + from random import randint + + cw["color"] = "#ff%04d" % randint(0, 9999) + + cf = CanvasFrame(closeenough=10, width=300, height=300) + c = cf.canvas() + ct3 = TextWidget(c, "hiya there", draggable=1) + ct2 = TextWidget(c, "o o\n||\n___\n U", draggable=1, justify="center") + co = OvalWidget(c, ct2, outline="red") + ct = TextWidget(c, "o o\n||\n\\___/", draggable=1, justify="center") + cp = ParenWidget(c, ct, color="red") + cb = BoxWidget(c, cp, fill="cyan", draggable=1, width=3, margin=10) + equation = SequenceWidget( + c, + SymbolWidget(c, "forall"), + TextWidget(c, "x"), + SymbolWidget(c, "exists"), + TextWidget(c, "y: "), + TextWidget(c, "x"), + SymbolWidget(c, "notequal"), + TextWidget(c, "y"), + ) + space = SpaceWidget(c, 0, 30) + cstack = StackWidget(c, cb, ct3, space, co, equation, align="center") + prompt_msg = TextWidget( + c, "try clicking\nand dragging", draggable=1, justify="center" + ) + cs = SequenceWidget(c, cstack, prompt_msg) + zz = BracketWidget(c, cs, color="green4", width=3) + cf.add_widget(zz, 60, 30) + + cb.bind_click(fill) + ct.bind_click(color) + co.bind_click(fill) + ct2.bind_click(color) + ct3.bind_click(color) + + cf.mainloop() + # ShowText(None, 'title', ((('this is text'*150)+'\n')*5)) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28d78d653933433b87920d43246ee135cdd6cddd Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/inference/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..454608139a7ec319d460578975f1285ce354bb09 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/inference/__pycache__/nonmonotonic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a0ea6d0f2acf58f9b6bed996be778b99c38d429 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/inference/__pycache__/prover9.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/inference/api.py b/venv/lib/python3.10/site-packages/nltk/inference/api.py new file mode 100644 index 0000000000000000000000000000000000000000..12f1c099941280c1a72f40f957330dc5497a1b27 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/inference/api.py @@ -0,0 +1,614 @@ +# Natural Language Toolkit: Classifier Interface +# +# Author: Ewan Klein +# Dan Garrette +# +# URL: +# For license information, see LICENSE.TXT + +""" +Interfaces and base classes for theorem provers and model builders. + +``Prover`` is a standard interface for a theorem prover which tries to prove a goal from a +list of assumptions. + +``ModelBuilder`` is a standard interface for a model builder. Given just a set of assumptions. +the model builder tries to build a model for the assumptions. Given a set of assumptions and a +goal *G*, the model builder tries to find a counter-model, in the sense of a model that will satisfy +the assumptions plus the negation of *G*. +""" + +import threading +import time +from abc import ABCMeta, abstractmethod + + +class Prover(metaclass=ABCMeta): + """ + Interface for trying to prove a goal from assumptions. Both the goal and + the assumptions are constrained to be formulas of ``logic.Expression``. + """ + + def prove(self, goal=None, assumptions=None, verbose=False): + """ + :return: Whether the proof was successful or not. + :rtype: bool + """ + return self._prove(goal, assumptions, verbose)[0] + + @abstractmethod + def _prove(self, goal=None, assumptions=None, verbose=False): + """ + :return: Whether the proof was successful or not, along with the proof + :rtype: tuple: (bool, str) + """ + + +class ModelBuilder(metaclass=ABCMeta): + """ + Interface for trying to build a model of set of formulas. + Open formulas are assumed to be universally quantified. + Both the goal and the assumptions are constrained to be formulas + of ``logic.Expression``. + """ + + def build_model(self, goal=None, assumptions=None, verbose=False): + """ + Perform the actual model building. + :return: Whether a model was generated + :rtype: bool + """ + return self._build_model(goal, assumptions, verbose)[0] + + @abstractmethod + def _build_model(self, goal=None, assumptions=None, verbose=False): + """ + Perform the actual model building. + :return: Whether a model was generated, and the model itself + :rtype: tuple(bool, sem.Valuation) + """ + + +class TheoremToolCommand(metaclass=ABCMeta): + """ + This class holds a goal and a list of assumptions to be used in proving + or model building. + """ + + @abstractmethod + def add_assumptions(self, new_assumptions): + """ + Add new assumptions to the assumption list. + + :param new_assumptions: new assumptions + :type new_assumptions: list(sem.Expression) + """ + + @abstractmethod + def retract_assumptions(self, retracted, debug=False): + """ + Retract assumptions from the assumption list. 
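+
+        A usage sketch (``cmd`` is any concrete ``TheoremToolCommand`` and
+        ``read_expr`` stands for ``nltk.sem.Expression.fromstring``)::
+
+            >>> cmd.retract_assumptions([read_expr('man(socrates)')])   # doctest: +SKIP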
+ + :param debug: If True, give warning when ``retracted`` is not present on + assumptions list. + :type debug: bool + :param retracted: assumptions to be retracted + :type retracted: list(sem.Expression) + """ + + @abstractmethod + def assumptions(self): + """ + List the current assumptions. + + :return: list of ``Expression`` + """ + + @abstractmethod + def goal(self): + """ + Return the goal + + :return: ``Expression`` + """ + + @abstractmethod + def print_assumptions(self): + """ + Print the list of the current assumptions. + """ + + +class ProverCommand(TheoremToolCommand): + """ + This class holds a ``Prover``, a goal, and a list of assumptions. When + prove() is called, the ``Prover`` is executed with the goal and assumptions. + """ + + @abstractmethod + def prove(self, verbose=False): + """ + Perform the actual proof. + """ + + @abstractmethod + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + + @abstractmethod + def get_prover(self): + """ + Return the prover object + :return: ``Prover`` + """ + + +class ModelBuilderCommand(TheoremToolCommand): + """ + This class holds a ``ModelBuilder``, a goal, and a list of assumptions. + When build_model() is called, the ``ModelBuilder`` is executed with the goal + and assumptions. + """ + + @abstractmethod + def build_model(self, verbose=False): + """ + Perform the actual model building. + :return: A model if one is generated; None otherwise. + :rtype: sem.Valuation + """ + + @abstractmethod + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? + :return: str + """ + + @abstractmethod + def get_model_builder(self): + """ + Return the model builder object + :return: ``ModelBuilder`` + """ + + +class BaseTheoremToolCommand(TheoremToolCommand): + """ + This class holds a goal and a list of assumptions to be used in proving + or model building. + """ + + def __init__(self, goal=None, assumptions=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + """ + self._goal = goal + + if not assumptions: + self._assumptions = [] + else: + self._assumptions = list(assumptions) + + self._result = None + """A holder for the result, to prevent unnecessary re-proving""" + + def add_assumptions(self, new_assumptions): + """ + Add new assumptions to the assumption list. + + :param new_assumptions: new assumptions + :type new_assumptions: list(sem.Expression) + """ + self._assumptions.extend(new_assumptions) + self._result = None + + def retract_assumptions(self, retracted, debug=False): + """ + Retract assumptions from the assumption list. + + :param debug: If True, give warning when ``retracted`` is not present on + assumptions list. + :type debug: bool + :param retracted: assumptions to be retracted + :type retracted: list(sem.Expression) + """ + retracted = set(retracted) + result_list = list(filter(lambda a: a not in retracted, self._assumptions)) + if debug and result_list == self._assumptions: + print(Warning("Assumptions list has not been changed:")) + self.print_assumptions() + + self._assumptions = result_list + + self._result = None + + def assumptions(self): + """ + List the current assumptions. 
+ + :return: list of ``Expression`` + """ + return self._assumptions + + def goal(self): + """ + Return the goal + + :return: ``Expression`` + """ + return self._goal + + def print_assumptions(self): + """ + Print the list of the current assumptions. + """ + for a in self.assumptions(): + print(a) + + +class BaseProverCommand(BaseTheoremToolCommand, ProverCommand): + """ + This class holds a ``Prover``, a goal, and a list of assumptions. When + prove() is called, the ``Prover`` is executed with the goal and assumptions. + """ + + def __init__(self, prover, goal=None, assumptions=None): + """ + :param prover: The theorem tool to execute with the assumptions + :type prover: Prover + :see: ``BaseTheoremToolCommand`` + """ + self._prover = prover + """The theorem tool to execute with the assumptions""" + + BaseTheoremToolCommand.__init__(self, goal, assumptions) + + self._proof = None + + def prove(self, verbose=False): + """ + Perform the actual proof. Store the result to prevent unnecessary + re-proving. + """ + if self._result is None: + self._result, self._proof = self._prover._prove( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call prove() first to get a proof!") + else: + return self.decorate_proof(self._proof, simplify) + + def decorate_proof(self, proof_string, simplify=True): + """ + Modify and return the proof string + :param proof_string: str the proof to decorate + :param simplify: bool simplify the proof? + :return: str + """ + return proof_string + + def get_prover(self): + return self._prover + + +class BaseModelBuilderCommand(BaseTheoremToolCommand, ModelBuilderCommand): + """ + This class holds a ``ModelBuilder``, a goal, and a list of assumptions. When + build_model() is called, the ``ModelBuilder`` is executed with the goal and + assumptions. + """ + + def __init__(self, modelbuilder, goal=None, assumptions=None): + """ + :param modelbuilder: The theorem tool to execute with the assumptions + :type modelbuilder: ModelBuilder + :see: ``BaseTheoremToolCommand`` + """ + self._modelbuilder = modelbuilder + """The theorem tool to execute with the assumptions""" + + BaseTheoremToolCommand.__init__(self, goal, assumptions) + + self._model = None + + def build_model(self, verbose=False): + """ + Attempt to build a model. Store the result to prevent unnecessary + re-building. + """ + if self._result is None: + self._result, self._model = self._modelbuilder._build_model( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call build_model() first to " "get a model!") + else: + return self._decorate_model(self._model, format) + + def _decorate_model(self, valuation_str, format=None): + """ + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + :return: str + """ + return valuation_str + + def get_model_builder(self): + return self._modelbuilder + + +class TheoremToolCommandDecorator(TheoremToolCommand): + """ + A base decorator for the ``ProverCommandDecorator`` and + ``ModelBuilderCommandDecorator`` classes from which decorators can extend. 
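+
+    Editor's sketch (not part of the upstream NLTK docstring): a concrete
+    decorator only needs to delegate to the wrapped command, e.g.::
+
+        class CountingProverCommand(ProverCommandDecorator):
+            # hypothetical decorator that counts how often prove() is called
+            def __init__(self, command):
+                ProverCommandDecorator.__init__(self, command)
+                self.calls = 0
+
+            def prove(self, verbose=False):
+                self.calls += 1
+                return ProverCommandDecorator.prove(self, verbose)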
+ """ + + def __init__(self, command): + """ + :param command: ``TheoremToolCommand`` to decorate + """ + self._command = command + + # The decorator has its own versions of 'result' different from the + # underlying command + self._result = None + + def assumptions(self): + return self._command.assumptions() + + def goal(self): + return self._command.goal() + + def add_assumptions(self, new_assumptions): + self._command.add_assumptions(new_assumptions) + self._result = None + + def retract_assumptions(self, retracted, debug=False): + self._command.retract_assumptions(retracted, debug) + self._result = None + + def print_assumptions(self): + self._command.print_assumptions() + + +class ProverCommandDecorator(TheoremToolCommandDecorator, ProverCommand): + """ + A base decorator for the ``ProverCommand`` class from which other + prover command decorators can extend. + """ + + def __init__(self, proverCommand): + """ + :param proverCommand: ``ProverCommand`` to decorate + """ + TheoremToolCommandDecorator.__init__(self, proverCommand) + + # The decorator has its own versions of 'result' and 'proof' + # because they may be different from the underlying command + self._proof = None + + def prove(self, verbose=False): + if self._result is None: + prover = self.get_prover() + self._result, self._proof = prover._prove( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def proof(self, simplify=True): + """ + Return the proof string + :param simplify: bool simplify the proof? + :return: str + """ + if self._result is None: + raise LookupError("You have to call prove() first to get a proof!") + else: + return self.decorate_proof(self._proof, simplify) + + def decorate_proof(self, proof_string, simplify=True): + """ + Modify and return the proof string + :param proof_string: str the proof to decorate + :param simplify: bool simplify the proof? + :return: str + """ + return self._command.decorate_proof(proof_string, simplify) + + def get_prover(self): + return self._command.get_prover() + + +class ModelBuilderCommandDecorator(TheoremToolCommandDecorator, ModelBuilderCommand): + """ + A base decorator for the ``ModelBuilderCommand`` class from which other + prover command decorators can extend. + """ + + def __init__(self, modelBuilderCommand): + """ + :param modelBuilderCommand: ``ModelBuilderCommand`` to decorate + """ + TheoremToolCommandDecorator.__init__(self, modelBuilderCommand) + + # The decorator has its own versions of 'result' and 'valuation' + # because they may be different from the underlying command + self._model = None + + def build_model(self, verbose=False): + """ + Attempt to build a model. Store the result to prevent unnecessary + re-building. + """ + if self._result is None: + modelbuilder = self.get_model_builder() + self._result, self._model = modelbuilder._build_model( + self.goal(), self.assumptions(), verbose + ) + return self._result + + def model(self, format=None): + """ + Return a string representation of the model + + :param simplify: bool simplify the proof? 
+ :return: str + """ + if self._result is None: + raise LookupError("You have to call build_model() first to " "get a model!") + else: + return self._decorate_model(self._model, format) + + def _decorate_model(self, valuation_str, format=None): + """ + Modify and return the proof string + :param valuation_str: str with the model builder's output + :param format: str indicating the format for displaying + :return: str + """ + return self._command._decorate_model(valuation_str, format) + + def get_model_builder(self): + return self._command.get_prover() + + +class ParallelProverBuilder(Prover, ModelBuilder): + """ + This class stores both a prover and a model builder and when either + prove() or build_model() is called, then both theorem tools are run in + parallel. Whichever finishes first, the prover or the model builder, is the + result that will be used. + """ + + def __init__(self, prover, modelbuilder): + self._prover = prover + self._modelbuilder = modelbuilder + + def _prove(self, goal=None, assumptions=None, verbose=False): + return self._run(goal, assumptions, verbose), "" + + def _build_model(self, goal=None, assumptions=None, verbose=False): + return not self._run(goal, assumptions, verbose), "" + + def _run(self, goal, assumptions, verbose): + # Set up two thread, Prover and ModelBuilder to run in parallel + tp_thread = TheoremToolThread( + lambda: self._prover.prove(goal, assumptions, verbose), verbose, "TP" + ) + mb_thread = TheoremToolThread( + lambda: self._modelbuilder.build_model(goal, assumptions, verbose), + verbose, + "MB", + ) + + tp_thread.start() + mb_thread.start() + + while tp_thread.is_alive() and mb_thread.is_alive(): + # wait until either the prover or the model builder is done + pass + + if tp_thread.result is not None: + return tp_thread.result + elif mb_thread.result is not None: + return not mb_thread.result + else: + return None + + +class ParallelProverBuilderCommand(BaseProverCommand, BaseModelBuilderCommand): + """ + This command stores both a prover and a model builder and when either + prove() or build_model() is called, then both theorem tools are run in + parallel. Whichever finishes first, the prover or the model builder, is the + result that will be used. + + Because the theorem prover result is the opposite of the model builder + result, we will treat self._result as meaning "proof found/no model found". 
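+
+    Illustrative usage (editor's sketch; assumes NLTK's Prover9 and Mace
+    wrappers plus the external Prover9/Mace4 binaries are available on the
+    search path)::
+
+        from nltk.inference.prover9 import Prover9
+        from nltk.inference.mace import Mace
+        from nltk.sem.logic import Expression
+
+        goal = Expression.fromstring('mortal(Socrates)')
+        premises = [Expression.fromstring('all x.(man(x) -> mortal(x))'),
+                    Expression.fromstring('man(Socrates)')]
+        cmd = ParallelProverBuilderCommand(Prover9(), Mace(), goal, premises)
+        cmd.prove()  # True here: the goal follows from the premises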
+ """ + + def __init__(self, prover, modelbuilder, goal=None, assumptions=None): + BaseProverCommand.__init__(self, prover, goal, assumptions) + BaseModelBuilderCommand.__init__(self, modelbuilder, goal, assumptions) + + def prove(self, verbose=False): + return self._run(verbose) + + def build_model(self, verbose=False): + return not self._run(verbose) + + def _run(self, verbose): + # Set up two thread, Prover and ModelBuilder to run in parallel + tp_thread = TheoremToolThread( + lambda: BaseProverCommand.prove(self, verbose), verbose, "TP" + ) + mb_thread = TheoremToolThread( + lambda: BaseModelBuilderCommand.build_model(self, verbose), verbose, "MB" + ) + + tp_thread.start() + mb_thread.start() + + while tp_thread.is_alive() and mb_thread.is_alive(): + # wait until either the prover or the model builder is done + pass + + if tp_thread.result is not None: + self._result = tp_thread.result + elif mb_thread.result is not None: + self._result = not mb_thread.result + return self._result + + +class TheoremToolThread(threading.Thread): + def __init__(self, command, verbose, name=None): + threading.Thread.__init__(self) + self._command = command + self._result = None + self._verbose = verbose + self._name = name + + def run(self): + try: + self._result = self._command() + if self._verbose: + print( + "Thread %s finished with result %s at %s" + % (self._name, self._result, time.localtime(time.time())) + ) + except Exception as e: + print(e) + print("Thread %s completed abnormally" % (self._name)) + + @property + def result(self): + return self._result diff --git a/venv/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py b/venv/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7075ed11e7833201ad98c6fc80406d1ef646db --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/inference/nonmonotonic.py @@ -0,0 +1,561 @@ +# Natural Language Toolkit: Nonmonotonic Reasoning +# +# Author: Daniel H. Garrette +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +A module to perform nonmonotonic reasoning. The ideas and demonstrations in +this module are based on "Logical Foundations of Artificial Intelligence" by +Michael R. Genesereth and Nils J. Nilsson. +""" + +from collections import defaultdict +from functools import reduce + +from nltk.inference.api import Prover, ProverCommandDecorator +from nltk.inference.prover9 import Prover9, Prover9Command +from nltk.sem.logic import ( + AbstractVariableExpression, + AllExpression, + AndExpression, + ApplicationExpression, + BooleanExpression, + EqualityExpression, + ExistsExpression, + Expression, + ImpExpression, + NegatedExpression, + Variable, + VariableExpression, + operator, + unique_variable, +) + + +class ProverParseError(Exception): + pass + + +def get_domain(goal, assumptions): + if goal is None: + all_expressions = assumptions + else: + all_expressions = assumptions + [-goal] + return reduce(operator.or_, (a.constants() for a in all_expressions), set()) + + +class ClosedDomainProver(ProverCommandDecorator): + """ + This is a prover decorator that adds domain closure assumptions before + proving. 
+ """ + + def assumptions(self): + assumptions = [a for a in self._command.assumptions()] + goal = self._command.goal() + domain = get_domain(goal, assumptions) + return [self.replace_quants(ex, domain) for ex in assumptions] + + def goal(self): + goal = self._command.goal() + domain = get_domain(goal, self._command.assumptions()) + return self.replace_quants(goal, domain) + + def replace_quants(self, ex, domain): + """ + Apply the closed domain assumption to the expression + + - Domain = union([e.free()|e.constants() for e in all_expressions]) + - translate "exists x.P" to "(z=d1 | z=d2 | ... ) & P.replace(x,z)" OR + "P.replace(x, d1) | P.replace(x, d2) | ..." + - translate "all x.P" to "P.replace(x, d1) & P.replace(x, d2) & ..." + + :param ex: ``Expression`` + :param domain: set of {Variable}s + :return: ``Expression`` + """ + if isinstance(ex, AllExpression): + conjuncts = [ + ex.term.replace(ex.variable, VariableExpression(d)) for d in domain + ] + conjuncts = [self.replace_quants(c, domain) for c in conjuncts] + return reduce(lambda x, y: x & y, conjuncts) + elif isinstance(ex, BooleanExpression): + return ex.__class__( + self.replace_quants(ex.first, domain), + self.replace_quants(ex.second, domain), + ) + elif isinstance(ex, NegatedExpression): + return -self.replace_quants(ex.term, domain) + elif isinstance(ex, ExistsExpression): + disjuncts = [ + ex.term.replace(ex.variable, VariableExpression(d)) for d in domain + ] + disjuncts = [self.replace_quants(d, domain) for d in disjuncts] + return reduce(lambda x, y: x | y, disjuncts) + else: + return ex + + +class UniqueNamesProver(ProverCommandDecorator): + """ + This is a prover decorator that adds unique names assumptions before + proving. + """ + + def assumptions(self): + """ + - Domain = union([e.free()|e.constants() for e in all_expressions]) + - if "d1 = d2" cannot be proven from the premises, then add "d1 != d2" + """ + assumptions = self._command.assumptions() + + domain = list(get_domain(self._command.goal(), assumptions)) + + # build a dictionary of obvious equalities + eq_sets = SetHolder() + for a in assumptions: + if isinstance(a, EqualityExpression): + av = a.first.variable + bv = a.second.variable + # put 'a' and 'b' in the same set + eq_sets[av].add(bv) + + new_assumptions = [] + for i, a in enumerate(domain): + for b in domain[i + 1 :]: + # if a and b are not already in the same equality set + if b not in eq_sets[a]: + newEqEx = EqualityExpression( + VariableExpression(a), VariableExpression(b) + ) + if Prover9().prove(newEqEx, assumptions): + # we can prove that the names are the same entity. + # remember that they are equal so we don't re-check. + eq_sets[a].add(b) + else: + # we can't prove it, so assume unique names + new_assumptions.append(-newEqEx) + + return assumptions + new_assumptions + + +class SetHolder(list): + """ + A list of sets of Variables. + """ + + def __getitem__(self, item): + """ + :param item: ``Variable`` + :return: the set containing 'item' + """ + assert isinstance(item, Variable) + for s in self: + if item in s: + return s + # item is not found in any existing set. so create a new set + new = {item} + self.append(new) + return new + + +class ClosedWorldProver(ProverCommandDecorator): + """ + This is a prover decorator that completes predicates before proving. + + If the assumptions contain "P(A)", then "all x.(P(x) -> (x=A))" is the completion of "P". + If the assumptions contain "all x.(ostrich(x) -> bird(x))", then "all x.(bird(x) -> ostrich(x))" is the completion of "bird". 
+ If the assumptions don't contain anything that are "P", then "all x.-P(x)" is the completion of "P". + + walk(Socrates) + Socrates != Bill + + all x.(walk(x) -> (x=Socrates)) + ---------------- + -walk(Bill) + + see(Socrates, John) + see(John, Mary) + Socrates != John + John != Mary + + all x.all y.(see(x,y) -> ((x=Socrates & y=John) | (x=John & y=Mary))) + ---------------- + -see(Socrates, Mary) + + all x.(ostrich(x) -> bird(x)) + bird(Tweety) + -ostrich(Sam) + Sam != Tweety + + all x.(bird(x) -> (ostrich(x) | x=Tweety)) + + all x.-ostrich(x) + ------------------- + -bird(Sam) + """ + + def assumptions(self): + assumptions = self._command.assumptions() + + predicates = self._make_predicate_dict(assumptions) + + new_assumptions = [] + for p in predicates: + predHolder = predicates[p] + new_sig = self._make_unique_signature(predHolder) + new_sig_exs = [VariableExpression(v) for v in new_sig] + + disjuncts = [] + + # Turn the signatures into disjuncts + for sig in predHolder.signatures: + equality_exs = [] + for v1, v2 in zip(new_sig_exs, sig): + equality_exs.append(EqualityExpression(v1, v2)) + disjuncts.append(reduce(lambda x, y: x & y, equality_exs)) + + # Turn the properties into disjuncts + for prop in predHolder.properties: + # replace variables from the signature with new sig variables + bindings = {} + for v1, v2 in zip(new_sig_exs, prop[0]): + bindings[v2] = v1 + disjuncts.append(prop[1].substitute_bindings(bindings)) + + # make the assumption + if disjuncts: + # disjuncts exist, so make an implication + antecedent = self._make_antecedent(p, new_sig) + consequent = reduce(lambda x, y: x | y, disjuncts) + accum = ImpExpression(antecedent, consequent) + else: + # nothing has property 'p' + accum = NegatedExpression(self._make_antecedent(p, new_sig)) + + # quantify the implication + for new_sig_var in new_sig[::-1]: + accum = AllExpression(new_sig_var, accum) + new_assumptions.append(accum) + + return assumptions + new_assumptions + + def _make_unique_signature(self, predHolder): + """ + This method figures out how many arguments the predicate takes and + returns a tuple containing that number of unique variables. + """ + return tuple(unique_variable() for i in range(predHolder.signature_len)) + + def _make_antecedent(self, predicate, signature): + """ + Return an application expression with 'predicate' as the predicate + and 'signature' as the list of arguments. + """ + antecedent = predicate + for v in signature: + antecedent = antecedent(VariableExpression(v)) + return antecedent + + def _make_predicate_dict(self, assumptions): + """ + Create a dictionary of predicates from the assumptions. 
+ + :param assumptions: a list of ``Expression``s + :return: dict mapping ``AbstractVariableExpression`` to ``PredHolder`` + """ + predicates = defaultdict(PredHolder) + for a in assumptions: + self._map_predicates(a, predicates) + return predicates + + def _map_predicates(self, expression, predDict): + if isinstance(expression, ApplicationExpression): + func, args = expression.uncurry() + if isinstance(func, AbstractVariableExpression): + predDict[func].append_sig(tuple(args)) + elif isinstance(expression, AndExpression): + self._map_predicates(expression.first, predDict) + self._map_predicates(expression.second, predDict) + elif isinstance(expression, AllExpression): + # collect all the universally quantified variables + sig = [expression.variable] + term = expression.term + while isinstance(term, AllExpression): + sig.append(term.variable) + term = term.term + if isinstance(term, ImpExpression): + if isinstance(term.first, ApplicationExpression) and isinstance( + term.second, ApplicationExpression + ): + func1, args1 = term.first.uncurry() + func2, args2 = term.second.uncurry() + if ( + isinstance(func1, AbstractVariableExpression) + and isinstance(func2, AbstractVariableExpression) + and sig == [v.variable for v in args1] + and sig == [v.variable for v in args2] + ): + predDict[func2].append_prop((tuple(sig), term.first)) + predDict[func1].validate_sig_len(sig) + + +class PredHolder: + """ + This class will be used by a dictionary that will store information + about predicates to be used by the ``ClosedWorldProver``. + + The 'signatures' property is a list of tuples defining signatures for + which the predicate is true. For instance, 'see(john, mary)' would be + result in the signature '(john,mary)' for 'see'. + + The second element of the pair is a list of pairs such that the first + element of the pair is a tuple of variables and the second element is an + expression of those variables that makes the predicate true. For instance, + 'all x.all y.(see(x,y) -> know(x,y))' would result in "((x,y),('see(x,y)'))" + for 'know'. 
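+
+    Editor's illustration: after processing the assumptions "see(john, mary)"
+    and "all x.all y.(see(x,y) -> know(x,y))", the holder for 'see' has
+    signatures [(john, mary)] and signature_len 2, while the holder for
+    'know' has a single property ((x, y), see(x,y)).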
+ """ + + def __init__(self): + self.signatures = [] + self.properties = [] + self.signature_len = None + + def append_sig(self, new_sig): + self.validate_sig_len(new_sig) + self.signatures.append(new_sig) + + def append_prop(self, new_prop): + self.validate_sig_len(new_prop[0]) + self.properties.append(new_prop) + + def validate_sig_len(self, new_sig): + if self.signature_len is None: + self.signature_len = len(new_sig) + elif self.signature_len != len(new_sig): + raise Exception("Signature lengths do not match") + + def __str__(self): + return f"({self.signatures},{self.properties},{self.signature_len})" + + def __repr__(self): + return "%s" % self + + +def closed_domain_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + p3 = lexpr(r"-walk(Bill)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"exists x.walk(x)") + p2 = lexpr(r"man(Socrates)") + p3 = lexpr(r"-walk(Bill)") + c = lexpr(r"walk(Socrates)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"walk(Socrates)") + p2 = lexpr(r"walk(Bill)") + c = lexpr(r"all x.walk(x)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + p1 = lexpr(r"girl(mary)") + p2 = lexpr(r"dog(rover)") + p3 = lexpr(r"all x.(girl(x) -> -dog(x))") + p4 = lexpr(r"all x.(dog(x) -> -girl(x))") + p5 = lexpr(r"chase(mary, rover)") + c = lexpr(r"exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))") + prover = Prover9Command(c, [p1, p2, p3, p4, p5]) + print(prover.prove()) + cdp = ClosedDomainProver(prover) + print("assumptions:") + for a in cdp.assumptions(): + print(" ", a) + print("goal:", cdp.goal()) + print(cdp.prove()) + + +def unique_names_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"man(Socrates)") + p2 = lexpr(r"man(Bill)") + c = lexpr(r"exists x.exists y.(x != y)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + unp = UniqueNamesProver(prover) + print("assumptions:") + for a in unp.assumptions(): + print(" ", a) + print("goal:", unp.goal()) + print(unp.prove()) + + p1 = lexpr(r"all x.(walk(x) -> (x = Socrates))") + p2 = lexpr(r"Bill = William") + p3 = lexpr(r"Bill = Billy") + c = lexpr(r"-walk(William)") + prover = Prover9Command(c, [p1, p2, p3]) + print(prover.prove()) + unp = UniqueNamesProver(prover) + print("assumptions:") + for a in unp.assumptions(): + print(" ", a) + print("goal:", unp.goal()) + print(unp.prove()) + + +def closed_world_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"walk(Socrates)") + p2 = lexpr(r"(Socrates != Bill)") + c = lexpr(r"-walk(Bill)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") 
+ for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + p1 = lexpr(r"see(Socrates, John)") + p2 = lexpr(r"see(John, Mary)") + p3 = lexpr(r"(Socrates != John)") + p4 = lexpr(r"(John != Mary)") + c = lexpr(r"-see(Socrates, Mary)") + prover = Prover9Command(c, [p1, p2, p3, p4]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") + for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + p1 = lexpr(r"all x.(ostrich(x) -> bird(x))") + p2 = lexpr(r"bird(Tweety)") + p3 = lexpr(r"-ostrich(Sam)") + p4 = lexpr(r"Sam != Tweety") + c = lexpr(r"-bird(Sam)") + prover = Prover9Command(c, [p1, p2, p3, p4]) + print(prover.prove()) + cwp = ClosedWorldProver(prover) + print("assumptions:") + for a in cwp.assumptions(): + print(" ", a) + print("goal:", cwp.goal()) + print(cwp.prove()) + + +def combination_prover_demo(): + lexpr = Expression.fromstring + + p1 = lexpr(r"see(Socrates, John)") + p2 = lexpr(r"see(John, Mary)") + c = lexpr(r"-see(Socrates, Mary)") + prover = Prover9Command(c, [p1, p2]) + print(prover.prove()) + command = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover))) + for a in command.assumptions(): + print(a) + print(command.prove()) + + +def default_reasoning_demo(): + lexpr = Expression.fromstring + + premises = [] + + # define taxonomy + premises.append(lexpr(r"all x.(elephant(x) -> animal(x))")) + premises.append(lexpr(r"all x.(bird(x) -> animal(x))")) + premises.append(lexpr(r"all x.(dove(x) -> bird(x))")) + premises.append(lexpr(r"all x.(ostrich(x) -> bird(x))")) + premises.append(lexpr(r"all x.(flying_ostrich(x) -> ostrich(x))")) + + # default properties + premises.append( + lexpr(r"all x.((animal(x) & -Ab1(x)) -> -fly(x))") + ) # normal animals don't fly + premises.append( + lexpr(r"all x.((bird(x) & -Ab2(x)) -> fly(x))") + ) # normal birds fly + premises.append( + lexpr(r"all x.((ostrich(x) & -Ab3(x)) -> -fly(x))") + ) # normal ostriches don't fly + + # specify abnormal entities + premises.append(lexpr(r"all x.(bird(x) -> Ab1(x))")) # flight + premises.append(lexpr(r"all x.(ostrich(x) -> Ab2(x))")) # non-flying bird + premises.append(lexpr(r"all x.(flying_ostrich(x) -> Ab3(x))")) # flying ostrich + + # define entities + premises.append(lexpr(r"elephant(E)")) + premises.append(lexpr(r"dove(D)")) + premises.append(lexpr(r"ostrich(O)")) + + # print the assumptions + prover = Prover9Command(None, premises) + command = UniqueNamesProver(ClosedWorldProver(prover)) + for a in command.assumptions(): + print(a) + + print_proof("-fly(E)", premises) + print_proof("fly(D)", premises) + print_proof("-fly(O)", premises) + + +def print_proof(goal, premises): + lexpr = Expression.fromstring + prover = Prover9Command(lexpr(goal), premises) + command = UniqueNamesProver(ClosedWorldProver(prover)) + print(goal, prover.prove(), command.prove()) + + +def demo(): + closed_domain_demo() + unique_names_demo() + closed_world_demo() + combination_prover_demo() + default_reasoning_demo() + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/inference/resolution.py b/venv/lib/python3.10/site-packages/nltk/inference/resolution.py new file mode 100644 index 0000000000000000000000000000000000000000..52428eb2c5d2bee410716b058165cbccbbb238a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/inference/resolution.py @@ -0,0 +1,759 @@ +# Natural Language Toolkit: First-order Resolution-based Theorem Prover +# +# Author: Dan Garrette +# +# 
Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +""" +Module for a resolution-based First Order theorem prover. +""" + +import operator +from collections import defaultdict +from functools import reduce + +from nltk.inference.api import BaseProverCommand, Prover +from nltk.sem import skolemize +from nltk.sem.logic import ( + AndExpression, + ApplicationExpression, + EqualityExpression, + Expression, + IndividualVariableExpression, + NegatedExpression, + OrExpression, + Variable, + VariableExpression, + is_indvar, + unique_variable, +) + + +class ProverParseError(Exception): + pass + + +class ResolutionProver(Prover): + ANSWER_KEY = "ANSWER" + _assume_false = True + + def _prove(self, goal=None, assumptions=None, verbose=False): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in the proof + :type assumptions: list(sem.Expression) + """ + if not assumptions: + assumptions = [] + + result = None + try: + clauses = [] + if goal: + clauses.extend(clausify(-goal)) + for a in assumptions: + clauses.extend(clausify(a)) + result, clauses = self._attempt_proof(clauses) + if verbose: + print(ResolutionProverCommand._decorate_clauses(clauses)) + except RuntimeError as e: + if self._assume_false and str(e).startswith( + "maximum recursion depth exceeded" + ): + result = False + clauses = [] + else: + if verbose: + print(e) + else: + raise e + return (result, clauses) + + def _attempt_proof(self, clauses): + # map indices to lists of indices, to store attempted unifications + tried = defaultdict(list) + + i = 0 + while i < len(clauses): + if not clauses[i].is_tautology(): + # since we try clauses in order, we should start after the last + # index tried + if tried[i]: + j = tried[i][-1] + 1 + else: + j = i + 1 # nothing tried yet for 'i', so start with the next + + while j < len(clauses): + # don't: 1) unify a clause with itself, + # 2) use tautologies + if i != j and j and not clauses[j].is_tautology(): + tried[i].append(j) + newclauses = clauses[i].unify(clauses[j]) + if newclauses: + for newclause in newclauses: + newclause._parents = (i + 1, j + 1) + clauses.append(newclause) + if not len(newclause): # if there's an empty clause + return (True, clauses) + i = -1 # since we added a new clause, restart from the top + break + j += 1 + i += 1 + return (False, clauses) + + +class ResolutionProverCommand(BaseProverCommand): + def __init__(self, goal=None, assumptions=None, prover=None): + """ + :param goal: Input expression to prove + :type goal: sem.Expression + :param assumptions: Input expressions to use as assumptions in + the proof. + :type assumptions: list(sem.Expression) + """ + if prover is not None: + assert isinstance(prover, ResolutionProver) + else: + prover = ResolutionProver() + + BaseProverCommand.__init__(self, prover, goal, assumptions) + self._clauses = None + + def prove(self, verbose=False): + """ + Perform the actual proof. Store the result to prevent unnecessary + re-proving. 
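+
+        Illustrative usage (editor's sketch, mirroring the tests at the
+        bottom of this module)::
+
+            from nltk.sem.logic import Expression
+            p1 = Expression.fromstring('all x.(man(x) -> mortal(x))')
+            p2 = Expression.fromstring('man(Socrates)')
+            c = Expression.fromstring('mortal(Socrates)')
+            ResolutionProverCommand(c, [p1, p2]).prove()  # True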
+ """ + if self._result is None: + self._result, clauses = self._prover._prove( + self.goal(), self.assumptions(), verbose + ) + self._clauses = clauses + self._proof = ResolutionProverCommand._decorate_clauses(clauses) + return self._result + + def find_answers(self, verbose=False): + self.prove(verbose) + + answers = set() + answer_ex = VariableExpression(Variable(ResolutionProver.ANSWER_KEY)) + for clause in self._clauses: + for term in clause: + if ( + isinstance(term, ApplicationExpression) + and term.function == answer_ex + and not isinstance(term.argument, IndividualVariableExpression) + ): + answers.add(term.argument) + return answers + + @staticmethod + def _decorate_clauses(clauses): + """ + Decorate the proof output. + """ + out = "" + max_clause_len = max(len(str(clause)) for clause in clauses) + max_seq_len = len(str(len(clauses))) + for i in range(len(clauses)): + parents = "A" + taut = "" + if clauses[i].is_tautology(): + taut = "Tautology" + if clauses[i]._parents: + parents = str(clauses[i]._parents) + parents = " " * (max_clause_len - len(str(clauses[i])) + 1) + parents + seq = " " * (max_seq_len - len(str(i + 1))) + str(i + 1) + out += f"[{seq}] {clauses[i]} {parents} {taut}\n" + return out + + +class Clause(list): + def __init__(self, data): + list.__init__(self, data) + self._is_tautology = None + self._parents = None + + def unify(self, other, bindings=None, used=None, skipped=None, debug=False): + """ + Attempt to unify this Clause with the other, returning a list of + resulting, unified, Clauses. + + :param other: ``Clause`` with which to unify + :param bindings: ``BindingDict`` containing bindings that should be used + during the unification + :param used: tuple of two lists of atoms. The first lists the + atoms from 'self' that were successfully unified with atoms from + 'other'. The second lists the atoms from 'other' that were successfully + unified with atoms from 'self'. + :param skipped: tuple of two ``Clause`` objects. The first is a list of all + the atoms from the 'self' Clause that have not been unified with + anything on the path. The second is same thing for the 'other' Clause. + :param debug: bool indicating whether debug statements should print + :return: list containing all the resulting ``Clause`` objects that could be + obtained by unification + """ + if bindings is None: + bindings = BindingDict() + if used is None: + used = ([], []) + if skipped is None: + skipped = ([], []) + if isinstance(debug, bool): + debug = DebugObject(debug) + + newclauses = _iterate_first( + self, other, bindings, used, skipped, _complete_unify_path, debug + ) + + # remove subsumed clauses. make a list of all indices of subsumed + # clauses, and then remove them from the list + subsumed = [] + for i, c1 in enumerate(newclauses): + if i not in subsumed: + for j, c2 in enumerate(newclauses): + if i != j and j not in subsumed and c1.subsumes(c2): + subsumed.append(j) + result = [] + for i in range(len(newclauses)): + if i not in subsumed: + result.append(newclauses[i]) + + return result + + def isSubsetOf(self, other): + """ + Return True iff every term in 'self' is a term in 'other'. + + :param other: ``Clause`` + :return: bool + """ + for a in self: + if a not in other: + return False + return True + + def subsumes(self, other): + """ + Return True iff 'self' subsumes 'other', this is, if there is a + substitution such that every term in 'self' can be unified with a term + in 'other'. 
+ + :param other: ``Clause`` + :return: bool + """ + negatedother = [] + for atom in other: + if isinstance(atom, NegatedExpression): + negatedother.append(atom.term) + else: + negatedother.append(-atom) + + negatedotherClause = Clause(negatedother) + + bindings = BindingDict() + used = ([], []) + skipped = ([], []) + debug = DebugObject(False) + + return ( + len( + _iterate_first( + self, + negatedotherClause, + bindings, + used, + skipped, + _subsumes_finalize, + debug, + ) + ) + > 0 + ) + + def __getslice__(self, start, end): + return Clause(list.__getslice__(self, start, end)) + + def __sub__(self, other): + return Clause([a for a in self if a not in other]) + + def __add__(self, other): + return Clause(list.__add__(self, other)) + + def is_tautology(self): + """ + Self is a tautology if it contains ground terms P and -P. The ground + term, P, must be an exact match, ie, not using unification. + """ + if self._is_tautology is not None: + return self._is_tautology + for i, a in enumerate(self): + if not isinstance(a, EqualityExpression): + j = len(self) - 1 + while j > i: + b = self[j] + if isinstance(a, NegatedExpression): + if a.term == b: + self._is_tautology = True + return True + elif isinstance(b, NegatedExpression): + if a == b.term: + self._is_tautology = True + return True + j -= 1 + self._is_tautology = False + return False + + def free(self): + return reduce(operator.or_, ((atom.free() | atom.constants()) for atom in self)) + + def replace(self, variable, expression): + """ + Replace every instance of variable with expression across every atom + in the clause + + :param variable: ``Variable`` + :param expression: ``Expression`` + """ + return Clause([atom.replace(variable, expression) for atom in self]) + + def substitute_bindings(self, bindings): + """ + Replace every binding + + :param bindings: A list of tuples mapping Variable Expressions to the + Expressions to which they are bound. + :return: ``Clause`` + """ + return Clause([atom.substitute_bindings(bindings) for atom in self]) + + def __str__(self): + return "{" + ", ".join("%s" % item for item in self) + "}" + + def __repr__(self): + return "%s" % self + + +def _iterate_first(first, second, bindings, used, skipped, finalize_method, debug): + """ + This method facilitates movement through the terms of 'self' + """ + debug.line(f"unify({first},{second}) {bindings}") + + if not len(first) or not len(second): # if no more recursions can be performed + return finalize_method(first, second, bindings, used, skipped, debug) + else: + # explore this 'self' atom + result = _iterate_second( + first, second, bindings, used, skipped, finalize_method, debug + 1 + ) + + # skip this possible 'self' atom + newskipped = (skipped[0] + [first[0]], skipped[1]) + result += _iterate_first( + first[1:], second, bindings, used, newskipped, finalize_method, debug + 1 + ) + + try: + newbindings, newused, unused = _unify_terms( + first[0], second[0], bindings, used + ) + # Unification found, so progress with this line of unification + # put skipped and unused terms back into play for later unification. 
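+                # (editor's note) first[0] and second[0] were consumed by the
+                # unification above; the rest of both clauses, plus any
+                # previously skipped or unused atoms, form the candidate
+                # resolvent explored by the recursive call below.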
+ newfirst = first[1:] + skipped[0] + unused[0] + newsecond = second[1:] + skipped[1] + unused[1] + result += _iterate_first( + newfirst, + newsecond, + newbindings, + newused, + ([], []), + finalize_method, + debug + 1, + ) + except BindingException: + # the atoms could not be unified, + pass + + return result + + +def _iterate_second(first, second, bindings, used, skipped, finalize_method, debug): + """ + This method facilitates movement through the terms of 'other' + """ + debug.line(f"unify({first},{second}) {bindings}") + + if not len(first) or not len(second): # if no more recursions can be performed + return finalize_method(first, second, bindings, used, skipped, debug) + else: + # skip this possible pairing and move to the next + newskipped = (skipped[0], skipped[1] + [second[0]]) + result = _iterate_second( + first, second[1:], bindings, used, newskipped, finalize_method, debug + 1 + ) + + try: + newbindings, newused, unused = _unify_terms( + first[0], second[0], bindings, used + ) + # Unification found, so progress with this line of unification + # put skipped and unused terms back into play for later unification. + newfirst = first[1:] + skipped[0] + unused[0] + newsecond = second[1:] + skipped[1] + unused[1] + result += _iterate_second( + newfirst, + newsecond, + newbindings, + newused, + ([], []), + finalize_method, + debug + 1, + ) + except BindingException: + # the atoms could not be unified, + pass + + return result + + +def _unify_terms(a, b, bindings=None, used=None): + """ + This method attempts to unify two terms. Two expressions are unifiable + if there exists a substitution function S such that S(a) == S(-b). + + :param a: ``Expression`` + :param b: ``Expression`` + :param bindings: ``BindingDict`` a starting set of bindings with which + the unification must be consistent + :return: ``BindingDict`` A dictionary of the bindings required to unify + :raise ``BindingException``: If the terms cannot be unified + """ + assert isinstance(a, Expression) + assert isinstance(b, Expression) + + if bindings is None: + bindings = BindingDict() + if used is None: + used = ([], []) + + # Use resolution + if isinstance(a, NegatedExpression) and isinstance(b, ApplicationExpression): + newbindings = most_general_unification(a.term, b, bindings) + newused = (used[0] + [a], used[1] + [b]) + unused = ([], []) + elif isinstance(a, ApplicationExpression) and isinstance(b, NegatedExpression): + newbindings = most_general_unification(a, b.term, bindings) + newused = (used[0] + [a], used[1] + [b]) + unused = ([], []) + + # Use demodulation + elif isinstance(a, EqualityExpression): + newbindings = BindingDict([(a.first.variable, a.second)]) + newused = (used[0] + [a], used[1]) + unused = ([], [b]) + elif isinstance(b, EqualityExpression): + newbindings = BindingDict([(b.first.variable, b.second)]) + newused = (used[0], used[1] + [b]) + unused = ([a], []) + + else: + raise BindingException((a, b)) + + return newbindings, newused, unused + + +def _complete_unify_path(first, second, bindings, used, skipped, debug): + if used[0] or used[1]: # if bindings were made along the path + newclause = Clause(skipped[0] + skipped[1] + first + second) + debug.line(" -> New Clause: %s" % newclause) + return [newclause.substitute_bindings(bindings)] + else: # no bindings made means no unification occurred. 
so no result + debug.line(" -> End") + return [] + + +def _subsumes_finalize(first, second, bindings, used, skipped, debug): + if not len(skipped[0]) and not len(first): + # If there are no skipped terms and no terms left in 'first', then + # all of the terms in the original 'self' were unified with terms + # in 'other'. Therefore, there exists a binding (this one) such that + # every term in self can be unified with a term in other, which + # is the definition of subsumption. + return [True] + else: + return [] + + +def clausify(expression): + """ + Skolemize, clausify, and standardize the variables apart. + """ + clause_list = [] + for clause in _clausify(skolemize(expression)): + for free in clause.free(): + if is_indvar(free.name): + newvar = VariableExpression(unique_variable()) + clause = clause.replace(free, newvar) + clause_list.append(clause) + return clause_list + + +def _clausify(expression): + """ + :param expression: a skolemized expression in CNF + """ + if isinstance(expression, AndExpression): + return _clausify(expression.first) + _clausify(expression.second) + elif isinstance(expression, OrExpression): + first = _clausify(expression.first) + second = _clausify(expression.second) + assert len(first) == 1 + assert len(second) == 1 + return [first[0] + second[0]] + elif isinstance(expression, EqualityExpression): + return [Clause([expression])] + elif isinstance(expression, ApplicationExpression): + return [Clause([expression])] + elif isinstance(expression, NegatedExpression): + if isinstance(expression.term, ApplicationExpression): + return [Clause([expression])] + elif isinstance(expression.term, EqualityExpression): + return [Clause([expression])] + raise ProverParseError() + + +class BindingDict: + def __init__(self, binding_list=None): + """ + :param binding_list: list of (``AbstractVariableExpression``, ``AtomicExpression``) to initialize the dictionary + """ + self.d = {} + + if binding_list: + for (v, b) in binding_list: + self[v] = b + + def __setitem__(self, variable, binding): + """ + A binding is consistent with the dict if its variable is not already bound, OR if its + variable is already bound to its argument. 
+ + :param variable: ``Variable`` The variable to bind + :param binding: ``Expression`` The atomic to which 'variable' should be bound + :raise BindingException: If the variable cannot be bound in this dictionary + """ + assert isinstance(variable, Variable) + assert isinstance(binding, Expression) + + try: + existing = self[variable] + except KeyError: + existing = None + + if not existing or binding == existing: + self.d[variable] = binding + elif isinstance(binding, IndividualVariableExpression): + # Since variable is already bound, try to bind binding to variable + try: + existing = self[binding.variable] + except KeyError: + existing = None + + binding2 = VariableExpression(variable) + + if not existing or binding2 == existing: + self.d[binding.variable] = binding2 + else: + raise BindingException( + "Variable %s already bound to another " "value" % (variable) + ) + else: + raise BindingException( + "Variable %s already bound to another " "value" % (variable) + ) + + def __getitem__(self, variable): + """ + Return the expression to which 'variable' is bound + """ + assert isinstance(variable, Variable) + + intermediate = self.d[variable] + while intermediate: + try: + intermediate = self.d[intermediate] + except KeyError: + return intermediate + + def __contains__(self, item): + return item in self.d + + def __add__(self, other): + """ + :param other: ``BindingDict`` The dict with which to combine self + :return: ``BindingDict`` A new dict containing all the elements of both parameters + :raise BindingException: If the parameter dictionaries are not consistent with each other + """ + try: + combined = BindingDict() + for v in self.d: + combined[v] = self.d[v] + for v in other.d: + combined[v] = other.d[v] + return combined + except BindingException as e: + raise BindingException( + "Attempting to add two contradicting " + "BindingDicts: '%s' and '%s'" % (self, other) + ) from e + + def __len__(self): + return len(self.d) + + def __str__(self): + data_str = ", ".join(f"{v}: {self.d[v]}" for v in sorted(self.d.keys())) + return "{" + data_str + "}" + + def __repr__(self): + return "%s" % self + + +def most_general_unification(a, b, bindings=None): + """ + Find the most general unification of the two given expressions + + :param a: ``Expression`` + :param b: ``Expression`` + :param bindings: ``BindingDict`` a starting set of bindings with which the + unification must be consistent + :return: a list of bindings + :raise BindingException: if the Expressions cannot be unified + """ + if bindings is None: + bindings = BindingDict() + + if a == b: + return bindings + elif isinstance(a, IndividualVariableExpression): + return _mgu_var(a, b, bindings) + elif isinstance(b, IndividualVariableExpression): + return _mgu_var(b, a, bindings) + elif isinstance(a, ApplicationExpression) and isinstance(b, ApplicationExpression): + return most_general_unification( + a.function, b.function, bindings + ) + most_general_unification(a.argument, b.argument, bindings) + raise BindingException((a, b)) + + +def _mgu_var(var, expression, bindings): + if var.variable in expression.free() | expression.constants(): + raise BindingException((var, expression)) + else: + return BindingDict([(var.variable, expression)]) + bindings + + +class BindingException(Exception): + def __init__(self, arg): + if isinstance(arg, tuple): + Exception.__init__(self, "'%s' cannot be bound to '%s'" % arg) + else: + Exception.__init__(self, arg) + + +class UnificationException(Exception): + def __init__(self, a, b): + 
Exception.__init__(self, f"'{a}' cannot unify with '{b}'") + + +class DebugObject: + def __init__(self, enabled=True, indent=0): + self.enabled = enabled + self.indent = indent + + def __add__(self, i): + return DebugObject(self.enabled, self.indent + i) + + def line(self, line): + if self.enabled: + print(" " * self.indent + line) + + +def testResolutionProver(): + resolution_test(r"man(x)") + resolution_test(r"(man(x) -> man(x))") + resolution_test(r"(man(x) -> --man(x))") + resolution_test(r"-(man(x) and -man(x))") + resolution_test(r"(man(x) or -man(x))") + resolution_test(r"(man(x) -> man(x))") + resolution_test(r"-(man(x) and -man(x))") + resolution_test(r"(man(x) or -man(x))") + resolution_test(r"(man(x) -> man(x))") + resolution_test(r"(man(x) iff man(x))") + resolution_test(r"-(man(x) iff -man(x))") + resolution_test("all x.man(x)") + resolution_test("-all x.some y.F(x,y) & some x.all y.(-F(x,y))") + resolution_test("some x.all y.sees(x,y)") + + p1 = Expression.fromstring(r"all x.(man(x) -> mortal(x))") + p2 = Expression.fromstring(r"man(Socrates)") + c = Expression.fromstring(r"mortal(Socrates)") + print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}") + + p1 = Expression.fromstring(r"all x.(man(x) -> walks(x))") + p2 = Expression.fromstring(r"man(John)") + c = Expression.fromstring(r"some y.walks(y)") + print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}") + + p = Expression.fromstring(r"some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))") + c = Expression.fromstring(r"some e0.walk(e0,mary)") + print(f"{p} |- {c}: {ResolutionProver().prove(c, [p])}") + + +def resolution_test(e): + f = Expression.fromstring(e) + t = ResolutionProver().prove(f) + print(f"|- {f}: {t}") + + +def test_clausify(): + lexpr = Expression.fromstring + + print(clausify(lexpr("P(x) | Q(x)"))) + print(clausify(lexpr("(P(x) & Q(x)) | R(x)"))) + print(clausify(lexpr("P(x) | (Q(x) & R(x))"))) + print(clausify(lexpr("(P(x) & Q(x)) | (R(x) & S(x))"))) + + print(clausify(lexpr("P(x) | Q(x) | R(x)"))) + print(clausify(lexpr("P(x) | (Q(x) & R(x)) | S(x)"))) + + print(clausify(lexpr("exists x.P(x) | Q(x)"))) + + print(clausify(lexpr("-(-P(x) & Q(x))"))) + print(clausify(lexpr("P(x) <-> Q(x)"))) + print(clausify(lexpr("-(P(x) <-> Q(x))"))) + print(clausify(lexpr("-(all x.P(x))"))) + print(clausify(lexpr("-(some x.P(x))"))) + + print(clausify(lexpr("some x.P(x)"))) + print(clausify(lexpr("some x.all y.P(x,y)"))) + print(clausify(lexpr("all y.some x.P(x,y)"))) + print(clausify(lexpr("all z.all y.some x.P(x,y,z)"))) + print(clausify(lexpr("all x.(all y.P(x,y) -> -all y.(Q(x,y) -> R(x,y)))"))) + + +def demo(): + test_clausify() + print() + testResolutionProver() + print() + + p = Expression.fromstring("man(x)") + print(ResolutionProverCommand(p, [p]).prove()) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58c4ee73fab125bca922f91e4077018c638d56a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96604f3dced80911f3f8b90972dfbc41427722e4 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/counter.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/counter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf455947d23e0b96abb012d3f533a3775b049a3a Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/counter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/models.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d25648e6a2edc04bd25de3ca0a8e1b4d77aae190 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/models.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/preprocessing.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/preprocessing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce6a2eae7cb5620623b8de6635af8c3624408649 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/preprocessing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/smoothing.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/smoothing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6390cf7756b4774eccd2e1d3cc248c6806d68953 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/smoothing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bec4177cc1f1f24977387418242c4380637d380 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/vocabulary.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/vocabulary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63db872fae77c7dfc9aaa906306bd9a6ac490338 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/lm/__pycache__/vocabulary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/lm/api.py b/venv/lib/python3.10/site-packages/nltk/lm/api.py new file mode 100644 index 0000000000000000000000000000000000000000..470c4d4ac4c495c4cf9b7bbb3af66cb2a49a02db --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/lm/api.py @@ -0,0 +1,235 @@ +# Natural Language Toolkit: Language Models +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +"""Language Model Interface.""" + +import random +import warnings +from abc import ABCMeta, abstractmethod +from bisect import bisect +from itertools import accumulate + +from nltk.lm.counter import NgramCounter +from nltk.lm.util import log_base2 +from nltk.lm.vocabulary import Vocabulary + + +class Smoothing(metaclass=ABCMeta): + """Ngram Smoothing Interface + + Implements Chen & Goodman 1995's idea that all smoothing algorithms have + certain features in common. This should ideally allow smoothing algorithms to + work both with Backoff and Interpolation. 
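+
+    Editor's sketch (illustrative only, not one of the estimators shipped in
+    nltk.lm.smoothing): a subclass supplies unigram_score(word) and
+    alpha_gamma(word, context), the latter returning an (alpha, gamma) pair
+    that the interpolated models combine, e.g. a crude add-one flavour::
+
+        class HypotheticalAddOne(Smoothing):
+            # toy example -- not an NLTK class
+            def unigram_score(self, word):
+                return (self.counts.unigrams[word] + 1) / (
+                    self.counts.unigrams.N() + len(self.vocab)
+                )
+
+            def alpha_gamma(self, word, context):
+                denom = self.counts[context].N() + len(self.vocab)
+                alpha = (self.counts[context][word] + 1) / denom
+                gamma = 1 / denom
+                return alpha, gamma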
+ """ + + def __init__(self, vocabulary, counter): + """ + :param vocabulary: The Ngram vocabulary object. + :type vocabulary: nltk.lm.vocab.Vocabulary + :param counter: The counts of the vocabulary items. + :type counter: nltk.lm.counter.NgramCounter + """ + self.vocab = vocabulary + self.counts = counter + + @abstractmethod + def unigram_score(self, word): + raise NotImplementedError() + + @abstractmethod + def alpha_gamma(self, word, context): + raise NotImplementedError() + + +def _mean(items): + """Return average (aka mean) for sequence of items.""" + return sum(items) / len(items) + + +def _random_generator(seed_or_generator): + if isinstance(seed_or_generator, random.Random): + return seed_or_generator + return random.Random(seed_or_generator) + + +def _weighted_choice(population, weights, random_generator=None): + """Like random.choice, but with weights. + + Heavily inspired by python 3.6 `random.choices`. + """ + if not population: + raise ValueError("Can't choose from empty population") + if len(population) != len(weights): + raise ValueError("The number of weights does not match the population") + cum_weights = list(accumulate(weights)) + total = cum_weights[-1] + threshold = random_generator.random() + return population[bisect(cum_weights, total * threshold)] + + +class LanguageModel(metaclass=ABCMeta): + """ABC for Language Models. + + Cannot be directly instantiated itself. + + """ + + def __init__(self, order, vocabulary=None, counter=None): + """Creates new LanguageModel. + + :param vocabulary: If provided, this vocabulary will be used instead + of creating a new one when training. + :type vocabulary: `nltk.lm.Vocabulary` or None + :param counter: If provided, use this object to count ngrams. + :type counter: `nltk.lm.NgramCounter` or None + :param ngrams_fn: If given, defines how sentences in training text are turned to ngram + sequences. + :type ngrams_fn: function or None + :param pad_fn: If given, defines how sentences in training text are padded. + :type pad_fn: function or None + """ + self.order = order + if vocabulary and not isinstance(vocabulary, Vocabulary): + warnings.warn( + f"The `vocabulary` argument passed to {self.__class__.__name__!r} " + "must be an instance of `nltk.lm.Vocabulary`.", + stacklevel=3, + ) + self.vocab = Vocabulary() if vocabulary is None else vocabulary + self.counts = NgramCounter() if counter is None else counter + + def fit(self, text, vocabulary_text=None): + """Trains the model on a text. + + :param text: Training text as a sequence of sentences. + + """ + if not self.vocab: + if vocabulary_text is None: + raise ValueError( + "Cannot fit without a vocabulary or text to create it from." + ) + self.vocab.update(vocabulary_text) + self.counts.update(self.vocab.lookup(sent) for sent in text) + + def score(self, word, context=None): + """Masks out of vocab (OOV) words and computes their model score. + + For model-specific logic of calculating scores, see the `unmasked_score` + method. + """ + return self.unmasked_score( + self.vocab.lookup(word), self.vocab.lookup(context) if context else None + ) + + @abstractmethod + def unmasked_score(self, word, context=None): + """Score a word given some optional context. + + Concrete models are expected to provide an implementation. + Note that this method does not mask its arguments with the OOV label. + Use the `score` method for that. + + :param str word: Word for which we want the score + :param tuple(str) context: Context the word is in. + If `None`, compute unigram score. 
+ :param context: tuple(str) or None + :rtype: float + """ + raise NotImplementedError() + + def logscore(self, word, context=None): + """Evaluate the log score of this word in this context. + + The arguments are the same as for `score` and `unmasked_score`. + + """ + return log_base2(self.score(word, context)) + + def context_counts(self, context): + """Helper method for retrieving counts for a given context. + + Assumes context has been checked and oov words in it masked. + :type context: tuple(str) or None + + """ + return ( + self.counts[len(context) + 1][context] if context else self.counts.unigrams + ) + + def entropy(self, text_ngrams): + """Calculate cross-entropy of model for given evaluation text. + + :param Iterable(tuple(str)) text_ngrams: A sequence of ngram tuples. + :rtype: float + + """ + return -1 * _mean( + [self.logscore(ngram[-1], ngram[:-1]) for ngram in text_ngrams] + ) + + def perplexity(self, text_ngrams): + """Calculates the perplexity of the given text. + + This is simply 2 ** cross-entropy for the text, so the arguments are the same. + + """ + return pow(2.0, self.entropy(text_ngrams)) + + def generate(self, num_words=1, text_seed=None, random_seed=None): + """Generate words from the model. + + :param int num_words: How many words to generate. By default 1. + :param text_seed: Generation can be conditioned on preceding context. + :param random_seed: A random seed or an instance of `random.Random`. If provided, + makes the random sampling part of generation reproducible. + :return: One (str) word or a list of words generated from model. + + Examples: + + >>> from nltk.lm import MLE + >>> lm = MLE(2) + >>> lm.fit([[("a", "b"), ("b", "c")]], vocabulary_text=['a', 'b', 'c']) + >>> lm.fit([[("a",), ("b",), ("c",)]]) + >>> lm.generate(random_seed=3) + 'a' + >>> lm.generate(text_seed=['a']) + 'b' + + """ + text_seed = [] if text_seed is None else list(text_seed) + random_generator = _random_generator(random_seed) + # This is the base recursion case. + if num_words == 1: + context = ( + text_seed[-self.order + 1 :] + if len(text_seed) >= self.order + else text_seed + ) + samples = self.context_counts(self.vocab.lookup(context)) + while context and not samples: + context = context[1:] if len(context) > 1 else [] + samples = self.context_counts(self.vocab.lookup(context)) + # Sorting samples achieves two things: + # - reproducible randomness when sampling + # - turns Mapping into Sequence which `_weighted_choice` expects + samples = sorted(samples) + return _weighted_choice( + samples, + tuple(self.score(w, context) for w in samples), + random_generator, + ) + # We build up text one word at a time using the preceding context. 
+ generated = [] + for _ in range(num_words): + generated.append( + self.generate( + num_words=1, + text_seed=text_seed + generated, + random_seed=random_generator, + ) + ) + return generated diff --git a/venv/lib/python3.10/site-packages/nltk/lm/counter.py b/venv/lib/python3.10/site-packages/nltk/lm/counter.py new file mode 100644 index 0000000000000000000000000000000000000000..6a5ab9c6096d9015b14f4e9f3814dda75417391a --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/lm/counter.py @@ -0,0 +1,163 @@ +# Natural Language Toolkit +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +""" +Language Model Counter +---------------------- +""" + +from collections import defaultdict +from collections.abc import Sequence + +from nltk.probability import ConditionalFreqDist, FreqDist + + +class NgramCounter: + """Class for counting ngrams. + + Will count any ngram sequence you give it ;) + + First we need to make sure we are feeding the counter sentences of ngrams. + + >>> text = [["a", "b", "c", "d"], ["a", "c", "d", "c"]] + >>> from nltk.util import ngrams + >>> text_bigrams = [ngrams(sent, 2) for sent in text] + >>> text_unigrams = [ngrams(sent, 1) for sent in text] + + The counting itself is very simple. + + >>> from nltk.lm import NgramCounter + >>> ngram_counts = NgramCounter(text_bigrams + text_unigrams) + + You can conveniently access ngram counts using standard python dictionary notation. + String keys will give you unigram counts. + + >>> ngram_counts['a'] + 2 + >>> ngram_counts['aliens'] + 0 + + If you want to access counts for higher order ngrams, use a list or a tuple. + These are treated as "context" keys, so what you get is a frequency distribution + over all continuations after the given context. + + >>> sorted(ngram_counts[['a']].items()) + [('b', 1), ('c', 1)] + >>> sorted(ngram_counts[('a',)].items()) + [('b', 1), ('c', 1)] + + This is equivalent to specifying explicitly the order of the ngram (in this case + 2 for bigram) and indexing on the context. + + >>> ngram_counts[2][('a',)] is ngram_counts[['a']] + True + + Note that the keys in `ConditionalFreqDist` cannot be lists, only tuples! + It is generally advisable to use the less verbose and more flexible square + bracket notation. + + To get the count of the full ngram "a b", do this: + + >>> ngram_counts[['a']]['b'] + 1 + + Specifying the ngram order as a number can be useful for accessing all ngrams + in that order. + + >>> ngram_counts[2] + + + The keys of this `ConditionalFreqDist` are the contexts we discussed earlier. + Unigrams can also be accessed with a human-friendly alias. + + >>> ngram_counts.unigrams is ngram_counts[1] + True + + Similarly to `collections.Counter`, you can update counts after initialization. + + >>> ngram_counts['e'] + 0 + >>> ngram_counts.update([ngrams(["d", "e", "f"], 1)]) + >>> ngram_counts['e'] + 1 + + """ + + def __init__(self, ngram_text=None): + """Creates a new NgramCounter. + + If `ngram_text` is specified, counts ngrams from it, otherwise waits for + `update` method to be called explicitly. + + :param ngram_text: Optional text containing sentences of ngrams, as for `update` method. + :type ngram_text: Iterable(Iterable(tuple(str))) or None + + """ + self._counts = defaultdict(ConditionalFreqDist) + self._counts[1] = self.unigrams = FreqDist() + + if ngram_text: + self.update(ngram_text) + + def update(self, ngram_text): + """Updates ngram counts from `ngram_text`. 
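For orientation, here is a minimal end-to-end sketch of the `LanguageModel` interface defined in api.py above, using the concrete `MLE` model and the `padded_everygram_pipeline` helper that the accompanying doctests also import; the two-sentence corpus and variable names are invented purely for illustration, and this is a usage sketch rather than anything the diff itself adds.

    from nltk.lm import MLE
    from nltk.lm.preprocessing import padded_everygram_pipeline

    corpus = [["a", "b", "c"], ["a", "c", "d", "c", "e", "f"]]   # toy sentences
    order = 2
    train_ngrams, vocab_text = padded_everygram_pipeline(order, corpus)

    lm = MLE(order)                       # concrete subclass of LanguageModel
    lm.fit(train_ngrams, vocab_text)      # populates the Vocabulary and NgramCounter

    p = lm.score("b", ("a",))             # P(b | a), with OOV items masked first
    lp = lm.logscore("b", ("a",))         # log base 2 of the same probability
    word = lm.generate(1, text_seed=["a"], random_seed=3)   # sample one continuation

Note that `entropy` and `perplexity` expect an iterable of ngram tuples (for example from `nltk.util.ngrams`) rather than raw sentences.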
+ + Expects `ngram_text` to be a sequence of sentences (sequences). + Each sentence consists of ngrams as tuples of strings. + + :param Iterable(Iterable(tuple(str))) ngram_text: Text containing sentences of ngrams. + :raises TypeError: if the ngrams are not tuples. + + """ + + for sent in ngram_text: + for ngram in sent: + if not isinstance(ngram, tuple): + raise TypeError( + "Ngram <{}> isn't a tuple, " "but {}".format(ngram, type(ngram)) + ) + + ngram_order = len(ngram) + if ngram_order == 1: + self.unigrams[ngram[0]] += 1 + continue + + context, word = ngram[:-1], ngram[-1] + self[ngram_order][context][word] += 1 + + def N(self): + """Returns grand total number of ngrams stored. + + This includes ngrams from all orders, so some duplication is expected. + :rtype: int + + >>> from nltk.lm import NgramCounter + >>> counts = NgramCounter([[("a", "b"), ("c",), ("d", "e")]]) + >>> counts.N() + 3 + + """ + return sum(val.N() for val in self._counts.values()) + + def __getitem__(self, item): + """User-friendly access to ngram counts.""" + if isinstance(item, int): + return self._counts[item] + elif isinstance(item, str): + return self._counts.__getitem__(1)[item] + elif isinstance(item, Sequence): + return self._counts.__getitem__(len(item) + 1)[tuple(item)] + + def __str__(self): + return "<{} with {} ngram orders and {} ngrams>".format( + self.__class__.__name__, len(self._counts), self.N() + ) + + def __len__(self): + return self._counts.__len__() + + def __contains__(self, item): + return item in self._counts diff --git a/venv/lib/python3.10/site-packages/nltk/lm/models.py b/venv/lib/python3.10/site-packages/nltk/lm/models.py new file mode 100644 index 0000000000000000000000000000000000000000..ee5094901a14802b835c934e0054762666cd467f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/lm/models.py @@ -0,0 +1,141 @@ +# Natural Language Toolkit: Language Models +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# Manu Joseph +# URL: +# For license information, see LICENSE.TXT +"""Language Models""" + +from nltk.lm.api import LanguageModel, Smoothing +from nltk.lm.smoothing import AbsoluteDiscounting, KneserNey, WittenBell + + +class MLE(LanguageModel): + """Class for providing MLE ngram model scores. + + Inherits initialization from BaseNgramModel. + """ + + def unmasked_score(self, word, context=None): + """Returns the MLE score for a word given a context. + + Args: + - word is expected to be a string + - context is expected to be something reasonably convertible to a tuple + """ + return self.context_counts(context).freq(word) + + +class Lidstone(LanguageModel): + """Provides Lidstone-smoothed scores. + + In addition to initialization arguments from BaseNgramModel also requires + a number by which to increase the counts, gamma. + """ + + def __init__(self, gamma, *args, **kwargs): + super().__init__(*args, **kwargs) + self.gamma = gamma + + def unmasked_score(self, word, context=None): + """Add-one smoothing: Lidstone or Laplace. + + To see what kind, look at `gamma` attribute on the class. + + """ + counts = self.context_counts(context) + word_count = counts[word] + norm_count = counts.N() + return (word_count + self.gamma) / (norm_count + len(self.vocab) * self.gamma) + + +class Laplace(Lidstone): + """Implements Laplace (add one) smoothing. + + Initialization identical to BaseNgramModel because gamma is always 1. 
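To make the `Lidstone.unmasked_score` arithmetic concrete, the following sketch checks the returned score against the (count + gamma) / (N + |V| * gamma) formula by hand; the single-character "corpus" is made up for the example, and the imports are the same ones used by the lm doctests further down in this diff.

    from nltk.lm import Lidstone
    from nltk.util import everygrams

    sent = list("abcabab")                      # one toy "sentence" of characters
    lm = Lidstone(0.2, 2)                       # gamma=0.2, bigram model
    lm.fit([everygrams(sent, max_len=2)], vocabulary_text=sent)

    counts = lm.context_counts(lm.vocab.lookup(("a",)))
    expected = (counts["b"] + lm.gamma) / (counts.N() + len(lm.vocab) * lm.gamma)
    assert abs(lm.score("b", ("a",)) - expected) < 1e-12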
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(1, *args, **kwargs) + + +class StupidBackoff(LanguageModel): + """Provides StupidBackoff scores. + + In addition to initialization arguments from BaseNgramModel also requires + a parameter alpha with which we scale the lower order probabilities. + Note that this is not a true probability distribution as scores for ngrams + of the same order do not sum up to unity. + """ + + def __init__(self, alpha=0.4, *args, **kwargs): + super().__init__(*args, **kwargs) + self.alpha = alpha + + def unmasked_score(self, word, context=None): + if not context: + # Base recursion + return self.counts.unigrams.freq(word) + counts = self.context_counts(context) + word_count = counts[word] + norm_count = counts.N() + if word_count > 0: + return word_count / norm_count + else: + return self.alpha * self.unmasked_score(word, context[1:]) + + +class InterpolatedLanguageModel(LanguageModel): + """Logic common to all interpolated language models. + + The idea to abstract this comes from Chen & Goodman 1995. + Do not instantiate this class directly! + """ + + def __init__(self, smoothing_cls, order, **kwargs): + params = kwargs.pop("params", {}) + super().__init__(order, **kwargs) + self.estimator = smoothing_cls(self.vocab, self.counts, **params) + + def unmasked_score(self, word, context=None): + if not context: + # The base recursion case: no context, we only have a unigram. + return self.estimator.unigram_score(word) + if not self.counts[context]: + # It can also happen that we have no data for this context. + # In that case we defer to the lower-order ngram. + # This is the same as setting alpha to 0 and gamma to 1. + alpha, gamma = 0, 1 + else: + alpha, gamma = self.estimator.alpha_gamma(word, context) + return alpha + gamma * self.unmasked_score(word, context[1:]) + + +class WittenBellInterpolated(InterpolatedLanguageModel): + """Interpolated version of Witten-Bell smoothing.""" + + def __init__(self, order, **kwargs): + super().__init__(WittenBell, order, **kwargs) + + +class AbsoluteDiscountingInterpolated(InterpolatedLanguageModel): + """Interpolated version of smoothing with absolute discount.""" + + def __init__(self, order, discount=0.75, **kwargs): + super().__init__( + AbsoluteDiscounting, order, params={"discount": discount}, **kwargs + ) + + +class KneserNeyInterpolated(InterpolatedLanguageModel): + """Interpolated version of Kneser-Ney smoothing.""" + + def __init__(self, order, discount=0.1, **kwargs): + if not (0 <= discount <= 1): + raise ValueError( + "Discount must be between 0 and 1 for probabilities to sum to unity." + ) + super().__init__( + KneserNey, order, params={"discount": discount, "order": order}, **kwargs + ) diff --git a/venv/lib/python3.10/site-packages/nltk/lm/smoothing.py b/venv/lib/python3.10/site-packages/nltk/lm/smoothing.py new file mode 100644 index 0000000000000000000000000000000000000000..6761f1ead23f7ab5b410d9a5795b8b4fce189d2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/lm/smoothing.py @@ -0,0 +1,127 @@ +# Natural Language Toolkit: Language Model Unit Tests +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# Manu Joseph +# URL: +# For license information, see LICENSE.TXT +"""Smoothing algorithms for language modeling. + +According to Chen & Goodman 1995 these should work with both Backoff and +Interpolation. 
+""" +from operator import methodcaller + +from nltk.lm.api import Smoothing +from nltk.probability import ConditionalFreqDist + + +def _count_values_gt_zero(distribution): + """Count values that are greater than zero in a distribution. + + Assumes distribution is either a mapping with counts as values or + an instance of `nltk.ConditionalFreqDist`. + """ + as_count = ( + methodcaller("N") + if isinstance(distribution, ConditionalFreqDist) + else lambda count: count + ) + # We explicitly check that values are > 0 to guard against negative counts. + return sum( + 1 for dist_or_count in distribution.values() if as_count(dist_or_count) > 0 + ) + + +class WittenBell(Smoothing): + """Witten-Bell smoothing.""" + + def __init__(self, vocabulary, counter, **kwargs): + super().__init__(vocabulary, counter, **kwargs) + + def alpha_gamma(self, word, context): + alpha = self.counts[context].freq(word) + gamma = self._gamma(context) + return (1.0 - gamma) * alpha, gamma + + def _gamma(self, context): + n_plus = _count_values_gt_zero(self.counts[context]) + return n_plus / (n_plus + self.counts[context].N()) + + def unigram_score(self, word): + return self.counts.unigrams.freq(word) + + +class AbsoluteDiscounting(Smoothing): + """Smoothing with absolute discount.""" + + def __init__(self, vocabulary, counter, discount=0.75, **kwargs): + super().__init__(vocabulary, counter, **kwargs) + self.discount = discount + + def alpha_gamma(self, word, context): + alpha = ( + max(self.counts[context][word] - self.discount, 0) + / self.counts[context].N() + ) + gamma = self._gamma(context) + return alpha, gamma + + def _gamma(self, context): + n_plus = _count_values_gt_zero(self.counts[context]) + return (self.discount * n_plus) / self.counts[context].N() + + def unigram_score(self, word): + return self.counts.unigrams.freq(word) + + +class KneserNey(Smoothing): + """Kneser-Ney Smoothing. + + This is an extension of smoothing with a discount. + + Resources: + - https://pages.ucsd.edu/~rlevy/lign256/winter2008/kneser_ney_mini_example.pdf + - https://www.youtube.com/watch?v=ody1ysUTD7o + - https://medium.com/@dennyc/a-simple-numerical-example-for-kneser-ney-smoothing-nlp-4600addf38b8 + - https://www.cl.uni-heidelberg.de/courses/ss15/smt/scribe6.pdf + - https://www-i6.informatik.rwth-aachen.de/publications/download/951/Kneser-ICASSP-1995.pdf + """ + + def __init__(self, vocabulary, counter, order, discount=0.1, **kwargs): + super().__init__(vocabulary, counter, **kwargs) + self.discount = discount + self._order = order + + def unigram_score(self, word): + word_continuation_count, total_count = self._continuation_counts(word) + return word_continuation_count / total_count + + def alpha_gamma(self, word, context): + prefix_counts = self.counts[context] + word_continuation_count, total_count = ( + (prefix_counts[word], prefix_counts.N()) + if len(context) + 1 == self._order + else self._continuation_counts(word, context) + ) + alpha = max(word_continuation_count - self.discount, 0.0) / total_count + gamma = self.discount * _count_values_gt_zero(prefix_counts) / total_count + return alpha, gamma + + def _continuation_counts(self, word, context=tuple()): + """Count continuations that end with context and word. + + Continuations track unique ngram "types", regardless of how many + instances were observed for each "type". + This is different than raw ngram counts which track number of instances. 
+ """ + higher_order_ngrams_with_context = ( + counts + for prefix_ngram, counts in self.counts[len(context) + 2].items() + if prefix_ngram[1:] == context + ) + higher_order_ngrams_with_word_count, total = 0, 0 + for counts in higher_order_ngrams_with_context: + higher_order_ngrams_with_word_count += int(counts[word] > 0) + total += _count_values_gt_zero(counts) + return higher_order_ngrams_with_word_count, total diff --git a/venv/lib/python3.10/site-packages/nltk/lm/util.py b/venv/lib/python3.10/site-packages/nltk/lm/util.py new file mode 100644 index 0000000000000000000000000000000000000000..483e64c26abd85ad9bb6caf74d3bb38fd9ae7d66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/lm/util.py @@ -0,0 +1,19 @@ +# Natural Language Toolkit +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +"""Language Model Utilities""" + +from math import log + +NEG_INF = float("-inf") +POS_INF = float("inf") + + +def log_base2(score): + """Convenience function for computing logarithms with base 2.""" + if score == 0.0: + return NEG_INF + return log(score, 2) diff --git a/venv/lib/python3.10/site-packages/nltk/lm/vocabulary.py b/venv/lib/python3.10/site-packages/nltk/lm/vocabulary.py new file mode 100644 index 0000000000000000000000000000000000000000..74964b262f78fe305cd9e5445d833683ed172978 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/lm/vocabulary.py @@ -0,0 +1,218 @@ +# Natural Language Toolkit +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ilia Kurenkov +# URL: +# For license information, see LICENSE.TXT +"""Language Model Vocabulary""" + +import sys +from collections import Counter +from collections.abc import Iterable +from functools import singledispatch +from itertools import chain + + +@singledispatch +def _dispatched_lookup(words, vocab): + raise TypeError(f"Unsupported type for looking up in vocabulary: {type(words)}") + + +@_dispatched_lookup.register(Iterable) +def _(words, vocab): + """Look up a sequence of words in the vocabulary. + + Returns an iterator over looked up words. + + """ + return tuple(_dispatched_lookup(w, vocab) for w in words) + + +@_dispatched_lookup.register(str) +def _string_lookup(word, vocab): + """Looks up one word in the vocabulary.""" + return word if word in vocab else vocab.unk_label + + +class Vocabulary: + """Stores language model vocabulary. + + Satisfies two common language modeling requirements for a vocabulary: + + - When checking membership and calculating its size, filters items + by comparing their counts to a cutoff value. + - Adds a special "unknown" token which unseen words are mapped to. + + >>> words = ['a', 'c', '-', 'd', 'c', 'a', 'b', 'r', 'a', 'c', 'd'] + >>> from nltk.lm import Vocabulary + >>> vocab = Vocabulary(words, unk_cutoff=2) + + Tokens with counts greater than or equal to the cutoff value will + be considered part of the vocabulary. + + >>> vocab['c'] + 3 + >>> 'c' in vocab + True + >>> vocab['d'] + 2 + >>> 'd' in vocab + True + + Tokens with frequency counts less than the cutoff value will be considered not + part of the vocabulary even though their entries in the count dictionary are + preserved. + + >>> vocab['b'] + 1 + >>> 'b' in vocab + False + >>> vocab['aliens'] + 0 + >>> 'aliens' in vocab + False + + Keeping the count entries for seen words allows us to change the cutoff value + without having to recalculate the counts. 
+ + >>> vocab2 = Vocabulary(vocab.counts, unk_cutoff=1) + >>> "b" in vocab2 + True + + The cutoff value influences not only membership checking but also the result of + getting the size of the vocabulary using the built-in `len`. + Note that while the number of keys in the vocabulary's counter stays the same, + the items in the vocabulary differ depending on the cutoff. + We use `sorted` to demonstrate because it keeps the order consistent. + + >>> sorted(vocab2.counts) + ['-', 'a', 'b', 'c', 'd', 'r'] + >>> sorted(vocab2) + ['-', '', 'a', 'b', 'c', 'd', 'r'] + >>> sorted(vocab.counts) + ['-', 'a', 'b', 'c', 'd', 'r'] + >>> sorted(vocab) + ['', 'a', 'c', 'd'] + + In addition to items it gets populated with, the vocabulary stores a special + token that stands in for so-called "unknown" items. By default it's "". + + >>> "" in vocab + True + + We can look up words in a vocabulary using its `lookup` method. + "Unseen" words (with counts less than cutoff) are looked up as the unknown label. + If given one word (a string) as an input, this method will return a string. + + >>> vocab.lookup("a") + 'a' + >>> vocab.lookup("aliens") + '' + + If given a sequence, it will return an tuple of the looked up words. + + >>> vocab.lookup(["p", 'a', 'r', 'd', 'b', 'c']) + ('', 'a', '', 'd', '', 'c') + + It's possible to update the counts after the vocabulary has been created. + In general, the interface is the same as that of `collections.Counter`. + + >>> vocab['b'] + 1 + >>> vocab.update(["b", "b", "c"]) + >>> vocab['b'] + 3 + """ + + def __init__(self, counts=None, unk_cutoff=1, unk_label=""): + """Create a new Vocabulary. + + :param counts: Optional iterable or `collections.Counter` instance to + pre-seed the Vocabulary. In case it is iterable, counts + are calculated. + :param int unk_cutoff: Words that occur less frequently than this value + are not considered part of the vocabulary. + :param unk_label: Label for marking words not part of vocabulary. + + """ + self.unk_label = unk_label + if unk_cutoff < 1: + raise ValueError(f"Cutoff value cannot be less than 1. Got: {unk_cutoff}") + self._cutoff = unk_cutoff + + self.counts = Counter() + self.update(counts if counts is not None else "") + + @property + def cutoff(self): + """Cutoff value. + + Items with count below this value are not considered part of vocabulary. + + """ + return self._cutoff + + def update(self, *counter_args, **counter_kwargs): + """Update vocabulary counts. + + Wraps `collections.Counter.update` method. + + """ + self.counts.update(*counter_args, **counter_kwargs) + self._len = sum(1 for _ in self) + + def lookup(self, words): + """Look up one or more words in the vocabulary. + + If passed one word as a string will return that word or `self.unk_label`. + Otherwise will assume it was passed a sequence of words, will try to look + each of them up and return an iterator over the looked up words. + + :param words: Word(s) to look up. 
+ :type words: Iterable(str) or str + :rtype: generator(str) or str + :raises: TypeError for types other than strings or iterables + + >>> from nltk.lm import Vocabulary + >>> vocab = Vocabulary(["a", "b", "c", "a", "b"], unk_cutoff=2) + >>> vocab.lookup("a") + 'a' + >>> vocab.lookup("aliens") + '' + >>> vocab.lookup(["a", "b", "c", ["x", "b"]]) + ('a', 'b', '', ('', 'b')) + + """ + return _dispatched_lookup(words, self) + + def __getitem__(self, item): + return self._cutoff if item == self.unk_label else self.counts[item] + + def __contains__(self, item): + """Only consider items with counts GE to cutoff as being in the + vocabulary.""" + return self[item] >= self.cutoff + + def __iter__(self): + """Building on membership check define how to iterate over + vocabulary.""" + return chain( + (item for item in self.counts if item in self), + [self.unk_label] if self.counts else [], + ) + + def __len__(self): + """Computing size of vocabulary reflects the cutoff.""" + return self._len + + def __eq__(self, other): + return ( + self.unk_label == other.unk_label + and self.cutoff == other.cutoff + and self.counts == other.counts + ) + + def __str__(self): + return "<{} with cutoff={} unk_label='{}' and {} items>".format( + self.__class__.__name__, self.cutoff, self.unk_label, len(self) + ) diff --git a/venv/lib/python3.10/site-packages/nltk/test/lm.doctest b/venv/lib/python3.10/site-packages/nltk/test/lm.doctest new file mode 100644 index 0000000000000000000000000000000000000000..9668582b3f90f8bcc5ee48f72b0a41d2da9660e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/test/lm.doctest @@ -0,0 +1,135 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +.. -*- coding: utf-8 -*- + + +Regression Tests +================ + + +Issue 167 +--------- +https://github.com/nltk/nltk/issues/167 + + >>> from nltk.corpus import brown + >>> from nltk.lm.preprocessing import padded_everygram_pipeline + >>> ngram_order = 3 + >>> train_data, vocab_data = padded_everygram_pipeline( + ... ngram_order, + ... brown.sents(categories="news") + ... ) + + >>> from nltk.lm import WittenBellInterpolated + >>> lm = WittenBellInterpolated(ngram_order) + >>> lm.fit(train_data, vocab_data) + + + + +Sentence containing an unseen word should result in infinite entropy because +Witten-Bell is based ultimately on MLE, which cannot handle unseen ngrams. +Crucially, it shouldn't raise any exceptions for unseen words. + + >>> from nltk.util import ngrams + >>> sent = ngrams("This is a sentence with the word aaddvark".split(), 3) + >>> lm.entropy(sent) + inf + +If we remove all unseen ngrams from the sentence, we'll get a non-infinite value +for the entropy. + + >>> sent = ngrams("This is a sentence".split(), 3) + >>> round(lm.entropy(sent), 14) + 10.23701322869105 + + +Issue 367 +--------- +https://github.com/nltk/nltk/issues/367 + +Reproducing Dan Blanchard's example: +https://github.com/nltk/nltk/issues/367#issuecomment-14646110 + + >>> from nltk.lm import Lidstone, Vocabulary + >>> word_seq = list('aaaababaaccbacb') + >>> ngram_order = 2 + >>> from nltk.util import everygrams + >>> train_data = [everygrams(word_seq, max_len=ngram_order)] + >>> V = Vocabulary(['a', 'b', 'c', '']) + >>> lm = Lidstone(0.2, ngram_order, vocabulary=V) + >>> lm.fit(train_data) + +For doctest to work we have to sort the vocabulary keys. 
+ + >>> V_keys = sorted(V) + >>> round(sum(lm.score(w, ("b",)) for w in V_keys), 6) + 1.0 + >>> round(sum(lm.score(w, ("a",)) for w in V_keys), 6) + 1.0 + + >>> [lm.score(w, ("b",)) for w in V_keys] + [0.05, 0.05, 0.8, 0.05, 0.05] + >>> [round(lm.score(w, ("a",)), 4) for w in V_keys] + [0.0222, 0.0222, 0.4667, 0.2444, 0.2444] + + +Here's reproducing @afourney's comment: +https://github.com/nltk/nltk/issues/367#issuecomment-15686289 + + >>> sent = ['foo', 'foo', 'foo', 'foo', 'bar', 'baz'] + >>> ngram_order = 3 + >>> from nltk.lm.preprocessing import padded_everygram_pipeline + >>> train_data, vocab_data = padded_everygram_pipeline(ngram_order, [sent]) + >>> from nltk.lm import Lidstone + >>> lm = Lidstone(0.2, ngram_order) + >>> lm.fit(train_data, vocab_data) + +The vocabulary includes the "UNK" symbol as well as two padding symbols. + + >>> len(lm.vocab) + 6 + >>> word = "foo" + >>> context = ("bar", "baz") + +The raw counts. + + >>> lm.context_counts(context)[word] + 0 + >>> lm.context_counts(context).N() + 1 + +Counts with Lidstone smoothing. + + >>> lm.context_counts(context)[word] + lm.gamma + 0.2 + >>> lm.context_counts(context).N() + len(lm.vocab) * lm.gamma + 2.2 + +Without any backoff, just using Lidstone smoothing, P("foo" | "bar", "baz") should be: +0.2 / 2.2 ~= 0.090909 + + >>> round(lm.score(word, context), 6) + 0.090909 + + +Issue 380 +--------- +https://github.com/nltk/nltk/issues/380 + +Reproducing setup akin to this comment: +https://github.com/nltk/nltk/issues/380#issue-12879030 + +For speed take only the first 100 sentences of reuters. Shouldn't affect the test. + + >>> from nltk.corpus import reuters + >>> sents = reuters.sents()[:100] + >>> ngram_order = 3 + >>> from nltk.lm.preprocessing import padded_everygram_pipeline + >>> train_data, vocab_data = padded_everygram_pipeline(ngram_order, sents) + + >>> from nltk.lm import Lidstone + >>> lm = Lidstone(0.2, ngram_order) + >>> lm.fit(train_data, vocab_data) + >>> lm.score("said", ("",)) < 1 + True diff --git a/venv/lib/python3.10/site-packages/nltk/test/relextract.doctest b/venv/lib/python3.10/site-packages/nltk/test/relextract.doctest new file mode 100644 index 0000000000000000000000000000000000000000..83c08d4bb0dfaed9efe430ff6b114de999445fd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/test/relextract.doctest @@ -0,0 +1,263 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +====================== +Information Extraction +====================== + +Information Extraction standardly consists of three subtasks: + +#. Named Entity Recognition + +#. Relation Extraction + +#. Template Filling + +Named Entities +~~~~~~~~~~~~~~ + +The IEER corpus is marked up for a variety of Named Entities. A Named +Entity (more strictly, a Named Entity mention) is a name of an +entity belonging to a specified class. For example, the Named Entity +classes in IEER include PERSON, LOCATION, ORGANIZATION, DATE and so +on. Within NLTK, Named Entities are represented as subtrees within a +chunk structure: the class name is treated as node label, while the +entity mention itself appears as the leaves of the subtree. This is +illustrated below, where we have show an extract of the chunk +representation of document NYT_19980315.064: + + >>> from nltk.corpus import ieer + >>> docs = ieer.parsed_docs('NYT_19980315') + >>> tree = docs[1].text + >>> print(tree) + (DOCUMENT + ... + ``It's + a + chance + to + think + about + first-level + questions,'' + said + Ms. 
+ (PERSON Cohn) + , + a + partner + in + the + (ORGANIZATION McGlashan & Sarrail) + firm + in + (LOCATION San Mateo) + , + (LOCATION Calif.) + ...) + +Thus, the Named Entity mentions in this example are *Cohn*, *McGlashan & +Sarrail*, *San Mateo* and *Calif.*. + +The CoNLL2002 Dutch and Spanish data is treated similarly, although in +this case, the strings are also POS tagged. + + >>> from nltk.corpus import conll2002 + >>> for doc in conll2002.chunked_sents('ned.train')[27]: + ... print(doc) + ('Het', 'Art') + (ORG Hof/N van/Prep Cassatie/N) + ('verbrak', 'V') + ('het', 'Art') + ('arrest', 'N') + ('zodat', 'Conj') + ('het', 'Pron') + ('moest', 'V') + ('worden', 'V') + ('overgedaan', 'V') + ('door', 'Prep') + ('het', 'Art') + ('hof', 'N') + ('van', 'Prep') + ('beroep', 'N') + ('van', 'Prep') + (LOC Antwerpen/N) + ('.', 'Punc') + +Relation Extraction +~~~~~~~~~~~~~~~~~~~ + +Relation Extraction standardly consists of identifying specified +relations between Named Entities. For example, assuming that we can +recognize ORGANIZATIONs and LOCATIONs in text, we might want to also +recognize pairs *(o, l)* of these kinds of entities such that *o* is +located in *l*. + +The `sem.relextract` module provides some tools to help carry out a +simple version of this task. The `tree2semi_rel()` function splits a chunk +document into a list of two-member lists, each of which consists of a +(possibly empty) string followed by a `Tree` (i.e., a Named Entity): + + >>> from nltk.sem import relextract + >>> pairs = relextract.tree2semi_rel(tree) + >>> for s, tree in pairs[18:22]: + ... print('("...%s", %s)' % (" ".join(s[-5:]),tree)) + ("...about first-level questions,'' said Ms.", (PERSON Cohn)) + ("..., a partner in the", (ORGANIZATION McGlashan & Sarrail)) + ("...firm in", (LOCATION San Mateo)) + ("...,", (LOCATION Calif.)) + +The function `semi_rel2reldict()` processes triples of these pairs, i.e., +pairs of the form ``((string1, Tree1), (string2, Tree2), (string3, +Tree3))`` and outputs a dictionary (a `reldict`) in which ``Tree1`` is +the subject of the relation, ``string2`` is the filler +and ``Tree3`` is the object of the relation. ``string1`` and ``string3`` are +stored as left and right context respectively. + + >>> reldicts = relextract.semi_rel2reldict(pairs) + >>> for k, v in sorted(reldicts[0].items()): + ... print(k, '=>', v) + filler => of messages to their own ``Cyberia'' ... + lcon => transactions.'' Each week, they post + objclass => ORGANIZATION + objsym => white_house + objtext => White House + rcon => for access to its planned + subjclass => CARDINAL + subjsym => hundreds + subjtext => hundreds + untagged_filler => of messages to their own ``Cyberia'' ... + +The next example shows some of the values for two `reldict`\ s +corresponding to the ``'NYT_19980315'`` text extract shown earlier. + + >>> for r in reldicts[18:20]: + ... print('=' * 20) + ... print(r['subjtext']) + ... print(r['filler']) + ... print(r['objtext']) + ==================== + Cohn + , a partner in the + McGlashan & Sarrail + ==================== + McGlashan & Sarrail + firm in + San Mateo + +The function `relextract()` allows us to filter the `reldict`\ s +according to the classes of the subject and object named entities. In +addition, we can specify that the filler text has to match a given +regular expression, as illustrated in the next example. Here, we are +looking for pairs of entities in the IN relation, where IN has +signature . 
+ + >>> import re + >>> IN = re.compile(r'.*\bin\b(?!\b.+ing\b)') + >>> for fileid in ieer.fileids(): + ... for doc in ieer.parsed_docs(fileid): + ... for rel in relextract.extract_rels('ORG', 'LOC', doc, corpus='ieer', pattern = IN): + ... print(relextract.rtuple(rel)) + [ORG: 'Christian Democrats'] ', the leading political forces in' [LOC: 'Italy'] + [ORG: 'AP'] ') _ Lebanese guerrillas attacked Israeli forces in southern' [LOC: 'Lebanon'] + [ORG: 'Security Council'] 'adopted Resolution 425. Huge yellow banners hung across intersections in' [LOC: 'Beirut'] + [ORG: 'U.N.'] 'failures in' [LOC: 'Africa'] + [ORG: 'U.N.'] 'peacekeeping operation in' [LOC: 'Somalia'] + [ORG: 'U.N.'] 'partners on a more effective role in' [LOC: 'Africa'] + [ORG: 'AP'] ') _ A bomb exploded in a mosque in central' [LOC: 'San`a'] + [ORG: 'Krasnoye Sormovo'] 'shipyard in the Soviet city of' [LOC: 'Gorky'] + [ORG: 'Kelab Golf Darul Ridzuan'] 'in' [LOC: 'Perak'] + [ORG: 'U.N.'] 'peacekeeping operation in' [LOC: 'Somalia'] + [ORG: 'WHYY'] 'in' [LOC: 'Philadelphia'] + [ORG: 'McGlashan & Sarrail'] 'firm in' [LOC: 'San Mateo'] + [ORG: 'Freedom Forum'] 'in' [LOC: 'Arlington'] + [ORG: 'Brookings Institution'] ', the research group in' [LOC: 'Washington'] + [ORG: 'Idealab'] ', a self-described business incubator based in' [LOC: 'Los Angeles'] + [ORG: 'Open Text'] ', based in' [LOC: 'Waterloo'] + ... + +The next example illustrates a case where the pattern is a disjunction +of roles that a PERSON can occupy in an ORGANIZATION. + + >>> roles = r""" + ... (.*( + ... analyst| + ... chair(wo)?man| + ... commissioner| + ... counsel| + ... director| + ... economist| + ... editor| + ... executive| + ... foreman| + ... governor| + ... head| + ... lawyer| + ... leader| + ... librarian).*)| + ... manager| + ... partner| + ... president| + ... producer| + ... professor| + ... researcher| + ... spokes(wo)?man| + ... writer| + ... ,\sof\sthe?\s* # "X, of (the) Y" + ... """ + >>> ROLES = re.compile(roles, re.VERBOSE) + >>> for fileid in ieer.fileids(): + ... for doc in ieer.parsed_docs(fileid): + ... for rel in relextract.extract_rels('PER', 'ORG', doc, corpus='ieer', pattern=ROLES): + ... print(relextract.rtuple(rel)) + [PER: 'Kivutha Kibwana'] ', of the' [ORG: 'National Convention Assembly'] + [PER: 'Boban Boskovic'] ', chief executive of the' [ORG: 'Plastika'] + [PER: 'Annan'] ', the first sub-Saharan African to head the' [ORG: 'United Nations'] + [PER: 'Kiriyenko'] 'became a foreman at the' [ORG: 'Krasnoye Sormovo'] + [PER: 'Annan'] ', the first sub-Saharan African to head the' [ORG: 'United Nations'] + [PER: 'Mike Godwin'] ', chief counsel for the' [ORG: 'Electronic Frontier Foundation'] + ... + +In the case of the CoNLL2002 data, we can include POS tags in the +query pattern. This example also illustrates how the output can be +presented as something that looks more like a clause in a logical language. + + >>> de = """ + ... .* + ... ( + ... de/SP| + ... del/SP + ... ) + ... """ + >>> DE = re.compile(de, re.VERBOSE) + >>> rels = [rel for doc in conll2002.chunked_sents('esp.train') + ... for rel in relextract.extract_rels('ORG', 'LOC', doc, corpus='conll2002', pattern = DE)] + >>> for r in rels[:10]: + ... 
print(relextract.clause(r, relsym='DE')) + DE('tribunal_supremo', 'victoria') + DE('museo_de_arte', 'alcorc\xf3n') + DE('museo_de_bellas_artes', 'a_coru\xf1a') + DE('siria', 'l\xedbano') + DE('uni\xf3n_europea', 'pek\xedn') + DE('ej\xe9rcito', 'rogberi') + DE('juzgado_de_instrucci\xf3n_n\xfamero_1', 'san_sebasti\xe1n') + DE('psoe', 'villanueva_de_la_serena') + DE('ej\xe9rcito', 'l\xedbano') + DE('juzgado_de_lo_penal_n\xfamero_2', 'ceuta') + >>> vnv = """ + ... ( + ... is/V| + ... was/V| + ... werd/V| + ... wordt/V + ... ) + ... .* + ... van/Prep + ... """ + >>> VAN = re.compile(vnv, re.VERBOSE) + >>> for doc in conll2002.chunked_sents('ned.train'): + ... for r in relextract.extract_rels('PER', 'ORG', doc, corpus='conll2002', pattern=VAN): + ... print(relextract.clause(r, relsym="VAN")) + VAN("cornet_d'elzius", 'buitenlandse_handel') + VAN('johan_rottiers', 'kardinaal_van_roey_instituut') + VAN('annie_lennox', 'eurythmics') diff --git a/venv/lib/python3.10/site-packages/nltk/test/treetransforms.doctest b/venv/lib/python3.10/site-packages/nltk/test/treetransforms.doctest new file mode 100644 index 0000000000000000000000000000000000000000..a1ea0eb6b61914644d80170e50b87e1dd03c6413 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/test/treetransforms.doctest @@ -0,0 +1,154 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +------------------------------------------- +Unit tests for the TreeTransformation class +------------------------------------------- + + >>> from copy import deepcopy + >>> from nltk.tree import Tree, collapse_unary, chomsky_normal_form, un_chomsky_normal_form + + >>> tree_string = "(TOP (S (S (VP (VBN Turned) (ADVP (RB loose)) (PP (IN in) (NP (NP (NNP Shane) (NNP Longman) (POS 's)) (NN trading) (NN room))))) (, ,) (NP (DT the) (NN yuppie) (NNS dealers)) (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) (. .)))" + + >>> tree = Tree.fromstring(tree_string) + >>> print(tree) + (TOP + (S + (S + (VP + (VBN Turned) + (ADVP (RB loose)) + (PP + (IN in) + (NP + (NP (NNP Shane) (NNP Longman) (POS 's)) + (NN trading) + (NN room))))) + (, ,) + (NP (DT the) (NN yuppie) (NNS dealers)) + (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) + (. .))) + +Make a copy of the original tree and collapse the subtrees with only one child + + >>> collapsedTree = deepcopy(tree) + >>> collapse_unary(collapsedTree) + >>> print(collapsedTree) + (TOP + (S + (S+VP + (VBN Turned) + (ADVP (RB loose)) + (PP + (IN in) + (NP + (NP (NNP Shane) (NNP Longman) (POS 's)) + (NN trading) + (NN room)))) + (, ,) + (NP (DT the) (NN yuppie) (NNS dealers)) + (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) + (. .))) + + >>> collapsedTree2 = deepcopy(tree) + >>> collapse_unary(collapsedTree2, collapsePOS=True, collapseRoot=True) + >>> print(collapsedTree2) + (TOP+S + (S+VP + (VBN Turned) + (ADVP+RB loose) + (PP + (IN in) + (NP + (NP (NNP Shane) (NNP Longman) (POS 's)) + (NN trading) + (NN room)))) + (, ,) + (NP (DT the) (NN yuppie) (NNS dealers)) + (VP (AUX do) (NP (NP+RB little) (ADJP+RB right))) + (. .)) + +Convert the tree to Chomsky Normal Form i.e. each subtree has either two +subtree children or a single leaf value. This conversion can be performed +using either left- or right-factoring. 
+ + >>> cnfTree = deepcopy(collapsedTree) + >>> chomsky_normal_form(cnfTree, factor='left') + >>> print(cnfTree) + (TOP + (S + (S| + (S| + (S| + (S+VP + (S+VP| (VBN Turned) (ADVP (RB loose))) + (PP + (IN in) + (NP + (NP| + (NP + (NP| (NNP Shane) (NNP Longman)) + (POS 's)) + (NN trading)) + (NN room)))) + (, ,)) + (NP (NP| (DT the) (NN yuppie)) (NNS dealers))) + (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))) + (. .))) + + >>> cnfTree = deepcopy(collapsedTree) + >>> chomsky_normal_form(cnfTree, factor='right') + >>> print(cnfTree) + (TOP + (S + (S+VP + (VBN Turned) + (S+VP| + (ADVP (RB loose)) + (PP + (IN in) + (NP + (NP (NNP Shane) (NP| (NNP Longman) (POS 's))) + (NP| (NN trading) (NN room)))))) + (S|<,-NP-VP-.> + (, ,) + (S| + (NP (DT the) (NP| (NN yuppie) (NNS dealers))) + (S| + (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) + (. .)))))) + +Employ some Markov smoothing to make the artificial node labels a bit more +readable. See the treetransforms.py documentation for more details. + + >>> markovTree = deepcopy(collapsedTree) + >>> chomsky_normal_form(markovTree, horzMarkov=2, vertMarkov=1) + >>> print(markovTree) + (TOP + (S^ + (S+VP^ + (VBN Turned) + (S+VP|^ + (ADVP^ (RB loose)) + (PP^ + (IN in) + (NP^ + (NP^ + (NNP Shane) + (NP|^ (NNP Longman) (POS 's))) + (NP|^ (NN trading) (NN room)))))) + (S|<,-NP>^ + (, ,) + (S|^ + (NP^ (DT the) (NP|^ (NN yuppie) (NNS dealers))) + (S|^ + (VP^ + (AUX do) + (NP^ (NP^ (RB little)) (ADJP^ (RB right)))) + (. .)))))) + +Convert the transformed tree back to its original form + + >>> un_chomsky_normal_form(markovTree) + >>> tree == markovTree + True
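As a closing usage sketch of the transforms exercised above (the sentence and labels are invented): binarizing a flat constituent with `chomsky_normal_form` and then applying `un_chomsky_normal_form` should round-trip back to the original tree, mirroring the Markov-smoothed round trip in the doctest.

    from nltk.tree import Tree, chomsky_normal_form, un_chomsky_normal_form

    # The NP has three children, so right-factoring introduces an artificial
    # node (with a label along the lines of NP|<JJ-NN> when horzMarkov=2).
    tree = Tree.fromstring("(S (NP (DT the) (JJ old) (NN cat)) (VP (VBD slept)))")
    cnf = tree.copy(deep=True)
    chomsky_normal_form(cnf, factor="right", horzMarkov=2)

    un_chomsky_normal_form(cnf)     # strip the artificial nodes again
    assert cnf == tree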