diff --git a/ckpts/universal/global_step40/zero/13.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/13.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1186c8470dcfe9bb9aad7dfa227c66656b0ed02c
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/13.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66a6d58d73c0b0663908f5e90e740642720129be1eb0497ee8b6229a960fea7d
+size 16778396
diff --git a/ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b825210e39b086a997c9dbfe933583a4e97f3151
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/8.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0df9af31154d33866f9a480c0654328cdd95dbd206e8da8b36fc4943d7ea4318
+size 33555627
diff --git a/venv/lib/python3.10/site-packages/chardet/__pycache__/__main__.cpython-310.pyc b/venv/lib/python3.10/site-packages/chardet/__pycache__/__main__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de633edda010f4a94c841012d90fdfe2e0b7b3b5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/chardet/__pycache__/__main__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/chardet/__pycache__/big5freq.cpython-310.pyc b/venv/lib/python3.10/site-packages/chardet/__pycache__/big5freq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4e64e041995164584fc9c5f79407ed6b58c7f28
Binary files /dev/null and b/venv/lib/python3.10/site-packages/chardet/__pycache__/big5freq.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/chardet/__pycache__/codingstatemachinedict.cpython-310.pyc b/venv/lib/python3.10/site-packages/chardet/__pycache__/codingstatemachinedict.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2fa110343eadd8c2ed8f8ecf18cc8a2b4565339
Binary files /dev/null and b/venv/lib/python3.10/site-packages/chardet/__pycache__/codingstatemachinedict.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/chardet/__pycache__/escprober.cpython-310.pyc b/venv/lib/python3.10/site-packages/chardet/__pycache__/escprober.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35c43848971e1dae93b0bc9d497c08cd6330cb84
Binary files /dev/null and b/venv/lib/python3.10/site-packages/chardet/__pycache__/escprober.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/chardet/__pycache__/escsm.cpython-310.pyc b/venv/lib/python3.10/site-packages/chardet/__pycache__/escsm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59e00afa55f139403bf2f3b4ce8855f2b5444ed1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/chardet/__pycache__/escsm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/chardet/__pycache__/euckrfreq.cpython-310.pyc b/venv/lib/python3.10/site-packages/chardet/__pycache__/euckrfreq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9fad7c84882a48c41c2dde2111fc39820a613d25
Binary files /dev/null and b/venv/lib/python3.10/site-packages/chardet/__pycache__/euckrfreq.cpython-310.pyc differ
diff --git
a/venv/lib/python3.10/site-packages/chardet/__pycache__/euctwprober.cpython-310.pyc b/venv/lib/python3.10/site-packages/chardet/__pycache__/euctwprober.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d79270588fe3d8a67239581842eada716561817 Binary files /dev/null and b/venv/lib/python3.10/site-packages/chardet/__pycache__/euctwprober.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/chardet/__pycache__/latin1prober.cpython-310.pyc b/venv/lib/python3.10/site-packages/chardet/__pycache__/latin1prober.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2c78bdb9c33ab57165fdf4404188ce7cdbe5cc4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/chardet/__pycache__/latin1prober.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/chardet/__pycache__/sbcharsetprober.cpython-310.pyc b/venv/lib/python3.10/site-packages/chardet/__pycache__/sbcharsetprober.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdebe7f271ff0772571becab75b21daef04565df Binary files /dev/null and b/venv/lib/python3.10/site-packages/chardet/__pycache__/sbcharsetprober.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/chardet/__pycache__/sjisprober.cpython-310.pyc b/venv/lib/python3.10/site-packages/chardet/__pycache__/sjisprober.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c786967f2c7d4d307e41dc43523b4d65f3cf9e62 Binary files /dev/null and b/venv/lib/python3.10/site-packages/chardet/__pycache__/sjisprober.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/VERSION b/venv/lib/python3.10/site-packages/nltk/VERSION new file mode 100644 index 0000000000000000000000000000000000000000..0603aab1e29c286f7779a50891582571b616825c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/VERSION @@ -0,0 +1 @@ +3.8.1 diff --git a/venv/lib/python3.10/site-packages/nltk/__init__.py b/venv/lib/python3.10/site-packages/nltk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b87cf230510581745ced457e373a7ecc7c3c9006 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/__init__.py @@ -0,0 +1,209 @@ +# Natural Language Toolkit (NLTK) +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +The Natural Language Toolkit (NLTK) is an open source Python library +for Natural Language Processing. A free online book is available. +(If you use the library for academic research, please cite the book.) + +Steven Bird, Ewan Klein, and Edward Loper (2009). +Natural Language Processing with Python. O'Reilly Media Inc. +https://www.nltk.org/book/ + +isort:skip_file +""" + +import os + +# ////////////////////////////////////////////////////// +# Metadata +# ////////////////////////////////////////////////////// + +# Version. For each new release, the version number should be updated +# in the file VERSION. +try: + # If a VERSION file exists, use it! + version_file = os.path.join(os.path.dirname(__file__), "VERSION") + with open(version_file) as infile: + __version__ = infile.read().strip() +except NameError: + __version__ = "unknown (running code interactively?)" +except OSError as ex: + __version__ = "unknown (%s)" % ex + +if __doc__ is not None: # fix for the ``python -OO`` + __doc__ += "\n@version: " + __version__ + + +# Copyright notice +__copyright__ = """\ +Copyright (C) 2001-2023 NLTK Project. 
+ +Distributed and Licensed under the Apache License, Version 2.0, +which is included by reference. +""" + +__license__ = "Apache License, Version 2.0" +# Description of the toolkit, keywords, and the project's primary URL. +__longdescr__ = """\ +The Natural Language Toolkit (NLTK) is a Python package for +natural language processing. NLTK requires Python 3.7, 3.8, 3.9, 3.10 or 3.11.""" +__keywords__ = [ + "NLP", + "CL", + "natural language processing", + "computational linguistics", + "parsing", + "tagging", + "tokenizing", + "syntax", + "linguistics", + "language", + "natural language", + "text analytics", +] +__url__ = "https://www.nltk.org/" + +# Maintainer, contributors, etc. +__maintainer__ = "NLTK Team" +__maintainer_email__ = "nltk.team@gmail.com" +__author__ = __maintainer__ +__author_email__ = __maintainer_email__ + +# "Trove" classifiers for Python Package Index. +__classifiers__ = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Information Technology", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Human Machine Interfaces", + "Topic :: Scientific/Engineering :: Information Analysis", + "Topic :: Text Processing", + "Topic :: Text Processing :: Filters", + "Topic :: Text Processing :: General", + "Topic :: Text Processing :: Indexing", + "Topic :: Text Processing :: Linguistic", +] + +from nltk.internals import config_java + +# support numpy from pypy +try: + import numpypy +except ImportError: + pass + +# Override missing methods on environments where it cannot be used like GAE. 
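(Editorial aside, not part of the diff: the metadata defined above is what ``import nltk`` exposes at runtime. A minimal sketch of inspecting it follows, assuming NLTK 3.8.1 is installed; the ``subprocess`` stubs that the comment above refers to continue immediately after this aside.)

```python
import nltk

print(nltk.__version__)   # read from the VERSION file at import time, e.g. "3.8.1"
print(nltk.__license__)   # "Apache License, Version 2.0"
print(nltk.__url__)       # "https://www.nltk.org/"
nltk.demo()               # points you at the per-module demo() functions
```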
+import subprocess + +if not hasattr(subprocess, "PIPE"): + + def _fake_PIPE(*args, **kwargs): + raise NotImplementedError("subprocess.PIPE is not supported.") + + subprocess.PIPE = _fake_PIPE +if not hasattr(subprocess, "Popen"): + + def _fake_Popen(*args, **kwargs): + raise NotImplementedError("subprocess.Popen is not supported.") + + subprocess.Popen = _fake_Popen + +########################################################### +# TOP-LEVEL MODULES +########################################################### + +# Import top-level functionality into top-level namespace + +from nltk.collocations import * +from nltk.decorators import decorator, memoize +from nltk.featstruct import * +from nltk.grammar import * +from nltk.probability import * +from nltk.text import * +from nltk.util import * +from nltk.jsontags import * + +########################################################### +# PACKAGES +########################################################### + +from nltk.chunk import * +from nltk.classify import * +from nltk.inference import * +from nltk.metrics import * +from nltk.parse import * +from nltk.tag import * +from nltk.tokenize import * +from nltk.translate import * +from nltk.tree import * +from nltk.sem import * +from nltk.stem import * + +# Packages which can be lazily imported +# (a) we don't import * +# (b) they're slow to import or have run-time dependencies +# that can safely fail at run time + +from nltk import lazyimport + +app = lazyimport.LazyModule("app", locals(), globals()) +chat = lazyimport.LazyModule("chat", locals(), globals()) +corpus = lazyimport.LazyModule("corpus", locals(), globals()) +draw = lazyimport.LazyModule("draw", locals(), globals()) +toolbox = lazyimport.LazyModule("toolbox", locals(), globals()) + +# Optional loading + +try: + import numpy +except ImportError: + pass +else: + from nltk import cluster + +from nltk.downloader import download, download_shell + +try: + import tkinter +except ImportError: + pass +else: + try: + from nltk.downloader import download_gui + except RuntimeError as e: + import warnings + + warnings.warn( + "Corpus downloader GUI not loaded " + "(RuntimeError during import: %s)" % str(e) + ) + +# explicitly import all top-level modules (ensuring +# they override the same names inadvertently imported +# from a subpackage) + +from nltk import ccg, chunk, classify, collocations +from nltk import data, featstruct, grammar, help, inference, metrics +from nltk import misc, parse, probability, sem, stem, wsd +from nltk import tag, tbl, text, tokenize, translate, tree, util + + +# FIXME: override any accidentally imported demo, see https://github.com/nltk/nltk/issues/2116 +def demo(): + print("To run the demo code for a module, type nltk.module.demo()") diff --git a/venv/lib/python3.10/site-packages/nltk/book.py b/venv/lib/python3.10/site-packages/nltk/book.py new file mode 100644 index 0000000000000000000000000000000000000000..704f84d426fdf87b4233454c8ceb9915d7db3161 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/book.py @@ -0,0 +1,213 @@ +# Natural Language Toolkit: Some texts for exploration in chapter 1 of the book +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus import ( + genesis, + gutenberg, + inaugural, + nps_chat, + treebank, + webtext, + wordnet, +) +from nltk.probability import FreqDist +from nltk.text import Text +from nltk.util import bigrams + +print("*** Introductory Examples for the NLTK Book ***") +print("Loading 
text1, ..., text9 and sent1, ..., sent9") +print("Type the name of the text or sentence to view it.") +print("Type: 'texts()' or 'sents()' to list the materials.") + +text1 = Text(gutenberg.words("melville-moby_dick.txt")) +print("text1:", text1.name) + +text2 = Text(gutenberg.words("austen-sense.txt")) +print("text2:", text2.name) + +text3 = Text(genesis.words("english-kjv.txt"), name="The Book of Genesis") +print("text3:", text3.name) + +text4 = Text(inaugural.words(), name="Inaugural Address Corpus") +print("text4:", text4.name) + +text5 = Text(nps_chat.words(), name="Chat Corpus") +print("text5:", text5.name) + +text6 = Text(webtext.words("grail.txt"), name="Monty Python and the Holy Grail") +print("text6:", text6.name) + +text7 = Text(treebank.words(), name="Wall Street Journal") +print("text7:", text7.name) + +text8 = Text(webtext.words("singles.txt"), name="Personals Corpus") +print("text8:", text8.name) + +text9 = Text(gutenberg.words("chesterton-thursday.txt")) +print("text9:", text9.name) + + +def texts(): + print("text1:", text1.name) + print("text2:", text2.name) + print("text3:", text3.name) + print("text4:", text4.name) + print("text5:", text5.name) + print("text6:", text6.name) + print("text7:", text7.name) + print("text8:", text8.name) + print("text9:", text9.name) + + +sent1 = ["Call", "me", "Ishmael", "."] +sent2 = [ + "The", + "family", + "of", + "Dashwood", + "had", + "long", + "been", + "settled", + "in", + "Sussex", + ".", +] +sent3 = [ + "In", + "the", + "beginning", + "God", + "created", + "the", + "heaven", + "and", + "the", + "earth", + ".", +] +sent4 = [ + "Fellow", + "-", + "Citizens", + "of", + "the", + "Senate", + "and", + "of", + "the", + "House", + "of", + "Representatives", + ":", +] +sent5 = [ + "I", + "have", + "a", + "problem", + "with", + "people", + "PMing", + "me", + "to", + "lol", + "JOIN", +] +sent6 = [ + "SCENE", + "1", + ":", + "[", + "wind", + "]", + "[", + "clop", + "clop", + "clop", + "]", + "KING", + "ARTHUR", + ":", + "Whoa", + "there", + "!", +] +sent7 = [ + "Pierre", + "Vinken", + ",", + "61", + "years", + "old", + ",", + "will", + "join", + "the", + "board", + "as", + "a", + "nonexecutive", + "director", + "Nov.", + "29", + ".", +] +sent8 = [ + "25", + "SEXY", + "MALE", + ",", + "seeks", + "attrac", + "older", + "single", + "lady", + ",", + "for", + "discreet", + "encounters", + ".", +] +sent9 = [ + "THE", + "suburb", + "of", + "Saffron", + "Park", + "lay", + "on", + "the", + "sunset", + "side", + "of", + "London", + ",", + "as", + "red", + "and", + "ragged", + "as", + "a", + "cloud", + "of", + "sunset", + ".", +] + + +def sents(): + print("sent1:", " ".join(sent1)) + print("sent2:", " ".join(sent2)) + print("sent3:", " ".join(sent3)) + print("sent4:", " ".join(sent4)) + print("sent5:", " ".join(sent5)) + print("sent6:", " ".join(sent6)) + print("sent7:", " ".join(sent7)) + print("sent8:", " ".join(sent8)) + print("sent9:", " ".join(sent9)) diff --git a/venv/lib/python3.10/site-packages/nltk/cli.py b/venv/lib/python3.10/site-packages/nltk/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..1a36a14f49e6cce0a0655767eddc4d82894f36d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/cli.py @@ -0,0 +1,55 @@ +# Natural Language Toolkit: NLTK Command-Line Interface +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + + +import click +from tqdm import tqdm + +from nltk import word_tokenize +from nltk.util import parallelize_preprocess + +CONTEXT_SETTINGS = 
dict(help_option_names=["-h", "--help"]) + + +@click.group(context_settings=CONTEXT_SETTINGS) +@click.version_option() +def cli(): + pass + + +@cli.command("tokenize") +@click.option( + "--language", + "-l", + default="en", + help="The language for the Punkt sentence tokenization.", +) +@click.option( + "--preserve-line", + "-l", + default=True, + is_flag=True, + help="An option to keep the preserve the sentence and not sentence tokenize it.", +) +@click.option("--processes", "-j", default=1, help="No. of processes.") +@click.option("--encoding", "-e", default="utf8", help="Specify encoding of file.") +@click.option( + "--delimiter", "-d", default=" ", help="Specify delimiter to join the tokens." +) +def tokenize_file(language, preserve_line, processes, encoding, delimiter): + """This command tokenizes text stream using nltk.word_tokenize""" + with click.get_text_stream("stdin", encoding=encoding) as fin: + with click.get_text_stream("stdout", encoding=encoding) as fout: + # If it's single process, joblib parallelization is slower, + # so just process line by line normally. + if processes == 1: + for line in tqdm(fin.readlines()): + print(delimiter.join(word_tokenize(line)), end="\n", file=fout) + else: + for outline in parallelize_preprocess( + word_tokenize, fin.readlines(), processes, progress_bar=True + ): + print(delimiter.join(outline), end="\n", file=fout) diff --git a/venv/lib/python3.10/site-packages/nltk/collections.py b/venv/lib/python3.10/site-packages/nltk/collections.py new file mode 100644 index 0000000000000000000000000000000000000000..89ade62b665a4b51e63d49e26ef4ce41001efcd1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/collections.py @@ -0,0 +1,661 @@ +# Natural Language Toolkit: Collections +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import bisect + +# this unused import is for python 2.7 +from collections import Counter, defaultdict, deque +from functools import total_ordering +from itertools import chain, islice + +from nltk.internals import raise_unorderable_types, slice_bounds + +########################################################################## +# Ordered Dictionary +########################################################################## + + +class OrderedDict(dict): + def __init__(self, data=None, **kwargs): + self._keys = self.keys(data, kwargs.get("keys")) + self._default_factory = kwargs.get("default_factory") + if data is None: + dict.__init__(self) + else: + dict.__init__(self, data) + + def __delitem__(self, key): + dict.__delitem__(self, key) + self._keys.remove(key) + + def __getitem__(self, key): + try: + return dict.__getitem__(self, key) + except KeyError: + return self.__missing__(key) + + def __iter__(self): + return (key for key in self.keys()) + + def __missing__(self, key): + if not self._default_factory and key not in self._keys: + raise KeyError() + return self._default_factory() + + def __setitem__(self, key, item): + dict.__setitem__(self, key, item) + if key not in self._keys: + self._keys.append(key) + + def clear(self): + dict.clear(self) + self._keys.clear() + + def copy(self): + d = dict.copy(self) + d._keys = self._keys + return d + + def items(self): + # returns iterator under python 3 and list under python 2 + return zip(self.keys(), self.values()) + + def keys(self, data=None, keys=None): + if data: + if keys: + assert isinstance(keys, list) + assert len(data) == len(keys) + return keys + else: + assert ( + isinstance(data, dict) + or 
isinstance(data, OrderedDict) + or isinstance(data, list) + ) + if isinstance(data, dict) or isinstance(data, OrderedDict): + return data.keys() + elif isinstance(data, list): + return [key for (key, value) in data] + elif "_keys" in self.__dict__: + return self._keys + else: + return [] + + def popitem(self): + if not self._keys: + raise KeyError() + + key = self._keys.pop() + value = self[key] + del self[key] + return (key, value) + + def setdefault(self, key, failobj=None): + dict.setdefault(self, key, failobj) + if key not in self._keys: + self._keys.append(key) + + def update(self, data): + dict.update(self, data) + for key in self.keys(data): + if key not in self._keys: + self._keys.append(key) + + def values(self): + # returns iterator under python 3 + return map(self.get, self._keys) + + +###################################################################### +# Lazy Sequences +###################################################################### + + +@total_ordering +class AbstractLazySequence: + """ + An abstract base class for read-only sequences whose values are + computed as needed. Lazy sequences act like tuples -- they can be + indexed, sliced, and iterated over; but they may not be modified. + + The most common application of lazy sequences in NLTK is for + corpus view objects, which provide access to the contents of a + corpus without loading the entire corpus into memory, by loading + pieces of the corpus from disk as needed. + + The result of modifying a mutable element of a lazy sequence is + undefined. In particular, the modifications made to the element + may or may not persist, depending on whether and when the lazy + sequence caches that element's value or reconstructs it from + scratch. + + Subclasses are required to define two methods: ``__len__()`` + and ``iterate_from()``. + """ + + def __len__(self): + """ + Return the number of tokens in the corpus file underlying this + corpus view. + """ + raise NotImplementedError("should be implemented by subclass") + + def iterate_from(self, start): + """ + Return an iterator that generates the tokens in the corpus + file underlying this corpus view, starting at the token number + ``start``. If ``start>=len(self)``, then this iterator will + generate no tokens. + """ + raise NotImplementedError("should be implemented by subclass") + + def __getitem__(self, i): + """ + Return the *i* th token in the corpus file underlying this + corpus view. Negative indices and spans are both supported. + """ + if isinstance(i, slice): + start, stop = slice_bounds(self, i) + return LazySubsequence(self, start, stop) + else: + # Handle negative indices + if i < 0: + i += len(self) + if i < 0: + raise IndexError("index out of range") + # Use iterate_from to extract it. + try: + return next(self.iterate_from(i)) + except StopIteration as e: + raise IndexError("index out of range") from e + + def __iter__(self): + """Return an iterator that generates the tokens in the corpus + file underlying this corpus view.""" + return self.iterate_from(0) + + def count(self, value): + """Return the number of times this list contains ``value``.""" + return sum(1 for elt in self if elt == value) + + def index(self, value, start=None, stop=None): + """Return the index of the first occurrence of ``value`` in this + list that is greater than or equal to ``start`` and less than + ``stop``. 
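(Editorial aside: the docstring above spells out the subclass contract, ``__len__()`` plus ``iterate_from()``. Here is a toy sketch of a conforming subclass; ``LazyRange`` is an invented name used only for illustration.)

```python
from nltk.collections import AbstractLazySequence


class LazyRange(AbstractLazySequence):
    """Toy subclass exposing range(0, n) through the lazy-sequence contract."""

    def __init__(self, n):
        self._n = n

    def __len__(self):
        return self._n

    def iterate_from(self, start):
        # produce values on demand, starting at the requested offset
        return iter(range(start, self._n))


seq = LazyRange(10)
print(seq[3])                   # 3 -- fetched via iterate_from(3)
print(seq[-1])                  # 9 -- negative indices are normalised by __getitem__
print(list(seq[2:5]))           # [2, 3, 4] -- short slices are materialised eagerly
print(seq.count(7), 7 in seq)   # 1 True
```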
Negative start and stop values are treated like negative + slice bounds -- i.e., they count from the end of the list.""" + start, stop = slice_bounds(self, slice(start, stop)) + for i, elt in enumerate(islice(self, start, stop)): + if elt == value: + return i + start + raise ValueError("index(x): x not in list") + + def __contains__(self, value): + """Return true if this list contains ``value``.""" + return bool(self.count(value)) + + def __add__(self, other): + """Return a list concatenating self with other.""" + return LazyConcatenation([self, other]) + + def __radd__(self, other): + """Return a list concatenating other with self.""" + return LazyConcatenation([other, self]) + + def __mul__(self, count): + """Return a list concatenating self with itself ``count`` times.""" + return LazyConcatenation([self] * count) + + def __rmul__(self, count): + """Return a list concatenating self with itself ``count`` times.""" + return LazyConcatenation([self] * count) + + _MAX_REPR_SIZE = 60 + + def __repr__(self): + """ + Return a string representation for this corpus view that is + similar to a list's representation; but if it would be more + than 60 characters long, it is truncated. + """ + pieces = [] + length = 5 + for elt in self: + pieces.append(repr(elt)) + length += len(pieces[-1]) + 2 + if length > self._MAX_REPR_SIZE and len(pieces) > 2: + return "[%s, ...]" % ", ".join(pieces[:-1]) + return "[%s]" % ", ".join(pieces) + + def __eq__(self, other): + return type(self) == type(other) and list(self) == list(other) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if type(other) != type(self): + raise_unorderable_types("<", self, other) + return list(self) < list(other) + + def __hash__(self): + """ + :raise ValueError: Corpus view objects are unhashable. + """ + raise ValueError("%s objects are unhashable" % self.__class__.__name__) + + +class LazySubsequence(AbstractLazySequence): + """ + A subsequence produced by slicing a lazy sequence. This slice + keeps a reference to its source sequence, and generates its values + by looking them up in the source sequence. + """ + + MIN_SIZE = 100 + """ + The minimum size for which lazy slices should be created. If + ``LazySubsequence()`` is called with a subsequence that is + shorter than ``MIN_SIZE``, then a tuple will be returned instead. + """ + + def __new__(cls, source, start, stop): + """ + Construct a new slice from a given underlying sequence. The + ``start`` and ``stop`` indices should be absolute indices -- + i.e., they should not be negative (for indexing from the back + of a list) or greater than the length of ``source``. + """ + # If the slice is small enough, just use a tuple. + if stop - start < cls.MIN_SIZE: + return list(islice(source.iterate_from(start), stop - start)) + else: + return object.__new__(cls) + + def __init__(self, source, start, stop): + self._source = source + self._start = start + self._stop = stop + + def __len__(self): + return self._stop - self._start + + def iterate_from(self, start): + return islice( + self._source.iterate_from(start + self._start), max(0, len(self) - start) + ) + + +class LazyConcatenation(AbstractLazySequence): + """ + A lazy sequence formed by concatenating a list of lists. This + underlying list of lists may itself be lazy. ``LazyConcatenation`` + maintains an index that it uses to keep track of the relationship + between offsets in the concatenated lists and offsets in the + sublists. 
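(Editorial aside: a short usage sketch of ``LazyConcatenation`` as described above; the sublists are made up.)

```python
from nltk.collections import LazyConcatenation

parts = [["a", "b"], ["c"], ["d", "e", "f"]]
flat = LazyConcatenation(parts)

print(len(flat))    # 6 -- walking the sublists fills the internal offset index
print(flat[3])      # 'd' -- the offsets locate the right sublist; nothing else is copied
print(list(flat))   # ['a', 'b', 'c', 'd', 'e', 'f']
```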
+ """ + + def __init__(self, list_of_lists): + self._list = list_of_lists + self._offsets = [0] + + def __len__(self): + if len(self._offsets) <= len(self._list): + for _ in self.iterate_from(self._offsets[-1]): + pass + return self._offsets[-1] + + def iterate_from(self, start_index): + if start_index < self._offsets[-1]: + sublist_index = bisect.bisect_right(self._offsets, start_index) - 1 + else: + sublist_index = len(self._offsets) - 1 + + index = self._offsets[sublist_index] + + # Construct an iterator over the sublists. + if isinstance(self._list, AbstractLazySequence): + sublist_iter = self._list.iterate_from(sublist_index) + else: + sublist_iter = islice(self._list, sublist_index, None) + + for sublist in sublist_iter: + if sublist_index == (len(self._offsets) - 1): + assert ( + index + len(sublist) >= self._offsets[-1] + ), "offsets not monotonic increasing!" + self._offsets.append(index + len(sublist)) + else: + assert self._offsets[sublist_index + 1] == index + len( + sublist + ), "inconsistent list value (num elts)" + + yield from sublist[max(0, start_index - index) :] + + index += len(sublist) + sublist_index += 1 + + +class LazyMap(AbstractLazySequence): + """ + A lazy sequence whose elements are formed by applying a given + function to each element in one or more underlying lists. The + function is applied lazily -- i.e., when you read a value from the + list, ``LazyMap`` will calculate that value by applying its + function to the underlying lists' value(s). ``LazyMap`` is + essentially a lazy version of the Python primitive function + ``map``. In particular, the following two expressions are + equivalent: + + >>> from nltk.collections import LazyMap + >>> function = str + >>> sequence = [1,2,3] + >>> map(function, sequence) # doctest: +SKIP + ['1', '2', '3'] + >>> list(LazyMap(function, sequence)) + ['1', '2', '3'] + + Like the Python ``map`` primitive, if the source lists do not have + equal size, then the value None will be supplied for the + 'missing' elements. + + Lazy maps can be useful for conserving memory, in cases where + individual values take up a lot of space. This is especially true + if the underlying list's values are constructed lazily, as is the + case with many corpus readers. + + A typical example of a use case for this class is performing + feature detection on the tokens in a corpus. Since featuresets + are encoded as dictionaries, which can take up a lot of memory, + using a ``LazyMap`` can significantly reduce memory usage when + training and running classifiers. + """ + + def __init__(self, function, *lists, **config): + """ + :param function: The function that should be applied to + elements of ``lists``. It should take as many arguments + as there are ``lists``. + :param lists: The underlying lists. + :param cache_size: Determines the size of the cache used + by this lazy map. (default=5) + """ + if not lists: + raise TypeError("LazyMap requires at least two args") + + self._lists = lists + self._func = function + self._cache_size = config.get("cache_size", 5) + self._cache = {} if self._cache_size > 0 else None + + # If you just take bool() of sum() here _all_lazy will be true just + # in case n >= 1 list is an AbstractLazySequence. Presumably this + # isn't what's intended. 
+ self._all_lazy = sum( + isinstance(lst, AbstractLazySequence) for lst in lists + ) == len(lists) + + def iterate_from(self, index): + # Special case: one lazy sublist + if len(self._lists) == 1 and self._all_lazy: + for value in self._lists[0].iterate_from(index): + yield self._func(value) + return + + # Special case: one non-lazy sublist + elif len(self._lists) == 1: + while True: + try: + yield self._func(self._lists[0][index]) + except IndexError: + return + index += 1 + + # Special case: n lazy sublists + elif self._all_lazy: + iterators = [lst.iterate_from(index) for lst in self._lists] + while True: + elements = [] + for iterator in iterators: + try: + elements.append(next(iterator)) + except: # FIXME: What is this except really catching? StopIteration? + elements.append(None) + if elements == [None] * len(self._lists): + return + yield self._func(*elements) + index += 1 + + # general case + else: + while True: + try: + elements = [lst[index] for lst in self._lists] + except IndexError: + elements = [None] * len(self._lists) + for i, lst in enumerate(self._lists): + try: + elements[i] = lst[index] + except IndexError: + pass + if elements == [None] * len(self._lists): + return + yield self._func(*elements) + index += 1 + + def __getitem__(self, index): + if isinstance(index, slice): + sliced_lists = [lst[index] for lst in self._lists] + return LazyMap(self._func, *sliced_lists) + else: + # Handle negative indices + if index < 0: + index += len(self) + if index < 0: + raise IndexError("index out of range") + # Check the cache + if self._cache is not None and index in self._cache: + return self._cache[index] + # Calculate the value + try: + val = next(self.iterate_from(index)) + except StopIteration as e: + raise IndexError("index out of range") from e + # Update the cache + if self._cache is not None: + if len(self._cache) > self._cache_size: + self._cache.popitem() # discard random entry + self._cache[index] = val + # Return the value + return val + + def __len__(self): + return max(len(lst) for lst in self._lists) + + +class LazyZip(LazyMap): + """ + A lazy sequence whose elements are tuples, each containing the i-th + element from each of the argument sequences. The returned list is + truncated in length to the length of the shortest argument sequence. The + tuples are constructed lazily -- i.e., when you read a value from the + list, ``LazyZip`` will calculate that value by forming a tuple from + the i-th element of each of the argument sequences. + + ``LazyZip`` is essentially a lazy version of the Python primitive function + ``zip``. In particular, an evaluated LazyZip is equivalent to a zip: + + >>> from nltk.collections import LazyZip + >>> sequence1, sequence2 = [1, 2, 3], ['a', 'b', 'c'] + >>> zip(sequence1, sequence2) # doctest: +SKIP + [(1, 'a'), (2, 'b'), (3, 'c')] + >>> list(LazyZip(sequence1, sequence2)) + [(1, 'a'), (2, 'b'), (3, 'c')] + >>> sequences = [sequence1, sequence2, [6,7,8,9]] + >>> list(zip(*sequences)) == list(LazyZip(*sequences)) + True + + Lazy zips can be useful for conserving memory in cases where the argument + sequences are particularly long. + + A typical example of a use case for this class is combining long sequences + of gold standard and predicted values in a classification or tagging task + in order to calculate accuracy. By constructing tuples lazily and + avoiding the creation of an additional long sequence, memory usage can be + significantly reduced. 
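(Editorial aside: the gold-versus-predicted accuracy use case mentioned above, sketched with made-up tag sequences.)

```python
from nltk.collections import LazyZip

gold      = ["DT", "NN", "VBD", "IN", "DT", "NN"]
predicted = ["DT", "NN", "VBZ", "IN", "DT", "NNS"]

# tuples are formed lazily, so no third full-length sequence is built
correct = sum(g == p for g, p in LazyZip(gold, predicted))
print(correct / len(gold))   # 0.666...
```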
+ """ + + def __init__(self, *lists): + """ + :param lists: the underlying lists + :type lists: list(list) + """ + LazyMap.__init__(self, lambda *elts: elts, *lists) + + def iterate_from(self, index): + iterator = LazyMap.iterate_from(self, index) + while index < len(self): + yield next(iterator) + index += 1 + return + + def __len__(self): + return min(len(lst) for lst in self._lists) + + +class LazyEnumerate(LazyZip): + """ + A lazy sequence whose elements are tuples, each containing a count (from + zero) and a value yielded by underlying sequence. ``LazyEnumerate`` is + useful for obtaining an indexed list. The tuples are constructed lazily + -- i.e., when you read a value from the list, ``LazyEnumerate`` will + calculate that value by forming a tuple from the count of the i-th + element and the i-th element of the underlying sequence. + + ``LazyEnumerate`` is essentially a lazy version of the Python primitive + function ``enumerate``. In particular, the following two expressions are + equivalent: + + >>> from nltk.collections import LazyEnumerate + >>> sequence = ['first', 'second', 'third'] + >>> list(enumerate(sequence)) + [(0, 'first'), (1, 'second'), (2, 'third')] + >>> list(LazyEnumerate(sequence)) + [(0, 'first'), (1, 'second'), (2, 'third')] + + Lazy enumerations can be useful for conserving memory in cases where the + argument sequences are particularly long. + + A typical example of a use case for this class is obtaining an indexed + list for a long sequence of values. By constructing tuples lazily and + avoiding the creation of an additional long sequence, memory usage can be + significantly reduced. + """ + + def __init__(self, lst): + """ + :param lst: the underlying list + :type lst: list + """ + LazyZip.__init__(self, range(len(lst)), lst) + + +class LazyIteratorList(AbstractLazySequence): + """ + Wraps an iterator, loading its elements on demand + and making them subscriptable. + __repr__ displays only the first few elements. + """ + + def __init__(self, it, known_len=None): + self._it = it + self._len = known_len + self._cache = [] + + def __len__(self): + if self._len: + return self._len + for _ in self.iterate_from(len(self._cache)): + pass + self._len = len(self._cache) + return self._len + + def iterate_from(self, start): + """Create a new iterator over this list starting at the given offset.""" + while len(self._cache) < start: + v = next(self._it) + self._cache.append(v) + i = start + while i < len(self._cache): + yield self._cache[i] + i += 1 + try: + while True: + v = next(self._it) + self._cache.append(v) + yield v + except StopIteration: + pass + + def __add__(self, other): + """Return a list concatenating self with other.""" + return type(self)(chain(self, other)) + + def __radd__(self, other): + """Return a list concatenating other with self.""" + return type(self)(chain(other, self)) + + +###################################################################### +# Trie Implementation +###################################################################### +class Trie(dict): + """A Trie implementation for strings""" + + LEAF = True + + def __init__(self, strings=None): + """Builds a Trie object, which is built around a ``dict`` + + If ``strings`` is provided, it will add the ``strings``, which + consist of a ``list`` of ``strings``, to the Trie. + Otherwise, it'll construct an empty Trie. 
+ + :param strings: List of strings to insert into the trie + (Default is ``None``) + :type strings: list(str) + + """ + super().__init__() + if strings: + for string in strings: + self.insert(string) + + def insert(self, string): + """Inserts ``string`` into the Trie + + :param string: String to insert into the trie + :type string: str + + :Example: + + >>> from nltk.collections import Trie + >>> trie = Trie(["abc", "def"]) + >>> expected = {'a': {'b': {'c': {True: None}}}, \ + 'd': {'e': {'f': {True: None}}}} + >>> trie == expected + True + + """ + if len(string): + self[string[0]].insert(string[1:]) + else: + # mark the string is complete + self[Trie.LEAF] = None + + def __missing__(self, key): + self[key] = Trie() + return self[key] diff --git a/venv/lib/python3.10/site-packages/nltk/collocations.py b/venv/lib/python3.10/site-packages/nltk/collocations.py new file mode 100644 index 0000000000000000000000000000000000000000..2a1fd83ad38e861f0e8db96c24871d40c4ee185e --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/collocations.py @@ -0,0 +1,412 @@ +# Natural Language Toolkit: Collocations and Association Measures +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Joel Nothman +# URL: +# For license information, see LICENSE.TXT +# +""" +Tools to identify collocations --- words that often appear consecutively +--- within corpora. They may also be used to find other associations between +word occurrences. +See Manning and Schutze ch. 5 at https://nlp.stanford.edu/fsnlp/promo/colloc.pdf +and the Text::NSP Perl package at http://ngram.sourceforge.net + +Finding collocations requires first calculating the frequencies of words and +their appearance in the context of other words. Often the collection of words +will then requiring filtering to only retain useful content terms. Each ngram +of words may then be scored according to some association measure, in order +to determine the relative likelihood of each ngram being a collocation. + +The ``BigramCollocationFinder`` and ``TrigramCollocationFinder`` classes provide +these functionalities, dependent on being provided a function which scores a +ngram given appropriate frequency counts. A number of standard association +measures are provided in bigram_measures and trigram_measures. +""" + +# Possible TODOs: +# - consider the distinction between f(x,_) and f(x) and whether our +# approximation is good enough for fragmented data, and mention it +# - add a n-gram collocation finder with measures which only utilise n-gram +# and unigram counts (raw_freq, pmi, student_t) + +import itertools as _itertools + +# these two unused imports are referenced in collocations.doctest +from nltk.metrics import ( + BigramAssocMeasures, + ContingencyMeasures, + QuadgramAssocMeasures, + TrigramAssocMeasures, +) +from nltk.metrics.spearman import ranks_from_scores, spearman_correlation +from nltk.probability import FreqDist +from nltk.util import ngrams + + +class AbstractCollocationFinder: + """ + An abstract base class for collocation finders whose purpose is to + collect collocation candidate frequencies, filter and rank them. + + As a minimum, collocation finders require the frequencies of each + word in a corpus, and the joint frequency of word tuples. This data + should be provided through nltk.probability.FreqDist objects or an + identical interface. 
+ """ + + def __init__(self, word_fd, ngram_fd): + self.word_fd = word_fd + self.N = word_fd.N() + self.ngram_fd = ngram_fd + + @classmethod + def _build_new_documents( + cls, documents, window_size, pad_left=False, pad_right=False, pad_symbol=None + ): + """ + Pad the document with the place holder according to the window_size + """ + padding = (pad_symbol,) * (window_size - 1) + if pad_right: + return _itertools.chain.from_iterable( + _itertools.chain(doc, padding) for doc in documents + ) + if pad_left: + return _itertools.chain.from_iterable( + _itertools.chain(padding, doc) for doc in documents + ) + + @classmethod + def from_documents(cls, documents): + """Constructs a collocation finder given a collection of documents, + each of which is a list (or iterable) of tokens. + """ + # return cls.from_words(_itertools.chain(*documents)) + return cls.from_words( + cls._build_new_documents(documents, cls.default_ws, pad_right=True) + ) + + @staticmethod + def _ngram_freqdist(words, n): + return FreqDist(tuple(words[i : i + n]) for i in range(len(words) - 1)) + + def _apply_filter(self, fn=lambda ngram, freq: False): + """Generic filter removes ngrams from the frequency distribution + if the function returns True when passed an ngram tuple. + """ + tmp_ngram = FreqDist() + for ngram, freq in self.ngram_fd.items(): + if not fn(ngram, freq): + tmp_ngram[ngram] = freq + self.ngram_fd = tmp_ngram + + def apply_freq_filter(self, min_freq): + """Removes candidate ngrams which have frequency less than min_freq.""" + self._apply_filter(lambda ng, freq: freq < min_freq) + + def apply_ngram_filter(self, fn): + """Removes candidate ngrams (w1, w2, ...) where fn(w1, w2, ...) + evaluates to True. + """ + self._apply_filter(lambda ng, f: fn(*ng)) + + def apply_word_filter(self, fn): + """Removes candidate ngrams (w1, w2, ...) where any of (fn(w1), fn(w2), + ...) evaluates to True. + """ + self._apply_filter(lambda ng, f: any(fn(w) for w in ng)) + + def _score_ngrams(self, score_fn): + """Generates of (ngram, score) pairs as determined by the scoring + function provided. + """ + for tup in self.ngram_fd: + score = self.score_ngram(score_fn, *tup) + if score is not None: + yield tup, score + + def score_ngrams(self, score_fn): + """Returns a sequence of (ngram, score) pairs ordered from highest to + lowest score, as determined by the scoring function provided. + """ + return sorted(self._score_ngrams(score_fn), key=lambda t: (-t[1], t[0])) + + def nbest(self, score_fn, n): + """Returns the top n ngrams when scored by the given function.""" + return [p for p, s in self.score_ngrams(score_fn)[:n]] + + def above_score(self, score_fn, min_score): + """Returns a sequence of ngrams, ordered by decreasing score, whose + scores each exceed the given minimum score. + """ + for ngram, score in self.score_ngrams(score_fn): + if score > min_score: + yield ngram + else: + break + + +class BigramCollocationFinder(AbstractCollocationFinder): + """A tool for the finding and ranking of bigram collocations or other + association measures. It is often useful to use from_words() rather than + constructing an instance directly. + """ + + default_ws = 2 + + def __init__(self, word_fd, bigram_fd, window_size=2): + """Construct a BigramCollocationFinder, given FreqDists for + appearances of words and (possibly non-contiguous) bigrams. 
+ """ + AbstractCollocationFinder.__init__(self, word_fd, bigram_fd) + self.window_size = window_size + + @classmethod + def from_words(cls, words, window_size=2): + """Construct a BigramCollocationFinder for all bigrams in the given + sequence. When window_size > 2, count non-contiguous bigrams, in the + style of Church and Hanks's (1990) association ratio. + """ + wfd = FreqDist() + bfd = FreqDist() + + if window_size < 2: + raise ValueError("Specify window_size at least 2") + + for window in ngrams(words, window_size, pad_right=True): + w1 = window[0] + if w1 is None: + continue + wfd[w1] += 1 + for w2 in window[1:]: + if w2 is not None: + bfd[(w1, w2)] += 1 + return cls(wfd, bfd, window_size=window_size) + + def score_ngram(self, score_fn, w1, w2): + """Returns the score for a given bigram using the given scoring + function. Following Church and Hanks (1990), counts are scaled by + a factor of 1/(window_size - 1). + """ + n_all = self.N + n_ii = self.ngram_fd[(w1, w2)] / (self.window_size - 1.0) + if not n_ii: + return + n_ix = self.word_fd[w1] + n_xi = self.word_fd[w2] + return score_fn(n_ii, (n_ix, n_xi), n_all) + + +class TrigramCollocationFinder(AbstractCollocationFinder): + """A tool for the finding and ranking of trigram collocations or other + association measures. It is often useful to use from_words() rather than + constructing an instance directly. + """ + + default_ws = 3 + + def __init__(self, word_fd, bigram_fd, wildcard_fd, trigram_fd): + """Construct a TrigramCollocationFinder, given FreqDists for + appearances of words, bigrams, two words with any word between them, + and trigrams. + """ + AbstractCollocationFinder.__init__(self, word_fd, trigram_fd) + self.wildcard_fd = wildcard_fd + self.bigram_fd = bigram_fd + + @classmethod + def from_words(cls, words, window_size=3): + """Construct a TrigramCollocationFinder for all trigrams in the given + sequence. + """ + if window_size < 3: + raise ValueError("Specify window_size at least 3") + + wfd = FreqDist() + wildfd = FreqDist() + bfd = FreqDist() + tfd = FreqDist() + for window in ngrams(words, window_size, pad_right=True): + w1 = window[0] + if w1 is None: + continue + for w2, w3 in _itertools.combinations(window[1:], 2): + wfd[w1] += 1 + if w2 is None: + continue + bfd[(w1, w2)] += 1 + if w3 is None: + continue + wildfd[(w1, w3)] += 1 + tfd[(w1, w2, w3)] += 1 + return cls(wfd, bfd, wildfd, tfd) + + def bigram_finder(self): + """Constructs a bigram collocation finder with the bigram and unigram + data from this finder. Note that this does not include any filtering + applied to this finder. + """ + return BigramCollocationFinder(self.word_fd, self.bigram_fd) + + def score_ngram(self, score_fn, w1, w2, w3): + """Returns the score for a given trigram using the given scoring + function. + """ + n_all = self.N + n_iii = self.ngram_fd[(w1, w2, w3)] + if not n_iii: + return + n_iix = self.bigram_fd[(w1, w2)] + n_ixi = self.wildcard_fd[(w1, w3)] + n_xii = self.bigram_fd[(w2, w3)] + n_ixx = self.word_fd[w1] + n_xix = self.word_fd[w2] + n_xxi = self.word_fd[w3] + return score_fn(n_iii, (n_iix, n_ixi, n_xii), (n_ixx, n_xix, n_xxi), n_all) + + +class QuadgramCollocationFinder(AbstractCollocationFinder): + """A tool for the finding and ranking of quadgram collocations or other association measures. + It is often useful to use from_words() rather than constructing an instance directly. 
+ """ + + default_ws = 4 + + def __init__(self, word_fd, quadgram_fd, ii, iii, ixi, ixxi, iixi, ixii): + """Construct a QuadgramCollocationFinder, given FreqDists for appearances of words, + bigrams, trigrams, two words with one word and two words between them, three words + with a word between them in both variations. + """ + AbstractCollocationFinder.__init__(self, word_fd, quadgram_fd) + self.iii = iii + self.ii = ii + self.ixi = ixi + self.ixxi = ixxi + self.iixi = iixi + self.ixii = ixii + + @classmethod + def from_words(cls, words, window_size=4): + if window_size < 4: + raise ValueError("Specify window_size at least 4") + ixxx = FreqDist() + iiii = FreqDist() + ii = FreqDist() + iii = FreqDist() + ixi = FreqDist() + ixxi = FreqDist() + iixi = FreqDist() + ixii = FreqDist() + + for window in ngrams(words, window_size, pad_right=True): + w1 = window[0] + if w1 is None: + continue + for w2, w3, w4 in _itertools.combinations(window[1:], 3): + ixxx[w1] += 1 + if w2 is None: + continue + ii[(w1, w2)] += 1 + if w3 is None: + continue + iii[(w1, w2, w3)] += 1 + ixi[(w1, w3)] += 1 + if w4 is None: + continue + iiii[(w1, w2, w3, w4)] += 1 + ixxi[(w1, w4)] += 1 + ixii[(w1, w3, w4)] += 1 + iixi[(w1, w2, w4)] += 1 + + return cls(ixxx, iiii, ii, iii, ixi, ixxi, iixi, ixii) + + def score_ngram(self, score_fn, w1, w2, w3, w4): + n_all = self.N + n_iiii = self.ngram_fd[(w1, w2, w3, w4)] + if not n_iiii: + return + n_iiix = self.iii[(w1, w2, w3)] + n_xiii = self.iii[(w2, w3, w4)] + n_iixi = self.iixi[(w1, w2, w4)] + n_ixii = self.ixii[(w1, w3, w4)] + + n_iixx = self.ii[(w1, w2)] + n_xxii = self.ii[(w3, w4)] + n_xiix = self.ii[(w2, w3)] + n_ixix = self.ixi[(w1, w3)] + n_ixxi = self.ixxi[(w1, w4)] + n_xixi = self.ixi[(w2, w4)] + + n_ixxx = self.word_fd[w1] + n_xixx = self.word_fd[w2] + n_xxix = self.word_fd[w3] + n_xxxi = self.word_fd[w4] + return score_fn( + n_iiii, + (n_iiix, n_iixi, n_ixii, n_xiii), + (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix), + (n_ixxx, n_xixx, n_xxix, n_xxxi), + n_all, + ) + + +def demo(scorer=None, compare_scorer=None): + """Finds bigram collocations in the files of the WebText corpus.""" + from nltk.metrics import ( + BigramAssocMeasures, + ranks_from_scores, + spearman_correlation, + ) + + if scorer is None: + scorer = BigramAssocMeasures.likelihood_ratio + if compare_scorer is None: + compare_scorer = BigramAssocMeasures.raw_freq + + from nltk.corpus import stopwords, webtext + + ignored_words = stopwords.words("english") + word_filter = lambda w: len(w) < 3 or w.lower() in ignored_words + + for file in webtext.fileids(): + words = [word.lower() for word in webtext.words(file)] + + cf = BigramCollocationFinder.from_words(words) + cf.apply_freq_filter(3) + cf.apply_word_filter(word_filter) + + corr = spearman_correlation( + ranks_from_scores(cf.score_ngrams(scorer)), + ranks_from_scores(cf.score_ngrams(compare_scorer)), + ) + print(file) + print("\t", [" ".join(tup) for tup in cf.nbest(scorer, 15)]) + print(f"\t Correlation to {compare_scorer.__name__}: {corr:0.4f}") + + +# Slows down loading too much +# bigram_measures = BigramAssocMeasures() +# trigram_measures = TrigramAssocMeasures() + +if __name__ == "__main__": + import sys + + from nltk.metrics import BigramAssocMeasures + + try: + scorer = eval("BigramAssocMeasures." + sys.argv[1]) + except IndexError: + scorer = None + try: + compare_scorer = eval("BigramAssocMeasures." 
+ sys.argv[2]) + except IndexError: + compare_scorer = None + + demo(scorer, compare_scorer) + +__all__ = [ + "BigramCollocationFinder", + "TrigramCollocationFinder", + "QuadgramCollocationFinder", +] diff --git a/venv/lib/python3.10/site-packages/nltk/compat.py b/venv/lib/python3.10/site-packages/nltk/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..ceedc3992530e4e523dc9d479c26fbb43c918280 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/compat.py @@ -0,0 +1,43 @@ +# Natural Language Toolkit: Compatibility +# +# Copyright (C) 2001-2023 NLTK Project +# +# URL: +# For license information, see LICENSE.TXT + +import os +from functools import wraps + +# ======= Compatibility for datasets that care about Python versions ======== + +# The following datasets have a /PY3 subdirectory containing +# a full copy of the data which has been re-encoded or repickled. +DATA_UPDATES = [ + ("chunkers", "maxent_ne_chunker"), + ("help", "tagsets"), + ("taggers", "maxent_treebank_pos_tagger"), + ("tokenizers", "punkt"), +] + +_PY3_DATA_UPDATES = [os.path.join(*path_list) for path_list in DATA_UPDATES] + + +def add_py3_data(path): + for item in _PY3_DATA_UPDATES: + if item in str(path) and "/PY3" not in str(path): + pos = path.index(item) + len(item) + if path[pos : pos + 4] == ".zip": + pos += 4 + path = path[:pos] + "/PY3" + path[pos:] + break + return path + + +# for use in adding /PY3 to the second (filename) argument +# of the file pointers in data.py +def py3_data(init_func): + def _decorator(*args, **kwargs): + args = (args[0], add_py3_data(args[1])) + args[2:] + return init_func(*args, **kwargs) + + return wraps(init_func)(_decorator) diff --git a/venv/lib/python3.10/site-packages/nltk/data.py b/venv/lib/python3.10/site-packages/nltk/data.py new file mode 100644 index 0000000000000000000000000000000000000000..fed75d2bfbf2953a2ecc61d1d5a24244f5749be6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/data.py @@ -0,0 +1,1441 @@ +# Natural Language Toolkit: Utility functions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +Functions to find and load NLTK resource files, such as corpora, +grammars, and saved processing objects. Resource files are identified +using URLs, such as ``nltk:corpora/abc/rural.txt`` or +``https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg``. +The following URL protocols are supported: + + - ``file:path``: Specifies the file whose path is *path*. + Both relative and absolute paths may be used. + + - ``https://host/path``: Specifies the file stored on the web + server *host* at path *path*. + + - ``nltk:path``: Specifies the file stored in the NLTK data + package at *path*. NLTK will search for these files in the + directories specified by ``nltk.data.path``. + +If no protocol is specified, then the default protocol ``nltk:`` will +be used. + +This module provides to functions that can be used to access a +resource file, given its URL: ``load()`` loads a given resource, and +adds it to a resource cache; and ``retrieve()`` copies a given resource +to a local file. 
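(Editorial aside: the docstring above names ``load()`` and ``retrieve()``, which are defined later in this file. A minimal usage sketch, assuming the punkt model has already been fetched with ``nltk.download()``:)

```python
import nltk.data

# the search path can be extended before resolving resources
nltk.data.path.append("/opt/nltk_data")   # hypothetical extra location

# "nltk:" is the default protocol, so a bare path is looked up along nltk.data.path
tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
print(tokenizer.tokenize("Two sentences. Split here."))

# explicit protocols are accepted as well, e.g. a local file (hypothetical name)
grammar = nltk.data.load("file:toy.cfg")
```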
+""" + +import codecs +import functools +import os +import pickle +import re +import sys +import textwrap +import zipfile +from abc import ABCMeta, abstractmethod +from gzip import WRITE as GZ_WRITE +from gzip import GzipFile +from io import BytesIO, TextIOWrapper +from urllib.request import url2pathname, urlopen + +try: + from zlib import Z_SYNC_FLUSH as FLUSH +except ImportError: + from zlib import Z_FINISH as FLUSH + +from nltk import grammar, sem +from nltk.compat import add_py3_data, py3_data +from nltk.internals import deprecated + +textwrap_indent = functools.partial(textwrap.indent, prefix=" ") + +###################################################################### +# Search Path +###################################################################### + +path = [] +"""A list of directories where the NLTK data package might reside. + These directories will be checked in order when looking for a + resource in the data package. Note that this allows users to + substitute in their own versions of resources, if they have them + (e.g., in their home directory under ~/nltk_data).""" + +# User-specified locations: +_paths_from_env = os.environ.get("NLTK_DATA", "").split(os.pathsep) +path += [d for d in _paths_from_env if d] +if "APPENGINE_RUNTIME" not in os.environ and os.path.expanduser("~/") != "~/": + path.append(os.path.expanduser("~/nltk_data")) + +if sys.platform.startswith("win"): + # Common locations on Windows: + path += [ + os.path.join(sys.prefix, "nltk_data"), + os.path.join(sys.prefix, "share", "nltk_data"), + os.path.join(sys.prefix, "lib", "nltk_data"), + os.path.join(os.environ.get("APPDATA", "C:\\"), "nltk_data"), + r"C:\nltk_data", + r"D:\nltk_data", + r"E:\nltk_data", + ] +else: + # Common locations on UNIX & OS X: + path += [ + os.path.join(sys.prefix, "nltk_data"), + os.path.join(sys.prefix, "share", "nltk_data"), + os.path.join(sys.prefix, "lib", "nltk_data"), + "/usr/share/nltk_data", + "/usr/local/share/nltk_data", + "/usr/lib/nltk_data", + "/usr/local/lib/nltk_data", + ] + + +###################################################################### +# Util Functions +###################################################################### + + +def gzip_open_unicode( + filename, + mode="rb", + compresslevel=9, + encoding="utf-8", + fileobj=None, + errors=None, + newline=None, +): + if fileobj is None: + fileobj = GzipFile(filename, mode, compresslevel, fileobj) + return TextIOWrapper(fileobj, encoding, errors, newline) + + +def split_resource_url(resource_url): + """ + Splits a resource url into ":". + + >>> windows = sys.platform.startswith('win') + >>> split_resource_url('nltk:home/nltk') + ('nltk', 'home/nltk') + >>> split_resource_url('nltk:/home/nltk') + ('nltk', '/home/nltk') + >>> split_resource_url('file:/home/nltk') + ('file', '/home/nltk') + >>> split_resource_url('file:///home/nltk') + ('file', '/home/nltk') + >>> split_resource_url('file:///C:/home/nltk') + ('file', '/C:/home/nltk') + """ + protocol, path_ = resource_url.split(":", 1) + if protocol == "nltk": + pass + elif protocol == "file": + if path_.startswith("/"): + path_ = "/" + path_.lstrip("/") + else: + path_ = re.sub(r"^/{0,2}", "", path_) + return protocol, path_ + + +def normalize_resource_url(resource_url): + r""" + Normalizes a resource url + + >>> windows = sys.platform.startswith('win') + >>> os.path.normpath(split_resource_url(normalize_resource_url('file:grammar.fcfg'))[1]) == \ + ... 
('\\' if windows else '') + os.path.abspath(os.path.join(os.curdir, 'grammar.fcfg')) + True + >>> not windows or normalize_resource_url('file:C:/dir/file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('file:C:\\dir\\file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('file:C:\\dir/file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('file://C:/dir/file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('file:////C:/dir/file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('nltk:C:/dir/file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('nltk:C:\\dir\\file') == 'file:///C:/dir/file' + True + >>> windows or normalize_resource_url('file:/dir/file/toy.cfg') == 'file:///dir/file/toy.cfg' + True + >>> normalize_resource_url('nltk:home/nltk') + 'nltk:home/nltk' + >>> windows or normalize_resource_url('nltk:/home/nltk') == 'file:///home/nltk' + True + >>> normalize_resource_url('https://example.com/dir/file') + 'https://example.com/dir/file' + >>> normalize_resource_url('dir/file') + 'nltk:dir/file' + """ + try: + protocol, name = split_resource_url(resource_url) + except ValueError: + # the resource url has no protocol, use the nltk protocol by default + protocol = "nltk" + name = resource_url + # use file protocol if the path is an absolute path + if protocol == "nltk" and os.path.isabs(name): + protocol = "file://" + name = normalize_resource_name(name, False, None) + elif protocol == "file": + protocol = "file://" + # name is absolute + name = normalize_resource_name(name, False, None) + elif protocol == "nltk": + protocol = "nltk:" + name = normalize_resource_name(name, True) + else: + # handled by urllib + protocol += "://" + return "".join([protocol, name]) + + +def normalize_resource_name(resource_name, allow_relative=True, relative_path=None): + """ + :type resource_name: str or unicode + :param resource_name: The name of the resource to search for. + Resource names are posix-style relative path names, such as + ``corpora/brown``. Directory names will automatically + be converted to a platform-appropriate path separator. 
+ Directory trailing slashes are preserved + + >>> windows = sys.platform.startswith('win') + >>> normalize_resource_name('.', True) + './' + >>> normalize_resource_name('./', True) + './' + >>> windows or normalize_resource_name('dir/file', False, '/') == '/dir/file' + True + >>> not windows or normalize_resource_name('C:/file', False, '/') == '/C:/file' + True + >>> windows or normalize_resource_name('/dir/file', False, '/') == '/dir/file' + True + >>> windows or normalize_resource_name('../dir/file', False, '/') == '/dir/file' + True + >>> not windows or normalize_resource_name('/dir/file', True, '/') == 'dir/file' + True + >>> windows or normalize_resource_name('/dir/file', True, '/') == '/dir/file' + True + """ + is_dir = bool(re.search(r"[\\/.]$", resource_name)) or resource_name.endswith( + os.path.sep + ) + if sys.platform.startswith("win"): + resource_name = resource_name.lstrip("/") + else: + resource_name = re.sub(r"^/+", "/", resource_name) + if allow_relative: + resource_name = os.path.normpath(resource_name) + else: + if relative_path is None: + relative_path = os.curdir + resource_name = os.path.abspath(os.path.join(relative_path, resource_name)) + resource_name = resource_name.replace("\\", "/").replace(os.path.sep, "/") + if sys.platform.startswith("win") and os.path.isabs(resource_name): + resource_name = "/" + resource_name + if is_dir and not resource_name.endswith("/"): + resource_name += "/" + return resource_name + + +###################################################################### +# Path Pointers +###################################################################### + + +class PathPointer(metaclass=ABCMeta): + """ + An abstract base class for 'path pointers,' used by NLTK's data + package to identify specific paths. Two subclasses exist: + ``FileSystemPathPointer`` identifies a file that can be accessed + directly via a given absolute path. ``ZipFilePathPointer`` + identifies a file contained within a zipfile, that can be accessed + by reading that zipfile. + """ + + @abstractmethod + def open(self, encoding=None): + """ + Return a seekable read-only stream that can be used to read + the contents of the file identified by this path pointer. + + :raise IOError: If the path specified by this pointer does + not contain a readable file. + """ + + @abstractmethod + def file_size(self): + """ + Return the size of the file pointed to by this path pointer, + in bytes. + + :raise IOError: If the path specified by this pointer does + not contain a readable file. + """ + + @abstractmethod + def join(self, fileid): + """ + Return a new path pointer formed by starting at the path + identified by this pointer, and then following the relative + path given by ``fileid``. The path components of ``fileid`` + should be separated by forward slashes, regardless of + the underlying file system's path separator character. + """ + + +class FileSystemPathPointer(PathPointer, str): + """ + A path pointer that identifies a file which can be accessed + directly via a given absolute path. + """ + + @py3_data + def __init__(self, _path): + """ + Create a new path pointer for the given absolute path. + + :raise IOError: If the given path does not exist. + """ + + _path = os.path.abspath(_path) + if not os.path.exists(_path): + raise OSError("No such file or directory: %r" % _path) + self._path = _path + + # There's no need to call str.__init__(), since it's a no-op; + # str does all of its setup work in __new__. 
+ + @property + def path(self): + """The absolute path identified by this path pointer.""" + return self._path + + def open(self, encoding=None): + stream = open(self._path, "rb") + if encoding is not None: + stream = SeekableUnicodeStreamReader(stream, encoding) + return stream + + def file_size(self): + return os.stat(self._path).st_size + + def join(self, fileid): + _path = os.path.join(self._path, fileid) + return FileSystemPathPointer(_path) + + def __repr__(self): + return "FileSystemPathPointer(%r)" % self._path + + def __str__(self): + return self._path + + +@deprecated("Use gzip.GzipFile instead as it also uses a buffer.") +class BufferedGzipFile(GzipFile): + """A ``GzipFile`` subclass for compatibility with older nltk releases. + + Use ``GzipFile`` directly as it also buffers in all supported + Python versions. + """ + + @py3_data + def __init__( + self, filename=None, mode=None, compresslevel=9, fileobj=None, **kwargs + ): + """Return a buffered gzip file object.""" + GzipFile.__init__(self, filename, mode, compresslevel, fileobj) + + def write(self, data): + # This is identical to GzipFile.write but does not return + # the bytes written to retain compatibility. + super().write(data) + + +class GzipFileSystemPathPointer(FileSystemPathPointer): + """ + A subclass of ``FileSystemPathPointer`` that identifies a gzip-compressed + file located at a given absolute path. ``GzipFileSystemPathPointer`` is + appropriate for loading large gzip-compressed pickle objects efficiently. + """ + + def open(self, encoding=None): + stream = GzipFile(self._path, "rb") + if encoding: + stream = SeekableUnicodeStreamReader(stream, encoding) + return stream + + +class ZipFilePathPointer(PathPointer): + """ + A path pointer that identifies a file contained within a zipfile, + which can be accessed by reading that zipfile. + """ + + @py3_data + def __init__(self, zipfile, entry=""): + """ + Create a new path pointer pointing at the specified entry + in the given zipfile. + + :raise IOError: If the given zipfile does not exist, or if it + does not contain the specified entry. + """ + if isinstance(zipfile, str): + zipfile = OpenOnDemandZipFile(os.path.abspath(zipfile)) + + # Check that the entry exists: + if entry: + + # Normalize the entry string, it should be relative: + entry = normalize_resource_name(entry, True, "/").lstrip("/") + + try: + zipfile.getinfo(entry) + except Exception as e: + # Sometimes directories aren't explicitly listed in + # the zip file. So if `entry` is a directory name, + # then check if the zipfile contains any files that + # are under the given directory. + if entry.endswith("/") and [ + n for n in zipfile.namelist() if n.startswith(entry) + ]: + pass # zipfile contains a file in that directory. + else: + # Otherwise, complain. + raise OSError( + f"Zipfile {zipfile.filename!r} does not contain {entry!r}" + ) from e + self._zipfile = zipfile + self._entry = entry + + @property + def zipfile(self): + """ + The zipfile.ZipFile object used to access the zip file + containing the entry identified by this path pointer. + """ + return self._zipfile + + @property + def entry(self): + """ + The name of the file within zipfile that this path + pointer points to. 
+ """ + return self._entry + + def open(self, encoding=None): + data = self._zipfile.read(self._entry) + stream = BytesIO(data) + if self._entry.endswith(".gz"): + stream = GzipFile(self._entry, fileobj=stream) + elif encoding is not None: + stream = SeekableUnicodeStreamReader(stream, encoding) + return stream + + def file_size(self): + return self._zipfile.getinfo(self._entry).file_size + + def join(self, fileid): + entry = f"{self._entry}/{fileid}" + return ZipFilePathPointer(self._zipfile, entry) + + def __repr__(self): + return f"ZipFilePathPointer({self._zipfile.filename!r}, {self._entry!r})" + + def __str__(self): + return os.path.normpath(os.path.join(self._zipfile.filename, self._entry)) + + +###################################################################### +# Access Functions +###################################################################### + +# Don't use a weak dictionary, because in the common case this +# causes a lot more reloading that necessary. +_resource_cache = {} +"""A dictionary used to cache resources so that they won't + need to be loaded more than once.""" + + +def find(resource_name, paths=None): + """ + Find the given resource by searching through the directories and + zip files in paths, where a None or empty string specifies an absolute path. + Returns a corresponding path name. If the given resource is not + found, raise a ``LookupError``, whose message gives a pointer to + the installation instructions for the NLTK downloader. + + Zip File Handling: + + - If ``resource_name`` contains a component with a ``.zip`` + extension, then it is assumed to be a zipfile; and the + remaining path components are used to look inside the zipfile. + + - If any element of ``nltk.data.path`` has a ``.zip`` extension, + then it is assumed to be a zipfile. + + - If a given resource name that does not contain any zipfile + component is not found initially, then ``find()`` will make a + second attempt to find that resource, by replacing each + component *p* in the path with *p.zip/p*. For example, this + allows ``find()`` to map the resource name + ``corpora/chat80/cities.pl`` to a zip file path pointer to + ``corpora/chat80.zip/chat80/cities.pl``. + + - When using ``find()`` to locate a directory contained in a + zipfile, the resource name must end with the forward slash + character. Otherwise, ``find()`` will not locate the + directory. + + :type resource_name: str or unicode + :param resource_name: The name of the resource to search for. + Resource names are posix-style relative path names, such as + ``corpora/brown``. Directory names will be + automatically converted to a platform-appropriate path separator. + :rtype: str + """ + resource_name = normalize_resource_name(resource_name, True) + + # Resolve default paths at runtime in-case the user overrides + # nltk.data.path + if paths is None: + paths = path + + # Check if the resource name includes a zipfile name + m = re.match(r"(.*\.zip)/?(.*)$|", resource_name) + zipfile, zipentry = m.groups() + + # Check each item in our path + for path_ in paths: + # Is the path item a zipfile? + if path_ and (os.path.isfile(path_) and path_.endswith(".zip")): + try: + return ZipFilePathPointer(path_, resource_name) + except OSError: + # resource not in zipfile + continue + + # Is the path item a directory or is resource_name an absolute path? 
+ elif not path_ or os.path.isdir(path_): + if zipfile is None: + p = os.path.join(path_, url2pathname(resource_name)) + if os.path.exists(p): + if p.endswith(".gz"): + return GzipFileSystemPathPointer(p) + else: + return FileSystemPathPointer(p) + else: + p = os.path.join(path_, url2pathname(zipfile)) + if os.path.exists(p): + try: + return ZipFilePathPointer(p, zipentry) + except OSError: + # resource not in zipfile + continue + + # Fallback: if the path doesn't include a zip file, then try + # again, assuming that one of the path components is inside a + # zipfile of the same name. + if zipfile is None: + pieces = resource_name.split("/") + for i in range(len(pieces)): + modified_name = "/".join(pieces[:i] + [pieces[i] + ".zip"] + pieces[i:]) + try: + return find(modified_name, paths) + except LookupError: + pass + + # Identify the package (i.e. the .zip file) to download. + resource_zipname = resource_name.split("/")[1] + if resource_zipname.endswith(".zip"): + resource_zipname = resource_zipname.rpartition(".")[0] + # Display a friendly error message if the resource wasn't found: + msg = str( + "Resource \33[93m{resource}\033[0m not found.\n" + "Please use the NLTK Downloader to obtain the resource:\n\n" + "\33[31m" # To display red text in terminal. + ">>> import nltk\n" + ">>> nltk.download('{resource}')\n" + "\033[0m" + ).format(resource=resource_zipname) + msg = textwrap_indent(msg) + + msg += "\n For more information see: https://www.nltk.org/data.html\n" + + msg += "\n Attempted to load \33[93m{resource_name}\033[0m\n".format( + resource_name=resource_name + ) + + msg += "\n Searched in:" + "".join("\n - %r" % d for d in paths) + sep = "*" * 70 + resource_not_found = f"\n{sep}\n{msg}\n{sep}\n" + raise LookupError(resource_not_found) + + +def retrieve(resource_url, filename=None, verbose=True): + """ + Copy the given resource to a local file. If no filename is + specified, then use the URL's filename. If there is already a + file named ``filename``, then raise a ``ValueError``. + + :type resource_url: str + :param resource_url: A URL specifying where the resource should be + loaded from. The default protocol is "nltk:", which searches + for the file in the the NLTK data package. + """ + resource_url = normalize_resource_url(resource_url) + if filename is None: + if resource_url.startswith("file:"): + filename = os.path.split(resource_url)[-1] + else: + filename = re.sub(r"(^\w+:)?.*/", "", resource_url) + if os.path.exists(filename): + filename = os.path.abspath(filename) + raise ValueError("File %r already exists!" % filename) + + if verbose: + print(f"Retrieving {resource_url!r}, saving to {filename!r}") + + # Open the input & output streams. + infile = _open(resource_url) + + # Copy infile -> outfile, using 64k blocks. + with open(filename, "wb") as outfile: + while True: + s = infile.read(1024 * 64) # 64k blocks. + outfile.write(s) + if not s: + break + + infile.close() + + +#: A dictionary describing the formats that are supported by NLTK's +#: load() method. Keys are format names, and values are format +#: descriptions. 
+FORMATS = { + "pickle": "A serialized python object, stored using the pickle module.", + "json": "A serialized python object, stored using the json module.", + "yaml": "A serialized python object, stored using the yaml module.", + "cfg": "A context free grammar.", + "pcfg": "A probabilistic CFG.", + "fcfg": "A feature CFG.", + "fol": "A list of first order logic expressions, parsed with " + "nltk.sem.logic.Expression.fromstring.", + "logic": "A list of first order logic expressions, parsed with " + "nltk.sem.logic.LogicParser. Requires an additional logic_parser " + "parameter", + "val": "A semantic valuation, parsed by nltk.sem.Valuation.fromstring.", + "raw": "The raw (byte string) contents of a file.", + "text": "The raw (unicode string) contents of a file. ", +} + +#: A dictionary mapping from file extensions to format names, used +#: by load() when format="auto" to decide the format for a +#: given resource url. +AUTO_FORMATS = { + "pickle": "pickle", + "json": "json", + "yaml": "yaml", + "cfg": "cfg", + "pcfg": "pcfg", + "fcfg": "fcfg", + "fol": "fol", + "logic": "logic", + "val": "val", + "txt": "text", + "text": "text", +} + + +def load( + resource_url, + format="auto", + cache=True, + verbose=False, + logic_parser=None, + fstruct_reader=None, + encoding=None, +): + """ + Load a given resource from the NLTK data package. The following + resource formats are currently supported: + + - ``pickle`` + - ``json`` + - ``yaml`` + - ``cfg`` (context free grammars) + - ``pcfg`` (probabilistic CFGs) + - ``fcfg`` (feature-based CFGs) + - ``fol`` (formulas of First Order Logic) + - ``logic`` (Logical formulas to be parsed by the given logic_parser) + - ``val`` (valuation of First Order Logic model) + - ``text`` (the file contents as a unicode string) + - ``raw`` (the raw file contents as a byte string) + + If no format is specified, ``load()`` will attempt to determine a + format based on the resource name's file extension. If that + fails, ``load()`` will raise a ``ValueError`` exception. + + For all text formats (everything except ``pickle``, ``json``, ``yaml`` and ``raw``), + it tries to decode the raw contents using UTF-8, and if that doesn't + work, it tries with ISO-8859-1 (Latin-1), unless the ``encoding`` + is specified. + + :type resource_url: str + :param resource_url: A URL specifying where the resource should be + loaded from. The default protocol is "nltk:", which searches + for the file in the the NLTK data package. + :type cache: bool + :param cache: If true, add this resource to a cache. If load() + finds a resource in its cache, then it will return it from the + cache rather than loading it. + :type verbose: bool + :param verbose: If true, print a message when loading a resource. + Messages are not displayed when a resource is retrieved from + the cache. + :type logic_parser: LogicParser + :param logic_parser: The parser that will be used to parse logical + expressions. + :type fstruct_reader: FeatStructReader + :param fstruct_reader: The parser that will be used to parse the + feature structure of an fcfg. + :type encoding: str + :param encoding: the encoding of the input; only used for text formats. + """ + resource_url = normalize_resource_url(resource_url) + resource_url = add_py3_data(resource_url) + + # Determine the format of the resource. 
+ if format == "auto": + resource_url_parts = resource_url.split(".") + ext = resource_url_parts[-1] + if ext == "gz": + ext = resource_url_parts[-2] + format = AUTO_FORMATS.get(ext) + if format is None: + raise ValueError( + "Could not determine format for %s based " + 'on its file\nextension; use the "format" ' + "argument to specify the format explicitly." % resource_url + ) + + if format not in FORMATS: + raise ValueError(f"Unknown format type: {format}!") + + # If we've cached the resource, then just return it. + if cache: + resource_val = _resource_cache.get((resource_url, format)) + if resource_val is not None: + if verbose: + print(f"<>") + return resource_val + + # Let the user know what's going on. + if verbose: + print(f"<>") + + # Load the resource. + opened_resource = _open(resource_url) + + if format == "raw": + resource_val = opened_resource.read() + elif format == "pickle": + resource_val = pickle.load(opened_resource) + elif format == "json": + import json + + from nltk.jsontags import json_tags + + resource_val = json.load(opened_resource) + tag = None + if len(resource_val) != 1: + tag = next(resource_val.keys()) + if tag not in json_tags: + raise ValueError("Unknown json tag.") + elif format == "yaml": + import yaml + + resource_val = yaml.safe_load(opened_resource) + else: + # The resource is a text format. + binary_data = opened_resource.read() + if encoding is not None: + string_data = binary_data.decode(encoding) + else: + try: + string_data = binary_data.decode("utf-8") + except UnicodeDecodeError: + string_data = binary_data.decode("latin-1") + if format == "text": + resource_val = string_data + elif format == "cfg": + resource_val = grammar.CFG.fromstring(string_data, encoding=encoding) + elif format == "pcfg": + resource_val = grammar.PCFG.fromstring(string_data, encoding=encoding) + elif format == "fcfg": + resource_val = grammar.FeatureGrammar.fromstring( + string_data, + logic_parser=logic_parser, + fstruct_reader=fstruct_reader, + encoding=encoding, + ) + elif format == "fol": + resource_val = sem.read_logic( + string_data, + logic_parser=sem.logic.LogicParser(), + encoding=encoding, + ) + elif format == "logic": + resource_val = sem.read_logic( + string_data, logic_parser=logic_parser, encoding=encoding + ) + elif format == "val": + resource_val = sem.read_valuation(string_data, encoding=encoding) + else: + raise AssertionError( + "Internal NLTK error: Format %s isn't " + "handled by nltk.data.load()" % (format,) + ) + + opened_resource.close() + + # If requested, add it to the cache. + if cache: + try: + _resource_cache[(resource_url, format)] = resource_val + # TODO: add this line + # print('<>' % (resource_url,)) + except TypeError: + # We can't create weak references to some object types, like + # strings and tuples. For now, just don't cache them. + pass + + return resource_val + + +def show_cfg(resource_url, escape="##"): + """ + Write out a grammar file, ignoring escaped and empty lines. + + :type resource_url: str + :param resource_url: A URL specifying where the resource should be + loaded from. The default protocol is "nltk:", which searches + for the file in the the NLTK data package. 
+ :type escape: str + :param escape: Prepended string that signals lines to be ignored + """ + resource_url = normalize_resource_url(resource_url) + resource_val = load(resource_url, format="text", cache=False) + lines = resource_val.splitlines() + for l in lines: + if l.startswith(escape): + continue + if re.match("^$", l): + continue + print(l) + + +def clear_cache(): + """ + Remove all objects from the resource cache. + :see: load() + """ + _resource_cache.clear() + + +def _open(resource_url): + """ + Helper function that returns an open file object for a resource, + given its resource URL. If the given resource URL uses the "nltk:" + protocol, or uses no protocol, then use ``nltk.data.find`` to find + its path, and open it with the given mode; if the resource URL + uses the 'file' protocol, then open the file with the given mode; + otherwise, delegate to ``urllib2.urlopen``. + + :type resource_url: str + :param resource_url: A URL specifying where the resource should be + loaded from. The default protocol is "nltk:", which searches + for the file in the the NLTK data package. + """ + resource_url = normalize_resource_url(resource_url) + protocol, path_ = split_resource_url(resource_url) + + if protocol is None or protocol.lower() == "nltk": + return find(path_, path + [""]).open() + elif protocol.lower() == "file": + # urllib might not use mode='rb', so handle this one ourselves: + return find(path_, [""]).open() + else: + return urlopen(resource_url) + + +###################################################################### +# Lazy Resource Loader +###################################################################### + + +class LazyLoader: + @py3_data + def __init__(self, _path): + self._path = _path + + def __load(self): + resource = load(self._path) + # This is where the magic happens! Transform ourselves into + # the object by modifying our own __dict__ and __class__ to + # match that of `resource`. + self.__dict__ = resource.__dict__ + self.__class__ = resource.__class__ + + def __getattr__(self, attr): + self.__load() + # This looks circular, but its not, since __load() changes our + # __class__ to something new: + return getattr(self, attr) + + def __repr__(self): + self.__load() + # This looks circular, but its not, since __load() changes our + # __class__ to something new: + return repr(self) + + +###################################################################### +# Open-On-Demand ZipFile +###################################################################### + + +class OpenOnDemandZipFile(zipfile.ZipFile): + """ + A subclass of ``zipfile.ZipFile`` that closes its file pointer + whenever it is not using it; and re-opens it when it needs to read + data from the zipfile. This is useful for reducing the number of + open file handles when many zip files are being accessed at once. + ``OpenOnDemandZipFile`` must be constructed from a filename, not a + file-like object (to allow re-opening). ``OpenOnDemandZipFile`` is + read-only (i.e. ``write()`` and ``writestr()`` are disabled. + """ + + @py3_data + def __init__(self, filename): + if not isinstance(filename, str): + raise TypeError("ReopenableZipFile filename must be a string") + zipfile.ZipFile.__init__(self, filename) + assert self.filename == filename + self.close() + # After closing a ZipFile object, the _fileRefCnt needs to be cleared + # for Python2and3 compatible code. 
+ self._fileRefCnt = 0 + + def read(self, name): + assert self.fp is None + self.fp = open(self.filename, "rb") + value = zipfile.ZipFile.read(self, name) + # Ensure that _fileRefCnt needs to be set for Python2and3 compatible code. + # Since we only opened one file here, we add 1. + self._fileRefCnt += 1 + self.close() + return value + + def write(self, *args, **kwargs): + """:raise NotImplementedError: OpenOnDemandZipfile is read-only""" + raise NotImplementedError("OpenOnDemandZipfile is read-only") + + def writestr(self, *args, **kwargs): + """:raise NotImplementedError: OpenOnDemandZipfile is read-only""" + raise NotImplementedError("OpenOnDemandZipfile is read-only") + + def __repr__(self): + return repr("OpenOnDemandZipFile(%r)" % self.filename) + + +###################################################################### +# Seekable Unicode Stream Reader +###################################################################### + + +class SeekableUnicodeStreamReader: + """ + A stream reader that automatically encodes the source byte stream + into unicode (like ``codecs.StreamReader``); but still supports the + ``seek()`` and ``tell()`` operations correctly. This is in contrast + to ``codecs.StreamReader``, which provide *broken* ``seek()`` and + ``tell()`` methods. + + This class was motivated by ``StreamBackedCorpusView``, which + makes extensive use of ``seek()`` and ``tell()``, and needs to be + able to handle unicode-encoded files. + + Note: this class requires stateless decoders. To my knowledge, + this shouldn't cause a problem with any of python's builtin + unicode encodings. + """ + + DEBUG = True # : If true, then perform extra sanity checks. + + @py3_data + def __init__(self, stream, encoding, errors="strict"): + # Rewind the stream to its beginning. + stream.seek(0) + + self.stream = stream + """The underlying stream.""" + + self.encoding = encoding + """The name of the encoding that should be used to encode the + underlying stream.""" + + self.errors = errors + """The error mode that should be used when decoding data from + the underlying stream. Can be 'strict', 'ignore', or + 'replace'.""" + + self.decode = codecs.getdecoder(encoding) + """The function that is used to decode byte strings into + unicode strings.""" + + self.bytebuffer = b"" + """A buffer to use bytes that have been read but have not yet + been decoded. This is only used when the final bytes from + a read do not form a complete encoding for a character.""" + + self.linebuffer = None + """A buffer used by ``readline()`` to hold characters that have + been read, but have not yet been returned by ``read()`` or + ``readline()``. This buffer consists of a list of unicode + strings, where each string corresponds to a single line. + The final element of the list may or may not be a complete + line. Note that the existence of a linebuffer makes the + ``tell()`` operation more complex, because it must backtrack + to the beginning of the buffer to determine the correct + file position in the underlying byte stream.""" + + self._rewind_checkpoint = 0 + """The file position at which the most recent read on the + underlying stream began. This is used, together with + ``_rewind_numchars``, to backtrack to the beginning of + ``linebuffer`` (which is required by ``tell()``).""" + + self._rewind_numchars = None + """The number of characters that have been returned since the + read that started at ``_rewind_checkpoint``. 
This is used, + together with ``_rewind_checkpoint``, to backtrack to the + beginning of ``linebuffer`` (which is required by ``tell()``).""" + + self._bom = self._check_bom() + """The length of the byte order marker at the beginning of + the stream (or None for no byte order marker).""" + + # ///////////////////////////////////////////////////////////////// + # Read methods + # ///////////////////////////////////////////////////////////////// + + def read(self, size=None): + """ + Read up to ``size`` bytes, decode them using this reader's + encoding, and return the resulting unicode string. + + :param size: The maximum number of bytes to read. If not + specified, then read as many bytes as possible. + :type size: int + :rtype: unicode + """ + chars = self._read(size) + + # If linebuffer is not empty, then include it in the result + if self.linebuffer: + chars = "".join(self.linebuffer) + chars + self.linebuffer = None + self._rewind_numchars = None + + return chars + + def discard_line(self): + if self.linebuffer and len(self.linebuffer) > 1: + line = self.linebuffer.pop(0) + self._rewind_numchars += len(line) + else: + self.stream.readline() + + def readline(self, size=None): + """ + Read a line of text, decode it using this reader's encoding, + and return the resulting unicode string. + + :param size: The maximum number of bytes to read. If no + newline is encountered before ``size`` bytes have been read, + then the returned value may not be a complete line of text. + :type size: int + """ + # If we have a non-empty linebuffer, then return the first + # line from it. (Note that the last element of linebuffer may + # not be a complete line; so let _read() deal with it.) + if self.linebuffer and len(self.linebuffer) > 1: + line = self.linebuffer.pop(0) + self._rewind_numchars += len(line) + return line + + readsize = size or 72 + chars = "" + + # If there's a remaining incomplete line in the buffer, add it. + if self.linebuffer: + chars += self.linebuffer.pop() + self.linebuffer = None + + while True: + startpos = self.stream.tell() - len(self.bytebuffer) + new_chars = self._read(readsize) + + # If we're at a '\r', then read one extra character, since + # it might be a '\n', to get the proper line ending. + if new_chars and new_chars.endswith("\r"): + new_chars += self._read(1) + + chars += new_chars + lines = chars.splitlines(True) + if len(lines) > 1: + line = lines[0] + self.linebuffer = lines[1:] + self._rewind_numchars = len(new_chars) - (len(chars) - len(line)) + self._rewind_checkpoint = startpos + break + elif len(lines) == 1: + line0withend = lines[0] + line0withoutend = lines[0].splitlines(False)[0] + if line0withend != line0withoutend: # complete line + line = line0withend + break + + if not new_chars or size is not None: + line = chars + break + + # Read successively larger blocks of text. + if readsize < 8000: + readsize *= 2 + + return line + + def readlines(self, sizehint=None, keepends=True): + """ + Read this file's contents, decode them using this reader's + encoding, and return it as a list of unicode lines. + + :rtype: list(unicode) + :param sizehint: Ignored. + :param keepends: If false, then strip newlines. 
+ """ + return self.read().splitlines(keepends) + + def next(self): + """Return the next decoded line from the underlying stream.""" + line = self.readline() + if line: + return line + else: + raise StopIteration + + def __next__(self): + return self.next() + + def __iter__(self): + """Return self""" + return self + + def __del__(self): + # let garbage collector deal with still opened streams + if not self.closed: + self.close() + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def xreadlines(self): + """Return self""" + return self + + # ///////////////////////////////////////////////////////////////// + # Pass-through methods & properties + # ///////////////////////////////////////////////////////////////// + + @property + def closed(self): + """True if the underlying stream is closed.""" + return self.stream.closed + + @property + def name(self): + """The name of the underlying stream.""" + return self.stream.name + + @property + def mode(self): + """The mode of the underlying stream.""" + return self.stream.mode + + def close(self): + """ + Close the underlying stream. + """ + self.stream.close() + + # ///////////////////////////////////////////////////////////////// + # Seek and tell + # ///////////////////////////////////////////////////////////////// + + def seek(self, offset, whence=0): + """ + Move the stream to a new file position. If the reader is + maintaining any buffers, then they will be cleared. + + :param offset: A byte count offset. + :param whence: If 0, then the offset is from the start of the file + (offset should be positive), if 1, then the offset is from the + current position (offset may be positive or negative); and if 2, + then the offset is from the end of the file (offset should + typically be negative). + """ + if whence == 1: + raise ValueError( + "Relative seek is not supported for " + "SeekableUnicodeStreamReader -- consider " + "using char_seek_forward() instead." + ) + self.stream.seek(offset, whence) + self.linebuffer = None + self.bytebuffer = b"" + self._rewind_numchars = None + self._rewind_checkpoint = self.stream.tell() + + def char_seek_forward(self, offset): + """ + Move the read pointer forward by ``offset`` characters. + """ + if offset < 0: + raise ValueError("Negative offsets are not supported") + # Clear all buffers. + self.seek(self.tell()) + # Perform the seek operation. + self._char_seek_forward(offset) + + def _char_seek_forward(self, offset, est_bytes=None): + """ + Move the file position forward by ``offset`` characters, + ignoring all buffers. + + :param est_bytes: A hint, giving an estimate of the number of + bytes that will be needed to move forward by ``offset`` chars. + Defaults to ``offset``. + """ + if est_bytes is None: + est_bytes = offset + bytes = b"" + + while True: + # Read in a block of bytes. + newbytes = self.stream.read(est_bytes - len(bytes)) + bytes += newbytes + + # Decode the bytes to characters. + chars, bytes_decoded = self._incr_decode(bytes) + + # If we got the right number of characters, then seek + # backwards over any truncated characters, and return. + if len(chars) == offset: + self.stream.seek(-len(bytes) + bytes_decoded, 1) + return + + # If we went too far, then we can back-up until we get it + # right, using the bytes we've already read. + if len(chars) > offset: + while len(chars) > offset: + # Assume at least one byte/char. 
+ est_bytes += offset - len(chars) + chars, bytes_decoded = self._incr_decode(bytes[:est_bytes]) + self.stream.seek(-len(bytes) + bytes_decoded, 1) + return + + # Otherwise, we haven't read enough bytes yet; loop again. + est_bytes += offset - len(chars) + + def tell(self): + """ + Return the current file position on the underlying byte + stream. If this reader is maintaining any buffers, then the + returned file position will be the position of the beginning + of those buffers. + """ + # If nothing's buffered, then just return our current filepos: + if self.linebuffer is None: + return self.stream.tell() - len(self.bytebuffer) + + # Otherwise, we'll need to backtrack the filepos until we + # reach the beginning of the buffer. + + # Store our original file position, so we can return here. + orig_filepos = self.stream.tell() + + # Calculate an estimate of where we think the newline is. + bytes_read = (orig_filepos - len(self.bytebuffer)) - self._rewind_checkpoint + buf_size = sum(len(line) for line in self.linebuffer) + est_bytes = int( + bytes_read * self._rewind_numchars / (self._rewind_numchars + buf_size) + ) + + self.stream.seek(self._rewind_checkpoint) + self._char_seek_forward(self._rewind_numchars, est_bytes) + filepos = self.stream.tell() + + # Sanity check + if self.DEBUG: + self.stream.seek(filepos) + check1 = self._incr_decode(self.stream.read(50))[0] + check2 = "".join(self.linebuffer) + assert check1.startswith(check2) or check2.startswith(check1) + + # Return to our original filepos (so we don't have to throw + # out our buffer.) + self.stream.seek(orig_filepos) + + # Return the calculated filepos + return filepos + + # ///////////////////////////////////////////////////////////////// + # Helper methods + # ///////////////////////////////////////////////////////////////// + + def _read(self, size=None): + """ + Read up to ``size`` bytes from the underlying stream, decode + them using this reader's encoding, and return the resulting + unicode string. ``linebuffer`` is not included in the result. + """ + if size == 0: + return "" + + # Skip past the byte order marker, if present. + if self._bom and self.stream.tell() == 0: + self.stream.read(self._bom) + + # Read the requested number of bytes. + if size is None: + new_bytes = self.stream.read() + else: + new_bytes = self.stream.read(size) + bytes = self.bytebuffer + new_bytes + + # Decode the bytes into unicode characters + chars, bytes_decoded = self._incr_decode(bytes) + + # If we got bytes but couldn't decode any, then read further. + if (size is not None) and (not chars) and (len(new_bytes) > 0): + while not chars: + new_bytes = self.stream.read(1) + if not new_bytes: + break # end of file. + bytes += new_bytes + chars, bytes_decoded = self._incr_decode(bytes) + + # Record any bytes we didn't consume. + self.bytebuffer = bytes[bytes_decoded:] + + # Return the result + return chars + + def _incr_decode(self, bytes): + """ + Decode the given byte string into a unicode string, using this + reader's encoding. If an exception is encountered that + appears to be caused by a truncation error, then just decode + the byte string without the bytes that cause the trunctaion + error. + + Return a tuple ``(chars, num_consumed)``, where ``chars`` is + the decoded unicode string, and ``num_consumed`` is the + number of bytes that were consumed. 
+ """ + while True: + try: + return self.decode(bytes, "strict") + except UnicodeDecodeError as exc: + # If the exception occurs at the end of the string, + # then assume that it's a truncation error. + if exc.end == len(bytes): + return self.decode(bytes[: exc.start], self.errors) + + # Otherwise, if we're being strict, then raise it. + elif self.errors == "strict": + raise + + # If we're not strict, then re-process it with our + # errors setting. This *may* raise an exception. + else: + return self.decode(bytes, self.errors) + + _BOM_TABLE = { + "utf8": [(codecs.BOM_UTF8, None)], + "utf16": [(codecs.BOM_UTF16_LE, "utf16-le"), (codecs.BOM_UTF16_BE, "utf16-be")], + "utf16le": [(codecs.BOM_UTF16_LE, None)], + "utf16be": [(codecs.BOM_UTF16_BE, None)], + "utf32": [(codecs.BOM_UTF32_LE, "utf32-le"), (codecs.BOM_UTF32_BE, "utf32-be")], + "utf32le": [(codecs.BOM_UTF32_LE, None)], + "utf32be": [(codecs.BOM_UTF32_BE, None)], + } + + def _check_bom(self): + # Normalize our encoding name + enc = re.sub("[ -]", "", self.encoding.lower()) + + # Look up our encoding in the BOM table. + bom_info = self._BOM_TABLE.get(enc) + + if bom_info: + # Read a prefix, to check against the BOM(s) + bytes = self.stream.read(16) + self.stream.seek(0) + + # Check for each possible BOM. + for (bom, new_encoding) in bom_info: + if bytes.startswith(bom): + if new_encoding: + self.encoding = new_encoding + return len(bom) + + return None + + +__all__ = [ + "path", + "PathPointer", + "FileSystemPathPointer", + "BufferedGzipFile", + "GzipFileSystemPathPointer", + "GzipFileSystemPathPointer", + "find", + "retrieve", + "FORMATS", + "AUTO_FORMATS", + "load", + "show_cfg", + "clear_cache", + "LazyLoader", + "OpenOnDemandZipFile", + "GzipFileSystemPathPointer", + "SeekableUnicodeStreamReader", +] diff --git a/venv/lib/python3.10/site-packages/nltk/decorators.py b/venv/lib/python3.10/site-packages/nltk/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..3a0fae1852afd47a2290b41ce94843aca36aa05f --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/decorators.py @@ -0,0 +1,251 @@ +""" +Decorator module by Michele Simionato +Copyright Michele Simionato, distributed under the terms of the BSD License (see below). +http://www.phyast.pitt.edu/~micheles/python/documentation.html + +Included in NLTK for its support of a nice memoization decorator. +""" + +__docformat__ = "restructuredtext en" + +## The basic trick is to generate the source code for the decorated function +## with the right signature and to evaluate it. +## Uncomment the statement 'print >> sys.stderr, func_src' in _decorator +## to understand what is going on. + +__all__ = ["decorator", "new_wrapper", "getinfo"] + +import sys + +# Hack to keep NLTK's "tokenize" module from colliding with the "tokenize" in +# the Python standard library. +OLD_SYS_PATH = sys.path[:] +sys.path = [p for p in sys.path if p and "nltk" not in str(p)] +import inspect + +sys.path = OLD_SYS_PATH + + +def __legacysignature(signature): + """ + For retrocompatibility reasons, we don't use a standard Signature. + Instead, we use the string generated by this method. + Basically, from a Signature we create a string and remove the default values. 
+ """ + listsignature = str(signature)[1:-1].split(",") + for counter, param in enumerate(listsignature): + if param.count("=") > 0: + listsignature[counter] = param[0 : param.index("=")].strip() + else: + listsignature[counter] = param.strip() + return ", ".join(listsignature) + + +def getinfo(func): + """ + Returns an info dictionary containing: + - name (the name of the function : str) + - argnames (the names of the arguments : list) + - defaults (the values of the default arguments : tuple) + - signature (the signature : str) + - fullsignature (the full signature : Signature) + - doc (the docstring : str) + - module (the module name : str) + - dict (the function __dict__ : str) + + >>> def f(self, x=1, y=2, *args, **kw): pass + + >>> info = getinfo(f) + + >>> info["name"] + 'f' + >>> info["argnames"] + ['self', 'x', 'y', 'args', 'kw'] + + >>> info["defaults"] + (1, 2) + + >>> info["signature"] + 'self, x, y, *args, **kw' + + >>> info["fullsignature"] + + """ + assert inspect.ismethod(func) or inspect.isfunction(func) + argspec = inspect.getfullargspec(func) + regargs, varargs, varkwargs = argspec[:3] + argnames = list(regargs) + if varargs: + argnames.append(varargs) + if varkwargs: + argnames.append(varkwargs) + fullsignature = inspect.signature(func) + # Convert Signature to str + signature = __legacysignature(fullsignature) + + # pypy compatibility + if hasattr(func, "__closure__"): + _closure = func.__closure__ + _globals = func.__globals__ + else: + _closure = func.func_closure + _globals = func.func_globals + + return dict( + name=func.__name__, + argnames=argnames, + signature=signature, + fullsignature=fullsignature, + defaults=func.__defaults__, + doc=func.__doc__, + module=func.__module__, + dict=func.__dict__, + globals=_globals, + closure=_closure, + ) + + +def update_wrapper(wrapper, model, infodict=None): + "akin to functools.update_wrapper" + infodict = infodict or getinfo(model) + wrapper.__name__ = infodict["name"] + wrapper.__doc__ = infodict["doc"] + wrapper.__module__ = infodict["module"] + wrapper.__dict__.update(infodict["dict"]) + wrapper.__defaults__ = infodict["defaults"] + wrapper.undecorated = model + return wrapper + + +def new_wrapper(wrapper, model): + """ + An improvement over functools.update_wrapper. The wrapper is a generic + callable object. It works by generating a copy of the wrapper with the + right signature and by updating the copy, not the original. + Moreovoer, 'model' can be a dictionary with keys 'name', 'doc', 'module', + 'dict', 'defaults'. + """ + if isinstance(model, dict): + infodict = model + else: # assume model is a function + infodict = getinfo(model) + assert ( + not "_wrapper_" in infodict["argnames"] + ), '"_wrapper_" is a reserved argument name!' + src = "lambda %(signature)s: _wrapper_(%(signature)s)" % infodict + funcopy = eval(src, dict(_wrapper_=wrapper)) + return update_wrapper(funcopy, model, infodict) + + +# helper used in decorator_factory +def __call__(self, func): + return new_wrapper(lambda *a, **k: self.call(func, *a, **k), func) + + +def decorator_factory(cls): + """ + Take a class with a ``.caller`` method and return a callable decorator + object. It works by adding a suitable __call__ method to the class; + it raises a TypeError if the class already has a nontrivial __call__ + method. 
+ """ + attrs = set(dir(cls)) + if "__call__" in attrs: + raise TypeError( + "You cannot decorate a class with a nontrivial " "__call__ method" + ) + if "call" not in attrs: + raise TypeError("You cannot decorate a class without a " ".call method") + cls.__call__ = __call__ + return cls + + +def decorator(caller): + """ + General purpose decorator factory: takes a caller function as + input and returns a decorator with the same attributes. + A caller function is any function like this:: + + def caller(func, *args, **kw): + # do something + return func(*args, **kw) + + Here is an example of usage: + + >>> @decorator + ... def chatty(f, *args, **kw): + ... print("Calling %r" % f.__name__) + ... return f(*args, **kw) + + >>> chatty.__name__ + 'chatty' + + >>> @chatty + ... def f(): pass + ... + >>> f() + Calling 'f' + + decorator can also take in input a class with a .caller method; in this + case it converts the class into a factory of callable decorator objects. + See the documentation for an example. + """ + if inspect.isclass(caller): + return decorator_factory(caller) + + def _decorator(func): # the real meat is here + infodict = getinfo(func) + argnames = infodict["argnames"] + assert not ( + "_call_" in argnames or "_func_" in argnames + ), "You cannot use _call_ or _func_ as argument names!" + src = "lambda %(signature)s: _call_(_func_, %(signature)s)" % infodict + # import sys; print >> sys.stderr, src # for debugging purposes + dec_func = eval(src, dict(_func_=func, _call_=caller)) + return update_wrapper(dec_func, func, infodict) + + return update_wrapper(_decorator, caller) + + +def getattr_(obj, name, default_thunk): + "Similar to .setdefault in dictionaries." + try: + return getattr(obj, name) + except AttributeError: + default = default_thunk() + setattr(obj, name, default) + return default + + +@decorator +def memoize(func, *args): + dic = getattr_(func, "memoize_dic", dict) + # memoize_dic is created at the first call + if args in dic: + return dic[args] + result = func(*args) + dic[args] = result + return result + + +########################## LEGALESE ############################### + +## Redistributions of source code must retain the above copyright +## notice, this list of conditions and the following disclaimer. +## Redistributions in bytecode form must reproduce the above copyright +## notice, this list of conditions and the following disclaimer in +## the documentation and/or other materials provided with the +## distribution. + +## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +## DAMAGE. 
diff --git a/venv/lib/python3.10/site-packages/nltk/downloader.py b/venv/lib/python3.10/site-packages/nltk/downloader.py new file mode 100644 index 0000000000000000000000000000000000000000..71519238755062c698a1d82ffa0984b3ccb5ba92 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/downloader.py @@ -0,0 +1,2559 @@ +# Natural Language Toolkit: Corpus & Model Downloader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +The NLTK corpus and module downloader. This module defines several +interfaces which can be used to download corpora, models, and other +data packages that can be used with NLTK. + +Downloading Packages +==================== +If called with no arguments, ``download()`` will display an interactive +interface which can be used to download and install new packages. +If Tkinter is available, then a graphical interface will be shown, +otherwise a simple text interface will be provided. + +Individual packages can be downloaded by calling the ``download()`` +function with a single argument, giving the package identifier for the +package that should be downloaded: + + >>> download('treebank') # doctest: +SKIP + [nltk_data] Downloading package 'treebank'... + [nltk_data] Unzipping corpora/treebank.zip. + +NLTK also provides a number of \"package collections\", consisting of +a group of related packages. To download all packages in a +colleciton, simply call ``download()`` with the collection's +identifier: + + >>> download('all-corpora') # doctest: +SKIP + [nltk_data] Downloading package 'abc'... + [nltk_data] Unzipping corpora/abc.zip. + [nltk_data] Downloading package 'alpino'... + [nltk_data] Unzipping corpora/alpino.zip. + ... + [nltk_data] Downloading package 'words'... + [nltk_data] Unzipping corpora/words.zip. + +Download Directory +================== +By default, packages are installed in either a system-wide directory +(if Python has sufficient access to write to it); or in the current +user's home directory. However, the ``download_dir`` argument may be +used to specify a different installation target, if desired. + +See ``Downloader.default_download_dir()`` for more a detailed +description of how the default download directory is chosen. + +NLTK Download Server +==================== +Before downloading any packages, the corpus and module downloader +contacts the NLTK download server, to retrieve an index file +describing the available packages. By default, this index file is +loaded from ``https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml``. +If necessary, it is possible to create a new ``Downloader`` object, +specifying a different URL for the package index file. + +Usage:: + + python nltk/downloader.py [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS + +or:: + + python -m nltk.downloader [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS +""" +# ---------------------------------------------------------------------- + +""" + + 0 1 2 3 +[label][----][label][----] +[column ][column ] + +Notes +===== +Handling data files.. Some questions: + +* Should the data files be kept zipped or unzipped? I say zipped. + +* Should the data files be kept in svn at all? Advantages: history; + automatic version numbers; 'svn up' could be used rather than the + downloader to update the corpora. Disadvantages: they're big, + which makes working from svn a bit of a pain. And we're planning + to potentially make them much bigger. 
I don't think we want + people to have to download 400MB corpora just to use nltk from svn. + +* Compromise: keep the data files in trunk/data rather than in + trunk/nltk. That way you can check them out in svn if you want + to; but you don't need to, and you can use the downloader instead. + +* Also: keep models in mind. When we change the code, we'd + potentially like the models to get updated. This could require a + little thought. + +* So.. let's assume we have a trunk/data directory, containing a bunch + of packages. The packages should be kept as zip files, because we + really shouldn't be editing them much (well -- we may edit models + more, but they tend to be binary-ish files anyway, where diffs + aren't that helpful). So we'll have trunk/data, with a bunch of + files like abc.zip and treebank.zip and propbank.zip. For each + package we could also have eg treebank.xml and propbank.xml, + describing the contents of the package (name, copyright, license, + etc). Collections would also have .xml files. Finally, we would + pull all these together to form a single index.xml file. Some + directory structure wouldn't hurt. So how about:: + + /trunk/data/ ....................... root of data svn + index.xml ........................ main index file + src/ ............................. python scripts + packages/ ........................ dir for packages + corpora/ ....................... zip & xml files for corpora + grammars/ ...................... zip & xml files for grammars + taggers/ ....................... zip & xml files for taggers + tokenizers/ .................... zip & xml files for tokenizers + etc. + collections/ ..................... xml files for collections + + Where the root (/trunk/data) would contain a makefile; and src/ + would contain a script to update the info.xml file. It could also + contain scripts to rebuild some of the various model files. The + script that builds index.xml should probably check that each zip + file expands entirely into a single subdir, whose name matches the + package's uid. + +Changes I need to make: + - in index: change "size" to "filesize" or "compressed-size" + - in index: add "unzipped-size" + - when checking status: check both compressed & uncompressed size. + uncompressed size is important to make sure we detect a problem + if something got partially unzipped. define new status values + to differentiate stale vs corrupt vs corruptly-uncompressed?? + (we shouldn't need to re-download the file if the zip file is ok + but it didn't get uncompressed fully.) + - add other fields to the index: author, license, copyright, contact, + etc. + +the current grammars/ package would become a single new package (eg +toy-grammars or book-grammars). + +xml file should have: + - authorship info + - license info + - copyright info + - contact info + - info about what type of data/annotation it contains? + - recommended corpus reader? + +collections can contain other collections. they can also contain +multiple package types (corpora & models). Have a single 'basics' +package that includes everything we talk about in the book? + +n.b.: there will have to be a fallback to the punkt tokenizer, in case +they didn't download that model. + +default: unzip or not? 
+ +""" +import functools +import itertools +import os +import shutil +import subprocess +import sys +import textwrap +import threading +import time +import warnings +import zipfile +from hashlib import md5 +from xml.etree import ElementTree + +try: + TKINTER = True + from tkinter import Button, Canvas, Entry, Frame, IntVar, Label, Menu, TclError, Tk + from tkinter.messagebox import showerror + + from nltk.draw.table import Table + from nltk.draw.util import ShowText +except ImportError: + TKINTER = False + TclError = ValueError + +from urllib.error import HTTPError, URLError +from urllib.request import urlopen + +import nltk + +# urllib2 = nltk.internals.import_from_stdlib('urllib2') + + +###################################################################### +# Directory entry objects (from the data server's index file) +###################################################################### + + +class Package: + """ + A directory entry for a downloadable package. These entries are + extracted from the XML index file that is downloaded by + ``Downloader``. Each package consists of a single file; but if + that file is a zip file, then it can be automatically decompressed + when the package is installed. + """ + + def __init__( + self, + id, + url, + name=None, + subdir="", + size=None, + unzipped_size=None, + checksum=None, + svn_revision=None, + copyright="Unknown", + contact="Unknown", + license="Unknown", + author="Unknown", + unzip=True, + **kw, + ): + self.id = id + """A unique identifier for this package.""" + + self.name = name or id + """A string name for this package.""" + + self.subdir = subdir + """The subdirectory where this package should be installed. + E.g., ``'corpora'`` or ``'taggers'``.""" + + self.url = url + """A URL that can be used to download this package's file.""" + + self.size = int(size) + """The filesize (in bytes) of the package file.""" + + self.unzipped_size = int(unzipped_size) + """The total filesize of the files contained in the package's + zipfile.""" + + self.checksum = checksum + """The MD-5 checksum of the package file.""" + + self.svn_revision = svn_revision + """A subversion revision number for this package.""" + + self.copyright = copyright + """Copyright holder for this package.""" + + self.contact = contact + """Name & email of the person who should be contacted with + questions about this package.""" + + self.license = license + """License information for this package.""" + + self.author = author + """Author of this package.""" + + ext = os.path.splitext(url.split("/")[-1])[1] + self.filename = os.path.join(subdir, id + ext) + """The filename that should be used for this package's file. It + is formed by joining ``self.subdir`` with ``self.id``, and + using the same extension as ``url``.""" + + self.unzip = bool(int(unzip)) # '0' or '1' + """A flag indicating whether this corpus should be unzipped by + default.""" + + # Include any other attributes provided by the XML file. + self.__dict__.update(kw) + + @staticmethod + def fromxml(xml): + if isinstance(xml, str): + xml = ElementTree.parse(xml) + for key in xml.attrib: + xml.attrib[key] = str(xml.attrib[key]) + return Package(**xml.attrib) + + def __lt__(self, other): + return self.id < other.id + + def __repr__(self): + return "" % self.id + + +class Collection: + """ + A directory entry for a collection of downloadable packages. + These entries are extracted from the XML index file that is + downloaded by ``Downloader``. 
+ """ + + def __init__(self, id, children, name=None, **kw): + self.id = id + """A unique identifier for this collection.""" + + self.name = name or id + """A string name for this collection.""" + + self.children = children + """A list of the ``Collections`` or ``Packages`` directly + contained by this collection.""" + + self.packages = None + """A list of ``Packages`` contained by this collection or any + collections it recursively contains.""" + + # Include any other attributes provided by the XML file. + self.__dict__.update(kw) + + @staticmethod + def fromxml(xml): + if isinstance(xml, str): + xml = ElementTree.parse(xml) + for key in xml.attrib: + xml.attrib[key] = str(xml.attrib[key]) + children = [child.get("ref") for child in xml.findall("item")] + return Collection(children=children, **xml.attrib) + + def __lt__(self, other): + return self.id < other.id + + def __repr__(self): + return "" % self.id + + +###################################################################### +# Message Passing Objects +###################################################################### + + +class DownloaderMessage: + """A status message object, used by ``incr_download`` to + communicate its progress.""" + + +class StartCollectionMessage(DownloaderMessage): + """Data server has started working on a collection of packages.""" + + def __init__(self, collection): + self.collection = collection + + +class FinishCollectionMessage(DownloaderMessage): + """Data server has finished working on a collection of packages.""" + + def __init__(self, collection): + self.collection = collection + + +class StartPackageMessage(DownloaderMessage): + """Data server has started working on a package.""" + + def __init__(self, package): + self.package = package + + +class FinishPackageMessage(DownloaderMessage): + """Data server has finished working on a package.""" + + def __init__(self, package): + self.package = package + + +class StartDownloadMessage(DownloaderMessage): + """Data server has started downloading a package.""" + + def __init__(self, package): + self.package = package + + +class FinishDownloadMessage(DownloaderMessage): + """Data server has finished downloading a package.""" + + def __init__(self, package): + self.package = package + + +class StartUnzipMessage(DownloaderMessage): + """Data server has started unzipping a package.""" + + def __init__(self, package): + self.package = package + + +class FinishUnzipMessage(DownloaderMessage): + """Data server has finished unzipping a package.""" + + def __init__(self, package): + self.package = package + + +class UpToDateMessage(DownloaderMessage): + """The package download file is already up-to-date""" + + def __init__(self, package): + self.package = package + + +class StaleMessage(DownloaderMessage): + """The package download file is out-of-date or corrupt""" + + def __init__(self, package): + self.package = package + + +class ErrorMessage(DownloaderMessage): + """Data server encountered an error""" + + def __init__(self, package, message): + self.package = package + if isinstance(message, Exception): + self.message = str(message) + else: + self.message = message + + +class ProgressMessage(DownloaderMessage): + """Indicates how much progress the data server has made""" + + def __init__(self, progress): + self.progress = progress + + +class SelectDownloadDirMessage(DownloaderMessage): + """Indicates what download directory the data server is using""" + + def __init__(self, download_dir): + self.download_dir = download_dir + + 
+###################################################################### +# NLTK Data Server +###################################################################### + + +class Downloader: + """ + A class used to access the NLTK data server, which can be used to + download corpora and other data packages. + """ + + # ///////////////////////////////////////////////////////////////// + # Configuration + # ///////////////////////////////////////////////////////////////// + + INDEX_TIMEOUT = 60 * 60 # 1 hour + """The amount of time after which the cached copy of the data + server index will be considered 'stale,' and will be + re-downloaded.""" + + DEFAULT_URL = "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml" + """The default URL for the NLTK data server's index. An + alternative URL can be specified when creating a new + ``Downloader`` object.""" + + # ///////////////////////////////////////////////////////////////// + # Status Constants + # ///////////////////////////////////////////////////////////////// + + INSTALLED = "installed" + """A status string indicating that a package or collection is + installed and up-to-date.""" + NOT_INSTALLED = "not installed" + """A status string indicating that a package or collection is + not installed.""" + STALE = "out of date" + """A status string indicating that a package or collection is + corrupt or out-of-date.""" + PARTIAL = "partial" + """A status string indicating that a collection is partially + installed (i.e., only some of its packages are installed.)""" + + # ///////////////////////////////////////////////////////////////// + # Constructor + # ///////////////////////////////////////////////////////////////// + + def __init__(self, server_index_url=None, download_dir=None): + self._url = server_index_url or self.DEFAULT_URL + """The URL for the data server's index file.""" + + self._collections = {} + """Dictionary from collection identifier to ``Collection``""" + + self._packages = {} + """Dictionary from package identifier to ``Package``""" + + self._download_dir = download_dir + """The default directory to which packages will be downloaded.""" + + self._index = None + """The XML index file downloaded from the data server""" + + self._index_timestamp = None + """Time at which ``self._index`` was downloaded. If it is more + than ``INDEX_TIMEOUT`` seconds old, it will be re-downloaded.""" + + self._status_cache = {} + """Dictionary from package/collection identifier to status + string (``INSTALLED``, ``NOT_INSTALLED``, ``STALE``, or + ``PARTIAL``). Cache is used for packages only, not + collections.""" + + self._errors = None + """Flag for telling if all packages got successfully downloaded or not.""" + + # decide where we're going to save things to. 
+ if self._download_dir is None: + self._download_dir = self.default_download_dir() + + # ///////////////////////////////////////////////////////////////// + # Information + # ///////////////////////////////////////////////////////////////// + + def list( + self, + download_dir=None, + show_packages=True, + show_collections=True, + header=True, + more_prompt=False, + skip_installed=False, + ): + lines = 0 # for more_prompt + if download_dir is None: + download_dir = self._download_dir + print("Using default data directory (%s)" % download_dir) + if header: + print("=" * (26 + len(self._url))) + print(" Data server index for <%s>" % self._url) + print("=" * (26 + len(self._url))) + lines += 3 # for more_prompt + stale = partial = False + + categories = [] + if show_packages: + categories.append("packages") + if show_collections: + categories.append("collections") + for category in categories: + print("%s:" % category.capitalize()) + lines += 1 # for more_prompt + for info in sorted(getattr(self, category)(), key=str): + status = self.status(info, download_dir) + if status == self.INSTALLED and skip_installed: + continue + if status == self.STALE: + stale = True + if status == self.PARTIAL: + partial = True + prefix = { + self.INSTALLED: "*", + self.STALE: "-", + self.PARTIAL: "P", + self.NOT_INSTALLED: " ", + }[status] + name = textwrap.fill( + "-" * 27 + (info.name or info.id), 75, subsequent_indent=27 * " " + )[27:] + print(" [{}] {} {}".format(prefix, info.id.ljust(20, "."), name)) + lines += len(name.split("\n")) # for more_prompt + if more_prompt and lines > 20: + user_input = input("Hit Enter to continue: ") + if user_input.lower() in ("x", "q"): + return + lines = 0 + print() + msg = "([*] marks installed packages" + if stale: + msg += "; [-] marks out-of-date or corrupt packages" + if partial: + msg += "; [P] marks partially installed collections" + print(textwrap.fill(msg + ")", subsequent_indent=" ", width=76)) + + def packages(self): + self._update_index() + return self._packages.values() + + def corpora(self): + self._update_index() + return [pkg for (id, pkg) in self._packages.items() if pkg.subdir == "corpora"] + + def models(self): + self._update_index() + return [pkg for (id, pkg) in self._packages.items() if pkg.subdir != "corpora"] + + def collections(self): + self._update_index() + return self._collections.values() + + # ///////////////////////////////////////////////////////////////// + # Downloading + # ///////////////////////////////////////////////////////////////// + + def _info_or_id(self, info_or_id): + if isinstance(info_or_id, str): + return self.info(info_or_id) + else: + return info_or_id + + # [xx] When during downloading is it 'safe' to abort? Only unsafe + # time is *during* an unzip -- we don't want to leave a + # partially-unzipped corpus in place because we wouldn't notice + # it. But if we had the exact total size of the unzipped corpus, + # then that would be fine. Then we could abort anytime we want! + # So this is really what we should do. That way the threaded + # downloader in the gui can just kill the download thread anytime + # it wants. + + def incr_download(self, info_or_id, download_dir=None, force=False): + # If they didn't specify a download_dir, then use the default one. + if download_dir is None: + download_dir = self._download_dir + yield SelectDownloadDirMessage(download_dir) + + # If they gave us a list of ids, then download each one. 
+ if isinstance(info_or_id, (list, tuple)): + yield from self._download_list(info_or_id, download_dir, force) + return + + # Look up the requested collection or package. + try: + info = self._info_or_id(info_or_id) + except (OSError, ValueError) as e: + yield ErrorMessage(None, f"Error loading {info_or_id}: {e}") + return + + # Handle collections. + if isinstance(info, Collection): + yield StartCollectionMessage(info) + yield from self.incr_download(info.children, download_dir, force) + yield FinishCollectionMessage(info) + + # Handle Packages (delegate to a helper function). + else: + yield from self._download_package(info, download_dir, force) + + def _num_packages(self, item): + if isinstance(item, Package): + return 1 + else: + return len(item.packages) + + def _download_list(self, items, download_dir, force): + # Look up the requested items. + for i in range(len(items)): + try: + items[i] = self._info_or_id(items[i]) + except (OSError, ValueError) as e: + yield ErrorMessage(items[i], e) + return + + # Download each item, re-scaling their progress. + num_packages = sum(self._num_packages(item) for item in items) + progress = 0 + for i, item in enumerate(items): + if isinstance(item, Package): + delta = 1.0 / num_packages + else: + delta = len(item.packages) / num_packages + for msg in self.incr_download(item, download_dir, force): + if isinstance(msg, ProgressMessage): + yield ProgressMessage(progress + msg.progress * delta) + else: + yield msg + + progress += 100 * delta + + def _download_package(self, info, download_dir, force): + yield StartPackageMessage(info) + yield ProgressMessage(0) + + # Do we already have the current version? + status = self.status(info, download_dir) + if not force and status == self.INSTALLED: + yield UpToDateMessage(info) + yield ProgressMessage(100) + yield FinishPackageMessage(info) + return + + # Remove the package from our status cache + self._status_cache.pop(info.id, None) + + # Check for (and remove) any old/stale version. + filepath = os.path.join(download_dir, info.filename) + if os.path.exists(filepath): + if status == self.STALE: + yield StaleMessage(info) + os.remove(filepath) + + # Ensure the download_dir exists + if not os.path.exists(download_dir): + os.makedirs(download_dir) + if not os.path.exists(os.path.join(download_dir, info.subdir)): + os.makedirs(os.path.join(download_dir, info.subdir)) + + # Download the file. This will raise an IOError if the url + # is not found. + yield StartDownloadMessage(info) + yield ProgressMessage(5) + try: + infile = urlopen(info.url) + with open(filepath, "wb") as outfile: + num_blocks = max(1, info.size / (1024 * 16)) + for block in itertools.count(): + s = infile.read(1024 * 16) # 16k blocks. + outfile.write(s) + if not s: + break + if block % 2 == 0: # how often? + yield ProgressMessage(min(80, 5 + 75 * (block / num_blocks))) + infile.close() + except OSError as e: + yield ErrorMessage( + info, + "Error downloading %r from <%s>:" "\n %s" % (info.id, info.url, e), + ) + return + yield FinishDownloadMessage(info) + yield ProgressMessage(80) + + # If it's a zipfile, uncompress it. + if info.filename.endswith(".zip"): + zipdir = os.path.join(download_dir, info.subdir) + # Unzip if we're unzipping by default; *or* if it's already + # been unzipped (presumably a previous version). 
+ if info.unzip or os.path.exists(os.path.join(zipdir, info.id)): + yield StartUnzipMessage(info) + for msg in _unzip_iter(filepath, zipdir, verbose=False): + # Somewhat of a hack, but we need a proper package reference + msg.package = info + yield msg + yield FinishUnzipMessage(info) + + yield FinishPackageMessage(info) + + def download( + self, + info_or_id=None, + download_dir=None, + quiet=False, + force=False, + prefix="[nltk_data] ", + halt_on_error=True, + raise_on_error=False, + print_error_to=sys.stderr, + ): + + print_to = functools.partial(print, file=print_error_to) + # If no info or id is given, then use the interactive shell. + if info_or_id is None: + # [xx] hmm -- changing self._download_dir here seems like + # the wrong thing to do. Maybe the _interactive_download + # function should make a new copy of self to use? + if download_dir is not None: + self._download_dir = download_dir + self._interactive_download() + return True + + else: + # Define a helper function for displaying output: + def show(s, prefix2=""): + print_to( + textwrap.fill( + s, + initial_indent=prefix + prefix2, + subsequent_indent=prefix + prefix2 + " " * 4, + ) + ) + + for msg in self.incr_download(info_or_id, download_dir, force): + # Error messages + if isinstance(msg, ErrorMessage): + show(msg.message) + if raise_on_error: + raise ValueError(msg.message) + if halt_on_error: + return False + self._errors = True + if not quiet: + print_to("Error installing package. Retry? [n/y/e]") + choice = input().strip() + if choice in ["y", "Y"]: + if not self.download( + msg.package.id, + download_dir, + quiet, + force, + prefix, + halt_on_error, + raise_on_error, + ): + return False + elif choice in ["e", "E"]: + return False + + # All other messages + if not quiet: + # Collection downloading messages: + if isinstance(msg, StartCollectionMessage): + show("Downloading collection %r" % msg.collection.id) + prefix += " | " + print_to(prefix) + elif isinstance(msg, FinishCollectionMessage): + print_to(prefix) + prefix = prefix[:-4] + if self._errors: + show( + "Downloaded collection %r with errors" + % msg.collection.id + ) + else: + show("Done downloading collection %s" % msg.collection.id) + + # Package downloading messages: + elif isinstance(msg, StartPackageMessage): + show( + "Downloading package %s to %s..." + % (msg.package.id, download_dir) + ) + elif isinstance(msg, UpToDateMessage): + show("Package %s is already up-to-date!" % msg.package.id, " ") + # elif isinstance(msg, StaleMessage): + # show('Package %s is out-of-date or corrupt' % + # msg.package.id, ' ') + elif isinstance(msg, StartUnzipMessage): + show("Unzipping %s." % msg.package.filename, " ") + + # Data directory message: + elif isinstance(msg, SelectDownloadDirMessage): + download_dir = msg.download_dir + return True + + def is_stale(self, info_or_id, download_dir=None): + return self.status(info_or_id, download_dir) == self.STALE + + def is_installed(self, info_or_id, download_dir=None): + return self.status(info_or_id, download_dir) == self.INSTALLED + + def clear_status_cache(self, id=None): + if id is None: + self._status_cache.clear() + else: + self._status_cache.pop(id, None) + + def status(self, info_or_id, download_dir=None): + """ + Return a constant describing the status of the given package + or collection. Status can be one of ``INSTALLED``, + ``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``. 
+ """ + if download_dir is None: + download_dir = self._download_dir + info = self._info_or_id(info_or_id) + + # Handle collections: + if isinstance(info, Collection): + pkg_status = [self.status(pkg.id) for pkg in info.packages] + if self.STALE in pkg_status: + return self.STALE + elif self.PARTIAL in pkg_status: + return self.PARTIAL + elif self.INSTALLED in pkg_status and self.NOT_INSTALLED in pkg_status: + return self.PARTIAL + elif self.NOT_INSTALLED in pkg_status: + return self.NOT_INSTALLED + else: + return self.INSTALLED + + # Handle packages: + else: + filepath = os.path.join(download_dir, info.filename) + if download_dir != self._download_dir: + return self._pkg_status(info, filepath) + else: + if info.id not in self._status_cache: + self._status_cache[info.id] = self._pkg_status(info, filepath) + return self._status_cache[info.id] + + def _pkg_status(self, info, filepath): + if not os.path.exists(filepath): + return self.NOT_INSTALLED + + # Check if the file has the correct size. + try: + filestat = os.stat(filepath) + except OSError: + return self.NOT_INSTALLED + if filestat.st_size != int(info.size): + return self.STALE + + # Check if the file's checksum matches + if md5_hexdigest(filepath) != info.checksum: + return self.STALE + + # If it's a zipfile, and it's been at least partially + # unzipped, then check if it's been fully unzipped. + if filepath.endswith(".zip"): + unzipdir = filepath[:-4] + if not os.path.exists(unzipdir): + return self.INSTALLED # but not unzipped -- ok! + if not os.path.isdir(unzipdir): + return self.STALE + + unzipped_size = sum( + os.stat(os.path.join(d, f)).st_size + for d, _, files in os.walk(unzipdir) + for f in files + ) + if unzipped_size != info.unzipped_size: + return self.STALE + + # Otherwise, everything looks good. + return self.INSTALLED + + def update(self, quiet=False, prefix="[nltk_data] "): + """ + Re-download any packages whose status is STALE. + """ + self.clear_status_cache() + for pkg in self.packages(): + if self.status(pkg) == self.STALE: + self.download(pkg, quiet=quiet, prefix=prefix) + + # ///////////////////////////////////////////////////////////////// + # Index + # ///////////////////////////////////////////////////////////////// + + def _update_index(self, url=None): + """A helper function that ensures that self._index is + up-to-date. If the index is older than self.INDEX_TIMEOUT, + then download it again.""" + # Check if the index is already up-to-date. If so, do nothing. + if not ( + self._index is None + or url is not None + or time.time() - self._index_timestamp > self.INDEX_TIMEOUT + ): + return + + # If a URL was specified, then update our URL. + self._url = url or self._url + + # Download the index file. + self._index = nltk.internals.ElementWrapper( + ElementTree.parse(urlopen(self._url)).getroot() + ) + self._index_timestamp = time.time() + + # Build a dictionary of packages. + packages = [Package.fromxml(p) for p in self._index.findall("packages/package")] + self._packages = {p.id: p for p in packages} + + # Build a dictionary of collections. + collections = [ + Collection.fromxml(c) for c in self._index.findall("collections/collection") + ] + self._collections = {c.id: c for c in collections} + + # Replace identifiers with actual children in collection.children. 
+ for collection in self._collections.values(): + for i, child_id in enumerate(collection.children): + if child_id in self._packages: + collection.children[i] = self._packages[child_id] + elif child_id in self._collections: + collection.children[i] = self._collections[child_id] + else: + print( + "removing collection member with no package: {}".format( + child_id + ) + ) + del collection.children[i] + + # Fill in collection.packages for each collection. + for collection in self._collections.values(): + packages = {} + queue = [collection] + for child in queue: + if isinstance(child, Collection): + queue.extend(child.children) + elif isinstance(child, Package): + packages[child.id] = child + else: + pass + collection.packages = packages.values() + + # Flush the status cache + self._status_cache.clear() + + def index(self): + """ + Return the XML index describing the packages available from + the data server. If necessary, this index will be downloaded + from the data server. + """ + self._update_index() + return self._index + + def info(self, id): + """Return the ``Package`` or ``Collection`` record for the + given item.""" + self._update_index() + if id in self._packages: + return self._packages[id] + if id in self._collections: + return self._collections[id] + raise ValueError("Package %r not found in index" % id) + + def xmlinfo(self, id): + """Return the XML info record for the given item""" + self._update_index() + for package in self._index.findall("packages/package"): + if package.get("id") == id: + return package + for collection in self._index.findall("collections/collection"): + if collection.get("id") == id: + return collection + raise ValueError("Package %r not found in index" % id) + + # ///////////////////////////////////////////////////////////////// + # URL & Data Directory + # ///////////////////////////////////////////////////////////////// + + def _get_url(self): + """The URL for the data server's index file.""" + return self._url + + def _set_url(self, url): + """ + Set a new URL for the data server. If we're unable to contact + the given url, then the original url is kept. + """ + original_url = self._url + try: + self._update_index(url) + except: + self._url = original_url + raise + + url = property(_get_url, _set_url) + + def default_download_dir(self): + """ + Return the directory to which packages will be downloaded by + default. This value can be overridden using the constructor, + or on a case-by-case basis using the ``download_dir`` argument when + calling ``download()``. + + On Windows, the default download directory is + ``PYTHONHOME/lib/nltk``, where *PYTHONHOME* is the + directory containing Python, e.g. ``C:\\Python25``. + + On all other platforms, the default directory is the first of + the following which exists or which can be created with write + permission: ``/usr/share/nltk_data``, ``/usr/local/share/nltk_data``, + ``/usr/lib/nltk_data``, ``/usr/local/lib/nltk_data``, ``~/nltk_data``. + """ + # Check if we are on GAE where we cannot write into filesystem. + if "APPENGINE_RUNTIME" in os.environ: + return + + # Check if we have sufficient permissions to install in a + # variety of system-wide locations. + for nltkdir in nltk.data.path: + if os.path.exists(nltkdir) and nltk.internals.is_writable(nltkdir): + return nltkdir + + # On Windows, use %APPDATA% + if sys.platform == "win32" and "APPDATA" in os.environ: + homedir = os.environ["APPDATA"] + + # Otherwise, install in the user's home directory. 
+ else: + homedir = os.path.expanduser("~/") + if homedir == "~/": + raise ValueError("Could not find a default download directory") + + # append "nltk_data" to the home directory + return os.path.join(homedir, "nltk_data") + + def _get_download_dir(self): + """ + The default directory to which packages will be downloaded. + This defaults to the value returned by ``default_download_dir()``. + To override this default on a case-by-case basis, use the + ``download_dir`` argument when calling ``download()``. + """ + return self._download_dir + + def _set_download_dir(self, download_dir): + self._download_dir = download_dir + # Clear the status cache. + self._status_cache.clear() + + download_dir = property(_get_download_dir, _set_download_dir) + + # ///////////////////////////////////////////////////////////////// + # Interactive Shell + # ///////////////////////////////////////////////////////////////// + + def _interactive_download(self): + # Try the GUI first; if that doesn't work, try the simple + # interactive shell. + if TKINTER: + try: + DownloaderGUI(self).mainloop() + except TclError: + DownloaderShell(self).run() + else: + DownloaderShell(self).run() + + +class DownloaderShell: + def __init__(self, dataserver): + self._ds = dataserver + + def _simple_interactive_menu(self, *options): + print("-" * 75) + spc = (68 - sum(len(o) for o in options)) // (len(options) - 1) * " " + print(" " + spc.join(options)) + print("-" * 75) + + def run(self): + print("NLTK Downloader") + while True: + self._simple_interactive_menu( + "d) Download", + "l) List", + " u) Update", + "c) Config", + "h) Help", + "q) Quit", + ) + user_input = input("Downloader> ").strip() + if not user_input: + print() + continue + command = user_input.lower().split()[0] + args = user_input.split()[1:] + try: + if command == "l": + print() + self._ds.list(self._ds.download_dir, header=False, more_prompt=True) + elif command == "h": + self._simple_interactive_help() + elif command == "c": + self._simple_interactive_config() + elif command in ("q", "x"): + return + elif command == "d": + self._simple_interactive_download(args) + elif command == "u": + self._simple_interactive_update() + else: + print("Command %r unrecognized" % user_input) + except HTTPError as e: + print("Error reading from server: %s" % e) + except URLError as e: + print("Error connecting to server: %s" % e.reason) + # try checking if user_input is a package name, & + # downloading it? 
+ print() + + def _simple_interactive_download(self, args): + if args: + for arg in args: + try: + self._ds.download(arg, prefix=" ") + except (OSError, ValueError) as e: + print(e) + else: + while True: + print() + print("Download which package (l=list; x=cancel)?") + user_input = input(" Identifier> ") + if user_input.lower() == "l": + self._ds.list( + self._ds.download_dir, + header=False, + more_prompt=True, + skip_installed=True, + ) + continue + elif user_input.lower() in ("x", "q", ""): + return + elif user_input: + for id in user_input.split(): + try: + self._ds.download(id, prefix=" ") + except (OSError, ValueError) as e: + print(e) + break + + def _simple_interactive_update(self): + while True: + stale_packages = [] + stale = partial = False + for info in sorted(getattr(self._ds, "packages")(), key=str): + if self._ds.status(info) == self._ds.STALE: + stale_packages.append((info.id, info.name)) + + print() + if stale_packages: + print("Will update following packages (o=ok; x=cancel)") + for pid, pname in stale_packages: + name = textwrap.fill( + "-" * 27 + (pname), 75, subsequent_indent=27 * " " + )[27:] + print(" [ ] {} {}".format(pid.ljust(20, "."), name)) + print() + + user_input = input(" Identifier> ") + if user_input.lower() == "o": + for pid, pname in stale_packages: + try: + self._ds.download(pid, prefix=" ") + except (OSError, ValueError) as e: + print(e) + break + elif user_input.lower() in ("x", "q", ""): + return + else: + print("Nothing to update.") + return + + def _simple_interactive_help(self): + print() + print("Commands:") + print( + " d) Download a package or collection u) Update out of date packages" + ) + print(" l) List packages & collections h) Help") + print(" c) View & Modify Configuration q) Quit") + + def _show_config(self): + print() + print("Data Server:") + print(" - URL: <%s>" % self._ds.url) + print(" - %d Package Collections Available" % len(self._ds.collections())) + print(" - %d Individual Packages Available" % len(self._ds.packages())) + print() + print("Local Machine:") + print(" - Data directory: %s" % self._ds.download_dir) + + def _simple_interactive_config(self): + self._show_config() + while True: + print() + self._simple_interactive_menu( + "s) Show Config", "u) Set Server URL", "d) Set Data Dir", "m) Main Menu" + ) + user_input = input("Config> ").strip().lower() + if user_input == "s": + self._show_config() + elif user_input == "d": + new_dl_dir = input(" New Directory> ").strip() + if new_dl_dir in ("", "x", "q", "X", "Q"): + print(" Cancelled!") + elif os.path.isdir(new_dl_dir): + self._ds.download_dir = new_dl_dir + else: + print("Directory %r not found! Create it first." % new_dl_dir) + elif user_input == "u": + new_url = input(" New URL> ").strip() + if new_url in ("", "x", "q", "X", "Q"): + print(" Cancelled!") + else: + if not new_url.startswith(("http://", "https://")): + new_url = "http://" + new_url + try: + self._ds.url = new_url + except Exception as e: + print(f"Error reading <{new_url!r}>:\n {e}") + elif user_input == "m": + break + + +class DownloaderGUI: + """ + Graphical interface for downloading packages from the NLTK data + server. + """ + + # ///////////////////////////////////////////////////////////////// + # Column Configuration + # ///////////////////////////////////////////////////////////////// + + COLUMNS = [ + "", + "Identifier", + "Name", + "Size", + "Status", + "Unzipped Size", + "Copyright", + "Contact", + "License", + "Author", + "Subdir", + "Checksum", + ] + """A list of the names of columns. 
This controls the order in + which the columns will appear. If this is edited, then + ``_package_to_columns()`` may need to be edited to match.""" + + COLUMN_WEIGHTS = {"": 0, "Name": 5, "Size": 0, "Status": 0} + """A dictionary specifying how columns should be resized when the + table is resized. Columns with weight 0 will not be resized at + all; and columns with high weight will be resized more. + Default weight (for columns not explicitly listed) is 1.""" + + COLUMN_WIDTHS = { + "": 1, + "Identifier": 20, + "Name": 45, + "Size": 10, + "Unzipped Size": 10, + "Status": 12, + } + """A dictionary specifying how wide each column should be, in + characters. The default width (for columns not explicitly + listed) is specified by ``DEFAULT_COLUMN_WIDTH``.""" + + DEFAULT_COLUMN_WIDTH = 30 + """The default width for columns that are not explicitly listed + in ``COLUMN_WIDTHS``.""" + + INITIAL_COLUMNS = ["", "Identifier", "Name", "Size", "Status"] + """The set of columns that should be displayed by default.""" + + # Perform a few import-time sanity checks to make sure that the + # column configuration variables are defined consistently: + for c in COLUMN_WEIGHTS: + assert c in COLUMNS + for c in COLUMN_WIDTHS: + assert c in COLUMNS + for c in INITIAL_COLUMNS: + assert c in COLUMNS + + # ///////////////////////////////////////////////////////////////// + # Color Configuration + # ///////////////////////////////////////////////////////////////// + + _BACKDROP_COLOR = ("#000", "#ccc") + + _ROW_COLOR = { + Downloader.INSTALLED: ("#afa", "#080"), + Downloader.PARTIAL: ("#ffa", "#880"), + Downloader.STALE: ("#faa", "#800"), + Downloader.NOT_INSTALLED: ("#fff", "#888"), + } + + _MARK_COLOR = ("#000", "#ccc") + + # _FRONT_TAB_COLOR = ('#ccf', '#008') + # _BACK_TAB_COLOR = ('#88a', '#448') + _FRONT_TAB_COLOR = ("#fff", "#45c") + _BACK_TAB_COLOR = ("#aaa", "#67a") + + _PROGRESS_COLOR = ("#f00", "#aaa") + + _TAB_FONT = "helvetica -16 bold" + + # ///////////////////////////////////////////////////////////////// + # Constructor + # ///////////////////////////////////////////////////////////////// + + def __init__(self, dataserver, use_threads=True): + self._ds = dataserver + self._use_threads = use_threads + + # For the threaded downloader: + self._download_lock = threading.Lock() + self._download_msg_queue = [] + self._download_abort_queue = [] + self._downloading = False + + # For tkinter after callbacks: + self._afterid = {} + + # A message log. + self._log_messages = [] + self._log_indent = 0 + self._log("NLTK Downloader Started!") + + # Create the main window. + top = self.top = Tk() + top.geometry("+50+50") + top.title("NLTK Downloader") + top.configure(background=self._BACKDROP_COLOR[1]) + + # Set up some bindings now, in case anything goes wrong. + top.bind("", self.destroy) + top.bind("", self.destroy) + self._destroyed = False + + self._column_vars = {} + + # Initialize the GUI. + self._init_widgets() + self._init_menu() + try: + self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + + self._show_info() + self._select_columns() + self._table.select(0) + + # Make sure we get notified when we're destroyed, so we can + # cancel any download in progress. 
+ self._table.bind("", self._destroy) + + def _log(self, msg): + self._log_messages.append( + "{} {}{}".format(time.ctime(), " | " * self._log_indent, msg) + ) + + # ///////////////////////////////////////////////////////////////// + # Internals + # ///////////////////////////////////////////////////////////////// + + def _init_widgets(self): + # Create the top-level frame structures + f1 = Frame(self.top, relief="raised", border=2, padx=8, pady=0) + f1.pack(sid="top", expand=True, fill="both") + f1.grid_rowconfigure(2, weight=1) + f1.grid_columnconfigure(0, weight=1) + Frame(f1, height=8).grid(column=0, row=0) # spacer + tabframe = Frame(f1) + tabframe.grid(column=0, row=1, sticky="news") + tableframe = Frame(f1) + tableframe.grid(column=0, row=2, sticky="news") + buttonframe = Frame(f1) + buttonframe.grid(column=0, row=3, sticky="news") + Frame(f1, height=8).grid(column=0, row=4) # spacer + infoframe = Frame(f1) + infoframe.grid(column=0, row=5, sticky="news") + Frame(f1, height=8).grid(column=0, row=6) # spacer + progressframe = Frame( + self.top, padx=3, pady=3, background=self._BACKDROP_COLOR[1] + ) + progressframe.pack(side="bottom", fill="x") + self.top["border"] = 0 + self.top["highlightthickness"] = 0 + + # Create the tabs + self._tab_names = ["Collections", "Corpora", "Models", "All Packages"] + self._tabs = {} + for i, tab in enumerate(self._tab_names): + label = Label(tabframe, text=tab, font=self._TAB_FONT) + label.pack(side="left", padx=((i + 1) % 2) * 10) + label.bind("", self._select_tab) + self._tabs[tab.lower()] = label + + # Create the table. + column_weights = [self.COLUMN_WEIGHTS.get(column, 1) for column in self.COLUMNS] + self._table = Table( + tableframe, + self.COLUMNS, + column_weights=column_weights, + highlightthickness=0, + listbox_height=16, + reprfunc=self._table_reprfunc, + ) + self._table.columnconfig(0, foreground=self._MARK_COLOR[0]) # marked + for i, column in enumerate(self.COLUMNS): + width = self.COLUMN_WIDTHS.get(column, self.DEFAULT_COLUMN_WIDTH) + self._table.columnconfig(i, width=width) + self._table.pack(expand=True, fill="both") + self._table.focus() + self._table.bind_to_listboxes("", self._download) + self._table.bind("", self._table_mark) + self._table.bind("", self._download) + self._table.bind("", self._prev_tab) + self._table.bind("", self._next_tab) + self._table.bind("", self._mark_all) + + # Create entry boxes for URL & download_dir + infoframe.grid_columnconfigure(1, weight=1) + + info = [ + ("url", "Server Index:", self._set_url), + ("download_dir", "Download Directory:", self._set_download_dir), + ] + self._info = {} + for (i, (key, label, callback)) in enumerate(info): + Label(infoframe, text=label).grid(column=0, row=i, sticky="e") + entry = Entry( + infoframe, + font="courier", + relief="groove", + disabledforeground="#007aff", + foreground="#007aff", + ) + self._info[key] = (entry, callback) + entry.bind("", self._info_save) + entry.bind("", lambda e, key=key: self._info_edit(key)) + entry.grid(column=1, row=i, sticky="ew") + + # If the user edits url or download_dir, and then clicks outside + # the entry box, then save their results. + self.top.bind("", self._info_save) + + # Create Download & Refresh buttons. 
+ self._download_button = Button( + buttonframe, text="Download", command=self._download, width=8 + ) + self._download_button.pack(side="left") + self._refresh_button = Button( + buttonframe, text="Refresh", command=self._refresh, width=8 + ) + self._refresh_button.pack(side="right") + + # Create Progress bar + self._progresslabel = Label( + progressframe, + text="", + foreground=self._BACKDROP_COLOR[0], + background=self._BACKDROP_COLOR[1], + ) + self._progressbar = Canvas( + progressframe, + width=200, + height=16, + background=self._PROGRESS_COLOR[1], + relief="sunken", + border=1, + ) + self._init_progressbar() + self._progressbar.pack(side="right") + self._progresslabel.pack(side="left") + + def _init_menu(self): + menubar = Menu(self.top) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Download", underline=0, command=self._download, accelerator="Return" + ) + filemenu.add_separator() + filemenu.add_command( + label="Change Server Index", + underline=7, + command=lambda: self._info_edit("url"), + ) + filemenu.add_command( + label="Change Download Directory", + underline=0, + command=lambda: self._info_edit("download_dir"), + ) + filemenu.add_separator() + filemenu.add_command(label="Show Log", underline=5, command=self._show_log) + filemenu.add_separator() + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + # Create a menu to control which columns of the table are + # shown. n.b.: we never hide the first two columns (mark and + # identifier). + viewmenu = Menu(menubar, tearoff=0) + for column in self._table.column_names[2:]: + var = IntVar(self.top) + assert column not in self._column_vars + self._column_vars[column] = var + if column in self.INITIAL_COLUMNS: + var.set(1) + viewmenu.add_checkbutton( + label=column, underline=0, variable=var, command=self._select_columns + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + # Create a sort menu + # [xx] this should be selectbuttons; and it should include + # reversed sorts as options. + sortmenu = Menu(menubar, tearoff=0) + for column in self._table.column_names[1:]: + sortmenu.add_command( + label="Sort by %s" % column, + command=(lambda c=column: self._table.sort_by(c, "ascending")), + ) + sortmenu.add_separator() + # sortmenu.add_command(label='Descending Sort:') + for column in self._table.column_names[1:]: + sortmenu.add_command( + label="Reverse sort by %s" % column, + command=(lambda c=column: self._table.sort_by(c, "descending")), + ) + menubar.add_cascade(label="Sort", underline=0, menu=sortmenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + self.top.bind("", self.help) + + self.top.config(menu=menubar) + + def _select_columns(self): + for (column, var) in self._column_vars.items(): + if var.get(): + self._table.show_column(column) + else: + self._table.hide_column(column) + + def _refresh(self): + self._ds.clear_status_cache() + try: + self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + self._table.select(0) + + def _info_edit(self, info_key): + self._info_save() # just in case. 
+ (entry, callback) = self._info[info_key] + entry["state"] = "normal" + entry["relief"] = "sunken" + entry.focus() + + def _info_save(self, e=None): + focus = self._table + for entry, callback in self._info.values(): + if entry["state"] == "disabled": + continue + if e is not None and e.widget is entry and e.keysym != "Return": + focus = entry + else: + entry["state"] = "disabled" + entry["relief"] = "groove" + callback(entry.get()) + focus.focus() + + def _table_reprfunc(self, row, col, val): + if self._table.column_names[col].endswith("Size"): + if isinstance(val, str): + return " %s" % val + elif val < 1024**2: + return " %.1f KB" % (val / 1024.0**1) + elif val < 1024**3: + return " %.1f MB" % (val / 1024.0**2) + else: + return " %.1f GB" % (val / 1024.0**3) + + if col in (0, ""): + return str(val) + else: + return " %s" % val + + def _set_url(self, url): + if url == self._ds.url: + return + try: + self._ds.url = url + self._fill_table() + except OSError as e: + showerror("Error Setting Server Index", str(e)) + self._show_info() + + def _set_download_dir(self, download_dir): + if self._ds.download_dir == download_dir: + return + # check if the dir exists, and if not, ask if we should create it? + + # Clear our status cache, & re-check what's installed + self._ds.download_dir = download_dir + try: + self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + self._show_info() + + def _show_info(self): + print("showing info", self._ds.url) + for entry, cb in self._info.values(): + entry["state"] = "normal" + entry.delete(0, "end") + self._info["url"][0].insert(0, self._ds.url) + self._info["download_dir"][0].insert(0, self._ds.download_dir) + for entry, cb in self._info.values(): + entry["state"] = "disabled" + + def _prev_tab(self, *e): + for i, tab in enumerate(self._tab_names): + if tab.lower() == self._tab and i > 0: + self._tab = self._tab_names[i - 1].lower() + try: + return self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + + def _next_tab(self, *e): + for i, tab in enumerate(self._tab_names): + if tab.lower() == self._tab and i < (len(self._tabs) - 1): + self._tab = self._tab_names[i + 1].lower() + try: + return self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + + def _select_tab(self, event): + self._tab = event.widget["text"].lower() + try: + self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + + _tab = "collections" + # _tab = 'corpora' + _rows = None + + def _fill_table(self): + selected_row = self._table.selected_row() + self._table.clear() + if self._tab == "all packages": + items = self._ds.packages() + elif self._tab == "corpora": + items = self._ds.corpora() + elif self._tab == "models": + items = self._ds.models() + elif self._tab == "collections": + items = self._ds.collections() + else: + assert 0, "bad tab value %r" % self._tab + rows = [self._package_to_columns(item) for item in items] + self._table.extend(rows) + + # Highlight the active tab. 
+ for tab, label in self._tabs.items(): + if tab == self._tab: + label.configure( + foreground=self._FRONT_TAB_COLOR[0], + background=self._FRONT_TAB_COLOR[1], + ) + else: + label.configure( + foreground=self._BACK_TAB_COLOR[0], + background=self._BACK_TAB_COLOR[1], + ) + + self._table.sort_by("Identifier", order="ascending") + self._color_table() + self._table.select(selected_row) + + # This is a hack, because the scrollbar isn't updating its + # position right -- I'm not sure what the underlying cause is + # though. (This is on OS X w/ python 2.5) The length of + # delay that's necessary seems to depend on how fast the + # comptuer is. :-/ + self.top.after(150, self._table._scrollbar.set, *self._table._mlb.yview()) + self.top.after(300, self._table._scrollbar.set, *self._table._mlb.yview()) + + def _update_table_status(self): + for row_num in range(len(self._table)): + status = self._ds.status(self._table[row_num, "Identifier"]) + self._table[row_num, "Status"] = status + self._color_table() + + def _download(self, *e): + # If we're using threads, then delegate to the threaded + # downloader instead. + if self._use_threads: + return self._download_threaded(*e) + + marked = [ + self._table[row, "Identifier"] + for row in range(len(self._table)) + if self._table[row, 0] != "" + ] + selection = self._table.selected_row() + if not marked and selection is not None: + marked = [self._table[selection, "Identifier"]] + + download_iter = self._ds.incr_download(marked, self._ds.download_dir) + self._log_indent = 0 + self._download_cb(download_iter, marked) + + _DL_DELAY = 10 + + def _download_cb(self, download_iter, ids): + try: + msg = next(download_iter) + except StopIteration: + # self._fill_table(sort=False) + self._update_table_status() + afterid = self.top.after(10, self._show_progress, 0) + self._afterid["_download_cb"] = afterid + return + + def show(s): + self._progresslabel["text"] = s + self._log(s) + + if isinstance(msg, ProgressMessage): + self._show_progress(msg.progress) + elif isinstance(msg, ErrorMessage): + show(msg.message) + if msg.package is not None: + self._select(msg.package.id) + self._show_progress(None) + return # halt progress. + elif isinstance(msg, StartCollectionMessage): + show("Downloading collection %s" % msg.collection.id) + self._log_indent += 1 + elif isinstance(msg, StartPackageMessage): + show("Downloading package %s" % msg.package.id) + elif isinstance(msg, UpToDateMessage): + show("Package %s is up-to-date!" % msg.package.id) + # elif isinstance(msg, StaleMessage): + # show('Package %s is out-of-date or corrupt' % msg.package.id) + elif isinstance(msg, FinishDownloadMessage): + show("Finished downloading %r." % msg.package.id) + elif isinstance(msg, StartUnzipMessage): + show("Unzipping %s" % msg.package.filename) + elif isinstance(msg, FinishCollectionMessage): + self._log_indent -= 1 + show("Finished downloading collection %r." % msg.collection.id) + self._clear_mark(msg.collection.id) + elif isinstance(msg, FinishPackageMessage): + self._clear_mark(msg.package.id) + afterid = self.top.after(self._DL_DELAY, self._download_cb, download_iter, ids) + self._afterid["_download_cb"] = afterid + + def _select(self, id): + for row in range(len(self._table)): + if self._table[row, "Identifier"] == id: + self._table.select(row) + return + + def _color_table(self): + # Color rows according to status. 
+ for row in range(len(self._table)): + bg, sbg = self._ROW_COLOR[self._table[row, "Status"]] + fg, sfg = ("black", "white") + self._table.rowconfig( + row, + foreground=fg, + selectforeground=sfg, + background=bg, + selectbackground=sbg, + ) + # Color the marked column + self._table.itemconfigure( + row, 0, foreground=self._MARK_COLOR[0], background=self._MARK_COLOR[1] + ) + + def _clear_mark(self, id): + for row in range(len(self._table)): + if self._table[row, "Identifier"] == id: + self._table[row, 0] = "" + + def _mark_all(self, *e): + for row in range(len(self._table)): + self._table[row, 0] = "X" + + def _table_mark(self, *e): + selection = self._table.selected_row() + if selection >= 0: + if self._table[selection][0] != "": + self._table[selection, 0] = "" + else: + self._table[selection, 0] = "X" + self._table.select(delta=1) + + def _show_log(self): + text = "\n".join(self._log_messages) + ShowText(self.top, "NLTK Downloader Log", text) + + def _package_to_columns(self, pkg): + """ + Given a package, return a list of values describing that + package, one for each column in ``self.COLUMNS``. + """ + row = [] + for column_index, column_name in enumerate(self.COLUMNS): + if column_index == 0: # Mark: + row.append("") + elif column_name == "Identifier": + row.append(pkg.id) + elif column_name == "Status": + row.append(self._ds.status(pkg)) + else: + attr = column_name.lower().replace(" ", "_") + row.append(getattr(pkg, attr, "n/a")) + return row + + # ///////////////////////////////////////////////////////////////// + # External Interface + # ///////////////////////////////////////////////////////////////// + + def destroy(self, *e): + if self._destroyed: + return + self.top.destroy() + self._destroyed = True + + def _destroy(self, *e): + if self.top is not None: + for afterid in self._afterid.values(): + self.top.after_cancel(afterid) + + # Abort any download in progress. + if self._downloading and self._use_threads: + self._abort_download() + + # Make sure the garbage collector destroys these now; + # otherwise, they may get destroyed when we're not in the main + # thread, which would make Tkinter unhappy. + self._column_vars.clear() + + def mainloop(self, *args, **kwargs): + self.top.mainloop(*args, **kwargs) + + # ///////////////////////////////////////////////////////////////// + # HELP + # ///////////////////////////////////////////////////////////////// + + HELP = textwrap.dedent( + """\ + This tool can be used to download a variety of corpora and models + that can be used with NLTK. Each corpus or model is distributed + in a single zip file, known as a \"package file.\" You can + download packages individually, or you can download pre-defined + collections of packages. + + When you download a package, it will be saved to the \"download + directory.\" A default download directory is chosen when you run + + the downloader; but you may also select a different download + directory. On Windows, the default download directory is + + + \"package.\" + + The NLTK downloader can be used to download a variety of corpora, + models, and other data packages. + + Keyboard shortcuts:: + [return]\t Download + [up]\t Select previous package + [down]\t Select next package + [left]\t Select previous tab + [right]\t Select next tab + """ + ) + + def help(self, *e): + # The default font's not very legible; try using 'fixed' instead. 
+ try: + ShowText( + self.top, + "Help: NLTK Downloader", + self.HELP.strip(), + width=75, + font="fixed", + ) + except: + ShowText(self.top, "Help: NLTK Downloader", self.HELP.strip(), width=75) + + def about(self, *e): + ABOUT = "NLTK Downloader\n" + "Written by Edward Loper" + TITLE = "About: NLTK Downloader" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except ImportError: + ShowText(self.top, TITLE, ABOUT) + + # ///////////////////////////////////////////////////////////////// + # Progress Bar + # ///////////////////////////////////////////////////////////////// + + _gradient_width = 5 + + def _init_progressbar(self): + c = self._progressbar + width, height = int(c["width"]), int(c["height"]) + for i in range(0, (int(c["width"]) * 2) // self._gradient_width): + c.create_line( + i * self._gradient_width + 20, + -20, + i * self._gradient_width - height - 20, + height + 20, + width=self._gradient_width, + fill="#%02x0000" % (80 + abs(i % 6 - 3) * 12), + ) + c.addtag_all("gradient") + c.itemconfig("gradient", state="hidden") + + # This is used to display progress + c.addtag_withtag( + "redbox", c.create_rectangle(0, 0, 0, 0, fill=self._PROGRESS_COLOR[0]) + ) + + def _show_progress(self, percent): + c = self._progressbar + if percent is None: + c.coords("redbox", 0, 0, 0, 0) + c.itemconfig("gradient", state="hidden") + else: + width, height = int(c["width"]), int(c["height"]) + x = percent * int(width) // 100 + 1 + c.coords("redbox", 0, 0, x, height + 1) + + def _progress_alive(self): + c = self._progressbar + if not self._downloading: + c.itemconfig("gradient", state="hidden") + else: + c.itemconfig("gradient", state="normal") + x1, y1, x2, y2 = c.bbox("gradient") + if x1 <= -100: + c.move("gradient", (self._gradient_width * 6) - 4, 0) + else: + c.move("gradient", -4, 0) + afterid = self.top.after(200, self._progress_alive) + self._afterid["_progress_alive"] = afterid + + # ///////////////////////////////////////////////////////////////// + # Threaded downloader + # ///////////////////////////////////////////////////////////////// + + def _download_threaded(self, *e): + # If the user tries to start a new download while we're already + # downloading something, then abort the current download instead. + if self._downloading: + self._abort_download() + return + + # Change the 'download' button to an 'abort' button. + self._download_button["text"] = "Cancel" + + marked = [ + self._table[row, "Identifier"] + for row in range(len(self._table)) + if self._table[row, 0] != "" + ] + selection = self._table.selected_row() + if not marked and selection is not None: + marked = [self._table[selection, "Identifier"]] + + # Create a new data server object for the download operation, + # just in case the user modifies our data server during the + # download (e.g., clicking 'refresh' or editing the index url). + ds = Downloader(self._ds.url, self._ds.download_dir) + + # Start downloading in a separate thread. + assert self._download_msg_queue == [] + assert self._download_abort_queue == [] + self._DownloadThread( + ds, + marked, + self._download_lock, + self._download_msg_queue, + self._download_abort_queue, + ).start() + + # Monitor the download message queue & display its progress. + self._log_indent = 0 + self._downloading = True + self._monitor_message_queue() + + # Display an indication that we're still alive and well by + # cycling the progress bar. 
+ self._progress_alive() + + def _abort_download(self): + if self._downloading: + self._download_lock.acquire() + self._download_abort_queue.append("abort") + self._download_lock.release() + + class _DownloadThread(threading.Thread): + def __init__(self, data_server, items, lock, message_queue, abort): + self.data_server = data_server + self.items = items + self.lock = lock + self.message_queue = message_queue + self.abort = abort + threading.Thread.__init__(self) + + def run(self): + for msg in self.data_server.incr_download(self.items): + self.lock.acquire() + self.message_queue.append(msg) + # Check if we've been told to kill ourselves: + if self.abort: + self.message_queue.append("aborted") + self.lock.release() + return + self.lock.release() + self.lock.acquire() + self.message_queue.append("finished") + self.lock.release() + + _MONITOR_QUEUE_DELAY = 100 + + def _monitor_message_queue(self): + def show(s): + self._progresslabel["text"] = s + self._log(s) + + # Try to acquire the lock; if it's busy, then just try again later. + if not self._download_lock.acquire(): + return + for msg in self._download_msg_queue: + + # Done downloading? + if msg == "finished" or msg == "aborted": + # self._fill_table(sort=False) + self._update_table_status() + self._downloading = False + self._download_button["text"] = "Download" + del self._download_msg_queue[:] + del self._download_abort_queue[:] + self._download_lock.release() + if msg == "aborted": + show("Download aborted!") + self._show_progress(None) + else: + afterid = self.top.after(100, self._show_progress, None) + self._afterid["_monitor_message_queue"] = afterid + return + + # All other messages + elif isinstance(msg, ProgressMessage): + self._show_progress(msg.progress) + elif isinstance(msg, ErrorMessage): + show(msg.message) + if msg.package is not None: + self._select(msg.package.id) + self._show_progress(None) + self._downloading = False + return # halt progress. + elif isinstance(msg, StartCollectionMessage): + show("Downloading collection %r" % msg.collection.id) + self._log_indent += 1 + elif isinstance(msg, StartPackageMessage): + self._ds.clear_status_cache(msg.package.id) + show("Downloading package %r" % msg.package.id) + elif isinstance(msg, UpToDateMessage): + show("Package %s is up-to-date!" % msg.package.id) + # elif isinstance(msg, StaleMessage): + # show('Package %s is out-of-date or corrupt; updating it' % + # msg.package.id) + elif isinstance(msg, FinishDownloadMessage): + show("Finished downloading %r." % msg.package.id) + elif isinstance(msg, StartUnzipMessage): + show("Unzipping %s" % msg.package.filename) + elif isinstance(msg, FinishUnzipMessage): + show("Finished installing %s" % msg.package.id) + elif isinstance(msg, FinishCollectionMessage): + self._log_indent -= 1 + show("Finished downloading collection %r." % msg.collection.id) + self._clear_mark(msg.collection.id) + elif isinstance(msg, FinishPackageMessage): + self._update_table_status() + self._clear_mark(msg.package.id) + + # Let the user know when we're aborting a download (but + # waiting for a good point to abort it, so we don't end up + # with a partially unzipped package or anything like that). + if self._download_abort_queue: + self._progresslabel["text"] = "Aborting download..." + + # Clear the message queue and then release the lock + del self._download_msg_queue[:] + self._download_lock.release() + + # Check the queue again after MONITOR_QUEUE_DELAY msec. 
+ afterid = self.top.after(self._MONITOR_QUEUE_DELAY, self._monitor_message_queue) + self._afterid["_monitor_message_queue"] = afterid + + +###################################################################### +# Helper Functions +###################################################################### +# [xx] It may make sense to move these to nltk.internals. + + +def md5_hexdigest(file): + """ + Calculate and return the MD5 checksum for a given file. + ``file`` may either be a filename or an open stream. + """ + if isinstance(file, str): + with open(file, "rb") as infile: + return _md5_hexdigest(infile) + return _md5_hexdigest(file) + + +def _md5_hexdigest(fp): + md5_digest = md5() + while True: + block = fp.read(1024 * 16) # 16k blocks + if not block: + break + md5_digest.update(block) + return md5_digest.hexdigest() + + +# change this to periodically yield progress messages? +# [xx] get rid of topdir parameter -- we should be checking +# this when we build the index, anyway. +def unzip(filename, root, verbose=True): + """ + Extract the contents of the zip file ``filename`` into the + directory ``root``. + """ + for message in _unzip_iter(filename, root, verbose): + if isinstance(message, ErrorMessage): + raise Exception(message) + + +def _unzip_iter(filename, root, verbose=True): + if verbose: + sys.stdout.write("Unzipping %s" % os.path.split(filename)[1]) + sys.stdout.flush() + + try: + zf = zipfile.ZipFile(filename) + except zipfile.error as e: + yield ErrorMessage(filename, "Error with downloaded zip file") + return + except Exception as e: + yield ErrorMessage(filename, e) + return + + zf.extractall(root) + + if verbose: + print() + + +###################################################################### +# Index Builder +###################################################################### +# This may move to a different file sometime. + + +def build_index(root, base_url): + """ + Create a new data.xml index file, by combining the xml description + files for various packages and collections. ``root`` should be the + path to a directory containing the package xml and zip files; and + the collection xml files. The ``root`` directory is expected to + have the following subdirectories:: + + root/ + packages/ .................. subdirectory for packages + corpora/ ................. zip & xml files for corpora + grammars/ ................ zip & xml files for grammars + taggers/ ................. zip & xml files for taggers + tokenizers/ .............. zip & xml files for tokenizers + etc. + collections/ ............... xml files for collections + + For each package, there should be two files: ``package.zip`` + (where *package* is the package name) + which contains the package itself as a compressed zip file; and + ``package.xml``, which is an xml description of the package. The + zipfile ``package.zip`` should expand to a single subdirectory + named ``package/``. The base filename ``package`` must match + the identifier given in the package's xml file. + + For each collection, there should be a single file ``collection.zip`` + describing the collection, where *collection* is the name of the collection. + + All identifiers (for both packages and collections) must be unique. + """ + # Find all packages. 
+ packages = [] + for pkg_xml, zf, subdir in _find_packages(os.path.join(root, "packages")): + zipstat = os.stat(zf.filename) + url = f"{base_url}/{subdir}/{os.path.split(zf.filename)[1]}" + unzipped_size = sum(zf_info.file_size for zf_info in zf.infolist()) + + # Fill in several fields of the package xml with calculated values. + pkg_xml.set("unzipped_size", "%s" % unzipped_size) + pkg_xml.set("size", "%s" % zipstat.st_size) + pkg_xml.set("checksum", "%s" % md5_hexdigest(zf.filename)) + pkg_xml.set("subdir", subdir) + # pkg_xml.set('svn_revision', _svn_revision(zf.filename)) + if not pkg_xml.get("url"): + pkg_xml.set("url", url) + + # Record the package. + packages.append(pkg_xml) + + # Find all collections + collections = list(_find_collections(os.path.join(root, "collections"))) + + # Check that all UIDs are unique + uids = set() + for item in packages + collections: + if item.get("id") in uids: + raise ValueError("Duplicate UID: %s" % item.get("id")) + uids.add(item.get("id")) + + # Put it all together + top_elt = ElementTree.Element("nltk_data") + top_elt.append(ElementTree.Element("packages")) + top_elt[0].extend(sorted(packages, key=lambda package: package.get("id"))) + top_elt.append(ElementTree.Element("collections")) + top_elt[1].extend(sorted(collections, key=lambda collection: collection.get("id"))) + + _indent_xml(top_elt) + return top_elt + + +def _indent_xml(xml, prefix=""): + """ + Helper for ``build_index()``: Given an XML ``ElementTree``, modify it + (and its descendents) ``text`` and ``tail`` attributes to generate + an indented tree, where each nested element is indented by 2 + spaces with respect to its parent. + """ + if len(xml) > 0: + xml.text = (xml.text or "").strip() + "\n" + prefix + " " + for child in xml: + _indent_xml(child, prefix + " ") + for child in xml[:-1]: + child.tail = (child.tail or "").strip() + "\n" + prefix + " " + xml[-1].tail = (xml[-1].tail or "").strip() + "\n" + prefix + + +def _check_package(pkg_xml, zipfilename, zf): + """ + Helper for ``build_index()``: Perform some checks to make sure that + the given package is consistent. + """ + # The filename must patch the id given in the XML file. + uid = os.path.splitext(os.path.split(zipfilename)[1])[0] + if pkg_xml.get("id") != uid: + raise ValueError( + "package identifier mismatch ({} vs {})".format(pkg_xml.get("id"), uid) + ) + + # Zip file must expand to a subdir whose name matches uid. + if sum((name != uid and not name.startswith(uid + "/")) for name in zf.namelist()): + raise ValueError( + "Zipfile %s.zip does not expand to a single " + "subdirectory %s/" % (uid, uid) + ) + + +# update for git? +def _svn_revision(filename): + """ + Helper for ``build_index()``: Calculate the subversion revision + number for a given file (by using ``subprocess`` to run ``svn``). + """ + p = subprocess.Popen( + ["svn", "status", "-v", filename], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + (stdout, stderr) = p.communicate() + if p.returncode != 0 or stderr or not stdout: + raise ValueError( + "Error determining svn_revision for %s: %s" + % (os.path.split(filename)[1], textwrap.fill(stderr)) + ) + return stdout.split()[2] + + +def _find_collections(root): + """ + Helper for ``build_index()``: Yield a list of ElementTree.Element + objects, each holding the xml for a single package collection. 
+ """ + for dirname, _subdirs, files in os.walk(root): + for filename in files: + if filename.endswith(".xml"): + xmlfile = os.path.join(dirname, filename) + yield ElementTree.parse(xmlfile).getroot() + + +def _find_packages(root): + """ + Helper for ``build_index()``: Yield a list of tuples + ``(pkg_xml, zf, subdir)``, where: + - ``pkg_xml`` is an ``ElementTree.Element`` holding the xml for a + package + - ``zf`` is a ``zipfile.ZipFile`` for the package's contents. + - ``subdir`` is the subdirectory (relative to ``root``) where + the package was found (e.g. 'corpora' or 'grammars'). + """ + from nltk.corpus.reader.util import _path_from + + # Find all packages. + packages = [] + for dirname, subdirs, files in os.walk(root): + relpath = "/".join(_path_from(root, dirname)) + for filename in files: + if filename.endswith(".xml"): + xmlfilename = os.path.join(dirname, filename) + zipfilename = xmlfilename[:-4] + ".zip" + try: + zf = zipfile.ZipFile(zipfilename) + except Exception as e: + raise ValueError(f"Error reading file {zipfilename!r}!\n{e}") from e + try: + pkg_xml = ElementTree.parse(xmlfilename).getroot() + except Exception as e: + raise ValueError(f"Error reading file {xmlfilename!r}!\n{e}") from e + + # Check that the UID matches the filename + uid = os.path.split(xmlfilename[:-4])[1] + if pkg_xml.get("id") != uid: + raise ValueError( + "package identifier mismatch (%s " + "vs %s)" % (pkg_xml.get("id"), uid) + ) + + # Check that the zipfile expands to a subdir whose + # name matches the uid. + if sum( + (name != uid and not name.startswith(uid + "/")) + for name in zf.namelist() + ): + raise ValueError( + "Zipfile %s.zip does not expand to a " + "single subdirectory %s/" % (uid, uid) + ) + + yield pkg_xml, zf, relpath + + elif filename.endswith(".zip"): + # Warn user in case a .xml does not exist for a .zip + resourcename = os.path.splitext(filename)[0] + xmlfilename = os.path.join(dirname, resourcename + ".xml") + if not os.path.exists(xmlfilename): + warnings.warn( + f"{filename} exists, but {resourcename + '.xml'} cannot be found! 
" + f"This could mean that {resourcename} can not be downloaded.", + stacklevel=2, + ) + + # Don't recurse into svn subdirectories: + try: + subdirs.remove(".svn") + except ValueError: + pass + + +###################################################################### +# Main: +###################################################################### + +# There should be a command-line interface + +# Aliases +_downloader = Downloader() +download = _downloader.download + + +def download_shell(): + DownloaderShell(_downloader).run() + + +def download_gui(): + DownloaderGUI(_downloader).mainloop() + + +def update(): + _downloader.update() + + +if __name__ == "__main__": + from optparse import OptionParser + + parser = OptionParser() + parser.add_option( + "-d", + "--dir", + dest="dir", + help="download package to directory DIR", + metavar="DIR", + ) + parser.add_option( + "-q", + "--quiet", + dest="quiet", + action="store_true", + default=False, + help="work quietly", + ) + parser.add_option( + "-f", + "--force", + dest="force", + action="store_true", + default=False, + help="download even if already installed", + ) + parser.add_option( + "-e", + "--exit-on-error", + dest="halt_on_error", + action="store_true", + default=False, + help="exit if an error occurs", + ) + parser.add_option( + "-u", + "--url", + dest="server_index_url", + default=os.environ.get("NLTK_DOWNLOAD_URL"), + help="download server index url", + ) + + (options, args) = parser.parse_args() + + downloader = Downloader(server_index_url=options.server_index_url) + + if args: + for pkg_id in args: + rv = downloader.download( + info_or_id=pkg_id, + download_dir=options.dir, + quiet=options.quiet, + force=options.force, + halt_on_error=options.halt_on_error, + ) + if rv == False and options.halt_on_error: + break + else: + downloader.download( + download_dir=options.dir, + quiet=options.quiet, + force=options.force, + halt_on_error=options.halt_on_error, + ) diff --git a/venv/lib/python3.10/site-packages/nltk/featstruct.py b/venv/lib/python3.10/site-packages/nltk/featstruct.py new file mode 100644 index 0000000000000000000000000000000000000000..5684f06f51e76070ca6e606722aa1583332429e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/featstruct.py @@ -0,0 +1,2779 @@ +# Natural Language Toolkit: Feature Structures +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper , +# Rob Speer, +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Basic data classes for representing feature structures, and for +performing basic operations on those feature structures. A feature +structure is a mapping from feature identifiers to feature values, +where each feature value is either a basic value (such as a string or +an integer), or a nested feature structure. There are two types of +feature structure, implemented by two subclasses of ``FeatStruct``: + + - feature dictionaries, implemented by ``FeatDict``, act like + Python dictionaries. Feature identifiers may be strings or + instances of the ``Feature`` class. + - feature lists, implemented by ``FeatList``, act like Python + lists. Feature identifiers are integers. + +Feature structures are typically used to represent partial information +about objects. A feature identifier that is not mapped to a value +stands for a feature whose value is unknown (*not* a feature without +a value). Two feature structures that represent (potentially +overlapping) information about the same object can be combined by +unification. 
When two inconsistent feature structures are unified, +the unification fails and returns None. + +Features can be specified using "feature paths", or tuples of feature +identifiers that specify path through the nested feature structures to +a value. Feature structures may contain reentrant feature values. A +"reentrant feature value" is a single feature value that can be +accessed via multiple feature paths. Unification preserves the +reentrance relations imposed by both of the unified feature +structures. In the feature structure resulting from unification, any +modifications to a reentrant feature value will be visible using any +of its feature paths. + +Feature structure variables are encoded using the ``nltk.sem.Variable`` +class. The variables' values are tracked using a bindings +dictionary, which maps variables to their values. When two feature +structures are unified, a fresh bindings dictionary is created to +track their values; and before unification completes, all bound +variables are replaced by their values. Thus, the bindings +dictionaries are usually strictly internal to the unification process. +However, it is possible to track the bindings of variables if you +choose to, by supplying your own initial bindings dictionary to the +``unify()`` function. + +When unbound variables are unified with one another, they become +aliased. This is encoded by binding one variable to the other. + +Lightweight Feature Structures +============================== +Many of the functions defined by ``nltk.featstruct`` can be applied +directly to simple Python dictionaries and lists, rather than to +full-fledged ``FeatDict`` and ``FeatList`` objects. In other words, +Python ``dicts`` and ``lists`` can be used as "light-weight" feature +structures. + + >>> from nltk.featstruct import unify + >>> unify(dict(x=1, y=dict()), dict(a='a', y=dict(b='b'))) # doctest: +SKIP + {'y': {'b': 'b'}, 'x': 1, 'a': 'a'} + +However, you should keep in mind the following caveats: + + - Python dictionaries & lists ignore reentrance when checking for + equality between values. But two FeatStructs with different + reentrances are considered nonequal, even if all their base + values are equal. + + - FeatStructs can be easily frozen, allowing them to be used as + keys in hash tables. Python dictionaries and lists can not. + + - FeatStructs display reentrance in their string representations; + Python dictionaries and lists do not. + + - FeatStructs may *not* be mixed with Python dictionaries and lists + (e.g., when performing unification). + + - FeatStructs provide a number of useful methods, such as ``walk()`` + and ``cyclic()``, which are not available for Python dicts and lists. + +In general, if your feature structures will contain any reentrances, +or if you plan to use them as dictionary keys, it is strongly +recommended that you use full-fledged ``FeatStruct`` objects. 
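+
+For example, unification of inconsistent structures returns None, and a
+frozen structure can be used as a dictionary key (an illustrative sketch;
+the feature names are arbitrary):
+
+    >>> from nltk.featstruct import FeatStruct
+    >>> print(FeatStruct(number='sing').unify(FeatStruct(number='plur')))  # doctest: +SKIP
+    None
+    >>> fs = FeatStruct(number='sing')
+    >>> fs.freeze()
+    >>> index = {fs: 'frozen FeatStructs are hashable'}  # doctest: +SKIP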
+""" + +import copy +import re +from functools import total_ordering + +from nltk.internals import raise_unorderable_types, read_str +from nltk.sem.logic import ( + Expression, + LogicalExpressionException, + LogicParser, + SubstituteBindingsI, + Variable, +) + +###################################################################### +# Feature Structure +###################################################################### + + +@total_ordering +class FeatStruct(SubstituteBindingsI): + """ + A mapping from feature identifiers to feature values, where each + feature value is either a basic value (such as a string or an + integer), or a nested feature structure. There are two types of + feature structure: + + - feature dictionaries, implemented by ``FeatDict``, act like + Python dictionaries. Feature identifiers may be strings or + instances of the ``Feature`` class. + - feature lists, implemented by ``FeatList``, act like Python + lists. Feature identifiers are integers. + + Feature structures may be indexed using either simple feature + identifiers or 'feature paths.' A feature path is a sequence + of feature identifiers that stand for a corresponding sequence of + indexing operations. In particular, ``fstruct[(f1,f2,...,fn)]`` is + equivalent to ``fstruct[f1][f2]...[fn]``. + + Feature structures may contain reentrant feature structures. A + "reentrant feature structure" is a single feature structure + object that can be accessed via multiple feature paths. Feature + structures may also be cyclic. A feature structure is "cyclic" + if there is any feature path from the feature structure to itself. + + Two feature structures are considered equal if they assign the + same values to all features, and have the same reentrancies. + + By default, feature structures are mutable. They may be made + immutable with the ``freeze()`` method. Once they have been + frozen, they may be hashed, and thus used as dictionary keys. + """ + + _frozen = False + """:ivar: A flag indicating whether this feature structure is + frozen or not. Once this flag is set, it should never be + un-set; and no further modification should be made to this + feature structure.""" + + ##//////////////////////////////////////////////////////////// + # { Constructor + ##//////////////////////////////////////////////////////////// + + def __new__(cls, features=None, **morefeatures): + """ + Construct and return a new feature structure. If this + constructor is called directly, then the returned feature + structure will be an instance of either the ``FeatDict`` class + or the ``FeatList`` class. + + :param features: The initial feature values for this feature + structure: + + - FeatStruct(string) -> FeatStructReader().read(string) + - FeatStruct(mapping) -> FeatDict(mapping) + - FeatStruct(sequence) -> FeatList(sequence) + - FeatStruct() -> FeatDict() + :param morefeatures: If ``features`` is a mapping or None, + then ``morefeatures`` provides additional features for the + ``FeatDict`` constructor. + """ + # If the FeatStruct constructor is called directly, then decide + # whether to create a FeatDict or a FeatList, based on the + # contents of the `features` argument. + if cls is FeatStruct: + if features is None: + return FeatDict.__new__(FeatDict, **morefeatures) + elif _is_mapping(features): + return FeatDict.__new__(FeatDict, features, **morefeatures) + elif morefeatures: + raise TypeError( + "Keyword arguments may only be specified " + "if features is None or is a mapping." 
+ ) + if isinstance(features, str): + if FeatStructReader._START_FDICT_RE.match(features): + return FeatDict.__new__(FeatDict, features, **morefeatures) + else: + return FeatList.__new__(FeatList, features, **morefeatures) + elif _is_sequence(features): + return FeatList.__new__(FeatList, features) + else: + raise TypeError("Expected string or mapping or sequence") + + # Otherwise, construct the object as normal. + else: + return super().__new__(cls, features, **morefeatures) + + ##//////////////////////////////////////////////////////////// + # { Uniform Accessor Methods + ##//////////////////////////////////////////////////////////// + # These helper functions allow the methods defined by FeatStruct + # to treat all feature structures as mappings, even if they're + # really lists. (Lists are treated as mappings from ints to vals) + + def _keys(self): + """Return an iterable of the feature identifiers used by this + FeatStruct.""" + raise NotImplementedError() # Implemented by subclasses. + + def _values(self): + """Return an iterable of the feature values directly defined + by this FeatStruct.""" + raise NotImplementedError() # Implemented by subclasses. + + def _items(self): + """Return an iterable of (fid,fval) pairs, where fid is a + feature identifier and fval is the corresponding feature + value, for all features defined by this FeatStruct.""" + raise NotImplementedError() # Implemented by subclasses. + + ##//////////////////////////////////////////////////////////// + # { Equality & Hashing + ##//////////////////////////////////////////////////////////// + + def equal_values(self, other, check_reentrance=False): + """ + Return True if ``self`` and ``other`` assign the same value to + to every feature. In particular, return true if + ``self[p]==other[p]`` for every feature path *p* such + that ``self[p]`` or ``other[p]`` is a base value (i.e., + not a nested feature structure). + + :param check_reentrance: If True, then also return False if + there is any difference between the reentrances of ``self`` + and ``other``. + :note: the ``==`` is equivalent to ``equal_values()`` with + ``check_reentrance=True``. + """ + return self._equal(other, check_reentrance, set(), set(), set()) + + def __eq__(self, other): + """ + Return true if ``self`` and ``other`` are both feature structures, + assign the same values to all features, and contain the same + reentrances. I.e., return + ``self.equal_values(other, check_reentrance=True)``. + + :see: ``equal_values()`` + """ + return self._equal(other, True, set(), set(), set()) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, FeatStruct): + # raise_unorderable_types("<", self, other) + # Sometimes feature values can be pure strings, + # so we need to be able to compare with non-featstructs: + return self.__class__.__name__ < other.__class__.__name__ + else: + return len(self) < len(other) + + def __hash__(self): + """ + If this feature structure is frozen, return its hash value; + otherwise, raise ``TypeError``. + """ + if not self._frozen: + raise TypeError("FeatStructs must be frozen before they " "can be hashed.") + try: + return self._hash + except AttributeError: + self._hash = self._calculate_hashvalue(set()) + return self._hash + + def _equal( + self, other, check_reentrance, visited_self, visited_other, visited_pairs + ): + """ + Return True iff self and other have equal values. 
+ + :param visited_self: A set containing the ids of all ``self`` + feature structures we've already visited. + :param visited_other: A set containing the ids of all ``other`` + feature structures we've already visited. + :param visited_pairs: A set containing ``(selfid, otherid)`` pairs + for all pairs of feature structures we've already visited. + """ + # If we're the same object, then we're equal. + if self is other: + return True + + # If we have different classes, we're definitely not equal. + if self.__class__ != other.__class__: + return False + + # If we define different features, we're definitely not equal. + # (Perform len test first because it's faster -- we should + # do profiling to see if this actually helps) + if len(self) != len(other): + return False + if set(self._keys()) != set(other._keys()): + return False + + # If we're checking reentrance, then any time we revisit a + # structure, make sure that it was paired with the same + # feature structure that it is now. Note: if check_reentrance, + # then visited_pairs will never contain two pairs whose first + # values are equal, or two pairs whose second values are equal. + if check_reentrance: + if id(self) in visited_self or id(other) in visited_other: + return (id(self), id(other)) in visited_pairs + + # If we're not checking reentrance, then we still need to deal + # with cycles. If we encounter the same (self, other) pair a + # second time, then we won't learn anything more by examining + # their children a second time, so just return true. + else: + if (id(self), id(other)) in visited_pairs: + return True + + # Keep track of which nodes we've visited. + visited_self.add(id(self)) + visited_other.add(id(other)) + visited_pairs.add((id(self), id(other))) + + # Now we have to check all values. If any of them don't match, + # then return false. + for (fname, self_fval) in self._items(): + other_fval = other[fname] + if isinstance(self_fval, FeatStruct): + if not self_fval._equal( + other_fval, + check_reentrance, + visited_self, + visited_other, + visited_pairs, + ): + return False + else: + if self_fval != other_fval: + return False + + # Everything matched up; return true. + return True + + def _calculate_hashvalue(self, visited): + """ + Return a hash value for this feature structure. + + :require: ``self`` must be frozen. + :param visited: A set containing the ids of all feature + structures we've already visited while hashing. + """ + if id(self) in visited: + return 1 + visited.add(id(self)) + + hashval = 5831 + for (fname, fval) in sorted(self._items()): + hashval *= 37 + hashval += hash(fname) + hashval *= 37 + if isinstance(fval, FeatStruct): + hashval += fval._calculate_hashvalue(visited) + else: + hashval += hash(fval) + # Convert to a 32 bit int. + hashval = int(hashval & 0x7FFFFFFF) + return hashval + + ##//////////////////////////////////////////////////////////// + # { Freezing + ##//////////////////////////////////////////////////////////// + + #: Error message used by mutating methods when called on a frozen + #: feature structure. + _FROZEN_ERROR = "Frozen FeatStructs may not be modified." + + def freeze(self): + """ + Make this feature structure, and any feature structures it + contains, immutable. Note: this method does not attempt to + 'freeze' any feature value that is not a ``FeatStruct``; it + is recommended that you use only immutable feature values. + """ + if self._frozen: + return + self._freeze(set()) + + def frozen(self): + """ + Return True if this feature structure is immutable. 
Feature + structures can be made immutable with the ``freeze()`` method. + Immutable feature structures may not be made mutable again, + but new mutable copies can be produced with the ``copy()`` method. + """ + return self._frozen + + def _freeze(self, visited): + """ + Make this feature structure, and any feature structure it + contains, immutable. + + :param visited: A set containing the ids of all feature + structures we've already visited while freezing. + """ + if id(self) in visited: + return + visited.add(id(self)) + self._frozen = True + for (fname, fval) in sorted(self._items()): + if isinstance(fval, FeatStruct): + fval._freeze(visited) + + ##//////////////////////////////////////////////////////////// + # { Copying + ##//////////////////////////////////////////////////////////// + + def copy(self, deep=True): + """ + Return a new copy of ``self``. The new copy will not be frozen. + + :param deep: If true, create a deep copy; if false, create + a shallow copy. + """ + if deep: + return copy.deepcopy(self) + else: + return self.__class__(self) + + # Subclasses should define __deepcopy__ to ensure that the new + # copy will not be frozen. + def __deepcopy__(self, memo): + raise NotImplementedError() # Implemented by subclasses. + + ##//////////////////////////////////////////////////////////// + # { Structural Information + ##//////////////////////////////////////////////////////////// + + def cyclic(self): + """ + Return True if this feature structure contains itself. + """ + return self._find_reentrances({})[id(self)] + + def walk(self): + """ + Return an iterator that generates this feature structure, and + each feature structure it contains. Each feature structure will + be generated exactly once. + """ + return self._walk(set()) + + def _walk(self, visited): + """ + Return an iterator that generates this feature structure, and + each feature structure it contains. + + :param visited: A set containing the ids of all feature + structures we've already visited while freezing. + """ + raise NotImplementedError() # Implemented by subclasses. + + def _walk(self, visited): + if id(self) in visited: + return + visited.add(id(self)) + yield self + for fval in self._values(): + if isinstance(fval, FeatStruct): + yield from fval._walk(visited) + + # Walk through the feature tree. The first time we see a feature + # value, map it to False (not reentrant). If we see a feature + # value more than once, then map it to True (reentrant). + def _find_reentrances(self, reentrances): + """ + Return a dictionary that maps from the ``id`` of each feature + structure contained in ``self`` (including ``self``) to a + boolean value, indicating whether it is reentrant or not. + """ + if id(self) in reentrances: + # We've seen it more than once. + reentrances[id(self)] = True + else: + # This is the first time we've seen it. + reentrances[id(self)] = False + + # Recurse to contained feature structures. 
+ for fval in self._values(): + if isinstance(fval, FeatStruct): + fval._find_reentrances(reentrances) + + return reentrances + + ##//////////////////////////////////////////////////////////// + # { Variables & Bindings + ##//////////////////////////////////////////////////////////// + + def substitute_bindings(self, bindings): + """:see: ``nltk.featstruct.substitute_bindings()``""" + return substitute_bindings(self, bindings) + + def retract_bindings(self, bindings): + """:see: ``nltk.featstruct.retract_bindings()``""" + return retract_bindings(self, bindings) + + def variables(self): + """:see: ``nltk.featstruct.find_variables()``""" + return find_variables(self) + + def rename_variables(self, vars=None, used_vars=(), new_vars=None): + """:see: ``nltk.featstruct.rename_variables()``""" + return rename_variables(self, vars, used_vars, new_vars) + + def remove_variables(self): + """ + Return the feature structure that is obtained by deleting + any feature whose value is a ``Variable``. + + :rtype: FeatStruct + """ + return remove_variables(self) + + ##//////////////////////////////////////////////////////////// + # { Unification + ##//////////////////////////////////////////////////////////// + + def unify(self, other, bindings=None, trace=False, fail=None, rename_vars=True): + return unify(self, other, bindings, trace, fail, rename_vars) + + def subsumes(self, other): + """ + Return True if ``self`` subsumes ``other``. I.e., return true + If unifying ``self`` with ``other`` would result in a feature + structure equal to ``other``. + """ + return subsumes(self, other) + + ##//////////////////////////////////////////////////////////// + # { String Representations + ##//////////////////////////////////////////////////////////// + + def __repr__(self): + """ + Display a single-line representation of this feature structure, + suitable for embedding in other representations. + """ + return self._repr(self._find_reentrances({}), {}) + + def _repr(self, reentrances, reentrance_ids): + """ + Return a string representation of this feature structure. + + :param reentrances: A dictionary that maps from the ``id`` of + each feature value in self, indicating whether that value + is reentrant or not. + :param reentrance_ids: A dictionary mapping from each ``id`` + of a feature value to a unique identifier. This is modified + by ``repr``: the first time a reentrant feature value is + displayed, an identifier is added to ``reentrance_ids`` for it. + """ + raise NotImplementedError() + + +# Mutation: disable if frozen. +_FROZEN_ERROR = "Frozen FeatStructs may not be modified." +_FROZEN_NOTICE = "\n%sIf self is frozen, raise ValueError." + + +def _check_frozen(method, indent=""): + """ + Given a method function, return a new method function that first + checks if ``self._frozen`` is true; and if so, raises ``ValueError`` + with an appropriate message. Otherwise, call the method and return + its result. + """ + + def wrapped(self, *args, **kwargs): + if self._frozen: + raise ValueError(_FROZEN_ERROR) + else: + return method(self, *args, **kwargs) + + wrapped.__name__ = method.__name__ + wrapped.__doc__ = (method.__doc__ or "") + (_FROZEN_NOTICE % indent) + return wrapped + + +###################################################################### +# Feature Dictionary +###################################################################### + + +class FeatDict(FeatStruct, dict): + """ + A feature structure that acts like a Python dictionary. 
I.e., a + mapping from feature identifiers to feature values, where a feature + identifier can be a string or a ``Feature``; and where a feature value + can be either a basic value (such as a string or an integer), or a nested + feature structure. A feature identifiers for a ``FeatDict`` is + sometimes called a "feature name". + + Two feature dicts are considered equal if they assign the same + values to all features, and have the same reentrances. + + :see: ``FeatStruct`` for information about feature paths, reentrance, + cyclic feature structures, mutability, freezing, and hashing. + """ + + def __init__(self, features=None, **morefeatures): + """ + Create a new feature dictionary, with the specified features. + + :param features: The initial value for this feature + dictionary. If ``features`` is a ``FeatStruct``, then its + features are copied (shallow copy). If ``features`` is a + dict, then a feature is created for each item, mapping its + key to its value. If ``features`` is a string, then it is + processed using ``FeatStructReader``. If ``features`` is a list of + tuples ``(name, val)``, then a feature is created for each tuple. + :param morefeatures: Additional features for the new feature + dictionary. If a feature is listed under both ``features`` and + ``morefeatures``, then the value from ``morefeatures`` will be + used. + """ + if isinstance(features, str): + FeatStructReader().fromstring(features, self) + self.update(**morefeatures) + else: + # update() checks the types of features. + self.update(features, **morefeatures) + + # //////////////////////////////////////////////////////////// + # { Dict methods + # //////////////////////////////////////////////////////////// + _INDEX_ERROR = "Expected feature name or path. Got %r." + + def __getitem__(self, name_or_path): + """If the feature with the given name or path exists, return + its value; otherwise, raise ``KeyError``.""" + if isinstance(name_or_path, (str, Feature)): + return dict.__getitem__(self, name_or_path) + elif isinstance(name_or_path, tuple): + try: + val = self + for fid in name_or_path: + if not isinstance(val, FeatStruct): + raise KeyError # path contains base value + val = val[fid] + return val + except (KeyError, IndexError) as e: + raise KeyError(name_or_path) from e + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + def get(self, name_or_path, default=None): + """If the feature with the given name or path exists, return its + value; otherwise, return ``default``.""" + try: + return self[name_or_path] + except KeyError: + return default + + def __contains__(self, name_or_path): + """Return true if a feature with the given name or path exists.""" + try: + self[name_or_path] + return True + except KeyError: + return False + + def has_key(self, name_or_path): + """Return true if a feature with the given name or path exists.""" + return name_or_path in self + + def __delitem__(self, name_or_path): + """If the feature with the given name or path exists, delete + its value; otherwise, raise ``KeyError``.""" + if self._frozen: + raise ValueError(_FROZEN_ERROR) + if isinstance(name_or_path, (str, Feature)): + return dict.__delitem__(self, name_or_path) + elif isinstance(name_or_path, tuple): + if len(name_or_path) == 0: + raise ValueError("The path () can not be set") + else: + parent = self[name_or_path[:-1]] + if not isinstance(parent, FeatStruct): + raise KeyError(name_or_path) # path contains base value + del parent[name_or_path[-1]] + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + 
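+    # Illustrative usage note (not part of the upstream NLTK source; the
+    # feature names below are invented for illustration): the accessors
+    # above treat tuple keys as feature paths, e.g.
+    #
+    #     fd = FeatDict("[agr=[num='sg']]")
+    #     fd[('agr', 'num')]        # same as fd['agr']['num']  -> 'sg'
+    #     ('agr', 'missing') in fd  # -> False (no KeyError escapes)
+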
def __setitem__(self, name_or_path, value): + """Set the value for the feature with the given name or path + to ``value``. If ``name_or_path`` is an invalid path, raise + ``KeyError``.""" + if self._frozen: + raise ValueError(_FROZEN_ERROR) + if isinstance(name_or_path, (str, Feature)): + return dict.__setitem__(self, name_or_path, value) + elif isinstance(name_or_path, tuple): + if len(name_or_path) == 0: + raise ValueError("The path () can not be set") + else: + parent = self[name_or_path[:-1]] + if not isinstance(parent, FeatStruct): + raise KeyError(name_or_path) # path contains base value + parent[name_or_path[-1]] = value + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + clear = _check_frozen(dict.clear) + pop = _check_frozen(dict.pop) + popitem = _check_frozen(dict.popitem) + setdefault = _check_frozen(dict.setdefault) + + def update(self, features=None, **morefeatures): + if self._frozen: + raise ValueError(_FROZEN_ERROR) + if features is None: + items = () + elif hasattr(features, "items") and callable(features.items): + items = features.items() + elif hasattr(features, "__iter__"): + items = features + else: + raise ValueError("Expected mapping or list of tuples") + + for key, val in items: + if not isinstance(key, (str, Feature)): + raise TypeError("Feature names must be strings") + self[key] = val + for key, val in morefeatures.items(): + if not isinstance(key, (str, Feature)): + raise TypeError("Feature names must be strings") + self[key] = val + + ##//////////////////////////////////////////////////////////// + # { Copying + ##//////////////////////////////////////////////////////////// + + def __deepcopy__(self, memo): + memo[id(self)] = selfcopy = self.__class__() + for (key, val) in self._items(): + selfcopy[copy.deepcopy(key, memo)] = copy.deepcopy(val, memo) + return selfcopy + + ##//////////////////////////////////////////////////////////// + # { Uniform Accessor Methods + ##//////////////////////////////////////////////////////////// + + def _keys(self): + return self.keys() + + def _values(self): + return self.values() + + def _items(self): + return self.items() + + ##//////////////////////////////////////////////////////////// + # { String Representations + ##//////////////////////////////////////////////////////////// + + def __str__(self): + """ + Display a multi-line representation of this feature dictionary + as an FVM (feature value matrix). + """ + return "\n".join(self._str(self._find_reentrances({}), {})) + + def _repr(self, reentrances, reentrance_ids): + segments = [] + prefix = "" + suffix = "" + + # If this is the first time we've seen a reentrant structure, + # then assign it a unique identifier. + if reentrances[id(self)]: + assert id(self) not in reentrance_ids + reentrance_ids[id(self)] = repr(len(reentrance_ids) + 1) + + # sorting note: keys are unique strings, so we'll never fall + # through to comparing values. 
+ for (fname, fval) in sorted(self.items()): + display = getattr(fname, "display", None) + if id(fval) in reentrance_ids: + segments.append(f"{fname}->({reentrance_ids[id(fval)]})") + elif ( + display == "prefix" and not prefix and isinstance(fval, (Variable, str)) + ): + prefix = "%s" % fval + elif display == "slash" and not suffix: + if isinstance(fval, Variable): + suffix = "/%s" % fval.name + else: + suffix = "/%s" % repr(fval) + elif isinstance(fval, Variable): + segments.append(f"{fname}={fval.name}") + elif fval is True: + segments.append("+%s" % fname) + elif fval is False: + segments.append("-%s" % fname) + elif isinstance(fval, Expression): + segments.append(f"{fname}=<{fval}>") + elif not isinstance(fval, FeatStruct): + segments.append(f"{fname}={repr(fval)}") + else: + fval_repr = fval._repr(reentrances, reentrance_ids) + segments.append(f"{fname}={fval_repr}") + # If it's reentrant, then add on an identifier tag. + if reentrances[id(self)]: + prefix = f"({reentrance_ids[id(self)]}){prefix}" + return "{}[{}]{}".format(prefix, ", ".join(segments), suffix) + + def _str(self, reentrances, reentrance_ids): + """ + :return: A list of lines composing a string representation of + this feature dictionary. + :param reentrances: A dictionary that maps from the ``id`` of + each feature value in self, indicating whether that value + is reentrant or not. + :param reentrance_ids: A dictionary mapping from each ``id`` + of a feature value to a unique identifier. This is modified + by ``repr``: the first time a reentrant feature value is + displayed, an identifier is added to ``reentrance_ids`` for + it. + """ + # If this is the first time we've seen a reentrant structure, + # then tack on an id string. + if reentrances[id(self)]: + assert id(self) not in reentrance_ids + reentrance_ids[id(self)] = repr(len(reentrance_ids) + 1) + + # Special case: empty feature dict. + if len(self) == 0: + if reentrances[id(self)]: + return ["(%s) []" % reentrance_ids[id(self)]] + else: + return ["[]"] + + # What's the longest feature name? Use this to align names. + maxfnamelen = max(len("%s" % k) for k in self.keys()) + + lines = [] + # sorting note: keys are unique strings, so we'll never fall + # through to comparing values. + for (fname, fval) in sorted(self.items()): + fname = ("%s" % fname).ljust(maxfnamelen) + if isinstance(fval, Variable): + lines.append(f"{fname} = {fval.name}") + + elif isinstance(fval, Expression): + lines.append(f"{fname} = <{fval}>") + + elif isinstance(fval, FeatList): + fval_repr = fval._repr(reentrances, reentrance_ids) + lines.append(f"{fname} = {repr(fval_repr)}") + + elif not isinstance(fval, FeatDict): + # It's not a nested feature structure -- just print it. + lines.append(f"{fname} = {repr(fval)}") + + elif id(fval) in reentrance_ids: + # It's a feature structure we've seen before -- print + # the reentrance id. + lines.append(f"{fname} -> ({reentrance_ids[id(fval)]})") + + else: + # It's a new feature structure. Separate it from + # other values by a blank line. + if lines and lines[-1] != "": + lines.append("") + + # Recursively print the feature's value (fval). + fval_lines = fval._str(reentrances, reentrance_ids) + + # Indent each line to make room for fname. + fval_lines = [(" " * (maxfnamelen + 3)) + l for l in fval_lines] + + # Pick which line we'll display fname on, & splice it in. + nameline = (len(fval_lines) - 1) // 2 + fval_lines[nameline] = ( + fname + " =" + fval_lines[nameline][maxfnamelen + 2 :] + ) + + # Add the feature structure to the output. 
+ lines += fval_lines + + # Separate FeatStructs by a blank line. + lines.append("") + + # Get rid of any excess blank lines. + if lines[-1] == "": + lines.pop() + + # Add brackets around everything. + maxlen = max(len(line) for line in lines) + lines = ["[ {}{} ]".format(line, " " * (maxlen - len(line))) for line in lines] + + # If it's reentrant, then add on an identifier tag. + if reentrances[id(self)]: + idstr = "(%s) " % reentrance_ids[id(self)] + lines = [(" " * len(idstr)) + l for l in lines] + idline = (len(lines) - 1) // 2 + lines[idline] = idstr + lines[idline][len(idstr) :] + + return lines + + +###################################################################### +# Feature List +###################################################################### + + +class FeatList(FeatStruct, list): + """ + A list of feature values, where each feature value is either a + basic value (such as a string or an integer), or a nested feature + structure. + + Feature lists may contain reentrant feature values. A "reentrant + feature value" is a single feature value that can be accessed via + multiple feature paths. Feature lists may also be cyclic. + + Two feature lists are considered equal if they assign the same + values to all features, and have the same reentrances. + + :see: ``FeatStruct`` for information about feature paths, reentrance, + cyclic feature structures, mutability, freezing, and hashing. + """ + + def __init__(self, features=()): + """ + Create a new feature list, with the specified features. + + :param features: The initial list of features for this feature + list. If ``features`` is a string, then it is paresd using + ``FeatStructReader``. Otherwise, it should be a sequence + of basic values and nested feature structures. + """ + if isinstance(features, str): + FeatStructReader().fromstring(features, self) + else: + list.__init__(self, features) + + # //////////////////////////////////////////////////////////// + # { List methods + # //////////////////////////////////////////////////////////// + _INDEX_ERROR = "Expected int or feature path. Got %r." + + def __getitem__(self, name_or_path): + if isinstance(name_or_path, int): + return list.__getitem__(self, name_or_path) + elif isinstance(name_or_path, tuple): + try: + val = self + for fid in name_or_path: + if not isinstance(val, FeatStruct): + raise KeyError # path contains base value + val = val[fid] + return val + except (KeyError, IndexError) as e: + raise KeyError(name_or_path) from e + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + def __delitem__(self, name_or_path): + """If the feature with the given name or path exists, delete + its value; otherwise, raise ``KeyError``.""" + if self._frozen: + raise ValueError(_FROZEN_ERROR) + if isinstance(name_or_path, (int, slice)): + return list.__delitem__(self, name_or_path) + elif isinstance(name_or_path, tuple): + if len(name_or_path) == 0: + raise ValueError("The path () can not be set") + else: + parent = self[name_or_path[:-1]] + if not isinstance(parent, FeatStruct): + raise KeyError(name_or_path) # path contains base value + del parent[name_or_path[-1]] + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + def __setitem__(self, name_or_path, value): + """Set the value for the feature with the given name or path + to ``value``. 
If ``name_or_path`` is an invalid path, raise + ``KeyError``.""" + if self._frozen: + raise ValueError(_FROZEN_ERROR) + if isinstance(name_or_path, (int, slice)): + return list.__setitem__(self, name_or_path, value) + elif isinstance(name_or_path, tuple): + if len(name_or_path) == 0: + raise ValueError("The path () can not be set") + else: + parent = self[name_or_path[:-1]] + if not isinstance(parent, FeatStruct): + raise KeyError(name_or_path) # path contains base value + parent[name_or_path[-1]] = value + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + # __delslice__ = _check_frozen(list.__delslice__, ' ') + # __setslice__ = _check_frozen(list.__setslice__, ' ') + __iadd__ = _check_frozen(list.__iadd__) + __imul__ = _check_frozen(list.__imul__) + append = _check_frozen(list.append) + extend = _check_frozen(list.extend) + insert = _check_frozen(list.insert) + pop = _check_frozen(list.pop) + remove = _check_frozen(list.remove) + reverse = _check_frozen(list.reverse) + sort = _check_frozen(list.sort) + + ##//////////////////////////////////////////////////////////// + # { Copying + ##//////////////////////////////////////////////////////////// + + def __deepcopy__(self, memo): + memo[id(self)] = selfcopy = self.__class__() + selfcopy.extend(copy.deepcopy(fval, memo) for fval in self) + return selfcopy + + ##//////////////////////////////////////////////////////////// + # { Uniform Accessor Methods + ##//////////////////////////////////////////////////////////// + + def _keys(self): + return list(range(len(self))) + + def _values(self): + return self + + def _items(self): + return enumerate(self) + + ##//////////////////////////////////////////////////////////// + # { String Representations + ##//////////////////////////////////////////////////////////// + + # Special handling for: reentrances, variables, expressions. + def _repr(self, reentrances, reentrance_ids): + # If this is the first time we've seen a reentrant structure, + # then assign it a unique identifier. + if reentrances[id(self)]: + assert id(self) not in reentrance_ids + reentrance_ids[id(self)] = repr(len(reentrance_ids) + 1) + prefix = "(%s)" % reentrance_ids[id(self)] + else: + prefix = "" + + segments = [] + for fval in self: + if id(fval) in reentrance_ids: + segments.append("->(%s)" % reentrance_ids[id(fval)]) + elif isinstance(fval, Variable): + segments.append(fval.name) + elif isinstance(fval, Expression): + segments.append("%s" % fval) + elif isinstance(fval, FeatStruct): + segments.append(fval._repr(reentrances, reentrance_ids)) + else: + segments.append("%s" % repr(fval)) + + return "{}[{}]".format(prefix, ", ".join(segments)) + + +###################################################################### +# Variables & Bindings +###################################################################### + + +def substitute_bindings(fstruct, bindings, fs_class="default"): + """ + Return the feature structure that is obtained by replacing each + variable bound by ``bindings`` with its binding. If a variable is + aliased to a bound variable, then it will be replaced by that + variable's value. If a variable is aliased to an unbound + variable, then it will be replaced by that variable. + + :type bindings: dict(Variable -> any) + :param bindings: A dictionary mapping from variables to values. 
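+
+    For example (an illustrative sketch using a plain dict as a
+    "light-weight" feature structure; the feature and variable names are
+    arbitrary):
+
+        >>> from nltk.featstruct import substitute_bindings
+        >>> from nltk.sem.logic import Variable
+        >>> substitute_bindings({'case': Variable('?x')}, {Variable('?x'): 'nom'})  # doctest: +SKIP
+        {'case': 'nom'}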
+ """ + if fs_class == "default": + fs_class = _default_fs_class(fstruct) + fstruct = copy.deepcopy(fstruct) + _substitute_bindings(fstruct, bindings, fs_class, set()) + return fstruct + + +def _substitute_bindings(fstruct, bindings, fs_class, visited): + # Visit each node only once: + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + + if _is_mapping(fstruct): + items = fstruct.items() + elif _is_sequence(fstruct): + items = enumerate(fstruct) + else: + raise ValueError("Expected mapping or sequence") + for (fname, fval) in items: + while isinstance(fval, Variable) and fval in bindings: + fval = fstruct[fname] = bindings[fval] + if isinstance(fval, fs_class): + _substitute_bindings(fval, bindings, fs_class, visited) + elif isinstance(fval, SubstituteBindingsI): + fstruct[fname] = fval.substitute_bindings(bindings) + + +def retract_bindings(fstruct, bindings, fs_class="default"): + """ + Return the feature structure that is obtained by replacing each + feature structure value that is bound by ``bindings`` with the + variable that binds it. A feature structure value must be + identical to a bound value (i.e., have equal id) to be replaced. + + ``bindings`` is modified to point to this new feature structure, + rather than the original feature structure. Feature structure + values in ``bindings`` may be modified if they are contained in + ``fstruct``. + """ + if fs_class == "default": + fs_class = _default_fs_class(fstruct) + (fstruct, new_bindings) = copy.deepcopy((fstruct, bindings)) + bindings.update(new_bindings) + inv_bindings = {id(val): var for (var, val) in bindings.items()} + _retract_bindings(fstruct, inv_bindings, fs_class, set()) + return fstruct + + +def _retract_bindings(fstruct, inv_bindings, fs_class, visited): + # Visit each node only once: + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + + if _is_mapping(fstruct): + items = fstruct.items() + elif _is_sequence(fstruct): + items = enumerate(fstruct) + else: + raise ValueError("Expected mapping or sequence") + for (fname, fval) in items: + if isinstance(fval, fs_class): + if id(fval) in inv_bindings: + fstruct[fname] = inv_bindings[id(fval)] + _retract_bindings(fval, inv_bindings, fs_class, visited) + + +def find_variables(fstruct, fs_class="default"): + """ + :return: The set of variables used by this feature structure. + :rtype: set(Variable) + """ + if fs_class == "default": + fs_class = _default_fs_class(fstruct) + return _variables(fstruct, set(), fs_class, set()) + + +def _variables(fstruct, vars, fs_class, visited): + # Visit each node only once: + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + if _is_mapping(fstruct): + items = fstruct.items() + elif _is_sequence(fstruct): + items = enumerate(fstruct) + else: + raise ValueError("Expected mapping or sequence") + for (fname, fval) in items: + if isinstance(fval, Variable): + vars.add(fval) + elif isinstance(fval, fs_class): + _variables(fval, vars, fs_class, visited) + elif isinstance(fval, SubstituteBindingsI): + vars.update(fval.variables()) + return vars + + +def rename_variables( + fstruct, vars=None, used_vars=(), new_vars=None, fs_class="default" +): + """ + Return the feature structure that is obtained by replacing + any of this feature structure's variables that are in ``vars`` + with new variables. The names for these new variables will be + names that are not used by any variable in ``vars``, or in + ``used_vars``, or in this feature structure. 
+ + :type vars: set + :param vars: The set of variables that should be renamed. + If not specified, ``find_variables(fstruct)`` is used; i.e., all + variables will be given new names. + :type used_vars: set + :param used_vars: A set of variables whose names should not be + used by the new variables. + :type new_vars: dict(Variable -> Variable) + :param new_vars: A dictionary that is used to hold the mapping + from old variables to new variables. For each variable *v* + in this feature structure: + + - If ``new_vars`` maps *v* to *v'*, then *v* will be + replaced by *v'*. + - If ``new_vars`` does not contain *v*, but ``vars`` + does contain *v*, then a new entry will be added to + ``new_vars``, mapping *v* to the new variable that is used + to replace it. + + To consistently rename the variables in a set of feature + structures, simply apply rename_variables to each one, using + the same dictionary: + + >>> from nltk.featstruct import FeatStruct + >>> fstruct1 = FeatStruct('[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]') + >>> fstruct2 = FeatStruct('[subj=[agr=[number=?z,gender=?y]], obj=[agr=[number=?z,gender=?y]]]') + >>> new_vars = {} # Maps old vars to alpha-renamed vars + >>> fstruct1.rename_variables(new_vars=new_vars) + [obj=[agr=[gender=?y2]], subj=[agr=[gender=?y2]]] + >>> fstruct2.rename_variables(new_vars=new_vars) + [obj=[agr=[gender=?y2, number=?z2]], subj=[agr=[gender=?y2, number=?z2]]] + + If new_vars is not specified, then an empty dictionary is used. + """ + if fs_class == "default": + fs_class = _default_fs_class(fstruct) + + # Default values: + if new_vars is None: + new_vars = {} + if vars is None: + vars = find_variables(fstruct, fs_class) + else: + vars = set(vars) + + # Add our own variables to used_vars. + used_vars = find_variables(fstruct, fs_class).union(used_vars) + + # Copy ourselves, and rename variables in the copy. + return _rename_variables( + copy.deepcopy(fstruct), vars, used_vars, new_vars, fs_class, set() + ) + + +def _rename_variables(fstruct, vars, used_vars, new_vars, fs_class, visited): + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + if _is_mapping(fstruct): + items = fstruct.items() + elif _is_sequence(fstruct): + items = enumerate(fstruct) + else: + raise ValueError("Expected mapping or sequence") + for (fname, fval) in items: + if isinstance(fval, Variable): + # If it's in new_vars, then rebind it. + if fval in new_vars: + fstruct[fname] = new_vars[fval] + # If it's in vars, pick a new name for it. + elif fval in vars: + new_vars[fval] = _rename_variable(fval, used_vars) + fstruct[fname] = new_vars[fval] + used_vars.add(new_vars[fval]) + elif isinstance(fval, fs_class): + _rename_variables(fval, vars, used_vars, new_vars, fs_class, visited) + elif isinstance(fval, SubstituteBindingsI): + # Pick new names for any variables in `vars` + for var in fval.variables(): + if var in vars and var not in new_vars: + new_vars[var] = _rename_variable(var, used_vars) + used_vars.add(new_vars[var]) + # Replace all variables in `new_vars`. + fstruct[fname] = fval.substitute_bindings(new_vars) + return fstruct + + +def _rename_variable(var, used_vars): + name, n = re.sub(r"\d+$", "", var.name), 2 + if not name: + name = "?" + while Variable(f"{name}{n}") in used_vars: + n += 1 + return Variable(f"{name}{n}") + + +def remove_variables(fstruct, fs_class="default"): + """ + :rtype: FeatStruct + :return: The feature structure that is obtained by deleting + all features whose values are ``Variables``. 
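+
+    For example (illustrative; the feature names are arbitrary):
+
+        >>> from nltk.featstruct import FeatStruct, remove_variables
+        >>> remove_variables(FeatStruct('[a=1, b=?x]'))  # doctest: +SKIP
+        [a=1]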
+ """ + if fs_class == "default": + fs_class = _default_fs_class(fstruct) + return _remove_variables(copy.deepcopy(fstruct), fs_class, set()) + + +def _remove_variables(fstruct, fs_class, visited): + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + + if _is_mapping(fstruct): + items = list(fstruct.items()) + elif _is_sequence(fstruct): + items = list(enumerate(fstruct)) + else: + raise ValueError("Expected mapping or sequence") + + for (fname, fval) in items: + if isinstance(fval, Variable): + del fstruct[fname] + elif isinstance(fval, fs_class): + _remove_variables(fval, fs_class, visited) + return fstruct + + +###################################################################### +# Unification +###################################################################### + + +class _UnificationFailure: + def __repr__(self): + return "nltk.featstruct.UnificationFailure" + + +UnificationFailure = _UnificationFailure() +"""A unique value used to indicate unification failure. It can be + returned by ``Feature.unify_base_values()`` or by custom ``fail()`` + functions to indicate that unificaiton should fail.""" + + +# The basic unification algorithm: +# 1. Make copies of self and other (preserving reentrance) +# 2. Destructively unify self and other +# 3. Apply forward pointers, to preserve reentrance. +# 4. Replace bound variables with their values. +def unify( + fstruct1, + fstruct2, + bindings=None, + trace=False, + fail=None, + rename_vars=True, + fs_class="default", +): + """ + Unify ``fstruct1`` with ``fstruct2``, and return the resulting feature + structure. This unified feature structure is the minimal + feature structure that contains all feature value assignments from both + ``fstruct1`` and ``fstruct2``, and that preserves all reentrancies. + + If no such feature structure exists (because ``fstruct1`` and + ``fstruct2`` specify incompatible values for some feature), then + unification fails, and ``unify`` returns None. + + Bound variables are replaced by their values. Aliased + variables are replaced by their representative variable + (if unbound) or the value of their representative variable + (if bound). I.e., if variable *v* is in ``bindings``, + then *v* is replaced by ``bindings[v]``. This will + be repeated until the variable is replaced by an unbound + variable or a non-variable value. + + Unbound variables are bound when they are unified with + values; and aliased when they are unified with variables. + I.e., if variable *v* is not in ``bindings``, and is + unified with a variable or value *x*, then + ``bindings[v]`` is set to *x*. + + If ``bindings`` is unspecified, then all variables are + assumed to be unbound. I.e., ``bindings`` defaults to an + empty dict. + + >>> from nltk.featstruct import FeatStruct + >>> FeatStruct('[a=?x]').unify(FeatStruct('[b=?x]')) + [a=?x, b=?x2] + + :type bindings: dict(Variable -> any) + :param bindings: A set of variable bindings to be used and + updated during unification. + :type trace: bool + :param trace: If true, generate trace output. + :type rename_vars: bool + :param rename_vars: If True, then rename any variables in + ``fstruct2`` that are also used in ``fstruct1``, in order to + avoid collisions on variable names. + """ + # Decide which class(es) will be treated as feature structures, + # for the purposes of unification. 
+ if fs_class == "default": + fs_class = _default_fs_class(fstruct1) + if _default_fs_class(fstruct2) != fs_class: + raise ValueError( + "Mixing FeatStruct objects with Python " + "dicts and lists is not supported." + ) + assert isinstance(fstruct1, fs_class) + assert isinstance(fstruct2, fs_class) + + # If bindings are unspecified, use an empty set of bindings. + user_bindings = bindings is not None + if bindings is None: + bindings = {} + + # Make copies of fstruct1 and fstruct2 (since the unification + # algorithm is destructive). Do it all at once, to preserve + # reentrance links between fstruct1 and fstruct2. Copy bindings + # as well, in case there are any bound vars that contain parts + # of fstruct1 or fstruct2. + (fstruct1copy, fstruct2copy, bindings_copy) = copy.deepcopy( + (fstruct1, fstruct2, bindings) + ) + + # Copy the bindings back to the original bindings dict. + bindings.update(bindings_copy) + + if rename_vars: + vars1 = find_variables(fstruct1copy, fs_class) + vars2 = find_variables(fstruct2copy, fs_class) + _rename_variables(fstruct2copy, vars1, vars2, {}, fs_class, set()) + + # Do the actual unification. If it fails, return None. + forward = {} + if trace: + _trace_unify_start((), fstruct1copy, fstruct2copy) + try: + result = _destructively_unify( + fstruct1copy, fstruct2copy, bindings, forward, trace, fail, fs_class, () + ) + except _UnificationFailureError: + return None + + # _destructively_unify might return UnificationFailure, e.g. if we + # tried to unify a mapping with a sequence. + if result is UnificationFailure: + if fail is None: + return None + else: + return fail(fstruct1copy, fstruct2copy, ()) + + # Replace any feature structure that has a forward pointer + # with the target of its forward pointer. + result = _apply_forwards(result, forward, fs_class, set()) + if user_bindings: + _apply_forwards_to_bindings(forward, bindings) + + # Replace bound vars with values. + _resolve_aliases(bindings) + _substitute_bindings(result, bindings, fs_class, set()) + + # Return the result. + if trace: + _trace_unify_succeed((), result) + if trace: + _trace_bindings((), bindings) + return result + + +class _UnificationFailureError(Exception): + """An exception that is used by ``_destructively_unify`` to abort + unification when a failure is encountered.""" + + +def _destructively_unify( + fstruct1, fstruct2, bindings, forward, trace, fail, fs_class, path +): + """ + Attempt to unify ``fstruct1`` and ``fstruct2`` by modifying them + in-place. If the unification succeeds, then ``fstruct1`` will + contain the unified value, the value of ``fstruct2`` is undefined, + and forward[id(fstruct2)] is set to fstruct1. If the unification + fails, then a _UnificationFailureError is raised, and the + values of ``fstruct1`` and ``fstruct2`` are undefined. + + :param bindings: A dictionary mapping variables to values. + :param forward: A dictionary mapping feature structures ids + to replacement structures. When two feature structures + are merged, a mapping from one to the other will be added + to the forward dictionary; and changes will be made only + to the target of the forward dictionary. + ``_destructively_unify`` will always 'follow' any links + in the forward dictionary for fstruct1 and fstruct2 before + actually unifying them. + :param trace: If true, generate trace output + :param path: The feature path that led us to this unification + step. Used for trace output. + """ + # If fstruct1 is already identical to fstruct2, we're done. 
+ # Note: this, together with the forward pointers, ensures + # that unification will terminate even for cyclic structures. + if fstruct1 is fstruct2: + if trace: + _trace_unify_identity(path, fstruct1) + return fstruct1 + + # Set fstruct2's forward pointer to point to fstruct1; this makes + # fstruct1 the canonical copy for fstruct2. Note that we need to + # do this before we recurse into any child structures, in case + # they're cyclic. + forward[id(fstruct2)] = fstruct1 + + # Unifying two mappings: + if _is_mapping(fstruct1) and _is_mapping(fstruct2): + for fname in fstruct1: + if getattr(fname, "default", None) is not None: + fstruct2.setdefault(fname, fname.default) + for fname in fstruct2: + if getattr(fname, "default", None) is not None: + fstruct1.setdefault(fname, fname.default) + + # Unify any values that are defined in both fstruct1 and + # fstruct2. Copy any values that are defined in fstruct2 but + # not in fstruct1 to fstruct1. Note: sorting fstruct2's + # features isn't actually necessary; but we do it to give + # deterministic behavior, e.g. for tracing. + for fname, fval2 in sorted(fstruct2.items()): + if fname in fstruct1: + fstruct1[fname] = _unify_feature_values( + fname, + fstruct1[fname], + fval2, + bindings, + forward, + trace, + fail, + fs_class, + path + (fname,), + ) + else: + fstruct1[fname] = fval2 + + return fstruct1 # Contains the unified value. + + # Unifying two sequences: + elif _is_sequence(fstruct1) and _is_sequence(fstruct2): + # If the lengths don't match, fail. + if len(fstruct1) != len(fstruct2): + return UnificationFailure + + # Unify corresponding values in fstruct1 and fstruct2. + for findex in range(len(fstruct1)): + fstruct1[findex] = _unify_feature_values( + findex, + fstruct1[findex], + fstruct2[findex], + bindings, + forward, + trace, + fail, + fs_class, + path + (findex,), + ) + + return fstruct1 # Contains the unified value. + + # Unifying sequence & mapping: fail. The failure function + # doesn't get a chance to recover in this case. + elif (_is_sequence(fstruct1) or _is_mapping(fstruct1)) and ( + _is_sequence(fstruct2) or _is_mapping(fstruct2) + ): + return UnificationFailure + + # Unifying anything else: not allowed! + raise TypeError("Expected mappings or sequences") + + +def _unify_feature_values( + fname, fval1, fval2, bindings, forward, trace, fail, fs_class, fpath +): + """ + Attempt to unify ``fval1`` and and ``fval2``, and return the + resulting unified value. The method of unification will depend on + the types of ``fval1`` and ``fval2``: + + 1. If they're both feature structures, then destructively + unify them (see ``_destructively_unify()``. + 2. If they're both unbound variables, then alias one variable + to the other (by setting bindings[v2]=v1). + 3. If one is an unbound variable, and the other is a value, + then bind the unbound variable to the value. + 4. If one is a feature structure, and the other is a base value, + then fail. + 5. If they're both base values, then unify them. By default, + this will succeed if they are equal, and fail otherwise. + """ + if trace: + _trace_unify_start(fpath, fval1, fval2) + + # Look up the "canonical" copy of fval1 and fval2 + while id(fval1) in forward: + fval1 = forward[id(fval1)] + while id(fval2) in forward: + fval2 = forward[id(fval2)] + + # If fval1 or fval2 is a bound variable, then + # replace it by the variable's bound value. This + # includes aliased variables, which are encoded as + # variables bound to other variables. 
+ fvar1 = fvar2 = None + while isinstance(fval1, Variable) and fval1 in bindings: + fvar1 = fval1 + fval1 = bindings[fval1] + while isinstance(fval2, Variable) and fval2 in bindings: + fvar2 = fval2 + fval2 = bindings[fval2] + + # Case 1: Two feature structures (recursive case) + if isinstance(fval1, fs_class) and isinstance(fval2, fs_class): + result = _destructively_unify( + fval1, fval2, bindings, forward, trace, fail, fs_class, fpath + ) + + # Case 2: Two unbound variables (create alias) + elif isinstance(fval1, Variable) and isinstance(fval2, Variable): + if fval1 != fval2: + bindings[fval2] = fval1 + result = fval1 + + # Case 3: An unbound variable and a value (bind) + elif isinstance(fval1, Variable): + bindings[fval1] = fval2 + result = fval1 + elif isinstance(fval2, Variable): + bindings[fval2] = fval1 + result = fval2 + + # Case 4: A feature structure & a base value (fail) + elif isinstance(fval1, fs_class) or isinstance(fval2, fs_class): + result = UnificationFailure + + # Case 5: Two base values + else: + # Case 5a: Feature defines a custom unification method for base values + if isinstance(fname, Feature): + result = fname.unify_base_values(fval1, fval2, bindings) + # Case 5b: Feature value defines custom unification method + elif isinstance(fval1, CustomFeatureValue): + result = fval1.unify(fval2) + # Sanity check: unify value should be symmetric + if isinstance(fval2, CustomFeatureValue) and result != fval2.unify(fval1): + raise AssertionError( + "CustomFeatureValue objects %r and %r disagree " + "about unification value: %r vs. %r" + % (fval1, fval2, result, fval2.unify(fval1)) + ) + elif isinstance(fval2, CustomFeatureValue): + result = fval2.unify(fval1) + # Case 5c: Simple values -- check if they're equal. + else: + if fval1 == fval2: + result = fval1 + else: + result = UnificationFailure + + # If either value was a bound variable, then update the + # bindings. (This is really only necessary if fname is a + # Feature or if either value is a CustomFeatureValue.) + if result is not UnificationFailure: + if fvar1 is not None: + bindings[fvar1] = result + result = fvar1 + if fvar2 is not None and fvar2 != fvar1: + bindings[fvar2] = result + result = fvar2 + + # If we unification failed, call the failure function; it + # might decide to continue anyway. + if result is UnificationFailure: + if fail is not None: + result = fail(fval1, fval2, fpath) + if trace: + _trace_unify_fail(fpath[:-1], result) + if result is UnificationFailure: + raise _UnificationFailureError + + # Normalize the result. + if isinstance(result, fs_class): + result = _apply_forwards(result, forward, fs_class, set()) + + if trace: + _trace_unify_succeed(fpath, result) + if trace and isinstance(result, fs_class): + _trace_bindings(fpath, bindings) + + return result + + +def _apply_forwards_to_bindings(forward, bindings): + """ + Replace any feature structure that has a forward pointer with + the target of its forward pointer (to preserve reentrancy). + """ + for (var, value) in bindings.items(): + while id(value) in forward: + value = forward[id(value)] + bindings[var] = value + + +def _apply_forwards(fstruct, forward, fs_class, visited): + """ + Replace any feature structure that has a forward pointer with + the target of its forward pointer (to preserve reentrancy). 
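# Usage sketch (not part of the original file): Cases 2 and 3 above in
# action -- unifying two unbound variables aliases one to the other, while
# unifying a variable with a value binds it (and ``unify()`` substitutes the
# binding back into the result).
from nltk.featstruct import FeatStruct, unify

bindings = {}
print(unify(FeatStruct("[x=?a]"), FeatStruct("[x=?b]"), bindings))
print(bindings)   # expected: [x=?a] and {Variable('?b'): Variable('?a')}

bindings = {}
print(unify(FeatStruct("[x=?a]"), FeatStruct("[x=3]"), bindings))
print(bindings)   # expected: [x=3] and {Variable('?a'): 3}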
+ """ + # Follow our own forwards pointers (if any) + while id(fstruct) in forward: + fstruct = forward[id(fstruct)] + + # Visit each node only once: + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + + if _is_mapping(fstruct): + items = fstruct.items() + elif _is_sequence(fstruct): + items = enumerate(fstruct) + else: + raise ValueError("Expected mapping or sequence") + for fname, fval in items: + if isinstance(fval, fs_class): + # Replace w/ forwarded value. + while id(fval) in forward: + fval = forward[id(fval)] + fstruct[fname] = fval + # Recurse to child. + _apply_forwards(fval, forward, fs_class, visited) + + return fstruct + + +def _resolve_aliases(bindings): + """ + Replace any bound aliased vars with their binding; and replace + any unbound aliased vars with their representative var. + """ + for (var, value) in bindings.items(): + while isinstance(value, Variable) and value in bindings: + value = bindings[var] = bindings[value] + + +def _trace_unify_start(path, fval1, fval2): + if path == (): + print("\nUnification trace:") + else: + fullname = ".".join("%s" % n for n in path) + print(" " + "| " * (len(path) - 1) + "|") + print(" " + "| " * (len(path) - 1) + "| Unify feature: %s" % fullname) + print(" " + "| " * len(path) + " / " + _trace_valrepr(fval1)) + print(" " + "| " * len(path) + "|\\ " + _trace_valrepr(fval2)) + + +def _trace_unify_identity(path, fval1): + print(" " + "| " * len(path) + "|") + print(" " + "| " * len(path) + "| (identical objects)") + print(" " + "| " * len(path) + "|") + print(" " + "| " * len(path) + "+-->" + repr(fval1)) + + +def _trace_unify_fail(path, result): + if result is UnificationFailure: + resume = "" + else: + resume = " (nonfatal)" + print(" " + "| " * len(path) + "| |") + print(" " + "X " * len(path) + "X X <-- FAIL" + resume) + + +def _trace_unify_succeed(path, fval1): + # Print the result. + print(" " + "| " * len(path) + "|") + print(" " + "| " * len(path) + "+-->" + repr(fval1)) + + +def _trace_bindings(path, bindings): + # Print the bindings (if any). + if len(bindings) > 0: + binditems = sorted(bindings.items(), key=lambda v: v[0].name) + bindstr = "{%s}" % ", ".join( + f"{var}: {_trace_valrepr(val)}" for (var, val) in binditems + ) + print(" " + "| " * len(path) + " Bindings: " + bindstr) + + +def _trace_valrepr(val): + if isinstance(val, Variable): + return "%s" % val + else: + return "%s" % repr(val) + + +def subsumes(fstruct1, fstruct2): + """ + Return True if ``fstruct1`` subsumes ``fstruct2``. I.e., return + true if unifying ``fstruct1`` with ``fstruct2`` would result in a + feature structure equal to ``fstruct2.`` + + :rtype: bool + """ + return fstruct2 == unify(fstruct1, fstruct2) + + +def conflicts(fstruct1, fstruct2, trace=0): + """ + Return a list of the feature paths of all features which are + assigned incompatible values by ``fstruct1`` and ``fstruct2``. 
+ + :rtype: list(tuple) + """ + conflict_list = [] + + def add_conflict(fval1, fval2, path): + conflict_list.append(path) + return fval1 + + unify(fstruct1, fstruct2, fail=add_conflict, trace=trace) + return conflict_list + + +###################################################################### +# Helper Functions +###################################################################### + + +def _is_mapping(v): + return hasattr(v, "__contains__") and hasattr(v, "keys") + + +def _is_sequence(v): + return hasattr(v, "__iter__") and hasattr(v, "__len__") and not isinstance(v, str) + + +def _default_fs_class(obj): + if isinstance(obj, FeatStruct): + return FeatStruct + if isinstance(obj, (dict, list)): + return (dict, list) + else: + raise ValueError( + "To unify objects of type %s, you must specify " + "fs_class explicitly." % obj.__class__.__name__ + ) + + +###################################################################### +# FeatureValueSet & FeatureValueTuple +###################################################################### + + +class SubstituteBindingsSequence(SubstituteBindingsI): + """ + A mixin class for sequence classes that distributes variables() and + substitute_bindings() over the object's elements. + """ + + def variables(self): + return [elt for elt in self if isinstance(elt, Variable)] + sum( + ( + list(elt.variables()) + for elt in self + if isinstance(elt, SubstituteBindingsI) + ), + [], + ) + + def substitute_bindings(self, bindings): + return self.__class__([self.subst(v, bindings) for v in self]) + + def subst(self, v, bindings): + if isinstance(v, SubstituteBindingsI): + return v.substitute_bindings(bindings) + else: + return bindings.get(v, v) + + +class FeatureValueTuple(SubstituteBindingsSequence, tuple): + """ + A base feature value that is a tuple of other base feature values. + FeatureValueTuple implements ``SubstituteBindingsI``, so it any + variable substitutions will be propagated to the elements + contained by the set. A ``FeatureValueTuple`` is immutable. + """ + + def __repr__(self): # [xx] really use %s here? + if len(self) == 0: + return "()" + return "(%s)" % ", ".join(f"{b}" for b in self) + + +class FeatureValueSet(SubstituteBindingsSequence, frozenset): + """ + A base feature value that is a set of other base feature values. + FeatureValueSet implements ``SubstituteBindingsI``, so it any + variable substitutions will be propagated to the elements + contained by the set. A ``FeatureValueSet`` is immutable. + """ + + def __repr__(self): # [xx] really use %s here? + if len(self) == 0: + return "{/}" # distinguish from dict. + # n.b., we sort the string reprs of our elements, to ensure + # that our own repr is deterministic. + return "{%s}" % ", ".join(sorted(f"{b}" for b in self)) + + __str__ = __repr__ + + +class FeatureValueUnion(SubstituteBindingsSequence, frozenset): + """ + A base feature value that represents the union of two or more + ``FeatureValueSet`` or ``Variable``. + """ + + def __new__(cls, values): + # If values contains FeatureValueUnions, then collapse them. + values = _flatten(values, FeatureValueUnion) + + # If the resulting list contains no variables, then + # use a simple FeatureValueSet instead. + if sum(isinstance(v, Variable) for v in values) == 0: + values = _flatten(values, FeatureValueSet) + return FeatureValueSet(values) + + # If we contain a single variable, return that variable. + if len(values) == 1: + return list(values)[0] + + # Otherwise, build the FeatureValueUnion. 
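# Usage sketch (not part of the original file): ``conflicts()`` above finds
# the incompatible feature paths by passing a ``fail=`` callback into
# ``unify()``; the callback records each failing path and then recovers by
# returning one of the two values.
from nltk.featstruct import FeatStruct, conflicts

fs1 = FeatStruct("[agr=[num=sg, gender=masc], cat=NP]")
fs2 = FeatStruct("[agr=[num=pl, gender=masc], cat=NP]")
print(conflicts(fs1, fs2))   # expected: [('agr', 'num')]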
+ return frozenset.__new__(cls, values) + + def __repr__(self): + # n.b., we sort the string reprs of our elements, to ensure + # that our own repr is deterministic. also, note that len(self) + # is guaranteed to be 2 or more. + return "{%s}" % "+".join(sorted(f"{b}" for b in self)) + + +class FeatureValueConcat(SubstituteBindingsSequence, tuple): + """ + A base feature value that represents the concatenation of two or + more ``FeatureValueTuple`` or ``Variable``. + """ + + def __new__(cls, values): + # If values contains FeatureValueConcats, then collapse them. + values = _flatten(values, FeatureValueConcat) + + # If the resulting list contains no variables, then + # use a simple FeatureValueTuple instead. + if sum(isinstance(v, Variable) for v in values) == 0: + values = _flatten(values, FeatureValueTuple) + return FeatureValueTuple(values) + + # If we contain a single variable, return that variable. + if len(values) == 1: + return list(values)[0] + + # Otherwise, build the FeatureValueConcat. + return tuple.__new__(cls, values) + + def __repr__(self): + # n.b.: len(self) is guaranteed to be 2 or more. + return "(%s)" % "+".join(f"{b}" for b in self) + + +def _flatten(lst, cls): + """ + Helper function -- return a copy of list, with all elements of + type ``cls`` spliced in rather than appended in. + """ + result = [] + for elt in lst: + if isinstance(elt, cls): + result.extend(elt) + else: + result.append(elt) + return result + + +###################################################################### +# Specialized Features +###################################################################### + + +@total_ordering +class Feature: + """ + A feature identifier that's specialized to put additional + constraints, default values, etc. + """ + + def __init__(self, name, default=None, display=None): + assert display in (None, "prefix", "slash") + + self._name = name # [xx] rename to .identifier? + self._default = default # [xx] not implemented yet. + self._display = display + + if self._display == "prefix": + self._sortkey = (-1, self._name) + elif self._display == "slash": + self._sortkey = (1, self._name) + else: + self._sortkey = (0, self._name) + + @property + def name(self): + """The name of this feature.""" + return self._name + + @property + def default(self): + """Default value for this feature.""" + return self._default + + @property + def display(self): + """Custom display location: can be prefix, or slash.""" + return self._display + + def __repr__(self): + return "*%s*" % self.name + + def __lt__(self, other): + if isinstance(other, str): + return True + if not isinstance(other, Feature): + raise_unorderable_types("<", self, other) + return self._sortkey < other._sortkey + + def __eq__(self, other): + return type(self) == type(other) and self._name == other._name + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self._name) + + # //////////////////////////////////////////////////////////// + # These can be overridden by subclasses: + # //////////////////////////////////////////////////////////// + + def read_value(self, s, position, reentrances, parser): + return parser.read_value(s, position, reentrances) + + def unify_base_values(self, fval1, fval2, bindings): + """ + If possible, return a single value.. If not, return + the value ``UnificationFailure``. 
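# Illustrative sketch (not part of the original file): ``Feature`` subclasses
# may override ``unify_base_values()`` to control how two base values are
# combined.  ``MaxFeature`` and ``DEGREE`` below are hypothetical names used
# only for this example.
from nltk.featstruct import Feature, FeatStruct, UnificationFailure, unify

class MaxFeature(Feature):
    """Hypothetical feature that unifies two numbers by taking the maximum."""
    def unify_base_values(self, fval1, fval2, bindings):
        try:
            return max(fval1, fval2)
        except TypeError:
            return UnificationFailure

DEGREE = MaxFeature("degree")
print(unify(FeatStruct({DEGREE: 2}), FeatStruct({DEGREE: 5})))
# expected (roughly): [*degree*=5]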
+ """ + if fval1 == fval2: + return fval1 + else: + return UnificationFailure + + +class SlashFeature(Feature): + def read_value(self, s, position, reentrances, parser): + return parser.read_partial(s, position, reentrances) + + +class RangeFeature(Feature): + RANGE_RE = re.compile(r"(-?\d+):(-?\d+)") + + def read_value(self, s, position, reentrances, parser): + m = self.RANGE_RE.match(s, position) + if not m: + raise ValueError("range", position) + return (int(m.group(1)), int(m.group(2))), m.end() + + def unify_base_values(self, fval1, fval2, bindings): + if fval1 is None: + return fval2 + if fval2 is None: + return fval1 + rng = max(fval1[0], fval2[0]), min(fval1[1], fval2[1]) + if rng[1] < rng[0]: + return UnificationFailure + return rng + + +SLASH = SlashFeature("slash", default=False, display="slash") +TYPE = Feature("type", display="prefix") + + +###################################################################### +# Specialized Feature Values +###################################################################### + + +@total_ordering +class CustomFeatureValue: + """ + An abstract base class for base values that define a custom + unification method. The custom unification method of + ``CustomFeatureValue`` will be used during unification if: + + - The ``CustomFeatureValue`` is unified with another base value. + - The ``CustomFeatureValue`` is not the value of a customized + ``Feature`` (which defines its own unification method). + + If two ``CustomFeatureValue`` objects are unified with one another + during feature structure unification, then the unified base values + they return *must* be equal; otherwise, an ``AssertionError`` will + be raised. + + Subclasses must define ``unify()``, ``__eq__()`` and ``__lt__()``. + Subclasses may also wish to define ``__hash__()``. + """ + + def unify(self, other): + """ + If this base value unifies with ``other``, then return the + unified value. Otherwise, return ``UnificationFailure``. + """ + raise NotImplementedError("abstract base class") + + def __eq__(self, other): + return NotImplemented + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + return NotImplemented + + def __hash__(self): + raise TypeError("%s objects or unhashable" % self.__class__.__name__) + + +###################################################################### +# Feature Structure Reader +###################################################################### + + +class FeatStructReader: + def __init__( + self, + features=(SLASH, TYPE), + fdict_class=FeatStruct, + flist_class=FeatList, + logic_parser=None, + ): + self._features = {f.name: f for f in features} + self._fdict_class = fdict_class + self._flist_class = flist_class + self._prefix_feature = None + self._slash_feature = None + for feature in features: + if feature.display == "slash": + if self._slash_feature: + raise ValueError("Multiple features w/ display=slash") + self._slash_feature = feature + if feature.display == "prefix": + if self._prefix_feature: + raise ValueError("Multiple features w/ display=prefix") + self._prefix_feature = feature + self._features_with_defaults = [ + feature for feature in features if feature.default is not None + ] + if logic_parser is None: + logic_parser = LogicParser() + self._logic_parser = logic_parser + + def fromstring(self, s, fstruct=None): + """ + Convert a string representation of a feature structure (as + displayed by repr) into a ``FeatStruct``. 
This process + imposes the following restrictions on the string + representation: + + - Feature names cannot contain any of the following: + whitespace, parentheses, quote marks, equals signs, + dashes, commas, and square brackets. Feature names may + not begin with plus signs or minus signs. + - Only the following basic feature value are supported: + strings, integers, variables, None, and unquoted + alphanumeric strings. + - For reentrant values, the first mention must specify + a reentrance identifier and a value; and any subsequent + mentions must use arrows (``'->'``) to reference the + reentrance identifier. + """ + s = s.strip() + value, position = self.read_partial(s, 0, {}, fstruct) + if position != len(s): + self._error(s, "end of string", position) + return value + + _START_FSTRUCT_RE = re.compile(r"\s*(?:\((\d+)\)\s*)?(\??[\w-]+)?(\[)") + _END_FSTRUCT_RE = re.compile(r"\s*]\s*") + _SLASH_RE = re.compile(r"/") + _FEATURE_NAME_RE = re.compile(r'\s*([+-]?)([^\s\(\)<>"\'\-=\[\],]+)\s*') + _REENTRANCE_RE = re.compile(r"\s*->\s*") + _TARGET_RE = re.compile(r"\s*\((\d+)\)\s*") + _ASSIGN_RE = re.compile(r"\s*=\s*") + _COMMA_RE = re.compile(r"\s*,\s*") + _BARE_PREFIX_RE = re.compile(r"\s*(?:\((\d+)\)\s*)?(\??[\w-]+\s*)()") + # This one is used to distinguish fdicts from flists: + _START_FDICT_RE = re.compile( + r"(%s)|(%s\s*(%s\s*(=|->)|[+-]%s|\]))" + % ( + _BARE_PREFIX_RE.pattern, + _START_FSTRUCT_RE.pattern, + _FEATURE_NAME_RE.pattern, + _FEATURE_NAME_RE.pattern, + ) + ) + + def read_partial(self, s, position=0, reentrances=None, fstruct=None): + """ + Helper function that reads in a feature structure. + + :param s: The string to read. + :param position: The position in the string to start parsing. + :param reentrances: A dictionary from reentrance ids to values. + Defaults to an empty dictionary. + :return: A tuple (val, pos) of the feature structure created by + parsing and the position where the parsed feature structure ends. + :rtype: bool + """ + if reentrances is None: + reentrances = {} + try: + return self._read_partial(s, position, reentrances, fstruct) + except ValueError as e: + if len(e.args) != 2: + raise + self._error(s, *e.args) + + def _read_partial(self, s, position, reentrances, fstruct=None): + # Create the new feature structure + if fstruct is None: + if self._START_FDICT_RE.match(s, position): + fstruct = self._fdict_class() + else: + fstruct = self._flist_class() + + # Read up to the open bracket. + match = self._START_FSTRUCT_RE.match(s, position) + if not match: + match = self._BARE_PREFIX_RE.match(s, position) + if not match: + raise ValueError("open bracket or identifier", position) + position = match.end() + + # If there as an identifier, record it. + if match.group(1): + identifier = match.group(1) + if identifier in reentrances: + raise ValueError("new identifier", match.start(1)) + reentrances[identifier] = fstruct + + if isinstance(fstruct, FeatDict): + fstruct.clear() + return self._read_partial_featdict(s, position, match, reentrances, fstruct) + else: + del fstruct[:] + return self._read_partial_featlist(s, position, match, reentrances, fstruct) + + def _read_partial_featlist(self, s, position, match, reentrances, fstruct): + # Prefix features are not allowed: + if match.group(2): + raise ValueError("open bracket") + # Bare prefixes are not allowed: + if not match.group(3): + raise ValueError("open bracket") + + # Build a list of the features defined by the structure. + while position < len(s): + # Check for the close bracket. 
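# Usage sketch (not part of the original file): ``FeatStructReader`` is what
# ``FeatStruct(<string>)`` ultimately calls.  ``(1)[...]`` introduces a
# reentrance identifier and ``->(1)`` refers back to it, so both features
# end up sharing a single structure.
from nltk.featstruct import FeatStructReader

reader = FeatStructReader()
fs = reader.fromstring("[subj=(1)[agr=[num=sg]], obj->(1)]")
print(fs["subj"] is fs["obj"])   # expected: True (one shared structure)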
+ match = self._END_FSTRUCT_RE.match(s, position) + if match is not None: + return fstruct, match.end() + + # Reentances have the form "-> (target)" + match = self._REENTRANCE_RE.match(s, position) + if match: + position = match.end() + match = self._TARGET_RE.match(s, position) + if not match: + raise ValueError("identifier", position) + target = match.group(1) + if target not in reentrances: + raise ValueError("bound identifier", position) + position = match.end() + fstruct.append(reentrances[target]) + + # Anything else is a value. + else: + value, position = self._read_value(0, s, position, reentrances) + fstruct.append(value) + + # If there's a close bracket, handle it at the top of the loop. + if self._END_FSTRUCT_RE.match(s, position): + continue + + # Otherwise, there should be a comma + match = self._COMMA_RE.match(s, position) + if match is None: + raise ValueError("comma", position) + position = match.end() + + # We never saw a close bracket. + raise ValueError("close bracket", position) + + def _read_partial_featdict(self, s, position, match, reentrances, fstruct): + # If there was a prefix feature, record it. + if match.group(2): + if self._prefix_feature is None: + raise ValueError("open bracket or identifier", match.start(2)) + prefixval = match.group(2).strip() + if prefixval.startswith("?"): + prefixval = Variable(prefixval) + fstruct[self._prefix_feature] = prefixval + + # If group 3 is empty, then we just have a bare prefix, so + # we're done. + if not match.group(3): + return self._finalize(s, match.end(), reentrances, fstruct) + + # Build a list of the features defined by the structure. + # Each feature has one of the three following forms: + # name = value + # name -> (target) + # +name + # -name + while position < len(s): + # Use these variables to hold info about each feature: + name = value = None + + # Check for the close bracket. + match = self._END_FSTRUCT_RE.match(s, position) + if match is not None: + return self._finalize(s, match.end(), reentrances, fstruct) + + # Get the feature name's name + match = self._FEATURE_NAME_RE.match(s, position) + if match is None: + raise ValueError("feature name", position) + name = match.group(2) + position = match.end() + + # Check if it's a special feature. + if name[0] == "*" and name[-1] == "*": + name = self._features.get(name[1:-1]) + if name is None: + raise ValueError("known special feature", match.start(2)) + + # Check if this feature has a value already. + if name in fstruct: + raise ValueError("new name", match.start(2)) + + # Boolean value ("+name" or "-name") + if match.group(1) == "+": + value = True + if match.group(1) == "-": + value = False + + # Reentrance link ("-> (target)") + if value is None: + match = self._REENTRANCE_RE.match(s, position) + if match is not None: + position = match.end() + match = self._TARGET_RE.match(s, position) + if not match: + raise ValueError("identifier", position) + target = match.group(1) + if target not in reentrances: + raise ValueError("bound identifier", position) + position = match.end() + value = reentrances[target] + + # Assignment ("= value"). + if value is None: + match = self._ASSIGN_RE.match(s, position) + if match: + position = match.end() + value, position = self._read_value(name, s, position, reentrances) + # None of the above: error. + else: + raise ValueError("equals sign", position) + + # Store the value. + fstruct[name] = value + + # If there's a close bracket, handle it at the top of the loop. 
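# Usage sketch (not part of the original file): the four feature forms
# handled above are ``name=value``, ``name->(target)``, ``+name`` and
# ``-name``; the last two are shorthand for True and False values.
from nltk.featstruct import FeatStruct

fs = FeatStruct("[+definite, -plural, case=nom]")
print(fs["definite"], fs["plural"], fs["case"])   # expected: True False nom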
+ if self._END_FSTRUCT_RE.match(s, position): + continue + + # Otherwise, there should be a comma + match = self._COMMA_RE.match(s, position) + if match is None: + raise ValueError("comma", position) + position = match.end() + + # We never saw a close bracket. + raise ValueError("close bracket", position) + + def _finalize(self, s, pos, reentrances, fstruct): + """ + Called when we see the close brace -- checks for a slash feature, + and adds in default values. + """ + # Add the slash feature (if any) + match = self._SLASH_RE.match(s, pos) + if match: + name = self._slash_feature + v, pos = self._read_value(name, s, match.end(), reentrances) + fstruct[name] = v + ## Add any default features. -- handle in unficiation instead? + # for feature in self._features_with_defaults: + # fstruct.setdefault(feature, feature.default) + # Return the value. + return fstruct, pos + + def _read_value(self, name, s, position, reentrances): + if isinstance(name, Feature): + return name.read_value(s, position, reentrances, self) + else: + return self.read_value(s, position, reentrances) + + def read_value(self, s, position, reentrances): + for (handler, regexp) in self.VALUE_HANDLERS: + match = regexp.match(s, position) + if match: + handler_func = getattr(self, handler) + return handler_func(s, position, reentrances, match) + raise ValueError("value", position) + + def _error(self, s, expected, position): + lines = s.split("\n") + while position > len(lines[0]): + position -= len(lines.pop(0)) + 1 # +1 for the newline. + estr = ( + "Error parsing feature structure\n " + + lines[0] + + "\n " + + " " * position + + "^ " + + "Expected %s" % expected + ) + raise ValueError(estr) + + # //////////////////////////////////////////////////////////// + # { Value Readers + # //////////////////////////////////////////////////////////// + + #: A table indicating how feature values should be processed. Each + #: entry in the table is a pair (handler, regexp). The first entry + #: with a matching regexp will have its handler called. Handlers + #: should have the following signature:: + #: + #: def handler(s, position, reentrances, match): ... + #: + #: and should return a tuple (value, position), where position is + #: the string position where the value ended. (n.b.: order is + #: important here!) + VALUE_HANDLERS = [ + ("read_fstruct_value", _START_FSTRUCT_RE), + ("read_var_value", re.compile(r"\?[a-zA-Z_][a-zA-Z0-9_]*")), + ("read_str_value", re.compile("[uU]?[rR]?(['\"])")), + ("read_int_value", re.compile(r"-?\d+")), + ("read_sym_value", re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")), + ( + "read_app_value", + re.compile(r"<(app)\((\?[a-z][a-z]*)\s*," r"\s*(\?[a-z][a-z]*)\)>"), + ), + # ('read_logic_value', re.compile(r'<([^>]*)>')), + # lazily match any character after '<' until we hit a '>' not preceded by '-' + ("read_logic_value", re.compile(r"<(.*?)(?")), + ("read_set_value", re.compile(r"{")), + ("read_tuple_value", re.compile(r"\(")), + ] + + def read_fstruct_value(self, s, position, reentrances, match): + return self.read_partial(s, position, reentrances) + + def read_str_value(self, s, position, reentrances, match): + return read_str(s, position) + + def read_int_value(self, s, position, reentrances, match): + return int(match.group()), match.end() + + # Note: the '?' is included in the variable name. 
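# Usage sketch (not part of the original file): each value below is parsed by
# one of the VALUE_HANDLERS above -- an integer, a quoted string, a variable,
# the symbolic constant None, and a plain alphanumeric symbol.
from nltk.featstruct import FeatStruct

fs = FeatStruct("[count=3, name='Kim', who=?x, gap=None, cat=NP]")
print(type(fs["count"]), fs["name"], fs["who"], fs["gap"], fs["cat"])
# expected: <class 'int'> Kim ?x None NP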
+ def read_var_value(self, s, position, reentrances, match): + return Variable(match.group()), match.end() + + _SYM_CONSTS = {"None": None, "True": True, "False": False} + + def read_sym_value(self, s, position, reentrances, match): + val, end = match.group(), match.end() + return self._SYM_CONSTS.get(val, val), end + + def read_app_value(self, s, position, reentrances, match): + """Mainly included for backwards compat.""" + return self._logic_parser.parse("%s(%s)" % match.group(2, 3)), match.end() + + def read_logic_value(self, s, position, reentrances, match): + try: + try: + expr = self._logic_parser.parse(match.group(1)) + except LogicalExpressionException as e: + raise ValueError from e + return expr, match.end() + except ValueError as e: + raise ValueError("logic expression", match.start(1)) from e + + def read_tuple_value(self, s, position, reentrances, match): + return self._read_seq_value( + s, position, reentrances, match, ")", FeatureValueTuple, FeatureValueConcat + ) + + def read_set_value(self, s, position, reentrances, match): + return self._read_seq_value( + s, position, reentrances, match, "}", FeatureValueSet, FeatureValueUnion + ) + + def _read_seq_value( + self, s, position, reentrances, match, close_paren, seq_class, plus_class + ): + """ + Helper function used by read_tuple_value and read_set_value. + """ + cp = re.escape(close_paren) + position = match.end() + # Special syntax of empty tuples: + m = re.compile(r"\s*/?\s*%s" % cp).match(s, position) + if m: + return seq_class(), m.end() + # Read values: + values = [] + seen_plus = False + while True: + # Close paren: return value. + m = re.compile(r"\s*%s" % cp).match(s, position) + if m: + if seen_plus: + return plus_class(values), m.end() + else: + return seq_class(values), m.end() + + # Read the next value. + val, position = self.read_value(s, position, reentrances) + values.append(val) + + # Comma or looking at close paren + m = re.compile(r"\s*(,|\+|(?=%s))\s*" % cp).match(s, position) + if not m: + raise ValueError("',' or '+' or '%s'" % cp, position) + if m.group(1) == "+": + seen_plus = True + position = m.end() + + +###################################################################### +# { Demo +###################################################################### + + +def display_unification(fs1, fs2, indent=" "): + # Print the two input feature structures, side by side. 
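# Usage sketch (not part of the original file): ``read_tuple_value()`` and
# ``read_set_value()`` above produce FeatureValueTuple and FeatureValueSet
# base values; ``{/}`` denotes the empty set, written that way to keep it
# distinct from an empty feature structure.
from nltk.featstruct import FeatStruct, FeatureValueSet, FeatureValueTuple

fs = FeatStruct("[pair=(a, b), members={x, y}, empty={/}]")
print(isinstance(fs["pair"], FeatureValueTuple))    # expected: True
print(isinstance(fs["members"], FeatureValueSet))   # expected: True
print(len(fs["empty"]))                             # expected: 0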
+ fs1_lines = ("%s" % fs1).split("\n") + fs2_lines = ("%s" % fs2).split("\n") + if len(fs1_lines) > len(fs2_lines): + blankline = "[" + " " * (len(fs2_lines[0]) - 2) + "]" + fs2_lines += [blankline] * len(fs1_lines) + else: + blankline = "[" + " " * (len(fs1_lines[0]) - 2) + "]" + fs1_lines += [blankline] * len(fs2_lines) + for (fs1_line, fs2_line) in zip(fs1_lines, fs2_lines): + print(indent + fs1_line + " " + fs2_line) + print(indent + "-" * len(fs1_lines[0]) + " " + "-" * len(fs2_lines[0])) + + linelen = len(fs1_lines[0]) * 2 + 3 + print(indent + "| |".center(linelen)) + print(indent + "+-----UNIFY-----+".center(linelen)) + print(indent + "|".center(linelen)) + print(indent + "V".center(linelen)) + + bindings = {} + + result = fs1.unify(fs2, bindings) + if result is None: + print(indent + "(FAILED)".center(linelen)) + else: + print( + "\n".join(indent + l.center(linelen) for l in ("%s" % result).split("\n")) + ) + if bindings and len(bindings.bound_variables()) > 0: + print(repr(bindings).center(linelen)) + return result + + +def interactive_demo(trace=False): + import random + import sys + + HELP = """ + 1-%d: Select the corresponding feature structure + q: Quit + t: Turn tracing on or off + l: List all feature structures + ?: Help + """ + + print( + """ + This demo will repeatedly present you with a list of feature + structures, and ask you to choose two for unification. Whenever a + new feature structure is generated, it is added to the list of + choices that you can pick from. However, since this can be a + large number of feature structures, the demo will only print out a + random subset for you to choose between at a given time. If you + want to see the complete lists, type "l". For a list of valid + commands, type "?". + """ + ) + print('Press "Enter" to continue...') + sys.stdin.readline() + + fstruct_strings = [ + "[agr=[number=sing, gender=masc]]", + "[agr=[gender=masc, person=3]]", + "[agr=[gender=fem, person=3]]", + "[subj=[agr=(1)[]], agr->(1)]", + "[obj=?x]", + "[subj=?x]", + "[/=None]", + "[/=NP]", + "[cat=NP]", + "[cat=VP]", + "[cat=PP]", + "[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]", + "[gender=masc, agr=?C]", + "[gender=?S, agr=[gender=?S,person=3]]", + ] + + all_fstructs = [ + (i, FeatStruct(fstruct_strings[i])) for i in range(len(fstruct_strings)) + ] + + def list_fstructs(fstructs): + for i, fstruct in fstructs: + print() + lines = ("%s" % fstruct).split("\n") + print("%3d: %s" % (i + 1, lines[0])) + for line in lines[1:]: + print(" " + line) + print() + + while True: + # Pick 5 feature structures at random from the master list. 
+ MAX_CHOICES = 5 + if len(all_fstructs) > MAX_CHOICES: + fstructs = sorted(random.sample(all_fstructs, MAX_CHOICES)) + else: + fstructs = all_fstructs + + print("_" * 75) + + print("Choose two feature structures to unify:") + list_fstructs(fstructs) + + selected = [None, None] + for (nth, i) in (("First", 0), ("Second", 1)): + while selected[i] is None: + print( + ( + "%s feature structure (1-%d,q,t,l,?): " + % (nth, len(all_fstructs)) + ), + end=" ", + ) + try: + input = sys.stdin.readline().strip() + if input in ("q", "Q", "x", "X"): + return + if input in ("t", "T"): + trace = not trace + print(" Trace = %s" % trace) + continue + if input in ("h", "H", "?"): + print(HELP % len(fstructs)) + continue + if input in ("l", "L"): + list_fstructs(all_fstructs) + continue + num = int(input) - 1 + selected[i] = all_fstructs[num][1] + print() + except: + print("Bad sentence number") + continue + + if trace: + result = selected[0].unify(selected[1], trace=1) + else: + result = display_unification(selected[0], selected[1]) + if result is not None: + for i, fstruct in all_fstructs: + if repr(result) == repr(fstruct): + break + else: + all_fstructs.append((len(all_fstructs), result)) + + print('\nType "Enter" to continue unifying; or "q" to quit.') + input = sys.stdin.readline().strip() + if input in ("q", "Q", "x", "X"): + return + + +def demo(trace=False): + """ + Just for testing + """ + # import random + + # processor breaks with values like '3rd' + fstruct_strings = [ + "[agr=[number=sing, gender=masc]]", + "[agr=[gender=masc, person=3]]", + "[agr=[gender=fem, person=3]]", + "[subj=[agr=(1)[]], agr->(1)]", + "[obj=?x]", + "[subj=?x]", + "[/=None]", + "[/=NP]", + "[cat=NP]", + "[cat=VP]", + "[cat=PP]", + "[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]", + "[gender=masc, agr=?C]", + "[gender=?S, agr=[gender=?S,person=3]]", + ] + all_fstructs = [FeatStruct(fss) for fss in fstruct_strings] + # MAX_CHOICES = 5 + # if len(all_fstructs) > MAX_CHOICES: + # fstructs = random.sample(all_fstructs, MAX_CHOICES) + # fstructs.sort() + # else: + # fstructs = all_fstructs + + for fs1 in all_fstructs: + for fs2 in all_fstructs: + print( + "\n*******************\nfs1 is:\n%s\n\nfs2 is:\n%s\n\nresult is:\n%s" + % (fs1, fs2, unify(fs1, fs2)) + ) + + +if __name__ == "__main__": + demo() + +__all__ = [ + "FeatStruct", + "FeatDict", + "FeatList", + "unify", + "subsumes", + "conflicts", + "Feature", + "SlashFeature", + "RangeFeature", + "SLASH", + "TYPE", + "FeatStructReader", +] diff --git a/venv/lib/python3.10/site-packages/nltk/grammar.py b/venv/lib/python3.10/site-packages/nltk/grammar.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f1fe736a4a84e0982780e514108a6812f6876b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/grammar.py @@ -0,0 +1,1708 @@ +# Natural Language Toolkit: Context Free Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# Jason Narad +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT +# + +""" +Basic data classes for representing context free grammars. A +"grammar" specifies which trees can represent the structure of a +given text. Each of these trees is called a "parse tree" for the +text (or simply a "parse"). In a "context free" grammar, the set of +parse trees for any piece of a text can depend only on that piece, and +not on the rest of the text (i.e., the piece's context). Context free +grammars are often used to find possible syntactic structures for +sentences. 
In this context, the leaves of a parse tree are word +tokens; and the node values are phrasal categories, such as ``NP`` +and ``VP``. + +The ``CFG`` class is used to encode context free grammars. Each +``CFG`` consists of a start symbol and a set of productions. +The "start symbol" specifies the root node value for parse trees. For example, +the start symbol for syntactic parsing is usually ``S``. Start +symbols are encoded using the ``Nonterminal`` class, which is discussed +below. + +A Grammar's "productions" specify what parent-child relationships a parse +tree can contain. Each production specifies that a particular +node can be the parent of a particular set of children. For example, +the production `` -> `` specifies that an ``S`` node can +be the parent of an ``NP`` node and a ``VP`` node. + +Grammar productions are implemented by the ``Production`` class. +Each ``Production`` consists of a left hand side and a right hand +side. The "left hand side" is a ``Nonterminal`` that specifies the +node type for a potential parent; and the "right hand side" is a list +that specifies allowable children for that parent. This lists +consists of ``Nonterminals`` and text types: each ``Nonterminal`` +indicates that the corresponding child may be a ``TreeToken`` with the +specified node type; and each text type indicates that the +corresponding child may be a ``Token`` with the with that type. + +The ``Nonterminal`` class is used to distinguish node values from leaf +values. This prevents the grammar from accidentally using a leaf +value (such as the English word "A") as the node of a subtree. Within +a ``CFG``, all node values are wrapped in the ``Nonterminal`` +class. Note, however, that the trees that are specified by the grammar do +*not* include these ``Nonterminal`` wrappers. + +Grammars can also be given a more procedural interpretation. According to +this interpretation, a Grammar specifies any tree structure *tree* that +can be produced by the following procedure: + +| Set tree to the start symbol +| Repeat until tree contains no more nonterminal leaves: +| Choose a production prod with whose left hand side +| lhs is a nonterminal leaf of tree. +| Replace the nonterminal leaf with a subtree, whose node +| value is the value wrapped by the nonterminal lhs, and +| whose children are the right hand side of prod. + +The operation of replacing the left hand side (*lhs*) of a production +with the right hand side (*rhs*) in a tree (*tree*) is known as +"expanding" *lhs* to *rhs* in *tree*. +""" +import re +from functools import total_ordering + +from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader +from nltk.internals import raise_unorderable_types +from nltk.probability import ImmutableProbabilisticMixIn +from nltk.util import invert_graph, transitive_closure + +################################################################# +# Nonterminal +################################################################# + + +@total_ordering +class Nonterminal: + """ + A non-terminal symbol for a context free grammar. ``Nonterminal`` + is a wrapper class for node values; it is used by ``Production`` + objects to distinguish node values from leaf values. + The node value that is wrapped by a ``Nonterminal`` is known as its + "symbol". Symbols are typically strings representing phrasal + categories (such as ``"NP"`` or ``"VP"``). However, more complex + symbol types are sometimes used (e.g., for lexicalized grammars). 
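# Usage sketch (not part of the original file): a small grammar in the
# notation read by ``CFG.fromstring()`` -- nonterminals are bare symbols,
# terminals are quoted, and "|" abbreviates alternative right-hand sides.
from nltk import CFG

grammar = CFG.fromstring("""
    S -> NP VP
    NP -> Det N | 'John'
    VP -> V NP
    Det -> 'the'
    N -> 'dog'
    V -> 'saw'
""")
print(grammar.start())              # expected: S
print(len(grammar.productions()))   # expected: 7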
+ Since symbols are node values, they must be immutable and + hashable. Two ``Nonterminals`` are considered equal if their + symbols are equal. + + :see: ``CFG``, ``Production`` + :type _symbol: any + :ivar _symbol: The node value corresponding to this + ``Nonterminal``. This value must be immutable and hashable. + """ + + def __init__(self, symbol): + """ + Construct a new non-terminal from the given symbol. + + :type symbol: any + :param symbol: The node value corresponding to this + ``Nonterminal``. This value must be immutable and + hashable. + """ + self._symbol = symbol + + def symbol(self): + """ + Return the node value corresponding to this ``Nonterminal``. + + :rtype: (any) + """ + return self._symbol + + def __eq__(self, other): + """ + Return True if this non-terminal is equal to ``other``. In + particular, return True if ``other`` is a ``Nonterminal`` + and this non-terminal's symbol is equal to ``other`` 's symbol. + + :rtype: bool + """ + return type(self) == type(other) and self._symbol == other._symbol + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, Nonterminal): + raise_unorderable_types("<", self, other) + return self._symbol < other._symbol + + def __hash__(self): + return hash(self._symbol) + + def __repr__(self): + """ + Return a string representation for this ``Nonterminal``. + + :rtype: str + """ + if isinstance(self._symbol, str): + return "%s" % self._symbol + else: + return "%s" % repr(self._symbol) + + def __str__(self): + """ + Return a string representation for this ``Nonterminal``. + + :rtype: str + """ + if isinstance(self._symbol, str): + return "%s" % self._symbol + else: + return "%s" % repr(self._symbol) + + def __div__(self, rhs): + """ + Return a new nonterminal whose symbol is ``A/B``, where ``A`` is + the symbol for this nonterminal, and ``B`` is the symbol for rhs. + + :param rhs: The nonterminal used to form the right hand side + of the new nonterminal. + :type rhs: Nonterminal + :rtype: Nonterminal + """ + return Nonterminal(f"{self._symbol}/{rhs._symbol}") + + def __truediv__(self, rhs): + """ + Return a new nonterminal whose symbol is ``A/B``, where ``A`` is + the symbol for this nonterminal, and ``B`` is the symbol for rhs. + This function allows use of the slash ``/`` operator with + the future import of division. + + :param rhs: The nonterminal used to form the right hand side + of the new nonterminal. + :type rhs: Nonterminal + :rtype: Nonterminal + """ + return self.__div__(rhs) + + +def nonterminals(symbols): + """ + Given a string containing a list of symbol names, return a list of + ``Nonterminals`` constructed from those symbols. + + :param symbols: The symbol name string. This string can be + delimited by either spaces or commas. + :type symbols: str + :return: A list of ``Nonterminals`` constructed from the symbol + names given in ``symbols``. The ``Nonterminals`` are sorted + in the same order as the symbols names. + :rtype: list(Nonterminal) + """ + if "," in symbols: + symbol_list = symbols.split(",") + else: + symbol_list = symbols.split() + return [Nonterminal(s.strip()) for s in symbol_list] + + +class FeatStructNonterminal(FeatDict, Nonterminal): + """A feature structure that's also a nonterminal. 
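# Usage sketch (not part of the original file): ``nonterminals()`` above
# splits a comma- or space-delimited string into ``Nonterminal`` objects, and
# the "/" operator builds slashed categories such as NP/PP.
from nltk.grammar import nonterminals

S, NP, VP, PP = nonterminals("S, NP, VP, PP")
print(NP == nonterminals("NP")[0])   # expected: True (equality is by symbol)
print(NP / PP)                       # expected: NP/PP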
It acts as its + own symbol, and automatically freezes itself when hashed.""" + + def __hash__(self): + self.freeze() + return FeatStruct.__hash__(self) + + def symbol(self): + return self + + +def is_nonterminal(item): + """ + :return: True if the item is a ``Nonterminal``. + :rtype: bool + """ + return isinstance(item, Nonterminal) + + +################################################################# +# Terminals +################################################################# + + +def is_terminal(item): + """ + Return True if the item is a terminal, which currently is + if it is hashable and not a ``Nonterminal``. + + :rtype: bool + """ + return hasattr(item, "__hash__") and not isinstance(item, Nonterminal) + + +################################################################# +# Productions +################################################################# + + +@total_ordering +class Production: + """ + A grammar production. Each production maps a single symbol + on the "left-hand side" to a sequence of symbols on the + "right-hand side". (In the case of context-free productions, + the left-hand side must be a ``Nonterminal``, and the right-hand + side is a sequence of terminals and ``Nonterminals``.) + "terminals" can be any immutable hashable object that is + not a ``Nonterminal``. Typically, terminals are strings + representing words, such as ``"dog"`` or ``"under"``. + + :see: ``CFG`` + :see: ``DependencyGrammar`` + :see: ``Nonterminal`` + :type _lhs: Nonterminal + :ivar _lhs: The left-hand side of the production. + :type _rhs: tuple(Nonterminal, terminal) + :ivar _rhs: The right-hand side of the production. + """ + + def __init__(self, lhs, rhs): + """ + Construct a new ``Production``. + + :param lhs: The left-hand side of the new ``Production``. + :type lhs: Nonterminal + :param rhs: The right-hand side of the new ``Production``. + :type rhs: sequence(Nonterminal and terminal) + """ + if isinstance(rhs, str): + raise TypeError( + "production right hand side should be a list, " "not a string" + ) + self._lhs = lhs + self._rhs = tuple(rhs) + + def lhs(self): + """ + Return the left-hand side of this ``Production``. + + :rtype: Nonterminal + """ + return self._lhs + + def rhs(self): + """ + Return the right-hand side of this ``Production``. + + :rtype: sequence(Nonterminal and terminal) + """ + return self._rhs + + def __len__(self): + """ + Return the length of the right-hand side. + + :rtype: int + """ + return len(self._rhs) + + def is_nonlexical(self): + """ + Return True if the right-hand side only contains ``Nonterminals`` + + :rtype: bool + """ + return all(is_nonterminal(n) for n in self._rhs) + + def is_lexical(self): + """ + Return True if the right-hand contain at least one terminal token. + + :rtype: bool + """ + return not self.is_nonlexical() + + def __str__(self): + """ + Return a verbose string representation of the ``Production``. + + :rtype: str + """ + result = "%s -> " % repr(self._lhs) + result += " ".join(repr(el) for el in self._rhs) + return result + + def __repr__(self): + """ + Return a concise string representation of the ``Production``. + + :rtype: str + """ + return "%s" % self + + def __eq__(self, other): + """ + Return True if this ``Production`` is equal to ``other``. 
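# Usage sketch (not part of the original file): a ``Production`` pairs a
# ``Nonterminal`` left-hand side with a tuple right-hand side; it counts as
# lexical as soon as the right-hand side contains at least one terminal.
from nltk.grammar import Nonterminal, Production

S, NP, VP = Nonterminal("S"), Nonterminal("NP"), Nonterminal("VP")
p1 = Production(S, [NP, VP])
p2 = Production(NP, ["John"])
print(p1, p1.is_nonlexical())   # expected: S -> NP VP True
print(p2, p2.is_lexical())      # expected: NP -> 'John' True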
+ + :rtype: bool + """ + return ( + type(self) == type(other) + and self._lhs == other._lhs + and self._rhs == other._rhs + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, Production): + raise_unorderable_types("<", self, other) + return (self._lhs, self._rhs) < (other._lhs, other._rhs) + + def __hash__(self): + """ + Return a hash value for the ``Production``. + + :rtype: int + """ + return hash((self._lhs, self._rhs)) + + +class DependencyProduction(Production): + """ + A dependency grammar production. Each production maps a single + head word to an unordered list of one or more modifier words. + """ + + def __str__(self): + """ + Return a verbose string representation of the ``DependencyProduction``. + + :rtype: str + """ + result = f"'{self._lhs}' ->" + for elt in self._rhs: + result += f" '{elt}'" + return result + + +class ProbabilisticProduction(Production, ImmutableProbabilisticMixIn): + """ + A probabilistic context free grammar production. + A PCFG ``ProbabilisticProduction`` is essentially just a ``Production`` that + has an associated probability, which represents how likely it is that + this production will be used. In particular, the probability of a + ``ProbabilisticProduction`` records the likelihood that its right-hand side is + the correct instantiation for any given occurrence of its left-hand side. + + :see: ``Production`` + """ + + def __init__(self, lhs, rhs, **prob): + """ + Construct a new ``ProbabilisticProduction``. + + :param lhs: The left-hand side of the new ``ProbabilisticProduction``. + :type lhs: Nonterminal + :param rhs: The right-hand side of the new ``ProbabilisticProduction``. + :type rhs: sequence(Nonterminal and terminal) + :param prob: Probability parameters of the new ``ProbabilisticProduction``. + """ + ImmutableProbabilisticMixIn.__init__(self, **prob) + Production.__init__(self, lhs, rhs) + + def __str__(self): + return super().__str__() + ( + " [1.0]" if (self.prob() == 1.0) else " [%g]" % self.prob() + ) + + def __eq__(self, other): + return ( + type(self) == type(other) + and self._lhs == other._lhs + and self._rhs == other._rhs + and self.prob() == other.prob() + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self._lhs, self._rhs, self.prob())) + + +################################################################# +# Grammars +################################################################# + + +class CFG: + """ + A context-free grammar. A grammar consists of a start state and + a set of productions. The set of terminals and nonterminals is + implicitly specified by the productions. + + If you need efficient key-based access to productions, you + can use a subclass to implement it. + """ + + def __init__(self, start, productions, calculate_leftcorners=True): + """ + Create a new context-free grammar, from the given start state + and set of ``Production`` instances. + + :param start: The start symbol + :type start: Nonterminal + :param productions: The list of productions that defines the grammar + :type productions: list(Production) + :param calculate_leftcorners: False if we don't want to calculate the + leftcorner relation. In that case, some optimized chart parsers won't work. 
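# Usage sketch (not part of the original file): a ``ProbabilisticProduction``
# is a ``Production`` plus a probability (passed as ``prob=``); its string
# form appends that probability in square brackets.
from nltk.grammar import Nonterminal, ProbabilisticProduction

S, NP, VP = Nonterminal("S"), Nonterminal("NP"), Nonterminal("VP")
pp = ProbabilisticProduction(S, [NP, VP], prob=0.8)
print(pp)          # expected: S -> NP VP [0.8]
print(pp.prob())   # expected: 0.8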
+ :type calculate_leftcorners: bool + """ + if not is_nonterminal(start): + raise TypeError( + "start should be a Nonterminal object," + " not a %s" % type(start).__name__ + ) + + self._start = start + self._productions = productions + self._categories = {prod.lhs() for prod in productions} + self._calculate_indexes() + self._calculate_grammar_forms() + if calculate_leftcorners: + self._calculate_leftcorners() + + def _calculate_indexes(self): + self._lhs_index = {} + self._rhs_index = {} + self._empty_index = {} + self._lexical_index = {} + for prod in self._productions: + # Left hand side. + lhs = prod._lhs + if lhs not in self._lhs_index: + self._lhs_index[lhs] = [] + self._lhs_index[lhs].append(prod) + if prod._rhs: + # First item in right hand side. + rhs0 = prod._rhs[0] + if rhs0 not in self._rhs_index: + self._rhs_index[rhs0] = [] + self._rhs_index[rhs0].append(prod) + else: + # The right hand side is empty. + self._empty_index[prod.lhs()] = prod + # Lexical tokens in the right hand side. + for token in prod._rhs: + if is_terminal(token): + self._lexical_index.setdefault(token, set()).add(prod) + + def _calculate_leftcorners(self): + # Calculate leftcorner relations, for use in optimized parsing. + self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories} + self._immediate_leftcorner_words = {cat: set() for cat in self._categories} + for prod in self.productions(): + if len(prod) > 0: + cat, left = prod.lhs(), prod.rhs()[0] + if is_nonterminal(left): + self._immediate_leftcorner_categories[cat].add(left) + else: + self._immediate_leftcorner_words[cat].add(left) + + lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True) + self._leftcorners = lc + self._leftcorner_parents = invert_graph(lc) + + nr_leftcorner_categories = sum( + map(len, self._immediate_leftcorner_categories.values()) + ) + nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values())) + if nr_leftcorner_words > nr_leftcorner_categories > 10000: + # If the grammar is big, the leftcorner-word dictionary will be too large. + # In that case it is better to calculate the relation on demand. + self._leftcorner_words = None + return + + self._leftcorner_words = {} + for cat in self._leftcorners: + lefts = self._leftcorners[cat] + lc = self._leftcorner_words[cat] = set() + for left in lefts: + lc.update(self._immediate_leftcorner_words.get(left, set())) + + @classmethod + def fromstring(cls, input, encoding=None): + """ + Return the grammar instance corresponding to the input string(s). + + :param input: a grammar, either in the form of a string or as a list of strings. + """ + start, productions = read_grammar( + input, standard_nonterm_parser, encoding=encoding + ) + return cls(start, productions) + + def start(self): + """ + Return the start symbol of the grammar + + :rtype: Nonterminal + """ + return self._start + + # tricky to balance readability and efficiency here! + # can't use set operations as they don't preserve ordering + def productions(self, lhs=None, rhs=None, empty=False): + """ + Return the grammar productions, filtered by the left-hand side + or the first item in the right-hand side. + + :param lhs: Only return productions with the given left-hand side. + :param rhs: Only return productions with the given first item + in the right-hand side. + :param empty: Only return productions with an empty right-hand side. + :return: A list of productions matching the given constraints. 
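# Usage sketch (not part of the original file): the indexes built above back
# the ``productions()`` filters, and the precomputed leftcorner relation
# answers "which categories can S start with?" directly.
from nltk import CFG
from nltk.grammar import Nonterminal

g = CFG.fromstring("S -> NP VP\nNP -> Det N\nVP -> 'runs'\nDet -> 'a'\nN -> 'dog'")
print(g.productions(lhs=Nonterminal("NP")))               # expected: [NP -> Det N]
print(sorted(map(str, g.leftcorners(Nonterminal("S")))))  # expected: ['Det', 'NP', 'S']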
+ :rtype: list(Production) + """ + if rhs and empty: + raise ValueError( + "You cannot select empty and non-empty " "productions at the same time." + ) + + # no constraints so return everything + if not lhs and not rhs: + if not empty: + return self._productions + else: + return self._empty_index.values() + + # only lhs specified so look up its index + elif lhs and not rhs: + if not empty: + return self._lhs_index.get(lhs, []) + elif lhs in self._empty_index: + return [self._empty_index[lhs]] + else: + return [] + + # only rhs specified so look up its index + elif rhs and not lhs: + return self._rhs_index.get(rhs, []) + + # intersect + else: + return [ + prod + for prod in self._lhs_index.get(lhs, []) + if prod in self._rhs_index.get(rhs, []) + ] + + def leftcorners(self, cat): + """ + Return the set of all nonterminals that the given nonterminal + can start with, including itself. + + This is the reflexive, transitive closure of the immediate + leftcorner relation: (A > B) iff (A -> B beta) + + :param cat: the parent of the leftcorners + :type cat: Nonterminal + :return: the set of all leftcorners + :rtype: set(Nonterminal) + """ + return self._leftcorners.get(cat, {cat}) + + def is_leftcorner(self, cat, left): + """ + True if left is a leftcorner of cat, where left can be a + terminal or a nonterminal. + + :param cat: the parent of the leftcorner + :type cat: Nonterminal + :param left: the suggested leftcorner + :type left: Terminal or Nonterminal + :rtype: bool + """ + if is_nonterminal(left): + return left in self.leftcorners(cat) + elif self._leftcorner_words: + return left in self._leftcorner_words.get(cat, set()) + else: + return any( + left in self._immediate_leftcorner_words.get(parent, set()) + for parent in self.leftcorners(cat) + ) + + def leftcorner_parents(self, cat): + """ + Return the set of all nonterminals for which the given category + is a left corner. This is the inverse of the leftcorner relation. + + :param cat: the suggested leftcorner + :type cat: Nonterminal + :return: the set of all parents to the leftcorner + :rtype: set(Nonterminal) + """ + return self._leftcorner_parents.get(cat, {cat}) + + def check_coverage(self, tokens): + """ + Check whether the grammar rules cover the given list of tokens. + If not, then raise an exception. + + :type tokens: list(str) + """ + missing = [tok for tok in tokens if not self._lexical_index.get(tok)] + if missing: + missing = ", ".join(f"{w!r}" for w in missing) + raise ValueError( + "Grammar does not cover some of the " "input words: %r." % missing + ) + + def _calculate_grammar_forms(self): + """ + Pre-calculate of which form(s) the grammar is. + """ + prods = self._productions + self._is_lexical = all(p.is_lexical() for p in prods) + self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1) + self._min_len = min(len(p) for p in prods) + self._max_len = max(len(p) for p in prods) + self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1) + + def is_lexical(self): + """ + Return True if all productions are lexicalised. + """ + return self._is_lexical + + def is_nonlexical(self): + """ + Return True if all lexical rules are "preterminals", that is, + unary rules which can be separated in a preprocessing step. + + This means that all productions are of the forms + A -> B1 ... Bn (n>=0), or A -> "s". + + Note: is_lexical() and is_nonlexical() are not opposites. + There are grammars which are neither, and grammars which are both. 
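# Usage sketch (not part of the original file): ``check_coverage()`` above
# raises when an input token has no lexical production, and the grammar-form
# predicates report what shape the productions take.
from nltk import CFG

g = CFG.fromstring("S -> NP VP\nNP -> 'John'\nVP -> 'walks'")
g.check_coverage(["John", "walks"])   # passes silently
print(g.is_lexical())                 # expected: False (S -> NP VP is nonlexical)
try:
    g.check_coverage(["Mary"])
except ValueError as err:
    print("not covered:", err)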
+ """ + return self._is_nonlexical + + def min_len(self): + """ + Return the right-hand side length of the shortest grammar production. + """ + return self._min_len + + def max_len(self): + """ + Return the right-hand side length of the longest grammar production. + """ + return self._max_len + + def is_nonempty(self): + """ + Return True if there are no empty productions. + """ + return self._min_len > 0 + + def is_binarised(self): + """ + Return True if all productions are at most binary. + Note that there can still be empty and unary productions. + """ + return self._max_len <= 2 + + def is_flexible_chomsky_normal_form(self): + """ + Return True if all productions are of the forms + A -> B C, A -> B, or A -> "s". + """ + return self.is_nonempty() and self.is_nonlexical() and self.is_binarised() + + def is_chomsky_normal_form(self): + """ + Return True if the grammar is of Chomsky Normal Form, i.e. all productions + are of the form A -> B C, or A -> "s". + """ + return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical + + def chomsky_normal_form(self, new_token_padding="@$@", flexible=False): + """ + Returns a new Grammar that is in chomsky normal + + :param: new_token_padding + Customise new rule formation during binarisation + """ + if self.is_chomsky_normal_form(): + return self + if self.productions(empty=True): + raise ValueError( + "Grammar has Empty rules. " "Cannot deal with them at the moment" + ) + + # check for mixed rules + for rule in self.productions(): + if rule.is_lexical() and len(rule.rhs()) > 1: + raise ValueError( + f"Cannot handled mixed rule {rule.lhs()} => {rule.rhs()}" + ) + + step1 = CFG.eliminate_start(self) + step2 = CFG.binarize(step1, new_token_padding) + if flexible: + return step2 + step3 = CFG.remove_unitary_rules(step2) + step4 = CFG(step3.start(), list(set(step3.productions()))) + return step4 + + @classmethod + def remove_unitary_rules(cls, grammar): + """ + Remove nonlexical unitary rules and convert them to + lexical + """ + result = [] + unitary = [] + for rule in grammar.productions(): + if len(rule) == 1 and rule.is_nonlexical(): + unitary.append(rule) + else: + result.append(rule) + + while unitary: + rule = unitary.pop(0) + for item in grammar.productions(lhs=rule.rhs()[0]): + new_rule = Production(rule.lhs(), item.rhs()) + if len(new_rule) != 1 or new_rule.is_lexical(): + result.append(new_rule) + else: + unitary.append(new_rule) + + n_grammar = CFG(grammar.start(), result) + return n_grammar + + @classmethod + def binarize(cls, grammar, padding="@$@"): + """ + Convert all non-binary rules into binary by introducing + new tokens. 
+ Example:: + + Original: + A => B C D + After Conversion: + A => B A@$@B + A@$@B => C D + """ + result = [] + + for rule in grammar.productions(): + if len(rule.rhs()) > 2: + # this rule needs to be broken down + left_side = rule.lhs() + for k in range(0, len(rule.rhs()) - 2): + tsym = rule.rhs()[k] + new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol()) + new_production = Production(left_side, (tsym, new_sym)) + left_side = new_sym + result.append(new_production) + last_prd = Production(left_side, rule.rhs()[-2:]) + result.append(last_prd) + else: + result.append(rule) + + n_grammar = CFG(grammar.start(), result) + return n_grammar + + @classmethod + def eliminate_start(cls, grammar): + """ + Eliminate start rule in case it appears on RHS + Example: S -> S0 S1 and S0 -> S1 S + Then another rule S0_Sigma -> S is added + """ + start = grammar.start() + result = [] + need_to_add = None + for rule in grammar.productions(): + if start in rule.rhs(): + need_to_add = True + result.append(rule) + if need_to_add: + start = Nonterminal("S0_SIGMA") + result.append(Production(start, [grammar.start()])) + n_grammar = CFG(start, result) + return n_grammar + return grammar + + def __repr__(self): + return "" % len(self._productions) + + def __str__(self): + result = "Grammar with %d productions" % len(self._productions) + result += " (start state = %r)" % self._start + for production in self._productions: + result += "\n %s" % production + return result + + +class FeatureGrammar(CFG): + """ + A feature-based grammar. This is equivalent to a + ``CFG`` whose nonterminals are all + ``FeatStructNonterminal``. + + A grammar consists of a start state and a set of + productions. The set of terminals and nonterminals + is implicitly specified by the productions. + """ + + def __init__(self, start, productions): + """ + Create a new feature-based grammar, from the given start + state and set of ``Productions``. + + :param start: The start symbol + :type start: FeatStructNonterminal + :param productions: The list of productions that defines the grammar + :type productions: list(Production) + """ + CFG.__init__(self, start, productions) + + # The difference with CFG is that the productions are + # indexed on the TYPE feature of the nonterminals. + # This is calculated by the method _get_type_if_possible(). + + def _calculate_indexes(self): + self._lhs_index = {} + self._rhs_index = {} + self._empty_index = {} + self._empty_productions = [] + self._lexical_index = {} + for prod in self._productions: + # Left hand side. + lhs = self._get_type_if_possible(prod._lhs) + if lhs not in self._lhs_index: + self._lhs_index[lhs] = [] + self._lhs_index[lhs].append(prod) + if prod._rhs: + # First item in right hand side. + rhs0 = self._get_type_if_possible(prod._rhs[0]) + if rhs0 not in self._rhs_index: + self._rhs_index[rhs0] = [] + self._rhs_index[rhs0].append(prod) + else: + # The right hand side is empty. + if lhs not in self._empty_index: + self._empty_index[lhs] = [] + self._empty_index[lhs].append(prod) + self._empty_productions.append(prod) + # Lexical tokens in the right hand side. + for token in prod._rhs: + if is_terminal(token): + self._lexical_index.setdefault(token, set()).add(prod) + + @classmethod + def fromstring( + cls, input, features=None, logic_parser=None, fstruct_reader=None, encoding=None + ): + """ + Return a feature structure based grammar. + + :param input: a grammar, either in the form of a string or else + as a list of strings. 
+ :param features: a tuple of features (default: SLASH, TYPE) + :param logic_parser: a parser for lambda-expressions, + by default, ``LogicParser()`` + :param fstruct_reader: a feature structure parser + (only if features and logic_parser is None) + """ + if features is None: + features = (SLASH, TYPE) + + if fstruct_reader is None: + fstruct_reader = FeatStructReader( + features, FeatStructNonterminal, logic_parser=logic_parser + ) + elif logic_parser is not None: + raise Exception( + "'logic_parser' and 'fstruct_reader' must " "not both be set" + ) + + start, productions = read_grammar( + input, fstruct_reader.read_partial, encoding=encoding + ) + return cls(start, productions) + + def productions(self, lhs=None, rhs=None, empty=False): + """ + Return the grammar productions, filtered by the left-hand side + or the first item in the right-hand side. + + :param lhs: Only return productions with the given left-hand side. + :param rhs: Only return productions with the given first item + in the right-hand side. + :param empty: Only return productions with an empty right-hand side. + :rtype: list(Production) + """ + if rhs and empty: + raise ValueError( + "You cannot select empty and non-empty " "productions at the same time." + ) + + # no constraints so return everything + if not lhs and not rhs: + if empty: + return self._empty_productions + else: + return self._productions + + # only lhs specified so look up its index + elif lhs and not rhs: + if empty: + return self._empty_index.get(self._get_type_if_possible(lhs), []) + else: + return self._lhs_index.get(self._get_type_if_possible(lhs), []) + + # only rhs specified so look up its index + elif rhs and not lhs: + return self._rhs_index.get(self._get_type_if_possible(rhs), []) + + # intersect + else: + return [ + prod + for prod in self._lhs_index.get(self._get_type_if_possible(lhs), []) + if prod in self._rhs_index.get(self._get_type_if_possible(rhs), []) + ] + + def leftcorners(self, cat): + """ + Return the set of all words that the given category can start with. + Also called the "first set" in compiler construction. + """ + raise NotImplementedError("Not implemented yet") + + def leftcorner_parents(self, cat): + """ + Return the set of all categories for which the given category + is a left corner. + """ + raise NotImplementedError("Not implemented yet") + + def _get_type_if_possible(self, item): + """ + Helper function which returns the ``TYPE`` feature of the ``item``, + if it exists, otherwise it returns the ``item`` itself + """ + if isinstance(item, dict) and TYPE in item: + return FeatureValueType(item[TYPE]) + else: + return item + + +@total_ordering +class FeatureValueType: + """ + A helper class for ``FeatureGrammars``, designed to be different + from ordinary strings. This is to stop the ``FeatStruct`` + ``FOO[]`` from being compare equal to the terminal "FOO". + """ + + def __init__(self, value): + self._value = value + + def __repr__(self): + return "<%s>" % self._value + + def __eq__(self, other): + return type(self) == type(other) and self._value == other._value + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, FeatureValueType): + raise_unorderable_types("<", self, other) + return self._value < other._value + + def __hash__(self): + return hash(self._value) + + +class DependencyGrammar: + """ + A dependency grammar. A DependencyGrammar consists of a set of + productions. Each production specifies a head/modifier relationship + between a pair of words. 
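A short sketch of ``FeatureGrammar.fromstring`` with the default feature-structure reader; the agreement grammar and the sentence below are illustrative only::

    from nltk.grammar import FeatureGrammar
    from nltk.parse.featurechart import FeatureChartParser

    fg = FeatureGrammar.fromstring("""
        S -> NP[NUM=?n] VP[NUM=?n]
        NP[NUM=sg] -> 'Kim'
        NP[NUM=pl] -> 'dogs'
        VP[NUM=sg] -> 'sleeps'
        VP[NUM=pl] -> 'sleep'
    """)

    # The NUM variable ?n enforces subject-verb agreement, so only the
    # matching analysis is returned.
    parser = FeatureChartParser(fg)
    for tree in parser.parse("Kim sleeps".split()):
        print(tree)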
+ """ + + def __init__(self, productions): + """ + Create a new dependency grammar, from the set of ``Productions``. + + :param productions: The list of productions that defines the grammar + :type productions: list(Production) + """ + self._productions = productions + + @classmethod + def fromstring(cls, input): + productions = [] + for linenum, line in enumerate(input.split("\n")): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + productions += _read_dependency_production(line) + except ValueError as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + if len(productions) == 0: + raise ValueError("No productions found!") + return cls(productions) + + def contains(self, head, mod): + """ + :param head: A head word. + :type head: str + :param mod: A mod word, to test as a modifier of 'head'. + :type mod: str + + :return: true if this ``DependencyGrammar`` contains a + ``DependencyProduction`` mapping 'head' to 'mod'. + :rtype: bool + """ + for production in self._productions: + for possibleMod in production._rhs: + if production._lhs == head and possibleMod == mod: + return True + return False + + def __contains__(self, head_mod): + """ + Return True if this ``DependencyGrammar`` contains a + ``DependencyProduction`` mapping 'head' to 'mod'. + + :param head_mod: A tuple of a head word and a mod word, + to test as a modifier of 'head'. + :type head: Tuple[str, str] + :rtype: bool + """ + try: + head, mod = head_mod + except ValueError as e: + raise ValueError( + "Must use a tuple of strings, e.g. `('price', 'of') in grammar`" + ) from e + return self.contains(head, mod) + + # # should be rewritten, the set comp won't work in all comparisons + # def contains_exactly(self, head, modlist): + # for production in self._productions: + # if(len(production._rhs) == len(modlist)): + # if(production._lhs == head): + # set1 = Set(production._rhs) + # set2 = Set(modlist) + # if(set1 == set2): + # return True + # return False + + def __str__(self): + """ + Return a verbose string representation of the ``DependencyGrammar`` + + :rtype: str + """ + str = "Dependency grammar with %d productions" % len(self._productions) + for production in self._productions: + str += "\n %s" % production + return str + + def __repr__(self): + """ + Return a concise string representation of the ``DependencyGrammar`` + """ + return "Dependency grammar with %d productions" % len(self._productions) + + +class ProbabilisticDependencyGrammar: + """ """ + + def __init__(self, productions, events, tags): + self._productions = productions + self._events = events + self._tags = tags + + def contains(self, head, mod): + """ + Return True if this ``DependencyGrammar`` contains a + ``DependencyProduction`` mapping 'head' to 'mod'. + + :param head: A head word. + :type head: str + :param mod: A mod word, to test as a modifier of 'head'. 
+ :type mod: str + :rtype: bool + """ + for production in self._productions: + for possibleMod in production._rhs: + if production._lhs == head and possibleMod == mod: + return True + return False + + def __str__(self): + """ + Return a verbose string representation of the ``ProbabilisticDependencyGrammar`` + + :rtype: str + """ + str = "Statistical dependency grammar with %d productions" % len( + self._productions + ) + for production in self._productions: + str += "\n %s" % production + str += "\nEvents:" + for event in self._events: + str += "\n %d:%s" % (self._events[event], event) + str += "\nTags:" + for tag_word in self._tags: + str += f"\n {tag_word}:\t({self._tags[tag_word]})" + return str + + def __repr__(self): + """ + Return a concise string representation of the ``ProbabilisticDependencyGrammar`` + """ + return "Statistical Dependency grammar with %d productions" % len( + self._productions + ) + + +class PCFG(CFG): + """ + A probabilistic context-free grammar. A PCFG consists of a + start state and a set of productions with probabilities. The set of + terminals and nonterminals is implicitly specified by the productions. + + PCFG productions use the ``ProbabilisticProduction`` class. + ``PCFGs`` impose the constraint that the set of productions with + any given left-hand-side must have probabilities that sum to 1 + (allowing for a small margin of error). + + If you need efficient key-based access to productions, you can use + a subclass to implement it. + + :type EPSILON: float + :cvar EPSILON: The acceptable margin of error for checking that + productions with a given left-hand side have probabilities + that sum to 1. + """ + + EPSILON = 0.01 + + def __init__(self, start, productions, calculate_leftcorners=True): + """ + Create a new context-free grammar, from the given start state + and set of ``ProbabilisticProductions``. + + :param start: The start symbol + :type start: Nonterminal + :param productions: The list of productions that defines the grammar + :type productions: list(Production) + :raise ValueError: if the set of productions with any left-hand-side + do not have probabilities that sum to a value within + EPSILON of 1. + :param calculate_leftcorners: False if we don't want to calculate the + leftcorner relation. In that case, some optimized chart parsers won't work. + :type calculate_leftcorners: bool + """ + CFG.__init__(self, start, productions, calculate_leftcorners) + + # Make sure that the probabilities sum to one. + probs = {} + for production in productions: + probs[production.lhs()] = probs.get(production.lhs(), 0) + production.prob() + for (lhs, p) in probs.items(): + if not ((1 - PCFG.EPSILON) < p < (1 + PCFG.EPSILON)): + raise ValueError("Productions for %r do not sum to 1" % lhs) + + @classmethod + def fromstring(cls, input, encoding=None): + """ + Return a probabilistic context-free grammar corresponding to the + input string(s). + + :param input: a grammar, either in the form of a string or else + as a list of strings. + """ + start, productions = read_grammar( + input, standard_nonterm_parser, probabilistic=True, encoding=encoding + ) + return cls(start, productions) + + +################################################################# +# Inducing Grammars +################################################################# + +# Contributed by Nathan Bodenstab + + +def induce_pcfg(start, productions): + r""" + Induce a PCFG grammar from a list of productions. 
+ + The probability of a production A -> B C in a PCFG is: + + | count(A -> B C) + | P(B, C | A) = --------------- where \* is any right hand side + | count(A -> \*) + + :param start: The start symbol + :type start: Nonterminal + :param productions: The list of productions that defines the grammar + :type productions: list(Production) + """ + # Production count: the number of times a given production occurs + pcount = {} + + # LHS-count: counts the number of times a given lhs occurs + lcount = {} + + for prod in productions: + lcount[prod.lhs()] = lcount.get(prod.lhs(), 0) + 1 + pcount[prod] = pcount.get(prod, 0) + 1 + + prods = [ + ProbabilisticProduction(p.lhs(), p.rhs(), prob=pcount[p] / lcount[p.lhs()]) + for p in pcount + ] + return PCFG(start, prods) + + +################################################################# +# Helper functions for reading productions +################################################################# + + +def _read_cfg_production(input): + """ + Return a list of context-free ``Productions``. + """ + return _read_production(input, standard_nonterm_parser) + + +def _read_pcfg_production(input): + """ + Return a list of PCFG ``ProbabilisticProductions``. + """ + return _read_production(input, standard_nonterm_parser, probabilistic=True) + + +def _read_fcfg_production(input, fstruct_reader): + """ + Return a list of feature-based ``Productions``. + """ + return _read_production(input, fstruct_reader) + + +# Parsing generic grammars + +_ARROW_RE = re.compile(r"\s* -> \s*", re.VERBOSE) +_PROBABILITY_RE = re.compile(r"( \[ [\d\.]+ \] ) \s*", re.VERBOSE) +_TERMINAL_RE = re.compile(r'( "[^"]*" | \'[^\']*\' ) \s*', re.VERBOSE) +_DISJUNCTION_RE = re.compile(r"\| \s*", re.VERBOSE) + + +def _read_production(line, nonterm_parser, probabilistic=False): + """ + Parse a grammar rule, given as a string, and return + a list of productions. + """ + pos = 0 + + # Parse the left-hand side. + lhs, pos = nonterm_parser(line, pos) + + # Skip over the arrow. + m = _ARROW_RE.match(line, pos) + if not m: + raise ValueError("Expected an arrow") + pos = m.end() + + # Parse the right hand side. + probabilities = [0.0] + rhsides = [[]] + while pos < len(line): + # Probability. + m = _PROBABILITY_RE.match(line, pos) + if probabilistic and m: + pos = m.end() + probabilities[-1] = float(m.group(1)[1:-1]) + if probabilities[-1] > 1.0: + raise ValueError( + "Production probability %f, " + "should not be greater than 1.0" % (probabilities[-1],) + ) + + # String -- add terminal. + elif line[pos] in "'\"": + m = _TERMINAL_RE.match(line, pos) + if not m: + raise ValueError("Unterminated string") + rhsides[-1].append(m.group(1)[1:-1]) + pos = m.end() + + # Vertical bar -- start new rhside. + elif line[pos] == "|": + m = _DISJUNCTION_RE.match(line, pos) + probabilities.append(0.0) + rhsides.append([]) + pos = m.end() + + # Anything else -- nonterminal. + else: + nonterm, pos = nonterm_parser(line, pos) + rhsides[-1].append(nonterm) + + if probabilistic: + return [ + ProbabilisticProduction(lhs, rhs, prob=probability) + for (rhs, probability) in zip(rhsides, probabilities) + ] + else: + return [Production(lhs, rhs) for rhs in rhsides] + + +################################################################# +# Reading Phrase Structure Grammars +################################################################# + + +def read_grammar(input, nonterm_parser, probabilistic=False, encoding=None): + """ + Return a pair consisting of a starting category and a list of + ``Productions``. 
+ + :param input: a grammar, either in the form of a string or else + as a list of strings. + :param nonterm_parser: a function for parsing nonterminals. + It should take a ``(string, position)`` as argument and + return a ``(nonterminal, position)`` as result. + :param probabilistic: are the grammar rules probabilistic? + :type probabilistic: bool + :param encoding: the encoding of the grammar, if it is a binary string + :type encoding: str + """ + if encoding is not None: + input = input.decode(encoding) + if isinstance(input, str): + lines = input.split("\n") + else: + lines = input + + start = None + productions = [] + continue_line = "" + for linenum, line in enumerate(lines): + line = continue_line + line.strip() + if line.startswith("#") or line == "": + continue + if line.endswith("\\"): + continue_line = line[:-1].rstrip() + " " + continue + continue_line = "" + try: + if line[0] == "%": + directive, args = line[1:].split(None, 1) + if directive == "start": + start, pos = nonterm_parser(args, 0) + if pos != len(args): + raise ValueError("Bad argument to start directive") + else: + raise ValueError("Bad directive") + else: + # expand out the disjunctions on the RHS + productions += _read_production(line, nonterm_parser, probabilistic) + except ValueError as e: + raise ValueError(f"Unable to parse line {linenum + 1}: {line}\n{e}") from e + + if not productions: + raise ValueError("No productions found!") + if not start: + start = productions[0].lhs() + return (start, productions) + + +_STANDARD_NONTERM_RE = re.compile(r"( [\w/][\w/^<>-]* ) \s*", re.VERBOSE) + + +def standard_nonterm_parser(string, pos): + m = _STANDARD_NONTERM_RE.match(string, pos) + if not m: + raise ValueError("Expected a nonterminal, found: " + string[pos:]) + return (Nonterminal(m.group(1)), m.end()) + + +################################################################# +# Reading Dependency Grammars +################################################################# + +_READ_DG_RE = re.compile( + r"""^\s* # leading whitespace + ('[^']+')\s* # single-quoted lhs + (?:[-=]+>)\s* # arrow + (?:( # rhs: + "[^"]+" # doubled-quoted terminal + | '[^']+' # single-quoted terminal + | \| # disjunction + ) + \s*) # trailing space + *$""", # zero or more copies + re.VERBOSE, +) +_SPLIT_DG_RE = re.compile(r"""('[^']'|[-=]+>|"[^"]+"|'[^']+'|\|)""") + + +def _read_dependency_production(s): + if not _READ_DG_RE.match(s): + raise ValueError("Bad production string") + pieces = _SPLIT_DG_RE.split(s) + pieces = [p for i, p in enumerate(pieces) if i % 2 == 1] + lhside = pieces[0].strip("'\"") + rhsides = [[]] + for piece in pieces[2:]: + if piece == "|": + rhsides.append([]) + else: + rhsides[-1].append(piece.strip("'\"")) + return [DependencyProduction(lhside, rhside) for rhside in rhsides] + + +################################################################# +# Demonstration +################################################################# + + +def cfg_demo(): + """ + A demonstration showing how ``CFGs`` can be created and used. 
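A small sketch of the low-level reader itself; the grammar text is illustrative (``%start`` picks the start symbol explicitly, and ``#`` lines and blank lines are skipped)::

    from nltk.grammar import read_grammar, standard_nonterm_parser

    start, prods = read_grammar(
        """
        %start S
        # the NP disjunction below expands into two productions
        S -> NP VP
        NP -> 'Kim' | 'Sandy'
        VP -> 'sleeps'
        """,
        standard_nonterm_parser,
    )
    assert start.symbol() == "S"
    assert len(prods) == 4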
+ """ + + from nltk import CFG, Production, nonterminals + + # Create some nonterminals + S, NP, VP, PP = nonterminals("S, NP, VP, PP") + N, V, P, Det = nonterminals("N, V, P, Det") + VP_slash_NP = VP / NP + + print("Some nonterminals:", [S, NP, VP, PP, N, V, P, Det, VP / NP]) + print(" S.symbol() =>", repr(S.symbol())) + print() + + print(Production(S, [NP])) + + # Create some Grammar Productions + grammar = CFG.fromstring( + """ + S -> NP VP + PP -> P NP + NP -> Det N | NP PP + VP -> V NP | VP PP + Det -> 'a' | 'the' + N -> 'dog' | 'cat' + V -> 'chased' | 'sat' + P -> 'on' | 'in' + """ + ) + + print("A Grammar:", repr(grammar)) + print(" grammar.start() =>", repr(grammar.start())) + print(" grammar.productions() =>", end=" ") + # Use string.replace(...) is to line-wrap the output. + print(repr(grammar.productions()).replace(",", ",\n" + " " * 25)) + print() + + +def pcfg_demo(): + """ + A demonstration showing how a ``PCFG`` can be created and used. + """ + + from nltk import induce_pcfg, treetransforms + from nltk.corpus import treebank + from nltk.parse import pchart + + toy_pcfg1 = PCFG.fromstring( + """ + S -> NP VP [1.0] + NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] + Det -> 'the' [0.8] | 'my' [0.2] + N -> 'man' [0.5] | 'telescope' [0.5] + VP -> VP PP [0.1] | V NP [0.7] | V [0.2] + V -> 'ate' [0.35] | 'saw' [0.65] + PP -> P NP [1.0] + P -> 'with' [0.61] | 'under' [0.39] + """ + ) + + toy_pcfg2 = PCFG.fromstring( + """ + S -> NP VP [1.0] + VP -> V NP [.59] + VP -> V [.40] + VP -> VP PP [.01] + NP -> Det N [.41] + NP -> Name [.28] + NP -> NP PP [.31] + PP -> P NP [1.0] + V -> 'saw' [.21] + V -> 'ate' [.51] + V -> 'ran' [.28] + N -> 'boy' [.11] + N -> 'cookie' [.12] + N -> 'table' [.13] + N -> 'telescope' [.14] + N -> 'hill' [.5] + Name -> 'Jack' [.52] + Name -> 'Bob' [.48] + P -> 'with' [.61] + P -> 'under' [.39] + Det -> 'the' [.41] + Det -> 'a' [.31] + Det -> 'my' [.28] + """ + ) + + pcfg_prods = toy_pcfg1.productions() + + pcfg_prod = pcfg_prods[2] + print("A PCFG production:", repr(pcfg_prod)) + print(" pcfg_prod.lhs() =>", repr(pcfg_prod.lhs())) + print(" pcfg_prod.rhs() =>", repr(pcfg_prod.rhs())) + print(" pcfg_prod.prob() =>", repr(pcfg_prod.prob())) + print() + + grammar = toy_pcfg2 + print("A PCFG grammar:", repr(grammar)) + print(" grammar.start() =>", repr(grammar.start())) + print(" grammar.productions() =>", end=" ") + # Use .replace(...) is to line-wrap the output. + print(repr(grammar.productions()).replace(",", ",\n" + " " * 26)) + print() + + # extract productions from three trees and induce the PCFG + print("Induce PCFG grammar from treebank data:") + + productions = [] + item = treebank._fileids[0] + for tree in treebank.parsed_sents(item)[:3]: + # perform optional tree transformations, e.g.: + tree.collapse_unary(collapsePOS=False) + tree.chomsky_normal_form(horzMarkov=2) + + productions += tree.productions() + + S = Nonterminal("S") + grammar = induce_pcfg(S, productions) + print(grammar) + print() + + print("Parse sentence using induced grammar:") + + parser = pchart.InsideChartParser(grammar) + parser.trace(3) + + # doesn't work as tokens are different: + # sent = treebank.tokenized('wsj_0001.mrg')[0] + + sent = treebank.parsed_sents(item)[0].leaves() + print(sent) + for parse in parser.parse(sent): + print(parse) + + +def fcfg_demo(): + import nltk.data + + g = nltk.data.load("grammars/book_grammars/feat0.fcfg") + print(g) + print() + + +def dg_demo(): + """ + A demonstration showing the creation and inspection of a + ``DependencyGrammar``. 
+ """ + grammar = DependencyGrammar.fromstring( + """ + 'scratch' -> 'cats' | 'walls' + 'walls' -> 'the' + 'cats' -> 'the' + """ + ) + print(grammar) + + +def sdg_demo(): + """ + A demonstration of how to read a string representation of + a CoNLL format dependency tree. + """ + from nltk.parse import DependencyGraph + + dg = DependencyGraph( + """ + 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _ + 2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _ + 3 met met Prep Prep voor 8 mod _ _ + 4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _ + 5 moeder moeder N N soort|ev|neut 3 obj1 _ _ + 6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _ + 7 gaan ga V V hulp|inf 6 vc _ _ + 8 winkelen winkel V V intrans|inf 11 cnj _ _ + 9 , , Punc Punc komma 8 punct _ _ + 10 zwemmen zwem V V intrans|inf 11 cnj _ _ + 11 of of Conj Conj neven 7 vc _ _ + 12 terrassen terras N N soort|mv|neut 11 cnj _ _ + 13 . . Punc Punc punt 12 punct _ _ + """ + ) + tree = dg.tree() + print(tree.pprint()) + + +def demo(): + cfg_demo() + pcfg_demo() + fcfg_demo() + dg_demo() + sdg_demo() + + +if __name__ == "__main__": + demo() + +__all__ = [ + "Nonterminal", + "nonterminals", + "CFG", + "Production", + "PCFG", + "ProbabilisticProduction", + "DependencyGrammar", + "DependencyProduction", + "ProbabilisticDependencyGrammar", + "induce_pcfg", + "read_grammar", +] diff --git a/venv/lib/python3.10/site-packages/nltk/help.py b/venv/lib/python3.10/site-packages/nltk/help.py new file mode 100644 index 0000000000000000000000000000000000000000..e0b5f7b876cda304a7fdaeecfbab9278113058df --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/help.py @@ -0,0 +1,64 @@ +# Natural Language Toolkit (NLTK) Help +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Provide structured access to documentation. 
+""" + +import re +from textwrap import wrap + +from nltk.data import load + + +def brown_tagset(tagpattern=None): + _format_tagset("brown_tagset", tagpattern) + + +def claws5_tagset(tagpattern=None): + _format_tagset("claws5_tagset", tagpattern) + + +def upenn_tagset(tagpattern=None): + _format_tagset("upenn_tagset", tagpattern) + + +##################################################################### +# UTILITIES +##################################################################### + + +def _print_entries(tags, tagdict): + for tag in tags: + entry = tagdict[tag] + defn = [tag + ": " + entry[0]] + examples = wrap( + entry[1], width=75, initial_indent=" ", subsequent_indent=" " + ) + print("\n".join(defn + examples)) + + +def _format_tagset(tagset, tagpattern=None): + tagdict = load("help/tagsets/" + tagset + ".pickle") + if not tagpattern: + _print_entries(sorted(tagdict), tagdict) + elif tagpattern in tagdict: + _print_entries([tagpattern], tagdict) + else: + tagpattern = re.compile(tagpattern) + tags = [tag for tag in sorted(tagdict) if tagpattern.match(tag)] + if tags: + _print_entries(tags, tagdict) + else: + print("No matching tags found.") + + +if __name__ == "__main__": + brown_tagset(r"NN.*") + upenn_tagset(r".*\$") + claws5_tagset("UNDEFINED") + brown_tagset(r"NN") diff --git a/venv/lib/python3.10/site-packages/nltk/internals.py b/venv/lib/python3.10/site-packages/nltk/internals.py new file mode 100644 index 0000000000000000000000000000000000000000..b53d77da5e976c08f2cb002759e9da1044dc9bf0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/internals.py @@ -0,0 +1,1123 @@ +# Natural Language Toolkit: Internal utility functions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# Nitin Madnani +# URL: +# For license information, see LICENSE.TXT + +import fnmatch +import locale +import os +import re +import stat +import subprocess +import sys +import textwrap +import types +import warnings +from xml.etree import ElementTree + +########################################################################## +# Java Via Command-Line +########################################################################## + +_java_bin = None +_java_options = [] +# [xx] add classpath option to config_java? +def config_java(bin=None, options=None, verbose=False): + """ + Configure nltk's java interface, by letting nltk know where it can + find the Java binary, and what extra options (if any) should be + passed to Java when it is run. + + :param bin: The full path to the Java binary. If not specified, + then nltk will search the system for a Java binary; and if + one is not found, it will raise a ``LookupError`` exception. + :type bin: str + :param options: A list of options that should be passed to the + Java binary when it is called. A common value is + ``'-Xmx512m'``, which tells Java binary to increase + the maximum heap size to 512 megabytes. If no options are + specified, then do not modify the options list. + :type options: list(str) + """ + global _java_bin, _java_options + _java_bin = find_binary( + "java", + bin, + env_vars=["JAVAHOME", "JAVA_HOME"], + verbose=verbose, + binary_names=["java.exe"], + ) + + if options is not None: + if isinstance(options, str): + options = options.split() + _java_options = list(options) + + +def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None, blocking=True): + """ + Execute the given java command, by opening a subprocess that calls + Java. 
If java has not yet been configured, it will be configured + by calling ``config_java()`` with no arguments. + + :param cmd: The java command that should be called, formatted as + a list of strings. Typically, the first string will be the name + of the java class; and the remaining strings will be arguments + for that java class. + :type cmd: list(str) + + :param classpath: A ``':'`` separated list of directories, JAR + archives, and ZIP archives to search for class files. + :type classpath: str + + :param stdin: Specify the executed program's + standard input file handles, respectively. Valid values are ``subprocess.PIPE``, + an existing file descriptor (a positive integer), an existing + file object, 'pipe', 'stdout', 'devnull' and None. ``subprocess.PIPE`` indicates that a + new pipe to the child should be created. With None, no + redirection will occur; the child's file handles will be + inherited from the parent. Additionally, stderr can be + ``subprocess.STDOUT``, which indicates that the stderr data + from the applications should be captured into the same file + handle as for stdout. + + :param stdout: Specify the executed program's standard output file + handle. See ``stdin`` for valid values. + + :param stderr: Specify the executed program's standard error file + handle. See ``stdin`` for valid values. + + + :param blocking: If ``false``, then return immediately after + spawning the subprocess. In this case, the return value is + the ``Popen`` object, and not a ``(stdout, stderr)`` tuple. + + :return: If ``blocking=True``, then return a tuple ``(stdout, + stderr)``, containing the stdout and stderr outputs generated + by the java command if the ``stdout`` and ``stderr`` parameters + were set to ``subprocess.PIPE``; or None otherwise. If + ``blocking=False``, then return a ``subprocess.Popen`` object. + + :raise OSError: If the java command returns a nonzero return code. + """ + + subprocess_output_dict = { + "pipe": subprocess.PIPE, + "stdout": subprocess.STDOUT, + "devnull": subprocess.DEVNULL, + } + + stdin = subprocess_output_dict.get(stdin, stdin) + stdout = subprocess_output_dict.get(stdout, stdout) + stderr = subprocess_output_dict.get(stderr, stderr) + + if isinstance(cmd, str): + raise TypeError("cmd should be a list of strings") + + # Make sure we know where a java binary is. + if _java_bin is None: + config_java() + + # Set up the classpath. + if isinstance(classpath, str): + classpaths = [classpath] + else: + classpaths = list(classpath) + classpath = os.path.pathsep.join(classpaths) + + # Construct the full command string. + cmd = list(cmd) + cmd = ["-cp", classpath] + cmd + cmd = [_java_bin] + _java_options + cmd + + # Call java via a subprocess + p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr) + if not blocking: + return p + (stdout, stderr) = p.communicate() + + # Check the return code. + if p.returncode != 0: + print(_decode_stdoutdata(stderr)) + raise OSError("Java command failed : " + str(cmd)) + + return (stdout, stderr) + + +###################################################################### +# Parsing +###################################################################### + + +class ReadError(ValueError): + """ + Exception raised by read_* functions when they fail. + :param position: The index in the input string where an error occurred. + :param expected: What was expected when an error occurred. 
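A hedged sketch of the Java wrapper above; it assumes a JVM is installed, and the class name, arguments and jar path are placeholders rather than real artifacts::

    from nltk.internals import config_java, java

    config_java(options="-Xmx512m")   # optional; java() configures itself if needed

    # 'pipe' is shorthand for subprocess.PIPE, so both output streams are captured.
    stdout, stderr = java(
        ["com.example.SomeMainClass", "--input", "data.txt"],  # placeholder command
        classpath="/path/to/placeholder.jar",                  # placeholder classpath
        stdout="pipe",
        stderr="pipe",
    )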
+ """ + + def __init__(self, expected, position): + ValueError.__init__(self, expected, position) + self.expected = expected + self.position = position + + def __str__(self): + return f"Expected {self.expected} at {self.position}" + + +_STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')") + + +def read_str(s, start_position): + """ + If a Python string literal begins at the specified position in the + given string, then return a tuple ``(val, end_position)`` + containing the value of the string literal and the position where + it ends. Otherwise, raise a ``ReadError``. + + :param s: A string that will be checked to see if within which a + Python string literal exists. + :type s: str + + :param start_position: The specified beginning position of the string ``s`` + to begin regex matching. + :type start_position: int + + :return: A tuple containing the matched string literal evaluated as a + string and the end position of the string literal. + :rtype: tuple(str, int) + + :raise ReadError: If the ``_STRING_START_RE`` regex doesn't return a + match in ``s`` at ``start_position``, i.e., open quote. If the + ``_STRING_END_RE`` regex doesn't return a match in ``s`` at the + end of the first match, i.e., close quote. + :raise ValueError: If an invalid string (i.e., contains an invalid + escape sequence) is passed into the ``eval``. + + :Example: + + >>> from nltk.internals import read_str + >>> read_str('"Hello", World!', 0) + ('Hello', 7) + + """ + # Read the open quote, and any modifiers. + m = _STRING_START_RE.match(s, start_position) + if not m: + raise ReadError("open quote", start_position) + quotemark = m.group(1) + + # Find the close quote. + _STRING_END_RE = re.compile(r"\\|%s" % quotemark) + position = m.end() + while True: + match = _STRING_END_RE.search(s, position) + if not match: + raise ReadError("close quote", position) + if match.group(0) == "\\": + position = match.end() + 1 + else: + break + + # Process it, using eval. Strings with invalid escape sequences + # might raise ValueError. + try: + return eval(s[start_position : match.end()]), match.end() + except ValueError as e: + raise ReadError("valid escape sequence", start_position) from e + + +_READ_INT_RE = re.compile(r"-?\d+") + + +def read_int(s, start_position): + """ + If an integer begins at the specified position in the given + string, then return a tuple ``(val, end_position)`` containing the + value of the integer and the position where it ends. Otherwise, + raise a ``ReadError``. + + :param s: A string that will be checked to see if within which a + Python integer exists. + :type s: str + + :param start_position: The specified beginning position of the string ``s`` + to begin regex matching. + :type start_position: int + + :return: A tuple containing the matched integer casted to an int, + and the end position of the int in ``s``. + :rtype: tuple(int, int) + + :raise ReadError: If the ``_READ_INT_RE`` regex doesn't return a + match in ``s`` at ``start_position``. + + :Example: + + >>> from nltk.internals import read_int + >>> read_int('42 is the answer', 0) + (42, 2) + + """ + m = _READ_INT_RE.match(s, start_position) + if not m: + raise ReadError("integer", start_position) + return int(m.group()), m.end() + + +_READ_NUMBER_VALUE = re.compile(r"-?(\d*)([.]?\d*)?") + + +def read_number(s, start_position): + """ + If an integer or float begins at the specified position in the + given string, then return a tuple ``(val, end_position)`` + containing the value of the number and the position where it ends. 
+ Otherwise, raise a ``ReadError``. + + :param s: A string that will be checked to see if within which a + Python number exists. + :type s: str + + :param start_position: The specified beginning position of the string ``s`` + to begin regex matching. + :type start_position: int + + :return: A tuple containing the matched number casted to a ``float``, + and the end position of the number in ``s``. + :rtype: tuple(float, int) + + :raise ReadError: If the ``_READ_NUMBER_VALUE`` regex doesn't return a + match in ``s`` at ``start_position``. + + :Example: + + >>> from nltk.internals import read_number + >>> read_number('Pi is 3.14159', 6) + (3.14159, 13) + + """ + m = _READ_NUMBER_VALUE.match(s, start_position) + if not m or not (m.group(1) or m.group(2)): + raise ReadError("number", start_position) + if m.group(2): + return float(m.group()), m.end() + else: + return int(m.group()), m.end() + + +###################################################################### +# Check if a method has been overridden +###################################################################### + + +def overridden(method): + """ + :return: True if ``method`` overrides some method with the same + name in a base class. This is typically used when defining + abstract base classes or interfaces, to allow subclasses to define + either of two related methods: + + >>> class EaterI: + ... '''Subclass must define eat() or batch_eat().''' + ... def eat(self, food): + ... if overridden(self.batch_eat): + ... return self.batch_eat([food])[0] + ... else: + ... raise NotImplementedError() + ... def batch_eat(self, foods): + ... return [self.eat(food) for food in foods] + + :type method: instance method + """ + if isinstance(method, types.MethodType) and method.__self__.__class__ is not None: + name = method.__name__ + funcs = [ + cls.__dict__[name] + for cls in _mro(method.__self__.__class__) + if name in cls.__dict__ + ] + return len(funcs) > 1 + else: + raise TypeError("Expected an instance method.") + + +def _mro(cls): + """ + Return the method resolution order for ``cls`` -- i.e., a list + containing ``cls`` and all its base classes, in the order in which + they would be checked by ``getattr``. For new-style classes, this + is just cls.__mro__. For classic classes, this can be obtained by + a depth-first left-to-right traversal of ``__bases__``. + """ + if isinstance(cls, type): + return cls.__mro__ + else: + mro = [cls] + for base in cls.__bases__: + mro.extend(_mro(base)) + return mro + + +###################################################################### +# Deprecation decorator & base class +###################################################################### +# [xx] dedent msg first if it comes from a docstring. + + +def _add_epytext_field(obj, field, message): + """Add an epytext @field to a given object's docstring.""" + indent = "" + # If we already have a docstring, then add a blank line to separate + # it from the new field, and check its indentation. + if obj.__doc__: + obj.__doc__ = obj.__doc__.rstrip() + "\n\n" + indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs()) + if indents: + indent = min(indents) + # If we don't have a docstring, add an empty one. + else: + obj.__doc__ = "" + + obj.__doc__ += textwrap.fill( + f"@{field}: {message}", + initial_indent=indent, + subsequent_indent=indent + " ", + ) + + +def deprecated(message): + """ + A decorator used to mark functions as deprecated. This will cause + a warning to be printed the when the function is used. 
Usage: + + >>> from nltk.internals import deprecated + >>> @deprecated('Use foo() instead') + ... def bar(x): + ... print(x/10) + + """ + + def decorator(func): + msg = f"Function {func.__name__}() has been deprecated. {message}" + msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ") + + def newFunc(*args, **kwargs): + warnings.warn(msg, category=DeprecationWarning, stacklevel=2) + return func(*args, **kwargs) + + # Copy the old function's name, docstring, & dict + newFunc.__dict__.update(func.__dict__) + newFunc.__name__ = func.__name__ + newFunc.__doc__ = func.__doc__ + newFunc.__deprecated__ = True + # Add a @deprecated field to the docstring. + _add_epytext_field(newFunc, "deprecated", message) + return newFunc + + return decorator + + +class Deprecated: + """ + A base class used to mark deprecated classes. A typical usage is to + alert users that the name of a class has changed: + + >>> from nltk.internals import Deprecated + >>> class NewClassName: + ... pass # All logic goes here. + ... + >>> class OldClassName(Deprecated, NewClassName): + ... "Use NewClassName instead." + + The docstring of the deprecated class will be used in the + deprecation warning message. + """ + + def __new__(cls, *args, **kwargs): + # Figure out which class is the deprecated one. + dep_cls = None + for base in _mro(cls): + if Deprecated in base.__bases__: + dep_cls = base + break + assert dep_cls, "Unable to determine which base is deprecated." + + # Construct an appropriate warning. + doc = dep_cls.__doc__ or "".strip() + # If there's a @deprecated field, strip off the field marker. + doc = re.sub(r"\A\s*@deprecated:", r"", doc) + # Strip off any indentation. + doc = re.sub(r"(?m)^\s*", "", doc) + # Construct a 'name' string. + name = "Class %s" % dep_cls.__name__ + if cls != dep_cls: + name += " (base class for %s)" % cls.__name__ + # Put it all together. + msg = f"{name} has been deprecated. {doc}" + # Wrap it. + msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ") + warnings.warn(msg, category=DeprecationWarning, stacklevel=2) + # Do the actual work of __new__. + return object.__new__(cls) + + +########################################################################## +# COUNTER, FOR UNIQUE NAMING +########################################################################## + + +class Counter: + """ + A counter that auto-increments each time its value is read. + """ + + def __init__(self, initial_value=0): + self._value = initial_value + + def get(self): + self._value += 1 + return self._value + + +########################################################################## +# Search for files/binaries +########################################################################## + + +def find_file_iter( + filename, + env_vars=(), + searchpath=(), + file_names=None, + url=None, + verbose=False, + finding_dir=False, +): + """ + Search for a file to be used by nltk. + + :param filename: The name or path of the file. + :param env_vars: A list of environment variable names to check. + :param file_names: A list of alternative file names to check. + :param searchpath: List of directories to search. + :param url: URL presented to user for download help. + :param verbose: Whether or not to print path when a file is found. 
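A short sketch of how the lookup helpers are typically used; the file name and search paths are arbitrary, and a failed search raises ``LookupError``::

    from nltk.internals import find_file_iter

    try:
        # The first hit wins; candidates come from the search path, environment
        # variables, and (on POSIX systems) the `which` command.
        path = next(find_file_iter("tar", searchpath=["/usr/bin", "/usr/local/bin"]))
    except LookupError:
        path = None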
+ """ + file_names = [filename] + (file_names or []) + assert isinstance(filename, str) + assert not isinstance(file_names, str) + assert not isinstance(searchpath, str) + if isinstance(env_vars, str): + env_vars = env_vars.split() + yielded = False + + # File exists, no magic + for alternative in file_names: + path_to_file = os.path.join(filename, alternative) + if os.path.isfile(path_to_file): + if verbose: + print(f"[Found {filename}: {path_to_file}]") + yielded = True + yield path_to_file + # Check the bare alternatives + if os.path.isfile(alternative): + if verbose: + print(f"[Found {filename}: {alternative}]") + yielded = True + yield alternative + # Check if the alternative is inside a 'file' directory + path_to_file = os.path.join(filename, "file", alternative) + if os.path.isfile(path_to_file): + if verbose: + print(f"[Found {filename}: {path_to_file}]") + yielded = True + yield path_to_file + + # Check environment variables + for env_var in env_vars: + if env_var in os.environ: + if finding_dir: # This is to file a directory instead of file + yielded = True + yield os.environ[env_var] + + for env_dir in os.environ[env_var].split(os.pathsep): + # Check if the environment variable contains a direct path to the bin + if os.path.isfile(env_dir): + if verbose: + print(f"[Found {filename}: {env_dir}]") + yielded = True + yield env_dir + # Check if the possible bin names exist inside the environment variable directories + for alternative in file_names: + path_to_file = os.path.join(env_dir, alternative) + if os.path.isfile(path_to_file): + if verbose: + print(f"[Found {filename}: {path_to_file}]") + yielded = True + yield path_to_file + # Check if the alternative is inside a 'file' directory + # path_to_file = os.path.join(env_dir, 'file', alternative) + + # Check if the alternative is inside a 'bin' directory + path_to_file = os.path.join(env_dir, "bin", alternative) + + if os.path.isfile(path_to_file): + if verbose: + print(f"[Found {filename}: {path_to_file}]") + yielded = True + yield path_to_file + + # Check the path list. + for directory in searchpath: + for alternative in file_names: + path_to_file = os.path.join(directory, alternative) + if os.path.isfile(path_to_file): + yielded = True + yield path_to_file + + # If we're on a POSIX system, then try using the 'which' command + # to find the file. + if os.name == "posix": + for alternative in file_names: + try: + p = subprocess.Popen( + ["which", alternative], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout, stderr = p.communicate() + path = _decode_stdoutdata(stdout).strip() + if path.endswith(alternative) and os.path.exists(path): + if verbose: + print(f"[Found {filename}: {path}]") + yielded = True + yield path + except (KeyboardInterrupt, SystemExit, OSError): + raise + finally: + pass + + if not yielded: + msg = ( + "NLTK was unable to find the %s file!" + "\nUse software specific " + "configuration parameters" % filename + ) + if env_vars: + msg += " or set the %s environment variable" % env_vars[0] + msg += "." 
+ if searchpath: + msg += "\n\n Searched in:" + msg += "".join("\n - %s" % d for d in searchpath) + if url: + msg += f"\n\n For more information on {filename}, see:\n <{url}>" + div = "=" * 75 + raise LookupError(f"\n\n{div}\n{msg}\n{div}") + + +def find_file( + filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False +): + return next( + find_file_iter(filename, env_vars, searchpath, file_names, url, verbose) + ) + + +def find_dir( + filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False +): + return next( + find_file_iter( + filename, env_vars, searchpath, file_names, url, verbose, finding_dir=True + ) + ) + + +def find_binary_iter( + name, + path_to_bin=None, + env_vars=(), + searchpath=(), + binary_names=None, + url=None, + verbose=False, +): + """ + Search for a file to be used by nltk. + + :param name: The name or path of the file. + :param path_to_bin: The user-supplied binary location (deprecated) + :param env_vars: A list of environment variable names to check. + :param file_names: A list of alternative file names to check. + :param searchpath: List of directories to search. + :param url: URL presented to user for download help. + :param verbose: Whether or not to print path when a file is found. + """ + yield from find_file_iter( + path_to_bin or name, env_vars, searchpath, binary_names, url, verbose + ) + + +def find_binary( + name, + path_to_bin=None, + env_vars=(), + searchpath=(), + binary_names=None, + url=None, + verbose=False, +): + return next( + find_binary_iter( + name, path_to_bin, env_vars, searchpath, binary_names, url, verbose + ) + ) + + +def find_jar_iter( + name_pattern, + path_to_jar=None, + env_vars=(), + searchpath=(), + url=None, + verbose=False, + is_regex=False, +): + """ + Search for a jar that is used by nltk. + + :param name_pattern: The name of the jar file + :param path_to_jar: The user-supplied jar location, or None. + :param env_vars: A list of environment variable names to check + in addition to the CLASSPATH variable which is + checked by default. + :param searchpath: List of directories to search. + :param is_regex: Whether name is a regular expression. + """ + + assert isinstance(name_pattern, str) + assert not isinstance(searchpath, str) + if isinstance(env_vars, str): + env_vars = env_vars.split() + yielded = False + + # Make sure we check the CLASSPATH first + env_vars = ["CLASSPATH"] + list(env_vars) + + # If an explicit location was given, then check it, and yield it if + # it's present; otherwise, complain. 
+ if path_to_jar is not None: + if os.path.isfile(path_to_jar): + yielded = True + yield path_to_jar + else: + raise LookupError( + f"Could not find {name_pattern} jar file at {path_to_jar}" + ) + + # Check environment variables + for env_var in env_vars: + if env_var in os.environ: + if env_var == "CLASSPATH": + classpath = os.environ["CLASSPATH"] + for cp in classpath.split(os.path.pathsep): + cp = os.path.expanduser(cp) + if os.path.isfile(cp): + filename = os.path.basename(cp) + if ( + is_regex + and re.match(name_pattern, filename) + or (not is_regex and filename == name_pattern) + ): + if verbose: + print(f"[Found {name_pattern}: {cp}]") + yielded = True + yield cp + # The case where user put directory containing the jar file in the classpath + if os.path.isdir(cp): + if not is_regex: + if os.path.isfile(os.path.join(cp, name_pattern)): + if verbose: + print(f"[Found {name_pattern}: {cp}]") + yielded = True + yield os.path.join(cp, name_pattern) + else: + # Look for file using regular expression + for file_name in os.listdir(cp): + if re.match(name_pattern, file_name): + if verbose: + print( + "[Found %s: %s]" + % ( + name_pattern, + os.path.join(cp, file_name), + ) + ) + yielded = True + yield os.path.join(cp, file_name) + + else: + jar_env = os.path.expanduser(os.environ[env_var]) + jar_iter = ( + ( + os.path.join(jar_env, path_to_jar) + for path_to_jar in os.listdir(jar_env) + ) + if os.path.isdir(jar_env) + else (jar_env,) + ) + for path_to_jar in jar_iter: + if os.path.isfile(path_to_jar): + filename = os.path.basename(path_to_jar) + if ( + is_regex + and re.match(name_pattern, filename) + or (not is_regex and filename == name_pattern) + ): + if verbose: + print(f"[Found {name_pattern}: {path_to_jar}]") + yielded = True + yield path_to_jar + + # Check the path list. + for directory in searchpath: + if is_regex: + for filename in os.listdir(directory): + path_to_jar = os.path.join(directory, filename) + if os.path.isfile(path_to_jar): + if re.match(name_pattern, filename): + if verbose: + print(f"[Found {filename}: {path_to_jar}]") + yielded = True + yield path_to_jar + else: + path_to_jar = os.path.join(directory, name_pattern) + if os.path.isfile(path_to_jar): + if verbose: + print(f"[Found {name_pattern}: {path_to_jar}]") + yielded = True + yield path_to_jar + + if not yielded: + # If nothing was found, raise an error + msg = "NLTK was unable to find %s!" 
% name_pattern + if env_vars: + msg += " Set the %s environment variable" % env_vars[0] + msg = textwrap.fill(msg + ".", initial_indent=" ", subsequent_indent=" ") + if searchpath: + msg += "\n\n Searched in:" + msg += "".join("\n - %s" % d for d in searchpath) + if url: + msg += "\n\n For more information, on {}, see:\n <{}>".format( + name_pattern, + url, + ) + div = "=" * 75 + raise LookupError(f"\n\n{div}\n{msg}\n{div}") + + +def find_jar( + name_pattern, + path_to_jar=None, + env_vars=(), + searchpath=(), + url=None, + verbose=False, + is_regex=False, +): + return next( + find_jar_iter( + name_pattern, path_to_jar, env_vars, searchpath, url, verbose, is_regex + ) + ) + + +def find_jars_within_path(path_to_jars): + return [ + os.path.join(root, filename) + for root, dirnames, filenames in os.walk(path_to_jars) + for filename in fnmatch.filter(filenames, "*.jar") + ] + + +def _decode_stdoutdata(stdoutdata): + """Convert data read from stdout/stderr to unicode""" + if not isinstance(stdoutdata, bytes): + return stdoutdata + + encoding = getattr(sys.__stdout__, "encoding", locale.getpreferredencoding()) + if encoding is None: + return stdoutdata.decode() + return stdoutdata.decode(encoding) + + +########################################################################## +# Import Stdlib Module +########################################################################## + + +def import_from_stdlib(module): + """ + When python is run from within the nltk/ directory tree, the + current directory is included at the beginning of the search path. + Unfortunately, that means that modules within nltk can sometimes + shadow standard library modules. As an example, the stdlib + 'inspect' module will attempt to import the stdlib 'tokenize' + module, but will instead end up importing NLTK's 'tokenize' module + instead (causing the import to fail). + """ + old_path = sys.path + sys.path = [d for d in sys.path if d not in ("", ".")] + m = __import__(module) + sys.path = old_path + return m + + +########################################################################## +# Wrapper for ElementTree Elements +########################################################################## + + +class ElementWrapper: + """ + A wrapper around ElementTree Element objects whose main purpose is + to provide nicer __repr__ and __str__ methods. In addition, any + of the wrapped Element's methods that return other Element objects + are overridden to wrap those values before returning them. + + This makes Elements more convenient to work with in + interactive sessions and doctests, at the expense of some + efficiency. + """ + + # Prevent double-wrapping: + def __new__(cls, etree): + """ + Create and return a wrapper around a given Element object. + If ``etree`` is an ``ElementWrapper``, then ``etree`` is + returned as-is. + """ + if isinstance(etree, ElementWrapper): + return etree + else: + return object.__new__(ElementWrapper) + + def __init__(self, etree): + r""" + Initialize a new Element wrapper for ``etree``. + + If ``etree`` is a string, then it will be converted to an + Element object using ``ElementTree.fromstring()`` first: + + >>> ElementWrapper("") + \n"> + + """ + if isinstance(etree, str): + etree = ElementTree.fromstring(etree) + self.__dict__["_etree"] = etree + + def unwrap(self): + """ + Return the Element object wrapped by this wrapper. 
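A minimal sketch of the wrapper; the XML snippet is illustrative::

    from nltk.internals import ElementWrapper

    w = ElementWrapper("<doc><w pos='NN'>dog</w><w pos='VB'>barks</w></doc>")

    first = w.find("w")              # find()/findall()/indexing re-wrap their results
    assert isinstance(first, ElementWrapper)
    assert first.get("pos") == "NN"  # unknown attributes are delegated to the Element

    element = w.unwrap()             # recover the underlying ElementTree Element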
+ """ + return self._etree + + ##//////////////////////////////////////////////////////////// + # { String Representation + ##//////////////////////////////////////////////////////////// + + def __repr__(self): + s = ElementTree.tostring(self._etree, encoding="utf8").decode("utf8") + if len(s) > 60: + e = s.rfind("<") + if (len(s) - e) > 30: + e = -20 + s = f"{s[:30]}...{s[e:]}" + return "" % s + + def __str__(self): + """ + :return: the result of applying ``ElementTree.tostring()`` to + the wrapped Element object. + """ + return ( + ElementTree.tostring(self._etree, encoding="utf8").decode("utf8").rstrip() + ) + + ##//////////////////////////////////////////////////////////// + # { Element interface Delegation (pass-through) + ##//////////////////////////////////////////////////////////// + + def __getattr__(self, attrib): + return getattr(self._etree, attrib) + + def __setattr__(self, attr, value): + return setattr(self._etree, attr, value) + + def __delattr__(self, attr): + return delattr(self._etree, attr) + + def __setitem__(self, index, element): + self._etree[index] = element + + def __delitem__(self, index): + del self._etree[index] + + def __setslice__(self, start, stop, elements): + self._etree[start:stop] = elements + + def __delslice__(self, start, stop): + del self._etree[start:stop] + + def __len__(self): + return len(self._etree) + + ##//////////////////////////////////////////////////////////// + # { Element interface Delegation (wrap result) + ##//////////////////////////////////////////////////////////// + + def __getitem__(self, index): + return ElementWrapper(self._etree[index]) + + def __getslice__(self, start, stop): + return [ElementWrapper(elt) for elt in self._etree[start:stop]] + + def getchildren(self): + return [ElementWrapper(elt) for elt in self._etree] + + def getiterator(self, tag=None): + return (ElementWrapper(elt) for elt in self._etree.getiterator(tag)) + + def makeelement(self, tag, attrib): + return ElementWrapper(self._etree.makeelement(tag, attrib)) + + def find(self, path): + elt = self._etree.find(path) + if elt is None: + return elt + else: + return ElementWrapper(elt) + + def findall(self, path): + return [ElementWrapper(elt) for elt in self._etree.findall(path)] + + +###################################################################### +# Helper for Handling Slicing +###################################################################### + + +def slice_bounds(sequence, slice_obj, allow_step=False): + """ + Given a slice, return the corresponding (start, stop) bounds, + taking into account None indices and negative indices. The + following guarantees are made for the returned start and stop values: + + - 0 <= start <= len(sequence) + - 0 <= stop <= len(sequence) + - start <= stop + + :raise ValueError: If ``slice_obj.step`` is not None. + :param allow_step: If true, then the slice object may have a + non-None step. If it does, then return a tuple + (start, stop, step). + """ + start, stop = (slice_obj.start, slice_obj.stop) + + # If allow_step is true, then include the step in our return + # value tuple. + if allow_step: + step = slice_obj.step + if step is None: + step = 1 + # Use a recursive call without allow_step to find the slice + # bounds. If step is negative, then the roles of start and + # stop (in terms of default values, etc), are swapped. 
+ if step < 0: + start, stop = slice_bounds(sequence, slice(stop, start)) + else: + start, stop = slice_bounds(sequence, slice(start, stop)) + return start, stop, step + + # Otherwise, make sure that no non-default step value is used. + elif slice_obj.step not in (None, 1): + raise ValueError( + "slices with steps are not supported by %s" % sequence.__class__.__name__ + ) + + # Supply default offsets. + if start is None: + start = 0 + if stop is None: + stop = len(sequence) + + # Handle negative indices. + if start < 0: + start = max(0, len(sequence) + start) + if stop < 0: + stop = max(0, len(sequence) + stop) + + # Make sure stop doesn't go past the end of the list. Note that + # we avoid calculating len(sequence) if possible, because for lazy + # sequences, calculating the length of a sequence can be expensive. + if stop > 0: + try: + sequence[stop - 1] + except IndexError: + stop = len(sequence) + + # Make sure start isn't past stop. + start = min(start, stop) + + # That's all folks! + return start, stop + + +###################################################################### +# Permission Checking +###################################################################### + + +def is_writable(path): + # Ensure that it exists. + if not os.path.exists(path): + return False + + # If we're on a posix system, check its permissions. + if hasattr(os, "getuid"): + statdata = os.stat(path) + perm = stat.S_IMODE(statdata.st_mode) + # is it world-writable? + if perm & 0o002: + return True + # do we own it? + elif statdata.st_uid == os.getuid() and (perm & 0o200): + return True + # are we in a group that can write to it? + elif (statdata.st_gid in [os.getgid()] + os.getgroups()) and (perm & 0o020): + return True + # otherwise, we can't write to it. + else: + return False + + # Otherwise, we'll assume it's writable. + # [xx] should we do other checks on other platforms? + return True + + +###################################################################### +# NLTK Error reporting +###################################################################### + + +def raise_unorderable_types(ordering, a, b): + raise TypeError( + "unorderable types: %s() %s %s()" + % (type(a).__name__, ordering, type(b).__name__) + ) diff --git a/venv/lib/python3.10/site-packages/nltk/jsontags.py b/venv/lib/python3.10/site-packages/nltk/jsontags.py new file mode 100644 index 0000000000000000000000000000000000000000..58d73bf138d07236bc38979bd69266c5972b62b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/jsontags.py @@ -0,0 +1,65 @@ +# Natural Language Toolkit: JSON Encoder/Decoder Helpers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Xu +# +# URL: +# For license information, see LICENSE.TXT + +""" +Register JSON tags, so the nltk data loader knows what module and class to look for. + +NLTK uses simple '!' tags to mark the types of objects, but the fully-qualified +"tag:nltk.org,2011:" prefix is also accepted in case anyone ends up +using it. +""" + +import json + +json_tags = {} + +TAG_PREFIX = "!" + + +def register_tag(cls): + """ + Decorates a class to register it's json tag. 
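+
+    A class that should round-trip through ``JSONTaggedEncoder`` and
+    ``JSONTaggedDecoder`` (defined below) needs a ``json_tag`` attribute,
+    an ``encode_json_obj`` method, and a ``decode_json_obj`` classmethod.
+    An illustrative sketch (``Widget`` is a hypothetical class, not part
+    of NLTK)::
+
+        @register_tag
+        class Widget:
+            json_tag = "Widget"
+
+            def __init__(self, name):
+                self.name = name
+
+            def encode_json_obj(self):
+                return {"name": self.name}
+
+            @classmethod
+            def decode_json_obj(cls, obj):
+                return cls(obj["name"])
+
+        # json.dumps(Widget("w"), cls=JSONTaggedEncoder)
+        # -> '{"!Widget": {"name": "w"}}'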
+ """ + json_tags[TAG_PREFIX + getattr(cls, "json_tag")] = cls + return cls + + +class JSONTaggedEncoder(json.JSONEncoder): + def default(self, obj): + obj_tag = getattr(obj, "json_tag", None) + if obj_tag is None: + return super().default(obj) + obj_tag = TAG_PREFIX + obj_tag + obj = obj.encode_json_obj() + return {obj_tag: obj} + + +class JSONTaggedDecoder(json.JSONDecoder): + def decode(self, s): + return self.decode_obj(super().decode(s)) + + @classmethod + def decode_obj(cls, obj): + # Decode nested objects first. + if isinstance(obj, dict): + obj = {key: cls.decode_obj(val) for (key, val) in obj.items()} + elif isinstance(obj, list): + obj = list(cls.decode_obj(val) for val in obj) + # Check if we have a tagged object. + if not isinstance(obj, dict) or len(obj) != 1: + return obj + obj_tag = next(iter(obj.keys())) + if not obj_tag.startswith("!"): + return obj + if obj_tag not in json_tags: + raise ValueError("Unknown tag", obj_tag) + obj_cls = json_tags[obj_tag] + return obj_cls.decode_json_obj(obj[obj_tag]) + + +__all__ = ["register_tag", "json_tags", "JSONTaggedEncoder", "JSONTaggedDecoder"] diff --git a/venv/lib/python3.10/site-packages/nltk/langnames.py b/venv/lib/python3.10/site-packages/nltk/langnames.py new file mode 100644 index 0000000000000000000000000000000000000000..b7fa6b40a4b381b4b2c4f3ff42ee2450f3849465 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/langnames.py @@ -0,0 +1,730 @@ +# Natural Language Toolkit: Language Codes +# +# Copyright (C) 2022-2023 NLTK Project +# Author: Eric Kafe +# URL: +# For license information, see LICENSE.TXT +# +# iso639-3 language codes (C) https://iso639-3.sil.org/ + +""" +Translate between language names and language codes. + +The iso639-3 language codes were downloaded from the registration authority at +https://iso639-3.sil.org/ + +The iso639-3 codeset is evolving, so retired language codes are kept in the +"iso639retired" dictionary, which is used as fallback by the wrapper functions +"langname" and "langcode", in order to support the lookup of retired codes. + +The "langcode" function returns the current iso639-3 code if there is one, +and falls back to the retired code otherwise. 
As specified by BCP-47, +it returns the shortest (2-letter) code by default, but 3-letter codes +are also available: + + >>> import nltk.langnames as lgn + >>> lgn.langname('fri') #'fri' is a retired code + 'Western Frisian' + + The current code is different from the retired one: + >>> lgn.langcode('Western Frisian') + 'fy' + + >>> lgn.langcode('Western Frisian', typ = 3) + 'fry' + +""" + +import re +from warnings import warn + +from nltk.corpus import bcp47 + +codepattern = re.compile("[a-z][a-z][a-z]?") + + +def langname(tag, typ="full"): + """ + Convert a composite BCP-47 tag to a language name + + >>> from nltk.langnames import langname + >>> langname('ca-Latn-ES-valencia') + 'Catalan: Latin: Spain: Valencian' + + >>> langname('ca-Latn-ES-valencia', typ="short") + 'Catalan' + """ + tags = tag.split("-") + code = tags[0].lower() + if codepattern.fullmatch(code): + if code in iso639retired: # retired codes + return iso639retired[code] + elif code in iso639short: # 3-letter codes + code2 = iso639short[code] # convert to 2-letter code + warn(f"Shortening {code!r} to {code2!r}", stacklevel=2) + tag = "-".join([code2] + tags[1:]) + name = bcp47.name(tag) # parse according to BCP-47 + if typ == "full": + return name # include all subtags + elif name: + return name.split(":")[0] # only the language subtag + else: + warn(f"Could not find code in {code!r}", stacklevel=2) + + +def langcode(name, typ=2): + """ + Convert language name to iso639-3 language code. Returns the short 2-letter + code by default, if one is available, and the 3-letter code otherwise: + + >>> from nltk.langnames import langcode + >>> langcode('Modern Greek (1453-)') + 'el' + + Specify 'typ=3' to get the 3-letter code: + + >>> langcode('Modern Greek (1453-)', typ=3) + 'ell' + """ + if name in bcp47.langcode: + code = bcp47.langcode[name] + if typ == 3 and code in iso639long: + code = iso639long[code] # convert to 3-letter code + return code + elif name in iso639code_retired: + return iso639code_retired[name] + else: + warn(f"Could not find language in {name!r}", stacklevel=2) + + +# ======================================================================= +# Translate betwwen Wikidata Q-codes and BCP-47 codes or names +# ....................................................................... + + +def tag2q(tag): + """ + Convert BCP-47 tag to Wikidata Q-code + + >>> tag2q('nds-u-sd-demv') + 'Q4289225' + """ + return bcp47.wiki_q[tag] + + +def q2tag(qcode): + """ + Convert Wikidata Q-code to BCP-47 tag + + >>> q2tag('Q4289225') + 'nds-u-sd-demv' + """ + return wiki_bcp47[qcode] + + +def q2name(qcode, typ="full"): + """ + Convert Wikidata Q-code to BCP-47 (full or short) language name + + >>> q2name('Q4289225') + 'Low German: Mecklenburg-Vorpommern' + + >>> q2name('Q4289225', "short") + 'Low German' + """ + return langname(q2tag(qcode), typ) + + +def lang2q(name): + """ + Convert simple language name to Wikidata Q-code + + >>> lang2q('Low German') + 'Q25433' + """ + return tag2q(langcode(name)) + + +# ====================================================================== +# Data dictionaries +# ...................................................................... 
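+
+# The ``iso639short`` table below maps 3-letter iso639-3 codes to their
+# 2-letter equivalents (e.g. "fry" -> "fy"), and ``iso639retired`` maps
+# retired codes to language names.  Their inverses, ``iso639long`` and
+# ``iso639code_retired``, are derived at the end of the module via
+# ``inverse_dict``.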
+ + +def inverse_dict(dic): + """Return inverse mapping, but only if it is bijective""" + if len(dic.keys()) == len(set(dic.values())): + return {val: key for (key, val) in dic.items()} + else: + warn("This dictionary has no bijective inverse mapping.") + + +bcp47.load_wiki_q() # Wikidata conversion table needs to be loaded explicitly +wiki_bcp47 = inverse_dict(bcp47.wiki_q) + +iso639short = { + "aar": "aa", + "abk": "ab", + "afr": "af", + "aka": "ak", + "amh": "am", + "ara": "ar", + "arg": "an", + "asm": "as", + "ava": "av", + "ave": "ae", + "aym": "ay", + "aze": "az", + "bak": "ba", + "bam": "bm", + "bel": "be", + "ben": "bn", + "bis": "bi", + "bod": "bo", + "bos": "bs", + "bre": "br", + "bul": "bg", + "cat": "ca", + "ces": "cs", + "cha": "ch", + "che": "ce", + "chu": "cu", + "chv": "cv", + "cor": "kw", + "cos": "co", + "cre": "cr", + "cym": "cy", + "dan": "da", + "deu": "de", + "div": "dv", + "dzo": "dz", + "ell": "el", + "eng": "en", + "epo": "eo", + "est": "et", + "eus": "eu", + "ewe": "ee", + "fao": "fo", + "fas": "fa", + "fij": "fj", + "fin": "fi", + "fra": "fr", + "fry": "fy", + "ful": "ff", + "gla": "gd", + "gle": "ga", + "glg": "gl", + "glv": "gv", + "grn": "gn", + "guj": "gu", + "hat": "ht", + "hau": "ha", + "hbs": "sh", + "heb": "he", + "her": "hz", + "hin": "hi", + "hmo": "ho", + "hrv": "hr", + "hun": "hu", + "hye": "hy", + "ibo": "ig", + "ido": "io", + "iii": "ii", + "iku": "iu", + "ile": "ie", + "ina": "ia", + "ind": "id", + "ipk": "ik", + "isl": "is", + "ita": "it", + "jav": "jv", + "jpn": "ja", + "kal": "kl", + "kan": "kn", + "kas": "ks", + "kat": "ka", + "kau": "kr", + "kaz": "kk", + "khm": "km", + "kik": "ki", + "kin": "rw", + "kir": "ky", + "kom": "kv", + "kon": "kg", + "kor": "ko", + "kua": "kj", + "kur": "ku", + "lao": "lo", + "lat": "la", + "lav": "lv", + "lim": "li", + "lin": "ln", + "lit": "lt", + "ltz": "lb", + "lub": "lu", + "lug": "lg", + "mah": "mh", + "mal": "ml", + "mar": "mr", + "mkd": "mk", + "mlg": "mg", + "mlt": "mt", + "mon": "mn", + "mri": "mi", + "msa": "ms", + "mya": "my", + "nau": "na", + "nav": "nv", + "nbl": "nr", + "nde": "nd", + "ndo": "ng", + "nep": "ne", + "nld": "nl", + "nno": "nn", + "nob": "nb", + "nor": "no", + "nya": "ny", + "oci": "oc", + "oji": "oj", + "ori": "or", + "orm": "om", + "oss": "os", + "pan": "pa", + "pli": "pi", + "pol": "pl", + "por": "pt", + "pus": "ps", + "que": "qu", + "roh": "rm", + "ron": "ro", + "run": "rn", + "rus": "ru", + "sag": "sg", + "san": "sa", + "sin": "si", + "slk": "sk", + "slv": "sl", + "sme": "se", + "smo": "sm", + "sna": "sn", + "snd": "sd", + "som": "so", + "sot": "st", + "spa": "es", + "sqi": "sq", + "srd": "sc", + "srp": "sr", + "ssw": "ss", + "sun": "su", + "swa": "sw", + "swe": "sv", + "tah": "ty", + "tam": "ta", + "tat": "tt", + "tel": "te", + "tgk": "tg", + "tgl": "tl", + "tha": "th", + "tir": "ti", + "ton": "to", + "tsn": "tn", + "tso": "ts", + "tuk": "tk", + "tur": "tr", + "twi": "tw", + "uig": "ug", + "ukr": "uk", + "urd": "ur", + "uzb": "uz", + "ven": "ve", + "vie": "vi", + "vol": "vo", + "wln": "wa", + "wol": "wo", + "xho": "xh", + "yid": "yi", + "yor": "yo", + "zha": "za", + "zho": "zh", + "zul": "zu", +} + + +iso639retired = { + "fri": "Western Frisian", + "auv": "Auvergnat", + "gsc": "Gascon", + "lms": "Limousin", + "lnc": "Languedocien", + "prv": "Provençal", + "amd": "Amapá Creole", + "bgh": "Bogan", + "bnh": "Banawá", + "bvs": "Belgian Sign Language", + "ccy": "Southern Zhuang", + "cit": "Chittagonian", + "flm": "Falam Chin", + "jap": "Jaruára", + "kob": "Kohoroxitari", + "mob": 
"Moinba", + "mzf": "Aiku", + "nhj": "Tlalitzlipa Nahuatl", + "nhs": "Southeastern Puebla Nahuatl", + "occ": "Occidental", + "tmx": "Tomyang", + "tot": "Patla-Chicontla Totonac", + "xmi": "Miarrã", + "yib": "Yinglish", + "ztc": "Lachirioag Zapotec", + "atf": "Atuence", + "bqe": "Navarro-Labourdin Basque", + "bsz": "Souletin Basque", + "aex": "Amerax", + "ahe": "Ahe", + "aiz": "Aari", + "akn": "Amikoana", + "arf": "Arafundi", + "azr": "Adzera", + "bcx": "Pamona", + "bii": "Bisu", + "bke": "Bengkulu", + "blu": "Hmong Njua", + "boc": "Bakung Kenyah", + "bsd": "Sarawak Bisaya", + "bwv": "Bahau River Kenyah", + "bxt": "Buxinhua", + "byu": "Buyang", + "ccx": "Northern Zhuang", + "cru": "Carútana", + "dat": "Darang Deng", + "dyk": "Land Dayak", + "eni": "Enim", + "fiz": "Izere", + "gen": "Geman Deng", + "ggh": "Garreh-Ajuran", + "itu": "Itutang", + "kds": "Lahu Shi", + "knh": "Kayan River Kenyah", + "krg": "North Korowai", + "krq": "Krui", + "kxg": "Katingan", + "lmt": "Lematang", + "lnt": "Lintang", + "lod": "Berawan", + "mbg": "Northern Nambikuára", + "mdo": "Southwest Gbaya", + "mhv": "Arakanese", + "miv": "Mimi", + "mqd": "Madang", + "nky": "Khiamniungan Naga", + "nxj": "Nyadu", + "ogn": "Ogan", + "ork": "Orokaiva", + "paj": "Ipeka-Tapuia", + "pec": "Southern Pesisir", + "pen": "Penesak", + "plm": "Palembang", + "poj": "Lower Pokomo", + "pun": "Pubian", + "rae": "Ranau", + "rjb": "Rajbanshi", + "rws": "Rawas", + "sdd": "Semendo", + "sdi": "Sindang Kelingi", + "skl": "Selako", + "slb": "Kahumamahon Saluan", + "srj": "Serawai", + "suf": "Tarpia", + "suh": "Suba", + "suu": "Sungkai", + "szk": "Sizaki", + "tle": "Southern Marakwet", + "tnj": "Tanjong", + "ttx": "Tutong 1", + "ubm": "Upper Baram Kenyah", + "vky": "Kayu Agung", + "vmo": "Muko-Muko", + "wre": "Ware", + "xah": "Kahayan", + "xkm": "Mahakam Kenyah", + "xuf": "Kunfal", + "yio": "Dayao Yi", + "ymj": "Muji Yi", + "ypl": "Pula Yi", + "ypw": "Puwa Yi", + "ywm": "Wumeng Yi", + "yym": "Yuanjiang-Mojiang Yi", + "mly": "Malay (individual language)", + "muw": "Mundari", + "xst": "Silt'e", + "ope": "Old Persian", + "scc": "Serbian", + "scr": "Croatian", + "xsk": "Sakan", + "mol": "Moldavian", + "aay": "Aariya", + "acc": "Cubulco Achí", + "cbm": "Yepocapa Southwestern Cakchiquel", + "chs": "Chumash", + "ckc": "Northern Cakchiquel", + "ckd": "South Central Cakchiquel", + "cke": "Eastern Cakchiquel", + "ckf": "Southern Cakchiquel", + "cki": "Santa María De Jesús Cakchiquel", + "ckj": "Santo Domingo Xenacoj Cakchiquel", + "ckk": "Acatenango Southwestern Cakchiquel", + "ckw": "Western Cakchiquel", + "cnm": "Ixtatán Chuj", + "cti": "Tila Chol", + "cun": "Cunén Quiché", + "eml": "Emiliano-Romagnolo", + "eur": "Europanto", + "gmo": "Gamo-Gofa-Dawro", + "hsf": "Southeastern Huastec", + "hva": "San Luís Potosí Huastec", + "ixi": "Nebaj Ixil", + "ixj": "Chajul Ixil", + "jai": "Western Jacalteco", + "mms": "Southern Mam", + "mpf": "Tajumulco Mam", + "mtz": "Tacanec", + "mvc": "Central Mam", + "mvj": "Todos Santos Cuchumatán Mam", + "poa": "Eastern Pokomam", + "pob": "Western Pokomchí", + "pou": "Southern Pokomam", + "ppv": "Papavô", + "quj": "Joyabaj Quiché", + "qut": "West Central Quiché", + "quu": "Eastern Quiché", + "qxi": "San Andrés Quiché", + "sic": "Malinguat", + "stc": "Santa Cruz", + "tlz": "Toala'", + "tzb": "Bachajón Tzeltal", + "tzc": "Chamula Tzotzil", + "tze": "Chenalhó Tzotzil", + "tzs": "San Andrés Larrainzar Tzotzil", + "tzt": "Western Tzutujil", + "tzu": "Huixtán Tzotzil", + "tzz": "Zinacantán Tzotzil", + "vlr": "Vatrata", + "yus": "Chan 
Santa Cruz Maya", + "nfg": "Nyeng", + "nfk": "Shakara", + "agp": "Paranan", + "bhk": "Albay Bicolano", + "bkb": "Finallig", + "btb": "Beti (Cameroon)", + "cjr": "Chorotega", + "cmk": "Chimakum", + "drh": "Darkhat", + "drw": "Darwazi", + "gav": "Gabutamon", + "mof": "Mohegan-Montauk-Narragansett", + "mst": "Cataelano Mandaya", + "myt": "Sangab Mandaya", + "rmr": "Caló", + "sgl": "Sanglechi-Ishkashimi", + "sul": "Surigaonon", + "sum": "Sumo-Mayangna", + "tnf": "Tangshewi", + "wgw": "Wagawaga", + "ayx": "Ayi (China)", + "bjq": "Southern Betsimisaraka Malagasy", + "dha": "Dhanwar (India)", + "dkl": "Kolum So Dogon", + "mja": "Mahei", + "nbf": "Naxi", + "noo": "Nootka", + "tie": "Tingal", + "tkk": "Takpa", + "baz": "Tunen", + "bjd": "Bandjigali", + "ccq": "Chaungtha", + "cka": "Khumi Awa Chin", + "dap": "Nisi (India)", + "dwl": "Walo Kumbe Dogon", + "elp": "Elpaputih", + "gbc": "Garawa", + "gio": "Gelao", + "hrr": "Horuru", + "ibi": "Ibilo", + "jar": "Jarawa (Nigeria)", + "kdv": "Kado", + "kgh": "Upper Tanudan Kalinga", + "kpp": "Paku Karen", + "kzh": "Kenuzi-Dongola", + "lcq": "Luhu", + "mgx": "Omati", + "nln": "Durango Nahuatl", + "pbz": "Palu", + "pgy": "Pongyong", + "sca": "Sansu", + "tlw": "South Wemale", + "unp": "Worora", + "wiw": "Wirangu", + "ybd": "Yangbye", + "yen": "Yendang", + "yma": "Yamphe", + "daf": "Dan", + "djl": "Djiwarli", + "ggr": "Aghu Tharnggalu", + "ilw": "Talur", + "izi": "Izi-Ezaa-Ikwo-Mgbo", + "meg": "Mea", + "mld": "Malakhel", + "mnt": "Maykulan", + "mwd": "Mudbura", + "myq": "Forest Maninka", + "nbx": "Ngura", + "nlr": "Ngarla", + "pcr": "Panang", + "ppr": "Piru", + "tgg": "Tangga", + "wit": "Wintu", + "xia": "Xiandao", + "yiy": "Yir Yoront", + "yos": "Yos", + "emo": "Emok", + "ggm": "Gugu Mini", + "leg": "Lengua", + "lmm": "Lamam", + "mhh": "Maskoy Pidgin", + "puz": "Purum Naga", + "sap": "Sanapaná", + "yuu": "Yugh", + "aam": "Aramanik", + "adp": "Adap", + "aue": "ǂKxʼauǁʼein", + "bmy": "Bemba (Democratic Republic of Congo)", + "bxx": "Borna (Democratic Republic of Congo)", + "byy": "Buya", + "dzd": "Daza", + "gfx": "Mangetti Dune ǃXung", + "gti": "Gbati-ri", + "ime": "Imeraguen", + "kbf": "Kakauhua", + "koj": "Sara Dunjo", + "kwq": "Kwak", + "kxe": "Kakihum", + "lii": "Lingkhim", + "mwj": "Maligo", + "nnx": "Ngong", + "oun": "ǃOǃung", + "pmu": "Mirpur Panjabi", + "sgo": "Songa", + "thx": "The", + "tsf": "Southwestern Tamang", + "uok": "Uokha", + "xsj": "Subi", + "yds": "Yiddish Sign Language", + "ymt": "Mator-Taygi-Karagas", + "ynh": "Yangho", + "bgm": "Baga Mboteni", + "btl": "Bhatola", + "cbe": "Chipiajes", + "cbh": "Cagua", + "coy": "Coyaima", + "cqu": "Chilean Quechua", + "cum": "Cumeral", + "duj": "Dhuwal", + "ggn": "Eastern Gurung", + "ggo": "Southern Gondi", + "guv": "Gey", + "iap": "Iapama", + "ill": "Iranun", + "kgc": "Kasseng", + "kox": "Coxima", + "ktr": "Kota Marudu Tinagas", + "kvs": "Kunggara", + "kzj": "Coastal Kadazan", + "kzt": "Tambunan Dusun", + "nad": "Nijadali", + "nts": "Natagaimas", + "ome": "Omejes", + "pmc": "Palumata", + "pod": "Ponares", + "ppa": "Pao", + "pry": "Pray 3", + "rna": "Runa", + "svr": "Savara", + "tdu": "Tempasuk Dusun", + "thc": "Tai Hang Tong", + "tid": "Tidong", + "tmp": "Tai Mène", + "tne": "Tinoc Kallahan", + "toe": "Tomedes", + "xba": "Kamba (Brazil)", + "xbx": "Kabixí", + "xip": "Xipináwa", + "xkh": "Karahawyana", + "yri": "Yarí", + "jeg": "Jeng", + "kgd": "Kataang", + "krm": "Krim", + "prb": "Lua'", + "puk": "Pu Ko", + "rie": "Rien", + "rsi": "Rennellese Sign Language", + "skk": "Sok", + "snh": "Shinabo", + "lsg": 
"Lyons Sign Language", + "mwx": "Mediak", + "mwy": "Mosiro", + "ncp": "Ndaktup", + "ais": "Nataoran Amis", + "asd": "Asas", + "dit": "Dirari", + "dud": "Hun-Saare", + "lba": "Lui", + "llo": "Khlor", + "myd": "Maramba", + "myi": "Mina (India)", + "nns": "Ningye", + "aoh": "Arma", + "ayy": "Tayabas Ayta", + "bbz": "Babalia Creole Arabic", + "bpb": "Barbacoas", + "cca": "Cauca", + "cdg": "Chamari", + "dgu": "Degaru", + "drr": "Dororo", + "ekc": "Eastern Karnic", + "gli": "Guliguli", + "kjf": "Khalaj", + "kxl": "Nepali Kurux", + "kxu": "Kui (India)", + "lmz": "Lumbee", + "nxu": "Narau", + "plp": "Palpa", + "sdm": "Semandang", + "tbb": "Tapeba", + "xrq": "Karranga", + "xtz": "Tasmanian", + "zir": "Ziriya", + "thw": "Thudam", + "bic": "Bikaru", + "bij": "Vaghat-Ya-Bijim-Legeri", + "blg": "Balau", + "gji": "Geji", + "mvm": "Muya", + "ngo": "Ngoni", + "pat": "Papitalai", + "vki": "Ija-Zuba", + "wra": "Warapu", + "ajt": "Judeo-Tunisian Arabic", + "cug": "Chungmboko", + "lak": "Laka (Nigeria)", + "lno": "Lango (South Sudan)", + "pii": "Pini", + "smd": "Sama", + "snb": "Sebuyau", + "uun": "Kulon-Pazeh", + "wrd": "Warduji", + "wya": "Wyandot", +} + + +iso639long = inverse_dict(iso639short) + +iso639code_retired = inverse_dict(iso639retired) diff --git a/venv/lib/python3.10/site-packages/nltk/lazyimport.py b/venv/lib/python3.10/site-packages/nltk/lazyimport.py new file mode 100644 index 0000000000000000000000000000000000000000..ee0c8e4451fff3a19c3608d0d08e3422a77fd8f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/lazyimport.py @@ -0,0 +1,142 @@ +# This module is from mx/DateTime/LazyModule.py and is +# distributed under the terms of the eGenix.com Public License Agreement +# https://www.egenix.com/products/eGenix.com-Public-License-1.1.0.pdf + +""" Helper to enable simple lazy module import. + + 'Lazy' means the actual import is deferred until an attribute is + requested from the module's namespace. This has the advantage of + allowing all imports to be done at the top of a script (in a + prominent and visible place) without having a great impact + on startup time. + + Copyright (c) 1999-2005, Marc-Andre Lemburg; mailto:mal@lemburg.com + See the documentation for further information on copyrights, + or contact the author. All Rights Reserved. +""" + +### Constants + +_debug = 0 + +### + + +class LazyModule: + + """Lazy module class. + + Lazy modules are imported into the given namespaces whenever a + non-special attribute (there are some attributes like __doc__ + that class instances handle without calling __getattr__) is + requested. The module is then registered under the given name + in locals usually replacing the import wrapper instance. The + import itself is done using globals as global namespace. + + Example of creating a lazy load module: + + ISO = LazyModule('ISO',locals(),globals()) + + Later, requesting an attribute from ISO will load the module + automatically into the locals() namespace, overriding the + LazyModule instance: + + t = ISO.Week(1998,1,1) + + """ + + # Flag which indicates whether the LazyModule is initialized or not + __lazymodule_init = 0 + + # Name of the module to load + __lazymodule_name = "" + + # Flag which indicates whether the module was loaded or not + __lazymodule_loaded = 0 + + # Locals dictionary where to register the module + __lazymodule_locals = None + + # Globals dictionary to use for the module import + __lazymodule_globals = None + + def __init__(self, name, locals, globals=None): + + """Create a LazyModule instance wrapping module name. 
+ + The module will later on be registered in locals under the + given module name. + + globals is optional and defaults to locals. + + """ + self.__lazymodule_locals = locals + if globals is None: + globals = locals + self.__lazymodule_globals = globals + mainname = globals.get("__name__", "") + if mainname: + self.__name__ = mainname + "." + name + self.__lazymodule_name = name + else: + self.__name__ = self.__lazymodule_name = name + self.__lazymodule_init = 1 + + def __lazymodule_import(self): + + """Import the module now.""" + # Load and register module + local_name = self.__lazymodule_name # e.g. "toolbox" + full_name = self.__name__ # e.g. "nltk.toolbox" + if self.__lazymodule_loaded: + return self.__lazymodule_locals[local_name] + if _debug: + print("LazyModule: Loading module %r" % full_name) + self.__lazymodule_locals[local_name] = module = __import__( + full_name, self.__lazymodule_locals, self.__lazymodule_globals, "*" + ) + + # Fill namespace with all symbols from original module to + # provide faster access. + self.__dict__.update(module.__dict__) + + # Set import flag + self.__dict__["__lazymodule_loaded"] = 1 + + if _debug: + print("LazyModule: Module %r loaded" % full_name) + return module + + def __getattr__(self, name): + + """Import the module on demand and get the attribute.""" + if self.__lazymodule_loaded: + raise AttributeError(name) + if _debug: + print( + "LazyModule: " + "Module load triggered by attribute %r read access" % name + ) + module = self.__lazymodule_import() + return getattr(module, name) + + def __setattr__(self, name, value): + + """Import the module on demand and set the attribute.""" + if not self.__lazymodule_init: + self.__dict__[name] = value + return + if self.__lazymodule_loaded: + self.__lazymodule_locals[self.__lazymodule_name] = value + self.__dict__[name] = value + return + if _debug: + print( + "LazyModule: " + "Module load triggered by attribute %r write access" % name + ) + module = self.__lazymodule_import() + setattr(module, name, value) + + def __repr__(self): + return "" % self.__name__ diff --git a/venv/lib/python3.10/site-packages/nltk/misc/__init__.py b/venv/lib/python3.10/site-packages/nltk/misc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ac9e0f7125810319ed560d0cdfdc0c1f0114b18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/misc/__init__.py @@ -0,0 +1,11 @@ +# Natural Language Toolkit: Miscellaneous modules +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from nltk.misc.babelfish import babelize_shell +from nltk.misc.chomsky import generate_chomsky +from nltk.misc.minimalset import MinimalSet +from nltk.misc.wordfinder import word_finder diff --git a/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/wordfinder.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/wordfinder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bed118522e612fbe8095a98e67d808d95b9beeeb Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/misc/__pycache__/wordfinder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/misc/babelfish.py b/venv/lib/python3.10/site-packages/nltk/misc/babelfish.py new file mode 100644 index 0000000000000000000000000000000000000000..d317d65a194578e28ffad94bd53803395b5e3c58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/misc/babelfish.py @@ -0,0 +1,10 @@ +""" +This module previously provided an 
interface to Babelfish online +translation service; this service is no longer available; this +module is kept in NLTK source code in order to provide better error +messages for people following the NLTK Book 2.0. +""" + + +def babelize_shell(): + print("Babelfish online translation service is no longer available.") diff --git a/venv/lib/python3.10/site-packages/nltk/misc/chomsky.py b/venv/lib/python3.10/site-packages/nltk/misc/chomsky.py new file mode 100644 index 0000000000000000000000000000000000000000..0632bca034512041b3e0cf9a6231f8ac1c131e4b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/misc/chomsky.py @@ -0,0 +1,134 @@ +# Chomsky random text generator, version 1.1, Raymond Hettinger, 2005/09/13 +# https://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440546 + +""" +CHOMSKY is an aid to writing linguistic papers in the style +of the great master. It is based on selected phrases taken +from actual books and articles written by Noam Chomsky. +Upon request, it assembles the phrases in the elegant +stylistic patterns that Chomsky is noted for. +To generate n sentences of linguistic wisdom, type + + (CHOMSKY n) -- for example + (CHOMSKY 5) generates half a screen of linguistic truth. +""" + +leadins = """To characterize a linguistic level L, + On the other hand, + This suggests that + It appears that + Furthermore, + We will bring evidence in favor of the following thesis: + To provide a constituent structure for T(Z,K), + From C1, it follows that + For any transformation which is sufficiently diversified in \ +application to be of any interest, + Analogously, + Clearly, + Note that + Of course, + Suppose, for instance, that + Thus + With this clarification, + Conversely, + We have already seen that + By combining adjunctions and certain deformations, + I suggested that these results would follow from the assumption that + If the position of the trace in (99c) were only relatively \ +inaccessible to movement, + However, this assumption is not correct, since + Comparing these examples with their parasitic gap counterparts in \ +(96) and (97), we see that + In the discussion of resumptive pronouns following (81), + So far, + Nevertheless, + For one thing, + Summarizing, then, we assume that + A consequence of the approach just outlined is that + Presumably, + On our assumptions, + It may be, then, that + It must be emphasized, once again, that + Let us continue to suppose that + Notice, incidentally, that """ +# List of LEADINs to buy time. + +subjects = """ the notion of level of grammaticalness + a case of semigrammaticalness of a different sort + most of the methodological work in modern linguistics + a subset of English sentences interesting on quite independent grounds + the natural general principle that will subsume this case + an important property of these three types of EC + any associated supporting element + the appearance of parasitic gaps in domains relatively inaccessible \ +to ordinary extraction + the speaker-hearer's linguistic intuition + the descriptive power of the base component + the earlier discussion of deviance + this analysis of a formative as a pair of sets of features + this selectionally introduced contextual feature + a descriptively adequate grammar + the fundamental error of regarding functional notions as categorial + relational information + the systematic use of complex symbols + the theory of syntactic features developed earlier""" +# List of SUBJECTs chosen for maximum professorial macho. 
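+
+# The VERB and OBJECT phrase lists follow.  generate_chomsky(), defined at
+# the end of this file, shuffles each of the four lists and joins one phrase
+# from each (leadin + subject + verb + object) per generated sentence.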
+ +verbs = """can be defined in such a way as to impose + delimits + suffices to account for + cannot be arbitrary in + is not subject to + does not readily tolerate + raises serious doubts about + is not quite equivalent to + does not affect the structure of + may remedy and, at the same time, eliminate + is not to be considered in determining + is to be regarded as + is unspecified with respect to + is, apparently, determined by + is necessary to impose an interpretation on + appears to correlate rather closely with + is rather different from""" +# List of VERBs chosen for autorecursive obfuscation. + +objects = """ problems of phonemic and morphological analysis. + a corpus of utterance tokens upon which conformity has been defined \ +by the paired utterance test. + the traditional practice of grammarians. + the levels of acceptability from fairly high (e.g. (99a)) to virtual \ +gibberish (e.g. (98d)). + a stipulation to place the constructions into these various categories. + a descriptive fact. + a parasitic gap construction. + the extended c-command discussed in connection with (34). + the ultimate standard that determines the accuracy of any proposed grammar. + the system of base rules exclusive of the lexicon. + irrelevant intervening contexts in selectional rules. + nondistinctness in the sense of distinctive feature theory. + a general convention regarding the forms of the grammar. + an abstract underlying order. + an important distinction in language use. + the requirement that branching is not tolerated within the dominance \ +scope of a complex symbol. + the strong generative capacity of the theory.""" +# List of OBJECTs selected for profound sententiousness. + +import random +import textwrap +from itertools import chain, islice + + +def generate_chomsky(times=5, line_length=72): + parts = [] + for part in (leadins, subjects, verbs, objects): + phraselist = list(map(str.strip, part.splitlines())) + random.shuffle(phraselist) + parts.append(phraselist) + output = chain.from_iterable(islice(zip(*parts), 0, times)) + print(textwrap.fill(" ".join(output), line_length)) + + +if __name__ == "__main__": + generate_chomsky() diff --git a/venv/lib/python3.10/site-packages/nltk/misc/minimalset.py b/venv/lib/python3.10/site-packages/nltk/misc/minimalset.py new file mode 100644 index 0000000000000000000000000000000000000000..50d1fa5b6e45193d15e6fa1d2aec687de503f1d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/misc/minimalset.py @@ -0,0 +1,85 @@ +# Natural Language Toolkit: Minimal Sets +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from collections import defaultdict + + +class MinimalSet: + """ + Find contexts where more than one possible target value can + appear. E.g. if targets are word-initial letters, and contexts + are the remainders of words, then we would like to find cases like + "fat" vs "cat", and "training" vs "draining". If targets are + parts-of-speech and contexts are words, then we would like to find + cases like wind (noun) 'air in rapid motion', vs wind (verb) + 'coil, wrap'. + """ + + def __init__(self, parameters=None): + """ + Create a new minimal set. 
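+
+        An illustrative example (not part of the original docstring), using
+        word-initial letters as targets and word remainders as contexts, as
+        in the class description above:
+
+            >>> ms = MinimalSet([("at", "f", "fat"), ("at", "c", "cat"),
+            ...                  ("og", "d", "dog")])
+            >>> ms.contexts()
+            ['at']
+            >>> sorted(ms.display_all("at"))
+            ['cat', 'fat']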
+ + :param parameters: The (context, target, display) tuples for the item + :type parameters: list(tuple(str, str, str)) + """ + self._targets = set() # the contrastive information + self._contexts = set() # what we are controlling for + self._seen = defaultdict(set) # to record what we have seen + self._displays = {} # what we will display + + if parameters: + for context, target, display in parameters: + self.add(context, target, display) + + def add(self, context, target, display): + """ + Add a new item to the minimal set, having the specified + context, target, and display form. + + :param context: The context in which the item of interest appears + :type context: str + :param target: The item of interest + :type target: str + :param display: The information to be reported for each item + :type display: str + """ + # Store the set of targets that occurred in this context + self._seen[context].add(target) + + # Keep track of which contexts and targets we have seen + self._contexts.add(context) + self._targets.add(target) + + # For a given context and target, store the display form + self._displays[(context, target)] = display + + def contexts(self, minimum=2): + """ + Determine which contexts occurred with enough distinct targets. + + :param minimum: the minimum number of distinct target forms + :type minimum: int + :rtype: list + """ + return [c for c in self._contexts if len(self._seen[c]) >= minimum] + + def display(self, context, target, default=""): + if (context, target) in self._displays: + return self._displays[(context, target)] + else: + return default + + def display_all(self, context): + result = [] + for target in self._targets: + x = self.display(context, target) + if x: + result.append(x) + return result + + def targets(self): + return self._targets diff --git a/venv/lib/python3.10/site-packages/nltk/misc/sort.py b/venv/lib/python3.10/site-packages/nltk/misc/sort.py new file mode 100644 index 0000000000000000000000000000000000000000..cb543d93929f45505475f9d985afea5e92f58a94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/misc/sort.py @@ -0,0 +1,176 @@ +# Natural Language Toolkit: List Sorting +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +This module provides a variety of list sorting algorithms, to +illustrate the many different algorithms (recipes) for solving a +problem, and how to analyze algorithms experimentally. +""" +# These algorithms are taken from: +# Levitin (2004) The Design and Analysis of Algorithms + +################################################################## +# Selection Sort +################################################################## + + +def selection(a): + """ + Selection Sort: scan the list to find its smallest element, then + swap it with the first element. The remainder of the list is one + element smaller; apply the same method to this list, and so on. + """ + count = 0 + + for i in range(len(a) - 1): + min = i + + for j in range(i + 1, len(a)): + if a[j] < a[min]: + min = j + + count += 1 + + a[min], a[i] = a[i], a[min] + + return count + + +################################################################## +# Bubble Sort +################################################################## + + +def bubble(a): + """ + Bubble Sort: compare adjacent elements of the list left-to-right, + and swap them if they are out of order. After one pass through + the list swapping adjacent items, the largest item will be in + the rightmost position. 
The remainder is one element smaller; + apply the same method to this list, and so on. + """ + count = 0 + for i in range(len(a) - 1): + for j in range(len(a) - i - 1): + if a[j + 1] < a[j]: + a[j], a[j + 1] = a[j + 1], a[j] + count += 1 + return count + + +################################################################## +# Merge Sort +################################################################## + + +def _merge_lists(b, c): + count = 0 + i = j = 0 + a = [] + while i < len(b) and j < len(c): + count += 1 + if b[i] <= c[j]: + a.append(b[i]) + i += 1 + else: + a.append(c[j]) + j += 1 + if i == len(b): + a += c[j:] + else: + a += b[i:] + return a, count + + +def merge(a): + """ + Merge Sort: split the list in half, and sort each half, then + combine the sorted halves. + """ + count = 0 + if len(a) > 1: + midpoint = len(a) // 2 + b = a[:midpoint] + c = a[midpoint:] + count_b = merge(b) + count_c = merge(c) + result, count_a = _merge_lists(b, c) + a[:] = result # copy the result back into a. + count = count_a + count_b + count_c + return count + + +################################################################## +# Quick Sort +################################################################## + + +def _partition(a, l, r): + p = a[l] + i = l + j = r + 1 + count = 0 + while True: + while i < r: + i += 1 + if a[i] >= p: + break + while j > l: + j -= 1 + if j < l or a[j] <= p: + break + a[i], a[j] = a[j], a[i] # swap + count += 1 + if i >= j: + break + a[i], a[j] = a[j], a[i] # undo last swap + a[l], a[j] = a[j], a[l] + return j, count + + +def _quick(a, l, r): + count = 0 + if l < r: + s, count = _partition(a, l, r) + count += _quick(a, l, s - 1) + count += _quick(a, s + 1, r) + return count + + +def quick(a): + return _quick(a, 0, len(a) - 1) + + +################################################################## +# Demonstration +################################################################## + + +def demo(): + from random import shuffle + + for size in (10, 20, 50, 100, 200, 500, 1000): + a = list(range(size)) + + # various sort methods + shuffle(a) + count_selection = selection(a) + shuffle(a) + count_bubble = bubble(a) + shuffle(a) + count_merge = merge(a) + shuffle(a) + count_quick = quick(a) + + print( + ("size=%5d: selection=%8d, bubble=%8d, " "merge=%6d, quick=%6d") + % (size, count_selection, count_bubble, count_merge, count_quick) + ) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/misc/wordfinder.py b/venv/lib/python3.10/site-packages/nltk/misc/wordfinder.py new file mode 100644 index 0000000000000000000000000000000000000000..e8ddca0dd6282e988ad38d287ae1029dadc98dfc --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/misc/wordfinder.py @@ -0,0 +1,139 @@ +# Natural Language Toolkit: Word Finder +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# Simplified from PHP version by Robert Klein +# http://fswordfinder.sourceforge.net/ + +import random + + +# reverse a word with probability 0.5 +def revword(word): + if random.randint(1, 2) == 1: + return word[::-1] + return word + + +# try to insert word at position x,y; direction encoded in xf,yf +def step(word, x, xf, y, yf, grid): + for i in range(len(word)): + if grid[xf(i)][yf(i)] != "" and grid[xf(i)][yf(i)] != word[i]: + return False + for i in range(len(word)): + grid[xf(i)][yf(i)] = word[i] + return True + + +# try to insert word at position x,y, in direction dir +def check(word, dir, x, y, grid, rows, 
cols): + if dir == 1: + if x - len(word) < 0 or y - len(word) < 0: + return False + return step(word, x, lambda i: x - i, y, lambda i: y - i, grid) + elif dir == 2: + if x - len(word) < 0: + return False + return step(word, x, lambda i: x - i, y, lambda i: y, grid) + elif dir == 3: + if x - len(word) < 0 or y + (len(word) - 1) >= cols: + return False + return step(word, x, lambda i: x - i, y, lambda i: y + i, grid) + elif dir == 4: + if y - len(word) < 0: + return False + return step(word, x, lambda i: x, y, lambda i: y - i, grid) + + +def wordfinder(words, rows=20, cols=20, attempts=50, alph="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): + """ + Attempt to arrange words into a letter-grid with the specified + number of rows and columns. Try each word in several positions + and directions, until it can be fitted into the grid, or the + maximum number of allowable attempts is exceeded. Returns a tuple + consisting of the grid and the words that were successfully + placed. + + :param words: the list of words to be put into the grid + :type words: list + :param rows: the number of rows in the grid + :type rows: int + :param cols: the number of columns in the grid + :type cols: int + :param attempts: the number of times to attempt placing a word + :type attempts: int + :param alph: the alphabet, to be used for filling blank cells + :type alph: list + :rtype: tuple + """ + + # place longer words first + words = sorted(words, key=len, reverse=True) + + grid = [] # the letter grid + used = [] # the words we used + + # initialize the grid + for i in range(rows): + grid.append([""] * cols) + + # try to place each word + for word in words: + word = word.strip().upper() # normalize + save = word # keep a record of the word + word = revword(word) + for attempt in range(attempts): + r = random.randint(0, len(word)) + dir = random.choice([1, 2, 3, 4]) + x = random.randint(0, rows) + y = random.randint(0, cols) + if dir == 1: + x += r + y += r + elif dir == 2: + x += r + elif dir == 3: + x += r + y -= r + elif dir == 4: + y += r + if 0 <= x < rows and 0 <= y < cols: + if check(word, dir, x, y, grid, rows, cols): + # used.append((save, dir, x, y, word)) + used.append(save) + break + + # Fill up the remaining spaces + for i in range(rows): + for j in range(cols): + if grid[i][j] == "": + grid[i][j] = random.choice(alph) + + return grid, used + + +def word_finder(): + from nltk.corpus import words + + wordlist = words.words() + random.shuffle(wordlist) + wordlist = wordlist[:200] + wordlist = [w for w in wordlist if 3 <= len(w) <= 12] + grid, used = wordfinder(wordlist) + + print("Word Finder\n") + for i in range(len(grid)): + for j in range(len(grid[i])): + print(grid[i][j], end=" ") + print() + print() + + for i in range(len(used)): + print("%d:" % (i + 1), used[i]) + + +if __name__ == "__main__": + word_finder() diff --git a/venv/lib/python3.10/site-packages/nltk/probability.py b/venv/lib/python3.10/site-packages/nltk/probability.py new file mode 100644 index 0000000000000000000000000000000000000000..a6de70732ac33e375c42d5e675aac124ffeafdf6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/probability.py @@ -0,0 +1,2578 @@ +# Natural Language Toolkit: Probability and Statistics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (additions) +# Trevor Cohn (additions) +# Peter Ljunglöf (additions) +# Liang Dong (additions) +# Geoffrey Sampson (additions) +# Ilia Kurenkov (additions) +# +# URL: +# For license information, see LICENSE.TXT + +""" +Classes for representing and 
processing probabilistic information. + +The ``FreqDist`` class is used to encode "frequency distributions", +which count the number of times that each outcome of an experiment +occurs. + +The ``ProbDistI`` class defines a standard interface for "probability +distributions", which encode the probability of each outcome for an +experiment. There are two types of probability distribution: + + - "derived probability distributions" are created from frequency + distributions. They attempt to model the probability distribution + that generated the frequency distribution. + - "analytic probability distributions" are created directly from + parameters (such as variance). + +The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface +are used to encode conditional distributions. Conditional probability +distributions can be derived or analytic; but currently the only +implementation of the ``ConditionalProbDistI`` interface is +``ConditionalProbDist``, a derived distribution. + +""" + +import array +import math +import random +import warnings +from abc import ABCMeta, abstractmethod +from collections import Counter, defaultdict +from functools import reduce + +from nltk.internals import raise_unorderable_types + +_NINF = float("-1e300") + +##////////////////////////////////////////////////////// +## Frequency Distributions +##////////////////////////////////////////////////////// + + +class FreqDist(Counter): + """ + A frequency distribution for the outcomes of an experiment. A + frequency distribution records the number of times each outcome of + an experiment has occurred. For example, a frequency distribution + could be used to record the frequency of each word type in a + document. Formally, a frequency distribution can be defined as a + function mapping from each sample to the number of times that + sample occurred as an outcome. + + Frequency distributions are generally constructed by running a + number of experiments, and incrementing the count for a sample + every time it is an outcome of an experiment. For example, the + following code will produce a frequency distribution that encodes + how often each word occurs in a text: + + >>> from nltk.tokenize import word_tokenize + >>> from nltk.probability import FreqDist + >>> sent = 'This is an example sentence' + >>> fdist = FreqDist() + >>> for word in word_tokenize(sent): + ... fdist[word.lower()] += 1 + + An equivalent way to do this is with the initializer: + + >>> fdist = FreqDist(word.lower() for word in word_tokenize(sent)) + + """ + + def __init__(self, samples=None): + """ + Construct a new frequency distribution. If ``samples`` is + given, then the frequency distribution will be initialized + with the count of each object in ``samples``; otherwise, it + will be initialized to be empty. + + In particular, ``FreqDist()`` returns an empty frequency + distribution; and ``FreqDist(samples)`` first creates an empty + frequency distribution, and then calls ``update`` with the + list ``samples``. + + :param samples: The samples to initialize the frequency + distribution with. + :type samples: Sequence + """ + Counter.__init__(self, samples) + + # Cached number of samples in this FreqDist + self._N = None + + def N(self): + """ + Return the total number of sample outcomes that have been + recorded by this FreqDist. For the number of unique + sample values (or bins) with counts greater than zero, use + ``FreqDist.B()``. 
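+
+        For example (an illustrative doctest, not in the original docstring):
+
+            >>> FreqDist('abbb').N()
+            4
+            >>> FreqDist('abbb').B()
+            2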
+ + :rtype: int + """ + if self._N is None: + # Not already cached, or cache has been invalidated + self._N = sum(self.values()) + return self._N + + def __setitem__(self, key, val): + """ + Override ``Counter.__setitem__()`` to invalidate the cached N + """ + self._N = None + super().__setitem__(key, val) + + def __delitem__(self, key): + """ + Override ``Counter.__delitem__()`` to invalidate the cached N + """ + self._N = None + super().__delitem__(key) + + def update(self, *args, **kwargs): + """ + Override ``Counter.update()`` to invalidate the cached N + """ + self._N = None + super().update(*args, **kwargs) + + def setdefault(self, key, val): + """ + Override ``Counter.setdefault()`` to invalidate the cached N + """ + self._N = None + super().setdefault(key, val) + + def B(self): + """ + Return the total number of sample values (or "bins") that + have counts greater than zero. For the total + number of sample outcomes recorded, use ``FreqDist.N()``. + (FreqDist.B() is the same as len(FreqDist).) + + :rtype: int + """ + return len(self) + + def hapaxes(self): + """ + Return a list of all samples that occur once (hapax legomena) + + :rtype: list + """ + return [item for item in self if self[item] == 1] + + def Nr(self, r, bins=None): + return self.r_Nr(bins)[r] + + def r_Nr(self, bins=None): + """ + Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0. + + :type bins: int + :param bins: The number of possible sample outcomes. ``bins`` + is used to calculate Nr(0). In particular, Nr(0) is + ``bins-self.B()``. If ``bins`` is not specified, it + defaults to ``self.B()`` (so Nr(0) will be 0). + :rtype: int + """ + + _r_Nr = defaultdict(int) + for count in self.values(): + _r_Nr[count] += 1 + + # Special case for Nr[0]: + _r_Nr[0] = bins - self.B() if bins is not None else 0 + + return _r_Nr + + def _cumulative_frequencies(self, samples): + """ + Return the cumulative frequencies of the specified samples. + If no samples are specified, all counts are returned, starting + with the largest. + + :param samples: the samples whose frequencies should be returned. + :type samples: any + :rtype: list(float) + """ + cf = 0.0 + for sample in samples: + cf += self[sample] + yield cf + + # slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs, + # here, freq() does probs + def freq(self, sample): + """ + Return the frequency of a given sample. The frequency of a + sample is defined as the count of that sample divided by the + total number of sample outcomes that have been recorded by + this FreqDist. The count of a sample is defined as the + number of times that sample outcome was recorded by this + FreqDist. Frequencies are always real numbers in the range + [0, 1]. + + :param sample: the sample whose frequency + should be returned. + :type sample: any + :rtype: float + """ + n = self.N() + if n == 0: + return 0 + return self[sample] / n + + def max(self): + """ + Return the sample with the greatest number of outcomes in this + frequency distribution. If two or more samples have the same + number of outcomes, return one of them; which sample is + returned is undefined. If no outcomes have occurred in this + frequency distribution, return None. + + :return: The sample with the maximum number of outcomes in this + frequency distribution. + :rtype: any or None + """ + if len(self) == 0: + raise ValueError( + "A FreqDist must have at least one sample before max is defined." 
+ ) + return self.most_common(1)[0][0] + + def plot( + self, *args, title="", cumulative=False, percents=False, show=True, **kwargs + ): + """ + Plot samples from the frequency distribution + displaying the most frequent sample first. If an integer + parameter is supplied, stop after this many samples have been + plotted. For a cumulative plot, specify cumulative=True. Additional + ``**kwargs`` are passed to matplotlib's plot function. + (Requires Matplotlib to be installed.) + + :param title: The title for the graph. + :type title: str + :param cumulative: Whether the plot is cumulative. (default = False) + :type cumulative: bool + :param percents: Whether the plot uses percents instead of counts. (default = False) + :type percents: bool + :param show: Whether to show the plot, or only return the ax. + :type show: bool + """ + try: + import matplotlib.pyplot as plt + except ImportError as e: + raise ValueError( + "The plot function requires matplotlib to be installed." + "See https://matplotlib.org/" + ) from e + + if len(args) == 0: + args = [len(self)] + samples = [item for item, _ in self.most_common(*args)] + + if cumulative: + freqs = list(self._cumulative_frequencies(samples)) + ylabel = "Cumulative " + else: + freqs = [self[sample] for sample in samples] + ylabel = "" + + if percents: + freqs = [f / self.N() * 100 for f in freqs] + ylabel += "Percents" + else: + ylabel += "Counts" + + ax = plt.gca() + ax.grid(True, color="silver") + + if "linewidth" not in kwargs: + kwargs["linewidth"] = 2 + if title: + ax.set_title(title) + + ax.plot(freqs, **kwargs) + ax.set_xticks(range(len(samples))) + ax.set_xticklabels([str(s) for s in samples], rotation=90) + ax.set_xlabel("Samples") + ax.set_ylabel(ylabel) + + if show: + plt.show() + + return ax + + def tabulate(self, *args, **kwargs): + """ + Tabulate the given samples from the frequency distribution (cumulative), + displaying the most frequent sample first. If an integer + parameter is supplied, stop after this many samples have been + plotted. + + :param samples: The samples to plot (default is all samples) + :type samples: list + :param cumulative: A flag to specify whether the freqs are cumulative (default = False) + :type title: bool + """ + if len(args) == 0: + args = [len(self)] + samples = _get_kwarg( + kwargs, "samples", [item for item, _ in self.most_common(*args)] + ) + + cumulative = _get_kwarg(kwargs, "cumulative", False) + if cumulative: + freqs = list(self._cumulative_frequencies(samples)) + else: + freqs = [self[sample] for sample in samples] + # percents = [f * 100 for f in freqs] only in ProbDist? + + width = max(len(f"{s}") for s in samples) + width = max(width, max(len("%d" % f) for f in freqs)) + + for i in range(len(samples)): + print("%*s" % (width, samples[i]), end=" ") + print() + for i in range(len(samples)): + print("%*d" % (width, freqs[i]), end=" ") + print() + + def copy(self): + """ + Create a copy of this frequency distribution. + + :rtype: FreqDist + """ + return self.__class__(self) + + # Mathematical operatiors + + def __add__(self, other): + """ + Add counts from two counters. + + >>> FreqDist('abbb') + FreqDist('bcc') + FreqDist({'b': 4, 'c': 2, 'a': 1}) + + """ + return self.__class__(super().__add__(other)) + + def __sub__(self, other): + """ + Subtract count, but keep only results with positive counts. 
+ + >>> FreqDist('abbbc') - FreqDist('bccd') + FreqDist({'b': 2, 'a': 1}) + + """ + return self.__class__(super().__sub__(other)) + + def __or__(self, other): + """ + Union is the maximum of value in either of the input counters. + + >>> FreqDist('abbb') | FreqDist('bcc') + FreqDist({'b': 3, 'c': 2, 'a': 1}) + + """ + return self.__class__(super().__or__(other)) + + def __and__(self, other): + """ + Intersection is the minimum of corresponding counts. + + >>> FreqDist('abbb') & FreqDist('bcc') + FreqDist({'b': 1}) + + """ + return self.__class__(super().__and__(other)) + + def __le__(self, other): + """ + Returns True if this frequency distribution is a subset of the other + and for no key the value exceeds the value of the same key from + the other frequency distribution. + + The <= operator forms partial order and satisfying the axioms + reflexivity, antisymmetry and transitivity. + + >>> FreqDist('a') <= FreqDist('a') + True + >>> a = FreqDist('abc') + >>> b = FreqDist('aabc') + >>> (a <= b, b <= a) + (True, False) + >>> FreqDist('a') <= FreqDist('abcd') + True + >>> FreqDist('abc') <= FreqDist('xyz') + False + >>> FreqDist('xyz') <= FreqDist('abc') + False + >>> c = FreqDist('a') + >>> d = FreqDist('aa') + >>> e = FreqDist('aaa') + >>> c <= d and d <= e and c <= e + True + """ + if not isinstance(other, FreqDist): + raise_unorderable_types("<=", self, other) + return set(self).issubset(other) and all( + self[key] <= other[key] for key in self + ) + + def __ge__(self, other): + if not isinstance(other, FreqDist): + raise_unorderable_types(">=", self, other) + return set(self).issuperset(other) and all( + self[key] >= other[key] for key in other + ) + + __lt__ = lambda self, other: self <= other and not self == other + __gt__ = lambda self, other: self >= other and not self == other + + def __repr__(self): + """ + Return a string representation of this FreqDist. + + :rtype: string + """ + return self.pformat() + + def pprint(self, maxlen=10, stream=None): + """ + Print a string representation of this FreqDist to 'stream' + + :param maxlen: The maximum number of items to print + :type maxlen: int + :param stream: The stream to print to. stdout by default + """ + print(self.pformat(maxlen=maxlen), file=stream) + + def pformat(self, maxlen=10): + """ + Return a string representation of this FreqDist. + + :param maxlen: The maximum number of items to display + :type maxlen: int + :rtype: string + """ + items = ["{!r}: {!r}".format(*item) for item in self.most_common(maxlen)] + if len(self) > maxlen: + items.append("...") + return "FreqDist({{{0}}})".format(", ".join(items)) + + def __str__(self): + """ + Return a string representation of this FreqDist. + + :rtype: string + """ + return "" % (len(self), self.N()) + + def __iter__(self): + """ + Return an iterator which yields tokens ordered by frequency. + + :rtype: iterator + """ + for token, _ in self.most_common(self.B()): + yield token + + +##////////////////////////////////////////////////////// +## Probability Distributions +##////////////////////////////////////////////////////// + + +class ProbDistI(metaclass=ABCMeta): + """ + A probability distribution for the outcomes of an experiment. A + probability distribution specifies how likely it is that an + experiment will have any given outcome. For example, a + probability distribution could be used to predict the probability + that a token in a document will have a given type. 
Formally, a + probability distribution can be defined as a function mapping from + samples to nonnegative real numbers, such that the sum of every + number in the function's range is 1.0. A ``ProbDist`` is often + used to model the probability distribution of the experiment used + to generate a frequency distribution. + """ + + SUM_TO_ONE = True + """True if the probabilities of the samples in this probability + distribution will always sum to one.""" + + @abstractmethod + def __init__(self): + """ + Classes inheriting from ProbDistI should implement __init__. + """ + + @abstractmethod + def prob(self, sample): + """ + Return the probability for a given sample. Probabilities + are always real numbers in the range [0, 1]. + + :param sample: The sample whose probability + should be returned. + :type sample: any + :rtype: float + """ + + def logprob(self, sample): + """ + Return the base 2 logarithm of the probability for a given sample. + + :param sample: The sample whose probability + should be returned. + :type sample: any + :rtype: float + """ + # Default definition, in terms of prob() + p = self.prob(sample) + return math.log(p, 2) if p != 0 else _NINF + + @abstractmethod + def max(self): + """ + Return the sample with the greatest probability. If two or + more samples have the same probability, return one of them; + which sample is returned is undefined. + + :rtype: any + """ + + @abstractmethod + def samples(self): + """ + Return a list of all samples that have nonzero probabilities. + Use ``prob`` to find the probability of each sample. + + :rtype: list + """ + + # cf self.SUM_TO_ONE + def discount(self): + """ + Return the ratio by which counts are discounted on average: c*/c + + :rtype: float + """ + return 0.0 + + # Subclasses should define more efficient implementations of this, + # where possible. + def generate(self): + """ + Return a randomly selected sample from this probability distribution. + The probability of returning each sample ``samp`` is equal to + ``self.prob(samp)``. + """ + p = random.random() + p_init = p + for sample in self.samples(): + p -= self.prob(sample) + if p <= 0: + return sample + # allow for some rounding error: + if p < 0.0001: + return sample + # we *should* never get here + if self.SUM_TO_ONE: + warnings.warn( + "Probability distribution %r sums to %r; generate()" + " is returning an arbitrary sample." % (self, p_init - p) + ) + return random.choice(list(self.samples())) + + +class UniformProbDist(ProbDistI): + """ + A probability distribution that assigns equal probability to each + sample in a given set; and a zero probability to all other + samples. + """ + + def __init__(self, samples): + """ + Construct a new uniform probability distribution, that assigns + equal probability to each sample in ``samples``. + + :param samples: The samples that should be given uniform + probability. + :type samples: list + :raise ValueError: If ``samples`` is empty. + """ + if len(samples) == 0: + raise ValueError( + "A Uniform probability distribution must " + "have at least one sample." 
+ ) + self._sampleset = set(samples) + self._prob = 1.0 / len(self._sampleset) + self._samples = list(self._sampleset) + + def prob(self, sample): + return self._prob if sample in self._sampleset else 0 + + def max(self): + return self._samples[0] + + def samples(self): + return self._samples + + def __repr__(self): + return "" % len(self._sampleset) + + +class RandomProbDist(ProbDistI): + """ + Generates a random probability distribution whereby each sample + will be between 0 and 1 with equal probability (uniform random distribution. + Also called a continuous uniform distribution). + """ + + def __init__(self, samples): + if len(samples) == 0: + raise ValueError( + "A probability distribution must " + "have at least one sample." + ) + self._probs = self.unirand(samples) + self._samples = list(self._probs.keys()) + + @classmethod + def unirand(cls, samples): + """ + The key function that creates a randomized initial distribution + that still sums to 1. Set as a dictionary of prob values so that + it can still be passed to MutableProbDist and called with identical + syntax to UniformProbDist + """ + samples = set(samples) + randrow = [random.random() for i in range(len(samples))] + total = sum(randrow) + for i, x in enumerate(randrow): + randrow[i] = x / total + + total = sum(randrow) + if total != 1: + # this difference, if present, is so small (near NINF) that it + # can be subtracted from any element without risking probs not (0 1) + randrow[-1] -= total - 1 + + return {s: randrow[i] for i, s in enumerate(samples)} + + def max(self): + if not hasattr(self, "_max"): + self._max = max((p, v) for (v, p) in self._probs.items())[1] + return self._max + + def prob(self, sample): + return self._probs.get(sample, 0) + + def samples(self): + return self._samples + + def __repr__(self): + return "" % len(self._probs) + + +class DictionaryProbDist(ProbDistI): + """ + A probability distribution whose probabilities are directly + specified by a given dictionary. The given dictionary maps + samples to probabilities. + """ + + def __init__(self, prob_dict=None, log=False, normalize=False): + """ + Construct a new probability distribution from the given + dictionary, which maps values to probabilities (or to log + probabilities, if ``log`` is true). If ``normalize`` is + true, then the probability values are scaled by a constant + factor such that they sum to 1. + + If called without arguments, the resulting probability + distribution assigns zero probability to all values. + """ + + self._prob_dict = prob_dict.copy() if prob_dict is not None else {} + self._log = log + + # Normalize the distribution, if requested. + if normalize: + if len(prob_dict) == 0: + raise ValueError( + "A DictionaryProbDist must have at least one sample " + + "before it can be normalized." 
+ ) + if log: + value_sum = sum_logs(list(self._prob_dict.values())) + if value_sum <= _NINF: + logp = math.log(1.0 / len(prob_dict), 2) + for x in prob_dict: + self._prob_dict[x] = logp + else: + for (x, p) in self._prob_dict.items(): + self._prob_dict[x] -= value_sum + else: + value_sum = sum(self._prob_dict.values()) + if value_sum == 0: + p = 1.0 / len(prob_dict) + for x in prob_dict: + self._prob_dict[x] = p + else: + norm_factor = 1.0 / value_sum + for (x, p) in self._prob_dict.items(): + self._prob_dict[x] *= norm_factor + + def prob(self, sample): + if self._log: + return 2 ** (self._prob_dict[sample]) if sample in self._prob_dict else 0 + else: + return self._prob_dict.get(sample, 0) + + def logprob(self, sample): + if self._log: + return self._prob_dict.get(sample, _NINF) + else: + if sample not in self._prob_dict: + return _NINF + elif self._prob_dict[sample] == 0: + return _NINF + else: + return math.log(self._prob_dict[sample], 2) + + def max(self): + if not hasattr(self, "_max"): + self._max = max((p, v) for (v, p) in self._prob_dict.items())[1] + return self._max + + def samples(self): + return self._prob_dict.keys() + + def __repr__(self): + return "" % len(self._prob_dict) + + +class MLEProbDist(ProbDistI): + """ + The maximum likelihood estimate for the probability distribution + of the experiment used to generate a frequency distribution. The + "maximum likelihood estimate" approximates the probability of + each sample as the frequency of that sample in the frequency + distribution. + """ + + def __init__(self, freqdist, bins=None): + """ + Use the maximum likelihood estimate to create a probability + distribution for the experiment used to generate ``freqdist``. + + :type freqdist: FreqDist + :param freqdist: The frequency distribution that the + probability estimates should be based on. + """ + self._freqdist = freqdist + + def freqdist(self): + """ + Return the frequency distribution that this probability + distribution is based on. + + :rtype: FreqDist + """ + return self._freqdist + + def prob(self, sample): + return self._freqdist.freq(sample) + + def max(self): + return self._freqdist.max() + + def samples(self): + return self._freqdist.keys() + + def __repr__(self): + """ + :rtype: str + :return: A string representation of this ``ProbDist``. + """ + return "" % self._freqdist.N() + + +class LidstoneProbDist(ProbDistI): + """ + The Lidstone estimate for the probability distribution of the + experiment used to generate a frequency distribution. The + "Lidstone estimate" is parameterized by a real number *gamma*, + which typically ranges from 0 to 1. The Lidstone estimate + approximates the probability of a sample with count *c* from an + experiment with *N* outcomes and *B* bins as + ``c+gamma)/(N+B*gamma)``. This is equivalent to adding + *gamma* to the count for each bin, and taking the maximum + likelihood estimate of the resulting frequency distribution. + """ + + SUM_TO_ONE = False + + def __init__(self, freqdist, gamma, bins=None): + """ + Use the Lidstone estimate to create a probability distribution + for the experiment used to generate ``freqdist``. + + :type freqdist: FreqDist + :param freqdist: The frequency distribution that the + probability estimates should be based on. + :type gamma: float + :param gamma: A real number used to parameterize the + estimate. The Lidstone estimate is equivalent to adding + *gamma* to the count for each bin, and taking the + maximum likelihood estimate of the resulting frequency + distribution. 
+ :type bins: int + :param bins: The number of sample values that can be generated + by the experiment that is described by the probability + distribution. This value must be correctly set for the + probabilities of the sample values to sum to one. If + ``bins`` is not specified, it defaults to ``freqdist.B()``. + """ + if (bins == 0) or (bins is None and freqdist.N() == 0): + name = self.__class__.__name__[:-8] + raise ValueError( + "A %s probability distribution " % name + "must have at least one bin." + ) + if (bins is not None) and (bins < freqdist.B()): + name = self.__class__.__name__[:-8] + raise ValueError( + "\nThe number of bins in a %s distribution " % name + + "(%d) must be greater than or equal to\n" % bins + + "the number of bins in the FreqDist used " + + "to create it (%d)." % freqdist.B() + ) + + self._freqdist = freqdist + self._gamma = float(gamma) + self._N = self._freqdist.N() + + if bins is None: + bins = freqdist.B() + self._bins = bins + + self._divisor = self._N + bins * gamma + if self._divisor == 0.0: + # In extreme cases we force the probability to be 0, + # which it will be, since the count will be 0: + self._gamma = 0 + self._divisor = 1 + + def freqdist(self): + """ + Return the frequency distribution that this probability + distribution is based on. + + :rtype: FreqDist + """ + return self._freqdist + + def prob(self, sample): + c = self._freqdist[sample] + return (c + self._gamma) / self._divisor + + def max(self): + # For Lidstone distributions, probability is monotonic with + # frequency, so the most probable sample is the one that + # occurs most frequently. + return self._freqdist.max() + + def samples(self): + return self._freqdist.keys() + + def discount(self): + gb = self._gamma * self._bins + return gb / (self._N + gb) + + def __repr__(self): + """ + Return a string representation of this ``ProbDist``. + + :rtype: str + """ + return "" % self._freqdist.N() + + +class LaplaceProbDist(LidstoneProbDist): + """ + The Laplace estimate for the probability distribution of the + experiment used to generate a frequency distribution. The + "Laplace estimate" approximates the probability of a sample with + count *c* from an experiment with *N* outcomes and *B* bins as + *(c+1)/(N+B)*. This is equivalent to adding one to the count for + each bin, and taking the maximum likelihood estimate of the + resulting frequency distribution. + """ + + def __init__(self, freqdist, bins=None): + """ + Use the Laplace estimate to create a probability distribution + for the experiment used to generate ``freqdist``. + + :type freqdist: FreqDist + :param freqdist: The frequency distribution that the + probability estimates should be based on. + :type bins: int + :param bins: The number of sample values that can be generated + by the experiment that is described by the probability + distribution. This value must be correctly set for the + probabilities of the sample values to sum to one. If + ``bins`` is not specified, it defaults to ``freqdist.B()``. + """ + LidstoneProbDist.__init__(self, freqdist, 1, bins) + + def __repr__(self): + """ + :rtype: str + :return: A string representation of this ``ProbDist``. + """ + return "" % self._freqdist.N() + + +class ELEProbDist(LidstoneProbDist): + """ + The expected likelihood estimate for the probability distribution + of the experiment used to generate a frequency distribution. 
The + "expected likelihood estimate" approximates the probability of a + sample with count *c* from an experiment with *N* outcomes and + *B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5 + to the count for each bin, and taking the maximum likelihood + estimate of the resulting frequency distribution. + """ + + def __init__(self, freqdist, bins=None): + """ + Use the expected likelihood estimate to create a probability + distribution for the experiment used to generate ``freqdist``. + + :type freqdist: FreqDist + :param freqdist: The frequency distribution that the + probability estimates should be based on. + :type bins: int + :param bins: The number of sample values that can be generated + by the experiment that is described by the probability + distribution. This value must be correctly set for the + probabilities of the sample values to sum to one. If + ``bins`` is not specified, it defaults to ``freqdist.B()``. + """ + LidstoneProbDist.__init__(self, freqdist, 0.5, bins) + + def __repr__(self): + """ + Return a string representation of this ``ProbDist``. + + :rtype: str + """ + return "" % self._freqdist.N() + + +class HeldoutProbDist(ProbDistI): + """ + The heldout estimate for the probability distribution of the + experiment used to generate two frequency distributions. These + two frequency distributions are called the "heldout frequency + distribution" and the "base frequency distribution." The + "heldout estimate" uses uses the "heldout frequency + distribution" to predict the probability of each sample, given its + frequency in the "base frequency distribution". + + In particular, the heldout estimate approximates the probability + for a sample that occurs *r* times in the base distribution as + the average frequency in the heldout distribution of all samples + that occur *r* times in the base distribution. + + This average frequency is *Tr[r]/(Nr[r].N)*, where: + + - *Tr[r]* is the total count in the heldout distribution for + all samples that occur *r* times in the base distribution. + - *Nr[r]* is the number of samples that occur *r* times in + the base distribution. + - *N* is the number of outcomes recorded by the heldout + frequency distribution. + + In order to increase the efficiency of the ``prob`` member + function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r* + when the ``HeldoutProbDist`` is created. + + :type _estimate: list(float) + :ivar _estimate: A list mapping from *r*, the number of + times that a sample occurs in the base distribution, to the + probability estimate for that sample. ``_estimate[r]`` is + calculated by finding the average frequency in the heldout + distribution of all samples that occur *r* times in the base + distribution. In particular, ``_estimate[r]`` = + *Tr[r]/(Nr[r].N)*. + :type _max_r: int + :ivar _max_r: The maximum number of times that any sample occurs + in the base distribution. ``_max_r`` is used to decide how + large ``_estimate`` must be. + """ + + SUM_TO_ONE = False + + def __init__(self, base_fdist, heldout_fdist, bins=None): + """ + Use the heldout estimate to create a probability distribution + for the experiment used to generate ``base_fdist`` and + ``heldout_fdist``. + + :type base_fdist: FreqDist + :param base_fdist: The base frequency distribution. + :type heldout_fdist: FreqDist + :param heldout_fdist: The heldout frequency distribution. + :type bins: int + :param bins: The number of sample values that can be generated + by the experiment that is described by the probability + distribution. 
This value must be correctly set for the + probabilities of the sample values to sum to one. If + ``bins`` is not specified, it defaults to ``freqdist.B()``. + """ + + self._base_fdist = base_fdist + self._heldout_fdist = heldout_fdist + + # The max number of times any sample occurs in base_fdist. + self._max_r = base_fdist[base_fdist.max()] + + # Calculate Tr, Nr, and N. + Tr = self._calculate_Tr() + r_Nr = base_fdist.r_Nr(bins) + Nr = [r_Nr[r] for r in range(self._max_r + 1)] + N = heldout_fdist.N() + + # Use Tr, Nr, and N to compute the probability estimate for + # each value of r. + self._estimate = self._calculate_estimate(Tr, Nr, N) + + def _calculate_Tr(self): + """ + Return the list *Tr*, where *Tr[r]* is the total count in + ``heldout_fdist`` for all samples that occur *r* + times in ``base_fdist``. + + :rtype: list(float) + """ + Tr = [0.0] * (self._max_r + 1) + for sample in self._heldout_fdist: + r = self._base_fdist[sample] + Tr[r] += self._heldout_fdist[sample] + return Tr + + def _calculate_estimate(self, Tr, Nr, N): + """ + Return the list *estimate*, where *estimate[r]* is the probability + estimate for any sample that occurs *r* times in the base frequency + distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*. + In the special case that *N[r]=0*, *estimate[r]* will never be used; + so we define *estimate[r]=None* for those cases. + + :rtype: list(float) + :type Tr: list(float) + :param Tr: the list *Tr*, where *Tr[r]* is the total count in + the heldout distribution for all samples that occur *r* + times in base distribution. + :type Nr: list(float) + :param Nr: The list *Nr*, where *Nr[r]* is the number of + samples that occur *r* times in the base distribution. + :type N: int + :param N: The total number of outcomes recorded by the heldout + frequency distribution. + """ + estimate = [] + for r in range(self._max_r + 1): + if Nr[r] == 0: + estimate.append(None) + else: + estimate.append(Tr[r] / (Nr[r] * N)) + return estimate + + def base_fdist(self): + """ + Return the base frequency distribution that this probability + distribution is based on. + + :rtype: FreqDist + """ + return self._base_fdist + + def heldout_fdist(self): + """ + Return the heldout frequency distribution that this + probability distribution is based on. + + :rtype: FreqDist + """ + return self._heldout_fdist + + def samples(self): + return self._base_fdist.keys() + + def prob(self, sample): + # Use our precomputed probability estimate. + r = self._base_fdist[sample] + return self._estimate[r] + + def max(self): + # Note: the Heldout estimation is *not* necessarily monotonic; + # so this implementation is currently broken. However, it + # should give the right answer *most* of the time. :) + return self._base_fdist.max() + + def discount(self): + raise NotImplementedError() + + def __repr__(self): + """ + :rtype: str + :return: A string representation of this ``ProbDist``. + """ + s = "" + return s % (self._base_fdist.N(), self._heldout_fdist.N()) + + +class CrossValidationProbDist(ProbDistI): + """ + The cross-validation estimate for the probability distribution of + the experiment used to generate a set of frequency distribution. + The "cross-validation estimate" for the probability of a sample + is found by averaging the held-out estimates for the sample in + each pair of frequency distributions. 
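+
+    A small construction sketch (toy data, purely for illustration):
+
+    >>> fd1, fd2, fd3 = FreqDist('aab'), FreqDist('abb'), FreqDist('abc')
+    >>> cv = CrossValidationProbDist([fd1, fd2, fd3], bins=3)
+
+    Here ``cv.prob('a')`` is the mean of the six pairwise held-out estimates,
+    one ``HeldoutProbDist`` for each ordered pair of the three distributions.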
+ """ + + SUM_TO_ONE = False + + def __init__(self, freqdists, bins): + """ + Use the cross-validation estimate to create a probability + distribution for the experiment used to generate + ``freqdists``. + + :type freqdists: list(FreqDist) + :param freqdists: A list of the frequency distributions + generated by the experiment. + :type bins: int + :param bins: The number of sample values that can be generated + by the experiment that is described by the probability + distribution. This value must be correctly set for the + probabilities of the sample values to sum to one. If + ``bins`` is not specified, it defaults to ``freqdist.B()``. + """ + self._freqdists = freqdists + + # Create a heldout probability distribution for each pair of + # frequency distributions in freqdists. + self._heldout_probdists = [] + for fdist1 in freqdists: + for fdist2 in freqdists: + if fdist1 is not fdist2: + probdist = HeldoutProbDist(fdist1, fdist2, bins) + self._heldout_probdists.append(probdist) + + def freqdists(self): + """ + Return the list of frequency distributions that this ``ProbDist`` is based on. + + :rtype: list(FreqDist) + """ + return self._freqdists + + def samples(self): + # [xx] nb: this is not too efficient + return set(sum((list(fd) for fd in self._freqdists), [])) + + def prob(self, sample): + # Find the average probability estimate returned by each + # heldout distribution. + prob = 0.0 + for heldout_probdist in self._heldout_probdists: + prob += heldout_probdist.prob(sample) + return prob / len(self._heldout_probdists) + + def discount(self): + raise NotImplementedError() + + def __repr__(self): + """ + Return a string representation of this ``ProbDist``. + + :rtype: str + """ + return "" % len(self._freqdists) + + +class WittenBellProbDist(ProbDistI): + """ + The Witten-Bell estimate of a probability distribution. This distribution + allocates uniform probability mass to as yet unseen events by using the + number of events that have only been seen once. The probability mass + reserved for unseen events is equal to *T / (N + T)* + where *T* is the number of observed event types and *N* is the total + number of observed events. This equates to the maximum likelihood estimate + of a new type event occurring. The remaining probability mass is discounted + such that all probability estimates sum to one, yielding: + + - *p = T / Z (N + T)*, if count = 0 + - *p = c / (N + T)*, otherwise + """ + + def __init__(self, freqdist, bins=None): + """ + Creates a distribution of Witten-Bell probability estimates. This + distribution allocates uniform probability mass to as yet unseen + events by using the number of events that have only been seen once. The + probability mass reserved for unseen events is equal to *T / (N + T)* + where *T* is the number of observed event types and *N* is the total + number of observed events. This equates to the maximum likelihood + estimate of a new type event occurring. The remaining probability mass + is discounted such that all probability estimates sum to one, + yielding: + + - *p = T / Z (N + T)*, if count = 0 + - *p = c / (N + T)*, otherwise + + The parameters *T* and *N* are taken from the ``freqdist`` parameter + (the ``B()`` and ``N()`` values). The normalizing factor *Z* is + calculated using these values along with the ``bins`` parameter. + + :param freqdist: The frequency counts upon which to base the + estimation. + :type freqdist: FreqDist + :param bins: The number of possible event types. 
This must be at least + as large as the number of bins in the ``freqdist``. If None, then + it's assumed to be equal to that of the ``freqdist`` + :type bins: int + """ + assert bins is None or bins >= freqdist.B(), ( + "bins parameter must not be less than %d=freqdist.B()" % freqdist.B() + ) + if bins is None: + bins = freqdist.B() + self._freqdist = freqdist + self._T = self._freqdist.B() + self._Z = bins - self._freqdist.B() + self._N = self._freqdist.N() + # self._P0 is P(0), precalculated for efficiency: + if self._N == 0: + # if freqdist is empty, we approximate P(0) by a UniformProbDist: + self._P0 = 1.0 / self._Z + else: + self._P0 = self._T / (self._Z * (self._N + self._T)) + + def prob(self, sample): + # inherit docs from ProbDistI + c = self._freqdist[sample] + return c / (self._N + self._T) if c != 0 else self._P0 + + def max(self): + return self._freqdist.max() + + def samples(self): + return self._freqdist.keys() + + def freqdist(self): + return self._freqdist + + def discount(self): + raise NotImplementedError() + + def __repr__(self): + """ + Return a string representation of this ``ProbDist``. + + :rtype: str + """ + return "" % self._freqdist.N() + + +##////////////////////////////////////////////////////// +## Good-Turing Probability Distributions +##////////////////////////////////////////////////////// + +# Good-Turing frequency estimation was contributed by Alan Turing and +# his statistical assistant I.J. Good, during their collaboration in +# the WWII. It is a statistical technique for predicting the +# probability of occurrence of objects belonging to an unknown number +# of species, given past observations of such objects and their +# species. (In drawing balls from an urn, the 'objects' would be balls +# and the 'species' would be the distinct colors of the balls (finite +# but unknown in number). +# +# Good-Turing method calculates the probability mass to assign to +# events with zero or low counts based on the number of events with +# higher counts. It does so by using the adjusted count *c\**: +# +# - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1 +# - *things with frequency zero in training* = N(1) for c == 0 +# +# where *c* is the original count, *N(i)* is the number of event types +# observed with count *i*. We can think the count of unseen as the count +# of frequency one (see Jurafsky & Martin 2nd Edition, p101). +# +# This method is problematic because the situation ``N(c+1) == 0`` +# is quite common in the original Good-Turing estimation; smoothing or +# interpolation of *N(i)* values is essential in practice. +# +# Bill Gale and Geoffrey Sampson present a simple and effective approach, +# Simple Good-Turing. As a smoothing curve they simply use a power curve: +# +# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic +# relationship) +# +# They estimate a and b by simple linear regression technique on the +# logarithmic form of the equation: +# +# log Nr = a + b*log(r) +# +# However, they suggest that such a simple curve is probably only +# appropriate for high values of r. For low values of r, they use the +# measured Nr directly. (see M&S, p.213) +# +# Gale and Sampson propose to use r while the difference between r and +# r* is 1.96 greater than the standard deviation, and switch to r* if +# it is less or equal: +# +# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr)) +# +# The 1.96 coefficient correspond to a 0.05 significance criterion, +# some implementations can use a coefficient of 1.65 for a 0.1 +# significance criterion. 
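+#
+# A minimal sketch of the raw (unsmoothed) adjustment, with hypothetical
+# counts chosen purely for illustration:
+#
+#     def raw_gt_adjusted_count(c, Nr):
+#         # Nr maps a count r to N(r), the number of types seen exactly r times
+#         return (c + 1) * Nr.get(c + 1, 0) / Nr[c]
+#
+#     raw_gt_adjusted_count(1, {1: 10, 2: 5})   # (1 + 1) * 5 / 10 == 1.0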
+# + +##////////////////////////////////////////////////////// +## Simple Good-Turing Probablity Distributions +##////////////////////////////////////////////////////// + + +class SimpleGoodTuringProbDist(ProbDistI): + """ + SimpleGoodTuring ProbDist approximates from frequency to frequency of + frequency into a linear line under log space by linear regression. + Details of Simple Good-Turing algorithm can be found in: + + - Good Turing smoothing without tears" (Gale & Sampson 1995), + Journal of Quantitative Linguistics, vol. 2 pp. 217-237. + - "Speech and Language Processing (Jurafsky & Martin), + 2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c)) + - https://www.grsampson.net/RGoodTur.html + + Given a set of pair (xi, yi), where the xi denotes the frequency and + yi denotes the frequency of frequency, we want to minimize their + square variation. E(x) and E(y) represent the mean of xi and yi. + + - slope: b = sigma ((xi-E(x)(yi-E(y))) / sigma ((xi-E(x))(xi-E(x))) + - intercept: a = E(y) - b.E(x) + """ + + SUM_TO_ONE = False + + def __init__(self, freqdist, bins=None): + """ + :param freqdist: The frequency counts upon which to base the + estimation. + :type freqdist: FreqDist + :param bins: The number of possible event types. This must be + larger than the number of bins in the ``freqdist``. If None, + then it's assumed to be equal to ``freqdist``.B() + 1 + :type bins: int + """ + assert ( + bins is None or bins > freqdist.B() + ), "bins parameter must not be less than %d=freqdist.B()+1" % (freqdist.B() + 1) + if bins is None: + bins = freqdist.B() + 1 + self._freqdist = freqdist + self._bins = bins + r, nr = self._r_Nr() + self.find_best_fit(r, nr) + self._switch(r, nr) + self._renormalize(r, nr) + + def _r_Nr_non_zero(self): + r_Nr = self._freqdist.r_Nr() + del r_Nr[0] + return r_Nr + + def _r_Nr(self): + """ + Split the frequency distribution in two list (r, Nr), where Nr(r) > 0 + """ + nonzero = self._r_Nr_non_zero() + + if not nonzero: + return [], [] + return zip(*sorted(nonzero.items())) + + def find_best_fit(self, r, nr): + """ + Use simple linear regression to tune parameters self._slope and + self._intercept in the log-log space based on count and Nr(count) + (Work in log space to avoid floating point underflow.) + """ + # For higher sample frequencies the data points becomes horizontal + # along line Nr=1. To create a more evident linear model in log-log + # space, we average positive Nr values with the surrounding zero + # values. (Church and Gale, 1991) + + if not r or not nr: + # Empty r or nr? + return + + zr = [] + for j in range(len(r)): + i = r[j - 1] if j > 0 else 0 + k = 2 * r[j] - i if j == len(r) - 1 else r[j + 1] + zr_ = 2.0 * nr[j] / (k - i) + zr.append(zr_) + + log_r = [math.log(i) for i in r] + log_zr = [math.log(i) for i in zr] + + xy_cov = x_var = 0.0 + x_mean = sum(log_r) / len(log_r) + y_mean = sum(log_zr) / len(log_zr) + for (x, y) in zip(log_r, log_zr): + xy_cov += (x - x_mean) * (y - y_mean) + x_var += (x - x_mean) ** 2 + self._slope = xy_cov / x_var if x_var != 0 else 0.0 + if self._slope >= -1: + warnings.warn( + "SimpleGoodTuring did not find a proper best fit " + "line for smoothing probabilities of occurrences. " + "The probability estimates are likely to be " + "unreliable." + ) + self._intercept = y_mean - self._slope * x_mean + + def _switch(self, r, nr): + """ + Calculate the r frontier where we must switch from Nr to Sr + when estimating E[Nr]. 
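+
+        Roughly: the observed Nr values are kept for small r, and the smoothed
+        estimate Sr takes over at the first r where the smoothed and unsmoothed
+        r* agree to within 1.96 standard deviations (or where the sequence of
+        observed r values ends or has a gap).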
+ """ + for i, r_ in enumerate(r): + if len(r) == i + 1 or r[i + 1] != r_ + 1: + # We are at the end of r, or there is a gap in r + self._switch_at = r_ + break + + Sr = self.smoothedNr + smooth_r_star = (r_ + 1) * Sr(r_ + 1) / Sr(r_) + unsmooth_r_star = (r_ + 1) * nr[i + 1] / nr[i] + + std = math.sqrt(self._variance(r_, nr[i], nr[i + 1])) + if abs(unsmooth_r_star - smooth_r_star) <= 1.96 * std: + self._switch_at = r_ + break + + def _variance(self, r, nr, nr_1): + r = float(r) + nr = float(nr) + nr_1 = float(nr_1) + return (r + 1.0) ** 2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr) + + def _renormalize(self, r, nr): + """ + It is necessary to renormalize all the probability estimates to + ensure a proper probability distribution results. This can be done + by keeping the estimate of the probability mass for unseen items as + N(1)/N and renormalizing all the estimates for previously seen items + (as Gale and Sampson (1995) propose). (See M&S P.213, 1999) + """ + prob_cov = 0.0 + for r_, nr_ in zip(r, nr): + prob_cov += nr_ * self._prob_measure(r_) + if prob_cov: + self._renormal = (1 - self._prob_measure(0)) / prob_cov + + def smoothedNr(self, r): + """ + Return the number of samples with count r. + + :param r: The amount of frequency. + :type r: int + :rtype: float + """ + + # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic + # relationship) + # Estimate a and b by simple linear regression technique on + # the logarithmic form of the equation: log Nr = a + b*log(r) + + return math.exp(self._intercept + self._slope * math.log(r)) + + def prob(self, sample): + """ + Return the sample's probability. + + :param sample: sample of the event + :type sample: str + :rtype: float + """ + count = self._freqdist[sample] + p = self._prob_measure(count) + if count == 0: + if self._bins == self._freqdist.B(): + p = 0.0 + else: + p = p / (self._bins - self._freqdist.B()) + else: + p = p * self._renormal + return p + + def _prob_measure(self, count): + if count == 0 and self._freqdist.N() == 0: + return 1.0 + elif count == 0 and self._freqdist.N() != 0: + return self._freqdist.Nr(1) / self._freqdist.N() + + if self._switch_at > count: + Er_1 = self._freqdist.Nr(count + 1) + Er = self._freqdist.Nr(count) + else: + Er_1 = self.smoothedNr(count + 1) + Er = self.smoothedNr(count) + + r_star = (count + 1) * Er_1 / Er + return r_star / self._freqdist.N() + + def check(self): + prob_sum = 0.0 + for i in range(0, len(self._Nr)): + prob_sum += self._Nr[i] * self._prob_measure(i) / self._renormal + print("Probability Sum:", prob_sum) + # assert prob_sum != 1.0, "probability sum should be one!" + + def discount(self): + """ + This function returns the total mass of probability transfers from the + seen samples to the unseen samples. + """ + return self.smoothedNr(1) / self._freqdist.N() + + def max(self): + return self._freqdist.max() + + def samples(self): + return self._freqdist.keys() + + def freqdist(self): + return self._freqdist + + def __repr__(self): + """ + Return a string representation of this ``ProbDist``. + + :rtype: str + """ + return "" % self._freqdist.N() + + +class MutableProbDist(ProbDistI): + """ + An mutable probdist where the probabilities may be easily modified. This + simply copies an existing probdist, storing the probability values in a + mutable dictionary and providing an update method. + """ + + def __init__(self, prob_dist, samples, store_logs=True): + """ + Creates the mutable probdist based on the given prob_dist and using + the list of samples given. 
These values are stored as log + probabilities if the store_logs flag is set. + + :param prob_dist: the distribution from which to garner the + probabilities + :type prob_dist: ProbDist + :param samples: the complete set of samples + :type samples: sequence of any + :param store_logs: whether to store the probabilities as logarithms + :type store_logs: bool + """ + self._samples = samples + self._sample_dict = {samples[i]: i for i in range(len(samples))} + self._data = array.array("d", [0.0]) * len(samples) + for i in range(len(samples)): + if store_logs: + self._data[i] = prob_dist.logprob(samples[i]) + else: + self._data[i] = prob_dist.prob(samples[i]) + self._logs = store_logs + + def max(self): + # inherit documentation + return max((p, v) for (v, p) in self._sample_dict.items())[1] + + def samples(self): + # inherit documentation + return self._samples + + def prob(self, sample): + # inherit documentation + i = self._sample_dict.get(sample) + if i is None: + return 0.0 + return 2 ** (self._data[i]) if self._logs else self._data[i] + + def logprob(self, sample): + # inherit documentation + i = self._sample_dict.get(sample) + if i is None: + return float("-inf") + return self._data[i] if self._logs else math.log(self._data[i], 2) + + def update(self, sample, prob, log=True): + """ + Update the probability for the given sample. This may cause the object + to stop being the valid probability distribution - the user must + ensure that they update the sample probabilities such that all samples + have probabilities between 0 and 1 and that all probabilities sum to + one. + + :param sample: the sample for which to update the probability + :type sample: any + :param prob: the new probability + :type prob: float + :param log: is the probability already logged + :type log: bool + """ + i = self._sample_dict.get(sample) + assert i is not None + if self._logs: + self._data[i] = prob if log else math.log(prob, 2) + else: + self._data[i] = 2 ** (prob) if log else prob + + +##///////////////////////////////////////////////////// +## Kneser-Ney Probability Distribution +##////////////////////////////////////////////////////// + +# This method for calculating probabilities was introduced in 1995 by Reinhard +# Kneser and Hermann Ney. It was meant to improve the accuracy of language +# models that use backing-off to deal with sparse data. The authors propose two +# ways of doing so: a marginal distribution constraint on the back-off +# distribution and a leave-one-out distribution. For a start, the first one is +# implemented as a class below. +# +# The idea behind a back-off n-gram model is that we have a series of +# frequency distributions for our n-grams so that in case we have not seen a +# given n-gram during training (and as a result have a 0 probability for it) we +# can 'back off' (hence the name!) and try testing whether we've seen the +# n-1-gram part of the n-gram in training. +# +# The novelty of Kneser and Ney's approach was that they decided to fiddle +# around with the way this latter, backed off probability was being calculated +# whereas their peers seemed to focus on the primary probability. +# +# The implementation below uses one of the techniques described in their paper +# titled "Improved backing-off for n-gram language modeling." In the same paper +# another technique is introduced to attempt to smooth the back-off +# distribution as well as the primary one. There is also a much-cited +# modification of this method proposed by Chen and Goodman. 
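+#
+# A hedged usage sketch (the toy corpus and the resulting numbers are
+# hypothetical, shown only to illustrate the API defined in this module):
+#
+#     from nltk.probability import FreqDist, KneserNeyProbDist
+#     from nltk.util import trigrams
+#
+#     tokens = "the cat sat on the mat the cat ate".split()
+#     kn = KneserNeyProbDist(FreqDist(trigrams(tokens)))
+#     kn.prob(("the", "cat", "sat"))   # (1 - 0.75) / 2 = 0.125 with the
+#                                      # default discount of 0.75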
+# +# In order for the implementation of Kneser-Ney to be more efficient, some +# changes have been made to the original algorithm. Namely, the calculation of +# the normalizing function gamma has been significantly simplified and +# combined slightly differently with beta. None of these changes affect the +# nature of the algorithm, but instead aim to cut out unnecessary calculations +# and take advantage of storing and retrieving information in dictionaries +# where possible. + + +class KneserNeyProbDist(ProbDistI): + """ + Kneser-Ney estimate of a probability distribution. This is a version of + back-off that counts how likely an n-gram is provided the n-1-gram had + been seen in training. Extends the ProbDistI interface, requires a trigram + FreqDist instance to train on. Optionally, a different from default discount + value can be specified. The default discount is set to 0.75. + + """ + + def __init__(self, freqdist, bins=None, discount=0.75): + """ + :param freqdist: The trigram frequency distribution upon which to base + the estimation + :type freqdist: FreqDist + :param bins: Included for compatibility with nltk.tag.hmm + :type bins: int or float + :param discount: The discount applied when retrieving counts of + trigrams + :type discount: float (preferred, but can be set to int) + """ + + if not bins: + self._bins = freqdist.B() + else: + self._bins = bins + self._D = discount + + # cache for probability calculation + self._cache = {} + + # internal bigram and trigram frequency distributions + self._bigrams = defaultdict(int) + self._trigrams = freqdist + + # helper dictionaries used to calculate probabilities + self._wordtypes_after = defaultdict(float) + self._trigrams_contain = defaultdict(float) + self._wordtypes_before = defaultdict(float) + for w0, w1, w2 in freqdist: + self._bigrams[(w0, w1)] += freqdist[(w0, w1, w2)] + self._wordtypes_after[(w0, w1)] += 1 + self._trigrams_contain[w1] += 1 + self._wordtypes_before[(w1, w2)] += 1 + + def prob(self, trigram): + # sample must be a triple + if len(trigram) != 3: + raise ValueError("Expected an iterable with 3 members.") + trigram = tuple(trigram) + w0, w1, w2 = trigram + + if trigram in self._cache: + return self._cache[trigram] + else: + # if the sample trigram was seen during training + if trigram in self._trigrams: + prob = (self._trigrams[trigram] - self.discount()) / self._bigrams[ + (w0, w1) + ] + + # else if the 'rougher' environment was seen during training + elif (w0, w1) in self._bigrams and (w1, w2) in self._wordtypes_before: + aftr = self._wordtypes_after[(w0, w1)] + bfr = self._wordtypes_before[(w1, w2)] + + # the probability left over from alphas + leftover_prob = (aftr * self.discount()) / self._bigrams[(w0, w1)] + + # the beta (including normalization) + beta = bfr / (self._trigrams_contain[w1] - aftr) + + prob = leftover_prob * beta + + # else the sample was completely unseen during training + else: + prob = 0.0 + + self._cache[trigram] = prob + return prob + + def discount(self): + """ + Return the value by which counts are discounted. By default set to 0.75. + + :rtype: float + """ + return self._D + + def set_discount(self, discount): + """ + Set the value by which counts are discounted to the value of discount. 
+ + :param discount: the new value to discount counts by + :type discount: float (preferred, but int possible) + :rtype: None + """ + self._D = discount + + def samples(self): + return self._trigrams.keys() + + def max(self): + return self._trigrams.max() + + def __repr__(self): + """ + Return a string representation of this ProbDist + + :rtype: str + """ + return f">> from nltk.probability import ConditionalFreqDist + >>> from nltk.tokenize import word_tokenize + >>> sent = "the the the dog dog some other words that we do not care about" + >>> cfdist = ConditionalFreqDist() + >>> for word in word_tokenize(sent): + ... condition = len(word) + ... cfdist[condition][word] += 1 + + An equivalent way to do this is with the initializer: + + >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent)) + + The frequency distribution for each condition is accessed using + the indexing operator: + + >>> cfdist[3] + FreqDist({'the': 3, 'dog': 2, 'not': 1}) + >>> cfdist[3].freq('the') + 0.5 + >>> cfdist[3]['dog'] + 2 + + When the indexing operator is used to access the frequency + distribution for a condition that has not been accessed before, + ``ConditionalFreqDist`` creates a new empty FreqDist for that + condition. + + """ + + def __init__(self, cond_samples=None): + """ + Construct a new empty conditional frequency distribution. In + particular, the count for every sample, under every condition, + is zero. + + :param cond_samples: The samples to initialize the conditional + frequency distribution with + :type cond_samples: Sequence of (condition, sample) tuples + """ + defaultdict.__init__(self, FreqDist) + + if cond_samples: + for (cond, sample) in cond_samples: + self[cond][sample] += 1 + + def __reduce__(self): + kv_pairs = ((cond, self[cond]) for cond in self.conditions()) + return (self.__class__, (), None, None, kv_pairs) + + def conditions(self): + """ + Return a list of the conditions that have been accessed for + this ``ConditionalFreqDist``. Use the indexing operator to + access the frequency distribution for a given condition. + Note that the frequency distributions for some conditions + may contain zero sample outcomes. + + :rtype: list + """ + return list(self.keys()) + + def N(self): + """ + Return the total number of sample outcomes that have been + recorded by this ``ConditionalFreqDist``. + + :rtype: int + """ + return sum(fdist.N() for fdist in self.values()) + + def plot( + self, + *args, + samples=None, + title="", + cumulative=False, + percents=False, + conditions=None, + show=True, + **kwargs, + ): + """ + Plot the given samples from the conditional frequency distribution. + For a cumulative plot, specify cumulative=True. Additional ``*args`` and + ``**kwargs`` are passed to matplotlib's plot function. + (Requires Matplotlib to be installed.) + + :param samples: The samples to plot + :type samples: list + :param title: The title for the graph + :type title: str + :param cumulative: Whether the plot is cumulative. (default = False) + :type cumulative: bool + :param percents: Whether the plot uses percents instead of counts. (default = False) + :type percents: bool + :param conditions: The conditions to plot (default is all) + :type conditions: list + :param show: Whether to show the plot, or only return the ax. + :type show: bool + """ + try: + import matplotlib.pyplot as plt # import statement fix + except ImportError as e: + raise ValueError( + "The plot function requires matplotlib to be installed." 
+ "See https://matplotlib.org/" + ) from e + + if not conditions: + conditions = self.conditions() + else: + conditions = [c for c in conditions if c in self] + if not samples: + samples = sorted({v for c in conditions for v in self[c]}) + if "linewidth" not in kwargs: + kwargs["linewidth"] = 2 + ax = plt.gca() + if conditions: + freqs = [] + for condition in conditions: + if cumulative: + # freqs should be a list of list where each sub list will be a frequency of a condition + freq = list(self[condition]._cumulative_frequencies(samples)) + else: + freq = [self[condition][sample] for sample in samples] + + if percents: + freq = [f / self[condition].N() * 100 for f in freq] + + freqs.append(freq) + + if cumulative: + ylabel = "Cumulative " + legend_loc = "lower right" + else: + ylabel = "" + legend_loc = "upper right" + + if percents: + ylabel += "Percents" + else: + ylabel += "Counts" + + i = 0 + for freq in freqs: + kwargs["label"] = conditions[i] # label for each condition + i += 1 + ax.plot(freq, *args, **kwargs) + ax.legend(loc=legend_loc) + ax.grid(True, color="silver") + ax.set_xticks(range(len(samples))) + ax.set_xticklabels([str(s) for s in samples], rotation=90) + if title: + ax.set_title(title) + ax.set_xlabel("Samples") + ax.set_ylabel(ylabel) + + if show: + plt.show() + + return ax + + def tabulate(self, *args, **kwargs): + """ + Tabulate the given samples from the conditional frequency distribution. + + :param samples: The samples to plot + :type samples: list + :param conditions: The conditions to plot (default is all) + :type conditions: list + :param cumulative: A flag to specify whether the freqs are cumulative (default = False) + :type title: bool + """ + + cumulative = _get_kwarg(kwargs, "cumulative", False) + conditions = _get_kwarg(kwargs, "conditions", sorted(self.conditions())) + samples = _get_kwarg( + kwargs, + "samples", + sorted({v for c in conditions if c in self for v in self[c]}), + ) # this computation could be wasted + + width = max(len("%s" % s) for s in samples) + freqs = dict() + for c in conditions: + if cumulative: + freqs[c] = list(self[c]._cumulative_frequencies(samples)) + else: + freqs[c] = [self[c][sample] for sample in samples] + width = max(width, max(len("%d" % f) for f in freqs[c])) + + condition_size = max(len("%s" % c) for c in conditions) + print(" " * condition_size, end=" ") + for s in samples: + print("%*s" % (width, s), end=" ") + print() + for c in conditions: + print("%*s" % (condition_size, c), end=" ") + for f in freqs[c]: + print("%*d" % (width, f), end=" ") + print() + + # Mathematical operators + + def __add__(self, other): + """ + Add counts from two ConditionalFreqDists. + """ + if not isinstance(other, ConditionalFreqDist): + return NotImplemented + result = self.copy() + for cond in other.conditions(): + result[cond] += other[cond] + return result + + def __sub__(self, other): + """ + Subtract count, but keep only results with positive counts. + """ + if not isinstance(other, ConditionalFreqDist): + return NotImplemented + result = self.copy() + for cond in other.conditions(): + result[cond] -= other[cond] + if not result[cond]: + del result[cond] + return result + + def __or__(self, other): + """ + Union is the maximum of value in either of the input counters. 
+ """ + if not isinstance(other, ConditionalFreqDist): + return NotImplemented + result = self.copy() + for cond in other.conditions(): + result[cond] |= other[cond] + return result + + def __and__(self, other): + """ + Intersection is the minimum of corresponding counts. + """ + if not isinstance(other, ConditionalFreqDist): + return NotImplemented + result = ConditionalFreqDist() + for cond in self.conditions(): + newfreqdist = self[cond] & other[cond] + if newfreqdist: + result[cond] = newfreqdist + return result + + # @total_ordering doesn't work here, since the class inherits from a builtin class + def __le__(self, other): + if not isinstance(other, ConditionalFreqDist): + raise_unorderable_types("<=", self, other) + return set(self.conditions()).issubset(other.conditions()) and all( + self[c] <= other[c] for c in self.conditions() + ) + + def __lt__(self, other): + if not isinstance(other, ConditionalFreqDist): + raise_unorderable_types("<", self, other) + return self <= other and self != other + + def __ge__(self, other): + if not isinstance(other, ConditionalFreqDist): + raise_unorderable_types(">=", self, other) + return other <= self + + def __gt__(self, other): + if not isinstance(other, ConditionalFreqDist): + raise_unorderable_types(">", self, other) + return other < self + + def deepcopy(self): + from copy import deepcopy + + return deepcopy(self) + + copy = deepcopy + + def __repr__(self): + """ + Return a string representation of this ``ConditionalFreqDist``. + + :rtype: str + """ + return "" % len(self) + + +class ConditionalProbDistI(dict, metaclass=ABCMeta): + """ + A collection of probability distributions for a single experiment + run under different conditions. Conditional probability + distributions are used to estimate the likelihood of each sample, + given the condition under which the experiment was run. For + example, a conditional probability distribution could be used to + estimate the probability of each word type in a document, given + the length of the word type. Formally, a conditional probability + distribution can be defined as a function that maps from each + condition to the ``ProbDist`` for the experiment under that + condition. + """ + + @abstractmethod + def __init__(self): + """ + Classes inheriting from ConditionalProbDistI should implement __init__. + """ + + def conditions(self): + """ + Return a list of the conditions that are represented by + this ``ConditionalProbDist``. Use the indexing operator to + access the probability distribution for a given condition. + + :rtype: list + """ + return list(self.keys()) + + def __repr__(self): + """ + Return a string representation of this ``ConditionalProbDist``. + + :rtype: str + """ + return "<%s with %d conditions>" % (type(self).__name__, len(self)) + + +class ConditionalProbDist(ConditionalProbDistI): + """ + A conditional probability distribution modeling the experiments + that were used to generate a conditional frequency distribution. + A ConditionalProbDist is constructed from a + ``ConditionalFreqDist`` and a ``ProbDist`` factory: + + - The ``ConditionalFreqDist`` specifies the frequency + distribution for each condition. + - The ``ProbDist`` factory is a function that takes a + condition's frequency distribution, and returns its + probability distribution. A ``ProbDist`` class's name (such as + ``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify + that class's constructor. 
+ + The first argument to the ``ProbDist`` factory is the frequency + distribution that it should model; and the remaining arguments are + specified by the ``factory_args`` parameter to the + ``ConditionalProbDist`` constructor. For example, the following + code constructs a ``ConditionalProbDist``, where the probability + distribution for each condition is an ``ELEProbDist`` with 10 bins: + + >>> from nltk.corpus import brown + >>> from nltk.probability import ConditionalFreqDist + >>> from nltk.probability import ConditionalProbDist, ELEProbDist + >>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000]) + >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10) + >>> cpdist['passed'].max() + 'VBD' + >>> cpdist['passed'].prob('VBD') #doctest: +ELLIPSIS + 0.423... + + """ + + def __init__(self, cfdist, probdist_factory, *factory_args, **factory_kw_args): + """ + Construct a new conditional probability distribution, based on + the given conditional frequency distribution and ``ProbDist`` + factory. + + :type cfdist: ConditionalFreqDist + :param cfdist: The ``ConditionalFreqDist`` specifying the + frequency distribution for each condition. + :type probdist_factory: class or function + :param probdist_factory: The function or class that maps + a condition's frequency distribution to its probability + distribution. The function is called with the frequency + distribution as its first argument, + ``factory_args`` as its remaining arguments, and + ``factory_kw_args`` as keyword arguments. + :type factory_args: (any) + :param factory_args: Extra arguments for ``probdist_factory``. + These arguments are usually used to specify extra + properties for the probability distributions of individual + conditions, such as the number of bins they contain. + :type factory_kw_args: (any) + :param factory_kw_args: Extra keyword arguments for ``probdist_factory``. + """ + self._probdist_factory = probdist_factory + self._factory_args = factory_args + self._factory_kw_args = factory_kw_args + + for condition in cfdist: + self[condition] = probdist_factory( + cfdist[condition], *factory_args, **factory_kw_args + ) + + def __missing__(self, key): + self[key] = self._probdist_factory( + FreqDist(), *self._factory_args, **self._factory_kw_args + ) + return self[key] + + +class DictionaryConditionalProbDist(ConditionalProbDistI): + """ + An alternative ConditionalProbDist that simply wraps a dictionary of + ProbDists rather than creating these from FreqDists. + """ + + def __init__(self, probdist_dict): + """ + :param probdist_dict: a dictionary containing the probdists indexed + by the conditions + :type probdist_dict: dict any -> probdist + """ + self.update(probdist_dict) + + def __missing__(self, key): + self[key] = DictionaryProbDist() + return self[key] + + +##////////////////////////////////////////////////////// +## Adding in log-space. +##////////////////////////////////////////////////////// + +# If the difference is bigger than this, then just take the bigger one: +_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2) + + +def add_logs(logx, logy): + """ + Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return + *log(x+y)*. Conceptually, this is the same as returning + ``log(2**(logx)+2**(logy))``, but the actual implementation + avoids overflow errors that could result from direct computation. 
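+
+    A quick sanity check: since ``2**-2 + 2**-2 == 2**-1``,
+
+    >>> add_logs(-2.0, -2.0)
+    -1.0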
+ """ + if logx < logy + _ADD_LOGS_MAX_DIFF: + return logy + if logy < logx + _ADD_LOGS_MAX_DIFF: + return logx + base = min(logx, logy) + return base + math.log(2 ** (logx - base) + 2 ** (logy - base), 2) + + +def sum_logs(logs): + return reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF + + +##////////////////////////////////////////////////////// +## Probabilistic Mix-in +##////////////////////////////////////////////////////// + + +class ProbabilisticMixIn: + """ + A mix-in class to associate probabilities with other classes + (trees, rules, etc.). To use the ``ProbabilisticMixIn`` class, + define a new class that derives from an existing class and from + ProbabilisticMixIn. You will need to define a new constructor for + the new class, which explicitly calls the constructors of both its + parent classes. For example: + + >>> from nltk.probability import ProbabilisticMixIn + >>> class A: + ... def __init__(self, x, y): self.data = (x,y) + ... + >>> class ProbabilisticA(A, ProbabilisticMixIn): + ... def __init__(self, x, y, **prob_kwarg): + ... A.__init__(self, x, y) + ... ProbabilisticMixIn.__init__(self, **prob_kwarg) + + See the documentation for the ProbabilisticMixIn + ``constructor<__init__>`` for information about the arguments it + expects. + + You should generally also redefine the string representation + methods, the comparison methods, and the hashing method. + """ + + def __init__(self, **kwargs): + """ + Initialize this object's probability. This initializer should + be called by subclass constructors. ``prob`` should generally be + the first argument for those constructors. + + :param prob: The probability associated with the object. + :type prob: float + :param logprob: The log of the probability associated with + the object. + :type logprob: float + """ + if "prob" in kwargs: + if "logprob" in kwargs: + raise TypeError("Must specify either prob or logprob " "(not both)") + else: + ProbabilisticMixIn.set_prob(self, kwargs["prob"]) + elif "logprob" in kwargs: + ProbabilisticMixIn.set_logprob(self, kwargs["logprob"]) + else: + self.__prob = self.__logprob = None + + def set_prob(self, prob): + """ + Set the probability associated with this object to ``prob``. + + :param prob: The new probability + :type prob: float + """ + self.__prob = prob + self.__logprob = None + + def set_logprob(self, logprob): + """ + Set the log probability associated with this object to + ``logprob``. I.e., set the probability associated with this + object to ``2**(logprob)``. + + :param logprob: The new log probability + :type logprob: float + """ + self.__logprob = logprob + self.__prob = None + + def prob(self): + """ + Return the probability associated with this object. + + :rtype: float + """ + if self.__prob is None: + if self.__logprob is None: + return None + self.__prob = 2 ** (self.__logprob) + return self.__prob + + def logprob(self): + """ + Return ``log(p)``, where ``p`` is the probability associated + with this object. 
+ + :rtype: float + """ + if self.__logprob is None: + if self.__prob is None: + return None + self.__logprob = math.log(self.__prob, 2) + return self.__logprob + + +class ImmutableProbabilisticMixIn(ProbabilisticMixIn): + def set_prob(self, prob): + raise ValueError("%s is immutable" % self.__class__.__name__) + + def set_logprob(self, prob): + raise ValueError("%s is immutable" % self.__class__.__name__) + + +## Helper function for processing keyword arguments + + +def _get_kwarg(kwargs, key, default): + if key in kwargs: + arg = kwargs[key] + del kwargs[key] + else: + arg = default + return arg + + +##////////////////////////////////////////////////////// +## Demonstration +##////////////////////////////////////////////////////// + + +def _create_rand_fdist(numsamples, numoutcomes): + """ + Create a new frequency distribution, with random samples. The + samples are numbers from 1 to ``numsamples``, and are generated by + summing two numbers, each of which has a uniform distribution. + """ + + fdist = FreqDist() + for x in range(numoutcomes): + y = random.randint(1, (1 + numsamples) // 2) + random.randint( + 0, numsamples // 2 + ) + fdist[y] += 1 + return fdist + + +def _create_sum_pdist(numsamples): + """ + Return the true probability distribution for the experiment + ``_create_rand_fdist(numsamples, x)``. + """ + fdist = FreqDist() + for x in range(1, (1 + numsamples) // 2 + 1): + for y in range(0, numsamples // 2 + 1): + fdist[x + y] += 1 + return MLEProbDist(fdist) + + +def demo(numsamples=6, numoutcomes=500): + """ + A demonstration of frequency distributions and probability + distributions. This demonstration creates three frequency + distributions with, and uses them to sample a random process with + ``numsamples`` samples. Each frequency distribution is sampled + ``numoutcomes`` times. These three frequency distributions are + then used to build six probability distributions. Finally, the + probability estimates of these distributions are compared to the + actual probability of each sample. + + :type numsamples: int + :param numsamples: The number of samples to use in each demo + frequency distributions. + :type numoutcomes: int + :param numoutcomes: The total number of outcomes for each + demo frequency distribution. These outcomes are divided into + ``numsamples`` bins. + :rtype: None + """ + + # Randomly sample a stochastic process three times. + fdist1 = _create_rand_fdist(numsamples, numoutcomes) + fdist2 = _create_rand_fdist(numsamples, numoutcomes) + fdist3 = _create_rand_fdist(numsamples, numoutcomes) + + # Use our samples to create probability distributions. + pdists = [ + MLEProbDist(fdist1), + LidstoneProbDist(fdist1, 0.5, numsamples), + HeldoutProbDist(fdist1, fdist2, numsamples), + HeldoutProbDist(fdist2, fdist1, numsamples), + CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples), + SimpleGoodTuringProbDist(fdist1), + SimpleGoodTuringProbDist(fdist1, 7), + _create_sum_pdist(numsamples), + ] + + # Find the probability of each sample. + vals = [] + for n in range(1, numsamples + 1): + vals.append(tuple([n, fdist1.freq(n)] + [pdist.prob(n) for pdist in pdists])) + + # Print the results in a formatted table. 
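+ # Each row lists the sample value, its observed frequency in fdist1, the
+ # estimate from each probability distribution, and, in the final column,
+ # the true probability under the generating process.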
+ print( + "%d samples (1-%d); %d outcomes were sampled for each FreqDist" + % (numsamples, numsamples, numoutcomes) + ) + print("=" * 9 * (len(pdists) + 2)) + FORMATSTR = " FreqDist " + "%8s " * (len(pdists) - 1) + "| Actual" + print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1])) + print("-" * 9 * (len(pdists) + 2)) + FORMATSTR = "%3d %8.6f " + "%8.6f " * (len(pdists) - 1) + "| %8.6f" + for val in vals: + print(FORMATSTR % val) + + # Print the totals for each column (should all be 1.0) + zvals = list(zip(*vals)) + sums = [sum(val) for val in zvals[1:]] + print("-" * 9 * (len(pdists) + 2)) + FORMATSTR = "Total " + "%8.6f " * (len(pdists)) + "| %8.6f" + print(FORMATSTR % tuple(sums)) + print("=" * 9 * (len(pdists) + 2)) + + # Display the distributions themselves, if they're short enough. + if len("%s" % fdist1) < 70: + print(" fdist1: %s" % fdist1) + print(" fdist2: %s" % fdist2) + print(" fdist3: %s" % fdist3) + print() + + print("Generating:") + for pdist in pdists: + fdist = FreqDist(pdist.generate() for i in range(5000)) + print("{:>20} {}".format(pdist.__class__.__name__[:20], ("%s" % fdist)[:55])) + print() + + +def gt_demo(): + from nltk import corpus + + emma_words = corpus.gutenberg.words("austen-emma.txt") + fd = FreqDist(emma_words) + sgt = SimpleGoodTuringProbDist(fd) + print("{:>18} {:>8} {:>14}".format("word", "frequency", "SimpleGoodTuring")) + fd_keys_sorted = ( + key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True) + ) + for key in fd_keys_sorted: + print("%18s %8d %14e" % (key, fd[key], sgt.prob(key))) + + +if __name__ == "__main__": + demo(6, 10) + demo(5, 5000) + gt_demo() + +__all__ = [ + "ConditionalFreqDist", + "ConditionalProbDist", + "ConditionalProbDistI", + "CrossValidationProbDist", + "DictionaryConditionalProbDist", + "DictionaryProbDist", + "ELEProbDist", + "FreqDist", + "SimpleGoodTuringProbDist", + "HeldoutProbDist", + "ImmutableProbabilisticMixIn", + "LaplaceProbDist", + "LidstoneProbDist", + "MLEProbDist", + "MutableProbDist", + "KneserNeyProbDist", + "ProbDistI", + "ProbabilisticMixIn", + "UniformProbDist", + "WittenBellProbDist", + "add_logs", + "log_likelihood", + "sum_logs", + "entropy", +] diff --git a/venv/lib/python3.10/site-packages/nltk/sentiment/__init__.py b/venv/lib/python3.10/site-packages/nltk/sentiment/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..37c21108d41d8daafbcee02c34646291db597e88 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sentiment/__init__.py @@ -0,0 +1,13 @@ +# Natural Language Toolkit: Sentiment Analysis +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Sentiment Analysis Package + +""" +from nltk.sentiment.sentiment_analyzer import SentimentAnalyzer +from nltk.sentiment.vader import SentimentIntensityAnalyzer diff --git a/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d7702a55dacd9e5998a1388d35190c933b87486 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/util.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..bdd6610b0368c1a48202536acf2b202a06e0fb2e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/vader.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/vader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..193f9c1f8a939da47e0345bf5a01c3b36c015875 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/sentiment/__pycache__/vader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/sentiment/sentiment_analyzer.py b/venv/lib/python3.10/site-packages/nltk/sentiment/sentiment_analyzer.py new file mode 100644 index 0000000000000000000000000000000000000000..6654de34d8fbe801bcdde6ef37e1951e207f5ff9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sentiment/sentiment_analyzer.py @@ -0,0 +1,255 @@ +# +# Natural Language Toolkit: Sentiment Analyzer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +A SentimentAnalyzer is a tool to implement and facilitate Sentiment Analysis tasks +using NLTK features and classifiers, especially for teaching and demonstrative +purposes. +""" + +import sys +from collections import defaultdict + +from nltk.classify.util import accuracy as eval_accuracy +from nltk.classify.util import apply_features +from nltk.collocations import BigramCollocationFinder +from nltk.metrics import BigramAssocMeasures +from nltk.metrics import f_measure as eval_f_measure +from nltk.metrics import precision as eval_precision +from nltk.metrics import recall as eval_recall +from nltk.probability import FreqDist + + +class SentimentAnalyzer: + """ + A Sentiment Analysis tool based on machine learning approaches. + """ + + def __init__(self, classifier=None): + self.feat_extractors = defaultdict(list) + self.classifier = classifier + + def all_words(self, documents, labeled=None): + """ + Return all words/tokens from the documents (with duplicates). + + :param documents: a list of (words, label) tuples. + :param labeled: if `True`, assume that each document is represented by a + (words, label) tuple: (list(str), str). If `False`, each document is + considered as being a simple list of strings: list(str). + :rtype: list(str) + :return: A list of all words/tokens in `documents`. + """ + all_words = [] + if labeled is None: + labeled = documents and isinstance(documents[0], tuple) + if labeled: + for words, _sentiment in documents: + all_words.extend(words) + elif not labeled: + for words in documents: + all_words.extend(words) + return all_words + + def apply_features(self, documents, labeled=None): + """ + Apply all feature extractor functions to the documents. This is a wrapper + around `nltk.classify.util.apply_features`. + + If `labeled=False`, return featuresets as: + [feature_func(doc) for doc in documents] + If `labeled=True`, return featuresets as: + [(feature_func(tok), label) for (tok, label) in toks] + + :param documents: a list of documents. `If labeled=True`, the method expects + a list of (words, label) tuples. + :rtype: LazyMap + """ + return apply_features(self.extract_features, documents, labeled) + + def unigram_word_feats(self, words, top_n=None, min_freq=0): + """ + Return most common top_n word features. + + :param words: a list of words/tokens. 
+ :param top_n: number of best words/tokens to use, sorted by frequency. + :rtype: list(str) + :return: A list of `top_n` words/tokens (with no duplicates) sorted by + frequency. + """ + # Stopwords are not removed + unigram_feats_freqs = FreqDist(word for word in words) + return [ + w + for w, f in unigram_feats_freqs.most_common(top_n) + if unigram_feats_freqs[w] > min_freq + ] + + def bigram_collocation_feats( + self, documents, top_n=None, min_freq=3, assoc_measure=BigramAssocMeasures.pmi + ): + """ + Return `top_n` bigram features (using `assoc_measure`). + Note that this method is based on bigram collocations measures, and not + on simple bigram frequency. + + :param documents: a list (or iterable) of tokens. + :param top_n: number of best words/tokens to use, sorted by association + measure. + :param assoc_measure: bigram association measure to use as score function. + :param min_freq: the minimum number of occurrencies of bigrams to take + into consideration. + + :return: `top_n` ngrams scored by the given association measure. + """ + finder = BigramCollocationFinder.from_documents(documents) + finder.apply_freq_filter(min_freq) + return finder.nbest(assoc_measure, top_n) + + def classify(self, instance): + """ + Classify a single instance applying the features that have already been + stored in the SentimentAnalyzer. + + :param instance: a list (or iterable) of tokens. + :return: the classification result given by applying the classifier. + """ + instance_feats = self.apply_features([instance], labeled=False) + return self.classifier.classify(instance_feats[0]) + + def add_feat_extractor(self, function, **kwargs): + """ + Add a new function to extract features from a document. This function will + be used in extract_features(). + Important: in this step our kwargs are only representing additional parameters, + and NOT the document we have to parse. The document will always be the first + parameter in the parameter list, and it will be added in the extract_features() + function. + + :param function: the extractor function to add to the list of feature extractors. + :param kwargs: additional parameters required by the `function` function. + """ + self.feat_extractors[function].append(kwargs) + + def extract_features(self, document): + """ + Apply extractor functions (and their parameters) to the present document. + We pass `document` as the first parameter of the extractor functions. + If we want to use the same extractor function multiple times, we have to + add it to the extractors with `add_feat_extractor` using multiple sets of + parameters (one for each call of the extractor function). + + :param document: the document that will be passed as argument to the + feature extractor functions. + :return: A dictionary of populated features extracted from the document. + :rtype: dict + """ + all_features = {} + for extractor in self.feat_extractors: + for param_set in self.feat_extractors[extractor]: + feats = extractor(document, **param_set) + all_features.update(feats) + return all_features + + def train(self, trainer, training_set, save_classifier=None, **kwargs): + """ + Train classifier on the training set, optionally saving the output in the + file specified by `save_classifier`. + Additional arguments depend on the specific trainer used. For example, + a MaxentClassifier can use `max_iter` parameter to specify the number + of iterations, while a NaiveBayesClassifier cannot. + + :param trainer: `train` method of a classifier. 
+ E.g.: NaiveBayesClassifier.train + :param training_set: the training set to be passed as argument to the + classifier `train` method. + :param save_classifier: the filename of the file where the classifier + will be stored (optional). + :param kwargs: additional parameters that will be passed as arguments to + the classifier `train` function. + :return: A classifier instance trained on the training set. + :rtype: + """ + print("Training classifier") + self.classifier = trainer(training_set, **kwargs) + if save_classifier: + self.save_file(self.classifier, save_classifier) + + return self.classifier + + def save_file(self, content, filename): + """ + Store `content` in `filename`. Can be used to store a SentimentAnalyzer. + """ + print("Saving", filename, file=sys.stderr) + with open(filename, "wb") as storage_file: + import pickle + + # The protocol=2 parameter is for python2 compatibility + pickle.dump(content, storage_file, protocol=2) + + def evaluate( + self, + test_set, + classifier=None, + accuracy=True, + f_measure=True, + precision=True, + recall=True, + verbose=False, + ): + """ + Evaluate and print classifier performance on the test set. + + :param test_set: A list of (tokens, label) tuples to use as gold set. + :param classifier: a classifier instance (previously trained). + :param accuracy: if `True`, evaluate classifier accuracy. + :param f_measure: if `True`, evaluate classifier f_measure. + :param precision: if `True`, evaluate classifier precision. + :param recall: if `True`, evaluate classifier recall. + :return: evaluation results. + :rtype: dict(str): float + """ + if classifier is None: + classifier = self.classifier + print(f"Evaluating {type(classifier).__name__} results...") + metrics_results = {} + if accuracy: + accuracy_score = eval_accuracy(classifier, test_set) + metrics_results["Accuracy"] = accuracy_score + + gold_results = defaultdict(set) + test_results = defaultdict(set) + labels = set() + for i, (feats, label) in enumerate(test_set): + labels.add(label) + gold_results[label].add(i) + observed = classifier.classify(feats) + test_results[observed].add(i) + + for label in labels: + if precision: + precision_score = eval_precision( + gold_results[label], test_results[label] + ) + metrics_results[f"Precision [{label}]"] = precision_score + if recall: + recall_score = eval_recall(gold_results[label], test_results[label]) + metrics_results[f"Recall [{label}]"] = recall_score + if f_measure: + f_measure_score = eval_f_measure( + gold_results[label], test_results[label] + ) + metrics_results[f"F-measure [{label}]"] = f_measure_score + + # Print evaluation results (in alphabetical order) + if verbose: + for result in sorted(metrics_results): + print(f"{result}: {metrics_results[result]}") + + return metrics_results diff --git a/venv/lib/python3.10/site-packages/nltk/sentiment/util.py b/venv/lib/python3.10/site-packages/nltk/sentiment/util.py new file mode 100644 index 0000000000000000000000000000000000000000..0a698981e1d2be99e97e5e474f016781921a2595 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sentiment/util.py @@ -0,0 +1,887 @@ +# +# Natural Language Toolkit: Sentiment Analyzer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Pierpaolo Pantone <24alsecondo@gmail.com> +# URL: +# For license information, see LICENSE.TXT + +""" +Utility methods for Sentiment Analysis. 
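+
+ Example (illustrative; combines negation marking with unigram features):
+
+ >>> from nltk.sentiment.util import extract_unigram_feats
+ >>> doc = "I did not like it .".split()
+ >>> extract_unigram_feats(doc, ['like', 'like_NEG'], handle_negation=True)
+ {'contains(like)': False, 'contains(like_NEG)': True}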
+""" + +import codecs +import csv +import json +import pickle +import random +import re +import sys +import time +from copy import deepcopy + +import nltk +from nltk.corpus import CategorizedPlaintextCorpusReader +from nltk.data import load +from nltk.tokenize.casual import EMOTICON_RE + +# //////////////////////////////////////////////////////////// +# { Regular expressions +# //////////////////////////////////////////////////////////// + +# Regular expression for negation by Christopher Potts +NEGATION = r""" + (?: + ^(?:never|no|nothing|nowhere|noone|none|not| + havent|hasnt|hadnt|cant|couldnt|shouldnt| + wont|wouldnt|dont|doesnt|didnt|isnt|arent|aint + )$ + ) + | + n't""" + +NEGATION_RE = re.compile(NEGATION, re.VERBOSE) + +CLAUSE_PUNCT = r"^[.:;!?]$" +CLAUSE_PUNCT_RE = re.compile(CLAUSE_PUNCT) + +# Happy and sad emoticons + +HAPPY = { + ":-)", + ":)", + ";)", + ":o)", + ":]", + ":3", + ":c)", + ":>", + "=]", + "8)", + "=)", + ":}", + ":^)", + ":-D", + ":D", + "8-D", + "8D", + "x-D", + "xD", + "X-D", + "XD", + "=-D", + "=D", + "=-3", + "=3", + ":-))", + ":'-)", + ":')", + ":*", + ":^*", + ">:P", + ":-P", + ":P", + "X-P", + "x-p", + "xp", + "XP", + ":-p", + ":p", + "=p", + ":-b", + ":b", + ">:)", + ">;)", + ">:-)", + "<3", +} + +SAD = { + ":L", + ":-/", + ">:/", + ":S", + ">:[", + ":@", + ":-(", + ":[", + ":-||", + "=L", + ":<", + ":-[", + ":-<", + "=\\", + "=/", + ">:(", + ":(", + ">.<", + ":'-(", + ":'(", + ":\\", + ":-c", + ":c", + ":{", + ">:\\", + ";(", +} + + +def timer(method): + """ + A timer decorator to measure execution performance of methods. + """ + + def timed(*args, **kw): + start = time.time() + result = method(*args, **kw) + end = time.time() + tot_time = end - start + hours = tot_time // 3600 + mins = tot_time // 60 % 60 + # in Python 2.x round() will return a float, so we convert it to int + secs = int(round(tot_time % 60)) + if hours == 0 and mins == 0 and secs < 10: + print(f"[TIMER] {method.__name__}(): {method.__name__:.3f} seconds") + else: + print(f"[TIMER] {method.__name__}(): {hours}h {mins}m {secs}s") + return result + + return timed + + +# //////////////////////////////////////////////////////////// +# { Feature extractor functions +# //////////////////////////////////////////////////////////// +""" +Feature extractor functions are declared outside the SentimentAnalyzer class. +Users should have the possibility to create their own feature extractors +without modifying SentimentAnalyzer. +""" + + +def extract_unigram_feats(document, unigrams, handle_negation=False): + """ + Populate a dictionary of unigram features, reflecting the presence/absence in + the document of each of the tokens in `unigrams`. + + :param document: a list of words/tokens. + :param unigrams: a list of words/tokens whose presence/absence has to be + checked in `document`. + :param handle_negation: if `handle_negation == True` apply `mark_negation` + method to `document` before checking for unigram presence/absence. + :return: a dictionary of unigram features {unigram : boolean}. 
+ + >>> words = ['ice', 'police', 'riot'] + >>> document = 'ice is melting due to global warming'.split() + >>> sorted(extract_unigram_feats(document, words).items()) + [('contains(ice)', True), ('contains(police)', False), ('contains(riot)', False)] + """ + features = {} + if handle_negation: + document = mark_negation(document) + for word in unigrams: + features[f"contains({word})"] = word in set(document) + return features + + +def extract_bigram_feats(document, bigrams): + """ + Populate a dictionary of bigram features, reflecting the presence/absence in + the document of each of the tokens in `bigrams`. This extractor function only + considers contiguous bigrams obtained by `nltk.bigrams`. + + :param document: a list of words/tokens. + :param unigrams: a list of bigrams whose presence/absence has to be + checked in `document`. + :return: a dictionary of bigram features {bigram : boolean}. + + >>> bigrams = [('global', 'warming'), ('police', 'prevented'), ('love', 'you')] + >>> document = 'ice is melting due to global warming'.split() + >>> sorted(extract_bigram_feats(document, bigrams).items()) # doctest: +NORMALIZE_WHITESPACE + [('contains(global - warming)', True), ('contains(love - you)', False), + ('contains(police - prevented)', False)] + """ + features = {} + for bigr in bigrams: + features[f"contains({bigr[0]} - {bigr[1]})"] = bigr in nltk.bigrams(document) + return features + + +# //////////////////////////////////////////////////////////// +# { Helper Functions +# //////////////////////////////////////////////////////////// + + +def mark_negation(document, double_neg_flip=False, shallow=False): + """ + Append _NEG suffix to words that appear in the scope between a negation + and a punctuation mark. + + :param document: a list of words/tokens, or a tuple (words, label). + :param shallow: if True, the method will modify the original document in place. + :param double_neg_flip: if True, double negation is considered affirmation + (we activate/deactivate negation scope every time we find a negation). + :return: if `shallow == True` the method will modify the original document + and return it. If `shallow == False` the method will return a modified + document, leaving the original unmodified. + + >>> sent = "I didn't like this movie . It was bad .".split() + >>> mark_negation(sent) + ['I', "didn't", 'like_NEG', 'this_NEG', 'movie_NEG', '.', 'It', 'was', 'bad', '.'] + """ + if not shallow: + document = deepcopy(document) + # check if the document is labeled. If so, do not consider the label. + labeled = document and isinstance(document[0], (tuple, list)) + if labeled: + doc = document[0] + else: + doc = document + neg_scope = False + for i, word in enumerate(doc): + if NEGATION_RE.search(word): + if not neg_scope or (neg_scope and double_neg_flip): + neg_scope = not neg_scope + continue + else: + doc[i] += "_NEG" + elif neg_scope and CLAUSE_PUNCT_RE.search(word): + neg_scope = not neg_scope + elif neg_scope and not CLAUSE_PUNCT_RE.search(word): + doc[i] += "_NEG" + + return document + + +def output_markdown(filename, **kwargs): + """ + Write the output of an analysis to a file. 
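+
+ Each keyword argument is appended to ``filename`` as a markdown bullet
+ (with nested bullets for dict and list values), so repeated calls build up
+ a log of runs. An illustrative call, with hypothetical values::
+
+ output_markdown("results.md", Classifier="NaiveBayes", Accuracy=0.73)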
+ """ + with codecs.open(filename, "at") as outfile: + text = "\n*** \n\n" + text += "{} \n\n".format(time.strftime("%d/%m/%Y, %H:%M")) + for k in sorted(kwargs): + if isinstance(kwargs[k], dict): + dictionary = kwargs[k] + text += f" - **{k}:**\n" + for entry in sorted(dictionary): + text += f" - {entry}: {dictionary[entry]} \n" + elif isinstance(kwargs[k], list): + text += f" - **{k}:**\n" + for entry in kwargs[k]: + text += f" - {entry}\n" + else: + text += f" - **{k}:** {kwargs[k]} \n" + outfile.write(text) + + +def split_train_test(all_instances, n=None): + """ + Randomly split `n` instances of the dataset into train and test sets. + + :param all_instances: a list of instances (e.g. documents) that will be split. + :param n: the number of instances to consider (in case we want to use only a + subset). + :return: two lists of instances. Train set is 8/10 of the total and test set + is 2/10 of the total. + """ + random.seed(12345) + random.shuffle(all_instances) + if not n or n > len(all_instances): + n = len(all_instances) + train_set = all_instances[: int(0.8 * n)] + test_set = all_instances[int(0.8 * n) : n] + + return train_set, test_set + + +def _show_plot(x_values, y_values, x_labels=None, y_labels=None): + try: + import matplotlib.pyplot as plt + except ImportError as e: + raise ImportError( + "The plot function requires matplotlib to be installed." + "See https://matplotlib.org/" + ) from e + + plt.locator_params(axis="y", nbins=3) + axes = plt.axes() + axes.yaxis.grid() + plt.plot(x_values, y_values, "ro", color="red") + plt.ylim(ymin=-1.2, ymax=1.2) + plt.tight_layout(pad=5) + if x_labels: + plt.xticks(x_values, x_labels, rotation="vertical") + if y_labels: + plt.yticks([-1, 0, 1], y_labels, rotation="horizontal") + # Pad margins so that markers are not clipped by the axes + plt.margins(0.2) + plt.show() + + +# //////////////////////////////////////////////////////////// +# { Parsing and conversion functions +# //////////////////////////////////////////////////////////// + + +def json2csv_preprocess( + json_file, + outfile, + fields, + encoding="utf8", + errors="replace", + gzip_compress=False, + skip_retweets=True, + skip_tongue_tweets=True, + skip_ambiguous_tweets=True, + strip_off_emoticons=True, + remove_duplicates=True, + limit=None, +): + """ + Convert json file to csv file, preprocessing each row to obtain a suitable + dataset for tweets Semantic Analysis. + + :param json_file: the original json file containing tweets. + :param outfile: the output csv filename. + :param fields: a list of fields that will be extracted from the json file and + kept in the output csv file. + :param encoding: the encoding of the files. + :param errors: the error handling strategy for the output writer. + :param gzip_compress: if True, create a compressed GZIP file. + + :param skip_retweets: if True, remove retweets. + :param skip_tongue_tweets: if True, remove tweets containing ":P" and ":-P" + emoticons. + :param skip_ambiguous_tweets: if True, remove tweets containing both happy + and sad emoticons. + :param strip_off_emoticons: if True, strip off emoticons from all tweets. + :param remove_duplicates: if True, remove tweets appearing more than once. + :param limit: an integer to set the number of tweets to convert. After the + limit is reached the conversion will stop. It can be useful to create + subsets of the original tweets json data. 
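+
+ Note: this function uses ``_outf_writer`` and ``extract_fields`` from
+ ``nltk.twitter.common``, which this module only imports inside its
+ ``if __name__ == "__main__":`` block, so calling it from other code raises
+ ``NameError`` unless those names are first imported into this module.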
+ """ + with codecs.open(json_file, encoding=encoding) as fp: + (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) + # write the list of fields as header + writer.writerow(fields) + + if remove_duplicates == True: + tweets_cache = [] + i = 0 + for line in fp: + tweet = json.loads(line) + row = extract_fields(tweet, fields) + try: + text = row[fields.index("text")] + # Remove retweets + if skip_retweets == True: + if re.search(r"\bRT\b", text): + continue + # Remove tweets containing ":P" and ":-P" emoticons + if skip_tongue_tweets == True: + if re.search(r"\:\-?P\b", text): + continue + # Remove tweets containing both happy and sad emoticons + if skip_ambiguous_tweets == True: + all_emoticons = EMOTICON_RE.findall(text) + if all_emoticons: + if (set(all_emoticons) & HAPPY) and (set(all_emoticons) & SAD): + continue + # Strip off emoticons from all tweets + if strip_off_emoticons == True: + row[fields.index("text")] = re.sub( + r"(?!\n)\s+", " ", EMOTICON_RE.sub("", text) + ) + # Remove duplicate tweets + if remove_duplicates == True: + if row[fields.index("text")] in tweets_cache: + continue + else: + tweets_cache.append(row[fields.index("text")]) + except ValueError: + pass + writer.writerow(row) + i += 1 + if limit and i >= limit: + break + outf.close() + + +def parse_tweets_set( + filename, label, word_tokenizer=None, sent_tokenizer=None, skip_header=True +): + """ + Parse csv file containing tweets and output data a list of (text, label) tuples. + + :param filename: the input csv filename. + :param label: the label to be appended to each tweet contained in the csv file. + :param word_tokenizer: the tokenizer instance that will be used to tokenize + each sentence into tokens (e.g. WordPunctTokenizer() or BlanklineTokenizer()). + If no word_tokenizer is specified, tweets will not be tokenized. + :param sent_tokenizer: the tokenizer that will be used to split each tweet into + sentences. + :param skip_header: if True, skip the first line of the csv file (which usually + contains headers). + + :return: a list of (text, label) tuples. + """ + tweets = [] + if not sent_tokenizer: + sent_tokenizer = load("tokenizers/punkt/english.pickle") + + with codecs.open(filename, "rt") as csvfile: + reader = csv.reader(csvfile) + if skip_header == True: + next(reader, None) # skip the header + i = 0 + for tweet_id, text in reader: + # text = text[1] + i += 1 + sys.stdout.write(f"Loaded {i} tweets\r") + # Apply sentence and word tokenizer to text + if word_tokenizer: + tweet = [ + w + for sent in sent_tokenizer.tokenize(text) + for w in word_tokenizer.tokenize(sent) + ] + else: + tweet = text + tweets.append((tweet, label)) + + print(f"Loaded {i} tweets") + return tweets + + +# //////////////////////////////////////////////////////////// +# { Demos +# //////////////////////////////////////////////////////////// + + +def demo_tweets(trainer, n_instances=None, output=None): + """ + Train and test Naive Bayes classifier on 10000 tweets, tokenized using + TweetTokenizer. + Features are composed of: + + - 1000 most frequent unigrams + - 100 top bigrams (using BigramAssocMeasures.pmi) + + :param trainer: `train` method of a classifier. + :param n_instances: the number of total tweets that have to be used for + training and testing. Tweets will be equally split between positive and + negative. + :param output: the output file where results have to be reported. 
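+
+ Illustrative call (requires the ``twitter_samples`` corpus and the
+ ``punkt`` tokenizer data)::
+
+ from nltk.classify import NaiveBayesClassifier
+ demo_tweets(NaiveBayesClassifier.train, n_instances=200)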
+ """ + from nltk.corpus import stopwords, twitter_samples + from nltk.sentiment import SentimentAnalyzer + from nltk.tokenize import TweetTokenizer + + # Different customizations for the TweetTokenizer + tokenizer = TweetTokenizer(preserve_case=False) + # tokenizer = TweetTokenizer(preserve_case=True, strip_handles=True) + # tokenizer = TweetTokenizer(reduce_len=True, strip_handles=True) + + if n_instances is not None: + n_instances = int(n_instances / 2) + + fields = ["id", "text"] + positive_json = twitter_samples.abspath("positive_tweets.json") + positive_csv = "positive_tweets.csv" + json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances) + + negative_json = twitter_samples.abspath("negative_tweets.json") + negative_csv = "negative_tweets.csv" + json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances) + + neg_docs = parse_tweets_set(negative_csv, label="neg", word_tokenizer=tokenizer) + pos_docs = parse_tweets_set(positive_csv, label="pos", word_tokenizer=tokenizer) + + # We separately split subjective and objective instances to keep a balanced + # uniform class distribution in both train and test sets. + train_pos_docs, test_pos_docs = split_train_test(pos_docs) + train_neg_docs, test_neg_docs = split_train_test(neg_docs) + + training_tweets = train_pos_docs + train_neg_docs + testing_tweets = test_pos_docs + test_neg_docs + + sentim_analyzer = SentimentAnalyzer() + # stopwords = stopwords.words('english') + # all_words = [word for word in sentim_analyzer.all_words(training_tweets) if word.lower() not in stopwords] + all_words = [word for word in sentim_analyzer.all_words(training_tweets)] + + # Add simple unigram word features + unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000) + sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) + + # Add bigram collocation features + bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats( + [tweet[0] for tweet in training_tweets], top_n=100, min_freq=12 + ) + sentim_analyzer.add_feat_extractor( + extract_bigram_feats, bigrams=bigram_collocs_feats + ) + + training_set = sentim_analyzer.apply_features(training_tweets) + test_set = sentim_analyzer.apply_features(testing_tweets) + + classifier = sentim_analyzer.train(trainer, training_set) + # classifier = sentim_analyzer.train(trainer, training_set, max_iter=4) + try: + classifier.show_most_informative_features() + except AttributeError: + print( + "Your classifier does not provide a show_most_informative_features() method." + ) + results = sentim_analyzer.evaluate(test_set) + + if output: + extr = [f.__name__ for f in sentim_analyzer.feat_extractors] + output_markdown( + output, + Dataset="labeled_tweets", + Classifier=type(classifier).__name__, + Tokenizer=tokenizer.__class__.__name__, + Feats=extr, + Results=results, + Instances=n_instances, + ) + + +def demo_movie_reviews(trainer, n_instances=None, output=None): + """ + Train classifier on all instances of the Movie Reviews dataset. + The corpus has been preprocessed using the default sentence tokenizer and + WordPunctTokenizer. + Features are composed of: + + - most frequent unigrams + + :param trainer: `train` method of a classifier. + :param n_instances: the number of total reviews that have to be used for + training and testing. Reviews will be equally split between positive and + negative. + :param output: the output file where results have to be reported. 
+ """ + from nltk.corpus import movie_reviews + from nltk.sentiment import SentimentAnalyzer + + if n_instances is not None: + n_instances = int(n_instances / 2) + + pos_docs = [ + (list(movie_reviews.words(pos_id)), "pos") + for pos_id in movie_reviews.fileids("pos")[:n_instances] + ] + neg_docs = [ + (list(movie_reviews.words(neg_id)), "neg") + for neg_id in movie_reviews.fileids("neg")[:n_instances] + ] + # We separately split positive and negative instances to keep a balanced + # uniform class distribution in both train and test sets. + train_pos_docs, test_pos_docs = split_train_test(pos_docs) + train_neg_docs, test_neg_docs = split_train_test(neg_docs) + + training_docs = train_pos_docs + train_neg_docs + testing_docs = test_pos_docs + test_neg_docs + + sentim_analyzer = SentimentAnalyzer() + all_words = sentim_analyzer.all_words(training_docs) + + # Add simple unigram word features + unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4) + sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) + # Apply features to obtain a feature-value representation of our datasets + training_set = sentim_analyzer.apply_features(training_docs) + test_set = sentim_analyzer.apply_features(testing_docs) + + classifier = sentim_analyzer.train(trainer, training_set) + try: + classifier.show_most_informative_features() + except AttributeError: + print( + "Your classifier does not provide a show_most_informative_features() method." + ) + results = sentim_analyzer.evaluate(test_set) + + if output: + extr = [f.__name__ for f in sentim_analyzer.feat_extractors] + output_markdown( + output, + Dataset="Movie_reviews", + Classifier=type(classifier).__name__, + Tokenizer="WordPunctTokenizer", + Feats=extr, + Results=results, + Instances=n_instances, + ) + + +def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None): + """ + Train and test a classifier on instances of the Subjective Dataset by Pang and + Lee. The dataset is made of 5000 subjective and 5000 objective sentences. + All tokens (words and punctuation marks) are separated by a whitespace, so + we use the basic WhitespaceTokenizer to parse the data. + + :param trainer: `train` method of a classifier. + :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file. + :param n_instances: the number of total sentences that have to be used for + training and testing. Sentences will be equally split between positive + and negative. + :param output: the output file where results have to be reported. + """ + from nltk.corpus import subjectivity + from nltk.sentiment import SentimentAnalyzer + + if n_instances is not None: + n_instances = int(n_instances / 2) + + subj_docs = [ + (sent, "subj") for sent in subjectivity.sents(categories="subj")[:n_instances] + ] + obj_docs = [ + (sent, "obj") for sent in subjectivity.sents(categories="obj")[:n_instances] + ] + + # We separately split subjective and objective instances to keep a balanced + # uniform class distribution in both train and test sets. 
+ train_subj_docs, test_subj_docs = split_train_test(subj_docs) + train_obj_docs, test_obj_docs = split_train_test(obj_docs) + + training_docs = train_subj_docs + train_obj_docs + testing_docs = test_subj_docs + test_obj_docs + + sentim_analyzer = SentimentAnalyzer() + all_words_neg = sentim_analyzer.all_words( + [mark_negation(doc) for doc in training_docs] + ) + + # Add simple unigram word features handling negation + unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) + sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) + + # Apply features to obtain a feature-value representation of our datasets + training_set = sentim_analyzer.apply_features(training_docs) + test_set = sentim_analyzer.apply_features(testing_docs) + + classifier = sentim_analyzer.train(trainer, training_set) + try: + classifier.show_most_informative_features() + except AttributeError: + print( + "Your classifier does not provide a show_most_informative_features() method." + ) + results = sentim_analyzer.evaluate(test_set) + + if save_analyzer == True: + sentim_analyzer.save_file(sentim_analyzer, "sa_subjectivity.pickle") + + if output: + extr = [f.__name__ for f in sentim_analyzer.feat_extractors] + output_markdown( + output, + Dataset="subjectivity", + Classifier=type(classifier).__name__, + Tokenizer="WhitespaceTokenizer", + Feats=extr, + Instances=n_instances, + Results=results, + ) + + return sentim_analyzer + + +def demo_sent_subjectivity(text): + """ + Classify a single sentence as subjective or objective using a stored + SentimentAnalyzer. + + :param text: a sentence whose subjectivity has to be classified. + """ + from nltk.classify import NaiveBayesClassifier + from nltk.tokenize import regexp + + word_tokenizer = regexp.WhitespaceTokenizer() + try: + sentim_analyzer = load("sa_subjectivity.pickle") + except LookupError: + print("Cannot find the sentiment analyzer you want to load.") + print("Training a new one using NaiveBayesClassifier.") + sentim_analyzer = demo_subjectivity(NaiveBayesClassifier.train, True) + + # Tokenize and convert to lower case + tokenized_text = [word.lower() for word in word_tokenizer.tokenize(text)] + print(sentim_analyzer.classify(tokenized_text)) + + +def demo_liu_hu_lexicon(sentence, plot=False): + """ + Basic example of sentiment classification using Liu and Hu opinion lexicon. + This function simply counts the number of positive, negative and neutral words + in the sentence and classifies it depending on which polarity is more represented. + Words that do not appear in the lexicon are considered as neutral. + + :param sentence: a sentence whose polarity has to be classified. + :param plot: if True, plot a visual representation of the sentence polarity. 
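+
+ Illustrative call (requires the ``opinion_lexicon`` corpus); this sentence
+ should print ``Positive`` because "good" is in the positive lexicon::
+
+ demo_liu_hu_lexicon("This movie was surprisingly good")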
+ """ + from nltk.corpus import opinion_lexicon + from nltk.tokenize import treebank + + tokenizer = treebank.TreebankWordTokenizer() + pos_words = 0 + neg_words = 0 + tokenized_sent = [word.lower() for word in tokenizer.tokenize(sentence)] + + x = list(range(len(tokenized_sent))) # x axis for the plot + y = [] + + for word in tokenized_sent: + if word in opinion_lexicon.positive(): + pos_words += 1 + y.append(1) # positive + elif word in opinion_lexicon.negative(): + neg_words += 1 + y.append(-1) # negative + else: + y.append(0) # neutral + + if pos_words > neg_words: + print("Positive") + elif pos_words < neg_words: + print("Negative") + elif pos_words == neg_words: + print("Neutral") + + if plot == True: + _show_plot( + x, y, x_labels=tokenized_sent, y_labels=["Negative", "Neutral", "Positive"] + ) + + +def demo_vader_instance(text): + """ + Output polarity scores for a text using Vader approach. + + :param text: a text whose polarity has to be evaluated. + """ + from nltk.sentiment import SentimentIntensityAnalyzer + + vader_analyzer = SentimentIntensityAnalyzer() + print(vader_analyzer.polarity_scores(text)) + + +def demo_vader_tweets(n_instances=None, output=None): + """ + Classify 10000 positive and negative tweets using Vader approach. + + :param n_instances: the number of total tweets that have to be classified. + :param output: the output file where results have to be reported. + """ + from collections import defaultdict + + from nltk.corpus import twitter_samples + from nltk.metrics import accuracy as eval_accuracy + from nltk.metrics import f_measure as eval_f_measure + from nltk.metrics import precision as eval_precision + from nltk.metrics import recall as eval_recall + from nltk.sentiment import SentimentIntensityAnalyzer + + if n_instances is not None: + n_instances = int(n_instances / 2) + + fields = ["id", "text"] + positive_json = twitter_samples.abspath("positive_tweets.json") + positive_csv = "positive_tweets.csv" + json2csv_preprocess( + positive_json, + positive_csv, + fields, + strip_off_emoticons=False, + limit=n_instances, + ) + + negative_json = twitter_samples.abspath("negative_tweets.json") + negative_csv = "negative_tweets.csv" + json2csv_preprocess( + negative_json, + negative_csv, + fields, + strip_off_emoticons=False, + limit=n_instances, + ) + + pos_docs = parse_tweets_set(positive_csv, label="pos") + neg_docs = parse_tweets_set(negative_csv, label="neg") + + # We separately split subjective and objective instances to keep a balanced + # uniform class distribution in both train and test sets. 
+ train_pos_docs, test_pos_docs = split_train_test(pos_docs) + train_neg_docs, test_neg_docs = split_train_test(neg_docs) + + training_tweets = train_pos_docs + train_neg_docs + testing_tweets = test_pos_docs + test_neg_docs + + vader_analyzer = SentimentIntensityAnalyzer() + + gold_results = defaultdict(set) + test_results = defaultdict(set) + acc_gold_results = [] + acc_test_results = [] + labels = set() + num = 0 + for i, (text, label) in enumerate(testing_tweets): + labels.add(label) + gold_results[label].add(i) + acc_gold_results.append(label) + score = vader_analyzer.polarity_scores(text)["compound"] + if score > 0: + observed = "pos" + else: + observed = "neg" + num += 1 + acc_test_results.append(observed) + test_results[observed].add(i) + metrics_results = {} + for label in labels: + accuracy_score = eval_accuracy(acc_gold_results, acc_test_results) + metrics_results["Accuracy"] = accuracy_score + precision_score = eval_precision(gold_results[label], test_results[label]) + metrics_results[f"Precision [{label}]"] = precision_score + recall_score = eval_recall(gold_results[label], test_results[label]) + metrics_results[f"Recall [{label}]"] = recall_score + f_measure_score = eval_f_measure(gold_results[label], test_results[label]) + metrics_results[f"F-measure [{label}]"] = f_measure_score + + for result in sorted(metrics_results): + print(f"{result}: {metrics_results[result]}") + + if output: + output_markdown( + output, + Approach="Vader", + Dataset="labeled_tweets", + Instances=n_instances, + Results=metrics_results, + ) + + +if __name__ == "__main__": + from sklearn.svm import LinearSVC + + from nltk.classify import MaxentClassifier, NaiveBayesClassifier + from nltk.classify.scikitlearn import SklearnClassifier + from nltk.twitter.common import _outf_writer, extract_fields + + naive_bayes = NaiveBayesClassifier.train + svm = SklearnClassifier(LinearSVC()).train + maxent = MaxentClassifier.train + + demo_tweets(naive_bayes) + # demo_movie_reviews(svm) + # demo_subjectivity(svm) + # demo_sent_subjectivity("she's an artist , but hasn't picked up a brush in a year . ") + # demo_liu_hu_lexicon("This movie was actually neither that funny, nor super witty.", plot=True) + # demo_vader_instance("This movie was actually neither that funny, nor super witty.") + # demo_vader_tweets() diff --git a/venv/lib/python3.10/site-packages/nltk/sentiment/vader.py b/venv/lib/python3.10/site-packages/nltk/sentiment/vader.py new file mode 100644 index 0000000000000000000000000000000000000000..2381b39a3a0da7750506283db9b2d3a5fe1d4633 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/sentiment/vader.py @@ -0,0 +1,633 @@ +# Natural Language Toolkit: vader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: C.J. Hutto +# Ewan Klein (modifications) +# Pierpaolo Pantone <24alsecondo@gmail.com> (modifications) +# George Berry (modifications) +# Malavika Suresh (modifications) +# URL: +# For license information, see LICENSE.TXT +# +# Modifications to the original VADER code have been made in order to +# integrate it into NLTK. These have involved changes to +# ensure Python 3 compatibility, and refactoring to achieve greater modularity. + +""" +If you use the VADER sentiment analysis tools, please cite: + +Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model for +Sentiment Analysis of Social Media Text. Eighth International Conference on +Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014. 
+""" + +import math +import re +import string +from itertools import product + +import nltk.data +from nltk.util import pairwise + + +class VaderConstants: + """ + A class to keep the Vader lists and constants. + """ + + ##Constants## + # (empirically derived mean sentiment intensity rating increase for booster words) + B_INCR = 0.293 + B_DECR = -0.293 + + # (empirically derived mean sentiment intensity rating increase for using + # ALLCAPs to emphasize a word) + C_INCR = 0.733 + + N_SCALAR = -0.74 + + NEGATE = { + "aint", + "arent", + "cannot", + "cant", + "couldnt", + "darent", + "didnt", + "doesnt", + "ain't", + "aren't", + "can't", + "couldn't", + "daren't", + "didn't", + "doesn't", + "dont", + "hadnt", + "hasnt", + "havent", + "isnt", + "mightnt", + "mustnt", + "neither", + "don't", + "hadn't", + "hasn't", + "haven't", + "isn't", + "mightn't", + "mustn't", + "neednt", + "needn't", + "never", + "none", + "nope", + "nor", + "not", + "nothing", + "nowhere", + "oughtnt", + "shant", + "shouldnt", + "uhuh", + "wasnt", + "werent", + "oughtn't", + "shan't", + "shouldn't", + "uh-uh", + "wasn't", + "weren't", + "without", + "wont", + "wouldnt", + "won't", + "wouldn't", + "rarely", + "seldom", + "despite", + } + + # booster/dampener 'intensifiers' or 'degree adverbs' + # https://en.wiktionary.org/wiki/Category:English_degree_adverbs + + BOOSTER_DICT = { + "absolutely": B_INCR, + "amazingly": B_INCR, + "awfully": B_INCR, + "completely": B_INCR, + "considerably": B_INCR, + "decidedly": B_INCR, + "deeply": B_INCR, + "effing": B_INCR, + "enormously": B_INCR, + "entirely": B_INCR, + "especially": B_INCR, + "exceptionally": B_INCR, + "extremely": B_INCR, + "fabulously": B_INCR, + "flipping": B_INCR, + "flippin": B_INCR, + "fricking": B_INCR, + "frickin": B_INCR, + "frigging": B_INCR, + "friggin": B_INCR, + "fully": B_INCR, + "fucking": B_INCR, + "greatly": B_INCR, + "hella": B_INCR, + "highly": B_INCR, + "hugely": B_INCR, + "incredibly": B_INCR, + "intensely": B_INCR, + "majorly": B_INCR, + "more": B_INCR, + "most": B_INCR, + "particularly": B_INCR, + "purely": B_INCR, + "quite": B_INCR, + "really": B_INCR, + "remarkably": B_INCR, + "so": B_INCR, + "substantially": B_INCR, + "thoroughly": B_INCR, + "totally": B_INCR, + "tremendously": B_INCR, + "uber": B_INCR, + "unbelievably": B_INCR, + "unusually": B_INCR, + "utterly": B_INCR, + "very": B_INCR, + "almost": B_DECR, + "barely": B_DECR, + "hardly": B_DECR, + "just enough": B_DECR, + "kind of": B_DECR, + "kinda": B_DECR, + "kindof": B_DECR, + "kind-of": B_DECR, + "less": B_DECR, + "little": B_DECR, + "marginally": B_DECR, + "occasionally": B_DECR, + "partly": B_DECR, + "scarcely": B_DECR, + "slightly": B_DECR, + "somewhat": B_DECR, + "sort of": B_DECR, + "sorta": B_DECR, + "sortof": B_DECR, + "sort-of": B_DECR, + } + + # check for special case idioms using a sentiment-laden keyword known to SAGE + SPECIAL_CASE_IDIOMS = { + "the shit": 3, + "the bomb": 3, + "bad ass": 1.5, + "yeah right": -2, + "cut the mustard": 2, + "kiss of death": -1.5, + "hand to mouth": -2, + } + + # for removing punctuation + REGEX_REMOVE_PUNCTUATION = re.compile(f"[{re.escape(string.punctuation)}]") + + PUNC_LIST = [ + ".", + "!", + "?", + ",", + ";", + ":", + "-", + "'", + '"', + "!!", + "!!!", + "??", + "???", + "?!?", + "!?!", + "?!?!", + "!?!?", + ] + + def __init__(self): + pass + + def negated(self, input_words, include_nt=True): + """ + Determine if input contains negation words + """ + neg_words = self.NEGATE + if any(word.lower() in neg_words for word in input_words): + 
return True + if include_nt: + if any("n't" in word.lower() for word in input_words): + return True + for first, second in pairwise(input_words): + if second.lower() == "least" and first.lower() != "at": + return True + return False + + def normalize(self, score, alpha=15): + """ + Normalize the score to be between -1 and 1 using an alpha that + approximates the max expected value + """ + norm_score = score / math.sqrt((score * score) + alpha) + return norm_score + + def scalar_inc_dec(self, word, valence, is_cap_diff): + """ + Check if the preceding words increase, decrease, or negate/nullify the + valence + """ + scalar = 0.0 + word_lower = word.lower() + if word_lower in self.BOOSTER_DICT: + scalar = self.BOOSTER_DICT[word_lower] + if valence < 0: + scalar *= -1 + # check if booster/dampener word is in ALLCAPS (while others aren't) + if word.isupper() and is_cap_diff: + if valence > 0: + scalar += self.C_INCR + else: + scalar -= self.C_INCR + return scalar + + +class SentiText: + """ + Identify sentiment-relevant string-level properties of input text. + """ + + def __init__(self, text, punc_list, regex_remove_punctuation): + if not isinstance(text, str): + text = str(text.encode("utf-8")) + self.text = text + self.PUNC_LIST = punc_list + self.REGEX_REMOVE_PUNCTUATION = regex_remove_punctuation + self.words_and_emoticons = self._words_and_emoticons() + # doesn't separate words from + # adjacent punctuation (keeps emoticons & contractions) + self.is_cap_diff = self.allcap_differential(self.words_and_emoticons) + + def _words_plus_punc(self): + """ + Returns mapping of form: + { + 'cat,': 'cat', + ',cat': 'cat', + } + """ + no_punc_text = self.REGEX_REMOVE_PUNCTUATION.sub("", self.text) + # removes punctuation (but loses emoticons & contractions) + words_only = no_punc_text.split() + # remove singletons + words_only = {w for w in words_only if len(w) > 1} + # the product gives ('cat', ',') and (',', 'cat') + punc_before = {"".join(p): p[1] for p in product(self.PUNC_LIST, words_only)} + punc_after = {"".join(p): p[0] for p in product(words_only, self.PUNC_LIST)} + words_punc_dict = punc_before + words_punc_dict.update(punc_after) + return words_punc_dict + + def _words_and_emoticons(self): + """ + Removes leading and trailing puncutation + Leaves contractions and most emoticons + Does not preserve punc-plus-letter emoticons (e.g. :D) + """ + wes = self.text.split() + words_punc_dict = self._words_plus_punc() + wes = [we for we in wes if len(we) > 1] + for i, we in enumerate(wes): + if we in words_punc_dict: + wes[i] = words_punc_dict[we] + return wes + + def allcap_differential(self, words): + """ + Check whether just some words in the input are ALL CAPS + + :param list words: The words to inspect + :returns: `True` if some but not all items in `words` are ALL CAPS + """ + is_different = False + allcap_words = 0 + for word in words: + if word.isupper(): + allcap_words += 1 + cap_differential = len(words) - allcap_words + if 0 < cap_differential < len(words): + is_different = True + return is_different + + +class SentimentIntensityAnalyzer: + """ + Give a sentiment intensity score to sentences. 
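+
+ Example (illustrative; requires the ``vader_lexicon`` resource, and the
+ exact scores depend on the bundled lexicon):
+
+ >>> sia = SentimentIntensityAnalyzer() # doctest: +SKIP
+ >>> sia.polarity_scores("VADER is smart, handsome, and funny!") # doctest: +SKIP
+ {'neg': 0.0, 'neu': 0.248, 'pos': 0.752, 'compound': 0.8439}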
+ """ + + def __init__( + self, + lexicon_file="sentiment/vader_lexicon.zip/vader_lexicon/vader_lexicon.txt", + ): + self.lexicon_file = nltk.data.load(lexicon_file) + self.lexicon = self.make_lex_dict() + self.constants = VaderConstants() + + def make_lex_dict(self): + """ + Convert lexicon file to a dictionary + """ + lex_dict = {} + for line in self.lexicon_file.split("\n"): + (word, measure) = line.strip().split("\t")[0:2] + lex_dict[word] = float(measure) + return lex_dict + + def polarity_scores(self, text): + """ + Return a float for sentiment strength based on the input text. + Positive values are positive valence, negative value are negative + valence. + + :note: Hashtags are not taken into consideration (e.g. #BAD is neutral). If you + are interested in processing the text in the hashtags too, then we recommend + preprocessing your data to remove the #, after which the hashtag text may be + matched as if it was a normal word in the sentence. + """ + # text, words_and_emoticons, is_cap_diff = self.preprocess(text) + sentitext = SentiText( + text, self.constants.PUNC_LIST, self.constants.REGEX_REMOVE_PUNCTUATION + ) + sentiments = [] + words_and_emoticons = sentitext.words_and_emoticons + for item in words_and_emoticons: + valence = 0 + i = words_and_emoticons.index(item) + if ( + i < len(words_and_emoticons) - 1 + and item.lower() == "kind" + and words_and_emoticons[i + 1].lower() == "of" + ) or item.lower() in self.constants.BOOSTER_DICT: + sentiments.append(valence) + continue + + sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments) + + sentiments = self._but_check(words_and_emoticons, sentiments) + + return self.score_valence(sentiments, text) + + def sentiment_valence(self, valence, sentitext, item, i, sentiments): + is_cap_diff = sentitext.is_cap_diff + words_and_emoticons = sentitext.words_and_emoticons + item_lowercase = item.lower() + if item_lowercase in self.lexicon: + # get the sentiment valence + valence = self.lexicon[item_lowercase] + + # check if sentiment laden word is in ALL CAPS (while others aren't) + if item.isupper() and is_cap_diff: + if valence > 0: + valence += self.constants.C_INCR + else: + valence -= self.constants.C_INCR + + for start_i in range(0, 3): + if ( + i > start_i + and words_and_emoticons[i - (start_i + 1)].lower() + not in self.lexicon + ): + # dampen the scalar modifier of preceding words and emoticons + # (excluding the ones that immediately preceed the item) based + # on their distance from the current item. 
+ s = self.constants.scalar_inc_dec( + words_and_emoticons[i - (start_i + 1)], valence, is_cap_diff + ) + if start_i == 1 and s != 0: + s = s * 0.95 + if start_i == 2 and s != 0: + s = s * 0.9 + valence = valence + s + valence = self._never_check( + valence, words_and_emoticons, start_i, i + ) + if start_i == 2: + valence = self._idioms_check(valence, words_and_emoticons, i) + + # future work: consider other sentiment-laden idioms + # other_idioms = + # {"back handed": -2, "blow smoke": -2, "blowing smoke": -2, + # "upper hand": 1, "break a leg": 2, + # "cooking with gas": 2, "in the black": 2, "in the red": -2, + # "on the ball": 2,"under the weather": -2} + + valence = self._least_check(valence, words_and_emoticons, i) + + sentiments.append(valence) + return sentiments + + def _least_check(self, valence, words_and_emoticons, i): + # check for negation case using "least" + if ( + i > 1 + and words_and_emoticons[i - 1].lower() not in self.lexicon + and words_and_emoticons[i - 1].lower() == "least" + ): + if ( + words_and_emoticons[i - 2].lower() != "at" + and words_and_emoticons[i - 2].lower() != "very" + ): + valence = valence * self.constants.N_SCALAR + elif ( + i > 0 + and words_and_emoticons[i - 1].lower() not in self.lexicon + and words_and_emoticons[i - 1].lower() == "least" + ): + valence = valence * self.constants.N_SCALAR + return valence + + def _but_check(self, words_and_emoticons, sentiments): + words_and_emoticons = [w_e.lower() for w_e in words_and_emoticons] + but = {"but"} & set(words_and_emoticons) + if but: + bi = words_and_emoticons.index(next(iter(but))) + for sidx, sentiment in enumerate(sentiments): + if sidx < bi: + sentiments[sidx] = sentiment * 0.5 + elif sidx > bi: + sentiments[sidx] = sentiment * 1.5 + return sentiments + + def _idioms_check(self, valence, words_and_emoticons, i): + onezero = f"{words_and_emoticons[i - 1]} {words_and_emoticons[i]}" + + twoonezero = "{} {} {}".format( + words_and_emoticons[i - 2], + words_and_emoticons[i - 1], + words_and_emoticons[i], + ) + + twoone = f"{words_and_emoticons[i - 2]} {words_and_emoticons[i - 1]}" + + threetwoone = "{} {} {}".format( + words_and_emoticons[i - 3], + words_and_emoticons[i - 2], + words_and_emoticons[i - 1], + ) + + threetwo = "{} {}".format( + words_and_emoticons[i - 3], words_and_emoticons[i - 2] + ) + + sequences = [onezero, twoonezero, twoone, threetwoone, threetwo] + + for seq in sequences: + if seq in self.constants.SPECIAL_CASE_IDIOMS: + valence = self.constants.SPECIAL_CASE_IDIOMS[seq] + break + + if len(words_and_emoticons) - 1 > i: + zeroone = f"{words_and_emoticons[i]} {words_and_emoticons[i + 1]}" + if zeroone in self.constants.SPECIAL_CASE_IDIOMS: + valence = self.constants.SPECIAL_CASE_IDIOMS[zeroone] + if len(words_and_emoticons) - 1 > i + 1: + zeroonetwo = "{} {} {}".format( + words_and_emoticons[i], + words_and_emoticons[i + 1], + words_and_emoticons[i + 2], + ) + if zeroonetwo in self.constants.SPECIAL_CASE_IDIOMS: + valence = self.constants.SPECIAL_CASE_IDIOMS[zeroonetwo] + + # check for booster/dampener bi-grams such as 'sort of' or 'kind of' + if ( + threetwo in self.constants.BOOSTER_DICT + or twoone in self.constants.BOOSTER_DICT + ): + valence = valence + self.constants.B_DECR + return valence + + def _never_check(self, valence, words_and_emoticons, start_i, i): + if start_i == 0: + if self.constants.negated([words_and_emoticons[i - 1]]): + valence = valence * self.constants.N_SCALAR + if start_i == 1: + if words_and_emoticons[i - 2] == "never" and ( + words_and_emoticons[i - 1] 
== "so" + or words_and_emoticons[i - 1] == "this" + ): + valence = valence * 1.5 + elif self.constants.negated([words_and_emoticons[i - (start_i + 1)]]): + valence = valence * self.constants.N_SCALAR + if start_i == 2: + if ( + words_and_emoticons[i - 3] == "never" + and ( + words_and_emoticons[i - 2] == "so" + or words_and_emoticons[i - 2] == "this" + ) + or ( + words_and_emoticons[i - 1] == "so" + or words_and_emoticons[i - 1] == "this" + ) + ): + valence = valence * 1.25 + elif self.constants.negated([words_and_emoticons[i - (start_i + 1)]]): + valence = valence * self.constants.N_SCALAR + return valence + + def _punctuation_emphasis(self, sum_s, text): + # add emphasis from exclamation points and question marks + ep_amplifier = self._amplify_ep(text) + qm_amplifier = self._amplify_qm(text) + punct_emph_amplifier = ep_amplifier + qm_amplifier + return punct_emph_amplifier + + def _amplify_ep(self, text): + # check for added emphasis resulting from exclamation points (up to 4 of them) + ep_count = text.count("!") + if ep_count > 4: + ep_count = 4 + # (empirically derived mean sentiment intensity rating increase for + # exclamation points) + ep_amplifier = ep_count * 0.292 + return ep_amplifier + + def _amplify_qm(self, text): + # check for added emphasis resulting from question marks (2 or 3+) + qm_count = text.count("?") + qm_amplifier = 0 + if qm_count > 1: + if qm_count <= 3: + # (empirically derived mean sentiment intensity rating increase for + # question marks) + qm_amplifier = qm_count * 0.18 + else: + qm_amplifier = 0.96 + return qm_amplifier + + def _sift_sentiment_scores(self, sentiments): + # want separate positive versus negative sentiment scores + pos_sum = 0.0 + neg_sum = 0.0 + neu_count = 0 + for sentiment_score in sentiments: + if sentiment_score > 0: + pos_sum += ( + float(sentiment_score) + 1 + ) # compensates for neutral words that are counted as 1 + if sentiment_score < 0: + neg_sum += ( + float(sentiment_score) - 1 + ) # when used with math.fabs(), compensates for neutrals + if sentiment_score == 0: + neu_count += 1 + return pos_sum, neg_sum, neu_count + + def score_valence(self, sentiments, text): + if sentiments: + sum_s = float(sum(sentiments)) + # compute and add emphasis from punctuation in text + punct_emph_amplifier = self._punctuation_emphasis(sum_s, text) + if sum_s > 0: + sum_s += punct_emph_amplifier + elif sum_s < 0: + sum_s -= punct_emph_amplifier + + compound = self.constants.normalize(sum_s) + # discriminate between positive, negative and neutral sentiment scores + pos_sum, neg_sum, neu_count = self._sift_sentiment_scores(sentiments) + + if pos_sum > math.fabs(neg_sum): + pos_sum += punct_emph_amplifier + elif pos_sum < math.fabs(neg_sum): + neg_sum -= punct_emph_amplifier + + total = pos_sum + math.fabs(neg_sum) + neu_count + pos = math.fabs(pos_sum / total) + neg = math.fabs(neg_sum / total) + neu = math.fabs(neu_count / total) + + else: + compound = 0.0 + pos = 0.0 + neg = 0.0 + neu = 0.0 + + sentiment_dict = { + "neg": round(neg, 3), + "neu": round(neu, 3), + "pos": round(pos, 3), + "compound": round(compound, 4), + } + + return sentiment_dict diff --git a/venv/lib/python3.10/site-packages/nltk/text.py b/venv/lib/python3.10/site-packages/nltk/text.py new file mode 100644 index 0000000000000000000000000000000000000000..85614dbd22f6a4afd388cf96cb5f8e3520883c16 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/text.py @@ -0,0 +1,779 @@ +# Natural Language Toolkit: Texts +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# 
Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +This module brings together a variety of NLTK functionality for +text analysis, and provides simple, interactive interfaces. +Functionality includes: concordancing, collocation discovery, +regular expression search over tokenized strings, and +distributional similarity. +""" + +import re +import sys +from collections import Counter, defaultdict, namedtuple +from functools import reduce +from math import log + +from nltk.collocations import BigramCollocationFinder +from nltk.lm import MLE +from nltk.lm.preprocessing import padded_everygram_pipeline +from nltk.metrics import BigramAssocMeasures, f_measure +from nltk.probability import ConditionalFreqDist as CFD +from nltk.probability import FreqDist +from nltk.tokenize import sent_tokenize +from nltk.util import LazyConcatenation, tokenwrap + +ConcordanceLine = namedtuple( + "ConcordanceLine", + ["left", "query", "right", "offset", "left_print", "right_print", "line"], +) + + +class ContextIndex: + """ + A bidirectional index between words and their 'contexts' in a text. + The context of a word is usually defined to be the words that occur + in a fixed window around the word; but other definitions may also + be used by providing a custom context function. + """ + + @staticmethod + def _default_context(tokens, i): + """One left token and one right token, normalized to lowercase""" + left = tokens[i - 1].lower() if i != 0 else "*START*" + right = tokens[i + 1].lower() if i != len(tokens) - 1 else "*END*" + return (left, right) + + def __init__(self, tokens, context_func=None, filter=None, key=lambda x: x): + self._key = key + self._tokens = tokens + if context_func: + self._context_func = context_func + else: + self._context_func = self._default_context + if filter: + tokens = [t for t in tokens if filter(t)] + self._word_to_contexts = CFD( + (self._key(w), self._context_func(tokens, i)) for i, w in enumerate(tokens) + ) + self._context_to_words = CFD( + (self._context_func(tokens, i), self._key(w)) for i, w in enumerate(tokens) + ) + + def tokens(self): + """ + :rtype: list(str) + :return: The document that this context index was + created from. + """ + return self._tokens + + def word_similarity_dict(self, word): + """ + Return a dictionary mapping from words to 'similarity scores,' + indicating how often these two words occur in the same + context. + """ + word = self._key(word) + word_contexts = set(self._word_to_contexts[word]) + + scores = {} + for w, w_contexts in self._word_to_contexts.items(): + scores[w] = f_measure(word_contexts, set(w_contexts)) + + return scores + + def similar_words(self, word, n=20): + scores = defaultdict(int) + for c in self._word_to_contexts[self._key(word)]: + for w in self._context_to_words[c]: + if w != word: + scores[w] += ( + self._context_to_words[c][word] * self._context_to_words[c][w] + ) + return sorted(scores, key=scores.get, reverse=True)[:n] + + def common_contexts(self, words, fail_on_unknown=False): + """ + Find contexts where the specified words can all appear; and + return a frequency distribution mapping each context to the + number of times that context was used. + + :param words: The words used to seed the similarity search + :type words: str + :param fail_on_unknown: If true, then raise a value error if + any of the given words do not occur at all in the index. 
+ """ + words = [self._key(w) for w in words] + contexts = [set(self._word_to_contexts[w]) for w in words] + empty = [words[i] for i in range(len(words)) if not contexts[i]] + common = reduce(set.intersection, contexts) + if empty and fail_on_unknown: + raise ValueError("The following word(s) were not found:", " ".join(words)) + elif not common: + # nothing in common -- just return an empty freqdist. + return FreqDist() + else: + fd = FreqDist( + c for w in words for c in self._word_to_contexts[w] if c in common + ) + return fd + + +class ConcordanceIndex: + """ + An index that can be used to look up the offset locations at which + a given word occurs in a document. + """ + + def __init__(self, tokens, key=lambda x: x): + """ + Construct a new concordance index. + + :param tokens: The document (list of tokens) that this + concordance index was created from. This list can be used + to access the context of a given word occurrence. + :param key: A function that maps each token to a normalized + version that will be used as a key in the index. E.g., if + you use ``key=lambda s:s.lower()``, then the index will be + case-insensitive. + """ + self._tokens = tokens + """The document (list of tokens) that this concordance index + was created from.""" + + self._key = key + """Function mapping each token to an index key (or None).""" + + self._offsets = defaultdict(list) + """Dictionary mapping words (or keys) to lists of offset indices.""" + # Initialize the index (self._offsets) + for index, word in enumerate(tokens): + word = self._key(word) + self._offsets[word].append(index) + + def tokens(self): + """ + :rtype: list(str) + :return: The document that this concordance index was + created from. + """ + return self._tokens + + def offsets(self, word): + """ + :rtype: list(int) + :return: A list of the offset positions at which the given + word occurs. If a key function was specified for the + index, then given word's key will be looked up. + """ + word = self._key(word) + return self._offsets[word] + + def __repr__(self): + return "" % ( + len(self._tokens), + len(self._offsets), + ) + + def find_concordance(self, word, width=80): + """ + Find all concordance lines given the query word. + + Provided with a list of words, these will be found as a phrase. + """ + if isinstance(word, list): + phrase = word + else: + phrase = [word] + + half_width = (width - len(" ".join(phrase)) - 2) // 2 + context = width // 4 # approx number of words of context + + # Find the instances of the word to create the ConcordanceLine + concordance_list = [] + offsets = self.offsets(phrase[0]) + for i, word in enumerate(phrase[1:]): + word_offsets = {offset - i - 1 for offset in self.offsets(word)} + offsets = sorted(word_offsets.intersection(offsets)) + if offsets: + for i in offsets: + query_word = " ".join(self._tokens[i : i + len(phrase)]) + # Find the context of query word. + left_context = self._tokens[max(0, i - context) : i] + right_context = self._tokens[i + len(phrase) : i + context] + # Create the pretty lines with the query_word in the middle. + left_print = " ".join(left_context)[-half_width:] + right_print = " ".join(right_context)[:half_width] + # The WYSIWYG line of the concordance. 
+ line_print = " ".join([left_print, query_word, right_print]) + # Create the ConcordanceLine + concordance_line = ConcordanceLine( + left_context, + query_word, + right_context, + i, + left_print, + right_print, + line_print, + ) + concordance_list.append(concordance_line) + return concordance_list + + def print_concordance(self, word, width=80, lines=25): + """ + Print concordance lines given the query word. + :param word: The target word or phrase (a list of strings) + :type word: str or list + :param lines: The number of lines to display (default=25) + :type lines: int + :param width: The width of each line, in characters (default=80) + :type width: int + :param save: The option to save the concordance. + :type save: bool + """ + concordance_list = self.find_concordance(word, width=width) + + if not concordance_list: + print("no matches") + else: + lines = min(lines, len(concordance_list)) + print(f"Displaying {lines} of {len(concordance_list)} matches:") + for i, concordance_line in enumerate(concordance_list[:lines]): + print(concordance_line.line) + + +class TokenSearcher: + """ + A class that makes it easier to use regular expressions to search + over tokenized strings. The tokenized string is converted to a + string where tokens are marked with angle brackets -- e.g., + ``''``. The regular expression + passed to the ``findall()`` method is modified to treat angle + brackets as non-capturing parentheses, in addition to matching the + token boundaries; and to have ``'.'`` not match the angle brackets. + """ + + def __init__(self, tokens): + self._raw = "".join("<" + w + ">" for w in tokens) + + def findall(self, regexp): + """ + Find instances of the regular expression in the text. + The text is a list of tokens, and a regexp pattern to match + a single token must be surrounded by angle brackets. E.g. + + >>> from nltk.text import TokenSearcher + >>> from nltk.book import text1, text5, text9 + >>> text5.findall("<.*><.*>") + you rule bro; telling you bro; u twizted bro + >>> text1.findall("(<.*>)") + monied; nervous; dangerous; white; white; white; pious; queer; good; + mature; white; Cape; great; wise; wise; butterless; white; fiendish; + pale; furious; better; certain; complete; dismasted; younger; brave; + brave; brave; brave + >>> text9.findall("{3,}") + thread through those; the thought that; that the thing; the thing + that; that that thing; through these than through; them that the; + through the thick; them that they; thought that the + + :param regexp: A regular expression + :type regexp: str + """ + # preprocess the regular expression + regexp = re.sub(r"\s", "", regexp) + regexp = re.sub(r"<", "(?:<(?:", regexp) + regexp = re.sub(r">", ")>)", regexp) + regexp = re.sub(r"(?]", regexp) + + # perform the search + hits = re.findall(regexp, self._raw) + + # Sanity check + for h in hits: + if not h.startswith("<") and h.endswith(">"): + raise ValueError("Bad regexp for TokenSearcher.findall") + + # postprocess the output + hits = [h[1:-1].split("><") for h in hits] + return hits + + +class Text: + """ + A wrapper around a sequence of simple (string) tokens, which is + intended to support initial exploration of texts (via the + interactive console). Its methods perform a variety of analyses + on the text's contexts (e.g., counting, concordancing, collocation + discovery), and display the results. If you wish to write a + program which makes use of these analyses, then you should bypass + the ``Text`` class, and use the appropriate analysis function or + class directly instead. 
+ + A ``Text`` is typically initialized from a given document or + corpus. E.g.: + + >>> import nltk.corpus + >>> from nltk.text import Text + >>> moby = Text(nltk.corpus.gutenberg.words('melville-moby_dick.txt')) + + """ + + # This defeats lazy loading, but makes things faster. This + # *shouldn't* be necessary because the corpus view *should* be + # doing intelligent caching, but without this it's running slow. + # Look into whether the caching is working correctly. + _COPY_TOKENS = True + + def __init__(self, tokens, name=None): + """ + Create a Text object. + + :param tokens: The source text. + :type tokens: sequence of str + """ + if self._COPY_TOKENS: + tokens = list(tokens) + self.tokens = tokens + + if name: + self.name = name + elif "]" in tokens[:20]: + end = tokens[:20].index("]") + self.name = " ".join(str(tok) for tok in tokens[1:end]) + else: + self.name = " ".join(str(tok) for tok in tokens[:8]) + "..." + + # //////////////////////////////////////////////////////////// + # Support item & slice access + # //////////////////////////////////////////////////////////// + + def __getitem__(self, i): + return self.tokens[i] + + def __len__(self): + return len(self.tokens) + + # //////////////////////////////////////////////////////////// + # Interactive console methods + # //////////////////////////////////////////////////////////// + + def concordance(self, word, width=79, lines=25): + """ + Prints a concordance for ``word`` with the specified context window. + Word matching is not case-sensitive. + + :param word: The target word or phrase (a list of strings) + :type word: str or list + :param width: The width of each line, in characters (default=80) + :type width: int + :param lines: The number of lines to display (default=25) + :type lines: int + + :seealso: ``ConcordanceIndex`` + """ + if "_concordance_index" not in self.__dict__: + self._concordance_index = ConcordanceIndex( + self.tokens, key=lambda s: s.lower() + ) + + return self._concordance_index.print_concordance(word, width, lines) + + def concordance_list(self, word, width=79, lines=25): + """ + Generate a concordance for ``word`` with the specified context window. + Word matching is not case-sensitive. + + :param word: The target word or phrase (a list of strings) + :type word: str or list + :param width: The width of each line, in characters (default=80) + :type width: int + :param lines: The number of lines to display (default=25) + :type lines: int + + :seealso: ``ConcordanceIndex`` + """ + if "_concordance_index" not in self.__dict__: + self._concordance_index = ConcordanceIndex( + self.tokens, key=lambda s: s.lower() + ) + return self._concordance_index.find_concordance(word, width)[:lines] + + def collocation_list(self, num=20, window_size=2): + """ + Return collocations derived from the text, ignoring stopwords. + + >>> from nltk.book import text4 + >>> text4.collocation_list()[:2] + [('United', 'States'), ('fellow', 'citizens')] + + :param num: The maximum number of collocations to return. 
+ :type num: int + :param window_size: The number of tokens spanned by a collocation (default=2) + :type window_size: int + :rtype: list(tuple(str, str)) + """ + if not ( + "_collocations" in self.__dict__ + and self._num == num + and self._window_size == window_size + ): + self._num = num + self._window_size = window_size + + # print("Building collocations list") + from nltk.corpus import stopwords + + ignored_words = stopwords.words("english") + finder = BigramCollocationFinder.from_words(self.tokens, window_size) + finder.apply_freq_filter(2) + finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words) + bigram_measures = BigramAssocMeasures() + self._collocations = list( + finder.nbest(bigram_measures.likelihood_ratio, num) + ) + return self._collocations + + def collocations(self, num=20, window_size=2): + """ + Print collocations derived from the text, ignoring stopwords. + + >>> from nltk.book import text4 + >>> text4.collocations() # doctest: +NORMALIZE_WHITESPACE + United States; fellow citizens; years ago; four years; Federal + Government; General Government; American people; Vice President; God + bless; Chief Justice; one another; fellow Americans; Old World; + Almighty God; Fellow citizens; Chief Magistrate; every citizen; Indian + tribes; public debt; foreign nations + + + :param num: The maximum number of collocations to print. + :type num: int + :param window_size: The number of tokens spanned by a collocation (default=2) + :type window_size: int + """ + + collocation_strings = [ + w1 + " " + w2 for w1, w2 in self.collocation_list(num, window_size) + ] + print(tokenwrap(collocation_strings, separator="; ")) + + def count(self, word): + """ + Count the number of times this word appears in the text. + """ + return self.tokens.count(word) + + def index(self, word): + """ + Find the index of the first occurrence of the word in the text. + """ + return self.tokens.index(word) + + def readability(self, method): + # code from nltk_contrib.readability + raise NotImplementedError + + def similar(self, word, num=20): + """ + Distributional similarity: find other words which appear in the + same contexts as the specified word; list most similar words first. + + :param word: The word used to seed the similarity search + :type word: str + :param num: The number of words to generate (default=20) + :type num: int + :seealso: ContextIndex.similar_words() + """ + if "_word_context_index" not in self.__dict__: + # print('Building word-context index...') + self._word_context_index = ContextIndex( + self.tokens, filter=lambda x: x.isalpha(), key=lambda s: s.lower() + ) + + # words = self._word_context_index.similar_words(word, num) + + word = word.lower() + wci = self._word_context_index._word_to_contexts + if word in wci.conditions(): + contexts = set(wci[word]) + fd = Counter( + w + for w in wci.conditions() + for c in wci[w] + if c in contexts and not w == word + ) + words = [w for w, _ in fd.most_common(num)] + print(tokenwrap(words)) + else: + print("No matches") + + def common_contexts(self, words, num=20): + """ + Find contexts where the specified words appear; list + most frequent common contexts first. 
+ + :param words: The words used to seed the similarity search + :type words: str + :param num: The number of words to generate (default=20) + :type num: int + :seealso: ContextIndex.common_contexts() + """ + if "_word_context_index" not in self.__dict__: + # print('Building word-context index...') + self._word_context_index = ContextIndex( + self.tokens, key=lambda s: s.lower() + ) + + try: + fd = self._word_context_index.common_contexts(words, True) + if not fd: + print("No common contexts were found") + else: + ranked_contexts = [w for w, _ in fd.most_common(num)] + print(tokenwrap(w1 + "_" + w2 for w1, w2 in ranked_contexts)) + + except ValueError as e: + print(e) + + def dispersion_plot(self, words): + """ + Produce a plot showing the distribution of the words through the text. + Requires pylab to be installed. + + :param words: The words to be plotted + :type words: list(str) + :seealso: nltk.draw.dispersion_plot() + """ + from nltk.draw import dispersion_plot + + dispersion_plot(self, words) + + def _train_default_ngram_lm(self, tokenized_sents, n=3): + train_data, padded_sents = padded_everygram_pipeline(n, tokenized_sents) + model = MLE(order=n) + model.fit(train_data, padded_sents) + return model + + def generate(self, length=100, text_seed=None, random_seed=42): + """ + Print random text, generated using a trigram language model. + See also `help(nltk.lm)`. + + :param length: The length of text to generate (default=100) + :type length: int + + :param text_seed: Generation can be conditioned on preceding context. + :type text_seed: list(str) + + :param random_seed: A random seed or an instance of `random.Random`. If provided, + makes the random sampling part of generation reproducible. (default=42) + :type random_seed: int + """ + # Create the model when using it the first time. + self._tokenized_sents = [ + sent.split(" ") for sent in sent_tokenize(" ".join(self.tokens)) + ] + if not hasattr(self, "_trigram_model"): + print("Building ngram index...", file=sys.stderr) + self._trigram_model = self._train_default_ngram_lm( + self._tokenized_sents, n=3 + ) + + generated_tokens = [] + + assert length > 0, "The `length` must be more than 0." + while len(generated_tokens) < length: + for idx, token in enumerate( + self._trigram_model.generate( + length, text_seed=text_seed, random_seed=random_seed + ) + ): + if token == "<s>": + continue + if token == "</s>": + break + generated_tokens.append(token) + random_seed += 1 + + prefix = " ".join(text_seed) + " " if text_seed else "" + output_str = prefix + tokenwrap(generated_tokens[:length]) + print(output_str) + return output_str + + def plot(self, *args): + """ + See documentation for FreqDist.plot() + :seealso: nltk.prob.FreqDist.plot() + """ + return self.vocab().plot(*args) + + def vocab(self): + """ + :seealso: nltk.prob.FreqDist + """ + if "_vocab" not in self.__dict__: + # print("Building vocabulary index...") + self._vocab = FreqDist(self) + return self._vocab + + def findall(self, regexp): + """ + Find instances of the regular expression in the text. + The text is a list of tokens, and a regexp pattern to match + a single token must be surrounded by angle brackets. E.g.
+ + >>> from nltk.book import text1, text5, text9 + >>> text5.findall("<.*><.*><bro>") + you rule bro; telling you bro; u twizted bro + >>> text1.findall("<a>(<.*>)<man>") + monied; nervous; dangerous; white; white; white; pious; queer; good; + mature; white; Cape; great; wise; wise; butterless; white; fiendish; + pale; furious; better; certain; complete; dismasted; younger; brave; + brave; brave; brave + >>> text9.findall("<th.*>{3,}") + thread through those; the thought that; that the thing; the thing + that; that that thing; through these than through; them that the; + through the thick; them that they; thought that the + + :param regexp: A regular expression + :type regexp: str + """ + + if "_token_searcher" not in self.__dict__: + self._token_searcher = TokenSearcher(self) + + hits = self._token_searcher.findall(regexp) + hits = [" ".join(h) for h in hits] + print(tokenwrap(hits, "; ")) + + # //////////////////////////////////////////////////////////// + # Helper Methods + # //////////////////////////////////////////////////////////// + + _CONTEXT_RE = re.compile(r"\w+|[\.\!\?]") + + def _context(self, tokens, i): + """ + One left & one right token, both case-normalized. Skip over + non-sentence-final punctuation. Used by the ``ContextIndex`` + that is created for ``similar()`` and ``common_contexts()``. + """ + # Left context + j = i - 1 + while j >= 0 and not self._CONTEXT_RE.match(tokens[j]): + j -= 1 + left = tokens[j] if j != 0 else "*START*" + + # Right context + j = i + 1 + while j < len(tokens) and not self._CONTEXT_RE.match(tokens[j]): + j += 1 + right = tokens[j] if j != len(tokens) else "*END*" + + return (left, right) + + # //////////////////////////////////////////////////////////// + # String Display + # //////////////////////////////////////////////////////////// + + def __str__(self): + return "<Text: %s>" % self.name + + def __repr__(self): + return "<Text: %s>" % self.name + + +# Prototype only; this approach will be slow to load +class TextCollection(Text): + """A collection of texts, which can be loaded with list of texts, or + with a corpus consisting of one or more texts, and which supports + counting, concordancing, collocation discovery, etc. Initialize a + TextCollection as follows: + + >>> import nltk.corpus + >>> from nltk.text import TextCollection + >>> from nltk.book import text1, text2, text3 + >>> gutenberg = TextCollection(nltk.corpus.gutenberg) + >>> mytexts = TextCollection([text1, text2, text3]) + + Iterating over a TextCollection produces all the tokens of all the + texts in order. + """ + + def __init__(self, source): + if hasattr(source, "words"): # bridge to the text corpus reader + source = [source.words(f) for f in source.fileids()] + + self._texts = source + Text.__init__(self, LazyConcatenation(source)) + self._idf_cache = {} + + def tf(self, term, text): + """The frequency of the term in text.""" + return text.count(term) / len(text) + + def idf(self, term): + """The number of texts in the corpus divided by the + number of texts that the term appears in. + If a term does not appear in the corpus, 0.0 is returned.""" + # idf values are cached for performance.
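+ # i.e. idf(term) = log(len(self._texts) / matches), where `matches` is the number of + # texts containing `term`; when `matches` is 0 the value falls back to 0.0, so a term + # that never occurs cannot raise a ZeroDivisionError.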
+ idf = self._idf_cache.get(term) + if idf is None: + matches = len([True for text in self._texts if term in text]) + if len(self._texts) == 0: + raise ValueError("IDF undefined for empty document collection") + idf = log(len(self._texts) / matches) if matches else 0.0 + self._idf_cache[term] = idf + return idf + + def tf_idf(self, term, text): + return self.tf(term, text) * self.idf(term) + + +def demo(): + from nltk.corpus import brown + + text = Text(brown.words(categories="news")) + print(text) + print() + print("Concordance:") + text.concordance("news") + print() + print("Distributionally similar words:") + text.similar("news") + print() + print("Collocations:") + text.collocations() + print() + # print("Automatically generated text:") + # text.generate() + # print() + print("Dispersion plot:") + text.dispersion_plot(["news", "report", "said", "announced"]) + print() + print("Vocabulary plot:") + text.plot(50) + print() + print("Indexing:") + print("text[3]:", text[3]) + print("text[3:5]:", text[3:5]) + print("text.vocab()['news']:", text.vocab()["news"]) + + +if __name__ == "__main__": + demo() + +__all__ = [ + "ContextIndex", + "ConcordanceIndex", + "TokenSearcher", + "Text", + "TextCollection", +] diff --git a/venv/lib/python3.10/site-packages/nltk/tgrep.py b/venv/lib/python3.10/site-packages/nltk/tgrep.py new file mode 100644 index 0000000000000000000000000000000000000000..45ce2ab92629296fc52931ff12720d62aab939cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tgrep.py @@ -0,0 +1,1039 @@ +#!/usr/bin/env python +# +# Natural Language Toolkit: TGrep search +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Will Roberts +# URL: +# For license information, see LICENSE.TXT + +""" +============================================ + TGrep search implementation for NLTK trees +============================================ + +This module supports TGrep2 syntax for matching parts of NLTK Trees. +Note that many tgrep operators require the tree passed to be a +``ParentedTree``. + +External links: + +- `Tgrep tutorial `_ +- `Tgrep2 manual `_ +- `Tgrep2 source `_ + +Usage +===== + +>>> from nltk.tree import ParentedTree +>>> from nltk.tgrep import tgrep_nodes, tgrep_positions +>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))') +>>> list(tgrep_nodes('NN', [tree])) +[[ParentedTree('NN', ['dog']), ParentedTree('NN', ['cat'])]] +>>> list(tgrep_positions('NN', [tree])) +[[(0, 2), (2, 1)]] +>>> list(tgrep_nodes('DT', [tree])) +[[ParentedTree('DT', ['the']), ParentedTree('DT', ['a'])]] +>>> list(tgrep_nodes('DT $ JJ', [tree])) +[[ParentedTree('DT', ['the'])]] + +This implementation adds syntax to select nodes based on their NLTK +tree position. This syntax is ``N`` plus a Python tuple representing +the tree position. For instance, ``N()``, ``N(0,)``, ``N(0,0)`` are +valid node selectors. Example: + +>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))') +>>> tree[0,0] +ParentedTree('DT', ['the']) +>>> tree[0,0].treeposition() +(0, 0) +>>> list(tgrep_nodes('N(0,0)', [tree])) +[[ParentedTree('DT', ['the'])]] + +Caveats: +======== + +- Link modifiers: "?" and "=" are not implemented. +- Tgrep compatibility: Using "@" for "!", "{" for "<", "}" for ">" are + not implemented. +- The "=" and "~" links are not implemented. + +Known Issues: +============= + +- There are some issues with link relations involving leaf nodes + (which are represented as bare strings in NLTK trees). 
For + instance, consider the tree:: + + (S (A x)) + + The search string ``* !>> S`` should select all nodes which are not + dominated in some way by an ``S`` node (i.e., all nodes which are + not descendants of an ``S``). Clearly, in this tree, the only node + which fulfills this criterion is the top node (since it is not + dominated by anything). However, the code here will find both the + top node and the leaf node ``x``. This is because we cannot recover + the parent of the leaf, since it is stored as a bare string. + + A possible workaround, when performing this kind of search, would be + to filter out all leaf nodes. + +Implementation notes +==================== + +This implementation is (somewhat awkwardly) based on lambda functions +which are predicates on a node. A predicate is a function which is +either True or False; using a predicate function, we can identify sets +of nodes with particular properties. A predicate function, could, for +instance, return True only if a particular node has a label matching a +particular regular expression, and has a daughter node which has no +sisters. Because tgrep2 search strings can do things statefully (such +as substituting in macros, and binding nodes with node labels), the +actual predicate function is declared with three arguments:: + + pred = lambda n, m, l: return True # some logic here + +``n`` + is a node in a tree; this argument must always be given + +``m`` + contains a dictionary, mapping macro names onto predicate functions + +``l`` + is a dictionary to map node labels onto nodes in the tree + +``m`` and ``l`` are declared to default to ``None``, and so need not be +specified in a call to a predicate. Predicates which call other +predicates must always pass the value of these arguments on. The +top-level predicate (constructed by ``_tgrep_exprs_action``) binds the +macro definitions to ``m`` and initialises ``l`` to an empty dictionary. +""" + +import functools +import re + +try: + import pyparsing +except ImportError: + print("Warning: nltk.tgrep will not work without the `pyparsing` package") + print("installed.") + +import nltk.tree + + +class TgrepException(Exception): + """Tgrep exception type.""" + + pass + + +def ancestors(node): + """ + Returns the list of all nodes dominating the given tree node. + This method will not work with leaf nodes, since there is no way + to recover the parent. + """ + results = [] + try: + current = node.parent() + except AttributeError: + # if node is a leaf, we cannot retrieve its parent + return results + while current: + results.append(current) + current = current.parent() + return results + + +def unique_ancestors(node): + """ + Returns the list of all nodes dominating the given node, where + there is only a single path of descent. + """ + results = [] + try: + current = node.parent() + except AttributeError: + # if node is a leaf, we cannot retrieve its parent + return results + while current and len(current) == 1: + results.append(current) + current = current.parent() + return results + + +def _descendants(node): + """ + Returns the list of all nodes which are descended from the given + tree node in some way. + """ + try: + treepos = node.treepositions() + except AttributeError: + return [] + return [node[x] for x in treepos[1:]] + + +def _leftmost_descendants(node): + """ + Returns the set of all nodes descended in some way through + left branches from this node. 
+ """ + try: + treepos = node.treepositions() + except AttributeError: + return [] + return [node[x] for x in treepos[1:] if all(y == 0 for y in x)] + + +def _rightmost_descendants(node): + """ + Returns the set of all nodes descended in some way through + right branches from this node. + """ + try: + rightmost_leaf = max(node.treepositions()) + except AttributeError: + return [] + return [node[rightmost_leaf[:i]] for i in range(1, len(rightmost_leaf) + 1)] + + +def _istree(obj): + """Predicate to check whether `obj` is a nltk.tree.Tree.""" + return isinstance(obj, nltk.tree.Tree) + + +def _unique_descendants(node): + """ + Returns the list of all nodes descended from the given node, where + there is only a single path of descent. + """ + results = [] + current = node + while current and _istree(current) and len(current) == 1: + current = current[0] + results.append(current) + return results + + +def _before(node): + """ + Returns the set of all nodes that are before the given node. + """ + try: + pos = node.treeposition() + tree = node.root() + except AttributeError: + return [] + return [tree[x] for x in tree.treepositions() if x[: len(pos)] < pos[: len(x)]] + + +def _immediately_before(node): + """ + Returns the set of all nodes that are immediately before the given + node. + + Tree node A immediately precedes node B if the last terminal + symbol (word) produced by A immediately precedes the first + terminal symbol produced by B. + """ + try: + pos = node.treeposition() + tree = node.root() + except AttributeError: + return [] + # go "upwards" from pos until there is a place we can go to the left + idx = len(pos) - 1 + while 0 <= idx and pos[idx] == 0: + idx -= 1 + if idx < 0: + return [] + pos = list(pos[: idx + 1]) + pos[-1] -= 1 + before = tree[pos] + return [before] + _rightmost_descendants(before) + + +def _after(node): + """ + Returns the set of all nodes that are after the given node. + """ + try: + pos = node.treeposition() + tree = node.root() + except AttributeError: + return [] + return [tree[x] for x in tree.treepositions() if x[: len(pos)] > pos[: len(x)]] + + +def _immediately_after(node): + """ + Returns the set of all nodes that are immediately after the given + node. + + Tree node A immediately follows node B if the first terminal + symbol (word) produced by A immediately follows the last + terminal symbol produced by B. + """ + try: + pos = node.treeposition() + tree = node.root() + current = node.parent() + except AttributeError: + return [] + # go "upwards" from pos until there is a place we can go to the + # right + idx = len(pos) - 1 + while 0 <= idx and pos[idx] == len(current) - 1: + idx -= 1 + current = current.parent() + if idx < 0: + return [] + pos = list(pos[: idx + 1]) + pos[-1] += 1 + after = tree[pos] + return [after] + _leftmost_descendants(after) + + +def _tgrep_node_literal_value(node): + """ + Gets the string value of a given parse tree node, for comparison + using the tgrep node literal predicates. + """ + return node.label() if _istree(node) else str(node) + + +def _tgrep_macro_use_action(_s, _l, tokens): + """ + Builds a lambda function which looks up the macro name used. 
+ """ + assert len(tokens) == 1 + assert tokens[0][0] == "@" + macro_name = tokens[0][1:] + + def macro_use(n, m=None, l=None): + if m is None or macro_name not in m: + raise TgrepException(f"macro {macro_name} not defined") + return m[macro_name](n, m, l) + + return macro_use + + +def _tgrep_node_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + depending on the name of its node. + """ + if tokens[0] == "'": + # strip initial apostrophe (tgrep2 print command) + tokens = tokens[1:] + if len(tokens) > 1: + # disjunctive definition of a node name + assert list(set(tokens[1::2])) == ["|"] + # recursively call self to interpret each node name definition + tokens = [_tgrep_node_action(None, None, [node]) for node in tokens[::2]] + # capture tokens and return the disjunction + return (lambda t: lambda n, m=None, l=None: any(f(n, m, l) for f in t))(tokens) + else: + if hasattr(tokens[0], "__call__"): + # this is a previously interpreted parenthetical node + # definition (lambda function) + return tokens[0] + elif tokens[0] == "*" or tokens[0] == "__": + return lambda n, m=None, l=None: True + elif tokens[0].startswith('"'): + assert tokens[0].endswith('"') + node_lit = tokens[0][1:-1].replace('\\"', '"').replace("\\\\", "\\") + return ( + lambda s: lambda n, m=None, l=None: _tgrep_node_literal_value(n) == s + )(node_lit) + elif tokens[0].startswith("/"): + assert tokens[0].endswith("/") + node_lit = tokens[0][1:-1] + return ( + lambda r: lambda n, m=None, l=None: r.search( + _tgrep_node_literal_value(n) + ) + )(re.compile(node_lit)) + elif tokens[0].startswith("i@"): + node_func = _tgrep_node_action(_s, _l, [tokens[0][2:].lower()]) + return ( + lambda f: lambda n, m=None, l=None: f( + _tgrep_node_literal_value(n).lower() + ) + )(node_func) + else: + return ( + lambda s: lambda n, m=None, l=None: _tgrep_node_literal_value(n) == s + )(tokens[0]) + + +def _tgrep_parens_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + from a parenthetical notation. + """ + assert len(tokens) == 3 + assert tokens[0] == "(" + assert tokens[2] == ")" + return tokens[1] + + +def _tgrep_nltk_tree_pos_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + which returns true if the node is located at a specific tree + position. + """ + # recover the tuple from the parsed string + node_tree_position = tuple(int(x) for x in tokens if x.isdigit()) + # capture the node's tree position + return ( + lambda i: lambda n, m=None, l=None: ( + hasattr(n, "treeposition") and n.treeposition() == i + ) + )(node_tree_position) + + +def _tgrep_relation_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + depending on its relation to other nodes in the tree. + """ + # process negation first if needed + negated = False + if tokens[0] == "!": + negated = True + tokens = tokens[1:] + if tokens[0] == "[": + # process square-bracketed relation expressions + assert len(tokens) == 3 + assert tokens[2] == "]" + retval = tokens[1] + else: + # process operator-node relation expressions + assert len(tokens) == 2 + operator, predicate = tokens + # A < B A is the parent of (immediately dominates) B. + if operator == "<": + retval = lambda n, m=None, l=None: ( + _istree(n) and any(predicate(x, m, l) for x in n) + ) + # A > B A is the child of B. 
+ elif operator == ">": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and predicate(n.parent(), m, l) + ) + # A <, B Synonymous with A <1 B. + elif operator == "<," or operator == "<1": + retval = lambda n, m=None, l=None: ( + _istree(n) and bool(list(n)) and predicate(n[0], m, l) + ) + # A >, B Synonymous with A >1 B. + elif operator == ">," or operator == ">1": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and (n is n.parent()[0]) + and predicate(n.parent(), m, l) + ) + # A N B A is the Nth child of B (the first child is >1). + elif operator[0] == ">" and operator[1:].isdigit(): + idx = int(operator[1:]) + # capture the index parameter + retval = ( + lambda i: lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and 0 <= i < len(n.parent()) + and (n is n.parent()[i]) + and predicate(n.parent(), m, l) + ) + )(idx - 1) + # A <' B B is the last child of A (also synonymous with A <-1 B). + # A <- B B is the last child of A (synonymous with A <-1 B). + elif operator == "<'" or operator == "<-" or operator == "<-1": + retval = lambda n, m=None, l=None: ( + _istree(n) and bool(list(n)) and predicate(n[-1], m, l) + ) + # A >' B A is the last child of B (also synonymous with A >-1 B). + # A >- B A is the last child of B (synonymous with A >-1 B). + elif operator == ">'" or operator == ">-" or operator == ">-1": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and (n is n.parent()[-1]) + and predicate(n.parent(), m, l) + ) + # A <-N B B is the N th-to-last child of A (the last child is <-1). + elif operator[:2] == "<-" and operator[2:].isdigit(): + idx = -int(operator[2:]) + # capture the index parameter + retval = ( + lambda i: lambda n, m=None, l=None: ( + _istree(n) + and bool(list(n)) + and 0 <= (i + len(n)) < len(n) + and predicate(n[i + len(n)], m, l) + ) + )(idx) + # A >-N B A is the N th-to-last child of B (the last child is >-1). + elif operator[:2] == ">-" and operator[2:].isdigit(): + idx = -int(operator[2:]) + # capture the index parameter + retval = ( + lambda i: lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and 0 <= (i + len(n.parent())) < len(n.parent()) + and (n is n.parent()[i + len(n.parent())]) + and predicate(n.parent(), m, l) + ) + )(idx) + # A <: B B is the only child of A + elif operator == "<:": + retval = lambda n, m=None, l=None: ( + _istree(n) and len(n) == 1 and predicate(n[0], m, l) + ) + # A >: B A is the only child of B. + elif operator == ">:": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and len(n.parent()) == 1 + and predicate(n.parent(), m, l) + ) + # A << B A dominates B (A is an ancestor of B). + elif operator == "<<": + retval = lambda n, m=None, l=None: ( + _istree(n) and any(predicate(x, m, l) for x in _descendants(n)) + ) + # A >> B A is dominated by B (A is a descendant of B). + elif operator == ">>": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in ancestors(n) + ) + # A <<, B B is a left-most descendant of A. + elif operator == "<<," or operator == "<<1": + retval = lambda n, m=None, l=None: ( + _istree(n) and any(predicate(x, m, l) for x in _leftmost_descendants(n)) + ) + # A >>, B A is a left-most descendant of B. + elif operator == ">>,": + retval = lambda n, m=None, l=None: any( + (predicate(x, m, l) and n in _leftmost_descendants(x)) + for x in ancestors(n) + ) + # A <<' B B is a right-most descendant of A. 
+ elif operator == "<<'": + retval = lambda n, m=None, l=None: ( + _istree(n) + and any(predicate(x, m, l) for x in _rightmost_descendants(n)) + ) + # A >>' B A is a right-most descendant of B. + elif operator == ">>'": + retval = lambda n, m=None, l=None: any( + (predicate(x, m, l) and n in _rightmost_descendants(x)) + for x in ancestors(n) + ) + # A <<: B There is a single path of descent from A and B is on it. + elif operator == "<<:": + retval = lambda n, m=None, l=None: ( + _istree(n) and any(predicate(x, m, l) for x in _unique_descendants(n)) + ) + # A >>: B There is a single path of descent from B and A is on it. + elif operator == ">>:": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in unique_ancestors(n) + ) + # A . B A immediately precedes B. + elif operator == ".": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in _immediately_after(n) + ) + # A , B A immediately follows B. + elif operator == ",": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in _immediately_before(n) + ) + # A .. B A precedes B. + elif operator == "..": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in _after(n) + ) + # A ,, B A follows B. + elif operator == ",,": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in _before(n) + ) + # A $ B A is a sister of B (and A != B). + elif operator == "$" or operator == "%": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and any(predicate(x, m, l) for x in n.parent() if x is not n) + ) + # A $. B A is a sister of and immediately precedes B. + elif operator == "$." or operator == "%.": + retval = lambda n, m=None, l=None: ( + hasattr(n, "right_sibling") + and bool(n.right_sibling()) + and predicate(n.right_sibling(), m, l) + ) + # A $, B A is a sister of and immediately follows B. + elif operator == "$," or operator == "%,": + retval = lambda n, m=None, l=None: ( + hasattr(n, "left_sibling") + and bool(n.left_sibling()) + and predicate(n.left_sibling(), m, l) + ) + # A $.. B A is a sister of and precedes B. + elif operator == "$.." or operator == "%..": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and hasattr(n, "parent_index") + and bool(n.parent()) + and any(predicate(x, m, l) for x in n.parent()[n.parent_index() + 1 :]) + ) + # A $,, B A is a sister of and follows B. + elif operator == "$,," or operator == "%,,": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and hasattr(n, "parent_index") + and bool(n.parent()) + and any(predicate(x, m, l) for x in n.parent()[: n.parent_index()]) + ) + else: + raise TgrepException(f'cannot interpret tgrep operator "{operator}"') + # now return the built function + if negated: + return (lambda r: (lambda n, m=None, l=None: not r(n, m, l)))(retval) + else: + return retval + + +def _tgrep_conjunction_action(_s, _l, tokens, join_char="&"): + """ + Builds a lambda function representing a predicate on a tree node + from the conjunction of several other such lambda functions. + + This is prototypically called for expressions like + (`tgrep_rel_conjunction`):: + + < NP & < AP < VP + + where tokens is a list of predicates representing the relations + (`< NP`, `< AP`, and `< VP`), possibly with the character `&` + included (as in the example here). + + This is also called for expressions like (`tgrep_node_expr2`):: + + NP < NN + S=s < /NP/=n : s < /VP/=v : n .. 
v + + tokens[0] is a tgrep_expr predicate; tokens[1:] are an (optional) + list of segmented patterns (`tgrep_expr_labeled`, processed by + `_tgrep_segmented_pattern_action`). + """ + # filter out the ampersand + tokens = [x for x in tokens if x != join_char] + if len(tokens) == 1: + return tokens[0] + else: + return ( + lambda ts: lambda n, m=None, l=None: all( + predicate(n, m, l) for predicate in ts + ) + )(tokens) + + +def _tgrep_segmented_pattern_action(_s, _l, tokens): + """ + Builds a lambda function representing a segmented pattern. + + Called for expressions like (`tgrep_expr_labeled`):: + + =s .. =v < =n + + This is a segmented pattern, a tgrep2 expression which begins with + a node label. + + The problem is that for segemented_pattern_action (': =v < =s'), + the first element (in this case, =v) is specifically selected by + virtue of matching a particular node in the tree; to retrieve + the node, we need the label, not a lambda function. For node + labels inside a tgrep_node_expr, we need a lambda function which + returns true if the node visited is the same as =v. + + We solve this by creating two copies of a node_label_use in the + grammar; the label use inside a tgrep_expr_labeled has a separate + parse action to the pred use inside a node_expr. See + `_tgrep_node_label_use_action` and + `_tgrep_node_label_pred_use_action`. + """ + # tokens[0] is a string containing the node label + node_label = tokens[0] + # tokens[1:] is an (optional) list of predicates which must all + # hold of the bound node + reln_preds = tokens[1:] + + def pattern_segment_pred(n, m=None, l=None): + """This predicate function ignores its node argument.""" + # look up the bound node using its label + if l is None or node_label not in l: + raise TgrepException(f"node_label ={node_label} not bound in pattern") + node = l[node_label] + # match the relation predicates against the node + return all(pred(node, m, l) for pred in reln_preds) + + return pattern_segment_pred + + +def _tgrep_node_label_use_action(_s, _l, tokens): + """ + Returns the node label used to begin a tgrep_expr_labeled. See + `_tgrep_segmented_pattern_action`. + + Called for expressions like (`tgrep_node_label_use`):: + + =s + + when they appear as the first element of a `tgrep_expr_labeled` + expression (see `_tgrep_segmented_pattern_action`). + + It returns the node label. + """ + assert len(tokens) == 1 + assert tokens[0].startswith("=") + return tokens[0][1:] + + +def _tgrep_node_label_pred_use_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + which describes the use of a previously bound node label. + + Called for expressions like (`tgrep_node_label_use_pred`):: + + =s + + when they appear inside a tgrep_node_expr (for example, inside a + relation). The predicate returns true if and only if its node + argument is identical the the node looked up in the node label + dictionary using the node's label. 
+ """ + assert len(tokens) == 1 + assert tokens[0].startswith("=") + node_label = tokens[0][1:] + + def node_label_use_pred(n, m=None, l=None): + # look up the bound node using its label + if l is None or node_label not in l: + raise TgrepException(f"node_label ={node_label} not bound in pattern") + node = l[node_label] + # truth means the given node is this node + return n is node + + return node_label_use_pred + + +def _tgrep_bind_node_label_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + which can optionally bind a matching node into the tgrep2 string's + label_dict. + + Called for expressions like (`tgrep_node_expr2`):: + + /NP/ + @NP=n + """ + # tokens[0] is a tgrep_node_expr + if len(tokens) == 1: + return tokens[0] + else: + # if present, tokens[1] is the character '=', and tokens[2] is + # a tgrep_node_label, a string value containing the node label + assert len(tokens) == 3 + assert tokens[1] == "=" + node_pred = tokens[0] + node_label = tokens[2] + + def node_label_bind_pred(n, m=None, l=None): + if node_pred(n, m, l): + # bind `n` into the dictionary `l` + if l is None: + raise TgrepException( + "cannot bind node_label {}: label_dict is None".format( + node_label + ) + ) + l[node_label] = n + return True + else: + return False + + return node_label_bind_pred + + +def _tgrep_rel_disjunction_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + from the disjunction of several other such lambda functions. + """ + # filter out the pipe + tokens = [x for x in tokens if x != "|"] + if len(tokens) == 1: + return tokens[0] + elif len(tokens) == 2: + return (lambda a, b: lambda n, m=None, l=None: a(n, m, l) or b(n, m, l))( + tokens[0], tokens[1] + ) + + +def _macro_defn_action(_s, _l, tokens): + """ + Builds a dictionary structure which defines the given macro. + """ + assert len(tokens) == 3 + assert tokens[0] == "@" + return {tokens[1]: tokens[2]} + + +def _tgrep_exprs_action(_s, _l, tokens): + """ + This is the top-lebel node in a tgrep2 search string; the + predicate function it returns binds together all the state of a + tgrep2 search string. + + Builds a lambda function representing a predicate on a tree node + from the disjunction of several tgrep expressions. Also handles + macro definitions and macro name binding, and node label + definitions and node label binding. + """ + if len(tokens) == 1: + return lambda n, m=None, l=None: tokens[0](n, None, {}) + # filter out all the semicolons + tokens = [x for x in tokens if x != ";"] + # collect all macro definitions + macro_dict = {} + macro_defs = [tok for tok in tokens if isinstance(tok, dict)] + for macro_def in macro_defs: + macro_dict.update(macro_def) + # collect all tgrep expressions + tgrep_exprs = [tok for tok in tokens if not isinstance(tok, dict)] + # create a new scope for the node label dictionary + def top_level_pred(n, m=macro_dict, l=None): + label_dict = {} + # bind macro definitions and OR together all tgrep_exprs + return any(predicate(n, m, label_dict) for predicate in tgrep_exprs) + + return top_level_pred + + +def _build_tgrep_parser(set_parse_actions=True): + """ + Builds a pyparsing-based parser object for tokenizing and + interpreting tgrep search strings. 
+ """ + tgrep_op = pyparsing.Optional("!") + pyparsing.Regex("[$%,.<>][%,.<>0-9-':]*") + tgrep_qstring = pyparsing.QuotedString( + quoteChar='"', escChar="\\", unquoteResults=False + ) + tgrep_node_regex = pyparsing.QuotedString( + quoteChar="/", escChar="\\", unquoteResults=False + ) + tgrep_qstring_icase = pyparsing.Regex('i@\\"(?:[^"\\n\\r\\\\]|(?:\\\\.))*\\"') + tgrep_node_regex_icase = pyparsing.Regex("i@\\/(?:[^/\\n\\r\\\\]|(?:\\\\.))*\\/") + tgrep_node_literal = pyparsing.Regex("[^][ \r\t\n;:.,&|<>()$!@%'^=]+") + tgrep_expr = pyparsing.Forward() + tgrep_relations = pyparsing.Forward() + tgrep_parens = pyparsing.Literal("(") + tgrep_expr + ")" + tgrep_nltk_tree_pos = ( + pyparsing.Literal("N(") + + pyparsing.Optional( + pyparsing.Word(pyparsing.nums) + + "," + + pyparsing.Optional( + pyparsing.delimitedList(pyparsing.Word(pyparsing.nums), delim=",") + + pyparsing.Optional(",") + ) + ) + + ")" + ) + tgrep_node_label = pyparsing.Regex("[A-Za-z0-9]+") + tgrep_node_label_use = pyparsing.Combine("=" + tgrep_node_label) + # see _tgrep_segmented_pattern_action + tgrep_node_label_use_pred = tgrep_node_label_use.copy() + macro_name = pyparsing.Regex("[^];:.,&|<>()[$!@%'^=\r\t\n ]+") + macro_name.setWhitespaceChars("") + macro_use = pyparsing.Combine("@" + macro_name) + tgrep_node_expr = ( + tgrep_node_label_use_pred + | macro_use + | tgrep_nltk_tree_pos + | tgrep_qstring_icase + | tgrep_node_regex_icase + | tgrep_qstring + | tgrep_node_regex + | "*" + | tgrep_node_literal + ) + tgrep_node_expr2 = ( + tgrep_node_expr + + pyparsing.Literal("=").setWhitespaceChars("") + + tgrep_node_label.copy().setWhitespaceChars("") + ) | tgrep_node_expr + tgrep_node = tgrep_parens | ( + pyparsing.Optional("'") + + tgrep_node_expr2 + + pyparsing.ZeroOrMore("|" + tgrep_node_expr) + ) + tgrep_brackets = pyparsing.Optional("!") + "[" + tgrep_relations + "]" + tgrep_relation = tgrep_brackets | (tgrep_op + tgrep_node) + tgrep_rel_conjunction = pyparsing.Forward() + tgrep_rel_conjunction << ( + tgrep_relation + + pyparsing.ZeroOrMore(pyparsing.Optional("&") + tgrep_rel_conjunction) + ) + tgrep_relations << tgrep_rel_conjunction + pyparsing.ZeroOrMore( + "|" + tgrep_relations + ) + tgrep_expr << tgrep_node + pyparsing.Optional(tgrep_relations) + tgrep_expr_labeled = tgrep_node_label_use + pyparsing.Optional(tgrep_relations) + tgrep_expr2 = tgrep_expr + pyparsing.ZeroOrMore(":" + tgrep_expr_labeled) + macro_defn = ( + pyparsing.Literal("@") + pyparsing.White().suppress() + macro_name + tgrep_expr2 + ) + tgrep_exprs = ( + pyparsing.Optional(macro_defn + pyparsing.ZeroOrMore(";" + macro_defn) + ";") + + tgrep_expr2 + + pyparsing.ZeroOrMore(";" + (macro_defn | tgrep_expr2)) + + pyparsing.ZeroOrMore(";").suppress() + ) + if set_parse_actions: + tgrep_node_label_use.setParseAction(_tgrep_node_label_use_action) + tgrep_node_label_use_pred.setParseAction(_tgrep_node_label_pred_use_action) + macro_use.setParseAction(_tgrep_macro_use_action) + tgrep_node.setParseAction(_tgrep_node_action) + tgrep_node_expr2.setParseAction(_tgrep_bind_node_label_action) + tgrep_parens.setParseAction(_tgrep_parens_action) + tgrep_nltk_tree_pos.setParseAction(_tgrep_nltk_tree_pos_action) + tgrep_relation.setParseAction(_tgrep_relation_action) + tgrep_rel_conjunction.setParseAction(_tgrep_conjunction_action) + tgrep_relations.setParseAction(_tgrep_rel_disjunction_action) + macro_defn.setParseAction(_macro_defn_action) + # the whole expression is also the conjunction of two + # predicates: the first node predicate, and the remaining + # relation 
predicates + tgrep_expr.setParseAction(_tgrep_conjunction_action) + tgrep_expr_labeled.setParseAction(_tgrep_segmented_pattern_action) + tgrep_expr2.setParseAction( + functools.partial(_tgrep_conjunction_action, join_char=":") + ) + tgrep_exprs.setParseAction(_tgrep_exprs_action) + return tgrep_exprs.ignore("#" + pyparsing.restOfLine) + + +def tgrep_tokenize(tgrep_string): + """ + Tokenizes a TGrep search string into separate tokens. + """ + parser = _build_tgrep_parser(False) + if isinstance(tgrep_string, bytes): + tgrep_string = tgrep_string.decode() + return list(parser.parseString(tgrep_string)) + + +def tgrep_compile(tgrep_string): + """ + Parses (and tokenizes, if necessary) a TGrep search string into a + lambda function. + """ + parser = _build_tgrep_parser(True) + if isinstance(tgrep_string, bytes): + tgrep_string = tgrep_string.decode() + return list(parser.parseString(tgrep_string, parseAll=True))[0] + + +def treepositions_no_leaves(tree): + """ + Returns all the tree positions in the given tree which are not + leaf nodes. + """ + treepositions = tree.treepositions() + # leaves are treeposition tuples that are not prefixes of any + # other treeposition + prefixes = set() + for pos in treepositions: + for length in range(len(pos)): + prefixes.add(pos[:length]) + return [pos for pos in treepositions if pos in prefixes] + + +def tgrep_positions(pattern, trees, search_leaves=True): + """ + Return the tree positions in the trees which match the given pattern. + + :param pattern: a tgrep search pattern + :type pattern: str or output of tgrep_compile() + :param trees: a sequence of NLTK trees (usually ParentedTrees) + :type trees: iter(ParentedTree) or iter(Tree) + :param search_leaves: whether to return matching leaf nodes + :type search_leaves: bool + :rtype: iter(tree positions) + """ + + if isinstance(pattern, (bytes, str)): + pattern = tgrep_compile(pattern) + + for tree in trees: + try: + if search_leaves: + positions = tree.treepositions() + else: + positions = treepositions_no_leaves(tree) + yield [position for position in positions if pattern(tree[position])] + except AttributeError: + yield [] + + +def tgrep_nodes(pattern, trees, search_leaves=True): + """ + Return the tree nodes in the trees which match the given pattern. + + :param pattern: a tgrep search pattern + :type pattern: str or output of tgrep_compile() + :param trees: a sequence of NLTK trees (usually ParentedTrees) + :type trees: iter(ParentedTree) or iter(Tree) + :param search_leaves: whether to return matching leaf nodes + :type search_leaves: bool + :rtype: iter(tree nodes) + """ + + if isinstance(pattern, (bytes, str)): + pattern = tgrep_compile(pattern) + + for tree in trees: + try: + if search_leaves: + positions = tree.treepositions() + else: + positions = treepositions_no_leaves(tree) + yield [tree[position] for position in positions if pattern(tree[position])] + except AttributeError: + yield [] diff --git a/venv/lib/python3.10/site-packages/nltk/toolbox.py b/venv/lib/python3.10/site-packages/nltk/toolbox.py new file mode 100644 index 0000000000000000000000000000000000000000..40155cbaec4f2554a26e1762f7b86bd7eeefb5b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/toolbox.py @@ -0,0 +1,524 @@ +# Natural Language Toolkit: Toolbox Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Greg Aumann +# URL: +# For license information, see LICENSE.TXT + +""" +Module for reading, writing and manipulating +Toolbox databases and settings files. 
+""" + +import codecs +import re +from io import StringIO +from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder + +from nltk.data import PathPointer, find + + +class StandardFormat: + """ + Class for reading and processing standard format marker files and strings. + """ + + def __init__(self, filename=None, encoding=None): + self._encoding = encoding + if filename is not None: + self.open(filename) + + def open(self, sfm_file): + """ + Open a standard format marker file for sequential reading. + + :param sfm_file: name of the standard format marker input file + :type sfm_file: str + """ + if isinstance(sfm_file, PathPointer): + self._file = sfm_file.open(self._encoding) + else: + self._file = codecs.open(sfm_file, "r", self._encoding) + + def open_string(self, s): + """ + Open a standard format marker string for sequential reading. + + :param s: string to parse as a standard format marker input file + :type s: str + """ + self._file = StringIO(s) + + def raw_fields(self): + """ + Return an iterator that returns the next field in a (marker, value) + tuple. Linebreaks and trailing white space are preserved except + for the final newline in each field. + + :rtype: iter(tuple(str, str)) + """ + join_string = "\n" + line_regexp = r"^%s(?:\\(\S+)\s*)?(.*)$" + # discard a BOM in the first line + first_line_pat = re.compile(line_regexp % "(?:\xef\xbb\xbf)?") + line_pat = re.compile(line_regexp % "") + # need to get first line outside the loop for correct handling + # of the first marker if it spans multiple lines + file_iter = iter(self._file) + # PEP 479, prevent RuntimeError when StopIteration is raised inside generator + try: + line = next(file_iter) + except StopIteration: + # no more data is available, terminate the generator + return + mobj = re.match(first_line_pat, line) + mkr, line_value = mobj.groups() + value_lines = [line_value] + self.line_num = 0 + for line in file_iter: + self.line_num += 1 + mobj = re.match(line_pat, line) + line_mkr, line_value = mobj.groups() + if line_mkr: + yield (mkr, join_string.join(value_lines)) + mkr = line_mkr + value_lines = [line_value] + else: + value_lines.append(line_value) + self.line_num += 1 + yield (mkr, join_string.join(value_lines)) + + def fields( + self, + strip=True, + unwrap=True, + encoding=None, + errors="strict", + unicode_fields=None, + ): + """ + Return an iterator that returns the next field in a ``(marker, value)`` + tuple, where ``marker`` and ``value`` are unicode strings if an ``encoding`` + was specified in the ``fields()`` method. Otherwise they are non-unicode strings. + + :param strip: strip trailing whitespace from the last line of each field + :type strip: bool + :param unwrap: Convert newlines in a field to spaces. + :type unwrap: bool + :param encoding: Name of an encoding to use. If it is specified then + the ``fields()`` method returns unicode strings rather than non + unicode strings. + :type encoding: str or None + :param errors: Error handling scheme for codec. Same as the ``decode()`` + builtin string method. + :type errors: str + :param unicode_fields: Set of marker names whose values are UTF-8 encoded. + Ignored if encoding is None. If the whole file is UTF-8 encoded set + ``encoding='utf8'`` and leave ``unicode_fields`` with its default + value of None. 
+ :type unicode_fields: sequence + :rtype: iter(tuple(str, str)) + """ + if encoding is None and unicode_fields is not None: + raise ValueError("unicode_fields is set but not encoding.") + unwrap_pat = re.compile(r"\n+") + for mkr, val in self.raw_fields(): + if unwrap: + val = unwrap_pat.sub(" ", val) + if strip: + val = val.rstrip() + yield (mkr, val) + + def close(self): + """Close a previously opened standard format marker file or string.""" + self._file.close() + try: + del self.line_num + except AttributeError: + pass + + +class ToolboxData(StandardFormat): + def parse(self, grammar=None, **kwargs): + if grammar: + return self._chunk_parse(grammar=grammar, **kwargs) + else: + return self._record_parse(**kwargs) + + def _record_parse(self, key=None, **kwargs): + r""" + Returns an element tree structure corresponding to a toolbox data file with + all markers at the same level. + + Thus the following Toolbox database:: + \_sh v3.0 400 Rotokas Dictionary + \_DateStampHasFourDigitYear + + \lx kaa + \ps V.A + \ge gag + \gp nek i pas + + \lx kaa + \ps V.B + \ge strangle + \gp pasim nek + + after parsing will end up with the same structure (ignoring the extra + whitespace) as the following XML fragment after being parsed by + ElementTree:: + +
+            <toolbox_data>
+               <header>
+                  <_sh>v3.0  400  Rotokas Dictionary</_sh>
+                  <_DateStampHasFourDigitYear/>
+               </header>
+
+               <record>
+                  <lx>kaa</lx>
+                  <ps>V.A</ps>
+                  <ge>gag</ge>
+                  <gp>nek i pas</gp>
+               </record>
+
+               <record>
+                  <lx>kaa</lx>
+                  <ps>V.B</ps>
+                  <ge>strangle</ge>
+                  <gp>pasim nek</gp>
+               </record>
+            </toolbox_data>
+ + :param key: Name of key marker at the start of each record. If set to + None (the default value) the first marker that doesn't begin with + an underscore is assumed to be the key. + :type key: str + :param kwargs: Keyword arguments passed to ``StandardFormat.fields()`` + :type kwargs: dict + :rtype: ElementTree._ElementInterface + :return: contents of toolbox data divided into header and records + """ + builder = TreeBuilder() + builder.start("toolbox_data", {}) + builder.start("header", {}) + in_records = False + for mkr, value in self.fields(**kwargs): + if key is None and not in_records and mkr[0] != "_": + key = mkr + if mkr == key: + if in_records: + builder.end("record") + else: + builder.end("header") + in_records = True + builder.start("record", {}) + builder.start(mkr, {}) + builder.data(value) + builder.end(mkr) + if in_records: + builder.end("record") + else: + builder.end("header") + builder.end("toolbox_data") + return builder.close() + + def _tree2etree(self, parent): + from nltk.tree import Tree + + root = Element(parent.label()) + for child in parent: + if isinstance(child, Tree): + root.append(self._tree2etree(child)) + else: + text, tag = child + e = SubElement(root, tag) + e.text = text + return root + + def _chunk_parse(self, grammar=None, root_label="record", trace=0, **kwargs): + """ + Returns an element tree structure corresponding to a toolbox data file + parsed according to the chunk grammar. + + :type grammar: str + :param grammar: Contains the chunking rules used to parse the + database. See ``chunk.RegExp`` for documentation. + :type root_label: str + :param root_label: The node value that should be used for the + top node of the chunk structure. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + ``1`` will generate normal tracing output; and ``2`` or + higher will generate verbose tracing output. + :type kwargs: dict + :param kwargs: Keyword arguments passed to ``toolbox.StandardFormat.fields()`` + :rtype: ElementTree._ElementInterface + """ + from nltk import chunk + from nltk.tree import Tree + + cp = chunk.RegexpParser(grammar, root_label=root_label, trace=trace) + db = self.parse(**kwargs) + tb_etree = Element("toolbox_data") + header = db.find("header") + tb_etree.append(header) + for record in db.findall("record"): + parsed = cp.parse([(elem.text, elem.tag) for elem in record]) + tb_etree.append(self._tree2etree(parsed)) + return tb_etree + + +_is_value = re.compile(r"\S") + + +def to_sfm_string(tree, encoding=None, errors="strict", unicode_fields=None): + """ + Return a string with a standard format representation of the toolbox + data in tree (tree can be a toolbox database or a single record). + + :param tree: flat representation of toolbox data (whole database or single record) + :type tree: ElementTree._ElementInterface + :param encoding: Name of an encoding to use. + :type encoding: str + :param errors: Error handling scheme for codec. Same as the ``encode()`` + builtin string method. 
+ :type errors: str + :param unicode_fields: + :type unicode_fields: dict(str) or set(str) + :rtype: str + """ + if tree.tag == "record": + root = Element("toolbox_data") + root.append(tree) + tree = root + + if tree.tag != "toolbox_data": + raise ValueError("not a toolbox_data element structure") + if encoding is None and unicode_fields is not None: + raise ValueError( + "if encoding is not specified then neither should unicode_fields" + ) + l = [] + for rec in tree: + l.append("\n") + for field in rec: + mkr = field.tag + value = field.text + if encoding is not None: + if unicode_fields is not None and mkr in unicode_fields: + cur_encoding = "utf8" + else: + cur_encoding = encoding + if re.search(_is_value, value): + l.append((f"\\{mkr} {value}\n").encode(cur_encoding, errors)) + else: + l.append((f"\\{mkr}{value}\n").encode(cur_encoding, errors)) + else: + if re.search(_is_value, value): + l.append(f"\\{mkr} {value}\n") + else: + l.append(f"\\{mkr}{value}\n") + return "".join(l[1:]) + + +class ToolboxSettings(StandardFormat): + """This class is the base class for settings files.""" + + def __init__(self): + super().__init__() + + def parse(self, encoding=None, errors="strict", **kwargs): + """ + Return the contents of toolbox settings file with a nested structure. + + :param encoding: encoding used by settings file + :type encoding: str + :param errors: Error handling scheme for codec. Same as ``decode()`` builtin method. + :type errors: str + :param kwargs: Keyword arguments passed to ``StandardFormat.fields()`` + :type kwargs: dict + :rtype: ElementTree._ElementInterface + """ + builder = TreeBuilder() + for mkr, value in self.fields(encoding=encoding, errors=errors, **kwargs): + # Check whether the first char of the field marker + # indicates a block start (+) or end (-) + block = mkr[0] + if block in ("+", "-"): + mkr = mkr[1:] + else: + block = None + # Build tree on the basis of block char + if block == "+": + builder.start(mkr, {}) + builder.data(value) + elif block == "-": + builder.end(mkr) + else: + builder.start(mkr, {}) + builder.data(value) + builder.end(mkr) + return builder.close() + + +def to_settings_string(tree, encoding=None, errors="strict", unicode_fields=None): + # write XML to file + l = list() + _to_settings_string( + tree.getroot(), + l, + encoding=encoding, + errors=errors, + unicode_fields=unicode_fields, + ) + return "".join(l) + + +def _to_settings_string(node, l, **kwargs): + # write XML to file + tag = node.tag + text = node.text + if len(node) == 0: + if text: + l.append(f"\\{tag} {text}\n") + else: + l.append("\\%s\n" % tag) + else: + if text: + l.append(f"\\+{tag} {text}\n") + else: + l.append("\\+%s\n" % tag) + for n in node: + _to_settings_string(n, l, **kwargs) + l.append("\\-%s\n" % tag) + return + + +def remove_blanks(elem): + """ + Remove all elements and subelements with no text and no child elements. + + :param elem: toolbox data in an elementtree structure + :type elem: ElementTree._ElementInterface + """ + out = list() + for child in elem: + remove_blanks(child) + if child.text or len(child) > 0: + out.append(child) + elem[:] = out + + +def add_default_fields(elem, default_fields): + """ + Add blank elements and subelements specified in default_fields. 
+ + :param elem: toolbox data in an elementtree structure + :type elem: ElementTree._ElementInterface + :param default_fields: fields to add to each type of element and subelement + :type default_fields: dict(tuple) + """ + for field in default_fields.get(elem.tag, []): + if elem.find(field) is None: + SubElement(elem, field) + for child in elem: + add_default_fields(child, default_fields) + + +def sort_fields(elem, field_orders): + """ + Sort the elements and subelements in order specified in field_orders. + + :param elem: toolbox data in an elementtree structure + :type elem: ElementTree._ElementInterface + :param field_orders: order of fields for each type of element and subelement + :type field_orders: dict(tuple) + """ + order_dicts = dict() + for field, order in field_orders.items(): + order_dicts[field] = order_key = dict() + for i, subfield in enumerate(order): + order_key[subfield] = i + _sort_fields(elem, order_dicts) + + +def _sort_fields(elem, orders_dicts): + """sort the children of elem""" + try: + order = orders_dicts[elem.tag] + except KeyError: + pass + else: + tmp = sorted( + ((order.get(child.tag, 1e9), i), child) for i, child in enumerate(elem) + ) + elem[:] = [child for key, child in tmp] + for child in elem: + if len(child): + _sort_fields(child, orders_dicts) + + +def add_blank_lines(tree, blanks_before, blanks_between): + """ + Add blank lines before all elements and subelements specified in blank_before. + + :param elem: toolbox data in an elementtree structure + :type elem: ElementTree._ElementInterface + :param blank_before: elements and subelements to add blank lines before + :type blank_before: dict(tuple) + """ + try: + before = blanks_before[tree.tag] + between = blanks_between[tree.tag] + except KeyError: + for elem in tree: + if len(elem): + add_blank_lines(elem, blanks_before, blanks_between) + else: + last_elem = None + for elem in tree: + tag = elem.tag + if last_elem is not None and last_elem.tag != tag: + if tag in before and last_elem is not None: + e = last_elem.getiterator()[-1] + e.text = (e.text or "") + "\n" + else: + if tag in between: + e = last_elem.getiterator()[-1] + e.text = (e.text or "") + "\n" + if len(elem): + add_blank_lines(elem, blanks_before, blanks_between) + last_elem = elem + + +def demo(): + from itertools import islice + + # zip_path = find('corpora/toolbox.zip') + # lexicon = ToolboxData(ZipFilePathPointer(zip_path, 'toolbox/rotokas.dic')).parse() + file_path = find("corpora/toolbox/rotokas.dic") + lexicon = ToolboxData(file_path).parse() + print("first field in fourth record:") + print(lexicon[3][0].tag) + print(lexicon[3][0].text) + + print("\nfields in sequential order:") + for field in islice(lexicon.find("record"), 10): + print(field.tag, field.text) + + print("\nlx fields:") + for field in islice(lexicon.findall("record/lx"), 10): + print(field.text) + + settings = ToolboxSettings() + file_path = find("corpora/toolbox/MDF/MDF_AltH.typ") + settings.open(file_path) + # settings.open(ZipFilePathPointer(zip_path, entry='toolbox/MDF/MDF_AltH.typ')) + tree = settings.parse(unwrap=False, encoding="cp1252") + print(tree.find("expset/expMDF/rtfPageSetup/paperSize").text) + settings_tree = ElementTree(tree) + print(to_settings_string(settings_tree).encode("utf8")) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/tree/transforms.py b/venv/lib/python3.10/site-packages/nltk/tree/transforms.py new file mode 100644 index 
0000000000000000000000000000000000000000..99cd6893ce9f168ffa024f2bb8c39177617dced2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tree/transforms.py @@ -0,0 +1,338 @@ +# Natural Language Toolkit: Tree Transformations +# +# Copyright (C) 2005-2007 Oregon Graduate Institute +# Author: Nathan Bodenstab +# URL: +# For license information, see LICENSE.TXT + +r""" +A collection of methods for tree (grammar) transformations used +in parsing natural language. + +Although many of these methods are technically grammar transformations +(ie. Chomsky Norm Form), when working with treebanks it is much more +natural to visualize these modifications in a tree structure. Hence, +we will do all transformation directly to the tree itself. +Transforming the tree directly also allows us to do parent annotation. +A grammar can then be simply induced from the modified tree. + +The following is a short tutorial on the available transformations. + + 1. Chomsky Normal Form (binarization) + + It is well known that any grammar has a Chomsky Normal Form (CNF) + equivalent grammar where CNF is defined by every production having + either two non-terminals or one terminal on its right hand side. + When we have hierarchically structured data (ie. a treebank), it is + natural to view this in terms of productions where the root of every + subtree is the head (left hand side) of the production and all of + its children are the right hand side constituents. In order to + convert a tree into CNF, we simply need to ensure that every subtree + has either two subtrees as children (binarization), or one leaf node + (non-terminal). In order to binarize a subtree with more than two + children, we must introduce artificial nodes. + + There are two popular methods to convert a tree into CNF: left + factoring and right factoring. The following example demonstrates + the difference between them. Example:: + + Original Right-Factored Left-Factored + + A A A + / | \ / \ / \ + B C D ==> B A| OR A| D + / \ / \ + C D B C + + 2. Parent Annotation + + In addition to binarizing the tree, there are two standard + modifications to node labels we can do in the same traversal: parent + annotation and Markov order-N smoothing (or sibling smoothing). + + The purpose of parent annotation is to refine the probabilities of + productions by adding a small amount of context. With this simple + addition, a CYK (inside-outside, dynamic programming chart parse) + can improve from 74% to 79% accuracy. A natural generalization from + parent annotation is to grandparent annotation and beyond. The + tradeoff becomes accuracy gain vs. computational complexity. We + must also keep in mind data sparcity issues. Example:: + + Original Parent Annotation + + A A^ + / | \ / \ + B C D ==> B^
A|^ where ? is the + / \ parent of A + C^ D^ + + + 3. Markov order-N smoothing + + Markov smoothing combats data sparcity issues as well as decreasing + computational requirements by limiting the number of children + included in artificial nodes. In practice, most people use an order + 2 grammar. Example:: + + Original No Smoothing Markov order 1 Markov order 2 etc. + + __A__ A A A + / /|\ \ / \ / \ / \ + B C D E F ==> B A| ==> B A| ==> B A| + / \ / \ / \ + C ... C ... C ... + + + + Annotation decisions can be thought about in the vertical direction + (parent, grandparent, etc) and the horizontal direction (number of + siblings to keep). Parameters to the following functions specify + these values. For more information see: + + Dan Klein and Chris Manning (2003) "Accurate Unlexicalized + Parsing", ACL-03. https://www.aclweb.org/anthology/P03-1054 + + 4. Unary Collapsing + + Collapse unary productions (ie. subtrees with a single child) into a + new non-terminal (Tree node). This is useful when working with + algorithms that do not allow unary productions, yet you do not wish + to lose the parent information. Example:: + + A + | + B ==> A+B + / \ / \ + C D C D + +""" + +from nltk.tree.tree import Tree + + +def chomsky_normal_form( + tree, factor="right", horzMarkov=None, vertMarkov=0, childChar="|", parentChar="^" +): + # assume all subtrees have homogeneous children + # assume all terminals have no siblings + + # A semi-hack to have elegant looking code below. As a result, + # any subtree with a branching factor greater than 999 will be incorrectly truncated. + if horzMarkov is None: + horzMarkov = 999 + + # Traverse the tree depth-first keeping a list of ancestor nodes to the root. + # I chose not to use the tree.treepositions() method since it requires + # two traversals of the tree (one to get the positions, one to iterate + # over them) and node access time is proportional to the height of the node. + # This method is 7x faster which helps when parsing 40,000 sentences. 
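+    # Illustrative note (follows from the factoring code below, with the
+    # default childChar "|" and no parent annotation): a flat production
+    # (A B C D) is rewritten as (A B (A|<C-D> C D)) when factor="right",
+    # and as (A (A|<B-C> B C) D) when factor="left".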
+ + nodeList = [(tree, [tree.label()])] + while nodeList != []: + node, parent = nodeList.pop() + if isinstance(node, Tree): + + # parent annotation + parentString = "" + originalNode = node.label() + if vertMarkov != 0 and node != tree and isinstance(node[0], Tree): + parentString = "{}<{}>".format(parentChar, "-".join(parent)) + node.set_label(node.label() + parentString) + parent = [originalNode] + parent[: vertMarkov - 1] + + # add children to the agenda before we mess with them + for child in node: + nodeList.append((child, parent)) + + # chomsky normal form factorization + if len(node) > 2: + childNodes = [child.label() for child in node] + nodeCopy = node.copy() + node[0:] = [] # delete the children + + curNode = node + numChildren = len(nodeCopy) + for i in range(1, numChildren - 1): + if factor == "right": + newHead = "{}{}<{}>{}".format( + originalNode, + childChar, + "-".join( + childNodes[i : min([i + horzMarkov, numChildren])] + ), + parentString, + ) # create new head + newNode = Tree(newHead, []) + curNode[0:] = [nodeCopy.pop(0), newNode] + else: + newHead = "{}{}<{}>{}".format( + originalNode, + childChar, + "-".join( + childNodes[max([numChildren - i - horzMarkov, 0]) : -i] + ), + parentString, + ) + newNode = Tree(newHead, []) + curNode[0:] = [newNode, nodeCopy.pop()] + + curNode = newNode + + curNode[0:] = [child for child in nodeCopy] + + +def un_chomsky_normal_form( + tree, expandUnary=True, childChar="|", parentChar="^", unaryChar="+" +): + # Traverse the tree-depth first keeping a pointer to the parent for modification purposes. + nodeList = [(tree, [])] + while nodeList != []: + node, parent = nodeList.pop() + if isinstance(node, Tree): + # if the node contains the 'childChar' character it means that + # it is an artificial node and can be removed, although we still need + # to move its children to its parent + childIndex = node.label().find(childChar) + if childIndex != -1: + nodeIndex = parent.index(node) + parent.remove(parent[nodeIndex]) + # Generated node was on the left if the nodeIndex is 0 which + # means the grammar was left factored. We must insert the children + # at the beginning of the parent's children + if nodeIndex == 0: + parent.insert(0, node[0]) + parent.insert(1, node[1]) + else: + parent.extend([node[0], node[1]]) + + # parent is now the current node so the children of parent will be added to the agenda + node = parent + else: + parentIndex = node.label().find(parentChar) + if parentIndex != -1: + # strip the node name of the parent annotation + node.set_label(node.label()[:parentIndex]) + + # expand collapsed unary productions + if expandUnary == True: + unaryIndex = node.label().find(unaryChar) + if unaryIndex != -1: + newNode = Tree( + node.label()[unaryIndex + 1 :], [i for i in node] + ) + node.set_label(node.label()[:unaryIndex]) + node[0:] = [newNode] + + for child in node: + nodeList.append((child, node)) + + +def collapse_unary(tree, collapsePOS=False, collapseRoot=False, joinChar="+"): + """ + Collapse subtrees with a single child (ie. unary productions) + into a new non-terminal (Tree node) joined by 'joinChar'. + This is useful when working with algorithms that do not allow + unary productions, and completely removing the unary productions + would require loss of useful information. The Tree is modified + directly (since it is passed by reference) and no value is returned. + + :param tree: The Tree to be collapsed + :type tree: Tree + :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (ie. 
+ Part-of-Speech tags) since they are always unary productions + :type collapsePOS: bool + :param collapseRoot: 'False' (default) will not modify the root production + if it is unary. For the Penn WSJ treebank corpus, this corresponds + to the TOP -> productions. + :type collapseRoot: bool + :param joinChar: A string used to connect collapsed node values (default = "+") + :type joinChar: str + """ + + if collapseRoot == False and isinstance(tree, Tree) and len(tree) == 1: + nodeList = [tree[0]] + else: + nodeList = [tree] + + # depth-first traversal of tree + while nodeList != []: + node = nodeList.pop() + if isinstance(node, Tree): + if ( + len(node) == 1 + and isinstance(node[0], Tree) + and (collapsePOS == True or isinstance(node[0, 0], Tree)) + ): + node.set_label(node.label() + joinChar + node[0].label()) + node[0:] = [child for child in node[0]] + # since we assigned the child's children to the current node, + # evaluate the current node again + nodeList.append(node) + else: + for child in node: + nodeList.append(child) + + +################################################################# +# Demonstration +################################################################# + + +def demo(): + """ + A demonstration showing how each tree transform can be used. + """ + + from copy import deepcopy + + from nltk.draw.tree import draw_trees + from nltk.tree.tree import Tree + + # original tree from WSJ bracketed text + sentence = """(TOP + (S + (S + (VP + (VBN Turned) + (ADVP (RB loose)) + (PP + (IN in) + (NP + (NP (NNP Shane) (NNP Longman) (POS 's)) + (NN trading) + (NN room))))) + (, ,) + (NP (DT the) (NN yuppie) (NNS dealers)) + (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right)))) + (. .)))""" + t = Tree.fromstring(sentence, remove_empty_top_bracketing=True) + + # collapse subtrees with only one child + collapsedTree = deepcopy(t) + collapse_unary(collapsedTree) + + # convert the tree to CNF + cnfTree = deepcopy(collapsedTree) + chomsky_normal_form(cnfTree) + + # convert the tree to CNF with parent annotation (one level) and horizontal smoothing of order two + parentTree = deepcopy(collapsedTree) + chomsky_normal_form(parentTree, horzMarkov=2, vertMarkov=1) + + # convert the tree back to its original form (used to make CYK results comparable) + original = deepcopy(parentTree) + un_chomsky_normal_form(original) + + # convert tree back to bracketed text + sentence2 = original.pprint() + print(sentence) + print(sentence2) + print("Sentences the same? ", sentence == sentence2) + + draw_trees(t, collapsedTree, cnfTree, parentTree, original) + + +if __name__ == "__main__": + demo() + +__all__ = ["chomsky_normal_form", "un_chomsky_normal_form", "collapse_unary"] diff --git a/venv/lib/python3.10/site-packages/nltk/treeprettyprinter.py b/venv/lib/python3.10/site-packages/nltk/treeprettyprinter.py new file mode 100644 index 0000000000000000000000000000000000000000..ed4e766b47c123d66b7df326cbfde26a2db99363 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/treeprettyprinter.py @@ -0,0 +1,28 @@ +# Natural Language Toolkit: ASCII visualization of NLTK trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Andreas van Cranenburgh +# Peter Ljunglöf +# URL: +# For license information, see LICENSE.TXT + +""" +Pretty-printing of discontinuous trees. +Adapted from the disco-dop project, by Andreas van Cranenburgh. +https://github.com/andreasvc/disco-dop + +Interesting reference (not used for this code): +T. Eschbach et al., Orth. 
Hypergraph Drawing, Journal of +Graph Algorithms and Applications, 10(2) 141--157 (2006)149. +https://jgaa.info/accepted/2006/EschbachGuentherBecker2006.10.2.pdf +""" + +from nltk.internals import Deprecated +from nltk.tree.prettyprinter import TreePrettyPrinter as TPP + + +class TreePrettyPrinter(Deprecated, TPP): + """Import `TreePrettyPrinter` using `from nltk.tree import TreePrettyPrinter` instead.""" + + +__all__ = ["TreePrettyPrinter"] diff --git a/venv/lib/python3.10/site-packages/nltk/treetransforms.py b/venv/lib/python3.10/site-packages/nltk/treetransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..6ebc061f321c701c7851370cd00cacb4499a256c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/treetransforms.py @@ -0,0 +1,126 @@ +# Natural Language Toolkit: Tree Transformations +# +# Copyright (C) 2005-2007 Oregon Graduate Institute +# Author: Nathan Bodenstab +# URL: +# For license information, see LICENSE.TXT + +r""" +A collection of methods for tree (grammar) transformations used +in parsing natural language. + +Although many of these methods are technically grammar transformations +(ie. Chomsky Norm Form), when working with treebanks it is much more +natural to visualize these modifications in a tree structure. Hence, +we will do all transformation directly to the tree itself. +Transforming the tree directly also allows us to do parent annotation. +A grammar can then be simply induced from the modified tree. + +The following is a short tutorial on the available transformations. + + 1. Chomsky Normal Form (binarization) + + It is well known that any grammar has a Chomsky Normal Form (CNF) + equivalent grammar where CNF is defined by every production having + either two non-terminals or one terminal on its right hand side. + When we have hierarchically structured data (ie. a treebank), it is + natural to view this in terms of productions where the root of every + subtree is the head (left hand side) of the production and all of + its children are the right hand side constituents. In order to + convert a tree into CNF, we simply need to ensure that every subtree + has either two subtrees as children (binarization), or one leaf node + (non-terminal). In order to binarize a subtree with more than two + children, we must introduce artificial nodes. + + There are two popular methods to convert a tree into CNF: left + factoring and right factoring. The following example demonstrates + the difference between them. Example:: + + Original Right-Factored Left-Factored + + A A A + / | \ / \ / \ + B C D ==> B A| OR A| D + / \ / \ + C D B C + + 2. Parent Annotation + + In addition to binarizing the tree, there are two standard + modifications to node labels we can do in the same traversal: parent + annotation and Markov order-N smoothing (or sibling smoothing). + + The purpose of parent annotation is to refine the probabilities of + productions by adding a small amount of context. With this simple + addition, a CYK (inside-outside, dynamic programming chart parse) + can improve from 74% to 79% accuracy. A natural generalization from + parent annotation is to grandparent annotation and beyond. The + tradeoff becomes accuracy gain vs. computational complexity. We + must also keep in mind data sparcity issues. Example:: + + Original Parent Annotation + + A A^ + / | \ / \ + B C D ==> B^ A|^ where ? is the + / \ parent of A + C^ D^ + + + 3. 
Markov order-N smoothing + + Markov smoothing combats data sparcity issues as well as decreasing + computational requirements by limiting the number of children + included in artificial nodes. In practice, most people use an order + 2 grammar. Example:: + + Original No Smoothing Markov order 1 Markov order 2 etc. + + __A__ A A A + / /|\ \ / \ / \ / \ + B C D E F ==> B A| ==> B A| ==> B A| + / \ / \ / \ + C ... C ... C ... + + + + Annotation decisions can be thought about in the vertical direction + (parent, grandparent, etc) and the horizontal direction (number of + siblings to keep). Parameters to the following functions specify + these values. For more information see: + + Dan Klein and Chris Manning (2003) "Accurate Unlexicalized + Parsing", ACL-03. https://www.aclweb.org/anthology/P03-1054 + + 4. Unary Collapsing + + Collapse unary productions (ie. subtrees with a single child) into a + new non-terminal (Tree node). This is useful when working with + algorithms that do not allow unary productions, yet you do not wish + to lose the parent information. Example:: + + A + | + B ==> A+B + / \ / \ + C D C D + +""" + +from nltk.internals import deprecated +from nltk.tree.transforms import chomsky_normal_form as cnf +from nltk.tree.transforms import collapse_unary as cu +from nltk.tree.transforms import un_chomsky_normal_form as ucnf + +chomsky_normal_form = deprecated( + "Import using `from nltk.tree import chomsky_normal_form` instead." +)(cnf) +un_chomsky_normal_form = deprecated( + "Import using `from nltk.tree import un_chomsky_normal_form` instead." +)(ucnf) +collapse_unary = deprecated( + "Import using `from nltk.tree import collapse_unary` instead." +)(cu) + + +__all__ = ["chomsky_normal_form", "un_chomsky_normal_form", "collapse_unary"] diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ba50997227cd53f35253afe98d8356069316078 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitter_demo.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitter_demo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5340d00bb103add2caba9797069e6c366a05f45c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitter_demo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitterclient.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitterclient.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..822bec418df1af6724b1814d49f3aa744f1c78c6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/twitter/__pycache__/twitterclient.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/util.py b/venv/lib/python3.10/site-packages/nltk/util.py new file mode 100644 index 0000000000000000000000000000000000000000..4d2d96fb74f2ec375596ae8761f565351cbedf31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/util.py @@ -0,0 +1,1216 @@ +# Natural Language Toolkit: Utility functions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Eric Kafe (acyclic closures) +# URL: +# For license information, see LICENSE.TXT + +import 
inspect +import locale +import os +import pydoc +import re +import textwrap +import warnings +from collections import defaultdict, deque +from itertools import chain, combinations, islice, tee +from pprint import pprint +from urllib.request import ( + HTTPPasswordMgrWithDefaultRealm, + ProxyBasicAuthHandler, + ProxyDigestAuthHandler, + ProxyHandler, + build_opener, + getproxies, + install_opener, +) + +from nltk.collections import * +from nltk.internals import deprecated, raise_unorderable_types, slice_bounds + +###################################################################### +# Short usage message +###################################################################### + + +@deprecated("Use help(obj) instead.") +def usage(obj): + str(obj) # In case it's lazy, this will load it. + + if not isinstance(obj, type): + obj = obj.__class__ + + print(f"{obj.__name__} supports the following operations:") + for (name, method) in sorted(pydoc.allmethods(obj).items()): + if name.startswith("_"): + continue + if getattr(method, "__deprecated__", False): + continue + + try: + sig = str(inspect.signature(method)) + except ValueError as e: + # builtins sometimes don't support introspection + if "builtin" in str(e): + continue + else: + raise + + args = sig.lstrip("(").rstrip(")").split(", ") + meth = inspect.getattr_static(obj, name) + if isinstance(meth, (classmethod, staticmethod)): + name = f"cls.{name}" + elif args and args[0] == "self": + name = f"self.{name}" + args.pop(0) + print( + textwrap.fill( + f"{name}({', '.join(args)})", + initial_indent=" - ", + subsequent_indent=" " * (len(name) + 5), + ) + ) + + +########################################################################## +# IDLE +########################################################################## + + +def in_idle(): + """ + Return True if this function is run within idle. Tkinter + programs that are run in idle should never call ``Tk.mainloop``; so + this function should be used to gate all calls to ``Tk.mainloop``. + + :warning: This function works by checking ``sys.stdin``. If the + user has modified ``sys.stdin``, then it may return incorrect + results. 
+ :rtype: bool + """ + import sys + + return sys.stdin.__class__.__name__ in ("PyShell", "RPCProxy") + + +########################################################################## +# PRETTY PRINTING +########################################################################## + + +def pr(data, start=0, end=None): + """ + Pretty print a sequence of data items + + :param data: the data stream to print + :type data: sequence or iter + :param start: the start position + :type start: int + :param end: the end position + :type end: int + """ + pprint(list(islice(data, start, end))) + + +def print_string(s, width=70): + """ + Pretty print a string, breaking lines on whitespace + + :param s: the string to print, consisting of words and spaces + :type s: str + :param width: the display width + :type width: int + """ + print("\n".join(textwrap.wrap(s, width=width))) + + +def tokenwrap(tokens, separator=" ", width=70): + """ + Pretty print a list of text tokens, breaking lines on whitespace + + :param tokens: the tokens to print + :type tokens: list + :param separator: the string to use to separate tokens + :type separator: str + :param width: the display width (default=70) + :type width: int + """ + return "\n".join(textwrap.wrap(separator.join(tokens), width=width)) + + +########################################################################## +# Indexing +########################################################################## + + +class Index(defaultdict): + def __init__(self, pairs): + defaultdict.__init__(self, list) + for key, value in pairs: + self[key].append(value) + + +###################################################################### +## Regexp display (thanks to David Mertz) +###################################################################### + + +def re_show(regexp, string, left="{", right="}"): + """ + Return a string with markers surrounding the matched substrings. + Search str for substrings matching ``regexp`` and wrap the matches + with braces. This is convenient for learning about regular expressions. + + :param regexp: The regular expression. + :type regexp: str + :param string: The string being matched. + :type string: str + :param left: The left delimiter (printed before the matched substring) + :type left: str + :param right: The right delimiter (printed after the matched substring) + :type right: str + :rtype: str + """ + print(re.compile(regexp, re.M).sub(left + r"\g<0>" + right, string.rstrip())) + + +########################################################################## +# READ FROM FILE OR STRING +########################################################################## + +# recipe from David Mertz +def filestring(f): + if hasattr(f, "read"): + return f.read() + elif isinstance(f, str): + with open(f) as infile: + return infile.read() + else: + raise ValueError("Must be called with a filename or file-like object") + + +########################################################################## +# Breadth-First Search +########################################################################## + + +def breadth_first(tree, children=iter, maxdepth=-1): + """Traverse the nodes of a tree in breadth-first order. + (No check for cycles.) + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. 
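+
+    A small sketch with a toy dict-based graph (names are illustrative only):
+
+    >>> g = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
+    >>> list(breadth_first('a', children=lambda n: g[n]))
+    ['a', 'b', 'c', 'd']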
+ """ + queue = deque([(tree, 0)]) + + while queue: + node, depth = queue.popleft() + yield node + + if depth != maxdepth: + try: + queue.extend((c, depth + 1) for c in children(node)) + except TypeError: + pass + + +########################################################################## +# Graph Drawing +########################################################################## + + +def edge_closure(tree, children=iter, maxdepth=-1, verbose=False): + """Yield the edges of a graph in breadth-first order, + discarding eventual cycles. + The first argument should be the start node; + children should be a function taking as argument a graph node + and returning an iterator of the node's children. + + >>> from nltk.util import edge_closure + >>> print(list(edge_closure('A', lambda node:{'A':['B','C'], 'B':'C', 'C':'B'}[node]))) + [('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')] + """ + traversed = set() + edges = set() + queue = deque([(tree, 0)]) + while queue: + node, depth = queue.popleft() + traversed.add(node) + if depth != maxdepth: + try: + for child in children(node): + if child not in traversed: + queue.append((child, depth + 1)) + else: + if verbose: + warnings.warn( + f"Discarded redundant search for {child} at depth {depth + 1}", + stacklevel=2, + ) + edge = (node, child) + if edge not in edges: + yield edge + edges.add(edge) + except TypeError: + pass + + +def edges2dot(edges, shapes=None, attr=None): + """ + :param edges: the set (or list) of edges of a directed graph. + + :return dot_string: a representation of 'edges' as a string in the DOT + graph language, which can be converted to an image by the 'dot' program + from the Graphviz package, or nltk.parse.dependencygraph.dot2img(dot_string). + + :param shapes: dictionary of strings that trigger a specified shape. + :param attr: dictionary with global graph attributes + + >>> import nltk + >>> from nltk.util import edges2dot + >>> print(edges2dot([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')])) + digraph G { + "A" -> "B"; + "A" -> "C"; + "B" -> "C"; + "C" -> "B"; + } + + """ + if not shapes: + shapes = dict() + if not attr: + attr = dict() + + dot_string = "digraph G {\n" + + for pair in attr.items(): + dot_string += f"{pair[0]} = {pair[1]};\n" + + for edge in edges: + for shape in shapes.items(): + for node in range(2): + if shape[0] in repr(edge[node]): + dot_string += f'"{edge[node]}" [shape = {shape[1]}];\n' + dot_string += f'"{edge[0]}" -> "{edge[1]}";\n' + + dot_string += "}\n" + return dot_string + + +def unweighted_minimum_spanning_digraph(tree, children=iter, shapes=None, attr=None): + """ + + Build a Minimum Spanning Tree (MST) of an unweighted graph, + by traversing the nodes of a tree in breadth-first order, + discarding eventual cycles. + + Return a representation of this MST as a string in the DOT graph language, + which can be converted to an image by the 'dot' program from the Graphviz + package, or nltk.parse.dependencygraph.dot2img(dot_string). + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. 
+ + >>> import nltk + >>> wn=nltk.corpus.wordnet + >>> from nltk.util import unweighted_minimum_spanning_digraph as umsd + >>> print(umsd(wn.synset('bound.a.01'), lambda s:s.also_sees())) + digraph G { + "Synset('bound.a.01')" -> "Synset('unfree.a.02')"; + "Synset('unfree.a.02')" -> "Synset('confined.a.02')"; + "Synset('unfree.a.02')" -> "Synset('dependent.a.01')"; + "Synset('unfree.a.02')" -> "Synset('restricted.a.01')"; + "Synset('restricted.a.01')" -> "Synset('classified.a.02')"; + } + + """ + return edges2dot( + edge_closure( + tree, lambda node: unweighted_minimum_spanning_dict(tree, children)[node] + ), + shapes, + attr, + ) + + +########################################################################## +# Breadth-First / Depth-first Searches with Cycle Detection +########################################################################## + + +def acyclic_breadth_first(tree, children=iter, maxdepth=-1): + """Traverse the nodes of a tree in breadth-first order, + discarding eventual cycles. + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. + """ + traversed = set() + queue = deque([(tree, 0)]) + while queue: + node, depth = queue.popleft() + yield node + traversed.add(node) + if depth != maxdepth: + try: + for child in children(node): + if child not in traversed: + queue.append((child, depth + 1)) + else: + warnings.warn( + "Discarded redundant search for {} at depth {}".format( + child, depth + 1 + ), + stacklevel=2, + ) + except TypeError: + pass + + +def acyclic_depth_first(tree, children=iter, depth=-1, cut_mark=None, traversed=None): + """Traverse the nodes of a tree in depth-first order, + discarding eventual cycles within any branch, + adding cut_mark (when specified) if cycles were truncated. + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. 
+ + Catches all cycles: + + >>> import nltk + >>> from nltk.util import acyclic_depth_first as acyclic_tree + >>> wn=nltk.corpus.wordnet + >>> from pprint import pprint + >>> pprint(acyclic_tree(wn.synset('dog.n.01'), lambda s:s.hypernyms(),cut_mark='...')) + [Synset('dog.n.01'), + [Synset('canine.n.02'), + [Synset('carnivore.n.01'), + [Synset('placental.n.01'), + [Synset('mammal.n.01'), + [Synset('vertebrate.n.01'), + [Synset('chordate.n.01'), + [Synset('animal.n.01'), + [Synset('organism.n.01'), + [Synset('living_thing.n.01'), + [Synset('whole.n.02'), + [Synset('object.n.01'), + [Synset('physical_entity.n.01'), + [Synset('entity.n.01')]]]]]]]]]]]]], + [Synset('domestic_animal.n.01'), "Cycle(Synset('animal.n.01'),-3,...)"]] + """ + if traversed is None: + traversed = {tree} + out_tree = [tree] + if depth != 0: + try: + for child in children(tree): + if child not in traversed: + # Recurse with a common "traversed" set for all children: + traversed.add(child) + out_tree += [ + acyclic_depth_first( + child, children, depth - 1, cut_mark, traversed + ) + ] + else: + warnings.warn( + "Discarded redundant search for {} at depth {}".format( + child, depth - 1 + ), + stacklevel=3, + ) + if cut_mark: + out_tree += [f"Cycle({child},{depth - 1},{cut_mark})"] + except TypeError: + pass + elif cut_mark: + out_tree += [cut_mark] + return out_tree + + +def acyclic_branches_depth_first( + tree, children=iter, depth=-1, cut_mark=None, traversed=None +): + """Traverse the nodes of a tree in depth-first order, + discarding eventual cycles within the same branch, + but keep duplicate paths in different branches. + Add cut_mark (when defined) if cycles were truncated. + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. 
+ + Catches only only cycles within the same branch, + but keeping cycles from different branches: + + >>> import nltk + >>> from nltk.util import acyclic_branches_depth_first as tree + >>> wn=nltk.corpus.wordnet + >>> from pprint import pprint + >>> pprint(tree(wn.synset('certified.a.01'), lambda s:s.also_sees(), cut_mark='...', depth=4)) + [Synset('certified.a.01'), + [Synset('authorized.a.01'), + [Synset('lawful.a.01'), + [Synset('legal.a.01'), + "Cycle(Synset('lawful.a.01'),0,...)", + [Synset('legitimate.a.01'), '...']], + [Synset('straight.a.06'), + [Synset('honest.a.01'), '...'], + "Cycle(Synset('lawful.a.01'),0,...)"]], + [Synset('legitimate.a.01'), + "Cycle(Synset('authorized.a.01'),1,...)", + [Synset('legal.a.01'), + [Synset('lawful.a.01'), '...'], + "Cycle(Synset('legitimate.a.01'),0,...)"], + [Synset('valid.a.01'), + "Cycle(Synset('legitimate.a.01'),0,...)", + [Synset('reasonable.a.01'), '...']]], + [Synset('official.a.01'), "Cycle(Synset('authorized.a.01'),1,...)"]], + [Synset('documented.a.01')]] + """ + if traversed is None: + traversed = {tree} + out_tree = [tree] + if depth != 0: + try: + for child in children(tree): + if child not in traversed: + # Recurse with a different "traversed" set for each child: + out_tree += [ + acyclic_branches_depth_first( + child, + children, + depth - 1, + cut_mark, + traversed.union({child}), + ) + ] + else: + warnings.warn( + "Discarded redundant search for {} at depth {}".format( + child, depth - 1 + ), + stacklevel=3, + ) + if cut_mark: + out_tree += [f"Cycle({child},{depth - 1},{cut_mark})"] + except TypeError: + pass + elif cut_mark: + out_tree += [cut_mark] + return out_tree + + +def acyclic_dic2tree(node, dic): + """Convert acyclic dictionary 'dic', where the keys are nodes, and the + values are lists of children, to output tree suitable for pprint(), + starting at root 'node', with subtrees as nested lists.""" + return [node] + [acyclic_dic2tree(child, dic) for child in dic[node]] + + +def unweighted_minimum_spanning_dict(tree, children=iter): + """ + Output a dictionary representing a Minimum Spanning Tree (MST) + of an unweighted graph, by traversing the nodes of a tree in + breadth-first order, discarding eventual cycles. + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. 
+ + >>> import nltk + >>> from nltk.corpus import wordnet as wn + >>> from nltk.util import unweighted_minimum_spanning_dict as umsd + >>> from pprint import pprint + >>> pprint(umsd(wn.synset('bound.a.01'), lambda s:s.also_sees())) + {Synset('bound.a.01'): [Synset('unfree.a.02')], + Synset('classified.a.02'): [], + Synset('confined.a.02'): [], + Synset('dependent.a.01'): [], + Synset('restricted.a.01'): [Synset('classified.a.02')], + Synset('unfree.a.02'): [Synset('confined.a.02'), + Synset('dependent.a.01'), + Synset('restricted.a.01')]} + + """ + traversed = set() # Empty set of traversed nodes + queue = deque([tree]) # Initialize queue + agenda = {tree} # Set of all nodes ever queued + mstdic = {} # Empty MST dictionary + while queue: + node = queue.popleft() # Node is not yet in the MST dictionary, + mstdic[node] = [] # so add it with an empty list of children + if node not in traversed: # Avoid cycles + traversed.add(node) + for child in children(node): + if child not in agenda: # Queue nodes only once + mstdic[node].append(child) # Add child to the MST + queue.append(child) # Add child to queue + agenda.add(child) + return mstdic + + +def unweighted_minimum_spanning_tree(tree, children=iter): + """ + Output a Minimum Spanning Tree (MST) of an unweighted graph, + by traversing the nodes of a tree in breadth-first order, + discarding eventual cycles. + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. + + >>> import nltk + >>> from nltk.util import unweighted_minimum_spanning_tree as mst + >>> wn=nltk.corpus.wordnet + >>> from pprint import pprint + >>> pprint(mst(wn.synset('bound.a.01'), lambda s:s.also_sees())) + [Synset('bound.a.01'), + [Synset('unfree.a.02'), + [Synset('confined.a.02')], + [Synset('dependent.a.01')], + [Synset('restricted.a.01'), [Synset('classified.a.02')]]]] + """ + return acyclic_dic2tree(tree, unweighted_minimum_spanning_dict(tree, children)) + + +########################################################################## +# Guess Character Encoding +########################################################################## + +# adapted from io.py in the docutils extension module (https://docutils.sourceforge.io/) +# http://www.pyzine.com/Issue008/Section_Articles/article_Encodings.html + + +def guess_encoding(data): + """ + Given a byte string, attempt to decode it. + Tries the standard 'UTF8' and 'latin-1' encodings, + Plus several gathered from locale information. + + The calling program *must* first call:: + + locale.setlocale(locale.LC_ALL, '') + + If successful it returns ``(decoded_unicode, successful_encoding)``. + If unsuccessful it raises a ``UnicodeError``. 
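+
+    A small example ('utf-8' is tried first, so UTF-8 input decodes
+    immediately):
+
+    >>> guess_encoding('café'.encode('utf8'))
+    ('café', 'utf-8')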
+ """ + successful_encoding = None + # we make 'utf-8' the first encoding + encodings = ["utf-8"] + # + # next we add anything we can learn from the locale + try: + encodings.append(locale.nl_langinfo(locale.CODESET)) + except AttributeError: + pass + try: + encodings.append(locale.getlocale()[1]) + except (AttributeError, IndexError): + pass + try: + encodings.append(locale.getdefaultlocale()[1]) + except (AttributeError, IndexError): + pass + # + # we try 'latin-1' last + encodings.append("latin-1") + for enc in encodings: + # some of the locale calls + # may have returned None + if not enc: + continue + try: + decoded = str(data, enc) + successful_encoding = enc + + except (UnicodeError, LookupError): + pass + else: + break + if not successful_encoding: + raise UnicodeError( + "Unable to decode input data. " + "Tried the following encodings: %s." + % ", ".join([repr(enc) for enc in encodings if enc]) + ) + else: + return (decoded, successful_encoding) + + +########################################################################## +# Remove repeated elements from a list deterministcally +########################################################################## + + +def unique_list(xs): + seen = set() + # not seen.add(x) here acts to make the code shorter without using if statements, seen.add(x) always returns None. + return [x for x in xs if x not in seen and not seen.add(x)] + + +########################################################################## +# Invert a dictionary +########################################################################## + + +def invert_dict(d): + inverted_dict = defaultdict(list) + for key in d: + if hasattr(d[key], "__iter__"): + for term in d[key]: + inverted_dict[term].append(key) + else: + inverted_dict[d[key]] = key + return inverted_dict + + +########################################################################## +# Utilities for directed graphs: transitive closure, and inversion +# The graph is represented as a dictionary of sets +########################################################################## + + +def transitive_closure(graph, reflexive=False): + """ + Calculate the transitive closure of a directed graph, + optionally the reflexive transitive closure. + + The algorithm is a slight modification of the "Marking Algorithm" of + Ioannidis & Ramakrishnan (1998) "Efficient Transitive Closure Algorithms". + + :param graph: the initial graph, represented as a dictionary of sets + :type graph: dict(set) + :param reflexive: if set, also make the closure reflexive + :type reflexive: bool + :rtype: dict(set) + """ + if reflexive: + base_set = lambda k: {k} + else: + base_set = lambda k: set() + # The graph U_i in the article: + agenda_graph = {k: graph[k].copy() for k in graph} + # The graph M_i in the article: + closure_graph = {k: base_set(k) for k in graph} + for i in graph: + agenda = agenda_graph[i] + closure = closure_graph[i] + while agenda: + j = agenda.pop() + closure.add(j) + closure |= closure_graph.setdefault(j, base_set(j)) + agenda |= agenda_graph.get(j, base_set(j)) + agenda -= closure + return closure_graph + + +def invert_graph(graph): + """ + Inverts a directed graph. 
+ + :param graph: the graph, represented as a dictionary of sets + :type graph: dict(set) + :return: the inverted graph + :rtype: dict(set) + """ + inverted = {} + for key in graph: + for value in graph[key]: + inverted.setdefault(value, set()).add(key) + return inverted + + +########################################################################## +# HTML Cleaning +########################################################################## + + +def clean_html(html): + raise NotImplementedError( + "To remove HTML markup, use BeautifulSoup's get_text() function" + ) + + +def clean_url(url): + raise NotImplementedError( + "To remove HTML markup, use BeautifulSoup's get_text() function" + ) + + +########################################################################## +# FLATTEN LISTS +########################################################################## + + +def flatten(*args): + """ + Flatten a list. + + >>> from nltk.util import flatten + >>> flatten(1, 2, ['b', 'a' , ['c', 'd']], 3) + [1, 2, 'b', 'a', 'c', 'd', 3] + + :param args: items and lists to be combined into a single list + :rtype: list + """ + + x = [] + for l in args: + if not isinstance(l, (list, tuple)): + l = [l] + for item in l: + if isinstance(item, (list, tuple)): + x.extend(flatten(item)) + else: + x.append(item) + return x + + +########################################################################## +# Ngram iteration +########################################################################## + + +def pad_sequence( + sequence, + n, + pad_left=False, + pad_right=False, + left_pad_symbol=None, + right_pad_symbol=None, +): + """ + Returns a padded sequence of items before ngram extraction. + + >>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='', right_pad_symbol='')) + ['', 1, 2, 3, 4, 5, ''] + >>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='')) + ['', 1, 2, 3, 4, 5] + >>> list(pad_sequence([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='')) + [1, 2, 3, 4, 5, ''] + + :param sequence: the source data to be padded + :type sequence: sequence or iter + :param n: the degree of the ngrams + :type n: int + :param pad_left: whether the ngrams should be left-padded + :type pad_left: bool + :param pad_right: whether the ngrams should be right-padded + :type pad_right: bool + :param left_pad_symbol: the symbol to use for left padding (default is None) + :type left_pad_symbol: any + :param right_pad_symbol: the symbol to use for right padding (default is None) + :type right_pad_symbol: any + :rtype: sequence or iter + """ + sequence = iter(sequence) + if pad_left: + sequence = chain((left_pad_symbol,) * (n - 1), sequence) + if pad_right: + sequence = chain(sequence, (right_pad_symbol,) * (n - 1)) + return sequence + + +# add a flag to pad the sequence so we get peripheral ngrams? + + +def ngrams(sequence, n, **kwargs): + """ + Return the ngrams generated from a sequence of items, as an iterator. + For example: + + >>> from nltk.util import ngrams + >>> list(ngrams([1,2,3,4,5], 3)) + [(1, 2, 3), (2, 3, 4), (3, 4, 5)] + + Wrap with list for a list version of this function. Set pad_left + or pad_right to true in order to get additional ngrams: + + >>> list(ngrams([1,2,3,4,5], 2, pad_right=True)) + [(1, 2), (2, 3), (3, 4), (4, 5), (5, None)] + >>> list(ngrams([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='
')) + [(1, 2), (2, 3), (3, 4), (4, 5), (5, '')] + >>> list(ngrams([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='')) + [('', 1), (1, 2), (2, 3), (3, 4), (4, 5)] + >>> list(ngrams([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='', right_pad_symbol='')) + [('', 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, '')] + + + :param sequence: the source data to be converted into ngrams + :type sequence: sequence or iter + :param n: the degree of the ngrams + :type n: int + :param pad_left: whether the ngrams should be left-padded + :type pad_left: bool + :param pad_right: whether the ngrams should be right-padded + :type pad_right: bool + :param left_pad_symbol: the symbol to use for left padding (default is None) + :type left_pad_symbol: any + :param right_pad_symbol: the symbol to use for right padding (default is None) + :type right_pad_symbol: any + :rtype: sequence or iter + """ + sequence = pad_sequence(sequence, n, **kwargs) + + # Creates the sliding window, of n no. of items. + # `iterables` is a tuple of iterables where each iterable is a window of n items. + iterables = tee(sequence, n) + + for i, sub_iterable in enumerate(iterables): # For each window, + for _ in range(i): # iterate through every order of ngrams + next(sub_iterable, None) # generate the ngrams within the window. + return zip(*iterables) # Unpack and flattens the iterables. + + +def bigrams(sequence, **kwargs): + """ + Return the bigrams generated from a sequence of items, as an iterator. + For example: + + >>> from nltk.util import bigrams + >>> list(bigrams([1,2,3,4,5])) + [(1, 2), (2, 3), (3, 4), (4, 5)] + + Use bigrams for a list version of this function. + + :param sequence: the source data to be converted into bigrams + :type sequence: sequence or iter + :rtype: iter(tuple) + """ + + yield from ngrams(sequence, 2, **kwargs) + + +def trigrams(sequence, **kwargs): + """ + Return the trigrams generated from a sequence of items, as an iterator. + For example: + + >>> from nltk.util import trigrams + >>> list(trigrams([1,2,3,4,5])) + [(1, 2, 3), (2, 3, 4), (3, 4, 5)] + + Use trigrams for a list version of this function. + + :param sequence: the source data to be converted into trigrams + :type sequence: sequence or iter + :rtype: iter(tuple) + """ + + yield from ngrams(sequence, 3, **kwargs) + + +def everygrams( + sequence, min_len=1, max_len=-1, pad_left=False, pad_right=False, **kwargs +): + """ + Returns all possible ngrams generated from a sequence of items, as an iterator. + + >>> sent = 'a b c'.split() + + New version outputs for everygrams. + >>> list(everygrams(sent)) + [('a',), ('a', 'b'), ('a', 'b', 'c'), ('b',), ('b', 'c'), ('c',)] + + Old version outputs for everygrams. + >>> sorted(everygrams(sent), key=len) + [('a',), ('b',), ('c',), ('a', 'b'), ('b', 'c'), ('a', 'b', 'c')] + + >>> list(everygrams(sent, max_len=2)) + [('a',), ('a', 'b'), ('b',), ('b', 'c'), ('c',)] + + :param sequence: the source data to be converted into ngrams. If max_len is + not provided, this sequence will be loaded into memory + :type sequence: sequence or iter + :param min_len: minimum length of the ngrams, aka. n-gram order/degree of ngram + :type min_len: int + :param max_len: maximum length of the ngrams (set to length of sequence by default) + :type max_len: int + :param pad_left: whether the ngrams should be left-padded + :type pad_left: bool + :param pad_right: whether the ngrams should be right-padded + :type pad_right: bool + :rtype: iter(tuple) + """ + + # Get max_len for padding. 
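+    # (If the sequence has no len(), e.g. a generator, it is materialized as a list first.)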
+ if max_len == -1: + try: + max_len = len(sequence) + except TypeError: + sequence = list(sequence) + max_len = len(sequence) + + # Pad if indicated using max_len. + sequence = pad_sequence(sequence, max_len, pad_left, pad_right, **kwargs) + + # Sliding window to store grams. + history = list(islice(sequence, max_len)) + + # Yield ngrams from sequence. + while history: + for ngram_len in range(min_len, len(history) + 1): + yield tuple(history[:ngram_len]) + + # Append element to history if sequence has more items. + try: + history.append(next(sequence)) + except StopIteration: + pass + + del history[0] + + +def skipgrams(sequence, n, k, **kwargs): + """ + Returns all possible skipgrams generated from a sequence of items, as an iterator. + Skipgrams are ngrams that allows tokens to be skipped. + Refer to http://homepages.inf.ed.ac.uk/ballison/pdf/lrec_skipgrams.pdf + + >>> sent = "Insurgents killed in ongoing fighting".split() + >>> list(skipgrams(sent, 2, 2)) + [('Insurgents', 'killed'), ('Insurgents', 'in'), ('Insurgents', 'ongoing'), ('killed', 'in'), ('killed', 'ongoing'), ('killed', 'fighting'), ('in', 'ongoing'), ('in', 'fighting'), ('ongoing', 'fighting')] + >>> list(skipgrams(sent, 3, 2)) + [('Insurgents', 'killed', 'in'), ('Insurgents', 'killed', 'ongoing'), ('Insurgents', 'killed', 'fighting'), ('Insurgents', 'in', 'ongoing'), ('Insurgents', 'in', 'fighting'), ('Insurgents', 'ongoing', 'fighting'), ('killed', 'in', 'ongoing'), ('killed', 'in', 'fighting'), ('killed', 'ongoing', 'fighting'), ('in', 'ongoing', 'fighting')] + + :param sequence: the source data to be converted into trigrams + :type sequence: sequence or iter + :param n: the degree of the ngrams + :type n: int + :param k: the skip distance + :type k: int + :rtype: iter(tuple) + """ + + # Pads the sequence as desired by **kwargs. + if "pad_left" in kwargs or "pad_right" in kwargs: + sequence = pad_sequence(sequence, n, **kwargs) + + # Note when iterating through the ngrams, the pad_right here is not + # the **kwargs padding, it's for the algorithm to detect the SENTINEL + # object on the right pad to stop inner loop. + SENTINEL = object() + for ngram in ngrams(sequence, n + k, pad_right=True, right_pad_symbol=SENTINEL): + head = ngram[:1] + tail = ngram[1:] + for skip_tail in combinations(tail, n - 1): + if skip_tail[-1] is SENTINEL: + continue + yield head + skip_tail + + +###################################################################### +# Binary Search in a File +###################################################################### + +# inherited from pywordnet, by Oliver Steele +def binary_search_file(file, key, cache=None, cacheDepth=-1): + """ + Return the line from the file with first word key. + Searches through a sorted file using the binary search algorithm. + + :type file: file + :param file: the file to be searched through. + :type key: str + :param key: the identifier we are searching for. 
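+    :type cache: dict
+    :param cache: a dict mapping probed midpoint offsets to ``(offset, line)``
+        pairs; pass a dict to reuse lookups across calls (a fresh one is
+        created when None).
+    :type cacheDepth: int
+    :param cacheDepth: number of initial search levels to cache; the default
+        of -1 disables caching.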
+ """ + + key = key + " " + keylen = len(key) + start = 0 + currentDepth = 0 + + if hasattr(file, "name"): + end = os.stat(file.name).st_size - 1 + else: + file.seek(0, 2) + end = file.tell() - 1 + file.seek(0) + + if cache is None: + cache = {} + + while start < end: + lastState = start, end + middle = (start + end) // 2 + + if cache.get(middle): + offset, line = cache[middle] + + else: + line = "" + while True: + file.seek(max(0, middle - 1)) + if middle > 0: + file.discard_line() + offset = file.tell() + line = file.readline() + if line != "": + break + # at EOF; try to find start of the last line + middle = (start + middle) // 2 + if middle == end - 1: + return None + if currentDepth < cacheDepth: + cache[middle] = (offset, line) + + if offset > end: + assert end != middle - 1, "infinite loop" + end = middle - 1 + elif line[:keylen] == key: + return line + elif line > key: + assert end != middle - 1, "infinite loop" + end = middle - 1 + elif line < key: + start = offset + len(line) - 1 + + currentDepth += 1 + thisState = start, end + + if lastState == thisState: + # Detects the condition where we're searching past the end + # of the file, which is otherwise difficult to detect + return None + + return None + + +###################################################################### +# Proxy configuration +###################################################################### + + +def set_proxy(proxy, user=None, password=""): + """ + Set the HTTP proxy for Python to download through. + + If ``proxy`` is None then tries to set proxy from environment or system + settings. + + :param proxy: The HTTP proxy server to use. For example: + 'http://proxy.example.com:3128/' + :param user: The username to authenticate with. Use None to disable + authentication. + :param password: The password to authenticate with. + """ + if proxy is None: + # Try and find the system proxy settings + try: + proxy = getproxies()["http"] + except KeyError as e: + raise ValueError("Could not detect default proxy settings") from e + + # Set up the proxy handler + proxy_handler = ProxyHandler({"https": proxy, "http": proxy}) + opener = build_opener(proxy_handler) + + if user is not None: + # Set up basic proxy authentication if provided + password_manager = HTTPPasswordMgrWithDefaultRealm() + password_manager.add_password(realm=None, uri=proxy, user=user, passwd=password) + opener.add_handler(ProxyBasicAuthHandler(password_manager)) + opener.add_handler(ProxyDigestAuthHandler(password_manager)) + + # Override the existing url opener + install_opener(opener) + + +###################################################################### +# ElementTree pretty printing from https://www.effbot.org/zone/element-lib.htm +###################################################################### + + +def elementtree_indent(elem, level=0): + """ + Recursive function to indent an ElementTree._ElementInterface + used for pretty printing. Run indent on elem and then output + in the normal way. + + :param elem: element to be indented. will be modified. 
+ :type elem: ElementTree._ElementInterface + :param level: level of indentation for this element + :type level: nonnegative integer + :rtype: ElementTree._ElementInterface + :return: Contents of elem indented to reflect its structure + """ + + i = "\n" + level * " " + if len(elem): + if not elem.text or not elem.text.strip(): + elem.text = i + " " + for elem in elem: + elementtree_indent(elem, level + 1) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + else: + if level and (not elem.tail or not elem.tail.strip()): + elem.tail = i + + +###################################################################### +# Mathematical approximations +###################################################################### + + +def choose(n, k): + """ + This function is a fast way to calculate binomial coefficients, commonly + known as nCk, i.e. the number of combinations of n things taken k at a time. + (https://en.wikipedia.org/wiki/Binomial_coefficient). + + This is the *scipy.special.comb()* with long integer computation but this + approximation is faster, see https://github.com/nltk/nltk/issues/1181 + + >>> choose(4, 2) + 6 + >>> choose(6, 2) + 15 + + :param n: The number of things. + :type n: int + :param r: The number of times a thing is taken. + :type r: int + """ + if 0 <= k <= n: + ntok, ktok = 1, 1 + for t in range(1, min(k, n - k) + 1): + ntok *= n + ktok *= t + n -= 1 + return ntok // ktok + else: + return 0 + + +###################################################################### +# Iteration utilities +###################################################################### + + +def pairwise(iterable): + """s -> (s0,s1), (s1,s2), (s2, s3), ...""" + a, b = tee(iterable) + next(b, None) + return zip(a, b) + + +###################################################################### +# Parallelization. +###################################################################### + + +def parallelize_preprocess(func, iterator, processes, progress_bar=False): + from joblib import Parallel, delayed + from tqdm import tqdm + + iterator = tqdm(iterator) if progress_bar else iterator + if processes <= 1: + return map(func, iterator) + return Parallel(n_jobs=processes)(delayed(func)(line) for line in iterator) diff --git a/venv/lib/python3.10/site-packages/nltk/wsd.py b/venv/lib/python3.10/site-packages/nltk/wsd.py new file mode 100644 index 0000000000000000000000000000000000000000..8e29ce1e44b302d751a55d9512363f364a7c3f47 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/wsd.py @@ -0,0 +1,51 @@ +# Natural Language Toolkit: Word Sense Disambiguation Algorithms +# +# Authors: Liling Tan , +# Dmitrijs Milajevs +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus import wordnet + + +def lesk(context_sentence, ambiguous_word, pos=None, synsets=None): + """Return a synset for an ambiguous word in a context. + + :param iter context_sentence: The context sentence where the ambiguous word + occurs, passed as an iterable of words. + :param str ambiguous_word: The ambiguous word that requires WSD. + :param str pos: A specified Part-of-Speech (POS). + :param iter synsets: Possible synsets of the ambiguous word. + :return: ``lesk_sense`` The Synset() object with the highest signature overlaps. + + This function is an implementation of the original Lesk algorithm (1986) [1]. + + Usage example:: + + >>> lesk(['I', 'went', 'to', 'the', 'bank', 'to', 'deposit', 'money', '.'], 'bank', 'n') + Synset('savings_bank.n.02') + + [1] Lesk, Michael. 
"Automatic sense disambiguation using machine + readable dictionaries: how to tell a pine cone from an ice cream + cone." Proceedings of the 5th Annual International Conference on + Systems Documentation. ACM, 1986. + https://dl.acm.org/citation.cfm?id=318728 + """ + + context = set(context_sentence) + if synsets is None: + synsets = wordnet.synsets(ambiguous_word) + + if pos: + synsets = [ss for ss in synsets if str(ss.pos()) == pos] + + if not synsets: + return None + + _, sense = max( + (len(context.intersection(ss.definition().split())), ss) for ss in synsets + ) + + return sense diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93954631f49219cb5da3a257d2d6ef6395a3d4f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/_elffile.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/_elffile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..262ab15cc79db604d2f43e5331c155d0990cf642 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/_elffile.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/_manylinux.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/_manylinux.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a09f7089ea418d15aa69f36c59f315a37e1734f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/_manylinux.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/_musllinux.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/_musllinux.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..915ee416d33fe1609702d9e417891a3e96be89b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/_musllinux.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/_parser.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96102171957c492d112ed7c5c568f5cd3920d135 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/_parser.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3575db96b9475be0b97dad94fb7a27653397a890 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/_tokenizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/_tokenizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d689c8101e2c0fcc7620f649be07ca1e50e3fb29 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/_tokenizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/markers.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/packaging/__pycache__/markers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..273a6cd035b453e7680b2fc42fc758fedcc0293c Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/markers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/metadata.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/metadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de3791ab88a8802041e5260318355a4ef6074f57 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/metadata.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02e914a64adfa79a3b1a6df31b2de9d23643dead Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/specifiers.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/specifiers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a3215d0f0e919da9ef1eba61a8e1492d1c77c38 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/specifiers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/tags.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/tags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82eab295e2389e6ca3bd676f93eba72ccfc80751 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/tags.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26b42903d16a0d5911661dc7a1632f57b8ece462 Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/packaging/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56ab9b282323452edd4db9d38baae3ddcffc7f9d Binary files /dev/null and b/venv/lib/python3.10/site-packages/packaging/__pycache__/version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/packaging/_musllinux.py b/venv/lib/python3.10/site-packages/packaging/_musllinux.py new file mode 100644 index 0000000000000000000000000000000000000000..86419df9d7087f3f8b6d0096f32a52c24b05e7c1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/_musllinux.py @@ -0,0 +1,83 @@ +"""PEP 656 support. + +This module implements logic to detect if the currently running Python is +linked against musl, and what musl version is used. 
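+
+Detection works by reading the ELF interpreter recorded in the running
+executable and invoking that loader to parse the version it reports.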
+""" + +import functools +import re +import subprocess +import sys +from typing import Iterator, NamedTuple, Optional, Sequence + +from ._elffile import ELFFile + + +class _MuslVersion(NamedTuple): + major: int + minor: int + + +def _parse_musl_version(output: str) -> Optional[_MuslVersion]: + lines = [n for n in (n.strip() for n in output.splitlines()) if n] + if len(lines) < 2 or lines[0][:4] != "musl": + return None + m = re.match(r"Version (\d+)\.(\d+)", lines[1]) + if not m: + return None + return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) + + +@functools.lru_cache() +def _get_musl_version(executable: str) -> Optional[_MuslVersion]: + """Detect currently-running musl runtime version. + + This is done by checking the specified executable's dynamic linking + information, and invoking the loader to parse its output for a version + string. If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + try: + with open(executable, "rb") as f: + ld = ELFFile(f).interpreter + except (OSError, TypeError, ValueError): + return None + if ld is None or "musl" not in ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(archs: Sequence[str]) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param archs: Sequence of compatible architectures. + The first one shall be the closest to the actual architecture and be the part of + platform tag after the ``linux_`` prefix, e.g. ``x86_64``. + The ``linux_`` prefix is assumed as a prerequisite for the current platform to + be musllinux-compatible. + + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. + return + for arch in archs: + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/venv/lib/python3.10/site-packages/packaging/_parser.py b/venv/lib/python3.10/site-packages/packaging/_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..684df75457cb82d3683dc99ff52c5bf911f3341b --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/_parser.py @@ -0,0 +1,356 @@ +"""Handwritten parser of dependency specifiers. + +The docstring for each __parse_* function contains ENBF-inspired grammar representing +the implementation. 
+""" + +import ast +from typing import Any, List, NamedTuple, Optional, Tuple, Union + +from ._tokenizer import DEFAULT_RULES, Tokenizer + + +class Node: + def __init__(self, value: str) -> None: + self.value = value + + def __str__(self) -> str: + return self.value + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +MarkerVar = Union[Variable, Value] +MarkerItem = Tuple[MarkerVar, Op, MarkerVar] +# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]] +# MarkerList = List[Union["MarkerList", MarkerAtom, str]] +# mypy does not support recursive type definition +# https://github.com/python/mypy/issues/731 +MarkerAtom = Any +MarkerList = List[Any] + + +class ParsedRequirement(NamedTuple): + name: str + url: str + extras: List[str] + specifier: str + marker: Optional[MarkerList] + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for dependency specifier +# -------------------------------------------------------------------------------------- +def parse_requirement(source: str) -> ParsedRequirement: + return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: + """ + requirement = WS? IDENTIFIER WS? extras WS? requirement_details + """ + tokenizer.consume("WS") + + name_token = tokenizer.expect( + "IDENTIFIER", expected="package name at the start of dependency specifier" + ) + name = name_token.text + tokenizer.consume("WS") + + extras = _parse_extras(tokenizer) + tokenizer.consume("WS") + + url, specifier, marker = _parse_requirement_details(tokenizer) + tokenizer.expect("END", expected="end of dependency specifier") + + return ParsedRequirement(name, url, extras, specifier, marker) + + +def _parse_requirement_details( + tokenizer: Tokenizer, +) -> Tuple[str, str, Optional[MarkerList]]: + """ + requirement_details = AT URL (WS requirement_marker?)? + | specifier WS? (requirement_marker)? + """ + + specifier = "" + url = "" + marker = None + + if tokenizer.check("AT"): + tokenizer.read() + tokenizer.consume("WS") + + url_start = tokenizer.position + url = tokenizer.expect("URL", expected="URL after @").text + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + tokenizer.expect("WS", expected="whitespace after URL") + + # The input might end after whitespace. + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, span_start=url_start, after="URL and whitespace" + ) + else: + specifier_start = tokenizer.position + specifier = _parse_specifier(tokenizer) + tokenizer.consume("WS") + + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, + span_start=specifier_start, + after=( + "version specifier" + if specifier + else "name and no valid version specifier" + ), + ) + + return (url, specifier, marker) + + +def _parse_requirement_marker( + tokenizer: Tokenizer, *, span_start: int, after: str +) -> MarkerList: + """ + requirement_marker = SEMICOLON marker WS? 
+ """ + + if not tokenizer.check("SEMICOLON"): + tokenizer.raise_syntax_error( + f"Expected end or semicolon (after {after})", + span_start=span_start, + ) + tokenizer.read() + + marker = _parse_marker(tokenizer) + tokenizer.consume("WS") + + return marker + + +def _parse_extras(tokenizer: Tokenizer) -> List[str]: + """ + extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? + """ + if not tokenizer.check("LEFT_BRACKET", peek=True): + return [] + + with tokenizer.enclosing_tokens( + "LEFT_BRACKET", + "RIGHT_BRACKET", + around="extras", + ): + tokenizer.consume("WS") + extras = _parse_extras_list(tokenizer) + tokenizer.consume("WS") + + return extras + + +def _parse_extras_list(tokenizer: Tokenizer) -> List[str]: + """ + extras_list = identifier (wsp* ',' wsp* identifier)* + """ + extras: List[str] = [] + + if not tokenizer.check("IDENTIFIER"): + return extras + + extras.append(tokenizer.read().text) + + while True: + tokenizer.consume("WS") + if tokenizer.check("IDENTIFIER", peek=True): + tokenizer.raise_syntax_error("Expected comma between extra names") + elif not tokenizer.check("COMMA"): + break + + tokenizer.read() + tokenizer.consume("WS") + + extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") + extras.append(extra_token.text) + + return extras + + +def _parse_specifier(tokenizer: Tokenizer) -> str: + """ + specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS + | WS? version_many WS? + """ + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="version specifier", + ): + tokenizer.consume("WS") + parsed_specifiers = _parse_version_many(tokenizer) + tokenizer.consume("WS") + + return parsed_specifiers + + +def _parse_version_many(tokenizer: Tokenizer) -> str: + """ + version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? + """ + parsed_specifiers = "" + while tokenizer.check("SPECIFIER"): + span_start = tokenizer.position + parsed_specifiers += tokenizer.read().text + if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): + tokenizer.raise_syntax_error( + ".* suffix can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position + 1, + ) + if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): + tokenizer.raise_syntax_error( + "Local version label can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position, + ) + tokenizer.consume("WS") + if not tokenizer.check("COMMA"): + break + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + + return parsed_specifiers + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for marker expression +# -------------------------------------------------------------------------------------- +def parse_marker(source: str) -> MarkerList: + return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList: + retval = _parse_marker(tokenizer) + tokenizer.expect("END", expected="end of marker expression") + return retval + + +def _parse_marker(tokenizer: Tokenizer) -> MarkerList: + """ + marker = marker_atom (BOOLOP marker_atom)+ + """ + expression = [_parse_marker_atom(tokenizer)] + while tokenizer.check("BOOLOP"): + token = tokenizer.read() + expr_right = _parse_marker_atom(tokenizer) + expression.extend((token.text, expr_right)) + return expression + + +def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: + """ + marker_atom = WS? 
LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? + | WS? marker_item WS? + """ + + tokenizer.consume("WS") + if tokenizer.check("LEFT_PARENTHESIS", peek=True): + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="marker expression", + ): + tokenizer.consume("WS") + marker: MarkerAtom = _parse_marker(tokenizer) + tokenizer.consume("WS") + else: + marker = _parse_marker_item(tokenizer) + tokenizer.consume("WS") + return marker + + +def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: + """ + marker_item = WS? marker_var WS? marker_op WS? marker_var WS? + """ + tokenizer.consume("WS") + marker_var_left = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + marker_op = _parse_marker_op(tokenizer) + tokenizer.consume("WS") + marker_var_right = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + return (marker_var_left, marker_op, marker_var_right) + + +def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: + """ + marker_var = VARIABLE | QUOTED_STRING + """ + if tokenizer.check("VARIABLE"): + return process_env_var(tokenizer.read().text.replace(".", "_")) + elif tokenizer.check("QUOTED_STRING"): + return process_python_str(tokenizer.read().text) + else: + tokenizer.raise_syntax_error( + message="Expected a marker variable or quoted string" + ) + + +def process_env_var(env_var: str) -> Variable: + if env_var in ("platform_python_implementation", "python_implementation"): + return Variable("platform_python_implementation") + else: + return Variable(env_var) + + +def process_python_str(python_str: str) -> Value: + value = ast.literal_eval(python_str) + return Value(str(value)) + + +def _parse_marker_op(tokenizer: Tokenizer) -> Op: + """ + marker_op = IN | NOT IN | OP + """ + if tokenizer.check("IN"): + tokenizer.read() + return Op("in") + elif tokenizer.check("NOT"): + tokenizer.read() + tokenizer.expect("WS", expected="whitespace after 'not'") + tokenizer.expect("IN", expected="'in' after 'not'") + return Op("not in") + elif tokenizer.check("OP"): + return Op(tokenizer.read().text) + else: + return tokenizer.raise_syntax_error( + "Expected marker operator, one of " + "<=, <, !=, ==, >=, >, ~=, ===, in, not in" + ) diff --git a/venv/lib/python3.10/site-packages/packaging/markers.py b/venv/lib/python3.10/site-packages/packaging/markers.py new file mode 100644 index 0000000000000000000000000000000000000000..8b98fca7233be6dd9324cd2b6d71b6a8ac91a6cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/markers.py @@ -0,0 +1,252 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import operator +import os +import platform +import sys +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from ._parser import ( + MarkerAtom, + MarkerList, + Op, + Value, + Variable, + parse_marker as _parse_marker, +) +from ._tokenizer import ParserSyntaxError +from .specifiers import InvalidSpecifier, Specifier +from .utils import canonicalize_name + +__all__ = [ + "InvalidMarker", + "UndefinedComparison", + "UndefinedEnvironmentName", + "Marker", + "default_environment", +] + +Operator = Callable[[str, str], bool] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. 
+ """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. + """ + + +def _normalize_extra_values(results: Any) -> Any: + """ + Normalize extra values. + """ + if isinstance(results[0], tuple): + lhs, op, rhs = results[0] + if isinstance(lhs, Variable) and lhs.value == "extra": + normalized_extra = canonicalize_name(rhs.value) + rhs = Value(normalized_extra) + elif isinstance(rhs, Variable) and rhs.value == "extra": + normalized_extra = canonicalize_name(lhs.value) + lhs = Value(normalized_extra) + results[0] = lhs, op, rhs + return results + + +def _format_marker( + marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True +) -> str: + + assert isinstance(marker, (list, tuple, str)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. + if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators: Dict[str, Operator] = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs: str, op: Op, rhs: str) -> bool: + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs, prereleases=True) + + oper: Optional[Operator] = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") + + return oper(lhs, rhs) + + +def _normalize(*values: str, key: str) -> Tuple[str, ...]: + # PEP 685 – Comparison of extra names for optional distribution dependencies + # https://peps.python.org/pep-0685/ + # > When comparing extra names, tools MUST normalize the names being + # > compared using the semantics outlined in PEP 503 for names + if key == "extra": + return tuple(canonicalize_name(v) for v in values) + + # other environment markers don't have such standards + return values + + +def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool: + groups: List[List[bool]] = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, str)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + environment_key = lhs.value + lhs_value = environment[environment_key] + rhs_value = rhs.value + else: + lhs_value = lhs.value + environment_key = rhs.value + rhs_value = environment[environment_key] + + lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info: "sys._version_info") -> str: + version = "{0.major}.{0.minor}.{0.micro}".format(info) + kind = info.releaselevel + if kind 
!= "final": + version += kind[0] + str(info.serial) + return version + + +def default_environment() -> Dict[str, str]: + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": ".".join(platform.python_version_tuple()[:2]), + "sys_platform": sys.platform, + } + + +class Marker: + def __init__(self, marker: str) -> None: + # Note: We create a Marker object without calling this constructor in + # packaging.requirements.Requirement. If any additional logic is + # added here, make sure to mirror/adapt Requirement. + try: + self._markers = _normalize_extra_values(_parse_marker(marker)) + # The attribute `_markers` can be described in terms of a recursive type: + # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]] + # + # For example, the following expression: + # python_version > "3.6" or (python_version == "3.6" and os_name == "unix") + # + # is parsed into: + # [ + # (, ')>, ), + # 'and', + # [ + # (, , ), + # 'or', + # (, , ) + # ] + # ] + except ParserSyntaxError as e: + raise InvalidMarker(str(e)) from e + + def __str__(self) -> str: + return _format_marker(self._markers) + + def __repr__(self) -> str: + return f"" + + def __hash__(self) -> int: + return hash((self.__class__.__name__, str(self))) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Marker): + return NotImplemented + + return str(self) == str(other) + + def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: + """Evaluate a marker. + + Return the boolean from evaluating the given marker against the + environment. environment is an optional argument to override all or + part of the determined environment. + + The environment is determined from the current Python process. + """ + current_environment = default_environment() + current_environment["extra"] = "" + if environment is not None: + current_environment.update(environment) + # The API used to allow setting extra to None. We need to handle this + # case for backwards compatibility. + if current_environment["extra"] is None: + current_environment["extra"] = "" + + return _evaluate_markers(self._markers, current_environment) diff --git a/venv/lib/python3.10/site-packages/packaging/metadata.py b/venv/lib/python3.10/site-packages/packaging/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..fb274930799da0f8ee17566b5b587b4047282c7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/metadata.py @@ -0,0 +1,825 @@ +import email.feedparser +import email.header +import email.message +import email.parser +import email.policy +import sys +import typing +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Optional, + Tuple, + Type, + Union, + cast, +) + +from . 
import requirements, specifiers, utils, version as version_module + +T = typing.TypeVar("T") +if sys.version_info[:2] >= (3, 8): # pragma: no cover + from typing import Literal, TypedDict +else: # pragma: no cover + if typing.TYPE_CHECKING: + from typing_extensions import Literal, TypedDict + else: + try: + from typing_extensions import Literal, TypedDict + except ImportError: + + class Literal: + def __init_subclass__(*_args, **_kwargs): + pass + + class TypedDict: + def __init_subclass__(*_args, **_kwargs): + pass + + +try: + ExceptionGroup +except NameError: # pragma: no cover + + class ExceptionGroup(Exception): # noqa: N818 + """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11. + + If :external:exc:`ExceptionGroup` is already defined by Python itself, + that version is used instead. + """ + + message: str + exceptions: List[Exception] + + def __init__(self, message: str, exceptions: List[Exception]) -> None: + self.message = message + self.exceptions = exceptions + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})" + +else: # pragma: no cover + ExceptionGroup = ExceptionGroup + + +class InvalidMetadata(ValueError): + """A metadata field contains invalid data.""" + + field: str + """The name of the field that contains invalid data.""" + + def __init__(self, field: str, message: str) -> None: + self.field = field + super().__init__(message) + + +# The RawMetadata class attempts to make as few assumptions about the underlying +# serialization formats as possible. The idea is that as long as a serialization +# formats offer some very basic primitives in *some* way then we can support +# serializing to and from that format. +class RawMetadata(TypedDict, total=False): + """A dictionary of raw core metadata. + + Each field in core metadata maps to a key of this dictionary (when data is + provided). The key is lower-case and underscores are used instead of dashes + compared to the equivalent core metadata field. Any core metadata field that + can be specified multiple times or can hold multiple values in a single + field have a key with a plural name. See :class:`Metadata` whose attributes + match the keys of this dictionary. + + Core metadata fields that can be specified multiple times are stored as a + list or dict depending on which is appropriate for the field. Any fields + which hold multiple values in a single field are stored as a list. + + """ + + # Metadata 1.0 - PEP 241 + metadata_version: str + name: str + version: str + platforms: List[str] + summary: str + description: str + keywords: List[str] + home_page: str + author: str + author_email: str + license: str + + # Metadata 1.1 - PEP 314 + supported_platforms: List[str] + download_url: str + classifiers: List[str] + requires: List[str] + provides: List[str] + obsoletes: List[str] + + # Metadata 1.2 - PEP 345 + maintainer: str + maintainer_email: str + requires_dist: List[str] + provides_dist: List[str] + obsoletes_dist: List[str] + requires_python: str + requires_external: List[str] + project_urls: Dict[str, str] + + # Metadata 2.0 + # PEP 426 attempted to completely revamp the metadata format + # but got stuck without ever being able to build consensus on + # it and ultimately ended up withdrawn. + # + # However, a number of tools had started emitting METADATA with + # `2.0` Metadata-Version, so for historical reasons, this version + # was skipped. 
+ + # Metadata 2.1 - PEP 566 + description_content_type: str + provides_extra: List[str] + + # Metadata 2.2 - PEP 643 + dynamic: List[str] + + # Metadata 2.3 - PEP 685 + # No new fields were added in PEP 685, just some edge case were + # tightened up to provide better interoptability. + + +_STRING_FIELDS = { + "author", + "author_email", + "description", + "description_content_type", + "download_url", + "home_page", + "license", + "maintainer", + "maintainer_email", + "metadata_version", + "name", + "requires_python", + "summary", + "version", +} + +_LIST_FIELDS = { + "classifiers", + "dynamic", + "obsoletes", + "obsoletes_dist", + "platforms", + "provides", + "provides_dist", + "provides_extra", + "requires", + "requires_dist", + "requires_external", + "supported_platforms", +} + +_DICT_FIELDS = { + "project_urls", +} + + +def _parse_keywords(data: str) -> List[str]: + """Split a string of comma-separate keyboards into a list of keywords.""" + return [k.strip() for k in data.split(",")] + + +def _parse_project_urls(data: List[str]) -> Dict[str, str]: + """Parse a list of label/URL string pairings separated by a comma.""" + urls = {} + for pair in data: + # Our logic is slightly tricky here as we want to try and do + # *something* reasonable with malformed data. + # + # The main thing that we have to worry about, is data that does + # not have a ',' at all to split the label from the Value. There + # isn't a singular right answer here, and we will fail validation + # later on (if the caller is validating) so it doesn't *really* + # matter, but since the missing value has to be an empty str + # and our return value is dict[str, str], if we let the key + # be the missing value, then they'd have multiple '' values that + # overwrite each other in a accumulating dict. + # + # The other potentional issue is that it's possible to have the + # same label multiple times in the metadata, with no solid "right" + # answer with what to do in that case. As such, we'll do the only + # thing we can, which is treat the field as unparseable and add it + # to our list of unparsed fields. + parts = [p.strip() for p in pair.split(",", 1)] + parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items + + # TODO: The spec doesn't say anything about if the keys should be + # considered case sensitive or not... logically they should + # be case-preserving and case-insensitive, but doing that + # would open up more cases where we might have duplicate + # entries. + label, url = parts + if label in urls: + # The label already exists in our set of urls, so this field + # is unparseable, and we can just add the whole thing to our + # unparseable data and stop processing it. + raise KeyError("duplicate labels in project urls") + urls[label] = url + + return urls + + +def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str: + """Get the body of the message.""" + # If our source is a str, then our caller has managed encodings for us, + # and we don't need to deal with it. + if isinstance(source, str): + payload: str = msg.get_payload() + return payload + # If our source is a bytes, then we're managing the encoding and we need + # to deal with it. + else: + bpayload: bytes = msg.get_payload(decode=True) + try: + return bpayload.decode("utf8", "strict") + except UnicodeDecodeError: + raise ValueError("payload in an invalid encoding") + + +# The various parse_FORMAT functions here are intended to be as lenient as +# possible in their parsing, while still returning a correctly typed +# RawMetadata. 
+# +# To aid in this, we also generally want to do as little touching of the +# data as possible, except where there are possibly some historic holdovers +# that make valid data awkward to work with. +# +# While this is a lower level, intermediate format than our ``Metadata`` +# class, some light touch ups can make a massive difference in usability. + +# Map METADATA fields to RawMetadata. +_EMAIL_TO_RAW_MAPPING = { + "author": "author", + "author-email": "author_email", + "classifier": "classifiers", + "description": "description", + "description-content-type": "description_content_type", + "download-url": "download_url", + "dynamic": "dynamic", + "home-page": "home_page", + "keywords": "keywords", + "license": "license", + "maintainer": "maintainer", + "maintainer-email": "maintainer_email", + "metadata-version": "metadata_version", + "name": "name", + "obsoletes": "obsoletes", + "obsoletes-dist": "obsoletes_dist", + "platform": "platforms", + "project-url": "project_urls", + "provides": "provides", + "provides-dist": "provides_dist", + "provides-extra": "provides_extra", + "requires": "requires", + "requires-dist": "requires_dist", + "requires-external": "requires_external", + "requires-python": "requires_python", + "summary": "summary", + "supported-platform": "supported_platforms", + "version": "version", +} +_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()} + + +def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]: + """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``). + + This function returns a two-item tuple of dicts. The first dict is of + recognized fields from the core metadata specification. Fields that can be + parsed and translated into Python's built-in types are converted + appropriately. All other fields are left as-is. Fields that are allowed to + appear multiple times are stored as lists. + + The second dict contains all other fields from the metadata. This includes + any unrecognized fields. It also includes any fields which are expected to + be parsed into a built-in type but were not formatted appropriately. Finally, + any fields that are expected to appear only once but are repeated are + included in this dict. + + """ + raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {} + unparsed: Dict[str, List[str]] = {} + + if isinstance(data, str): + parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data) + else: + parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data) + + # We have to wrap parsed.keys() in a set, because in the case of multiple + # values for a key (a list), the key will appear multiple times in the + # list of keys, but we're avoiding that by using get_all(). + for name in frozenset(parsed.keys()): + # Header names in RFC are case insensitive, so we'll normalize to all + # lower case to make comparisons easier. + name = name.lower() + + # We use get_all() here, even for fields that aren't multiple use, + # because otherwise someone could have e.g. two Name fields, and we + # would just silently ignore it rather than doing something about it. + headers = parsed.get_all(name) or [] + + # The way the email module works when parsing bytes is that it + # unconditionally decodes the bytes as ascii using the surrogateescape + # handler. 
When you pull that data back out (such as with get_all() ), + # it looks to see if the str has any surrogate escapes, and if it does + # it wraps it in a Header object instead of returning the string. + # + # As such, we'll look for those Header objects, and fix up the encoding. + value = [] + # Flag if we have run into any issues processing the headers, thus + # signalling that the data belongs in 'unparsed'. + valid_encoding = True + for h in headers: + # It's unclear if this can return more types than just a Header or + # a str, so we'll just assert here to make sure. + assert isinstance(h, (email.header.Header, str)) + + # If it's a header object, we need to do our little dance to get + # the real data out of it. In cases where there is invalid data + # we're going to end up with mojibake, but there's no obvious, good + # way around that without reimplementing parts of the Header object + # ourselves. + # + # That should be fine since, if mojibacked happens, this key is + # going into the unparsed dict anyways. + if isinstance(h, email.header.Header): + # The Header object stores it's data as chunks, and each chunk + # can be independently encoded, so we'll need to check each + # of them. + chunks: List[Tuple[bytes, Optional[str]]] = [] + for bin, encoding in email.header.decode_header(h): + try: + bin.decode("utf8", "strict") + except UnicodeDecodeError: + # Enable mojibake. + encoding = "latin1" + valid_encoding = False + else: + encoding = "utf8" + chunks.append((bin, encoding)) + + # Turn our chunks back into a Header object, then let that + # Header object do the right thing to turn them into a + # string for us. + value.append(str(email.header.make_header(chunks))) + # This is already a string, so just add it. + else: + value.append(h) + + # We've processed all of our values to get them into a list of str, + # but we may have mojibake data, in which case this is an unparsed + # field. + if not valid_encoding: + unparsed[name] = value + continue + + raw_name = _EMAIL_TO_RAW_MAPPING.get(name) + if raw_name is None: + # This is a bit of a weird situation, we've encountered a key that + # we don't know what it means, so we don't know whether it's meant + # to be a list or not. + # + # Since we can't really tell one way or another, we'll just leave it + # as a list, even though it may be a single item list, because that's + # what makes the most sense for email headers. + unparsed[name] = value + continue + + # If this is one of our string fields, then we'll check to see if our + # value is a list of a single item. If it is then we'll assume that + # it was emitted as a single string, and unwrap the str from inside + # the list. + # + # If it's any other kind of data, then we haven't the faintest clue + # what we should parse it as, and we have to just add it to our list + # of unparsed stuff. + if raw_name in _STRING_FIELDS and len(value) == 1: + raw[raw_name] = value[0] + # If this is one of our list of string fields, then we can just assign + # the value, since email *only* has strings, and our get_all() call + # above ensures that this is a list. + elif raw_name in _LIST_FIELDS: + raw[raw_name] = value + # Special Case: Keywords + # The keywords field is implemented in the metadata spec as a str, + # but it conceptually is a list of strings, and is serialized using + # ", ".join(keywords), so we'll do some light data massaging to turn + # this into what it logically is. 
+ elif raw_name == "keywords" and len(value) == 1: + raw[raw_name] = _parse_keywords(value[0]) + # Special Case: Project-URL + # The project urls is implemented in the metadata spec as a list of + # specially-formatted strings that represent a key and a value, which + # is fundamentally a mapping, however the email format doesn't support + # mappings in a sane way, so it was crammed into a list of strings + # instead. + # + # We will do a little light data massaging to turn this into a map as + # it logically should be. + elif raw_name == "project_urls": + try: + raw[raw_name] = _parse_project_urls(value) + except KeyError: + unparsed[name] = value + # Nothing that we've done has managed to parse this, so it'll just + # throw it in our unparseable data and move on. + else: + unparsed[name] = value + + # We need to support getting the Description from the message payload in + # addition to getting it from the the headers. This does mean, though, there + # is the possibility of it being set both ways, in which case we put both + # in 'unparsed' since we don't know which is right. + try: + payload = _get_payload(parsed, data) + except ValueError: + unparsed.setdefault("description", []).append( + parsed.get_payload(decode=isinstance(data, bytes)) + ) + else: + if payload: + # Check to see if we've already got a description, if so then both + # it, and this body move to unparseable. + if "description" in raw: + description_header = cast(str, raw.pop("description")) + unparsed.setdefault("description", []).extend( + [description_header, payload] + ) + elif "description" in unparsed: + unparsed["description"].append(payload) + else: + raw["description"] = payload + + # We need to cast our `raw` to a metadata, because a TypedDict only support + # literal key names, but we're computing our key names on purpose, but the + # way this function is implemented, our `TypedDict` can only have valid key + # names. + return cast(RawMetadata, raw), unparsed + + +_NOT_FOUND = object() + + +# Keep the two values in sync. +_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"] +_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"] + +_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"]) + + +class _Validator(Generic[T]): + """Validate a metadata field. + + All _process_*() methods correspond to a core metadata field. The method is + called with the field's raw value. If the raw value is valid it is returned + in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field). + If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause + as appropriate). + """ + + name: str + raw_name: str + added: _MetadataVersion + + def __init__( + self, + *, + added: _MetadataVersion = "1.0", + ) -> None: + self.added = added + + def __set_name__(self, _owner: "Metadata", name: str) -> None: + self.name = name + self.raw_name = _RAW_TO_EMAIL_MAPPING[name] + + def __get__(self, instance: "Metadata", _owner: Type["Metadata"]) -> T: + # With Python 3.8, the caching can be replaced with functools.cached_property(). + # No need to check the cache as attribute lookup will resolve into the + # instance's __dict__ before __get__ is called. + cache = instance.__dict__ + value = instance._raw.get(self.name) + + # To make the _process_* methods easier, we'll check if the value is None + # and if this field is NOT a required attribute, and if both of those + # things are true, we'll skip the the converter. 
This will mean that the + # converters never have to deal with the None union. + if self.name in _REQUIRED_ATTRS or value is not None: + try: + converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") + except AttributeError: + pass + else: + value = converter(value) + + cache[self.name] = value + try: + del instance._raw[self.name] # type: ignore[misc] + except KeyError: + pass + + return cast(T, value) + + def _invalid_metadata( + self, msg: str, cause: Optional[Exception] = None + ) -> InvalidMetadata: + exc = InvalidMetadata( + self.raw_name, msg.format_map({"field": repr(self.raw_name)}) + ) + exc.__cause__ = cause + return exc + + def _process_metadata_version(self, value: str) -> _MetadataVersion: + # Implicitly makes Metadata-Version required. + if value not in _VALID_METADATA_VERSIONS: + raise self._invalid_metadata(f"{value!r} is not a valid metadata version") + return cast(_MetadataVersion, value) + + def _process_name(self, value: str) -> str: + if not value: + raise self._invalid_metadata("{field} is a required field") + # Validate the name as a side-effect. + try: + utils.canonicalize_name(value, validate=True) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + else: + return value + + def _process_version(self, value: str) -> version_module.Version: + if not value: + raise self._invalid_metadata("{field} is a required field") + try: + return version_module.parse(value) + except version_module.InvalidVersion as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_summary(self, value: str) -> str: + """Check the field contains no newlines.""" + if "\n" in value: + raise self._invalid_metadata("{field} must be a single line") + return value + + def _process_description_content_type(self, value: str) -> str: + content_types = {"text/plain", "text/x-rst", "text/markdown"} + message = email.message.EmailMessage() + message["content-type"] = value + + content_type, parameters = ( + # Defaults to `text/plain` if parsing failed. + message.get_content_type().lower(), + message["content-type"].params, + ) + # Check if content-type is valid or defaulted to `text/plain` and thus was + # not parseable. + if content_type not in content_types or content_type not in value.lower(): + raise self._invalid_metadata( + f"{{field}} must be one of {list(content_types)}, not {value!r}" + ) + + charset = parameters.get("charset", "UTF-8") + if charset != "UTF-8": + raise self._invalid_metadata( + f"{{field}} can only specify the UTF-8 charset, not {list(charset)}" + ) + + markdown_variants = {"GFM", "CommonMark"} + variant = parameters.get("variant", "GFM") # Use an acceptable default. 
+ if content_type == "text/markdown" and variant not in markdown_variants: + raise self._invalid_metadata( + f"valid Markdown variants for {{field}} are {list(markdown_variants)}, " + f"not {variant!r}", + ) + return value + + def _process_dynamic(self, value: List[str]) -> List[str]: + for dynamic_field in map(str.lower, value): + if dynamic_field in {"name", "version", "metadata-version"}: + raise self._invalid_metadata( + f"{value!r} is not allowed as a dynamic field" + ) + elif dynamic_field not in _EMAIL_TO_RAW_MAPPING: + raise self._invalid_metadata(f"{value!r} is not a valid dynamic field") + return list(map(str.lower, value)) + + def _process_provides_extra( + self, + value: List[str], + ) -> List[utils.NormalizedName]: + normalized_names = [] + try: + for name in value: + normalized_names.append(utils.canonicalize_name(name, validate=True)) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{name!r} is invalid for {{field}}", cause=exc + ) + else: + return normalized_names + + def _process_requires_python(self, value: str) -> specifiers.SpecifierSet: + try: + return specifiers.SpecifierSet(value) + except specifiers.InvalidSpecifier as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_requires_dist( + self, + value: List[str], + ) -> List[requirements.Requirement]: + reqs = [] + try: + for req in value: + reqs.append(requirements.Requirement(req)) + except requirements.InvalidRequirement as exc: + raise self._invalid_metadata(f"{req!r} is invalid for {{field}}", cause=exc) + else: + return reqs + + +class Metadata: + """Representation of distribution metadata. + + Compared to :class:`RawMetadata`, this class provides objects representing + metadata fields instead of only using built-in types. Any invalid metadata + will cause :exc:`InvalidMetadata` to be raised (with a + :py:attr:`~BaseException.__cause__` attribute as appropriate). + """ + + _raw: RawMetadata + + @classmethod + def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> "Metadata": + """Create an instance from :class:`RawMetadata`. + + If *validate* is true, all metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + ins = cls() + ins._raw = data.copy() # Mutations occur due to caching enriched values. + + if validate: + exceptions: List[Exception] = [] + try: + metadata_version = ins.metadata_version + metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) + except InvalidMetadata as metadata_version_exc: + exceptions.append(metadata_version_exc) + metadata_version = None + + # Make sure to check for the fields that are present, the required + # fields (so their absence can be reported). + fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS + # Remove fields that have already been checked. + fields_to_check -= {"metadata_version"} + + for key in fields_to_check: + try: + if metadata_version: + # Can't use getattr() as that triggers descriptor protocol which + # will fail due to no value for the instance argument. 
+ try: + field_metadata_version = cls.__dict__[key].added + except KeyError: + exc = InvalidMetadata(key, f"unrecognized field: {key!r}") + exceptions.append(exc) + continue + field_age = _VALID_METADATA_VERSIONS.index( + field_metadata_version + ) + if field_age > metadata_age: + field = _RAW_TO_EMAIL_MAPPING[key] + exc = InvalidMetadata( + field, + "{field} introduced in metadata version " + "{field_metadata_version}, not {metadata_version}", + ) + exceptions.append(exc) + continue + getattr(ins, key) + except InvalidMetadata as exc: + exceptions.append(exc) + + if exceptions: + raise ExceptionGroup("invalid metadata", exceptions) + + return ins + + @classmethod + def from_email( + cls, data: Union[bytes, str], *, validate: bool = True + ) -> "Metadata": + """Parse metadata from email headers. + + If *validate* is true, the metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + raw, unparsed = parse_email(data) + + if validate: + exceptions: list[Exception] = [] + for unparsed_key in unparsed: + if unparsed_key in _EMAIL_TO_RAW_MAPPING: + message = f"{unparsed_key!r} has invalid data" + else: + message = f"unrecognized field: {unparsed_key!r}" + exceptions.append(InvalidMetadata(unparsed_key, message)) + + if exceptions: + raise ExceptionGroup("unparsed", exceptions) + + try: + return cls.from_raw(raw, validate=validate) + except ExceptionGroup as exc_group: + raise ExceptionGroup( + "invalid or unparsed metadata", exc_group.exceptions + ) from None + + metadata_version: _Validator[_MetadataVersion] = _Validator() + """:external:ref:`core-metadata-metadata-version` + (required; validated to be a valid metadata version)""" + name: _Validator[str] = _Validator() + """:external:ref:`core-metadata-name` + (required; validated using :func:`~packaging.utils.canonicalize_name` and its + *validate* parameter)""" + version: _Validator[version_module.Version] = _Validator() + """:external:ref:`core-metadata-version` (required)""" + dynamic: _Validator[Optional[List[str]]] = _Validator( + added="2.2", + ) + """:external:ref:`core-metadata-dynamic` + (validated against core metadata field names and lowercased)""" + platforms: _Validator[Optional[List[str]]] = _Validator() + """:external:ref:`core-metadata-platform`""" + supported_platforms: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """:external:ref:`core-metadata-supported-platform`""" + summary: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" + description: _Validator[Optional[str]] = _Validator() # TODO 2.1: can be in body + """:external:ref:`core-metadata-description`""" + description_content_type: _Validator[Optional[str]] = _Validator(added="2.1") + """:external:ref:`core-metadata-description-content-type` (validated)""" + keywords: _Validator[Optional[List[str]]] = _Validator() + """:external:ref:`core-metadata-keywords`""" + home_page: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-home-page`""" + download_url: _Validator[Optional[str]] = _Validator(added="1.1") + """:external:ref:`core-metadata-download-url`""" + author: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-author`""" + author_email: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-author-email`""" + maintainer: _Validator[Optional[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer`""" + maintainer_email: 
_Validator[Optional[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer-email`""" + license: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-license`""" + classifiers: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """:external:ref:`core-metadata-classifier`""" + requires_dist: _Validator[Optional[List[requirements.Requirement]]] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-dist`""" + requires_python: _Validator[Optional[specifiers.SpecifierSet]] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-python`""" + # Because `Requires-External` allows for non-PEP 440 version specifiers, we + # don't do any processing on the values. + requires_external: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-external`""" + project_urls: _Validator[Optional[Dict[str, str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-project-url`""" + # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation + # regardless of metadata version. + provides_extra: _Validator[Optional[List[utils.NormalizedName]]] = _Validator( + added="2.1", + ) + """:external:ref:`core-metadata-provides-extra`""" + provides_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-provides-dist`""" + obsoletes_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-obsoletes-dist`""" + requires: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Requires`` (deprecated)""" + provides: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Provides`` (deprecated)""" + obsoletes: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Obsoletes`` (deprecated)""" diff --git a/venv/lib/python3.10/site-packages/packaging/requirements.py b/venv/lib/python3.10/site-packages/packaging/requirements.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc43a7e98d87dba0c2069bfb4554f71d228cad4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/requirements.py @@ -0,0 +1,90 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from typing import Any, Iterator, Optional, Set + +from ._parser import parse_requirement as _parse_requirement +from ._tokenizer import ParserSyntaxError +from .markers import Marker, _normalize_extra_values +from .specifiers import SpecifierSet +from .utils import canonicalize_name + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +class Requirement: + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? 
+ + def __init__(self, requirement_string: str) -> None: + try: + parsed = _parse_requirement(requirement_string) + except ParserSyntaxError as e: + raise InvalidRequirement(str(e)) from e + + self.name: str = parsed.name + self.url: Optional[str] = parsed.url or None + self.extras: Set[str] = set(parsed.extras or []) + self.specifier: SpecifierSet = SpecifierSet(parsed.specifier) + self.marker: Optional[Marker] = None + if parsed.marker is not None: + self.marker = Marker.__new__(Marker) + self.marker._markers = _normalize_extra_values(parsed.marker) + + def _iter_parts(self, name: str) -> Iterator[str]: + yield name + + if self.extras: + formatted_extras = ",".join(sorted(self.extras)) + yield f"[{formatted_extras}]" + + if self.specifier: + yield str(self.specifier) + + if self.url: + yield f"@ {self.url}" + if self.marker: + yield " " + + if self.marker: + yield f"; {self.marker}" + + def __str__(self) -> str: + return "".join(self._iter_parts(self.name)) + + def __repr__(self) -> str: + return f"" + + def __hash__(self) -> int: + return hash( + ( + self.__class__.__name__, + *self._iter_parts(canonicalize_name(self.name)), + ) + ) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Requirement): + return NotImplemented + + return ( + canonicalize_name(self.name) == canonicalize_name(other.name) + and self.extras == other.extras + and self.specifier == other.specifier + and self.url == other.url + and self.marker == other.marker + ) diff --git a/venv/lib/python3.10/site-packages/packaging/specifiers.py b/venv/lib/python3.10/site-packages/packaging/specifiers.py new file mode 100644 index 0000000000000000000000000000000000000000..2d015bab5958fd9767cf5c9e449f2fa33292c962 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/specifiers.py @@ -0,0 +1,1017 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +.. testsetup:: + + from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier + from packaging.version import Version +""" + +import abc +import itertools +import re +from typing import Callable, Iterable, Iterator, List, Optional, Tuple, TypeVar, Union + +from .utils import canonicalize_version +from .version import Version + +UnparsedVersion = Union[Version, str] +UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion) +CallableOperator = Callable[[Version, str], bool] + + +def _coerce_version(version: UnparsedVersion) -> Version: + if not isinstance(version, Version): + version = Version(version) + return version + + +class InvalidSpecifier(ValueError): + """ + Raised when attempting to create a :class:`Specifier` with a specifier + string that is invalid. + + >>> Specifier("lolwat") + Traceback (most recent call last): + ... + packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat' + """ + + +class BaseSpecifier(metaclass=abc.ABCMeta): + @abc.abstractmethod + def __str__(self) -> str: + """ + Returns the str representation of this Specifier-like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Returns a hash value for this Specifier-like object. + """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier-like + objects are equal. + + :param other: The other object to check against. 
+ """ + + @property + @abc.abstractmethod + def prereleases(self) -> Optional[bool]: + """Whether or not pre-releases as a whole are allowed. + + This can be set to either ``True`` or ``False`` to explicitly enable or disable + prereleases or it can be set to ``None`` (the default) to use default semantics. + """ + + @prereleases.setter + def prereleases(self, value: bool) -> None: + """Setter for :attr:`prereleases`. + + :param value: The value to set. + """ + + @abc.abstractmethod + def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class Specifier(BaseSpecifier): + """This class abstracts handling of version specifiers. + + .. tip:: + + It is generally not required to instantiate this manually. You should instead + prefer to work with :class:`SpecifierSet` instead, which can parse + comma-separated version specifiers (which is what package metadata contains). + """ + + _operator_regex_str = r""" + (?P(~=|==|!=|<=|>=|<|>|===)) + """ + _version_regex_str = r""" + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s;)]* # The arbitrary version can be just about anything, + # we match everything except for whitespace, a + # semi-colon for marker support, and a closing paren + # since versions can be enclosed in them. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + + # You cannot use a wild card and a pre-release, post-release, a dev or + # local version together so group them with a | and make them optional. + (?: + \.\* # Wild card syntax of .* + | + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + )? + ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. 
+ (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + """Initialize a Specifier instance. + + :param spec: + The string representation of a specifier which will be parsed and + normalized before use. + :param prereleases: + This tells the specifier if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + :raises InvalidSpecifier: + If the given specifier is invalid (i.e. bad syntax). + """ + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + + self._spec: Tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515 + @property # type: ignore[override] + def prereleases(self) -> bool: + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if Version(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + @property + def operator(self) -> str: + """The operator of this specifier. + + >>> Specifier("==1.2.3").operator + '==' + """ + return self._spec[0] + + @property + def version(self) -> str: + """The version of this specifier. + + >>> Specifier("==1.2.3").version + '1.2.3' + """ + return self._spec[1] + + def __repr__(self) -> str: + """A representation of the Specifier that shows all internal state. + + >>> Specifier('>=1.0.0') + =1.0.0')> + >>> Specifier('>=1.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> Specifier('>=1.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + """A string representation of the Specifier that can be round-tripped. + + >>> str(Specifier('>=1.0.0')) + '>=1.0.0' + >>> str(Specifier('>=1.0.0', prereleases=False)) + '>=1.0.0' + """ + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> Tuple[str, str]: + canonical_version = canonicalize_version( + self._spec[1], + strip_trailing_zero=(self._spec[0] != "~="), + ) + return self._spec[0], canonical_version + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + """Whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. + + >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0") + True + >>> (Specifier("==1.2.3", prereleases=False) == + ... 
Specifier("==1.2.3", prereleases=True)) + True + >>> Specifier("==1.2.3") == "==1.2.3" + True + >>> Specifier("==1.2.3") == Specifier("==1.2.4") + False + >>> Specifier("==1.2.3") == Specifier("~=1.2.3") + False + """ + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _compare_compatible(self, prospective: Version, spec: str) -> bool: + + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore suffix segments. + prefix = _version_join( + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) + + def _compare_equal(self, prospective: Version, spec: str) -> bool: + + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + normalized_prospective = canonicalize_version( + prospective.public, strip_trailing_zero=False + ) + # Get the normalized version string ignoring the trailing .* + normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False) + # Split the spec out by bangs and dots, and pretend that there is + # an implicit dot in between a release segment and a pre-release segment. + split_spec = _version_split(normalized_spec) + + # Split the prospective version out by bangs and dots, and pretend + # that there is an implicit dot in between a release segment and + # a pre-release segment. + split_prospective = _version_split(normalized_prospective) + + # 0-pad the prospective version before shortening it to get the correct + # shortened version. + padded_prospective, _ = _pad_version(split_prospective, split_spec) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + shortened_prospective = padded_prospective[: len(split_spec)] + + return shortened_prospective == split_spec + else: + # Convert our spec string into a Version + spec_version = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. + if not spec_version.local: + prospective = Version(prospective.public) + + return prospective == spec_version + + def _compare_not_equal(self, prospective: Version, spec: str) -> bool: + return not self._compare_equal(prospective, spec) + + def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. 
+ return Version(prospective.public) <= Version(spec) + + def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. + return Version(prospective.public) >= Version(spec) + + def _compare_less_than(self, prospective: Version, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is technically greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: + return str(prospective).lower() == str(spec).lower() + + def __contains__(self, item: Union[str, Version]) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: The item to check for. + + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. 
+ + >>> "1.2.3" in Specifier(">=1.2.3") + True + >>> Version("1.2.3") in Specifier(">=1.2.3") + True + >>> "1.0.0" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3") + False + >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True) + True + """ + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this Specifier. If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> Specifier(">=1.2.3").contains("1.2.3") + True + >>> Specifier(">=1.2.3").contains(Version("1.2.3")) + True + >>> Specifier(">=1.2.3").contains("1.0.0") + False + >>> Specifier(">=1.2.3").contains("1.3.0a1") + False + >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1") + True + >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True) + True + """ + + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version, this allows us to have a shortcut for + # "2.0" in Specifier(">=2") + normalized_item = _coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if normalized_item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + operator_callable: CallableOperator = self._get_operator(self.operator) + return operator_callable(normalized_item, self.version) + + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable, that match the specifier. + + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will be intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). + + This method is smarter than just ``filter(Specifier().contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")])) + ['1.2.3', '1.3', ] + >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"])) + ['1.5a1'] + >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + """ + + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. 
+ for version in iterable: + parsed_version = _coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later in case nothing + # else matches this specifier. + if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version: str) -> List[str]: + """Split version into components. + + The split components are intended for version comparison. The logic does + not attempt to retain the original version string, so joining the + components back with :func:`_version_join` may not produce the original + version string. + """ + result: List[str] = [] + + epoch, _, rest = version.rpartition("!") + result.append(epoch or "0") + + for item in rest.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _version_join(components: List[str]) -> str: + """Join split version components into a version string. + + This function assumes the input came from :func:`_version_split`, where the + first component must be the epoch (either empty or numeric), and all other + components numeric. + """ + epoch, *rest = components + return f"{epoch}!{'.'.join(rest)}" + + +def _is_not_suffix(segment: str) -> bool: + return not any( + segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") + ) + + +def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]) :]) + right_split.append(right[len(right_split[0]) :]) + + # Insert our padding + left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) + right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) + + return ( + list(itertools.chain.from_iterable(left_split)), + list(itertools.chain.from_iterable(right_split)), + ) + + +class SpecifierSet(BaseSpecifier): + """This class abstracts handling of a set of version specifiers. + + It can be passed a single specifier (``>=3.0``), a comma-separated list of + specifiers (``>=3.0,!=3.1``), or no specifier at all. + """ + + def __init__( + self, specifiers: str = "", prereleases: Optional[bool] = None + ) -> None: + """Initialize a SpecifierSet instance. + + :param specifiers: + The string representation of a specifier or a comma-separated list of + specifiers which will be parsed and normalized before use. + :param prereleases: + This tells the SpecifierSet if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. 
+ + :raises InvalidSpecifier: + If the given ``specifiers`` are not parseable than this exception will be + raised. + """ + + # Split on `,` to break each individual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. + split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Make each individual specifier a Specifier and save in a frozen set for later. + self._specs = frozenset(map(Specifier, split_specifiers)) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. + self._prereleases = prereleases + + @property + def prereleases(self) -> Optional[bool]: + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __repr__(self) -> str: + """A representation of the specifier set that shows all internal state. + + Note that the ordering of the individual specifiers within the set may not + match the input string. + + >>> SpecifierSet('>=1.0.0,!=2.0.0') + =1.0.0')> + >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"" + + def __str__(self) -> str: + """A string representation of the specifier set that can be round-tripped. + + Note that the ordering of the individual specifiers within the set may not + match the input string. + + >>> str(SpecifierSet(">=1.0.0,!=1.0.1")) + '!=1.0.1,>=1.0.0' + >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False)) + '!=1.0.1,>=1.0.0' + """ + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self) -> int: + return hash(self._specs) + + def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": + """Return a SpecifierSet which is a combination of the two sets. + + :param other: The other object to combine with. + + >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1' + =1.0.0')> + >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1') + =1.0.0')> + """ + if isinstance(other, str): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." + ) + + return specifier + + def __eq__(self, other: object) -> bool: + """Whether or not the two SpecifierSet-like objects are equal. + + :param other: The other object to check against. 
+ + The value of :attr:`prereleases` is ignored. + + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) == + ... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1" + True + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2") + False + """ + if isinstance(other, (str, Specifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __len__(self) -> int: + """Returns the number of specifiers in this specifier set.""" + return len(self._specs) + + def __iter__(self) -> Iterator[Specifier]: + """ + Returns an iterator over all the underlying :class:`Specifier` instances + in this specifier set. + + >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str) + [, =1.0.0')>] + """ + return iter(self._specs) + + def __contains__(self, item: UnparsedVersion) -> bool: + """Return whether or not the item is contained in this specifier. + + :param item: The item to check for. + + This is used for the ``in`` operator and behaves the same as + :meth:`contains` with no ``prereleases`` argument passed. + + >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1") + True + >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1") + False + >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True) + True + """ + return self.contains(item) + + def contains( + self, + item: UnparsedVersion, + prereleases: Optional[bool] = None, + installed: Optional[bool] = None, + ) -> bool: + """Return whether or not the item is contained in this SpecifierSet. + + :param item: + The item to check for, which can be a version string or a + :class:`Version` instance. + :param prereleases: + Whether or not to match prereleases with this SpecifierSet. If set to + ``None`` (the default), it uses :attr:`prereleases` to determine + whether or not prereleases are allowed. + + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3")) + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1") + False + >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1") + True + >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True) + True + """ + # Ensure that our item is a Version instance. + if not isinstance(item, Version): + item = Version(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. 
+ # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + if installed and item.is_prerelease: + item = Version(item.base_version) + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all(s.contains(item, prereleases=prereleases) for s in self._specs) + + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """Filter items in the given iterable, that match the specifiers in this set. + + :param iterable: + An iterable that can contain version strings and :class:`Version` instances. + The items in the iterable will be filtered according to the specifier. + :param prereleases: + Whether or not to allow prereleases in the returned iterator. If set to + ``None`` (the default), it will be intelligently decide whether to allow + prereleases or not (based on the :attr:`prereleases` attribute, and + whether the only versions matching are prereleases). + + This method is smarter than just ``filter(SpecifierSet(...).contains, [...])`` + because it implements the rule from :pep:`440` that a prerelease item + SHOULD be accepted if no other versions match the given specifier. + + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")])) + ['1.3', ] + >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"])) + [] + >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + + An "empty" SpecifierSet will filter items based on the presence of prerelease + versions in the set. + + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet("").filter(["1.5a1"])) + ['1.5a1'] + >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + """ + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iter(iterable) + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases. 
+ else: + filtered: List[UnparsedVersionVar] = [] + found_prereleases: List[UnparsedVersionVar] = [] + + for item in iterable: + parsed_version = _coerce_version(item) + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return iter(found_prereleases) + + return iter(filtered) diff --git a/venv/lib/python3.10/site-packages/packaging/utils.py b/venv/lib/python3.10/site-packages/packaging/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c2c2f75aa806282d322c76c2117c0f0fdfb09d25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/utils.py @@ -0,0 +1,172 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import re +from typing import FrozenSet, NewType, Tuple, Union, cast + +from .tags import Tag, parse_tag +from .version import InvalidVersion, Version + +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) + + +class InvalidName(ValueError): + """ + An invalid distribution name; users should refer to the packaging user guide. + """ + + +class InvalidWheelFilename(ValueError): + """ + An invalid wheel filename was found, users should refer to PEP 427. + """ + + +class InvalidSdistFilename(ValueError): + """ + An invalid sdist filename was found, users should refer to the packaging user guide. + """ + + +# Core metadata spec for `Name` +_validate_regex = re.compile( + r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE +) +_canonicalize_regex = re.compile(r"[-_.]+") +_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$") +# PEP 427: The build number must start with a digit. +_build_tag_regex = re.compile(r"(\d+)(.*)") + + +def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName: + if validate and not _validate_regex.match(name): + raise InvalidName(f"name is invalid: {name!r}") + # This is taken from PEP 503. + value = _canonicalize_regex.sub("-", name).lower() + return cast(NormalizedName, value) + + +def is_normalized_name(name: str) -> bool: + return _normalized_regex.match(name) is not None + + +def canonicalize_version( + version: Union[Version, str], *, strip_trailing_zero: bool = True +) -> str: + """ + This is very similar to Version.__str__, but has one subtle difference + with the way it handles the release segment. 
+ """ + if isinstance(version, str): + try: + parsed = Version(version) + except InvalidVersion: + # Legacy versions cannot be normalized + return version + else: + parsed = version + + parts = [] + + # Epoch + if parsed.epoch != 0: + parts.append(f"{parsed.epoch}!") + + # Release segment + release_segment = ".".join(str(x) for x in parsed.release) + if strip_trailing_zero: + # NB: This strips trailing '.0's to normalize + release_segment = re.sub(r"(\.0)+$", "", release_segment) + parts.append(release_segment) + + # Pre-release + if parsed.pre is not None: + parts.append("".join(str(x) for x in parsed.pre)) + + # Post-release + if parsed.post is not None: + parts.append(f".post{parsed.post}") + + # Development release + if parsed.dev is not None: + parts.append(f".dev{parsed.dev}") + + # Local version segment + if parsed.local is not None: + parts.append(f"+{parsed.local}") + + return "".join(parts) + + +def parse_wheel_filename( + filename: str, +) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: + if not filename.endswith(".whl"): + raise InvalidWheelFilename( + f"Invalid wheel filename (extension must be '.whl'): {filename}" + ) + + filename = filename[:-4] + dashes = filename.count("-") + if dashes not in (4, 5): + raise InvalidWheelFilename( + f"Invalid wheel filename (wrong number of parts): {filename}" + ) + + parts = filename.split("-", dashes - 2) + name_part = parts[0] + # See PEP 427 for the rules on escaping the project name. + if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: + raise InvalidWheelFilename(f"Invalid project name: {filename}") + name = canonicalize_name(name_part) + + try: + version = Version(parts[1]) + except InvalidVersion as e: + raise InvalidWheelFilename( + f"Invalid wheel filename (invalid version): {filename}" + ) from e + + if dashes == 5: + build_part = parts[2] + build_match = _build_tag_regex.match(build_part) + if build_match is None: + raise InvalidWheelFilename( + f"Invalid build number: {build_part} in '{filename}'" + ) + build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) + else: + build = () + tags = parse_tag(parts[-1]) + return (name, version, build, tags) + + +def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: + if filename.endswith(".tar.gz"): + file_stem = filename[: -len(".tar.gz")] + elif filename.endswith(".zip"): + file_stem = filename[: -len(".zip")] + else: + raise InvalidSdistFilename( + f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" + f" {filename}" + ) + + # We are requiring a PEP 440 version, which cannot contain dashes, + # so we split on the last dash. + name_part, sep, version_part = file_stem.rpartition("-") + if not sep: + raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") + + name = canonicalize_name(name_part) + + try: + version = Version(version_part) + except InvalidVersion as e: + raise InvalidSdistFilename( + f"Invalid sdist filename (invalid version): {filename}" + ) from e + + return (name, version) diff --git a/venv/lib/python3.10/site-packages/packaging/version.py b/venv/lib/python3.10/site-packages/packaging/version.py new file mode 100644 index 0000000000000000000000000000000000000000..5faab9bd0dcf28847960162b2b4f13a8a556ef20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/packaging/version.py @@ -0,0 +1,563 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository +# for complete details. +""" +.. testsetup:: + + from packaging.version import parse, Version +""" + +import itertools +import re +from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union + +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType + +__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"] + +LocalType = Tuple[Union[int, str], ...] + +CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]] +CmpLocalType = Union[ + NegativeInfinityType, + Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...], +] +CmpKey = Tuple[ + int, + Tuple[int, ...], + CmpPrePostDevType, + CmpPrePostDevType, + CmpPrePostDevType, + CmpLocalType, +] +VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool] + + +class _Version(NamedTuple): + epoch: int + release: Tuple[int, ...] + dev: Optional[Tuple[str, int]] + pre: Optional[Tuple[str, int]] + post: Optional[Tuple[str, int]] + local: Optional[LocalType] + + +def parse(version: str) -> "Version": + """Parse the given version string. + + >>> parse('1.0.dev1') + + + :param version: The version string to parse. + :raises InvalidVersion: When the version string is not a valid version. + """ + return Version(version) + + +class InvalidVersion(ValueError): + """Raised when a version string is not a valid version. + + >>> Version("invalid") + Traceback (most recent call last): + ... + packaging.version.InvalidVersion: Invalid version: 'invalid' + """ + + +class _BaseVersion: + _key: Tuple[Any, ...] + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. + def __lt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +_VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
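+    # Illustrative note (not in the upstream sources): since ordering comes from the
+    # rich comparison methods inherited from _BaseVersion, plain ``sorted`` yields
+    # PEP 440 ordering, e.g.
+    #
+    #     sorted(Version(v) for v in ["2.0", "1.0", "1.0a1", "1.0.dev1"])
+    #     # -> [Version('1.0.dev1'), Version('1.0a1'), Version('1.0'), Version('2.0')]
+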
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f"<Version('{self}')>"
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be round-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
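+
+    # Illustrative note (not in the upstream sources): because ``str`` rebuilds the
+    # version from its parsed pieces, it doubles as a normaliser, e.g.
+    #
+    #     str(Version("1.0.0-ALPHA_2")) == "1.0.0a2"
+    #     str(Version("1.0.0-POST.1"))  == "1.0.0.post1"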
+
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
+
+    @property
+    def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not a letter, then this is
+        # using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
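+
+# Illustrative note (not in the upstream sources): the normalisation above means,
+# for example,
+#
+#     _parse_letter_version("alpha", None) == ("a", 0)
+#     _parse_letter_version("rev", "3")    == ("post", 3)
+#     _parse_letter_version(None, "1")     == ("post", 1)   # implicit post, as in "1.0-1"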
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[LocalType],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
+    # zeros until we come to something non-zero, then re-reverse the rest back
+    # into the correct order and make it a tuple to use as our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: CmpPrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: CmpPrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: CmpPrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: CmpLocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
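+
+
+# Illustrative note (not in the upstream sources): the net effect of the key built
+# above is the PEP 440 ordering, e.g.
+#
+#     Version("1.0.dev0") < Version("1.0a1") < Version("1.0") < Version("1.0.post1")
+#
+# and Version("1.0") == Version("1.0.0"), since trailing zeros are dropped from the
+# release component of the key.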