diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..208da9f5678f4b79282d5e6886502627ab9161ab --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__init__.py @@ -0,0 +1,197 @@ +# Natural Language Toolkit: Chunkers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT +# + +""" +Classes and interfaces for identifying non-overlapping linguistic +groups (such as base noun phrases) in unrestricted text. This task is +called "chunk parsing" or "chunking", and the identified groups are +called "chunks". The chunked text is represented using a shallow +tree called a "chunk structure." A chunk structure is a tree +containing tokens and chunks, where each chunk is a subtree containing +only tokens. For example, the chunk structure for base noun phrase +chunks in the sentence "I saw the big dog on the hill" is:: + + (SENTENCE: + (NP: ) + + (NP: ) + + (NP: )) + +To convert a chunk structure back to a list of tokens, simply use the +chunk structure's ``leaves()`` method. + +This module defines ``ChunkParserI``, a standard interface for +chunking texts; and ``RegexpChunkParser``, a regular-expression based +implementation of that interface. It also defines ``ChunkScore``, a +utility class for scoring chunk parsers. + +RegexpChunkParser +================= + +``RegexpChunkParser`` is an implementation of the chunk parser interface +that uses regular-expressions over tags to chunk a text. Its +``parse()`` method first constructs a ``ChunkString``, which encodes a +particular chunking of the input text. Initially, nothing is +chunked. ``parse.RegexpChunkParser`` then applies a sequence of +``RegexpChunkRule`` rules to the ``ChunkString``, each of which modifies +the chunking that it encodes. Finally, the ``ChunkString`` is +transformed back into a chunk structure, which is returned. + +``RegexpChunkParser`` can only be used to chunk a single kind of phrase. +For example, you can use an ``RegexpChunkParser`` to chunk the noun +phrases in a text, or the verb phrases in a text; but you can not +use it to simultaneously chunk both noun phrases and verb phrases in +the same text. (This is a limitation of ``RegexpChunkParser``, not of +chunk parsers in general.) + +RegexpChunkRules +---------------- + +A ``RegexpChunkRule`` is a transformational rule that updates the +chunking of a text by modifying its ``ChunkString``. Each +``RegexpChunkRule`` defines the ``apply()`` method, which modifies +the chunking encoded by a ``ChunkString``. The +``RegexpChunkRule`` class itself can be used to implement any +transformational rule based on regular expressions. There are +also a number of subclasses, which can be used to implement +simpler types of rules: + + - ``ChunkRule`` chunks anything that matches a given regular + expression. + - ``StripRule`` strips anything that matches a given regular + expression. + - ``UnChunkRule`` will un-chunk any chunk that matches a given + regular expression. + - ``MergeRule`` can be used to merge two contiguous chunks. + - ``SplitRule`` can be used to split a single chunk into two + smaller chunks. + - ``ExpandLeftRule`` will expand a chunk to incorporate new + unchunked material on the left. + - ``ExpandRightRule`` will expand a chunk to incorporate new + unchunked material on the right. 
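As an aside, a minimal sketch of how rule grammars of this kind are typically driven through ``RegexpParser`` (the grammar, POS tags, and printed tree below are invented for illustration and are only indicative)::

    >>> from nltk.chunk import RegexpParser
    >>> grammar = r"NP: {<DT>?<JJ>*<NN.*>+}"   # optional determiner, any adjectives, then nouns
    >>> parser = RegexpParser(grammar)
    >>> sent = [("the", "DT"), ("little", "JJ"), ("cat", "NN"), ("sat", "VBD")]
    >>> print(parser.parse(sent))
    (S (NP the/DT little/JJ cat/NN) sat/VBD)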
+
+Tag Patterns
+~~~~~~~~~~~~
+
+A ``RegexpChunkRule`` uses a modified version of regular
+expression patterns, called "tag patterns".  Tag patterns are
+used to match sequences of tags.  Examples of tag patterns are::
+
+     r'(<DT>|<JJ>|<NN>)+'
+     r'<NN>+'
+     r'<NN.*>'
+
+The differences between regular expression patterns and tag
+patterns are:
+
+    - In tag patterns, ``'<'`` and ``'>'`` act as parentheses; so
+      ``'<NN>+'`` matches one or more repetitions of ``'<NN>'``, not
+      ``'<NN'`` followed by one or more repetitions of ``'>'``.
+    - Whitespace in tag patterns is ignored.  So
+      ``'<DT> | <NN>'`` is equivalent to ``'<DT>
|'`` + - In tag patterns, ``'.'`` is equivalent to ``'[^{}<>]'``; so + ``''`` matches any single tag starting with ``'NN'``. + +The function ``tag_pattern2re_pattern`` can be used to transform +a tag pattern to an equivalent regular expression pattern. + +Efficiency +---------- + +Preliminary tests indicate that ``RegexpChunkParser`` can chunk at a +rate of about 300 tokens/second, with a moderately complex rule set. + +There may be problems if ``RegexpChunkParser`` is used with more than +5,000 tokens at a time. In particular, evaluation of some regular +expressions may cause the Python regular expression engine to +exceed its maximum recursion depth. We have attempted to minimize +these problems, but it is impossible to avoid them completely. We +therefore recommend that you apply the chunk parser to a single +sentence at a time. + +Emacs Tip +--------- + +If you evaluate the following elisp expression in emacs, it will +colorize a ``ChunkString`` when you use an interactive python shell +with emacs or xemacs ("C-c !"):: + + (let () + (defconst comint-mode-font-lock-keywords + '(("<[^>]+>" 0 'font-lock-reference-face) + ("[{}]" 0 'font-lock-function-name-face))) + (add-hook 'comint-mode-hook (lambda () (turn-on-font-lock)))) + +You can evaluate this code by copying it to a temporary buffer, +placing the cursor after the last close parenthesis, and typing +"``C-x C-e``". You should evaluate it before running the interactive +session. The change will last until you close emacs. + +Unresolved Issues +----------------- + +If we use the ``re`` module for regular expressions, Python's +regular expression engine generates "maximum recursion depth +exceeded" errors when processing very large texts, even for +regular expressions that should not require any recursion. We +therefore use the ``pre`` module instead. But note that ``pre`` +does not include Unicode support, so this module will not work +with unicode strings. Note also that ``pre`` regular expressions +are not quite as advanced as ``re`` ones (e.g., no leftward +zero-length assertions). + +:type CHUNK_TAG_PATTERN: regexp +:var CHUNK_TAG_PATTERN: A regular expression to test whether a tag + pattern is valid. +""" + +from nltk.chunk.api import ChunkParserI +from nltk.chunk.regexp import RegexpChunkParser, RegexpParser +from nltk.chunk.util import ( + ChunkScore, + accuracy, + conllstr2tree, + conlltags2tree, + ieerstr2tree, + tagstr2tree, + tree2conllstr, + tree2conlltags, +) +from nltk.data import load + +# Standard treebank POS tagger +_BINARY_NE_CHUNKER = "chunkers/maxent_ne_chunker/english_ace_binary.pickle" +_MULTICLASS_NE_CHUNKER = "chunkers/maxent_ne_chunker/english_ace_multiclass.pickle" + + +def ne_chunk(tagged_tokens, binary=False): + """ + Use NLTK's currently recommended named entity chunker to + chunk the given list of tagged tokens. + """ + if binary: + chunker_pickle = _BINARY_NE_CHUNKER + else: + chunker_pickle = _MULTICLASS_NE_CHUNKER + chunker = load(chunker_pickle) + return chunker.parse(tagged_tokens) + + +def ne_chunk_sents(tagged_sentences, binary=False): + """ + Use NLTK's currently recommended named entity chunker to chunk the + given list of tagged sentences, each consisting of a list of tagged tokens. 
+ """ + if binary: + chunker_pickle = _BINARY_NE_CHUNKER + else: + chunker_pickle = _MULTICLASS_NE_CHUNKER + chunker = load(chunker_pickle) + return chunker.parse_sents(tagged_sentences) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f8d244d5cf7fa283a013db972bdf061330b28d4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57ebcc8669d8e487028bf8c76cb1f06d66b7d4d7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/named_entity.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/named_entity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f97814dfa5404663540eaeac43fd595189065120 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/named_entity.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/regexp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/regexp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21723e9aaff3cf99ea80543654b8b841e68e54f4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/regexp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c04508e105cfd62d64d51cf569a8e005c61df378 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/chunk/regexp.py b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/regexp.py new file mode 100644 index 0000000000000000000000000000000000000000..4369119706106db2892e47675ffd039e85db888d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/chunk/regexp.py @@ -0,0 +1,1475 @@ +# Natural Language Toolkit: Regular Expression Chunkers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT + +import re + +import regex + +from nltk.chunk.api import ChunkParserI +from nltk.tree import Tree + +# ////////////////////////////////////////////////////// +# ChunkString +# ////////////////////////////////////////////////////// + + +class ChunkString: + """ + A string-based encoding of a particular chunking of a text. + Internally, the ``ChunkString`` class uses a single string to + encode the chunking of the input text. This string contains a + sequence of angle-bracket delimited tags, with chunking indicated + by braces. An example of this encoding is:: + + {
<DT><JJ><NN>}<VBN><IN>{<DT><NN>}<.>{<DT><NN>}<.>
+
+    ``ChunkString`` are created from tagged texts (i.e., lists of
+    ``tokens`` whose type is ``TaggedType``).  Initially, nothing is
+    chunked.
+
+    The chunking of a ``ChunkString`` can be modified with the ``xform()``
+    method, which uses a regular expression to transform the string
+    representation.  These transformations should only add and remove
+    braces; they should *not* modify the sequence of angle-bracket
+    delimited tags.
+
+    :type _str: str
+    :ivar _str: The internal string representation of the text's
+        encoding.  This string representation contains a sequence of
+        angle-bracket delimited tags, with chunking indicated by
+        braces.  An example of this encoding is::
+
+            {<DT><JJ><NN>}<VBN><IN>{<DT><NN>
}<.> + + :type _pieces: list(tagged tokens and chunks) + :ivar _pieces: The tagged tokens and chunks encoded by this ``ChunkString``. + :ivar _debug: The debug level. See the constructor docs. + + :cvar IN_CHUNK_PATTERN: A zero-width regexp pattern string that + will only match positions that are in chunks. + :cvar IN_STRIP_PATTERN: A zero-width regexp pattern string that + will only match positions that are in strips. + """ + + CHUNK_TAG_CHAR = r"[^\{\}<>]" + CHUNK_TAG = r"(<%s+?>)" % CHUNK_TAG_CHAR + + IN_CHUNK_PATTERN = r"(?=[^\{]*\})" + IN_STRIP_PATTERN = r"(?=[^\}]*(\{|$))" + + # These are used by _verify + _CHUNK = r"(\{%s+?\})+?" % CHUNK_TAG + _STRIP = r"(%s+?)+?" % CHUNK_TAG + _VALID = re.compile(r"^(\{?%s\}?)*?$" % CHUNK_TAG) + _BRACKETS = re.compile(r"[^\{\}]+") + _BALANCED_BRACKETS = re.compile(r"(\{\})*$") + + def __init__(self, chunk_struct, debug_level=1): + """ + Construct a new ``ChunkString`` that encodes the chunking of + the text ``tagged_tokens``. + + :type chunk_struct: Tree + :param chunk_struct: The chunk structure to be further chunked. + :type debug_level: int + :param debug_level: The level of debugging which should be + applied to transformations on the ``ChunkString``. The + valid levels are: + + - 0: no checks + - 1: full check on to_chunkstruct + - 2: full check on to_chunkstruct and cursory check after + each transformation. + - 3: full check on to_chunkstruct and full check after + each transformation. + + We recommend you use at least level 1. You should + probably use level 3 if you use any non-standard + subclasses of ``RegexpChunkRule``. + """ + self._root_label = chunk_struct.label() + self._pieces = chunk_struct[:] + tags = [self._tag(tok) for tok in self._pieces] + self._str = "<" + "><".join(tags) + ">" + self._debug = debug_level + + def _tag(self, tok): + if isinstance(tok, tuple): + return tok[1] + elif isinstance(tok, Tree): + return tok.label() + else: + raise ValueError("chunk structures must contain tagged " "tokens or trees") + + def _verify(self, s, verify_tags): + """ + Check to make sure that ``s`` still corresponds to some chunked + version of ``_pieces``. + + :type verify_tags: bool + :param verify_tags: Whether the individual tags should be + checked. If this is false, ``_verify`` will check to make + sure that ``_str`` encodes a chunked version of *some* + list of tokens. If this is true, then ``_verify`` will + check to make sure that the tags in ``_str`` match those in + ``_pieces``. + + :raise ValueError: if the internal string representation of + this ``ChunkString`` is invalid or not consistent with _pieces. + """ + # Check overall form + if not ChunkString._VALID.match(s): + raise ValueError( + "Transformation generated invalid " "chunkstring:\n %s" % s + ) + + # Check that parens are balanced. If the string is long, we + # have to do this in pieces, to avoid a maximum recursion + # depth limit for regular expressions. 
+ brackets = ChunkString._BRACKETS.sub("", s) + for i in range(1 + len(brackets) // 5000): + substr = brackets[i * 5000 : i * 5000 + 5000] + if not ChunkString._BALANCED_BRACKETS.match(substr): + raise ValueError( + "Transformation generated invalid " "chunkstring:\n %s" % s + ) + + if verify_tags <= 0: + return + + tags1 = (re.split(r"[\{\}<>]+", s))[1:-1] + tags2 = [self._tag(piece) for piece in self._pieces] + if tags1 != tags2: + raise ValueError( + "Transformation generated invalid " "chunkstring: tag changed" + ) + + def to_chunkstruct(self, chunk_label="CHUNK"): + """ + Return the chunk structure encoded by this ``ChunkString``. + + :rtype: Tree + :raise ValueError: If a transformation has generated an + invalid chunkstring. + """ + if self._debug > 0: + self._verify(self._str, 1) + + # Use this alternating list to create the chunkstruct. + pieces = [] + index = 0 + piece_in_chunk = 0 + for piece in re.split("[{}]", self._str): + + # Find the list of tokens contained in this piece. + length = piece.count("<") + subsequence = self._pieces[index : index + length] + + # Add this list of tokens to our pieces. + if piece_in_chunk: + pieces.append(Tree(chunk_label, subsequence)) + else: + pieces += subsequence + + # Update index, piece_in_chunk + index += length + piece_in_chunk = not piece_in_chunk + + return Tree(self._root_label, pieces) + + def xform(self, regexp, repl): + """ + Apply the given transformation to the string encoding of this + ``ChunkString``. In particular, find all occurrences that match + ``regexp``, and replace them using ``repl`` (as done by + ``re.sub``). + + This transformation should only add and remove braces; it + should *not* modify the sequence of angle-bracket delimited + tags. Furthermore, this transformation may not result in + improper bracketing. Note, in particular, that bracketing may + not be nested. + + :type regexp: str or regexp + :param regexp: A regular expression matching the substring + that should be replaced. This will typically include a + named group, which can be used by ``repl``. + :type repl: str + :param repl: An expression specifying what should replace the + matched substring. Typically, this will include a named + replacement group, specified by ``regexp``. + :rtype: None + :raise ValueError: If this transformation generated an + invalid chunkstring. + """ + # Do the actual substitution + s = re.sub(regexp, repl, self._str) + + # The substitution might have generated "empty chunks" + # (substrings of the form "{}"). Remove them, so they don't + # interfere with other transformations. + s = re.sub(r"\{\}", "", s) + + # Make sure that the transformation was legal. + if self._debug > 1: + self._verify(s, self._debug - 2) + + # Commit the transformation. + self._str = s + + def __repr__(self): + """ + Return a string representation of this ``ChunkString``. + It has the form:: + + }{
}'> + + :rtype: str + """ + return "" % repr(self._str) + + def __str__(self): + """ + Return a formatted representation of this ``ChunkString``. + This representation will include extra spaces to ensure that + tags will line up with the representation of other + ``ChunkStrings`` for the same text, regardless of the chunking. + + :rtype: str + """ + # Add spaces to make everything line up. + str = re.sub(r">(?!\})", r"> ", self._str) + str = re.sub(r"([^\{])<", r"\1 <", str) + if str[0] == "<": + str = " " + str + return str + + +# ////////////////////////////////////////////////////// +# Chunking Rules +# ////////////////////////////////////////////////////// + + +class RegexpChunkRule: + """ + A rule specifying how to modify the chunking in a ``ChunkString``, + using a transformational regular expression. The + ``RegexpChunkRule`` class itself can be used to implement any + transformational rule based on regular expressions. There are + also a number of subclasses, which can be used to implement + simpler types of rules, based on matching regular expressions. + + Each ``RegexpChunkRule`` has a regular expression and a + replacement expression. When a ``RegexpChunkRule`` is "applied" + to a ``ChunkString``, it searches the ``ChunkString`` for any + substring that matches the regular expression, and replaces it + using the replacement expression. This search/replace operation + has the same semantics as ``re.sub``. + + Each ``RegexpChunkRule`` also has a description string, which + gives a short (typically less than 75 characters) description of + the purpose of the rule. + + This transformation defined by this ``RegexpChunkRule`` should + only add and remove braces; it should *not* modify the sequence + of angle-bracket delimited tags. Furthermore, this transformation + may not result in nested or mismatched bracketing. + """ + + def __init__(self, regexp, repl, descr): + """ + Construct a new RegexpChunkRule. + + :type regexp: regexp or str + :param regexp: The regular expression for this ``RegexpChunkRule``. + When this rule is applied to a ``ChunkString``, any + substring that matches ``regexp`` will be replaced using + the replacement string ``repl``. Note that this must be a + normal regular expression, not a tag pattern. + :type repl: str + :param repl: The replacement expression for this ``RegexpChunkRule``. + When this rule is applied to a ``ChunkString``, any substring + that matches ``regexp`` will be replaced using ``repl``. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + if isinstance(regexp, str): + regexp = re.compile(regexp) + self._repl = repl + self._descr = descr + self._regexp = regexp + + def apply(self, chunkstr): + # Keep docstring generic so we can inherit it. + """ + Apply this rule to the given ``ChunkString``. See the + class reference documentation for a description of what it + means to apply a rule. + + :type chunkstr: ChunkString + :param chunkstr: The chunkstring to which this rule is applied. + :rtype: None + :raise ValueError: If this transformation generated an + invalid chunkstring. + """ + chunkstr.xform(self._regexp, self._repl) + + def descr(self): + """ + Return a short description of the purpose and/or effect of + this rule. + + :rtype: str + """ + return self._descr + + def __repr__(self): + """ + Return a string representation of this rule. 
It has the form::
+
+            <RegexpChunkRule: '{<IN|VB.*>}'->'<IN>'>
+
+        Note that this representation does not include the
+        description string; that string can be accessed
+        separately with the ``descr()`` method.
+
+        :rtype: str
+        """
+        return (
+            "<RegexpChunkRule: "
+            + repr(self._regexp.pattern)
+            + "->"
+            + repr(self._repl)
+            + ">"
+        )
+
+    @staticmethod
+    def fromstring(s):
+        """
+        Create a RegexpChunkRule from a string description.
+        Currently, the following formats are supported::
+
+          {regexp}         # chunk rule
+          }regexp{         # strip rule
+          regexp}{regexp   # split rule
+          regexp{}regexp   # merge rule
+
+        Where ``regexp`` is a regular expression for the rule.  Any
+        text following the comment marker (``#``) will be used as
+        the rule's description:
+
+        >>> from nltk.chunk.regexp import RegexpChunkRule
+        >>> RegexpChunkRule.fromstring('{<DT>
?+}') + ?+'> + """ + # Split off the comment (but don't split on '\#') + m = re.match(r"(?P(\\.|[^#])*)(?P#.*)?", s) + rule = m.group("rule").strip() + comment = (m.group("comment") or "")[1:].strip() + + # Pattern bodies: chunk, strip, split, merge + try: + if not rule: + raise ValueError("Empty chunk pattern") + if rule[0] == "{" and rule[-1] == "}": + return ChunkRule(rule[1:-1], comment) + elif rule[0] == "}" and rule[-1] == "{": + return StripRule(rule[1:-1], comment) + elif "}{" in rule: + left, right = rule.split("}{") + return SplitRule(left, right, comment) + elif "{}" in rule: + left, right = rule.split("{}") + return MergeRule(left, right, comment) + elif re.match("[^{}]*{[^{}]*}[^{}]*", rule): + left, chunk, right = re.split("[{}]", rule) + return ChunkRuleWithContext(left, chunk, right, comment) + else: + raise ValueError("Illegal chunk pattern: %s" % rule) + except (ValueError, re.error) as e: + raise ValueError("Illegal chunk pattern: %s" % rule) from e + + +class ChunkRule(RegexpChunkRule): + """ + A rule specifying how to add chunks to a ``ChunkString``, using a + matching tag pattern. When applied to a ``ChunkString``, it will + find any substring that matches this tag pattern and that is not + already part of a chunk, and create a new chunk containing that + substring. + """ + + def __init__(self, tag_pattern, descr): + """ + Construct a new ``ChunkRule``. + + :type tag_pattern: str + :param tag_pattern: This rule's tag pattern. When + applied to a ``ChunkString``, this rule will + chunk any substring that matches this tag pattern and that + is not already part of a chunk. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + self._pattern = tag_pattern + regexp = re.compile( + "(?P%s)%s" + % (tag_pattern2re_pattern(tag_pattern), ChunkString.IN_STRIP_PATTERN) + ) + RegexpChunkRule.__init__(self, regexp, r"{\g}", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + '> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return "" + + +class StripRule(RegexpChunkRule): + """ + A rule specifying how to remove strips to a ``ChunkString``, + using a matching tag pattern. When applied to a + ``ChunkString``, it will find any substring that matches this + tag pattern and that is contained in a chunk, and remove it + from that chunk, thus creating two new chunks. + """ + + def __init__(self, tag_pattern, descr): + """ + Construct a new ``StripRule``. + + :type tag_pattern: str + :param tag_pattern: This rule's tag pattern. When + applied to a ``ChunkString``, this rule will + find any substring that matches this tag pattern and that + is contained in a chunk, and remove it from that chunk, + thus creating two new chunks. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + self._pattern = tag_pattern + regexp = re.compile( + "(?P%s)%s" + % (tag_pattern2re_pattern(tag_pattern), ChunkString.IN_CHUNK_PATTERN) + ) + RegexpChunkRule.__init__(self, regexp, r"}\g{", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + '> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. 
+ + :rtype: str + """ + return "" + + +class UnChunkRule(RegexpChunkRule): + """ + A rule specifying how to remove chunks to a ``ChunkString``, + using a matching tag pattern. When applied to a + ``ChunkString``, it will find any complete chunk that matches this + tag pattern, and un-chunk it. + """ + + def __init__(self, tag_pattern, descr): + """ + Construct a new ``UnChunkRule``. + + :type tag_pattern: str + :param tag_pattern: This rule's tag pattern. When + applied to a ``ChunkString``, this rule will + find any complete chunk that matches this tag pattern, + and un-chunk it. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + self._pattern = tag_pattern + regexp = re.compile(r"\{(?P%s)\}" % tag_pattern2re_pattern(tag_pattern)) + RegexpChunkRule.__init__(self, regexp, r"\g", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + '> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return "" + + +class MergeRule(RegexpChunkRule): + """ + A rule specifying how to merge chunks in a ``ChunkString``, using + two matching tag patterns: a left pattern, and a right pattern. + When applied to a ``ChunkString``, it will find any chunk whose end + matches left pattern, and immediately followed by a chunk whose + beginning matches right pattern. It will then merge those two + chunks into a single chunk. + """ + + def __init__(self, left_tag_pattern, right_tag_pattern, descr): + """ + Construct a new ``MergeRule``. + + :type right_tag_pattern: str + :param right_tag_pattern: This rule's right tag + pattern. When applied to a ``ChunkString``, this + rule will find any chunk whose end matches + ``left_tag_pattern``, and immediately followed by a chunk + whose beginning matches this pattern. It will + then merge those two chunks into a single chunk. + :type left_tag_pattern: str + :param left_tag_pattern: This rule's left tag + pattern. When applied to a ``ChunkString``, this + rule will find any chunk whose end matches + this pattern, and immediately followed by a chunk + whose beginning matches ``right_tag_pattern``. It will + then merge those two chunks into a single chunk. + + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + # Ensure that the individual patterns are coherent. E.g., if + # left='(' and right=')', then this will raise an exception: + re.compile(tag_pattern2re_pattern(left_tag_pattern)) + re.compile(tag_pattern2re_pattern(right_tag_pattern)) + + self._left_tag_pattern = left_tag_pattern + self._right_tag_pattern = right_tag_pattern + regexp = re.compile( + "(?P%s)}{(?=%s)" + % ( + tag_pattern2re_pattern(left_tag_pattern), + tag_pattern2re_pattern(right_tag_pattern), + ) + ) + RegexpChunkRule.__init__(self, regexp, r"\g", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + ', ''> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return ( + "" + ) + + +class SplitRule(RegexpChunkRule): + """ + A rule specifying how to split chunks in a ``ChunkString``, using + two matching tag patterns: a left pattern, and a right pattern. 
+ When applied to a ``ChunkString``, it will find any chunk that + matches the left pattern followed by the right pattern. It will + then split the chunk into two new chunks, at the point between the + two pattern matches. + """ + + def __init__(self, left_tag_pattern, right_tag_pattern, descr): + """ + Construct a new ``SplitRule``. + + :type right_tag_pattern: str + :param right_tag_pattern: This rule's right tag + pattern. When applied to a ``ChunkString``, this rule will + find any chunk containing a substring that matches + ``left_tag_pattern`` followed by this pattern. It will + then split the chunk into two new chunks at the point + between these two matching patterns. + :type left_tag_pattern: str + :param left_tag_pattern: This rule's left tag + pattern. When applied to a ``ChunkString``, this rule will + find any chunk containing a substring that matches this + pattern followed by ``right_tag_pattern``. It will then + split the chunk into two new chunks at the point between + these two matching patterns. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + # Ensure that the individual patterns are coherent. E.g., if + # left='(' and right=')', then this will raise an exception: + re.compile(tag_pattern2re_pattern(left_tag_pattern)) + re.compile(tag_pattern2re_pattern(right_tag_pattern)) + + self._left_tag_pattern = left_tag_pattern + self._right_tag_pattern = right_tag_pattern + regexp = re.compile( + "(?P%s)(?=%s)" + % ( + tag_pattern2re_pattern(left_tag_pattern), + tag_pattern2re_pattern(right_tag_pattern), + ) + ) + RegexpChunkRule.__init__(self, regexp, r"\g}{", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + ', '
'> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return ( + "" + ) + + +class ExpandLeftRule(RegexpChunkRule): + """ + A rule specifying how to expand chunks in a ``ChunkString`` to the left, + using two matching tag patterns: a left pattern, and a right pattern. + When applied to a ``ChunkString``, it will find any chunk whose beginning + matches right pattern, and immediately preceded by a strip whose + end matches left pattern. It will then expand the chunk to incorporate + the new material on the left. + """ + + def __init__(self, left_tag_pattern, right_tag_pattern, descr): + """ + Construct a new ``ExpandRightRule``. + + :type right_tag_pattern: str + :param right_tag_pattern: This rule's right tag + pattern. When applied to a ``ChunkString``, this + rule will find any chunk whose beginning matches + ``right_tag_pattern``, and immediately preceded by a strip + whose end matches this pattern. It will + then merge those two chunks into a single chunk. + :type left_tag_pattern: str + :param left_tag_pattern: This rule's left tag + pattern. When applied to a ``ChunkString``, this + rule will find any chunk whose beginning matches + this pattern, and immediately preceded by a strip + whose end matches ``left_tag_pattern``. It will + then expand the chunk to incorporate the new material on the left. + + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + # Ensure that the individual patterns are coherent. E.g., if + # left='(' and right=')', then this will raise an exception: + re.compile(tag_pattern2re_pattern(left_tag_pattern)) + re.compile(tag_pattern2re_pattern(right_tag_pattern)) + + self._left_tag_pattern = left_tag_pattern + self._right_tag_pattern = right_tag_pattern + regexp = re.compile( + r"(?P%s)\{(?P%s)" + % ( + tag_pattern2re_pattern(left_tag_pattern), + tag_pattern2re_pattern(right_tag_pattern), + ) + ) + RegexpChunkRule.__init__(self, regexp, r"{\g\g", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + ', ''> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return ( + "" + ) + + +class ExpandRightRule(RegexpChunkRule): + """ + A rule specifying how to expand chunks in a ``ChunkString`` to the + right, using two matching tag patterns: a left pattern, and a + right pattern. When applied to a ``ChunkString``, it will find any + chunk whose end matches left pattern, and immediately followed by + a strip whose beginning matches right pattern. It will then + expand the chunk to incorporate the new material on the right. + """ + + def __init__(self, left_tag_pattern, right_tag_pattern, descr): + """ + Construct a new ``ExpandRightRule``. + + :type right_tag_pattern: str + :param right_tag_pattern: This rule's right tag + pattern. When applied to a ``ChunkString``, this + rule will find any chunk whose end matches + ``left_tag_pattern``, and immediately followed by a strip + whose beginning matches this pattern. It will + then merge those two chunks into a single chunk. + :type left_tag_pattern: str + :param left_tag_pattern: This rule's left tag + pattern. 
When applied to a ``ChunkString``, this + rule will find any chunk whose end matches + this pattern, and immediately followed by a strip + whose beginning matches ``right_tag_pattern``. It will + then expand the chunk to incorporate the new material on the right. + + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + # Ensure that the individual patterns are coherent. E.g., if + # left='(' and right=')', then this will raise an exception: + re.compile(tag_pattern2re_pattern(left_tag_pattern)) + re.compile(tag_pattern2re_pattern(right_tag_pattern)) + + self._left_tag_pattern = left_tag_pattern + self._right_tag_pattern = right_tag_pattern + regexp = re.compile( + r"(?P%s)\}(?P%s)" + % ( + tag_pattern2re_pattern(left_tag_pattern), + tag_pattern2re_pattern(right_tag_pattern), + ) + ) + RegexpChunkRule.__init__(self, regexp, r"\g\g}", descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + ', ''> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return ( + "" + ) + + +class ChunkRuleWithContext(RegexpChunkRule): + """ + A rule specifying how to add chunks to a ``ChunkString``, using + three matching tag patterns: one for the left context, one for the + chunk, and one for the right context. When applied to a + ``ChunkString``, it will find any substring that matches the chunk + tag pattern, is surrounded by substrings that match the two + context patterns, and is not already part of a chunk; and create a + new chunk containing the substring that matched the chunk tag + pattern. + + Caveat: Both the left and right context are consumed when this + rule matches; therefore, if you need to find overlapping matches, + you will need to apply your rule more than once. + """ + + def __init__( + self, + left_context_tag_pattern, + chunk_tag_pattern, + right_context_tag_pattern, + descr, + ): + """ + Construct a new ``ChunkRuleWithContext``. + + :type left_context_tag_pattern: str + :param left_context_tag_pattern: A tag pattern that must match + the left context of ``chunk_tag_pattern`` for this rule to + apply. + :type chunk_tag_pattern: str + :param chunk_tag_pattern: A tag pattern that must match for this + rule to apply. If the rule does apply, then this pattern + also identifies the substring that will be made into a chunk. + :type right_context_tag_pattern: str + :param right_context_tag_pattern: A tag pattern that must match + the right context of ``chunk_tag_pattern`` for this rule to + apply. + :type descr: str + :param descr: A short description of the purpose and/or effect + of this rule. + """ + # Ensure that the individual patterns are coherent. 
E.g., if + # left='(' and right=')', then this will raise an exception: + re.compile(tag_pattern2re_pattern(left_context_tag_pattern)) + re.compile(tag_pattern2re_pattern(chunk_tag_pattern)) + re.compile(tag_pattern2re_pattern(right_context_tag_pattern)) + + self._left_context_tag_pattern = left_context_tag_pattern + self._chunk_tag_pattern = chunk_tag_pattern + self._right_context_tag_pattern = right_context_tag_pattern + regexp = re.compile( + "(?P%s)(?P%s)(?P%s)%s" + % ( + tag_pattern2re_pattern(left_context_tag_pattern), + tag_pattern2re_pattern(chunk_tag_pattern), + tag_pattern2re_pattern(right_context_tag_pattern), + ChunkString.IN_STRIP_PATTERN, + ) + ) + replacement = r"\g{\g}\g" + RegexpChunkRule.__init__(self, regexp, replacement, descr) + + def __repr__(self): + """ + Return a string representation of this rule. It has the form:: + + ', '', '
'> + + Note that this representation does not include the + description string; that string can be accessed + separately with the ``descr()`` method. + + :rtype: str + """ + return "".format( + self._left_context_tag_pattern, + self._chunk_tag_pattern, + self._right_context_tag_pattern, + ) + + +# ////////////////////////////////////////////////////// +# Tag Pattern Format Conversion +# ////////////////////////////////////////////////////// + +# this should probably be made more strict than it is -- e.g., it +# currently accepts 'foo'. +CHUNK_TAG_PATTERN = re.compile( + r"^(({}|<{}>)*)$".format(r"([^\{\}<>]|\{\d+,?\}|\{\d*,\d+\})+", r"[^\{\}<>]+") +) + + +def tag_pattern2re_pattern(tag_pattern): + """ + Convert a tag pattern to a regular expression pattern. A "tag + pattern" is a modified version of a regular expression, designed + for matching sequences of tags. The differences between regular + expression patterns and tag patterns are: + + - In tag patterns, ``'<'`` and ``'>'`` act as parentheses; so + ``'+'`` matches one or more repetitions of ``''``, not + ``''``. + - Whitespace in tag patterns is ignored. So + ``'
<DT> | <NN>'`` is equivalent to ``'<DT>
|'`` + - In tag patterns, ``'.'`` is equivalent to ``'[^{}<>]'``; so + ``''`` matches any single tag starting with ``'NN'``. + + In particular, ``tag_pattern2re_pattern`` performs the following + transformations on the given pattern: + + - Replace '.' with '[^<>{}]' + - Remove any whitespace + - Add extra parens around '<' and '>', to make '<' and '>' act + like parentheses. E.g., so that in '+', the '+' has scope + over the entire ''; and so that in '', the '|' has + scope over 'NN' and 'IN', but not '<' or '>'. + - Check to make sure the resulting pattern is valid. + + :type tag_pattern: str + :param tag_pattern: The tag pattern to convert to a regular + expression pattern. + :raise ValueError: If ``tag_pattern`` is not a valid tag pattern. + In particular, ``tag_pattern`` should not include braces; and it + should not contain nested or mismatched angle-brackets. + :rtype: str + :return: A regular expression pattern corresponding to + ``tag_pattern``. + """ + # Clean up the regular expression + tag_pattern = re.sub(r"\s", "", tag_pattern) + tag_pattern = re.sub(r"<", "(<(", tag_pattern) + tag_pattern = re.sub(r">", ")>)", tag_pattern) + + # Check the regular expression + if not CHUNK_TAG_PATTERN.match(tag_pattern): + raise ValueError("Bad tag pattern: %r" % tag_pattern) + + # Replace "." with CHUNK_TAG_CHAR. + # We have to do this after, since it adds {}[]<>s, which would + # confuse CHUNK_TAG_PATTERN. + # PRE doesn't have lookback assertions, so reverse twice, and do + # the pattern backwards (with lookahead assertions). This can be + # made much cleaner once we can switch back to SRE. + def reverse_str(str): + lst = list(str) + lst.reverse() + return "".join(lst) + + tc_rev = reverse_str(ChunkString.CHUNK_TAG_CHAR) + reversed = reverse_str(tag_pattern) + reversed = re.sub(r"\.(?!\\(\\\\)*($|[^\\]))", tc_rev, reversed) + tag_pattern = reverse_str(reversed) + + return tag_pattern + + +# ////////////////////////////////////////////////////// +# RegexpChunkParser +# ////////////////////////////////////////////////////// + + +class RegexpChunkParser(ChunkParserI): + """ + A regular expression based chunk parser. ``RegexpChunkParser`` uses a + sequence of "rules" to find chunks of a single type within a + text. The chunking of the text is encoded using a ``ChunkString``, + and each rule acts by modifying the chunking in the + ``ChunkString``. The rules are all implemented using regular + expression matching and substitution. + + The ``RegexpChunkRule`` class and its subclasses (``ChunkRule``, + ``StripRule``, ``UnChunkRule``, ``MergeRule``, and ``SplitRule``) + define the rules that are used by ``RegexpChunkParser``. Each rule + defines an ``apply()`` method, which modifies the chunking encoded + by a given ``ChunkString``. + + :type _rules: list(RegexpChunkRule) + :ivar _rules: The list of rules that should be applied to a text. + :type _trace: int + :ivar _trace: The default level of tracing. + + """ + + def __init__(self, rules, chunk_label="NP", root_label="S", trace=0): + """ + Construct a new ``RegexpChunkParser``. + + :type rules: list(RegexpChunkRule) + :param rules: The sequence of rules that should be used to + generate the chunking for a tagged text. + :type chunk_label: str + :param chunk_label: The node value that should be used for + chunk subtrees. This is typically a short string + describing the type of information contained by the chunk, + such as ``"NP"`` for base noun phrases. 
+ :type root_label: str + :param root_label: The node value that should be used for the + top node of the chunk structure. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + ``1`` will generate normal tracing output; and ``2`` or + higher will generate verbose tracing output. + """ + self._rules = rules + self._trace = trace + self._chunk_label = chunk_label + self._root_label = root_label + + def _trace_apply(self, chunkstr, verbose): + """ + Apply each rule of this ``RegexpChunkParser`` to ``chunkstr``, in + turn. Generate trace output between each rule. If ``verbose`` + is true, then generate verbose output. + + :type chunkstr: ChunkString + :param chunkstr: The chunk string to which each rule should be + applied. + :type verbose: bool + :param verbose: Whether output should be verbose. + :rtype: None + """ + print("# Input:") + print(chunkstr) + for rule in self._rules: + rule.apply(chunkstr) + if verbose: + print("#", rule.descr() + " (" + repr(rule) + "):") + else: + print("#", rule.descr() + ":") + print(chunkstr) + + def _notrace_apply(self, chunkstr): + """ + Apply each rule of this ``RegexpChunkParser`` to ``chunkstr``, in + turn. + + :param chunkstr: The chunk string to which each rule should be + applied. + :type chunkstr: ChunkString + :rtype: None + """ + + for rule in self._rules: + rule.apply(chunkstr) + + def parse(self, chunk_struct, trace=None): + """ + :type chunk_struct: Tree + :param chunk_struct: the chunk structure to be (further) chunked + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + ``1`` will generate normal tracing output; and ``2`` or + higher will generate verbose tracing output. This value + overrides the trace level value that was given to the + constructor. + :rtype: Tree + :return: a chunk structure that encodes the chunks in a given + tagged sentence. A chunk is a non-overlapping linguistic + group, such as a noun phrase. The set of chunks + identified in the chunk structure depends on the rules + used to define this ``RegexpChunkParser``. + """ + if len(chunk_struct) == 0: + print("Warning: parsing empty text") + return Tree(self._root_label, []) + + try: + chunk_struct.label() + except AttributeError: + chunk_struct = Tree(self._root_label, chunk_struct) + + # Use the default trace value? + if trace is None: + trace = self._trace + + chunkstr = ChunkString(chunk_struct) + + # Apply the sequence of rules to the chunkstring. + if trace: + verbose = trace > 1 + self._trace_apply(chunkstr, verbose) + else: + self._notrace_apply(chunkstr) + + # Use the chunkstring to create a chunk structure. + return chunkstr.to_chunkstruct(self._chunk_label) + + def rules(self): + """ + :return: the sequence of rules used by ``RegexpChunkParser``. + :rtype: list(RegexpChunkRule) + """ + return self._rules + + def __repr__(self): + """ + :return: a concise string representation of this + ``RegexpChunkParser``. + :rtype: str + """ + return "" % len(self._rules) + + def __str__(self): + """ + :return: a verbose string representation of this ``RegexpChunkParser``. 
+ :rtype: str + """ + s = "RegexpChunkParser with %d rules:\n" % len(self._rules) + margin = 0 + for rule in self._rules: + margin = max(margin, len(rule.descr())) + if margin < 35: + format = " %" + repr(-(margin + 3)) + "s%s\n" + else: + format = " %s\n %s\n" + for rule in self._rules: + s += format % (rule.descr(), repr(rule)) + return s[:-1] + + +# ////////////////////////////////////////////////////// +# Chunk Grammar +# ////////////////////////////////////////////////////// + + +class RegexpParser(ChunkParserI): + r""" + A grammar based chunk parser. ``chunk.RegexpParser`` uses a set of + regular expression patterns to specify the behavior of the parser. + The chunking of the text is encoded using a ``ChunkString``, and + each rule acts by modifying the chunking in the ``ChunkString``. + The rules are all implemented using regular expression matching + and substitution. + + A grammar contains one or more clauses in the following form:: + + NP: + {} # chunk determiners and adjectives + }<[\.VI].*>+{ # strip any tag beginning with V, I, or . + <.*>}{
# split a chunk at a determiner + {} # merge chunk ending with det/adj + # with one starting with a noun + + The patterns of a clause are executed in order. An earlier + pattern may introduce a chunk boundary that prevents a later + pattern from executing. Sometimes an individual pattern will + match on multiple, overlapping extents of the input. As with + regular expression substitution more generally, the chunker will + identify the first match possible, then continue looking for matches + after this one has ended. + + The clauses of a grammar are also executed in order. A cascaded + chunk parser is one having more than one clause. The maximum depth + of a parse tree created by this chunk parser is the same as the + number of clauses in the grammar. + + When tracing is turned on, the comment portion of a line is displayed + each time the corresponding pattern is applied. + + :type _start: str + :ivar _start: The start symbol of the grammar (the root node of + resulting trees) + :type _stages: int + :ivar _stages: The list of parsing stages corresponding to the grammar + + """ + + def __init__(self, grammar, root_label="S", loop=1, trace=0): + """ + Create a new chunk parser, from the given start state + and set of chunk patterns. + + :param grammar: The grammar, or a list of RegexpChunkParser objects + :type grammar: str or list(RegexpChunkParser) + :param root_label: The top node of the tree being created + :type root_label: str or Nonterminal + :param loop: The number of times to run through the patterns + :type loop: int + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + ``1`` will generate normal tracing output; and ``2`` or + higher will generate verbose tracing output. + """ + self._trace = trace + self._stages = [] + self._grammar = grammar + self._loop = loop + + if isinstance(grammar, str): + self._read_grammar(grammar, root_label, trace) + else: + # Make sur the grammar looks like it has the right type: + type_err = ( + "Expected string or list of RegexpChunkParsers " "for the grammar." + ) + try: + grammar = list(grammar) + except BaseException as e: + raise TypeError(type_err) from e + for elt in grammar: + if not isinstance(elt, RegexpChunkParser): + raise TypeError(type_err) + self._stages = grammar + + def _read_grammar(self, grammar, root_label, trace): + """ + Helper function for __init__: read the grammar if it is a + string. + """ + rules = [] + lhs = None + pattern = regex.compile("(?P(\\.|[^:])*)(:(?P.*))") + for line in grammar.split("\n"): + line = line.strip() + + # New stage begins if there's an unescaped ':' + m = pattern.match(line) + if m: + # Record the stage that we just completed. + self._add_stage(rules, lhs, root_label, trace) + # Start a new stage. + lhs = m.group("nonterminal").strip() + rules = [] + line = m.group("rule").strip() + + # Skip blank & comment-only lines + if line == "" or line.startswith("#"): + continue + + # Add the rule + rules.append(RegexpChunkRule.fromstring(line)) + + # Record the final stage + self._add_stage(rules, lhs, root_label, trace) + + def _add_stage(self, rules, lhs, root_label, trace): + """ + Helper function for __init__: add a new stage to the parser. 
+ """ + if rules != []: + if not lhs: + raise ValueError("Expected stage marker (eg NP:)") + parser = RegexpChunkParser( + rules, chunk_label=lhs, root_label=root_label, trace=trace + ) + self._stages.append(parser) + + def parse(self, chunk_struct, trace=None): + """ + Apply the chunk parser to this input. + + :type chunk_struct: Tree + :param chunk_struct: the chunk structure to be (further) chunked + (this tree is modified, and is also returned) + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + ``1`` will generate normal tracing output; and ``2`` or + higher will generate verbose tracing output. This value + overrides the trace level value that was given to the + constructor. + :return: the chunked output. + :rtype: Tree + """ + if trace is None: + trace = self._trace + for i in range(self._loop): + for parser in self._stages: + chunk_struct = parser.parse(chunk_struct, trace=trace) + return chunk_struct + + def __repr__(self): + """ + :return: a concise string representation of this ``chunk.RegexpParser``. + :rtype: str + """ + return "" % len(self._stages) + + def __str__(self): + """ + :return: a verbose string representation of this + ``RegexpParser``. + :rtype: str + """ + s = "chunk.RegexpParser with %d stages:\n" % len(self._stages) + margin = 0 + for parser in self._stages: + s += "%s\n" % parser + return s[:-1] + + +# ////////////////////////////////////////////////////// +# Demonstration code +# ////////////////////////////////////////////////////// + + +def demo_eval(chunkparser, text): + """ + Demonstration code for evaluating a chunk parser, using a + ``ChunkScore``. This function assumes that ``text`` contains one + sentence per line, and that each sentence has the form expected by + ``tree.chunk``. It runs the given chunk parser on each sentence in + the text, and scores the result. It prints the final score + (precision, recall, and f-measure); and reports the set of chunks + that were missed and the set of chunks that were incorrect. (At + most 10 missing chunks and 10 incorrect chunks are reported). + + :param chunkparser: The chunkparser to be tested + :type chunkparser: ChunkParserI + :param text: The chunked tagged text that should be used for + evaluation. + :type text: str + """ + from nltk import chunk + from nltk.tree import Tree + + # Evaluate our chunk parser. + chunkscore = chunk.ChunkScore() + + for sentence in text.split("\n"): + print(sentence) + sentence = sentence.strip() + if not sentence: + continue + gold = chunk.tagstr2tree(sentence) + tokens = gold.leaves() + test = chunkparser.parse(Tree("S", tokens), trace=1) + chunkscore.score(gold, test) + print() + + print("/" + ("=" * 75) + "\\") + print("Scoring", chunkparser) + print("-" * 77) + print("Precision: %5.1f%%" % (chunkscore.precision() * 100), " " * 4, end=" ") + print("Recall: %5.1f%%" % (chunkscore.recall() * 100), " " * 6, end=" ") + print("F-Measure: %5.1f%%" % (chunkscore.f_measure() * 100)) + + # Missed chunks. + if chunkscore.missed(): + print("Missed:") + missed = chunkscore.missed() + for chunk in missed[:10]: + print(" ", " ".join(map(str, chunk))) + if len(chunkscore.missed()) > 10: + print(" ...") + + # Incorrect chunks. 
+ if chunkscore.incorrect(): + print("Incorrect:") + incorrect = chunkscore.incorrect() + for chunk in incorrect[:10]: + print(" ", " ".join(map(str, chunk))) + if len(chunkscore.incorrect()) > 10: + print(" ...") + + print("\\" + ("=" * 75) + "/") + print() + + +def demo(): + """ + A demonstration for the ``RegexpChunkParser`` class. A single text is + parsed with four different chunk parsers, using a variety of rules + and strategies. + """ + + from nltk import Tree, chunk + + text = """\ + [ the/DT little/JJ cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] ./. + [ John/NNP ] saw/VBD [the/DT cats/NNS] [the/DT dog/NN] chased/VBD ./. + [ John/NNP ] thinks/VBZ [ Mary/NN ] saw/VBD [ the/DT cat/NN ] sit/VB on/IN [ the/DT mat/NN ]./. + """ + + print("*" * 75) + print("Evaluation text:") + print(text) + print("*" * 75) + print() + + grammar = r""" + NP: # NP stage + {
<DT>?<JJ>*<NN>}    # chunk determiners, adjectives and nouns
+      {<NNP>+}            # chunk proper nouns
+    """
+    cp = chunk.RegexpParser(grammar)
+    demo_eval(cp, text)
+
+    grammar = r"""
+    NP:
+      {<.*>}              # start by chunking each tag
+      }<[\.VI].*>+{       # unchunk any verbs, prepositions or periods
+      <DT|JJ>{}<NN.*>     # merge det/adj with nouns
+    """
+    cp = chunk.RegexpParser(grammar)
+    demo_eval(cp, text)
+
+    grammar = r"""
+    NP: {<DT>?<JJ>*<NN>}    # chunk determiners, adjectives and nouns
+    VP: {<TO>?<VB.*>}       # VP = verb words
+    """
+    cp = chunk.RegexpParser(grammar)
+    demo_eval(cp, text)
+
+    grammar = r"""
+    NP: {<.*>*}             # start by chunking everything
+        }<[\.VI].*>+{       # strip any verbs, prepositions or periods
+        <.*>}{<DT>
# separate on determiners + PP: {} # PP = preposition + noun phrase + VP: {*} # VP = verb words + NPs and PPs + """ + cp = chunk.RegexpParser(grammar) + demo_eval(cp, text) + + # Evaluation + + from nltk.corpus import conll2000 + + print() + print("Demonstration of empty grammar:") + + cp = chunk.RegexpParser("") + print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt", chunk_types=("NP",)))) + + print() + print("Demonstration of accuracy evaluation using CoNLL tags:") + + grammar = r""" + NP: + {<.*>} # start by chunking each tag + }<[\.VI].*>+{ # unchunk any verbs, prepositions or periods + {} # merge det/adj with nouns + """ + cp = chunk.RegexpParser(grammar) + print(chunk.accuracy(cp, conll2000.chunked_sents("test.txt")[:5])) + + print() + print("Demonstration of tagged token input") + + grammar = r""" + NP: {<.*>*} # start by chunking everything + }<[\.VI].*>+{ # strip any verbs, prepositions or periods + <.*>}{
# separate on determiners + PP: {} # PP = preposition + noun phrase + VP: {*} # VP = verb words + NPs and PPs + """ + cp = chunk.RegexpParser(grammar) + print( + cp.parse( + [ + ("the", "DT"), + ("little", "JJ"), + ("cat", "NN"), + ("sat", "VBD"), + ("on", "IN"), + ("the", "DT"), + ("mat", "NN"), + (".", "."), + ] + ) + ) + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/api.py new file mode 100644 index 0000000000000000000000000000000000000000..8da588408f83894b512166334197ec43b6899631 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/api.py @@ -0,0 +1,74 @@ +# Natural Language Toolkit: Clusterer Interfaces +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Porting: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod + +from nltk.probability import DictionaryProbDist + + +class ClusterI(metaclass=ABCMeta): + """ + Interface covering basic clustering functionality. + """ + + @abstractmethod + def cluster(self, vectors, assign_clusters=False): + """ + Assigns the vectors to clusters, learning the clustering parameters + from the data. Returns a cluster identifier for each vector. + """ + + @abstractmethod + def classify(self, token): + """ + Classifies the token into a cluster, setting the token's CLUSTER + parameter to that cluster identifier. + """ + + def likelihood(self, vector, label): + """ + Returns the likelihood (a float) of the token having the + corresponding cluster. + """ + if self.classify(vector) == label: + return 1.0 + else: + return 0.0 + + def classification_probdist(self, vector): + """ + Classifies the token into a cluster, returning + a probability distribution over the cluster identifiers. + """ + likelihoods = {} + sum = 0.0 + for cluster in self.cluster_names(): + likelihoods[cluster] = self.likelihood(vector, cluster) + sum += likelihoods[cluster] + for cluster in self.cluster_names(): + likelihoods[cluster] /= sum + return DictionaryProbDist(likelihoods) + + @abstractmethod + def num_clusters(self): + """ + Returns the number of clusters. + """ + + def cluster_names(self): + """ + Returns the names of the clusters. + :rtype: list + """ + return list(range(self.num_clusters())) + + def cluster_name(self, index): + """ + Returns the names of the cluster at index. + """ + return index diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/em.py b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/em.py new file mode 100644 index 0000000000000000000000000000000000000000..cb46fe35700afed79b728336bd1f07c33ed50dcb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/em.py @@ -0,0 +1,219 @@ +# Natural Language Toolkit: Expectation Maximization Clusterer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +try: + import numpy +except ImportError: + pass + +from nltk.cluster.util import VectorSpaceClusterer + + +class EMClusterer(VectorSpaceClusterer): + """ + The Gaussian EM clusterer models the vectors as being produced by + a mixture of k Gaussian sources. The parameters of these sources + (prior probability, mean and covariance matrix) are then found to + maximise the likelihood of the given data. This is done with the + expectation maximisation algorithm. It starts with k arbitrarily + chosen means, priors and covariance matrices. 
It then calculates + the membership probabilities for each vector in each of the + clusters; this is the 'E' step. The cluster parameters are then + updated in the 'M' step using the maximum likelihood estimate from + the cluster membership probabilities. This process continues until + the likelihood of the data does not significantly increase. + """ + + def __init__( + self, + initial_means, + priors=None, + covariance_matrices=None, + conv_threshold=1e-6, + bias=0.1, + normalise=False, + svd_dimensions=None, + ): + """ + Creates an EM clusterer with the given starting parameters, + convergence threshold and vector mangling parameters. + + :param initial_means: the means of the gaussian cluster centers + :type initial_means: [seq of] numpy array or seq of SparseArray + :param priors: the prior probability for each cluster + :type priors: numpy array or seq of float + :param covariance_matrices: the covariance matrix for each cluster + :type covariance_matrices: [seq of] numpy array + :param conv_threshold: maximum change in likelihood before deemed + convergent + :type conv_threshold: int or float + :param bias: variance bias used to ensure non-singular covariance + matrices + :type bias: float + :param normalise: should vectors be normalised to length 1 + :type normalise: boolean + :param svd_dimensions: number of dimensions to use in reducing vector + dimensionsionality with SVD + :type svd_dimensions: int + """ + VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) + self._means = numpy.array(initial_means, numpy.float64) + self._num_clusters = len(initial_means) + self._conv_threshold = conv_threshold + self._covariance_matrices = covariance_matrices + self._priors = priors + self._bias = bias + + def num_clusters(self): + return self._num_clusters + + def cluster_vectorspace(self, vectors, trace=False): + assert len(vectors) > 0 + + # set the parameters to initial values + dimensions = len(vectors[0]) + means = self._means + priors = self._priors + if not priors: + priors = self._priors = ( + numpy.ones(self._num_clusters, numpy.float64) / self._num_clusters + ) + covariances = self._covariance_matrices + if not covariances: + covariances = self._covariance_matrices = [ + numpy.identity(dimensions, numpy.float64) + for i in range(self._num_clusters) + ] + + # do the E and M steps until the likelihood plateaus + lastl = self._loglikelihood(vectors, priors, means, covariances) + converged = False + + while not converged: + if trace: + print("iteration; loglikelihood", lastl) + # E-step, calculate hidden variables, h[i,j] + h = numpy.zeros((len(vectors), self._num_clusters), numpy.float64) + for i in range(len(vectors)): + for j in range(self._num_clusters): + h[i, j] = priors[j] * self._gaussian( + means[j], covariances[j], vectors[i] + ) + h[i, :] /= sum(h[i, :]) + + # M-step, update parameters - cvm, p, mean + for j in range(self._num_clusters): + covariance_before = covariances[j] + new_covariance = numpy.zeros((dimensions, dimensions), numpy.float64) + new_mean = numpy.zeros(dimensions, numpy.float64) + sum_hj = 0.0 + for i in range(len(vectors)): + delta = vectors[i] - means[j] + new_covariance += h[i, j] * numpy.multiply.outer(delta, delta) + sum_hj += h[i, j] + new_mean += h[i, j] * vectors[i] + covariances[j] = new_covariance / sum_hj + means[j] = new_mean / sum_hj + priors[j] = sum_hj / len(vectors) + + # bias term to stop covariance matrix being singular + covariances[j] += self._bias * numpy.identity(dimensions, numpy.float64) + + # calculate likelihood - FIXME: 
may be broken + l = self._loglikelihood(vectors, priors, means, covariances) + + # check for convergence + if abs(lastl - l) < self._conv_threshold: + converged = True + lastl = l + + def classify_vectorspace(self, vector): + best = None + for j in range(self._num_clusters): + p = self._priors[j] * self._gaussian( + self._means[j], self._covariance_matrices[j], vector + ) + if not best or p > best[0]: + best = (p, j) + return best[1] + + def likelihood_vectorspace(self, vector, cluster): + cid = self.cluster_names().index(cluster) + return self._priors[cluster] * self._gaussian( + self._means[cluster], self._covariance_matrices[cluster], vector + ) + + def _gaussian(self, mean, cvm, x): + m = len(mean) + assert cvm.shape == (m, m), "bad sized covariance matrix, %s" % str(cvm.shape) + try: + det = numpy.linalg.det(cvm) + inv = numpy.linalg.inv(cvm) + a = det**-0.5 * (2 * numpy.pi) ** (-m / 2.0) + dx = x - mean + print(dx, inv) + b = -0.5 * numpy.dot(numpy.dot(dx, inv), dx) + return a * numpy.exp(b) + except OverflowError: + # happens when the exponent is negative infinity - i.e. b = 0 + # i.e. the inverse of cvm is huge (cvm is almost zero) + return 0 + + def _loglikelihood(self, vectors, priors, means, covariances): + llh = 0.0 + for vector in vectors: + p = 0 + for j in range(len(priors)): + p += priors[j] * self._gaussian(means[j], covariances[j], vector) + llh += numpy.log(p) + return llh + + def __repr__(self): + return "" % list(self._means) + + +def demo(): + """ + Non-interactive demonstration of the clusterers with simple 2-D data. + """ + + from nltk import cluster + + # example from figure 14.10, page 519, Manning and Schutze + + vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]] + means = [[4, 2], [4, 2.01]] + + clusterer = cluster.EMClusterer(means, bias=0.1) + clusters = clusterer.cluster(vectors, True, trace=True) + + print("Clustered:", vectors) + print("As: ", clusters) + print() + + for c in range(2): + print("Cluster:", c) + print("Prior: ", clusterer._priors[c]) + print("Mean: ", clusterer._means[c]) + print("Covar: ", clusterer._covariance_matrices[c]) + print() + + # classify a new vector + vector = numpy.array([2, 2]) + print("classify(%s):" % vector, end=" ") + print(clusterer.classify(vector)) + + # show the classification probabilities + vector = numpy.array([2, 2]) + print("classification_probdist(%s):" % vector) + pdist = clusterer.classification_probdist(vector) + for sample in pdist.samples(): + print(f"{sample} => {pdist.prob(sample) * 100:.0f}%") + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/gaac.py b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/gaac.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb9e2c51141ba915bf4defe2d8cdeadaa14e6b0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/gaac.py @@ -0,0 +1,170 @@ +# Natural Language Toolkit: Group Average Agglomerative Clusterer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +try: + import numpy +except ImportError: + pass + +from nltk.cluster.util import Dendrogram, VectorSpaceClusterer, cosine_distance + + +class GAAClusterer(VectorSpaceClusterer): + """ + The Group Average Agglomerative starts with each of the N vectors as singleton + clusters. It then iteratively merges pairs of clusters which have the + closest centroids. This continues until there is only one cluster. 
The + order of merges gives rise to a dendrogram: a tree with the earlier merges + lower than later merges. The membership of a given number of clusters c, 1 + <= c <= N, can be found by cutting the dendrogram at depth c. + + This clusterer uses the cosine similarity metric only, which allows for + efficient speed-up in the clustering process. + """ + + def __init__(self, num_clusters=1, normalise=True, svd_dimensions=None): + VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) + self._num_clusters = num_clusters + self._dendrogram = None + self._groups_values = None + + def cluster(self, vectors, assign_clusters=False, trace=False): + # stores the merge order + self._dendrogram = Dendrogram( + [numpy.array(vector, numpy.float64) for vector in vectors] + ) + return VectorSpaceClusterer.cluster(self, vectors, assign_clusters, trace) + + def cluster_vectorspace(self, vectors, trace=False): + # variables describing the initial situation + N = len(vectors) + cluster_len = [1] * N + cluster_count = N + index_map = numpy.arange(N) + + # construct the similarity matrix + dims = (N, N) + dist = numpy.ones(dims, dtype=float) * numpy.inf + for i in range(N): + for j in range(i + 1, N): + dist[i, j] = cosine_distance(vectors[i], vectors[j]) + + while cluster_count > max(self._num_clusters, 1): + i, j = numpy.unravel_index(dist.argmin(), dims) + if trace: + print("merging %d and %d" % (i, j)) + + # update similarities for merging i and j + self._merge_similarities(dist, cluster_len, i, j) + + # remove j + dist[:, j] = numpy.inf + dist[j, :] = numpy.inf + + # merge the clusters + cluster_len[i] = cluster_len[i] + cluster_len[j] + self._dendrogram.merge(index_map[i], index_map[j]) + cluster_count -= 1 + + # update the index map to reflect the indexes if we + # had removed j + index_map[j + 1 :] -= 1 + index_map[j] = N + + self.update_clusters(self._num_clusters) + + def _merge_similarities(self, dist, cluster_len, i, j): + # the new cluster i merged from i and j adopts the average of + # i and j's similarity to each other cluster, weighted by the + # number of points in the clusters i and j + i_weight = cluster_len[i] + j_weight = cluster_len[j] + weight_sum = i_weight + j_weight + + # update for x 0 + if self._should_normalise: + centroid = self._normalise(cluster[0]) + else: + centroid = numpy.array(cluster[0]) + for vector in cluster[1:]: + if self._should_normalise: + centroid += self._normalise(vector) + else: + centroid += vector + centroid /= len(cluster) + self._centroids.append(centroid) + self._num_clusters = len(self._centroids) + + def classify_vectorspace(self, vector): + best = None + for i in range(self._num_clusters): + centroid = self._centroids[i] + dist = cosine_distance(vector, centroid) + if not best or dist < best[0]: + best = (dist, i) + return best[1] + + def dendrogram(self): + """ + :return: The dendrogram representing the current clustering + :rtype: Dendrogram + """ + return self._dendrogram + + def num_clusters(self): + return self._num_clusters + + def __repr__(self): + return "" % self._num_clusters + + +def demo(): + """ + Non-interactive demonstration of the clusterers with simple 2-D data. 
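+
+    The dendrogram built during clustering can also be re-cut into a
+    different number of groups afterwards; a minimal sketch, assuming the
+    ``clusterer`` constructed in the body below::
+
+        >>> clusterer.dendrogram().groups(2)  # doctest: +SKIP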
+ """ + + from nltk.cluster import GAAClusterer + + # use a set of tokens with 2D indices + vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]] + + # test the GAAC clusterer with 4 clusters + clusterer = GAAClusterer(4) + clusters = clusterer.cluster(vectors, True) + + print("Clusterer:", clusterer) + print("Clustered:", vectors) + print("As:", clusters) + print() + + # show the dendrogram + clusterer.dendrogram().show() + + # classify a new vector + vector = numpy.array([3, 3]) + print("classify(%s):" % vector, end=" ") + print(clusterer.classify(vector)) + print() + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/kmeans.py b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/kmeans.py new file mode 100644 index 0000000000000000000000000000000000000000..6b0d02f7dc0178f5bb1406d7a71a07ae46acaa93 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/kmeans.py @@ -0,0 +1,231 @@ +# Natural Language Toolkit: K-Means Clusterer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +import copy +import random +import sys + +try: + import numpy +except ImportError: + pass + + +from nltk.cluster.util import VectorSpaceClusterer + + +class KMeansClusterer(VectorSpaceClusterer): + """ + The K-means clusterer starts with k arbitrary chosen means then allocates + each vector to the cluster with the closest mean. It then recalculates the + means of each cluster as the centroid of the vectors in the cluster. This + process repeats until the cluster memberships stabilise. This is a + hill-climbing algorithm which may converge to a local maximum. Hence the + clustering is often repeated with random initial means and the most + commonly occurring output means are chosen. 
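+
+    A minimal usage sketch (the exact numbering of the clusters, and hence
+    the reported assignments, depends on the randomly chosen initial means)::
+
+        >>> import numpy
+        >>> from nltk.cluster import KMeansClusterer, euclidean_distance
+        >>> vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]]
+        >>> clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
+        >>> clusterer.cluster(vectors, assign_clusters=True)  # doctest: +SKIP
+        [0, 0, 1, 1]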
+ """ + + def __init__( + self, + num_means, + distance, + repeats=1, + conv_test=1e-6, + initial_means=None, + normalise=False, + svd_dimensions=None, + rng=None, + avoid_empty_clusters=False, + ): + + """ + :param num_means: the number of means to use (may use fewer) + :type num_means: int + :param distance: measure of distance between two vectors + :type distance: function taking two vectors and returning a float + :param repeats: number of randomised clustering trials to use + :type repeats: int + :param conv_test: maximum variation in mean differences before + deemed convergent + :type conv_test: number + :param initial_means: set of k initial means + :type initial_means: sequence of vectors + :param normalise: should vectors be normalised to length 1 + :type normalise: boolean + :param svd_dimensions: number of dimensions to use in reducing vector + dimensionsionality with SVD + :type svd_dimensions: int + :param rng: random number generator (or None) + :type rng: Random + :param avoid_empty_clusters: include current centroid in computation + of next one; avoids undefined behavior + when clusters become empty + :type avoid_empty_clusters: boolean + """ + VectorSpaceClusterer.__init__(self, normalise, svd_dimensions) + self._num_means = num_means + self._distance = distance + self._max_difference = conv_test + assert not initial_means or len(initial_means) == num_means + self._means = initial_means + assert repeats >= 1 + assert not (initial_means and repeats > 1) + self._repeats = repeats + self._rng = rng if rng else random.Random() + self._avoid_empty_clusters = avoid_empty_clusters + + def cluster_vectorspace(self, vectors, trace=False): + if self._means and self._repeats > 1: + print("Warning: means will be discarded for subsequent trials") + + meanss = [] + for trial in range(self._repeats): + if trace: + print("k-means trial", trial) + if not self._means or trial > 1: + self._means = self._rng.sample(list(vectors), self._num_means) + self._cluster_vectorspace(vectors, trace) + meanss.append(self._means) + + if len(meanss) > 1: + # sort the means first (so that different cluster numbering won't + # effect the distance comparison) + for means in meanss: + means.sort(key=sum) + + # find the set of means that's minimally different from the others + min_difference = min_means = None + for i in range(len(meanss)): + d = 0 + for j in range(len(meanss)): + if i != j: + d += self._sum_distances(meanss[i], meanss[j]) + if min_difference is None or d < min_difference: + min_difference, min_means = d, meanss[i] + + # use the best means + self._means = min_means + + def _cluster_vectorspace(self, vectors, trace=False): + if self._num_means < len(vectors): + # perform k-means clustering + converged = False + while not converged: + # assign the tokens to clusters based on minimum distance to + # the cluster means + clusters = [[] for m in range(self._num_means)] + for vector in vectors: + index = self.classify_vectorspace(vector) + clusters[index].append(vector) + + if trace: + print("iteration") + # for i in range(self._num_means): + # print ' mean', i, 'allocated', len(clusters[i]), 'vectors' + + # recalculate cluster means by computing the centroid of each cluster + new_means = list(map(self._centroid, clusters, self._means)) + + # measure the degree of change from the previous step for convergence + difference = self._sum_distances(self._means, new_means) + if difference < self._max_difference: + converged = True + + # remember the new means + self._means = new_means + + def 
classify_vectorspace(self, vector): + # finds the closest cluster centroid + # returns that cluster's index + best_distance = best_index = None + for index in range(len(self._means)): + mean = self._means[index] + dist = self._distance(vector, mean) + if best_distance is None or dist < best_distance: + best_index, best_distance = index, dist + return best_index + + def num_clusters(self): + if self._means: + return len(self._means) + else: + return self._num_means + + def means(self): + """ + The means used for clustering. + """ + return self._means + + def _sum_distances(self, vectors1, vectors2): + difference = 0.0 + for u, v in zip(vectors1, vectors2): + difference += self._distance(u, v) + return difference + + def _centroid(self, cluster, mean): + if self._avoid_empty_clusters: + centroid = copy.copy(mean) + for vector in cluster: + centroid += vector + return centroid / (1 + len(cluster)) + else: + if not len(cluster): + sys.stderr.write("Error: no centroid defined for empty cluster.\n") + sys.stderr.write( + "Try setting argument 'avoid_empty_clusters' to True\n" + ) + assert False + centroid = copy.copy(cluster[0]) + for vector in cluster[1:]: + centroid += vector + return centroid / len(cluster) + + def __repr__(self): + return "" % (self._means, self._repeats) + + +################################################################################# + + +def demo(): + # example from figure 14.9, page 517, Manning and Schutze + + from nltk.cluster import KMeansClusterer, euclidean_distance + + vectors = [numpy.array(f) for f in [[2, 1], [1, 3], [4, 7], [6, 7]]] + means = [[4, 3], [5, 5]] + + clusterer = KMeansClusterer(2, euclidean_distance, initial_means=means) + clusters = clusterer.cluster(vectors, True, trace=True) + + print("Clustered:", vectors) + print("As:", clusters) + print("Means:", clusterer.means()) + print() + + vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]] + + # test k-means using the euclidean distance metric, 2 means and repeat + # clustering 10 times with random seeds + + clusterer = KMeansClusterer(2, euclidean_distance, repeats=10) + clusters = clusterer.cluster(vectors, True) + print("Clustered:", vectors) + print("As:", clusters) + print("Means:", clusterer.means()) + print() + + # classify a new vector + vector = numpy.array([3, 3]) + print("classify(%s):" % vector, end=" ") + print(clusterer.classify(vector)) + print() + + +if __name__ == "__main__": + demo() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/cluster/util.py b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/util.py new file mode 100644 index 0000000000000000000000000000000000000000..8b8ed5e9f0b97be7ce80eef87d36fdbf8c59bdfb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/cluster/util.py @@ -0,0 +1,300 @@ +# Natural Language Toolkit: Clusterer Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Contributor: J Richard Snape +# URL: +# For license information, see LICENSE.TXT +import copy +from abc import abstractmethod +from math import sqrt +from sys import stdout + +try: + import numpy +except ImportError: + pass + +from nltk.cluster.api import ClusterI + + +class VectorSpaceClusterer(ClusterI): + """ + Abstract clusterer which takes tokens and maps them into a vector space. + Optionally performs singular value decomposition to reduce the + dimensionality. 
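+
+    Concrete subclasses supply ``cluster_vectorspace()``,
+    ``classify_vectorspace()`` and ``num_clusters()``; ``cluster()`` and
+    ``classify()`` handle the optional normalisation and SVD projection
+    before delegating to them. A deliberately trivial sketch (purely
+    illustrative, not a useful clusterer)::
+
+        >>> import numpy
+        >>> from nltk.cluster.util import VectorSpaceClusterer
+        >>> class SingleCluster(VectorSpaceClusterer):
+        ...     def num_clusters(self):
+        ...         return 1
+        ...     def cluster_vectorspace(self, vectors, trace=False):
+        ...         pass
+        ...     def classify_vectorspace(self, vector):
+        ...         return 0
+        >>> SingleCluster().cluster([numpy.array([1.0, 2.0])], assign_clusters=True)
+        [0]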
+ """ + + def __init__(self, normalise=False, svd_dimensions=None): + """ + :param normalise: should vectors be normalised to length 1 + :type normalise: boolean + :param svd_dimensions: number of dimensions to use in reducing vector + dimensionsionality with SVD + :type svd_dimensions: int + """ + self._Tt = None + self._should_normalise = normalise + self._svd_dimensions = svd_dimensions + + def cluster(self, vectors, assign_clusters=False, trace=False): + assert len(vectors) > 0 + + # normalise the vectors + if self._should_normalise: + vectors = list(map(self._normalise, vectors)) + + # use SVD to reduce the dimensionality + if self._svd_dimensions and self._svd_dimensions < len(vectors[0]): + [u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors))) + S = d[: self._svd_dimensions] * numpy.identity( + self._svd_dimensions, numpy.float64 + ) + T = u[:, : self._svd_dimensions] + Dt = vt[: self._svd_dimensions, :] + vectors = numpy.transpose(numpy.dot(S, Dt)) + self._Tt = numpy.transpose(T) + + # call abstract method to cluster the vectors + self.cluster_vectorspace(vectors, trace) + + # assign the vectors to clusters + if assign_clusters: + return [self.classify(vector) for vector in vectors] + + @abstractmethod + def cluster_vectorspace(self, vectors, trace): + """ + Finds the clusters using the given set of vectors. + """ + + def classify(self, vector): + if self._should_normalise: + vector = self._normalise(vector) + if self._Tt is not None: + vector = numpy.dot(self._Tt, vector) + cluster = self.classify_vectorspace(vector) + return self.cluster_name(cluster) + + @abstractmethod + def classify_vectorspace(self, vector): + """ + Returns the index of the appropriate cluster for the vector. + """ + + def likelihood(self, vector, label): + if self._should_normalise: + vector = self._normalise(vector) + if self._Tt is not None: + vector = numpy.dot(self._Tt, vector) + return self.likelihood_vectorspace(vector, label) + + def likelihood_vectorspace(self, vector, cluster): + """ + Returns the likelihood of the vector belonging to the cluster. + """ + predicted = self.classify_vectorspace(vector) + return 1.0 if cluster == predicted else 0.0 + + def vector(self, vector): + """ + Returns the vector after normalisation and dimensionality reduction + """ + if self._should_normalise: + vector = self._normalise(vector) + if self._Tt is not None: + vector = numpy.dot(self._Tt, vector) + return vector + + def _normalise(self, vector): + """ + Normalises the vector to unit length. + """ + return vector / sqrt(numpy.dot(vector, vector)) + + +def euclidean_distance(u, v): + """ + Returns the euclidean distance between vectors u and v. This is equivalent + to the length of the vector (u - v). + """ + diff = u - v + return sqrt(numpy.dot(diff, diff)) + + +def cosine_distance(u, v): + """ + Returns 1 minus the cosine of the angle between vectors v and u. This is + equal to ``1 - (u.v / |u||v|)``. 
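+
+    Illustrative values (rounded here to sidestep floating-point noise)::
+
+        >>> import numpy
+        >>> from nltk.cluster.util import cosine_distance
+        >>> round(float(cosine_distance(numpy.array([1.0, 1.0]), numpy.array([2.0, 2.0]))), 6)
+        0.0
+        >>> round(float(cosine_distance(numpy.array([1.0, 0.0]), numpy.array([0.0, 1.0]))), 6)
+        1.0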
+ """ + return 1 - (numpy.dot(u, v) / (sqrt(numpy.dot(u, u)) * sqrt(numpy.dot(v, v)))) + + +class _DendrogramNode: + """Tree node of a dendrogram.""" + + def __init__(self, value, *children): + self._value = value + self._children = children + + def leaves(self, values=True): + if self._children: + leaves = [] + for child in self._children: + leaves.extend(child.leaves(values)) + return leaves + elif values: + return [self._value] + else: + return [self] + + def groups(self, n): + queue = [(self._value, self)] + + while len(queue) < n: + priority, node = queue.pop() + if not node._children: + queue.push((priority, node)) + break + for child in node._children: + if child._children: + queue.append((child._value, child)) + else: + queue.append((0, child)) + # makes the earliest merges at the start, latest at the end + queue.sort() + + groups = [] + for priority, node in queue: + groups.append(node.leaves()) + return groups + + def __lt__(self, comparator): + return cosine_distance(self._value, comparator._value) < 0 + + +class Dendrogram: + """ + Represents a dendrogram, a tree with a specified branching order. This + must be initialised with the leaf items, then iteratively call merge for + each branch. This class constructs a tree representing the order of calls + to the merge function. + """ + + def __init__(self, items=[]): + """ + :param items: the items at the leaves of the dendrogram + :type items: sequence of (any) + """ + self._items = [_DendrogramNode(item) for item in items] + self._original_items = copy.copy(self._items) + self._merge = 1 + + def merge(self, *indices): + """ + Merges nodes at given indices in the dendrogram. The nodes will be + combined which then replaces the first node specified. All other nodes + involved in the merge will be removed. + + :param indices: indices of the items to merge (at least two) + :type indices: seq of int + """ + assert len(indices) >= 2 + node = _DendrogramNode(self._merge, *(self._items[i] for i in indices)) + self._merge += 1 + self._items[indices[0]] = node + for i in indices[1:]: + del self._items[i] + + def groups(self, n): + """ + Finds the n-groups of items (leaves) reachable from a cut at depth n. + :param n: number of groups + :type n: int + """ + if len(self._items) > 1: + root = _DendrogramNode(self._merge, *self._items) + else: + root = self._items[0] + return root.groups(n) + + def show(self, leaf_labels=[]): + """ + Print the dendrogram in ASCII art to standard out. 
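+
+        Illustrative construction of a small dendrogram (the ASCII rendering
+        produced by ``show()`` is omitted here)::
+
+            >>> from nltk.cluster.util import Dendrogram
+            >>> d = Dendrogram(["a", "b", "c"])
+            >>> d.merge(0, 1)
+            >>> d.groups(2)
+            [['c'], ['a', 'b']]
+            >>> d.show()  # doctest: +SKIP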
+ + :param leaf_labels: an optional list of strings to use for labeling the + leaves + :type leaf_labels: list + """ + + # ASCII rendering characters + JOIN, HLINK, VLINK = "+", "-", "|" + + # find the root (or create one) + if len(self._items) > 1: + root = _DendrogramNode(self._merge, *self._items) + else: + root = self._items[0] + leaves = self._original_items + + if leaf_labels: + last_row = leaf_labels + else: + last_row = ["%s" % leaf._value for leaf in leaves] + + # find the bottom row and the best cell width + width = max(map(len, last_row)) + 1 + lhalf = width // 2 + rhalf = int(width - lhalf - 1) + + # display functions + def format(centre, left=" ", right=" "): + return f"{lhalf * left}{centre}{right * rhalf}" + + def display(str): + stdout.write(str) + + # for each merge, top down + queue = [(root._value, root)] + verticals = [format(" ") for leaf in leaves] + while queue: + priority, node = queue.pop() + child_left_leaf = list(map(lambda c: c.leaves(False)[0], node._children)) + indices = list(map(leaves.index, child_left_leaf)) + if child_left_leaf: + min_idx = min(indices) + max_idx = max(indices) + for i in range(len(leaves)): + if leaves[i] in child_left_leaf: + if i == min_idx: + display(format(JOIN, " ", HLINK)) + elif i == max_idx: + display(format(JOIN, HLINK, " ")) + else: + display(format(JOIN, HLINK, HLINK)) + verticals[i] = format(VLINK) + elif min_idx <= i <= max_idx: + display(format(HLINK, HLINK, HLINK)) + else: + display(verticals[i]) + display("\n") + for child in node._children: + if child._children: + queue.append((child._value, child)) + queue.sort() + + for vertical in verticals: + display(vertical) + display("\n") + + # finally, display the last line + display("".join(item.center(width) for item in last_row)) + display("\n") + + def __repr__(self): + if len(self._items) > 1: + root = _DendrogramNode(self._merge, *self._items) + else: + root = self._items[0] + leaves = root.leaves(False) + return "" % len(leaves) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e48682b4bec887b03af740718ae8fc93173dfe0c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/bracket_parse.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56e9ff499326fa046fc273ce733f2e41935abba4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/categorized_sents.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8ff71626fe080350d41e0fa048ba45fc79f71a3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/lin.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ccfb9913d358ac801793099167ef2141d4f5929 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/panlex_lite.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc6c52700a52e56bcdb84fcad41a15d607c84b28 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/pl196x.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acb1ed50f9c537bcfa423e65c9f6cd76ba14bb40 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/plaintext.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1147dc852a18bdd83818617c08a93511f4f75f4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/ppattach.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99114d9c01ca4a16f9539e820dd43e583da4fa85 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/propbank.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46b91e6aa8ffa296a64365b76de9467dd16a3b13 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/rte.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aa86b40e463e72aff123a5fd7764986a711c094 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/corpus/reader/__pycache__/toolbox.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3387daec4b489d83a4f87b9652a0309f7c4e1ce5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__init__.py @@ -0,0 +1,31 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by 
+# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Transformation Based Learning + +A general purpose package for Transformation Based Learning, +currently used by nltk.tag.BrillTagger. + +isort:skip_file +""" + +from nltk.tbl.template import Template + +# API: Template(...), Template.expand(...) + +from nltk.tbl.feature import Feature + +# API: Feature(...), Feature.expand(...) + +from nltk.tbl.rule import Rule + +# API: Rule.format(...), Rule.templatetid + +from nltk.tbl.erroranalysis import error_list diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74d5a790a7d68faa2e6152f9b441f068e97ad45d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84f50dc713fbb8fe6f0c3c527717b28fb3db26e8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5849ad49e199270469f290609d54fafb3f5e36c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/demo.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f25c3802e1d2a2c8b61db7068bfde305b4017a8a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/erroranalysis.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1a8c43712e90343252bfec6cb73c37dbb10d9d1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/feature.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8316f8fea0809d940fc9354dfcd52c908e470468 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/rule.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55fff14e862a54814aabb3730769f8648fe9d199 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/__pycache__/template.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/api.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/demo.py b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..a5298e396e964f1f33e89a81263014249bca7cfa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/demo.py @@ -0,0 +1,418 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import os +import pickle +import random +import time + +from nltk.corpus import treebank +from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger +from nltk.tag.brill import Pos, Word +from nltk.tbl import Template, error_list + + +def demo(): + """ + Run a demo with defaults. See source comments for details, + or docstrings of any of the more specific demo_* functions. + """ + postag() + + +def demo_repr_rule_format(): + """ + Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose")) + """ + postag(ruleformat="repr") + + +def demo_str_rule_format(): + """ + Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose")) + """ + postag(ruleformat="str") + + +def demo_verbose_rule_format(): + """ + Exemplify Rule.format("verbose") + """ + postag(ruleformat="verbose") + + +def demo_multiposition_feature(): + """ + The feature/s of a template takes a list of positions + relative to the current word where the feature should be + looked for, conceptually joined by logical OR. For instance, + Pos([-1, 1]), given a value V, will hold whenever V is found + one step to the left and/or one step to the right. + + For contiguous ranges, a 2-arg form giving inclusive end + points can also be used: Pos(-3, -1) is the same as the arg + below. + """ + postag(templates=[Template(Pos([-3, -2, -1]))]) + + +def demo_multifeature_template(): + """ + Templates can have more than a single feature. + """ + postag(templates=[Template(Word([0]), Pos([-2, -1]))]) + + +def demo_template_statistics(): + """ + Show aggregate statistics per template. Little used templates are + candidates for deletion, much used templates may possibly be refined. + + Deleting unused templates is mostly about saving time and/or space: + training is basically O(T) in the number of templates T + (also in terms of memory usage, which often will be the limiting factor). + """ + postag(incremental_stats=True, template_stats=True) + + +def demo_generated_templates(): + """ + Template.expand and Feature.expand are class methods facilitating + generating large amounts of templates. See their documentation for + details. 
+ + Note: training with 500 templates can easily fill all available + even on relatively small corpora + """ + wordtpls = Word.expand([-1, 0, 1], [1, 2], excludezero=False) + tagtpls = Pos.expand([-2, -1, 0, 1], [1, 2], excludezero=True) + templates = list(Template.expand([wordtpls, tagtpls], combinations=(1, 3))) + print( + "Generated {} templates for transformation-based learning".format( + len(templates) + ) + ) + postag(templates=templates, incremental_stats=True, template_stats=True) + + +def demo_learning_curve(): + """ + Plot a learning curve -- the contribution on tagging accuracy of + the individual rules. + Note: requires matplotlib + """ + postag( + incremental_stats=True, + separate_baseline_data=True, + learning_curve_output="learningcurve.png", + ) + + +def demo_error_analysis(): + """ + Writes a file with context for each erroneous word after tagging testing data + """ + postag(error_output="errors.txt") + + +def demo_serialize_tagger(): + """ + Serializes the learned tagger to a file in pickle format; reloads it + and validates the process. + """ + postag(serialize_output="tagger.pcl") + + +def demo_high_accuracy_rules(): + """ + Discard rules with low accuracy. This may hurt performance a bit, + but will often produce rules which are more interesting read to a human. + """ + postag(num_sents=3000, min_acc=0.96, min_score=10) + + +def postag( + templates=None, + tagged_data=None, + num_sents=1000, + max_rules=300, + min_score=3, + min_acc=None, + train=0.8, + trace=3, + randomize=False, + ruleformat="str", + incremental_stats=False, + template_stats=False, + error_output=None, + serialize_output=None, + learning_curve_output=None, + learning_curve_take=300, + baseline_backoff_tagger=None, + separate_baseline_data=False, + cache_baseline_tagger=None, +): + """ + Brill Tagger Demonstration + :param templates: how many sentences of training and testing data to use + :type templates: list of Template + + :param tagged_data: maximum number of rule instances to create + :type tagged_data: C{int} + + :param num_sents: how many sentences of training and testing data to use + :type num_sents: C{int} + + :param max_rules: maximum number of rule instances to create + :type max_rules: C{int} + + :param min_score: the minimum score for a rule in order for it to be considered + :type min_score: C{int} + + :param min_acc: the minimum score for a rule in order for it to be considered + :type min_acc: C{float} + + :param train: the fraction of the the corpus to be used for training (1=all) + :type train: C{float} + + :param trace: the level of diagnostic tracing output to produce (0-4) + :type trace: C{int} + + :param randomize: whether the training data should be a random subset of the corpus + :type randomize: C{bool} + + :param ruleformat: rule output format, one of "str", "repr", "verbose" + :type ruleformat: C{str} + + :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow) + :type incremental_stats: C{bool} + + :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing + :type template_stats: C{bool} + + :param error_output: the file where errors will be saved + :type error_output: C{string} + + :param serialize_output: the file where the learned tbl tagger will be saved + :type serialize_output: C{string} + + :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available) + :type learning_curve_output: C{string} + + :param learning_curve_take: 
how many rules plotted + :type learning_curve_take: C{int} + + :param baseline_backoff_tagger: the file where rules will be saved + :type baseline_backoff_tagger: tagger + + :param separate_baseline_data: use a fraction of the training data exclusively for training baseline + :type separate_baseline_data: C{bool} + + :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround to get + deterministic output from the baseline unigram tagger between python versions) + :type cache_baseline_tagger: C{string} + + + Note on separate_baseline_data: if True, reuse training data both for baseline and rule learner. This + is fast and fine for a demo, but is likely to generalize worse on unseen data. + Also cannot be sensibly used for learning curves on training data (the baseline will be artificially high). + """ + + # defaults + baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER + if templates is None: + from nltk.tag.brill import brill24, describe_template_sets + + # some pre-built template sets taken from typical systems or publications are + # available. Print a list with describe_template_sets() + # for instance: + templates = brill24() + (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data( + tagged_data, train, num_sents, randomize, separate_baseline_data + ) + + # creating (or reloading from cache) a baseline tagger (unigram tagger) + # this is just a mechanism for getting deterministic output from the baseline between + # python versions + if cache_baseline_tagger: + if not os.path.exists(cache_baseline_tagger): + baseline_tagger = UnigramTagger( + baseline_data, backoff=baseline_backoff_tagger + ) + with open(cache_baseline_tagger, "w") as print_rules: + pickle.dump(baseline_tagger, print_rules) + print( + "Trained baseline tagger, pickled it to {}".format( + cache_baseline_tagger + ) + ) + with open(cache_baseline_tagger) as print_rules: + baseline_tagger = pickle.load(print_rules) + print(f"Reloaded pickled tagger from {cache_baseline_tagger}") + else: + baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger) + print("Trained baseline tagger") + if gold_data: + print( + " Accuracy on test set: {:0.4f}".format( + baseline_tagger.accuracy(gold_data) + ) + ) + + # creating a Brill tagger + tbrill = time.time() + trainer = BrillTaggerTrainer( + baseline_tagger, templates, trace, ruleformat=ruleformat + ) + print("Training tbl tagger...") + brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc) + print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds") + if gold_data: + print(" Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data)) + + # printing the learned rules, if learned silently + if trace == 1: + print("\nLearned rules: ") + for (ruleno, rule) in enumerate(brill_tagger.rules(), 1): + print(f"{ruleno:4d} {rule.format(ruleformat):s}") + + # printing template statistics (optionally including comparison with the training data) + # note: if not separate_baseline_data, then baseline accuracy will be artificially high + if incremental_stats: + print( + "Incrementally tagging the test data, collecting individual rule statistics" + ) + (taggedtest, teststats) = brill_tagger.batch_tag_incremental( + testing_data, gold_data + ) + print(" Rule statistics collected") + if not separate_baseline_data: + print( + "WARNING: train_stats asked for separate_baseline_data=True; the baseline " + "will be artificially high" + ) + trainstats = 
brill_tagger.train_stats() + if template_stats: + brill_tagger.print_template_statistics(teststats) + if learning_curve_output: + _demo_plot( + learning_curve_output, teststats, trainstats, take=learning_curve_take + ) + print(f"Wrote plot of learning curve to {learning_curve_output}") + else: + print("Tagging the test data") + taggedtest = brill_tagger.tag_sents(testing_data) + if template_stats: + brill_tagger.print_template_statistics() + + # writing error analysis to file + if error_output is not None: + with open(error_output, "w") as f: + f.write("Errors for Brill Tagger %r\n\n" % serialize_output) + f.write("\n".join(error_list(gold_data, taggedtest)).encode("utf-8") + "\n") + print(f"Wrote tagger errors including context to {error_output}") + + # serializing the tagger to a pickle file and reloading (just to see it works) + if serialize_output is not None: + taggedtest = brill_tagger.tag_sents(testing_data) + with open(serialize_output, "w") as print_rules: + pickle.dump(brill_tagger, print_rules) + print(f"Wrote pickled tagger to {serialize_output}") + with open(serialize_output) as print_rules: + brill_tagger_reloaded = pickle.load(print_rules) + print(f"Reloaded pickled tagger from {serialize_output}") + taggedtest_reloaded = brill_tagger.tag_sents(testing_data) + if taggedtest == taggedtest_reloaded: + print("Reloaded tagger tried on test set, results identical") + else: + print("PROBLEM: Reloaded tagger gave different results on test set") + + +def _demo_prepare_data( + tagged_data, train, num_sents, randomize, separate_baseline_data +): + # train is the proportion of data used in training; the rest is reserved + # for testing. + if tagged_data is None: + print("Loading tagged data from treebank... ") + tagged_data = treebank.tagged_sents() + if num_sents is None or len(tagged_data) <= num_sents: + num_sents = len(tagged_data) + if randomize: + random.seed(len(tagged_data)) + random.shuffle(tagged_data) + cutoff = int(num_sents * train) + training_data = tagged_data[:cutoff] + gold_data = tagged_data[cutoff:num_sents] + testing_data = [[t[0] for t in sent] for sent in gold_data] + if not separate_baseline_data: + baseline_data = training_data + else: + bl_cutoff = len(training_data) // 3 + (baseline_data, training_data) = ( + training_data[:bl_cutoff], + training_data[bl_cutoff:], + ) + (trainseqs, traintokens) = corpus_size(training_data) + (testseqs, testtokens) = corpus_size(testing_data) + (bltrainseqs, bltraintokens) = corpus_size(baseline_data) + print(f"Read testing data ({testseqs:d} sents/{testtokens:d} wds)") + print(f"Read training data ({trainseqs:d} sents/{traintokens:d} wds)") + print( + "Read baseline data ({:d} sents/{:d} wds) {:s}".format( + bltrainseqs, + bltraintokens, + "" if separate_baseline_data else "[reused the training set]", + ) + ) + return (training_data, baseline_data, gold_data, testing_data) + + +def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None): + testcurve = [teststats["initialerrors"]] + for rulescore in teststats["rulescores"]: + testcurve.append(testcurve[-1] - rulescore) + testcurve = [1 - x / teststats["tokencount"] for x in testcurve[:take]] + + traincurve = [trainstats["initialerrors"]] + for rulescore in trainstats["rulescores"]: + traincurve.append(traincurve[-1] - rulescore) + traincurve = [1 - x / trainstats["tokencount"] for x in traincurve[:take]] + + import matplotlib.pyplot as plt + + r = list(range(len(testcurve))) + plt.plot(r, testcurve, r, traincurve) + plt.axis([None, None, None, 1.0]) + 
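+    # the two curves now hold cumulative accuracy
+    # (1 - remaining_errors / tokencount) after each successive rule; the
+    # axis call above caps the y-axis at 1.0 and the figure is saved below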
plt.savefig(learning_curve_output) + + +NN_CD_TAGGER = RegexpTagger([(r"^-?[0-9]+(\.[0-9]+)?$", "CD"), (r".*", "NN")]) + +REGEXP_TAGGER = RegexpTagger( + [ + (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), # cardinal numbers + (r"(The|the|A|a|An|an)$", "AT"), # articles + (r".*able$", "JJ"), # adjectives + (r".*ness$", "NN"), # nouns formed from adjectives + (r".*ly$", "RB"), # adverbs + (r".*s$", "NNS"), # plural nouns + (r".*ing$", "VBG"), # gerunds + (r".*ed$", "VBD"), # past tense verbs + (r".*", "NN"), # nouns (default) + ] +) + + +def corpus_size(seqs): + return (len(seqs), sum(len(x) for x in seqs)) + + +if __name__ == "__main__": + demo_learning_curve() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py new file mode 100644 index 0000000000000000000000000000000000000000..8b192e75d8b410942960cbf5ea1476a42f0decf7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/erroranalysis.py @@ -0,0 +1,38 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +# returns a list of errors in string format + + +def error_list(train_sents, test_sents): + """ + Returns a list of human-readable strings indicating the errors in the + given tagging of the corpus. + + :param train_sents: The correct tagging of the corpus + :type train_sents: list(tuple) + :param test_sents: The tagged corpus + :type test_sents: list(tuple) + """ + hdr = ("%25s | %s | %s\n" + "-" * 26 + "+" + "-" * 24 + "+" + "-" * 26) % ( + "left context", + "word/test->gold".center(22), + "right context", + ) + errors = [hdr] + for (train_sent, test_sent) in zip(train_sents, test_sents): + for wordnum, (word, train_pos) in enumerate(train_sent): + test_pos = test_sent[wordnum][1] + if train_pos != test_pos: + left = " ".join("%s/%s" % w for w in train_sent[:wordnum]) + right = " ".join("%s/%s" % w for w in train_sent[wordnum + 1 :]) + mid = f"{word}/{test_pos}->{train_pos}" + errors.append(f"{left[-25:]:>25} | {mid.center(22)} | {right[:25]}") + + return errors diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/rule.py b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/rule.py new file mode 100644 index 0000000000000000000000000000000000000000..7faea23bd36ddbf974de4499bb1f9106a78e4c0e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/rule.py @@ -0,0 +1,322 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod + +from nltk import jsontags + + +###################################################################### +# Tag Rules +###################################################################### +class TagRule(metaclass=ABCMeta): + """ + An interface for tag transformations on a tagged corpus, as + performed by tbl taggers. Each transformation finds all tokens + in the corpus that are tagged with a specific original tag and + satisfy a specific condition, and replaces their tags with a + replacement tag. For any given transformation, the original + tag, replacement tag, and condition are fixed. 
Conditions may + depend on the token under consideration, as well as any other + tokens in the corpus. + + Tag rules must be comparable and hashable. + """ + + def __init__(self, original_tag, replacement_tag): + + self.original_tag = original_tag + """The tag which this TagRule may cause to be replaced.""" + + self.replacement_tag = replacement_tag + """The tag with which this TagRule may replace another tag.""" + + def apply(self, tokens, positions=None): + """ + Apply this rule at every position in positions where it + applies to the given sentence. I.e., for each position p + in *positions*, if *tokens[p]* is tagged with this rule's + original tag, and satisfies this rule's condition, then set + its tag to be this rule's replacement tag. + + :param tokens: The tagged sentence + :type tokens: list(tuple(str, str)) + :type positions: list(int) + :param positions: The positions where the transformation is to + be tried. If not specified, try it at all positions. + :return: The indices of tokens whose tags were changed by this + rule. + :rtype: int + """ + if positions is None: + positions = list(range(len(tokens))) + + # Determine the indices at which this rule applies. + change = [i for i in positions if self.applies(tokens, i)] + + # Make the changes. Note: this must be done in a separate + # step from finding applicable locations, since we don't want + # the rule to interact with itself. + for i in change: + tokens[i] = (tokens[i][0], self.replacement_tag) + + return change + + @abstractmethod + def applies(self, tokens, index): + """ + :return: True if the rule would change the tag of + ``tokens[index]``, False otherwise + :rtype: bool + :param tokens: A tagged sentence + :type tokens: list(str) + :param index: The index to check + :type index: int + """ + + # Rules must be comparable and hashable for the algorithm to work + def __eq__(self, other): + raise TypeError("Rules must implement __eq__()") + + def __ne__(self, other): + raise TypeError("Rules must implement __ne__()") + + def __hash__(self): + raise TypeError("Rules must implement __hash__()") + + +@jsontags.register_tag +class Rule(TagRule): + """ + A Rule checks the current corpus position for a certain set of conditions; + if they are all fulfilled, the Rule is triggered, meaning that it + will change tag A to tag B. For other tags than A, nothing happens. + + The conditions are parameters to the Rule instance. Each condition is a feature-value pair, + with a set of positions to check for the value of the corresponding feature. + Conceptually, the positions are joined by logical OR, and the feature set by logical AND. + + More formally, the Rule is then applicable to the M{n}th token iff: + + - The M{n}th token is tagged with the Rule's original tag; and + - For each (Feature(positions), M{value}) tuple: + + - The value of Feature of at least one token in {n+p for p in positions} + is M{value}. + """ + + json_tag = "nltk.tbl.Rule" + + def __init__(self, templateid, original_tag, replacement_tag, conditions): + """ + Construct a new Rule that changes a token's tag from + C{original_tag} to C{replacement_tag} if all of the properties + specified in C{conditions} hold. + + :param templateid: the template id (a zero-padded string, '001' etc, + so it will sort nicely) + :type templateid: string + + :param conditions: A list of Feature(positions), + each of which specifies that the property (computed by + Feature.extract_property()) of at least one + token in M{n} + p in positions is C{value}. 
+ :type conditions: C{iterable} of C{Feature} + + """ + TagRule.__init__(self, original_tag, replacement_tag) + self._conditions = conditions + self.templateid = templateid + + def encode_json_obj(self): + return { + "templateid": self.templateid, + "original": self.original_tag, + "replacement": self.replacement_tag, + "conditions": self._conditions, + } + + @classmethod + def decode_json_obj(cls, obj): + return cls( + obj["templateid"], + obj["original"], + obj["replacement"], + tuple(tuple(feat) for feat in obj["conditions"]), + ) + + def applies(self, tokens, index): + # Inherit docs from TagRule + + # Does the given token have this Rule's "original tag"? + if tokens[index][1] != self.original_tag: + return False + + # Check to make sure that every condition holds. + for (feature, val) in self._conditions: + + # Look for *any* token that satisfies the condition. + for pos in feature.positions: + if not (0 <= index + pos < len(tokens)): + continue + if feature.extract_property(tokens, index + pos) == val: + break + else: + # No token satisfied the condition; return false. + return False + + # Every condition checked out, so the Rule is applicable. + return True + + def __eq__(self, other): + return self is other or ( + other is not None + and other.__class__ == self.__class__ + and self.original_tag == other.original_tag + and self.replacement_tag == other.replacement_tag + and self._conditions == other._conditions + ) + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + + # Cache our hash value (justified by profiling.) + try: + return self.__hash + except AttributeError: + self.__hash = hash(repr(self)) + return self.__hash + + def __repr__(self): + # Cache the repr (justified by profiling -- this is used as + # a sort key when deterministic=True.) + try: + return self.__repr + except AttributeError: + self.__repr = "{}('{}', {}, {}, [{}])".format( + self.__class__.__name__, + self.templateid, + repr(self.original_tag), + repr(self.replacement_tag), + # list(self._conditions) would be simpler but will not generate + # the same Rule.__repr__ in python 2 and 3 and thus break some tests + ", ".join(f"({f},{repr(v)})" for (f, v) in self._conditions), + ) + + return self.__repr + + def __str__(self): + def _condition_to_logic(feature, value): + """ + Return a compact, predicate-logic styled string representation + of the given condition. + """ + return "{}:{}@[{}]".format( + feature.PROPERTY_NAME, + value, + ",".join(str(w) for w in feature.positions), + ) + + conditions = " & ".join( + [_condition_to_logic(f, v) for (f, v) in self._conditions] + ) + s = f"{self.original_tag}->{self.replacement_tag} if {conditions}" + + return s + + def format(self, fmt): + """ + Return a string representation of this rule. 
+ + >>> from nltk.tbl.rule import Rule + >>> from nltk.tag.brill import Pos + + >>> r = Rule("23", "VB", "NN", [(Pos([-2,-1]), 'DT')]) + + r.format("str") == str(r) + True + >>> r.format("str") + 'VB->NN if Pos:DT@[-2,-1]' + + r.format("repr") == repr(r) + True + >>> r.format("repr") + "Rule('23', 'VB', 'NN', [(Pos([-2, -1]),'DT')])" + + >>> r.format("verbose") + 'VB -> NN if the Pos of words i-2...i-1 is "DT"' + + >>> r.format("not_found") + Traceback (most recent call last): + File "", line 1, in + File "nltk/tbl/rule.py", line 256, in format + raise ValueError("unknown rule format spec: {0}".format(fmt)) + ValueError: unknown rule format spec: not_found + >>> + + :param fmt: format specification + :type fmt: str + :return: string representation + :rtype: str + """ + if fmt == "str": + return self.__str__() + elif fmt == "repr": + return self.__repr__() + elif fmt == "verbose": + return self._verbose_format() + else: + raise ValueError(f"unknown rule format spec: {fmt}") + + def _verbose_format(self): + """ + Return a wordy, human-readable string representation + of the given rule. + + Not sure how useful this is. + """ + + def condition_to_str(feature, value): + return 'the {} of {} is "{}"'.format( + feature.PROPERTY_NAME, + range_to_str(feature.positions), + value, + ) + + def range_to_str(positions): + if len(positions) == 1: + p = positions[0] + if p == 0: + return "this word" + if p == -1: + return "the preceding word" + elif p == 1: + return "the following word" + elif p < 0: + return "word i-%d" % -p + elif p > 0: + return "word i+%d" % p + else: + # for complete compatibility with the wordy format of nltk2 + mx = max(positions) + mn = min(positions) + if mx - mn == len(positions) - 1: + return "words i%+d...i%+d" % (mn, mx) + else: + return "words {{{}}}".format( + ",".join("i%+d" % d for d in positions) + ) + + replacement = f"{self.original_tag} -> {self.replacement_tag}" + conditions = (" if " if self._conditions else "") + ", and ".join( + condition_to_str(f, v) for (f, v) in self._conditions + ) + return replacement + conditions diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tbl/template.py b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/template.py new file mode 100644 index 0000000000000000000000000000000000000000..ac9ed5df52f5730bd767a04a121637a5c2be01d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tbl/template.py @@ -0,0 +1,325 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import itertools as it +from abc import ABCMeta, abstractmethod + +from nltk.tbl.feature import Feature +from nltk.tbl.rule import Rule + + +class BrillTemplateI(metaclass=ABCMeta): + """ + An interface for generating lists of transformational rules that + apply at given sentence positions. ``BrillTemplateI`` is used by + ``Brill`` training algorithms to generate candidate rules. + """ + + @abstractmethod + def applicable_rules(self, tokens, i, correctTag): + """ + Return a list of the transformational rules that would correct + the ``i``-th subtoken's tag in the given token. In particular, + return a list of zero or more rules that would change + ``tokens[i][1]`` to ``correctTag``, if applied to ``token[i]``. 
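(The interface description continues below.) As a concrete preview of what these methods return, here is a sketch using the ``Template`` implementation defined further down together with the ``Pos`` and ``Word`` features; the tags are illustrative and the printed rule forms are approximate.

    from nltk.tbl.template import Template
    from nltk.tag.brill import Pos, Word

    tagged = [("The", "DT"), ("cat", "NN"), ("sat", "VBD")]
    tpl = Template(Pos([-2, -1]), Word([0]))

    # One candidate Rule per combination of observed feature values
    # (here, two Pos values at positions -2/-1 times one Word value at 0):
    for rule in tpl.applicable_rules(tagged, 2, "VB"):
        print(rule)
    # roughly:
    # VBD->VB if Pos:DT@[-2,-1] & Word:sat@[0]
    # VBD->VB if Pos:NN@[-2,-1] & Word:sat@[0]

    # Indices whose candidate rules could be affected by a change at position 0:
    tpl.get_neighborhood(tagged, 0)   # {0, 1, 2}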
+ + If the ``i``-th token already has the correct tag (i.e., if + ``tagged_tokens[i][1] == correctTag``), then + ``applicable_rules()`` should return the empty list. + + :param tokens: The tagged tokens being tagged. + :type tokens: list(tuple) + :param i: The index of the token whose tag should be corrected. + :type i: int + :param correctTag: The correct tag for the ``i``-th token. + :type correctTag: any + :rtype: list(BrillRule) + """ + + @abstractmethod + def get_neighborhood(self, token, index): + """ + Returns the set of indices *i* such that + ``applicable_rules(token, i, ...)`` depends on the value of + the *index*th token of *token*. + + This method is used by the "fast" Brill tagger trainer. + + :param token: The tokens being tagged. + :type token: list(tuple) + :param index: The index whose neighborhood should be returned. + :type index: int + :rtype: set + """ + + +class Template(BrillTemplateI): + """ + A tbl Template that generates a list of L{Rule}s that apply at a given sentence + position. In particular, each C{Template} is parameterized by a list of + independent features (a combination of a specific + property to extract and a list C{L} of relative positions at which to extract + it) and generates all Rules that: + + - use the given features, each at its own independent position; and + - are applicable to the given token. + """ + + ALLTEMPLATES = [] + # record a unique id of form "001", for each template created + # _ids = it.count(0) + + def __init__(self, *features): + + """ + Construct a Template for generating Rules. + + Takes a list of Features. A C{Feature} is a combination + of a specific property and its relative positions and should be + a subclass of L{nltk.tbl.feature.Feature}. + + An alternative calling convention (kept for backwards compatibility, + but less expressive as it only permits one feature type) is + Template(Feature, (start1, end1), (start2, end2), ...) + In new code, that would be better written + Template(Feature(start1, end1), Feature(start2, end2), ...) + + For instance, importing some features + + >>> from nltk.tbl.template import Template + >>> from nltk.tag.brill import Word, Pos + + Create some features + + >>> wfeat1, wfeat2, pfeat = (Word([-1]), Word([1,2]), Pos([-2,-1])) + + Create a single-feature template + + >>> Template(wfeat1) + Template(Word([-1])) + + Or a two-feature one + + >>> Template(wfeat1, wfeat2) + Template(Word([-1]),Word([1, 2])) + + Or a three-feature one with two different feature types + + >>> Template(wfeat1, wfeat2, pfeat) + Template(Word([-1]),Word([1, 2]),Pos([-2, -1])) + + deprecated api: Feature subclass, followed by list of (start,end) pairs + (permits only a single Feature) + + >>> Template(Word, (-2,-1), (0,0)) + Template(Word([-2, -1]),Word([0])) + + Incorrect specification raises TypeError + + >>> Template(Word, (-2,-1), Pos, (0,0)) + Traceback (most recent call last): + File "", line 1, in + File "nltk/tag/tbl/template.py", line 143, in __init__ + raise TypeError( + TypeError: expected either Feature1(args), Feature2(args), ... or Feature, (start1, end1), (start2, end2), ... + + :type features: list of Features + :param features: the features to build this Template on + """ + # determine the calling form: either + # Template(Feature, args1, [args2, ...)] + # Template(Feature1(args), Feature2(args), ...) 
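Editorial note on the dispatch code that follows: after resolving the calling form, ``__init__`` assigns each new Template a sequential, zero-padded id and records it in ``ALLTEMPLATES`` (this id is what ``Rule.templateid`` refers to). A minimal illustration, using the private ``_cleartemplates()`` helper defined at the end of this class to reset the registry:

    from nltk.tbl.template import Template
    from nltk.tag.brill import Pos, Word

    Template._cleartemplates()         # reset the class-level registry (illustrative only)
    t1 = Template(Word([0]))
    t2 = Template(Pos([-1]), Word([0]))
    t1.id, t2.id                       # ('000', '001') -- ids sort in creation order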
+ if all(isinstance(f, Feature) for f in features): + self._features = features + elif issubclass(features[0], Feature) and all( + isinstance(a, tuple) for a in features[1:] + ): + self._features = [features[0](*tp) for tp in features[1:]] + else: + raise TypeError( + "expected either Feature1(args), Feature2(args), ... or Feature, (start1, end1), (start2, end2), ..." + ) + self.id = f"{len(self.ALLTEMPLATES):03d}" + self.ALLTEMPLATES.append(self) + + def __repr__(self): + return "{}({})".format( + self.__class__.__name__, + ",".join([str(f) for f in self._features]), + ) + + def applicable_rules(self, tokens, index, correct_tag): + if tokens[index][1] == correct_tag: + return [] + + # For each of this Template's features, find the conditions + # that are applicable for the given token. + # Then, generate one Rule for each combination of features + # (the crossproduct of the conditions). + + applicable_conditions = self._applicable_conditions(tokens, index) + xs = list(it.product(*applicable_conditions)) + return [Rule(self.id, tokens[index][1], correct_tag, tuple(x)) for x in xs] + + def _applicable_conditions(self, tokens, index): + """ + :returns: A set of all conditions for rules + that are applicable to C{tokens[index]}. + """ + conditions = [] + + for feature in self._features: + conditions.append([]) + for pos in feature.positions: + if not (0 <= index + pos < len(tokens)): + continue + value = feature.extract_property(tokens, index + pos) + conditions[-1].append((feature, value)) + return conditions + + def get_neighborhood(self, tokens, index): + # inherit docs from BrillTemplateI + + # applicable_rules(tokens, index, ...) depends on index. + neighborhood = {index} # set literal for python 2.7+ + + # applicable_rules(tokens, i, ...) depends on index if + # i+start < index <= i+end. + + allpositions = [0] + [p for feat in self._features for p in feat.positions] + start, end = min(allpositions), max(allpositions) + s = max(0, index + (-end)) + e = min(index + (-start) + 1, len(tokens)) + for i in range(s, e): + neighborhood.add(i) + return neighborhood + + @classmethod + def expand(cls, featurelists, combinations=None, skipintersecting=True): + + """ + Factory method to mass generate Templates from a list L of lists of Features. + + #With combinations=(k1, k2), the function will in all possible ways choose k1 ... k2 + #of the sublists in L; it will output all Templates formed by the Cartesian product + #of this selection, with duplicates and other semantically equivalent + #forms removed. Default for combinations is (1, len(L)). + + The feature lists may have been specified + manually, or generated from Feature.expand(). 
For instance, + + >>> from nltk.tbl.template import Template + >>> from nltk.tag.brill import Word, Pos + + #creating some features + >>> (wd_0, wd_01) = (Word([0]), Word([0,1])) + + >>> (pos_m2, pos_m33) = (Pos([-2]), Pos([3-2,-1,0,1,2,3])) + + >>> list(Template.expand([[wd_0], [pos_m2]])) + [Template(Word([0])), Template(Pos([-2])), Template(Pos([-2]),Word([0]))] + + >>> list(Template.expand([[wd_0, wd_01], [pos_m2]])) + [Template(Word([0])), Template(Word([0, 1])), Template(Pos([-2])), Template(Pos([-2]),Word([0])), Template(Pos([-2]),Word([0, 1]))] + + #note: with Feature.expand(), it is very easy to generate more templates + #than your system can handle -- for instance, + >>> wordtpls = Word.expand([-2,-1,0,1], [1,2], excludezero=False) + >>> len(wordtpls) + 7 + + >>> postpls = Pos.expand([-3,-2,-1,0,1,2], [1,2,3], excludezero=True) + >>> len(postpls) + 9 + + #and now the Cartesian product of all non-empty combinations of two wordtpls and + #two postpls, with semantic equivalents removed + >>> templates = list(Template.expand([wordtpls, wordtpls, postpls, postpls])) + >>> len(templates) + 713 + + + will return a list of eight templates + Template(Word([0])), + Template(Word([0, 1])), + Template(Pos([-2])), + Template(Pos([-1])), + Template(Pos([-2]),Word([0])), + Template(Pos([-1]),Word([0])), + Template(Pos([-2]),Word([0, 1])), + Template(Pos([-1]),Word([0, 1]))] + + + #Templates where one feature is a subset of another, such as + #Template(Word([0,1]), Word([1]), will not appear in the output. + #By default, this non-subset constraint is tightened to disjointness: + #Templates of type Template(Word([0,1]), Word([1,2]) will also be filtered out. + #With skipintersecting=False, then such Templates are allowed + + WARNING: this method makes it very easy to fill all your memory when training + generated templates on any real-world corpus + + :param featurelists: lists of Features, whose Cartesian product will return a set of Templates + :type featurelists: list of (list of Features) + :param combinations: given n featurelists: if combinations=k, all generated Templates will have + k features; if combinations=(k1,k2) they will have k1..k2 features; if None, defaults to 1..n + :type combinations: None, int, or (int, int) + :param skipintersecting: if True, do not output intersecting Templates (non-disjoint positions for some feature) + :type skipintersecting: bool + :returns: generator of Templates + + """ + + def nonempty_powerset(xs): # xs is a list + # itertools docnonempty_powerset([1,2,3]) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3) + + # find the correct tuple given combinations, one of {None, k, (k1,k2)} + k = combinations # for brevity + combrange = ( + (1, len(xs) + 1) + if k is None + else (k, k + 1) # n over 1 .. n over n (all non-empty combinations) + if isinstance(k, int) + else (k[0], k[1] + 1) # n over k (only + ) # n over k1, n over k1+1... 
n over k2 + return it.chain.from_iterable( + it.combinations(xs, r) for r in range(*combrange) + ) + + seentemplates = set() + for picks in nonempty_powerset(featurelists): + for pick in it.product(*picks): + if any( + i != j and x.issuperset(y) + for (i, x) in enumerate(pick) + for (j, y) in enumerate(pick) + ): + continue + if skipintersecting and any( + i != j and x.intersects(y) + for (i, x) in enumerate(pick) + for (j, y) in enumerate(pick) + ): + continue + thistemplate = cls(*sorted(pick)) + strpick = str(thistemplate) + #!!FIXME --this is hackish + if strpick in seentemplates: # already added + cls._poptemplate() + continue + seentemplates.add(strpick) + yield thistemplate + + @classmethod + def _cleartemplates(cls): + cls.ALLTEMPLATES = [] + + @classmethod + def _poptemplate(cls): + return cls.ALLTEMPLATES.pop() if cls.ALLTEMPLATES else None diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5162796f751878d3521aaf66de56fac11b2a2dd8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__init__.py @@ -0,0 +1,132 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# Contributors: matthewmc, clouds56 +# URL: +# For license information, see LICENSE.TXT + +r""" +NLTK Tokenizer Package + +Tokenizers divide strings into lists of substrings. For example, +tokenizers can be used to find the words and punctuation in a string: + + >>> from nltk.tokenize import word_tokenize + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... two of them.\n\nThanks.''' + >>> word_tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +This particular tokenizer requires the Punkt sentence tokenization +models to be installed. NLTK also provides a simpler, +regular-expression based tokenizer, which splits text on whitespace +and punctuation: + + >>> from nltk.tokenize import wordpunct_tokenize + >>> wordpunct_tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +We can also operate at the level of sentences, using the sentence +tokenizer directly as follows: + + >>> from nltk.tokenize import sent_tokenize, word_tokenize + >>> sent_tokenize(s) + ['Good muffins cost $3.88\nin New York.', 'Please buy me\ntwo of them.', 'Thanks.'] + >>> [word_tokenize(t) for t in sent_tokenize(s)] # doctest: +NORMALIZE_WHITESPACE + [['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.'], + ['Please', 'buy', 'me', 'two', 'of', 'them', '.'], ['Thanks', '.']] + +Caution: when tokenizing a Unicode string, make sure you are not +using an encoded version of the string (it may be necessary to +decode it first, e.g. with ``s.decode("utf8")``. + +NLTK tokenizers can produce token-spans, represented as tuples of integers +having the same semantics as string slices, to support efficient comparison +of tokenizers. (These methods are implemented as generators.) 
+ + >>> from nltk.tokenize import WhitespaceTokenizer + >>> list(WhitespaceTokenizer().span_tokenize(s)) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), (38, 44), + (45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)] + +There are numerous ways to tokenize text. If you need more control over +tokenization, see the other methods provided in this package. + +For further information, please see Chapter 3 of the NLTK book. +""" + +import re + +from nltk.data import load +from nltk.tokenize.casual import TweetTokenizer, casual_tokenize +from nltk.tokenize.destructive import NLTKWordTokenizer +from nltk.tokenize.legality_principle import LegalitySyllableTokenizer +from nltk.tokenize.mwe import MWETokenizer +from nltk.tokenize.punkt import PunktSentenceTokenizer +from nltk.tokenize.regexp import ( + BlanklineTokenizer, + RegexpTokenizer, + WhitespaceTokenizer, + WordPunctTokenizer, + blankline_tokenize, + regexp_tokenize, + wordpunct_tokenize, +) +from nltk.tokenize.repp import ReppTokenizer +from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize +from nltk.tokenize.simple import ( + LineTokenizer, + SpaceTokenizer, + TabTokenizer, + line_tokenize, +) +from nltk.tokenize.sonority_sequencing import SyllableTokenizer +from nltk.tokenize.stanford_segmenter import StanfordSegmenter +from nltk.tokenize.texttiling import TextTilingTokenizer +from nltk.tokenize.toktok import ToktokTokenizer +from nltk.tokenize.treebank import TreebankWordDetokenizer, TreebankWordTokenizer +from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize + + +# Standard sentence tokenizer. +def sent_tokenize(text, language="english"): + """ + Return a sentence-tokenized copy of *text*, + using NLTK's recommended sentence tokenizer + (currently :class:`.PunktSentenceTokenizer` + for the specified language). + + :param text: text to split into sentences + :param language: the model name in the Punkt corpus + """ + tokenizer = load(f"tokenizers/punkt/{language}.pickle") + return tokenizer.tokenize(text) + + +# Standard word tokenizer. +_treebank_word_tokenizer = NLTKWordTokenizer() + + +def word_tokenize(text, language="english", preserve_line=False): + """ + Return a tokenized copy of *text*, + using NLTK's recommended word tokenizer + (currently an improved :class:`.TreebankWordTokenizer` + along with :class:`.PunktSentenceTokenizer` + for the specified language). + + :param text: text to split into words + :type text: str + :param language: the model name in the Punkt corpus + :type language: str + :param preserve_line: A flag to decide whether to sentence tokenize the text or not. 
+ :type preserve_line: bool + """ + sentences = [text] if preserve_line else sent_tokenize(text, language) + return [ + token for sent in sentences for token in _treebank_word_tokenizer.tokenize(sent) + ] diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f9743abc7265b2f573b9993871f4101bde325b7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e176b0eb90a5b52e98190b3cae5aec0c2f2e6827 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/api.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f5a09574409f5e4c56826fada8765486304de1e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/casual.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37197d9f2b4c89910b9fbe2edde561ab153ec98b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/destructive.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c69f6a7709f2d3f6aec803a22c323ddd6208fcb9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/legality_principle.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..910b9436d7a7ca1075b22c8f020d3e92086d000d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/mwe.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd476bde0e3d12dcafd7cfc62f9d87eb2380cda0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/nist.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc new file mode 100644 index 
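Returning to ``word_tokenize()`` above: the ``preserve_line`` flag decides whether Punkt sentence splitting runs first, which in turn affects how sentence-internal periods are treated. A sketch; the commented outputs are what the pretrained English Punkt model is expected to produce, assuming it handles "Dr." as an abbreviation.

    from nltk.tokenize import sent_tokenize, word_tokenize

    s = "Dr. Smith arrived. He was late."

    sent_tokenize(s)
    # ['Dr. Smith arrived.', 'He was late.']

    word_tokenize(s)
    # ['Dr.', 'Smith', 'arrived', '.', 'He', 'was', 'late', '.']

    word_tokenize(s, preserve_line=True)   # skip sentence splitting first
    # ['Dr.', 'Smith', 'arrived.', 'He', 'was', 'late', '.']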
0000000000000000000000000000000000000000..bf181102954bb500ea566ceafa7fedaa1035134d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/punkt.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02d7cbde32a9d7bc0914eeda29eed9f70b243d50 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/regexp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07ed67211b02425c12eb873c8773c51d25aa20ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/repp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b04084721ad17ff773596c9562b8f3d2465dd4c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sexpr.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85b7395c4ddb564a23c800f398a69a4a1dc6562c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/simple.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c26da04b1bd21e0ac7c934b69bf4dc3cfb72f16 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/sonority_sequencing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60f54424b3cba7e13ea717e70e2c1bfcba68771f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford_segmenter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford_segmenter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fda60e2da6e71ba9a7cb004dde867c393931188 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/stanford_segmenter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/texttiling.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/texttiling.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..db175463f88f4126258755a57f33a17e5a2eaa8d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/texttiling.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/toktok.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/toktok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6cdc4dd028510f73c881f47b64f52e6b769ab866 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/toktok.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/treebank.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/treebank.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3bbf34519ca97af973e76551dd8400c4b873c77 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/treebank.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..673cefc4f55edfd98aa0c36c8e6b4e86983c85d4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/api.py new file mode 100644 index 0000000000000000000000000000000000000000..419ff646cfb89d5f3b63e645b53bedea09a1b479 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/api.py @@ -0,0 +1,83 @@ +# Natural Language Toolkit: Tokenizer Interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Tokenizer Interface +""" + +from abc import ABC, abstractmethod +from typing import Iterator, List, Tuple + +from nltk.internals import overridden +from nltk.tokenize.util import string_span_tokenize + + +class TokenizerI(ABC): + """ + A processing interface for tokenizing a string. + Subclasses must define ``tokenize()`` or ``tokenize_sents()`` (or both). + """ + + @abstractmethod + def tokenize(self, s: str) -> List[str]: + """ + Return a tokenized copy of *s*. + + :rtype: List[str] + """ + if overridden(self.tokenize_sents): + return self.tokenize_sents([s])[0] + + def span_tokenize(self, s: str) -> Iterator[Tuple[int, int]]: + """ + Identify the tokens using integer offsets ``(start_i, end_i)``, + where ``s[start_i:end_i]`` is the corresponding token. + + :rtype: Iterator[Tuple[int, int]] + """ + raise NotImplementedError() + + def tokenize_sents(self, strings: List[str]) -> List[List[str]]: + """ + Apply ``self.tokenize()`` to each element of ``strings``. I.e.: + + return [self.tokenize(s) for s in strings] + + :rtype: List[List[str]] + """ + return [self.tokenize(s) for s in strings] + + def span_tokenize_sents( + self, strings: List[str] + ) -> Iterator[List[Tuple[int, int]]]: + """ + Apply ``self.span_tokenize()`` to each element of ``strings``. 
I.e.: + + return [self.span_tokenize(s) for s in strings] + + :yield: List[Tuple[int, int]] + """ + for s in strings: + yield list(self.span_tokenize(s)) + + +class StringTokenizer(TokenizerI): + """A tokenizer that divides a string into substrings by splitting + on the specified string (defined in subclasses). + """ + + @property + @abstractmethod + def _string(self): + raise NotImplementedError + + def tokenize(self, s): + return s.split(self._string) + + def span_tokenize(self, s): + yield from string_span_tokenize(s, self._string) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/casual.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/casual.py new file mode 100644 index 0000000000000000000000000000000000000000..d0545abe50530c20903f8aeaa29fbfc55094e70e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/casual.py @@ -0,0 +1,458 @@ +# +# Natural Language Toolkit: Twitter Tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Potts +# Ewan Klein (modifications) +# Pierpaolo Pantone <> (modifications) +# Tom Aarsen <> (modifications) +# URL: +# For license information, see LICENSE.TXT +# + + +""" +Twitter-aware tokenizer, designed to be flexible and easy to adapt to new +domains and tasks. The basic logic is this: + +1. The tuple REGEXPS defines a list of regular expression + strings. + +2. The REGEXPS strings are put, in order, into a compiled + regular expression object called WORD_RE, under the TweetTokenizer + class. + +3. The tokenization is done by WORD_RE.findall(s), where s is the + user-supplied string, inside the tokenize() method of the class + TweetTokenizer. + +4. When instantiating Tokenizer objects, there are several options: + * preserve_case. By default, it is set to True. If it is set to + False, then the tokenizer will downcase everything except for + emoticons. + * reduce_len. By default, it is set to False. It specifies whether + to replace repeated character sequences of length 3 or greater + with sequences of length 3. + * strip_handles. By default, it is set to False. It specifies + whether to remove Twitter handles of text used in the + `tokenize` method. + * match_phone_numbers. By default, it is set to True. It indicates + whether the `tokenize` method should look for phone numbers. +""" + + +###################################################################### + +import html +from typing import List + +import regex # https://github.com/nltk/nltk/issues/2409 + +from nltk.tokenize.api import TokenizerI + +###################################################################### +# The following strings are components in the regular expression +# that is used for tokenizing. It's important that phone_number +# appears first in the final regex (since it can contain whitespace). +# It also could matter that tags comes after emoticons, due to the +# possibility of having text like +# +# <:| and some text >:) +# +# Most importantly, the final element should always be last, since it +# does a last ditch whitespace-based tokenization of whatever is left. + +# ToDo: Update with https://en.wikipedia.org/wiki/List_of_emoticons ? + +# This particular element is used in a couple ways, so we define it +# with a name: +EMOTICONS = r""" + (?: + [<>]? + [:;=8] # eyes + [\-o\*\']? # optional nose + [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth + | + [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth + [\-o\*\']? # optional nose + [:;=8] # eyes + [<>]? 
+ | + {}\[\]]+ # Run of non-space, non-()<>{}[] + | # or + \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...) + | + \([^\s]+?\) # balanced parens, non-recursive: (...) + )+ + (?: # End with: + \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...) + | + \([^\s]+?\) # balanced parens, non-recursive: (...) + | # or + [^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars + ) + | # OR, the following to match naked domains: + (?: + (?\s]+>""", + # ASCII Arrows + r"""[\-]+>|<[\-]+""", + # Twitter username: + r"""(?:@[\w_]+)""", + # Twitter hashtags: + r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""", + # email addresses + r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""", + # Zero-Width-Joiner and Skin tone modifier emojis + """.(?: + [\U0001F3FB-\U0001F3FF]?(?:\u200d.[\U0001F3FB-\U0001F3FF]?)+ + | + [\U0001F3FB-\U0001F3FF] + )""", + # flags + FLAGS, + # Remaining word types: + r""" + (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes. + | + (?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals. + | + (?:[\w_]+) # Words without apostrophes or dashes. + | + (?:\.(?:\s*\.){1,}) # Ellipsis dots. + | + (?:\S) # Everything else that isn't whitespace. + """, +) + +# Take the main components and add a phone regex as the second parameter +REGEXPS_PHONE = (REGEXPS[0], PHONE_REGEX, *REGEXPS[1:]) + +###################################################################### +# TweetTokenizer.WORD_RE and TweetTokenizer.PHONE_WORD_RE represent +# the core tokenizing regexes. They are compiled lazily. + +# WORD_RE performs poorly on these patterns: +HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}") + +# The emoticon string gets its own regex so that we can preserve case for +# them as needed: +EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE) + +# These are for regularizing HTML entities to Unicode: +ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);") + +# For stripping away handles from a tweet: +HANDLES_RE = regex.compile( + r"(?>> from nltk.tokenize.casual import _replace_html_entities + >>> _replace_html_entities(b'Price: £100') + 'Price: \\xa3100' + >>> print(_replace_html_entities(b'Price: £100')) + Price: £100 + >>> + """ + + def _convert_entity(match): + entity_body = match.group(3) + if match.group(1): + try: + if match.group(2): + number = int(entity_body, 16) + else: + number = int(entity_body, 10) + # Numeric character references in the 80-9F range are typically + # interpreted by browsers as representing the characters mapped + # to bytes 80-9F in the Windows-1252 encoding. For more info + # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets + if 0x80 <= number <= 0x9F: + return bytes((number,)).decode("cp1252") + except ValueError: + number = None + else: + if entity_body in keep: + return match.group(0) + number = html.entities.name2codepoint.get(entity_body) + if number is not None: + try: + return chr(number) + except (ValueError, OverflowError): + pass + + return "" if remove_illegal else match.group(0) + + return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding)) + + +###################################################################### + + +class TweetTokenizer(TokenizerI): + r""" + Tokenizer for tweets. 
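(Editorial aside, before the class doctests below.) The ``_replace_html_entities`` helper above decodes numeric character references in the 0x80-0x9F range as Windows-1252, as its comment explains. An illustrative check:

    from nltk.tokenize.casual import _replace_html_entities

    # &#147; / &#148; are Windows-1252 curly quotes rather than valid code points.
    _replace_html_entities("&#147;smart quotes&#148; &amp; more")
    # expected: '\u201csmart quotes\u201d & more'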
+ + >>> from nltk.tokenize import TweetTokenizer + >>> tknzr = TweetTokenizer() + >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--" + >>> tknzr.tokenize(s0) # doctest: +NORMALIZE_WHITESPACE + ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', + '<--'] + + Examples using `strip_handles` and `reduce_len parameters`: + + >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True) + >>> s1 = '@remy: This is waaaaayyyy too much for you!!!!!!' + >>> tknzr.tokenize(s1) + [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!'] + """ + + # Values used to lazily compile WORD_RE and PHONE_WORD_RE, + # which are the core tokenizing regexes. + _WORD_RE = None + _PHONE_WORD_RE = None + + ###################################################################### + + def __init__( + self, + preserve_case=True, + reduce_len=False, + strip_handles=False, + match_phone_numbers=True, + ): + """ + Create a `TweetTokenizer` instance with settings for use in the `tokenize` method. + + :param preserve_case: Flag indicating whether to preserve the casing (capitalisation) + of text used in the `tokenize` method. Defaults to True. + :type preserve_case: bool + :param reduce_len: Flag indicating whether to replace repeated character sequences + of length 3 or greater with sequences of length 3. Defaults to False. + :type reduce_len: bool + :param strip_handles: Flag indicating whether to remove Twitter handles of text used + in the `tokenize` method. Defaults to False. + :type strip_handles: bool + :param match_phone_numbers: Flag indicating whether the `tokenize` method should look + for phone numbers. Defaults to True. + :type match_phone_numbers: bool + """ + self.preserve_case = preserve_case + self.reduce_len = reduce_len + self.strip_handles = strip_handles + self.match_phone_numbers = match_phone_numbers + + def tokenize(self, text: str) -> List[str]: + """Tokenize the input text. + + :param text: str + :rtype: list(str) + :return: a tokenized list of strings; joining this list returns\ + the original string if `preserve_case=False`. + """ + # Fix HTML character entities: + text = _replace_html_entities(text) + # Remove username handles + if self.strip_handles: + text = remove_handles(text) + # Normalize word lengthening + if self.reduce_len: + text = reduce_lengthening(text) + # Shorten problematic sequences of characters + safe_text = HANG_RE.sub(r"\1\1\1", text) + # Recognise phone numbers during tokenization + if self.match_phone_numbers: + words = self.PHONE_WORD_RE.findall(safe_text) + else: + words = self.WORD_RE.findall(safe_text) + # Possibly alter the case, but avoid changing emoticons like :D into :d: + if not self.preserve_case: + words = list( + map((lambda x: x if EMOTICON_RE.search(x) else x.lower()), words) + ) + return words + + @property + def WORD_RE(self) -> "regex.Pattern": + """Core TweetTokenizer regex""" + # Compiles the regex for this and all future instantiations of TweetTokenizer. + if not type(self)._WORD_RE: + type(self)._WORD_RE = regex.compile( + f"({'|'.join(REGEXPS)})", + regex.VERBOSE | regex.I | regex.UNICODE, + ) + return type(self)._WORD_RE + + @property + def PHONE_WORD_RE(self) -> "regex.Pattern": + """Secondary core TweetTokenizer regex""" + # Compiles the regex for this and all future instantiations of TweetTokenizer. 
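A brief usage aside for the options documented in ``__init__`` above: with ``preserve_case=False`` everything is lower-cased except tokens matching ``EMOTICON_RE``, and ``reduce_len=True`` caps character runs at three. Illustrative output:

    from nltk.tokenize import TweetTokenizer

    tknzr = TweetTokenizer(preserve_case=False, reduce_len=True)
    tknzr.tokenize("WOW this is SOOOOOO great :D")
    # ['wow', 'this', 'is', 'sooo', 'great', ':D']   <- emoticon case is kept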
+ if not type(self)._PHONE_WORD_RE: + type(self)._PHONE_WORD_RE = regex.compile( + f"({'|'.join(REGEXPS_PHONE)})", + regex.VERBOSE | regex.I | regex.UNICODE, + ) + return type(self)._PHONE_WORD_RE + + +###################################################################### +# Normalization Functions +###################################################################### + + +def reduce_lengthening(text): + """ + Replace repeated character sequences of length 3 or greater with sequences + of length 3. + """ + pattern = regex.compile(r"(.)\1{2,}") + return pattern.sub(r"\1\1\1", text) + + +def remove_handles(text): + """ + Remove Twitter username handles from text. + """ + # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly + return HANDLES_RE.sub(" ", text) + + +###################################################################### +# Tokenization Function +###################################################################### + + +def casual_tokenize( + text, + preserve_case=True, + reduce_len=False, + strip_handles=False, + match_phone_numbers=True, +): + """ + Convenience function for wrapping the tokenizer. + """ + return TweetTokenizer( + preserve_case=preserve_case, + reduce_len=reduce_len, + strip_handles=strip_handles, + match_phone_numbers=match_phone_numbers, + ).tokenize(text) + + +############################################################################### diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/destructive.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/destructive.py new file mode 100644 index 0000000000000000000000000000000000000000..4beb395dde57bf73082dfa91f65ad625d199bc31 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/destructive.py @@ -0,0 +1,233 @@ +# Natural Language Toolkit: NLTK's very own tokenizer. +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Liling Tan +# Tom Aarsen <> (modifications) +# URL: +# For license information, see LICENSE.TXT + + +import re +import warnings +from typing import Iterator, List, Tuple + +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.util import align_tokens + + +class MacIntyreContractions: + """ + List of contractions adapted from Robert MacIntyre's tokenizer. + """ + + CONTRACTIONS2 = [ + r"(?i)\b(can)(?#X)(not)\b", + r"(?i)\b(d)(?#X)('ye)\b", + r"(?i)\b(gim)(?#X)(me)\b", + r"(?i)\b(gon)(?#X)(na)\b", + r"(?i)\b(got)(?#X)(ta)\b", + r"(?i)\b(lem)(?#X)(me)\b", + r"(?i)\b(more)(?#X)('n)\b", + r"(?i)\b(wan)(?#X)(na)(?=\s)", + ] + CONTRACTIONS3 = [r"(?i) ('t)(?#X)(is)\b", r"(?i) ('t)(?#X)(was)\b"] + CONTRACTIONS4 = [r"(?i)\b(whad)(dd)(ya)\b", r"(?i)\b(wha)(t)(cha)\b"] + + +class NLTKWordTokenizer(TokenizerI): + """ + The NLTK tokenizer that has improved upon the TreebankWordTokenizer. + + This is the method that is invoked by ``word_tokenize()``. It assumes that the + text has already been segmented into sentences, e.g. using ``sent_tokenize()``. + + The tokenizer is "destructive" such that the regexes applied will munge the + input string to a state beyond re-construction. It is possible to apply + `TreebankWordDetokenizer.detokenize` to the tokenized outputs of + `NLTKDestructiveWordTokenizer.tokenize` but there's no guarantees to + revert to the original string. + """ + + # Starting quotes. 
+ STARTING_QUOTES = [ + (re.compile("([«“‘„]|[`]+)", re.U), r" \1 "), + (re.compile(r"^\""), r"``"), + (re.compile(r"(``)"), r" \1 "), + (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "), + (re.compile(r"(?i)(\')(?!re|ve|ll|m|t|s|d|n)(\w)\b", re.U), r"\1 \2"), + ] + + # Ending quotes. + ENDING_QUOTES = [ + (re.compile("([»”’])", re.U), r" \1 "), + (re.compile(r"''"), " '' "), + (re.compile(r'"'), " '' "), + (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "), + (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "), + ] + + # For improvements for starting/closing quotes from TreebankWordTokenizer, + # see discussion on https://github.com/nltk/nltk/pull/1437 + # Adding to TreebankWordTokenizer, nltk.word_tokenize now splits on + # - chervon quotes u'\xab' and u'\xbb' . + # - unicode quotes u'\u2018', u'\u2019', u'\u201c' and u'\u201d' + # See https://github.com/nltk/nltk/issues/1995#issuecomment-376741608 + # Also, behavior of splitting on clitics now follows Stanford CoreNLP + # - clitics covered (?!re|ve|ll|m|t|s|d)(\w)\b + + # Punctuation. + PUNCTUATION = [ + (re.compile(r'([^\.])(\.)([\]\)}>"\'' "»”’ " r"]*)\s*$", re.U), r"\1 \2 \3 "), + (re.compile(r"([:,])([^\d])"), r" \1 \2"), + (re.compile(r"([:,])$"), r" \1 "), + ( + re.compile(r"\.{2,}", re.U), + r" \g<0> ", + ), # See https://github.com/nltk/nltk/pull/2322 + (re.compile(r"[;@#$%&]"), r" \g<0> "), + ( + re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'), + r"\1 \2\3 ", + ), # Handles the final period. + (re.compile(r"[?!]"), r" \g<0> "), + (re.compile(r"([^'])' "), r"\1 ' "), + ( + re.compile(r"[*]", re.U), + r" \g<0> ", + ), # See https://github.com/nltk/nltk/pull/2322 + ] + + # Pads parentheses + PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ") + + # Optionally: Convert parentheses, brackets and converts them to PTB symbols. + CONVERT_PARENTHESES = [ + (re.compile(r"\("), "-LRB-"), + (re.compile(r"\)"), "-RRB-"), + (re.compile(r"\["), "-LSB-"), + (re.compile(r"\]"), "-RSB-"), + (re.compile(r"\{"), "-LCB-"), + (re.compile(r"\}"), "-RCB-"), + ] + + DOUBLE_DASHES = (re.compile(r"--"), r" -- ") + + # List of contractions adapted from Robert MacIntyre's tokenizer. + _contractions = MacIntyreContractions() + CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2)) + CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3)) + + def tokenize( + self, text: str, convert_parentheses: bool = False, return_str: bool = False + ) -> List[str]: + r"""Return a tokenized copy of `text`. + + >>> from nltk.tokenize import NLTKWordTokenizer + >>> s = '''Good muffins cost $3.88 (roughly 3,36 euros)\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> NLTKWordTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '(', 'roughly', '3,36', + 'euros', ')', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + >>> NLTKWordTokenizer().tokenize(s, convert_parentheses=True) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '-LRB-', 'roughly', '3,36', + 'euros', '-RRB-', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + + + :param text: A string with a sentence or sentences. + :type text: str + :param convert_parentheses: if True, replace parentheses to PTB symbols, + e.g. `(` to `-LRB-`. Defaults to False. + :type convert_parentheses: bool, optional + :param return_str: If True, return tokens as space-separated string, + defaults to False. 
+ :type return_str: bool, optional + :return: List of tokens from `text`. + :rtype: List[str] + """ + if return_str: + warnings.warn( + "Parameter 'return_str' has been deprecated and should no " + "longer be used.", + category=DeprecationWarning, + stacklevel=2, + ) + + for regexp, substitution in self.STARTING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp, substitution in self.PUNCTUATION: + text = regexp.sub(substitution, text) + + # Handles parentheses. + regexp, substitution = self.PARENS_BRACKETS + text = regexp.sub(substitution, text) + # Optionally convert parentheses + if convert_parentheses: + for regexp, substitution in self.CONVERT_PARENTHESES: + text = regexp.sub(substitution, text) + + # Handles double dash. + regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + # add extra space to make things easier + text = " " + text + " " + + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r" \1 \2 ", text) + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r" \1 \2 ", text) + + # We are not using CONTRACTIONS4 since + # they are also commented out in the SED scripts + # for regexp in self._contractions.CONTRACTIONS4: + # text = regexp.sub(r' \1 \2 \3 ', text) + + return text.split() + + def span_tokenize(self, text: str) -> Iterator[Tuple[int, int]]: + r""" + Returns the spans of the tokens in ``text``. + Uses the post-hoc nltk.tokens.align_tokens to return the offset spans. + + >>> from nltk.tokenize import NLTKWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), + ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), + ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), + ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)] + >>> list(NLTKWordTokenizer().span_tokenize(s)) == expected + True + >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')', + ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.'] + >>> [s[start:end] for start, end in NLTKWordTokenizer().span_tokenize(s)] == expected + True + + :param text: A string with a sentence or sentences. + :type text: str + :yield: Tuple[int, int] + """ + raw_tokens = self.tokenize(text) + + # Convert converted quotes back to original double quotes + # Do this only if original text contains double quote(s) or double + # single-quotes (because '' might be transformed to `` if it is + # treated as starting quotes). 
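To make the quote-restoration step below concrete: ``tokenize()`` rewrites double quotes as ``` `` ``` and ``''``, so ``span_tokenize()`` maps them back before aligning tokens against the original string. An illustrative run:

    from nltk.tokenize import NLTKWordTokenizer

    s = 'He said, "Hello".'
    NLTKWordTokenizer().tokenize(s)
    # ['He', 'said', ',', '``', 'Hello', "''", '.']

    [s[start:end] for start, end in NLTKWordTokenizer().span_tokenize(s)]
    # ['He', 'said', ',', '"', 'Hello', '"', '.']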
+ if ('"' in text) or ("''" in text): + # Find double quotes and converted quotes + matched = [m.group() for m in re.finditer(r"``|'{2}|\"", text)] + + # Replace converted quotes back to double quotes + tokens = [ + matched.pop(0) if tok in ['"', "``", "''"] else tok + for tok in raw_tokens + ] + else: + tokens = raw_tokens + + yield from align_tokens(tokens, text) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py new file mode 100644 index 0000000000000000000000000000000000000000..547827cefe1af65209e1f44237b7ac160b167920 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/legality_principle.py @@ -0,0 +1,147 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Hench +# Alex Estes +# URL: +# For license information, see LICENSE.TXT + +""" +The Legality Principle is a language agnostic principle maintaining that syllable +onsets and codas (the beginning and ends of syllables not including the vowel) +are only legal if they are found as word onsets or codas in the language. The English +word ''admit'' must then be syllabified as ''ad-mit'' since ''dm'' is not found +word-initially in the English language (Bartlett et al.). This principle was first proposed +in Daniel Kahn's 1976 dissertation, ''Syllable-based generalizations in English phonology''. + +Kahn further argues that there is a ''strong tendency to syllabify in such a way that +initial clusters are of maximal length, consistent with the general constraints on +word-initial consonant clusters.'' Consequently, in addition to being legal onsets, +the longest legal onset is preferable---''Onset Maximization''. + +The default implementation assumes an English vowel set, but the `vowels` attribute +can be set to IPA or any other alphabet's vowel set for the use-case. +Both a valid set of vowels as well as a text corpus of words in the language +are necessary to determine legal onsets and subsequently syllabify words. + +The legality principle with onset maximization is a universal syllabification algorithm, +but that does not mean it performs equally across languages. Bartlett et al. (2009) +is a good benchmark for English accuracy if utilizing IPA (pg. 311). + +References: + +- Otto Jespersen. 1904. Lehrbuch der Phonetik. + Leipzig, Teubner. Chapter 13, Silbe, pp. 185-203. +- Theo Vennemann, ''On the Theory of Syllabic Phonology,'' 1972, p. 11. +- Daniel Kahn, ''Syllable-based generalizations in English phonology'', (PhD diss., MIT, 1976). +- Elisabeth Selkirk. 1984. On the major class features and syllable theory. + In Aronoff & Oehrle (eds.) Language Sound Structure: Studies in Phonology. + Cambridge, MIT Press. pp. 107-136. +- Jeremy Goslin and Ulrich Frauenfelder. 2001. A comparison of theoretical and human syllabification. Language and Speech, 44:409–436. +- Susan Bartlett, et al. 2009. On the Syllabification of Phonemes. + In HLT-NAACL. pp. 308-316. +- Christopher Hench. 2017. Resonances in Middle High German: New Methodologies in Prosody. UC Berkeley. +""" + +from collections import Counter + +from nltk.tokenize.api import TokenizerI + + +class LegalitySyllableTokenizer(TokenizerI): + """ + Syllabifies words based on the Legality Principle and Onset Maximization. 
+ + >>> from nltk.tokenize import LegalitySyllableTokenizer + >>> from nltk import word_tokenize + >>> from nltk.corpus import words + >>> text = "This is a wonderful sentence." + >>> text_words = word_tokenize(text) + >>> LP = LegalitySyllableTokenizer(words.words()) + >>> [LP.tokenize(word) for word in text_words] + [['This'], ['is'], ['a'], ['won', 'der', 'ful'], ['sen', 'ten', 'ce'], ['.']] + """ + + def __init__( + self, tokenized_source_text, vowels="aeiouy", legal_frequency_threshold=0.001 + ): + """ + :param tokenized_source_text: List of valid tokens in the language + :type tokenized_source_text: list(str) + :param vowels: Valid vowels in language or IPA representation + :type vowels: str + :param legal_frequency_threshold: Lowest frequency of all onsets to be considered a legal onset + :type legal_frequency_threshold: float + """ + self.legal_frequency_threshold = legal_frequency_threshold + self.vowels = vowels + self.legal_onsets = self.find_legal_onsets(tokenized_source_text) + + def find_legal_onsets(self, words): + """ + Gathers all onsets and then return only those above the frequency threshold + + :param words: List of words in a language + :type words: list(str) + :return: Set of legal onsets + :rtype: set(str) + """ + onsets = [self.onset(word) for word in words] + legal_onsets = [ + k + for k, v in Counter(onsets).items() + if (v / len(onsets)) > self.legal_frequency_threshold + ] + return set(legal_onsets) + + def onset(self, word): + """ + Returns consonant cluster of word, i.e. all characters until the first vowel. + + :param word: Single word or token + :type word: str + :return: String of characters of onset + :rtype: str + """ + onset = "" + for c in word.lower(): + if c in self.vowels: + return onset + else: + onset += c + return onset + + def tokenize(self, token): + """ + Apply the Legality Principle in combination with + Onset Maximization to return a list of syllables. + + :param token: Single word or token + :type token: str + :return syllable_list: Single word or token broken up into syllables. 
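(Aside.) The helpers above can be inspected directly, which makes the "legal onset" idea concrete; the exact membership depends on the supplied word list, so the commented results are indicative:

    from nltk.corpus import words
    from nltk.tokenize import LegalitySyllableTokenizer

    LP = LegalitySyllableTokenizer(words.words())
    LP.onset("breakfast")      # 'br' -- the consonant cluster before the first vowel
    "br" in LP.legal_onsets    # True is expected: 'br' begins well over 0.1% of the word list
    "tl" in LP.legal_onsets    # False is expected: English words do not begin with 'tl'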
+ :rtype: list(str) + """ + syllables = [] + syllable, current_onset = "", "" + vowel, onset = False, False + for char in token[::-1]: + char_lower = char.lower() + if not vowel: + syllable += char + vowel = bool(char_lower in self.vowels) + else: + if char_lower + current_onset[::-1] in self.legal_onsets: + syllable += char + current_onset += char_lower + onset = True + elif char_lower in self.vowels and not onset: + syllable += char + current_onset += char_lower + else: + syllables.append(syllable) + syllable = char + current_onset = "" + vowel = bool(char_lower in self.vowels) + syllables.append(syllable) + syllables_ordered = [syllable[::-1] for syllable in syllables][::-1] + return syllables_ordered diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/mwe.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/mwe.py new file mode 100644 index 0000000000000000000000000000000000000000..c39244c7b1c7a9be96331548150c60ce9aaae8be --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/mwe.py @@ -0,0 +1,124 @@ +# Multi-Word Expression tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Rob Malouf +# URL: +# For license information, see LICENSE.TXT + +""" +Multi-Word Expression Tokenizer + +A ``MWETokenizer`` takes a string which has already been divided into tokens and +retokenizes it, merging multi-word expressions into single tokens, using a lexicon +of MWEs: + + + >>> from nltk.tokenize import MWETokenizer + + >>> tokenizer = MWETokenizer([('a', 'little'), ('a', 'little', 'bit'), ('a', 'lot')]) + >>> tokenizer.add_mwe(('in', 'spite', 'of')) + + >>> tokenizer.tokenize('Testing testing testing one two three'.split()) + ['Testing', 'testing', 'testing', 'one', 'two', 'three'] + + >>> tokenizer.tokenize('This is a test in spite'.split()) + ['This', 'is', 'a', 'test', 'in', 'spite'] + + >>> tokenizer.tokenize('In a little or a little bit or a lot in spite of'.split()) + ['In', 'a_little', 'or', 'a_little_bit', 'or', 'a_lot', 'in_spite_of'] + +""" +from nltk.tokenize.api import TokenizerI +from nltk.util import Trie + + +class MWETokenizer(TokenizerI): + """A tokenizer that processes tokenized text and merges multi-word expressions + into single tokens. + """ + + def __init__(self, mwes=None, separator="_"): + """Initialize the multi-word tokenizer with a list of expressions and a + separator + + :type mwes: list(list(str)) + :param mwes: A sequence of multi-word expressions to be merged, where + each MWE is a sequence of strings. + :type separator: str + :param separator: String that should be inserted between words in a multi-word + expression token. (Default is '_') + + """ + if not mwes: + mwes = [] + self._mwes = Trie(mwes) + self._separator = separator + + def add_mwe(self, mwe): + """Add a multi-word expression to the lexicon (stored as a word trie) + + We use ``util.Trie`` to represent the trie. Its form is a dict of dicts. + The key True marks the end of a valid MWE. 
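(Aside on the matching strategy.) The ``tokenize()`` method below walks the trie greedily, remembers the last position at which a complete MWE ended, and backtracks to it if a longer match fails. A sketch:

    from nltk.tokenize import MWETokenizer

    tokenizer = MWETokenizer([("a", "b"), ("a", "b", "c")])

    tokenizer.tokenize("x a b c y".split())
    # ['x', 'a_b_c', 'y']      -- the longest MWE wins

    tokenizer.tokenize("x a b d y".split())
    # ['x', 'a_b', 'd', 'y']   -- falls back to the shorter complete match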
+ + :param mwe: The multi-word expression we're adding into the word trie + :type mwe: tuple(str) or list(str) + + :Example: + + >>> tokenizer = MWETokenizer() + >>> tokenizer.add_mwe(('a', 'b')) + >>> tokenizer.add_mwe(('a', 'b', 'c')) + >>> tokenizer.add_mwe(('a', 'x')) + >>> expected = {'a': {'x': {True: None}, 'b': {True: None, 'c': {True: None}}}} + >>> tokenizer._mwes == expected + True + + """ + self._mwes.insert(mwe) + + def tokenize(self, text): + """ + + :param text: A list containing tokenized text + :type text: list(str) + :return: A list of the tokenized text with multi-words merged together + :rtype: list(str) + + :Example: + + >>> tokenizer = MWETokenizer([('hors', "d'oeuvre")], separator='+') + >>> tokenizer.tokenize("An hors d'oeuvre tonight, sir?".split()) + ['An', "hors+d'oeuvre", 'tonight,', 'sir?'] + + """ + i = 0 + n = len(text) + result = [] + + while i < n: + if text[i] in self._mwes: + # possible MWE match + j = i + trie = self._mwes + last_match = -1 + while j < n and text[j] in trie: # and len(trie[text[j]]) > 0 : + trie = trie[text[j]] + j = j + 1 + if Trie.LEAF in trie: + last_match = j + else: + if last_match > -1: + j = last_match + + if Trie.LEAF in trie or last_match > -1: + # success! + result.append(self._separator.join(text[i:j])) + i = j + else: + # no match, so backtrack + result.append(text[i]) + i += 1 + else: + result.append(text[i]) + i += 1 + return result diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/nist.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/nist.py new file mode 100644 index 0000000000000000000000000000000000000000..b9e13dad28b81d91891a838d89bcdf5a0c1ad086 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/nist.py @@ -0,0 +1,179 @@ +# Natural Language Toolkit: Python port of the mteval-v14.pl tokenizer. +# +# Copyright (C) 2001-2015 NLTK Project +# Author: Liling Tan (ported from ftp://jaguar.ncsl.nist.gov/mt/resources/mteval-v14.pl) +# Contributors: Ozan Caglayan, Wiktor Stribizew +# +# URL: +# For license information, see LICENSE.TXT + +""" +This is a NLTK port of the tokenizer used in the NIST BLEU evaluation script, +https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L926 +which was also ported into Python in +https://github.com/lium-lst/nmtpy/blob/master/nmtpy/metrics/mtevalbleu.py#L162 +""" + + +import io +import re + +from nltk.corpus import perluniprops +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.util import xml_unescape + + +class NISTTokenizer(TokenizerI): + """ + This NIST tokenizer is sentence-based instead of the original + paragraph-based tokenization from mteval-14.pl; The sentence-based + tokenization is consistent with the other tokenizers available in NLTK. + + >>> from nltk.tokenize.nist import NISTTokenizer + >>> nist = NISTTokenizer() + >>> s = "Good muffins cost $3.88 in New York." + >>> expected_lower = [u'good', u'muffins', u'cost', u'$', u'3.88', u'in', u'new', u'york', u'.'] + >>> expected_cased = [u'Good', u'muffins', u'cost', u'$', u'3.88', u'in', u'New', u'York', u'.'] + >>> nist.tokenize(s, lowercase=False) == expected_cased + True + >>> nist.tokenize(s, lowercase=True) == expected_lower # Lowercased. + True + + The international_tokenize() is the preferred function when tokenizing + non-european text, e.g. + + >>> from nltk.tokenize.nist import NISTTokenizer + >>> nist = NISTTokenizer() + + # Input strings. 
+ >>> albb = u'Alibaba Group Holding Limited (Chinese: 阿里巴巴集团控股 有限公司) us a Chinese e-commerce company...' + >>> amz = u'Amazon.com, Inc. (/ˈæməzɒn/) is an American electronic commerce...' + >>> rkt = u'Rakuten, Inc. (楽天株式会社 Rakuten Kabushiki-gaisha) is a Japanese electronic commerce and Internet company based in Tokyo.' + + # Expected tokens. + >>> expected_albb = [u'Alibaba', u'Group', u'Holding', u'Limited', u'(', u'Chinese', u':', u'\u963f\u91cc\u5df4\u5df4\u96c6\u56e2\u63a7\u80a1', u'\u6709\u9650\u516c\u53f8', u')'] + >>> expected_amz = [u'Amazon', u'.', u'com', u',', u'Inc', u'.', u'(', u'/', u'\u02c8\xe6', u'm'] + >>> expected_rkt = [u'Rakuten', u',', u'Inc', u'.', u'(', u'\u697d\u5929\u682a\u5f0f\u4f1a\u793e', u'Rakuten', u'Kabushiki', u'-', u'gaisha'] + + >>> nist.international_tokenize(albb)[:10] == expected_albb + True + >>> nist.international_tokenize(amz)[:10] == expected_amz + True + >>> nist.international_tokenize(rkt)[:10] == expected_rkt + True + + # Doctest for patching issue #1926 + >>> sent = u'this is a foo\u2604sentence.' + >>> expected_sent = [u'this', u'is', u'a', u'foo', u'\u2604', u'sentence', u'.'] + >>> nist.international_tokenize(sent) == expected_sent + True + """ + + # Strip "skipped" tags + STRIP_SKIP = re.compile(""), "" + # Strip end-of-line hyphenation and join lines + STRIP_EOL_HYPHEN = re.compile("\u2028"), " " + # Tokenize punctuation. + PUNCT = re.compile(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])"), " \\1 " + # Tokenize period and comma unless preceded by a digit. + PERIOD_COMMA_PRECEED = re.compile(r"([^0-9])([\.,])"), "\\1 \\2 " + # Tokenize period and comma unless followed by a digit. + PERIOD_COMMA_FOLLOW = re.compile(r"([\.,])([^0-9])"), " \\1 \\2" + # Tokenize dash when preceded by a digit + DASH_PRECEED_DIGIT = re.compile("([0-9])(-)"), "\\1 \\2 " + + LANG_DEPENDENT_REGEXES = [ + PUNCT, + PERIOD_COMMA_PRECEED, + PERIOD_COMMA_FOLLOW, + DASH_PRECEED_DIGIT, + ] + + # Perluniprops characters used in NIST tokenizer. + pup_number = str("".join(set(perluniprops.chars("Number")))) # i.e. \p{N} + pup_punct = str("".join(set(perluniprops.chars("Punctuation")))) # i.e. \p{P} + pup_symbol = str("".join(set(perluniprops.chars("Symbol")))) # i.e. \p{S} + + # Python regexes needs to escape some special symbols, see + # see https://stackoverflow.com/q/45670950/610569 + number_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_number) + punct_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_punct) + symbol_regex = re.sub(r"[]^\\-]", r"\\\g<0>", pup_symbol) + + # Note: In the original perl implementation, \p{Z} and \p{Zl} were used to + # (i) strip trailing and heading spaces and + # (ii) de-deuplicate spaces. + # In Python, this would do: ' '.join(str.strip().split()) + # Thus, the next two lines were commented out. + # Line_Separator = str(''.join(perluniprops.chars('Line_Separator'))) # i.e. \p{Zl} + # Separator = str(''.join(perluniprops.chars('Separator'))) # i.e. \p{Z} + + # Pads non-ascii strings with space. + NONASCII = re.compile("([\x00-\x7f]+)"), r" \1 " + # Tokenize any punctuation unless followed AND preceded by a digit. + PUNCT_1 = ( + re.compile(f"([{number_regex}])([{punct_regex}])"), + "\\1 \\2 ", + ) + PUNCT_2 = ( + re.compile(f"([{punct_regex}])([{number_regex}])"), + " \\1 \\2", + ) + # Tokenize symbols + SYMBOLS = re.compile(f"([{symbol_regex}])"), " \\1 " + + INTERNATIONAL_REGEXES = [NONASCII, PUNCT_1, PUNCT_2, SYMBOLS] + + def lang_independent_sub(self, text): + """Performs the language independent string substituitions.""" + # It's a strange order of regexes. 
+ # It'll be better to unescape after STRIP_EOL_HYPHEN + # but let's keep it close to the original NIST implementation. + regexp, substitution = self.STRIP_SKIP + text = regexp.sub(substitution, text) + text = xml_unescape(text) + regexp, substitution = self.STRIP_EOL_HYPHEN + text = regexp.sub(substitution, text) + return text + + def tokenize(self, text, lowercase=False, western_lang=True, return_str=False): + text = str(text) + # Language independent regex. + text = self.lang_independent_sub(text) + # Language dependent regex. + if western_lang: + # Pad string with whitespace. + text = " " + text + " " + if lowercase: + text = text.lower() + for regexp, substitution in self.LANG_DEPENDENT_REGEXES: + text = regexp.sub(substitution, text) + # Remove contiguous whitespaces. + text = " ".join(text.split()) + # Finally, strips heading and trailing spaces + # and converts output string into unicode. + text = str(text.strip()) + return text if return_str else text.split() + + def international_tokenize( + self, text, lowercase=False, split_non_ascii=True, return_str=False + ): + text = str(text) + # Different from the 'normal' tokenize(), STRIP_EOL_HYPHEN is applied + # first before unescaping. + regexp, substitution = self.STRIP_SKIP + text = regexp.sub(substitution, text) + regexp, substitution = self.STRIP_EOL_HYPHEN + text = regexp.sub(substitution, text) + text = xml_unescape(text) + + if lowercase: + text = text.lower() + + for regexp, substitution in self.INTERNATIONAL_REGEXES: + text = regexp.sub(substitution, text) + + # Make sure that there's only one space only between words. + # Strip leading and trailing spaces. + text = " ".join(text.strip().split()) + return text if return_str else text.split() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/punkt.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/punkt.py new file mode 100644 index 0000000000000000000000000000000000000000..129bd49c270c301d97a44eec5e58d7e19f15cabe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/punkt.py @@ -0,0 +1,1767 @@ +# Natural Language Toolkit: Punkt sentence tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Algorithm: Kiss & Strunk (2006) +# Author: Willy (original Python port) +# Steven Bird (additions) +# Edward Loper (rewrite) +# Joel Nothman (almost rewrite) +# Arthur Darcet (fixes) +# Tom Aarsen <> (tackle ReDoS & performance issues) +# URL: +# For license information, see LICENSE.TXT + +r""" +Punkt Sentence Tokenizer + +This tokenizer divides a text into a list of sentences +by using an unsupervised algorithm to build a model for abbreviation +words, collocations, and words that start sentences. It must be +trained on a large collection of plaintext in the target language +before it can be used. + +The NLTK data package includes a pre-trained Punkt tokenizer for +English. + + >>> import nltk.data + >>> text = ''' + ... Punkt knows that the periods in Mr. Smith and Johann S. Bach + ... do not mark sentence boundaries. And sometimes sentences + ... can start with non-capitalized words. i is a good variable + ... name. + ... ''' + >>> sent_detector = nltk.data.load('tokenizers/punkt/english.pickle') + >>> print('\n-----\n'.join(sent_detector.tokenize(text.strip()))) + Punkt knows that the periods in Mr. Smith and Johann S. Bach + do not mark sentence boundaries. + ----- + And sometimes sentences + can start with non-capitalized words. + ----- + i is a good variable + name. 
+ +(Note that whitespace from the original text, including newlines, is +retained in the output.) + +Punctuation following sentences is also included by default +(from NLTK 3.0 onwards). It can be excluded with the realign_boundaries +flag. + + >>> text = ''' + ... (How does it deal with this parenthesis?) "It should be part of the + ... previous sentence." "(And the same with this one.)" ('And this one!') + ... "('(And (this)) '?)" [(and this. )] + ... ''' + >>> print('\n-----\n'.join( + ... sent_detector.tokenize(text.strip()))) + (How does it deal with this parenthesis?) + ----- + "It should be part of the + previous sentence." + ----- + "(And the same with this one.)" + ----- + ('And this one!') + ----- + "('(And (this)) '?)" + ----- + [(and this. )] + >>> print('\n-----\n'.join( + ... sent_detector.tokenize(text.strip(), realign_boundaries=False))) + (How does it deal with this parenthesis? + ----- + ) "It should be part of the + previous sentence. + ----- + " "(And the same with this one. + ----- + )" ('And this one! + ----- + ') + "('(And (this)) '? + ----- + )" [(and this. + ----- + )] + +However, Punkt is designed to learn parameters (a list of abbreviations, etc.) +unsupervised from a corpus similar to the target domain. The pre-packaged models +may therefore be unsuitable: use ``PunktSentenceTokenizer(text)`` to learn +parameters from the given text. + +:class:`.PunktTrainer` learns parameters such as a list of abbreviations +(without supervision) from portions of text. Using a ``PunktTrainer`` directly +allows for incremental training and modification of the hyper-parameters used +to decide what is considered an abbreviation, etc. + +The algorithm for this tokenizer is described in:: + + Kiss, Tibor and Strunk, Jan (2006): Unsupervised Multilingual Sentence + Boundary Detection. Computational Linguistics 32: 485-525. +""" + +# TODO: Make orthographic heuristic less susceptible to overtraining +# TODO: Frequent sentence starters optionally exclude always-capitalised words +# FIXME: Problem with ending string with e.g. '!!!' -> '!! !' + +import math +import re +import string +from collections import defaultdict +from typing import Any, Dict, Iterator, List, Match, Optional, Tuple, Union + +from nltk.probability import FreqDist +from nltk.tokenize.api import TokenizerI + +###################################################################### +# { Orthographic Context Constants +###################################################################### +# The following constants are used to describe the orthographic +# contexts in which a word can occur. BEG=beginning, MID=middle, +# UNK=unknown, UC=uppercase, LC=lowercase, NC=no case. 
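+#
+# For example, the constants below are bit flags that combine with bitwise OR:
+# a word type seen sentence-initially with upper case and sentence-internally
+# with lower case accumulates _ORTHO_BEG_UC | _ORTHO_MID_LC in
+# ``PunktParameters.ortho_context``.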
+ +_ORTHO_BEG_UC = 1 << 1 +"""Orthographic context: beginning of a sentence with upper case.""" + +_ORTHO_MID_UC = 1 << 2 +"""Orthographic context: middle of a sentence with upper case.""" + +_ORTHO_UNK_UC = 1 << 3 +"""Orthographic context: unknown position in a sentence with upper case.""" + +_ORTHO_BEG_LC = 1 << 4 +"""Orthographic context: beginning of a sentence with lower case.""" + +_ORTHO_MID_LC = 1 << 5 +"""Orthographic context: middle of a sentence with lower case.""" + +_ORTHO_UNK_LC = 1 << 6 +"""Orthographic context: unknown position in a sentence with lower case.""" + +_ORTHO_UC = _ORTHO_BEG_UC + _ORTHO_MID_UC + _ORTHO_UNK_UC +"""Orthographic context: occurs with upper case.""" + +_ORTHO_LC = _ORTHO_BEG_LC + _ORTHO_MID_LC + _ORTHO_UNK_LC +"""Orthographic context: occurs with lower case.""" + +_ORTHO_MAP = { + ("initial", "upper"): _ORTHO_BEG_UC, + ("internal", "upper"): _ORTHO_MID_UC, + ("unknown", "upper"): _ORTHO_UNK_UC, + ("initial", "lower"): _ORTHO_BEG_LC, + ("internal", "lower"): _ORTHO_MID_LC, + ("unknown", "lower"): _ORTHO_UNK_LC, +} +"""A map from context position and first-letter case to the +appropriate orthographic context flag.""" + +# } (end orthographic context constants) +###################################################################### + +###################################################################### +# { Decision reasons for debugging +###################################################################### + +REASON_DEFAULT_DECISION = "default decision" +REASON_KNOWN_COLLOCATION = "known collocation (both words)" +REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC = "abbreviation + orthographic heuristic" +REASON_ABBR_WITH_SENTENCE_STARTER = "abbreviation + frequent sentence starter" +REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC = "initial + orthographic heuristic" +REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC = "initial + orthographic heuristic" +REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC = ( + "initial + special orthographic heuristic" +) + + +# } (end decision reasons for debugging) +###################################################################### + +###################################################################### +# { Language-dependent variables +###################################################################### + + +class PunktLanguageVars: + """ + Stores variables, mostly regular expressions, which may be + language-dependent for correct application of the algorithm. + An extension of this class may modify its properties to suit + a language other than English; an instance can then be passed + as an argument to PunktSentenceTokenizer and PunktTrainer + constructors. + """ + + __slots__ = ("_re_period_context", "_re_word_tokenizer") + + def __getstate__(self): + # All modifications to the class are performed by inheritance. + # Non-default parameters to be pickled must be defined in the inherited + # class. + return 1 + + def __setstate__(self, state): + return 1 + + sent_end_chars = (".", "?", "!") + """Characters which are candidates for sentence boundaries""" + + @property + def _re_sent_end_chars(self): + return "[%s]" % re.escape("".join(self.sent_end_chars)) + + internal_punctuation = ",:;" # might want to extend this.. 
+ """sentence internal punctuation, which indicates an abbreviation if + preceded by a period-final token.""" + + re_boundary_realignment = re.compile(r'["\')\]}]+?(?:\s+|(?=--)|$)', re.MULTILINE) + """Used to realign punctuation that should be included in a sentence + although it follows the period (or ?, !).""" + + _re_word_start = r"[^\(\"\`{\[:;&\#\*@\)}\]\-,]" + """Excludes some characters from starting word tokens""" + + @property + def _re_non_word_chars(self): + return r"(?:[)\";}\]\*:@\'\({\[%s])" % re.escape( + "".join(set(self.sent_end_chars) - {"."}) + ) + + """Characters that cannot appear within words""" + + _re_multi_char_punct = r"(?:\-{2,}|\.{2,}|(?:\.\s){2,}\.)" + """Hyphen and ellipsis are multi-character punctuation""" + + _word_tokenize_fmt = r"""( + %(MultiChar)s + | + (?=%(WordStart)s)\S+? # Accept word characters until end is found + (?= # Sequences marking a word's end + \s| # White-space + $| # End-of-string + %(NonWord)s|%(MultiChar)s| # Punctuation + ,(?=$|\s|%(NonWord)s|%(MultiChar)s) # Comma if at end of word + ) + | + \S + )""" + """Format of a regular expression to split punctuation from words, + excluding period.""" + + def _word_tokenizer_re(self): + """Compiles and returns a regular expression for word tokenization""" + try: + return self._re_word_tokenizer + except AttributeError: + self._re_word_tokenizer = re.compile( + self._word_tokenize_fmt + % { + "NonWord": self._re_non_word_chars, + "MultiChar": self._re_multi_char_punct, + "WordStart": self._re_word_start, + }, + re.UNICODE | re.VERBOSE, + ) + return self._re_word_tokenizer + + def word_tokenize(self, s): + """Tokenize a string to split off punctuation other than periods""" + return self._word_tokenizer_re().findall(s) + + _period_context_fmt = r""" + %(SentEndChars)s # a potential sentence ending + (?=(?P + %(NonWord)s # either other punctuation + | + \s+(?P\S+) # or whitespace and some other token + ))""" + """Format of a regular expression to find contexts including possible + sentence boundaries. Matches token which the possible sentence boundary + ends, and matches the following token within a lookahead expression.""" + + def period_context_re(self): + """Compiles and returns a regular expression to find contexts + including possible sentence boundaries.""" + try: + return self._re_period_context + except: + self._re_period_context = re.compile( + self._period_context_fmt + % { + "NonWord": self._re_non_word_chars, + "SentEndChars": self._re_sent_end_chars, + }, + re.UNICODE | re.VERBOSE, + ) + return self._re_period_context + + +_re_non_punct = re.compile(r"[^\W\d]", re.UNICODE) +"""Matches token types that are not merely punctuation. (Types for +numeric tokens are changed to ##number## and hence contain alpha.)""" + + +# } +###################################################################### + + +# //////////////////////////////////////////////////////////// +# { Helper Functions +# //////////////////////////////////////////////////////////// + + +def _pair_iter(iterator): + """ + Yields pairs of tokens from the given iterator such that each input + token will appear as the first element in a yielded tuple. The last + pair will have None as its second element. 
+ """ + iterator = iter(iterator) + try: + prev = next(iterator) + except StopIteration: + return + for el in iterator: + yield (prev, el) + prev = el + yield (prev, None) + + +###################################################################### +# { Punkt Parameters +###################################################################### + + +class PunktParameters: + """Stores data used to perform sentence boundary detection with Punkt.""" + + def __init__(self): + self.abbrev_types = set() + """A set of word types for known abbreviations.""" + + self.collocations = set() + """A set of word type tuples for known common collocations + where the first word ends in a period. E.g., ('S.', 'Bach') + is a common collocation in a text that discusses 'Johann + S. Bach'. These count as negative evidence for sentence + boundaries.""" + + self.sent_starters = set() + """A set of word types for words that often appear at the + beginning of sentences.""" + + self.ortho_context = defaultdict(int) + """A dictionary mapping word types to the set of orthographic + contexts that word type appears in. Contexts are represented + by adding orthographic context flags: ...""" + + def clear_abbrevs(self): + self.abbrev_types = set() + + def clear_collocations(self): + self.collocations = set() + + def clear_sent_starters(self): + self.sent_starters = set() + + def clear_ortho_context(self): + self.ortho_context = defaultdict(int) + + def add_ortho_context(self, typ, flag): + self.ortho_context[typ] |= flag + + def _debug_ortho_context(self, typ): + context = self.ortho_context[typ] + if context & _ORTHO_BEG_UC: + yield "BEG-UC" + if context & _ORTHO_MID_UC: + yield "MID-UC" + if context & _ORTHO_UNK_UC: + yield "UNK-UC" + if context & _ORTHO_BEG_LC: + yield "BEG-LC" + if context & _ORTHO_MID_LC: + yield "MID-LC" + if context & _ORTHO_UNK_LC: + yield "UNK-LC" + + +###################################################################### +# { PunktToken +###################################################################### + + +class PunktToken: + """Stores a token of text with annotations produced during + sentence boundary detection.""" + + _properties = ["parastart", "linestart", "sentbreak", "abbr", "ellipsis"] + __slots__ = ["tok", "type", "period_final"] + _properties + + def __init__(self, tok, **params): + self.tok = tok + self.type = self._get_type(tok) + self.period_final = tok.endswith(".") + + for prop in self._properties: + setattr(self, prop, None) + for k in params: + setattr(self, k, params[k]) + + # //////////////////////////////////////////////////////////// + # { Regular expressions for properties + # //////////////////////////////////////////////////////////// + # Note: [A-Za-z] is approximated by [^\W\d] in the general case. + _RE_ELLIPSIS = re.compile(r"\.\.+$") + _RE_NUMERIC = re.compile(r"^-?[\.,]?\d[\d,\.-]*\.?$") + _RE_INITIAL = re.compile(r"[^\W\d]\.$", re.UNICODE) + _RE_ALPHA = re.compile(r"[^\W\d]+$", re.UNICODE) + + # //////////////////////////////////////////////////////////// + # { Derived properties + # //////////////////////////////////////////////////////////// + + def _get_type(self, tok): + """Returns a case-normalized representation of the token.""" + return self._RE_NUMERIC.sub("##number##", tok.lower()) + + @property + def type_no_period(self): + """ + The type with its final period removed if it has one. 
+ """ + if len(self.type) > 1 and self.type[-1] == ".": + return self.type[:-1] + return self.type + + @property + def type_no_sentperiod(self): + """ + The type with its final period removed if it is marked as a + sentence break. + """ + if self.sentbreak: + return self.type_no_period + return self.type + + @property + def first_upper(self): + """True if the token's first character is uppercase.""" + return self.tok[0].isupper() + + @property + def first_lower(self): + """True if the token's first character is lowercase.""" + return self.tok[0].islower() + + @property + def first_case(self): + if self.first_lower: + return "lower" + if self.first_upper: + return "upper" + return "none" + + @property + def is_ellipsis(self): + """True if the token text is that of an ellipsis.""" + return self._RE_ELLIPSIS.match(self.tok) + + @property + def is_number(self): + """True if the token text is that of a number.""" + return self.type.startswith("##number##") + + @property + def is_initial(self): + """True if the token text is that of an initial.""" + return self._RE_INITIAL.match(self.tok) + + @property + def is_alpha(self): + """True if the token text is all alphabetic.""" + return self._RE_ALPHA.match(self.tok) + + @property + def is_non_punct(self): + """True if the token is either a number or is alphabetic.""" + return _re_non_punct.search(self.type) + + # //////////////////////////////////////////////////////////// + # { String representation + # //////////////////////////////////////////////////////////// + + def __repr__(self): + """ + A string representation of the token that can reproduce it + with eval(), which lists all the token's non-default + annotations. + """ + typestr = " type=%s," % repr(self.type) if self.type != self.tok else "" + + propvals = ", ".join( + f"{p}={repr(getattr(self, p))}" + for p in self._properties + if getattr(self, p) + ) + + return "{}({},{} {})".format( + self.__class__.__name__, + repr(self.tok), + typestr, + propvals, + ) + + def __str__(self): + """ + A string representation akin to that used by Kiss and Strunk. + """ + res = self.tok + if self.abbr: + res += "" + if self.ellipsis: + res += "" + if self.sentbreak: + res += "" + return res + + +###################################################################### +# { Punkt base class +###################################################################### + + +class PunktBaseClass: + """ + Includes common components of PunktTrainer and PunktSentenceTokenizer. + """ + + def __init__(self, lang_vars=None, token_cls=PunktToken, params=None): + if lang_vars is None: + lang_vars = PunktLanguageVars() + if params is None: + params = PunktParameters() + self._params = params + self._lang_vars = lang_vars + self._Token = token_cls + """The collection of parameters that determines the behavior + of the punkt tokenizer.""" + + # //////////////////////////////////////////////////////////// + # { Word tokenization + # //////////////////////////////////////////////////////////// + + def _tokenize_words(self, plaintext): + """ + Divide the given text into tokens, using the punkt word + segmentation regular expression, and generate the resulting list + of tokens augmented as three-tuples with two boolean values for whether + the given token occurs at the start of a paragraph or a new line, + respectively. 
+ """ + parastart = False + for line in plaintext.split("\n"): + if line.strip(): + line_toks = iter(self._lang_vars.word_tokenize(line)) + + try: + tok = next(line_toks) + except StopIteration: + continue + + yield self._Token(tok, parastart=parastart, linestart=True) + parastart = False + + for tok in line_toks: + yield self._Token(tok) + else: + parastart = True + + # //////////////////////////////////////////////////////////// + # { Annotation Procedures + # //////////////////////////////////////////////////////////// + + def _annotate_first_pass( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Perform the first pass of annotation, which makes decisions + based purely based on the word type of each word: + + - '?', '!', and '.' are marked as sentence breaks. + - sequences of two or more periods are marked as ellipsis. + - any word ending in '.' that's a known abbreviation is + marked as an abbreviation. + - any other word ending in '.' is marked as a sentence break. + + Return these annotations as a tuple of three sets: + + - sentbreak_toks: The indices of all sentence breaks. + - abbrev_toks: The indices of all abbreviations. + - ellipsis_toks: The indices of all ellipsis marks. + """ + for aug_tok in tokens: + self._first_pass_annotation(aug_tok) + yield aug_tok + + def _first_pass_annotation(self, aug_tok: PunktToken) -> None: + """ + Performs type-based annotation on a single token. + """ + + tok = aug_tok.tok + + if tok in self._lang_vars.sent_end_chars: + aug_tok.sentbreak = True + elif aug_tok.is_ellipsis: + aug_tok.ellipsis = True + elif aug_tok.period_final and not tok.endswith(".."): + if ( + tok[:-1].lower() in self._params.abbrev_types + or tok[:-1].lower().split("-")[-1] in self._params.abbrev_types + ): + + aug_tok.abbr = True + else: + aug_tok.sentbreak = True + + return + + +###################################################################### +# { Punkt Trainer +###################################################################### + + +class PunktTrainer(PunktBaseClass): + """Learns parameters used in Punkt sentence boundary detection.""" + + def __init__( + self, train_text=None, verbose=False, lang_vars=None, token_cls=PunktToken + ): + + PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls) + + self._type_fdist = FreqDist() + """A frequency distribution giving the frequency of each + case-normalized token type in the training data.""" + + self._num_period_toks = 0 + """The number of words ending in period in the training data.""" + + self._collocation_fdist = FreqDist() + """A frequency distribution giving the frequency of all + bigrams in the training data where the first word ends in a + period. Bigrams are encoded as tuples of word types. + Especially common collocations are extracted from this + frequency distribution, and stored in + ``_params``.``collocations ``.""" + + self._sent_starter_fdist = FreqDist() + """A frequency distribution giving the frequency of all words + that occur at the training data at the beginning of a sentence + (after the first pass of annotation). Especially common + sentence starters are extracted from this frequency + distribution, and stored in ``_params.sent_starters``. 
+ """ + + self._sentbreak_count = 0 + """The total number of sentence breaks identified in training, used for + calculating the frequent sentence starter heuristic.""" + + self._finalized = True + """A flag as to whether the training has been finalized by finding + collocations and sentence starters, or whether finalize_training() + still needs to be called.""" + + if train_text: + self.train(train_text, verbose, finalize=True) + + def get_params(self): + """ + Calculates and returns parameters for sentence boundary detection as + derived from training.""" + if not self._finalized: + self.finalize_training() + return self._params + + # //////////////////////////////////////////////////////////// + # { Customization Variables + # //////////////////////////////////////////////////////////// + + ABBREV = 0.3 + """cut-off value whether a 'token' is an abbreviation""" + + IGNORE_ABBREV_PENALTY = False + """allows the disabling of the abbreviation penalty heuristic, which + exponentially disadvantages words that are found at times without a + final period.""" + + ABBREV_BACKOFF = 5 + """upper cut-off for Mikheev's(2002) abbreviation detection algorithm""" + + COLLOCATION = 7.88 + """minimal log-likelihood value that two tokens need to be considered + as a collocation""" + + SENT_STARTER = 30 + """minimal log-likelihood value that a token requires to be considered + as a frequent sentence starter""" + + INCLUDE_ALL_COLLOCS = False + """this includes as potential collocations all word pairs where the first + word ends in a period. It may be useful in corpora where there is a lot + of variation that makes abbreviations like Mr difficult to identify.""" + + INCLUDE_ABBREV_COLLOCS = False + """this includes as potential collocations all word pairs where the first + word is an abbreviation. Such collocations override the orthographic + heuristic, but not the sentence starter heuristic. This is overridden by + INCLUDE_ALL_COLLOCS, and if both are false, only collocations with initials + and ordinals are considered.""" + """""" + + MIN_COLLOC_FREQ = 1 + """this sets a minimum bound on the number of times a bigram needs to + appear before it can be considered a collocation, in addition to log + likelihood statistics. This is useful when INCLUDE_ALL_COLLOCS is True.""" + + # //////////////////////////////////////////////////////////// + # { Training.. + # //////////////////////////////////////////////////////////// + + def train(self, text, verbose=False, finalize=True): + """ + Collects training data from a given text. If finalize is True, it + will determine all the parameters for sentence boundary detection. If + not, this will be delayed until get_params() or finalize_training() is + called. If verbose is True, abbreviations found will be listed. + """ + # Break the text into tokens; record which token indices correspond to + # line starts and paragraph starts; and determine their types. + self._train_tokens(self._tokenize_words(text), verbose) + if finalize: + self.finalize_training(verbose) + + def train_tokens(self, tokens, verbose=False, finalize=True): + """ + Collects training data from a given list of tokens. + """ + self._train_tokens((self._Token(t) for t in tokens), verbose) + if finalize: + self.finalize_training(verbose) + + def _train_tokens(self, tokens, verbose): + self._finalized = False + + # Ensure tokens are a list + tokens = list(tokens) + + # Find the frequency of each case-normalized type. (Don't + # strip off final periods.) 
Also keep track of the number of + # tokens that end in periods. + for aug_tok in tokens: + self._type_fdist[aug_tok.type] += 1 + if aug_tok.period_final: + self._num_period_toks += 1 + + # Look for new abbreviations, and for types that no longer are + unique_types = self._unique_types(tokens) + for abbr, score, is_add in self._reclassify_abbrev_types(unique_types): + if score >= self.ABBREV: + if is_add: + self._params.abbrev_types.add(abbr) + if verbose: + print(f" Abbreviation: [{score:6.4f}] {abbr}") + else: + if not is_add: + self._params.abbrev_types.remove(abbr) + if verbose: + print(f" Removed abbreviation: [{score:6.4f}] {abbr}") + + # Make a preliminary pass through the document, marking likely + # sentence breaks, abbreviations, and ellipsis tokens. + tokens = list(self._annotate_first_pass(tokens)) + + # Check what contexts each word type can appear in, given the + # case of its first letter. + self._get_orthography_data(tokens) + + # We need total number of sentence breaks to find sentence starters + self._sentbreak_count += self._get_sentbreak_count(tokens) + + # The remaining heuristics relate to pairs of tokens where the first + # ends in a period. + for aug_tok1, aug_tok2 in _pair_iter(tokens): + if not aug_tok1.period_final or not aug_tok2: + continue + + # Is the first token a rare abbreviation? + if self._is_rare_abbrev_type(aug_tok1, aug_tok2): + self._params.abbrev_types.add(aug_tok1.type_no_period) + if verbose: + print(" Rare Abbrev: %s" % aug_tok1.type) + + # Does second token have a high likelihood of starting a sentence? + if self._is_potential_sent_starter(aug_tok2, aug_tok1): + self._sent_starter_fdist[aug_tok2.type] += 1 + + # Is this bigram a potential collocation? + if self._is_potential_collocation(aug_tok1, aug_tok2): + self._collocation_fdist[ + (aug_tok1.type_no_period, aug_tok2.type_no_sentperiod) + ] += 1 + + def _unique_types(self, tokens): + return {aug_tok.type for aug_tok in tokens} + + def finalize_training(self, verbose=False): + """ + Uses data that has been gathered in training to determine likely + collocations and sentence starters. + """ + self._params.clear_sent_starters() + for typ, log_likelihood in self._find_sent_starters(): + self._params.sent_starters.add(typ) + if verbose: + print(f" Sent Starter: [{log_likelihood:6.4f}] {typ!r}") + + self._params.clear_collocations() + for (typ1, typ2), log_likelihood in self._find_collocations(): + self._params.collocations.add((typ1, typ2)) + if verbose: + print(f" Collocation: [{log_likelihood:6.4f}] {typ1!r}+{typ2!r}") + + self._finalized = True + + # //////////////////////////////////////////////////////////// + # { Overhead reduction + # //////////////////////////////////////////////////////////// + + def freq_threshold( + self, ortho_thresh=2, type_thresh=2, colloc_thres=2, sentstart_thresh=2 + ): + """ + Allows memory use to be reduced after much training by removing data + about rare tokens that are unlikely to have a statistical effect with + further training. Entries occurring above the given thresholds will be + retained. 
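+
+        For example (an illustrative sketch, assuming ``trainer`` is an
+        already-trained ``PunktTrainer``)::
+
+            trainer.freq_threshold(type_thresh=2, colloc_thres=2)
+
+        removes type and collocation entries seen only once; the number of
+        removed entries is recorded under a ``None`` key.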
+ """ + if ortho_thresh > 1: + old_oc = self._params.ortho_context + self._params.clear_ortho_context() + for tok in self._type_fdist: + count = self._type_fdist[tok] + if count >= ortho_thresh: + self._params.ortho_context[tok] = old_oc[tok] + + self._type_fdist = self._freq_threshold(self._type_fdist, type_thresh) + self._collocation_fdist = self._freq_threshold( + self._collocation_fdist, colloc_thres + ) + self._sent_starter_fdist = self._freq_threshold( + self._sent_starter_fdist, sentstart_thresh + ) + + def _freq_threshold(self, fdist, threshold): + """ + Returns a FreqDist containing only data with counts below a given + threshold, as well as a mapping (None -> count_removed). + """ + # We assume that there is more data below the threshold than above it + # and so create a new FreqDist rather than working in place. + res = FreqDist() + num_removed = 0 + for tok in fdist: + count = fdist[tok] + if count < threshold: + num_removed += 1 + else: + res[tok] += count + res[None] += num_removed + return res + + # //////////////////////////////////////////////////////////// + # { Orthographic data + # //////////////////////////////////////////////////////////// + + def _get_orthography_data(self, tokens): + """ + Collect information about whether each token type occurs + with different case patterns (i) overall, (ii) at + sentence-initial positions, and (iii) at sentence-internal + positions. + """ + # 'initial' or 'internal' or 'unknown' + context = "internal" + tokens = list(tokens) + + for aug_tok in tokens: + # If we encounter a paragraph break, then it's a good sign + # that it's a sentence break. But err on the side of + # caution (by not positing a sentence break) if we just + # saw an abbreviation. + if aug_tok.parastart and context != "unknown": + context = "initial" + + # If we're at the beginning of a line, then we can't decide + # between 'internal' and 'initial'. + if aug_tok.linestart and context == "internal": + context = "unknown" + + # Find the case-normalized type of the token. If it's a + # sentence-final token, strip off the period. + typ = aug_tok.type_no_sentperiod + + # Update the orthographic context table. + flag = _ORTHO_MAP.get((context, aug_tok.first_case), 0) + if flag: + self._params.add_ortho_context(typ, flag) + + # Decide whether the next word is at a sentence boundary. + if aug_tok.sentbreak: + if not (aug_tok.is_number or aug_tok.is_initial): + context = "initial" + else: + context = "unknown" + elif aug_tok.ellipsis or aug_tok.abbr: + context = "unknown" + else: + context = "internal" + + # //////////////////////////////////////////////////////////// + # { Abbreviations + # //////////////////////////////////////////////////////////// + + def _reclassify_abbrev_types(self, types): + """ + (Re)classifies each given token if + - it is period-final and not a known abbreviation; or + - it is not period-final and is otherwise a known abbreviation + by checking whether its previous classification still holds according + to the heuristics of section 3. + Yields triples (abbr, score, is_add) where abbr is the type in question, + score is its log-likelihood with penalties applied, and is_add specifies + whether the present type is a candidate for inclusion or exclusion as an + abbreviation, such that: + - (is_add and score >= 0.3) suggests a new abbreviation; and + - (not is_add and score < 0.3) suggests excluding an abbreviation. 
+ """ + # (While one could recalculate abbreviations from all .-final tokens at + # every iteration, in cases requiring efficiency, the number of tokens + # in the present training document will be much less.) + + for typ in types: + # Check some basic conditions, to rule out words that are + # clearly not abbrev_types. + if not _re_non_punct.search(typ) or typ == "##number##": + continue + + if typ.endswith("."): + if typ in self._params.abbrev_types: + continue + typ = typ[:-1] + is_add = True + else: + if typ not in self._params.abbrev_types: + continue + is_add = False + + # Count how many periods & nonperiods are in the + # candidate. + num_periods = typ.count(".") + 1 + num_nonperiods = len(typ) - num_periods + 1 + + # Let be the candidate without the period, and + # be the period. Find a log likelihood ratio that + # indicates whether occurs as a single unit (high + # value of log_likelihood), or as two independent units and + # (low value of log_likelihood). + count_with_period = self._type_fdist[typ + "."] + count_without_period = self._type_fdist[typ] + log_likelihood = self._dunning_log_likelihood( + count_with_period + count_without_period, + self._num_period_toks, + count_with_period, + self._type_fdist.N(), + ) + + # Apply three scaling factors to 'tweak' the basic log + # likelihood ratio: + # F_length: long word -> less likely to be an abbrev + # F_periods: more periods -> more likely to be an abbrev + # F_penalty: penalize occurrences w/o a period + f_length = math.exp(-num_nonperiods) + f_periods = num_periods + f_penalty = int(self.IGNORE_ABBREV_PENALTY) or math.pow( + num_nonperiods, -count_without_period + ) + score = log_likelihood * f_length * f_periods * f_penalty + + yield typ, score, is_add + + def find_abbrev_types(self): + """ + Recalculates abbreviations given type frequencies, despite no prior + determination of abbreviations. + This fails to include abbreviations otherwise found as "rare". + """ + self._params.clear_abbrevs() + tokens = (typ for typ in self._type_fdist if typ and typ.endswith(".")) + for abbr, score, _is_add in self._reclassify_abbrev_types(tokens): + if score >= self.ABBREV: + self._params.abbrev_types.add(abbr) + + # This function combines the work done by the original code's + # functions `count_orthography_context`, `get_orthography_count`, + # and `get_rare_abbreviations`. + def _is_rare_abbrev_type(self, cur_tok, next_tok): + """ + A word type is counted as a rare abbreviation if... + - it's not already marked as an abbreviation + - it occurs fewer than ABBREV_BACKOFF times + - either it is followed by a sentence-internal punctuation + mark, *or* it is followed by a lower-case word that + sometimes appears with upper case, but never occurs with + lower case at the beginning of sentences. + """ + if cur_tok.abbr or not cur_tok.sentbreak: + return False + + # Find the case-normalized type of the token. If it's + # a sentence-final token, strip off the period. + typ = cur_tok.type_no_sentperiod + + # Proceed only if the type hasn't been categorized as an + # abbreviation already, and is sufficiently rare... + count = self._type_fdist[typ] + self._type_fdist[typ[:-1]] + if typ in self._params.abbrev_types or count >= self.ABBREV_BACKOFF: + return False + + # Record this token as an abbreviation if the next + # token is a sentence-internal punctuation mark. + # [XX] :1 or check the whole thing?? + if next_tok.tok[:1] in self._lang_vars.internal_punctuation: + return True + + # Record this type as an abbreviation if the next + # token... 
(i) starts with a lower case letter, + # (ii) sometimes occurs with an uppercase letter, + # and (iii) never occus with an uppercase letter + # sentence-internally. + # [xx] should the check for (ii) be modified?? + if next_tok.first_lower: + typ2 = next_tok.type_no_sentperiod + typ2ortho_context = self._params.ortho_context[typ2] + if (typ2ortho_context & _ORTHO_BEG_UC) and not ( + typ2ortho_context & _ORTHO_MID_UC + ): + return True + + # //////////////////////////////////////////////////////////// + # { Log Likelihoods + # //////////////////////////////////////////////////////////// + + # helper for _reclassify_abbrev_types: + @staticmethod + def _dunning_log_likelihood(count_a, count_b, count_ab, N): + """ + A function that calculates the modified Dunning log-likelihood + ratio scores for abbreviation candidates. The details of how + this works is available in the paper. + """ + p1 = count_b / N + p2 = 0.99 + + null_hypo = count_ab * math.log(p1) + (count_a - count_ab) * math.log(1.0 - p1) + alt_hypo = count_ab * math.log(p2) + (count_a - count_ab) * math.log(1.0 - p2) + + likelihood = null_hypo - alt_hypo + + return -2.0 * likelihood + + @staticmethod + def _col_log_likelihood(count_a, count_b, count_ab, N): + """ + A function that will just compute log-likelihood estimate, in + the original paper it's described in algorithm 6 and 7. + + This *should* be the original Dunning log-likelihood values, + unlike the previous log_l function where it used modified + Dunning log-likelihood values + """ + p = count_b / N + p1 = count_ab / count_a + try: + p2 = (count_b - count_ab) / (N - count_a) + except ZeroDivisionError: + p2 = 1 + + try: + summand1 = count_ab * math.log(p) + (count_a - count_ab) * math.log(1.0 - p) + except ValueError: + summand1 = 0 + + try: + summand2 = (count_b - count_ab) * math.log(p) + ( + N - count_a - count_b + count_ab + ) * math.log(1.0 - p) + except ValueError: + summand2 = 0 + + if count_a == count_ab or p1 <= 0 or p1 >= 1: + summand3 = 0 + else: + summand3 = count_ab * math.log(p1) + (count_a - count_ab) * math.log( + 1.0 - p1 + ) + + if count_b == count_ab or p2 <= 0 or p2 >= 1: + summand4 = 0 + else: + summand4 = (count_b - count_ab) * math.log(p2) + ( + N - count_a - count_b + count_ab + ) * math.log(1.0 - p2) + + likelihood = summand1 + summand2 - summand3 - summand4 + + return -2.0 * likelihood + + # //////////////////////////////////////////////////////////// + # { Collocation Finder + # //////////////////////////////////////////////////////////// + + def _is_potential_collocation(self, aug_tok1, aug_tok2): + """ + Returns True if the pair of tokens may form a collocation given + log-likelihood statistics. + """ + return ( + ( + self.INCLUDE_ALL_COLLOCS + or (self.INCLUDE_ABBREV_COLLOCS and aug_tok1.abbr) + or (aug_tok1.sentbreak and (aug_tok1.is_number or aug_tok1.is_initial)) + ) + and aug_tok1.is_non_punct + and aug_tok2.is_non_punct + ) + + def _find_collocations(self): + """ + Generates likely collocations and their log-likelihood. 
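+
+        Each item yielded is a ``((typ1, typ2), log_likelihood)`` pair for a
+        bigram whose first word ended in a period in the training data.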
+ """ + for types in self._collocation_fdist: + try: + typ1, typ2 = types + except TypeError: + # types may be None after calling freq_threshold() + continue + if typ2 in self._params.sent_starters: + continue + + col_count = self._collocation_fdist[types] + typ1_count = self._type_fdist[typ1] + self._type_fdist[typ1 + "."] + typ2_count = self._type_fdist[typ2] + self._type_fdist[typ2 + "."] + if ( + typ1_count > 1 + and typ2_count > 1 + and self.MIN_COLLOC_FREQ < col_count <= min(typ1_count, typ2_count) + ): + + log_likelihood = self._col_log_likelihood( + typ1_count, typ2_count, col_count, self._type_fdist.N() + ) + # Filter out the not-so-collocative + if log_likelihood >= self.COLLOCATION and ( + self._type_fdist.N() / typ1_count > typ2_count / col_count + ): + yield (typ1, typ2), log_likelihood + + # //////////////////////////////////////////////////////////// + # { Sentence-Starter Finder + # //////////////////////////////////////////////////////////// + + def _is_potential_sent_starter(self, cur_tok, prev_tok): + """ + Returns True given a token and the token that precedes it if it + seems clear that the token is beginning a sentence. + """ + # If a token (i) is preceded by a sentece break that is + # not a potential ordinal number or initial, and (ii) is + # alphabetic, then it is a a sentence-starter. + return ( + prev_tok.sentbreak + and not (prev_tok.is_number or prev_tok.is_initial) + and cur_tok.is_alpha + ) + + def _find_sent_starters(self): + """ + Uses collocation heuristics for each candidate token to + determine if it frequently starts sentences. + """ + for typ in self._sent_starter_fdist: + if not typ: + continue + + typ_at_break_count = self._sent_starter_fdist[typ] + typ_count = self._type_fdist[typ] + self._type_fdist[typ + "."] + if typ_count < typ_at_break_count: + # needed after freq_threshold + continue + + log_likelihood = self._col_log_likelihood( + self._sentbreak_count, + typ_count, + typ_at_break_count, + self._type_fdist.N(), + ) + + if ( + log_likelihood >= self.SENT_STARTER + and self._type_fdist.N() / self._sentbreak_count + > typ_count / typ_at_break_count + ): + yield typ, log_likelihood + + def _get_sentbreak_count(self, tokens): + """ + Returns the number of sentence breaks marked in a given set of + augmented tokens. + """ + return sum(1 for aug_tok in tokens if aug_tok.sentbreak) + + +###################################################################### +# { Punkt Sentence Tokenizer +###################################################################### + + +class PunktSentenceTokenizer(PunktBaseClass, TokenizerI): + """ + A sentence tokenizer which uses an unsupervised algorithm to build + a model for abbreviation words, collocations, and words that start + sentences; and then uses that model to find sentence boundaries. + This approach has been shown to work well for many European + languages. + """ + + def __init__( + self, train_text=None, verbose=False, lang_vars=None, token_cls=PunktToken + ): + """ + train_text can either be the sole training text for this sentence + boundary detector, or can be a PunktParameters object. + """ + PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls) + + if train_text: + self._params = self.train(train_text, verbose) + + def train(self, train_text, verbose=False): + """ + Derives parameters from a given training text, or uses the parameters + given. Repeated calls to this method destroy previous parameters. For + incremental training, instantiate a separate PunktTrainer instance. 
+ """ + if not isinstance(train_text, str): + return train_text + return PunktTrainer( + train_text, lang_vars=self._lang_vars, token_cls=self._Token + ).get_params() + + # //////////////////////////////////////////////////////////// + # { Tokenization + # //////////////////////////////////////////////////////////// + + def tokenize(self, text: str, realign_boundaries: bool = True) -> List[str]: + """ + Given a text, returns a list of the sentences in that text. + """ + return list(self.sentences_from_text(text, realign_boundaries)) + + def debug_decisions(self, text: str) -> Iterator[Dict[str, Any]]: + """ + Classifies candidate periods as sentence breaks, yielding a dict for + each that may be used to understand why the decision was made. + + See format_debug_decision() to help make this output readable. + """ + + for match, decision_text in self._match_potential_end_contexts(text): + tokens = self._tokenize_words(decision_text) + tokens = list(self._annotate_first_pass(tokens)) + while tokens and not tokens[0].tok.endswith(self._lang_vars.sent_end_chars): + tokens.pop(0) + yield { + "period_index": match.end() - 1, + "text": decision_text, + "type1": tokens[0].type, + "type2": tokens[1].type, + "type1_in_abbrs": bool(tokens[0].abbr), + "type1_is_initial": bool(tokens[0].is_initial), + "type2_is_sent_starter": tokens[1].type_no_sentperiod + in self._params.sent_starters, + "type2_ortho_heuristic": self._ortho_heuristic(tokens[1]), + "type2_ortho_contexts": set( + self._params._debug_ortho_context(tokens[1].type_no_sentperiod) + ), + "collocation": ( + tokens[0].type_no_sentperiod, + tokens[1].type_no_sentperiod, + ) + in self._params.collocations, + "reason": self._second_pass_annotation(tokens[0], tokens[1]) + or REASON_DEFAULT_DECISION, + "break_decision": tokens[0].sentbreak, + } + + def span_tokenize( + self, text: str, realign_boundaries: bool = True + ) -> Iterator[Tuple[int, int]]: + """ + Given a text, generates (start, end) spans of sentences + in the text. + """ + slices = self._slices_from_text(text) + if realign_boundaries: + slices = self._realign_boundaries(text, slices) + for sentence in slices: + yield (sentence.start, sentence.stop) + + def sentences_from_text( + self, text: str, realign_boundaries: bool = True + ) -> List[str]: + """ + Given a text, generates the sentences in that text by only + testing candidate sentence breaks. If realign_boundaries is + True, includes in the sentence closing punctuation that + follows the period. + """ + return [text[s:e] for s, e in self.span_tokenize(text, realign_boundaries)] + + def _get_last_whitespace_index(self, text: str) -> int: + """ + Given a text, find the index of the *last* occurrence of *any* + whitespace character, i.e. " ", "\n", "\t", "\r", etc. + If none is found, return 0. + """ + for i in range(len(text) - 1, -1, -1): + if text[i] in string.whitespace: + return i + return 0 + + def _match_potential_end_contexts(self, text: str) -> Iterator[Tuple[Match, str]]: + """ + Given a text, find the matches of potential sentence breaks, + alongside the contexts surrounding these sentence breaks. + + Since the fix for the ReDOS discovered in issue #2866, we no longer match + the word before a potential end of sentence token. Instead, we use a separate + regex for this. As a consequence, `finditer`'s desire to find non-overlapping + matches no longer aids us in finding the single longest match. + Where previously, we could use:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." 
+ >>> list(pst._lang_vars.period_context_re().finditer(text)) # doctest: +SKIP + [] + + Now we have to find the word before (i.e. 'acting') separately, and `finditer` + returns:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." + >>> list(pst._lang_vars.period_context_re().finditer(text)) # doctest: +NORMALIZE_WHITESPACE + [, + , + ] + + So, we need to find the word before the match from right to left, and then manually remove + the overlaps. That is what this method does:: + + >>> pst = PunktSentenceTokenizer() + >>> text = "Very bad acting!!! I promise." + >>> list(pst._match_potential_end_contexts(text)) + [(, 'acting!!! I')] + + :param text: String of one or more sentences + :type text: str + :return: Generator of match-context tuples. + :rtype: Iterator[Tuple[Match, str]] + """ + previous_slice = slice(0, 0) + previous_match = None + for match in self._lang_vars.period_context_re().finditer(text): + + # Get the slice of the previous word + before_text = text[previous_slice.stop : match.start()] + index_after_last_space = self._get_last_whitespace_index(before_text) + if index_after_last_space: + # + 1 to exclude the space itself + index_after_last_space += previous_slice.stop + 1 + else: + index_after_last_space = previous_slice.start + prev_word_slice = slice(index_after_last_space, match.start()) + + # If the previous slice does not overlap with this slice, then + # we can yield the previous match and slice. If there is an overlap, + # then we do not yield the previous match and slice. + if previous_match and previous_slice.stop <= prev_word_slice.start: + yield ( + previous_match, + text[previous_slice] + + previous_match.group() + + previous_match.group("after_tok"), + ) + previous_match = match + previous_slice = prev_word_slice + + # Yield the last match and context, if it exists + if previous_match: + yield ( + previous_match, + text[previous_slice] + + previous_match.group() + + previous_match.group("after_tok"), + ) + + def _slices_from_text(self, text: str) -> Iterator[slice]: + last_break = 0 + for match, context in self._match_potential_end_contexts(text): + if self.text_contains_sentbreak(context): + yield slice(last_break, match.end()) + if match.group("next_tok"): + # next sentence starts after whitespace + last_break = match.start("next_tok") + else: + # next sentence starts at following punctuation + last_break = match.end() + # The last sentence should not contain trailing whitespace. + yield slice(last_break, len(text.rstrip())) + + def _realign_boundaries( + self, text: str, slices: Iterator[slice] + ) -> Iterator[slice]: + """ + Attempts to realign punctuation that falls after the period but + should otherwise be included in the same sentence. + + For example: "(Sent1.) Sent2." will otherwise be split as:: + + ["(Sent1.", ") Sent1."]. + + This method will produce:: + + ["(Sent1.)", "Sent2."]. + """ + realign = 0 + for sentence1, sentence2 in _pair_iter(slices): + sentence1 = slice(sentence1.start + realign, sentence1.stop) + if not sentence2: + if text[sentence1]: + yield sentence1 + continue + + m = self._lang_vars.re_boundary_realignment.match(text[sentence2]) + if m: + yield slice(sentence1.start, sentence2.start + len(m.group(0).rstrip())) + realign = m.end() + else: + realign = 0 + if text[sentence1]: + yield sentence1 + + def text_contains_sentbreak(self, text: str) -> bool: + """ + Returns True if the given text includes a sentence break. 
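+
+        A break detected on the very last token of ``text`` is ignored.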
+ """ + found = False # used to ignore last token + for tok in self._annotate_tokens(self._tokenize_words(text)): + if found: + return True + if tok.sentbreak: + found = True + return False + + def sentences_from_text_legacy(self, text: str) -> Iterator[str]: + """ + Given a text, generates the sentences in that text. Annotates all + tokens, rather than just those with possible sentence breaks. Should + produce the same results as ``sentences_from_text``. + """ + tokens = self._annotate_tokens(self._tokenize_words(text)) + return self._build_sentence_list(text, tokens) + + def sentences_from_tokens( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Given a sequence of tokens, generates lists of tokens, each list + corresponding to a sentence. + """ + tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens)) + sentence = [] + for aug_tok in tokens: + sentence.append(aug_tok.tok) + if aug_tok.sentbreak: + yield sentence + sentence = [] + if sentence: + yield sentence + + def _annotate_tokens(self, tokens: Iterator[PunktToken]) -> Iterator[PunktToken]: + """ + Given a set of tokens augmented with markers for line-start and + paragraph-start, returns an iterator through those tokens with full + annotation including predicted sentence breaks. + """ + # Make a preliminary pass through the document, marking likely + # sentence breaks, abbreviations, and ellipsis tokens. + tokens = self._annotate_first_pass(tokens) + + # Make a second pass through the document, using token context + # information to change our preliminary decisions about where + # sentence breaks, abbreviations, and ellipsis occurs. + tokens = self._annotate_second_pass(tokens) + + ## [XX] TESTING + # tokens = list(tokens) + # self.dump(tokens) + + return tokens + + def _build_sentence_list( + self, text: str, tokens: Iterator[PunktToken] + ) -> Iterator[str]: + """ + Given the original text and the list of augmented word tokens, + construct and return a tokenized list of sentence strings. + """ + # Most of the work here is making sure that we put the right + # pieces of whitespace back in all the right places. + + # Our position in the source text, used to keep track of which + # whitespace to add: + pos = 0 + + # A regular expression that finds pieces of whitespace: + white_space_regexp = re.compile(r"\s*") + + sentence = "" + for aug_tok in tokens: + tok = aug_tok.tok + + # Find the whitespace before this token, and update pos. + white_space = white_space_regexp.match(text, pos).group() + pos += len(white_space) + + # Some of the rules used by the punkt word tokenizer + # strip whitespace out of the text, resulting in tokens + # that contain whitespace in the source text. If our + # token doesn't match, see if adding whitespace helps. + # If so, then use the version with whitespace. + if text[pos : pos + len(tok)] != tok: + pat = r"\s*".join(re.escape(c) for c in tok) + m = re.compile(pat).match(text, pos) + if m: + tok = m.group() + + # Move our position pointer to the end of the token. + assert text[pos : pos + len(tok)] == tok + pos += len(tok) + + # Add this token. If it's not at the beginning of the + # sentence, then include any whitespace that separated it + # from the previous token. + if sentence: + sentence += white_space + sentence += tok + + # If we're at a sentence break, then start a new sentence. + if aug_tok.sentbreak: + yield sentence + sentence = "" + + # If the last sentence is empty, discard it. 
+ if sentence: + yield sentence + + # [XX] TESTING + def dump(self, tokens: Iterator[PunktToken]) -> None: + print("writing to /tmp/punkt.new...") + with open("/tmp/punkt.new", "w") as outfile: + for aug_tok in tokens: + if aug_tok.parastart: + outfile.write("\n\n") + elif aug_tok.linestart: + outfile.write("\n") + else: + outfile.write(" ") + + outfile.write(str(aug_tok)) + + # //////////////////////////////////////////////////////////// + # { Customization Variables + # //////////////////////////////////////////////////////////// + + PUNCTUATION = tuple(";:,.!?") + + # //////////////////////////////////////////////////////////// + # { Annotation Procedures + # //////////////////////////////////////////////////////////// + + def _annotate_second_pass( + self, tokens: Iterator[PunktToken] + ) -> Iterator[PunktToken]: + """ + Performs a token-based classification (section 4) over the given + tokens, making use of the orthographic heuristic (4.1.1), collocation + heuristic (4.1.2) and frequent sentence starter heuristic (4.1.3). + """ + for token1, token2 in _pair_iter(tokens): + self._second_pass_annotation(token1, token2) + yield token1 + + def _second_pass_annotation( + self, aug_tok1: PunktToken, aug_tok2: Optional[PunktToken] + ) -> Optional[str]: + """ + Performs token-based classification over a pair of contiguous tokens + updating the first. + """ + # Is it the last token? We can't do anything then. + if not aug_tok2: + return + + if not aug_tok1.period_final: + # We only care about words ending in periods. + return + typ = aug_tok1.type_no_period + next_typ = aug_tok2.type_no_sentperiod + tok_is_initial = aug_tok1.is_initial + + # [4.1.2. Collocation Heuristic] If there's a + # collocation between the word before and after the + # period, then label tok as an abbreviation and NOT + # a sentence break. Note that collocations with + # frequent sentence starters as their second word are + # excluded in training. + if (typ, next_typ) in self._params.collocations: + aug_tok1.sentbreak = False + aug_tok1.abbr = True + return REASON_KNOWN_COLLOCATION + + # [4.2. Token-Based Reclassification of Abbreviations] If + # the token is an abbreviation or an ellipsis, then decide + # whether we should *also* classify it as a sentbreak. + if (aug_tok1.abbr or aug_tok1.ellipsis) and (not tok_is_initial): + # [4.1.1. Orthographic Heuristic] Check if there's + # orthogrpahic evidence about whether the next word + # starts a sentence or not. + is_sent_starter = self._ortho_heuristic(aug_tok2) + if is_sent_starter == True: + aug_tok1.sentbreak = True + return REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC + + # [4.1.3. Frequent Sentence Starter Heruistic] If the + # next word is capitalized, and is a member of the + # frequent-sentence-starters list, then label tok as a + # sentence break. + if aug_tok2.first_upper and next_typ in self._params.sent_starters: + aug_tok1.sentbreak = True + return REASON_ABBR_WITH_SENTENCE_STARTER + + # [4.3. Token-Based Detection of Initials and Ordinals] + # Check if any initials or ordinals tokens that are marked + # as sentbreaks should be reclassified as abbreviations. + if tok_is_initial or typ == "##number##": + + # [4.1.1. Orthographic Heuristic] Check if there's + # orthogrpahic evidence about whether the next word + # starts a sentence or not. 
+ is_sent_starter = self._ortho_heuristic(aug_tok2) + + if is_sent_starter == False: + aug_tok1.sentbreak = False + aug_tok1.abbr = True + if tok_is_initial: + return REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC + return REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC + + # Special heuristic for initials: if orthogrpahic + # heuristic is unknown, and next word is always + # capitalized, then mark as abbrev (eg: J. Bach). + if ( + is_sent_starter == "unknown" + and tok_is_initial + and aug_tok2.first_upper + and not (self._params.ortho_context[next_typ] & _ORTHO_LC) + ): + aug_tok1.sentbreak = False + aug_tok1.abbr = True + return REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC + + return + + def _ortho_heuristic(self, aug_tok: PunktToken) -> Union[bool, str]: + """ + Decide whether the given token is the first token in a sentence. + """ + # Sentences don't start with punctuation marks: + if aug_tok.tok in self.PUNCTUATION: + return False + + ortho_context = self._params.ortho_context[aug_tok.type_no_sentperiod] + + # If the word is capitalized, occurs at least once with a + # lower case first letter, and never occurs with an upper case + # first letter sentence-internally, then it's a sentence starter. + if ( + aug_tok.first_upper + and (ortho_context & _ORTHO_LC) + and not (ortho_context & _ORTHO_MID_UC) + ): + return True + + # If the word is lower case, and either (a) we've seen it used + # with upper case, or (b) we've never seen it used + # sentence-initially with lower case, then it's not a sentence + # starter. + if aug_tok.first_lower and ( + (ortho_context & _ORTHO_UC) or not (ortho_context & _ORTHO_BEG_LC) + ): + return False + + # Otherwise, we're not sure. + return "unknown" + + +DEBUG_DECISION_FMT = """Text: {text!r} (at offset {period_index}) +Sentence break? {break_decision} ({reason}) +Collocation? {collocation} +{type1!r}: + known abbreviation: {type1_in_abbrs} + is initial: {type1_is_initial} +{type2!r}: + known sentence starter: {type2_is_sent_starter} + orthographic heuristic suggests is a sentence starter? {type2_ortho_heuristic} + orthographic contexts in training: {type2_ortho_contexts} +""" + + +def format_debug_decision(d): + return DEBUG_DECISION_FMT.format(**d) + + +def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer): + """Builds a punkt model and applies it to the same text""" + cleanup = ( + lambda s: re.compile(r"(?:\r|^\s+)", re.MULTILINE).sub("", s).replace("\n", " ") + ) + trainer = train_cls() + trainer.INCLUDE_ALL_COLLOCS = True + trainer.train(text) + sbd = tok_cls(trainer.get_params()) + for sentence in sbd.sentences_from_text(text): + print(cleanup(sentence)) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/regexp.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/regexp.py new file mode 100644 index 0000000000000000000000000000000000000000..e3875b1447ba2843b7e6f186de24b4e67baf8844 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/regexp.py @@ -0,0 +1,220 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# Trevor Cohn +# URL: +# For license information, see LICENSE.TXT + +r""" +Regular-Expression Tokenizers + +A ``RegexpTokenizer`` splits a string into substrings using a regular expression. 
+For example, the following tokenizer forms tokens out of alphabetic sequences, +money expressions, and any other non-whitespace sequences: + + >>> from nltk.tokenize import RegexpTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+') + >>> tokenizer.tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +A ``RegexpTokenizer`` can use its regexp to match delimiters instead: + + >>> tokenizer = RegexpTokenizer(r'\s+', gaps=True) + >>> tokenizer.tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + +Note that empty tokens are not returned when the delimiter appears at +the start or end of the string. + +The material between the tokens is discarded. For example, +the following tokenizer selects just the capitalized words: + + >>> capword_tokenizer = RegexpTokenizer(r'[A-Z]\w+') + >>> capword_tokenizer.tokenize(s) + ['Good', 'New', 'York', 'Please', 'Thanks'] + +This module contains several subclasses of ``RegexpTokenizer`` +that use pre-defined regular expressions. + + >>> from nltk.tokenize import BlanklineTokenizer + >>> # Uses '\s*\n\s*\n\s*': + >>> BlanklineTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.', + 'Thanks.'] + +All of the regular expression tokenizers are also available as functions: + + >>> from nltk.tokenize import regexp_tokenize, wordpunct_tokenize, blankline_tokenize + >>> regexp_tokenize(s, pattern=r'\w+|\$[\d\.]+|\S+') # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> wordpunct_tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', + '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> blankline_tokenize(s) + ['Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.', 'Thanks.'] + +Caution: The function ``regexp_tokenize()`` takes the text as its +first argument, and the regular expression pattern as its second +argument. This differs from the conventions used by Python's +``re`` functions, where the pattern is always the first argument. +(This is for consistency with the other NLTK tokenizers.) +""" + +import re + +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.util import regexp_span_tokenize + + +class RegexpTokenizer(TokenizerI): + r""" + A tokenizer that splits a string using a regular expression, which + matches either the tokens or the separators between tokens. + + >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+') + + :type pattern: str + :param pattern: The pattern used to build this tokenizer. + (This pattern must not contain capturing parentheses; + Use non-capturing parentheses, e.g. (?:...), instead) + :type gaps: bool + :param gaps: True if this tokenizer's pattern should be used + to find separators between tokens; False if this + tokenizer's pattern should be used to find the tokens + themselves. + :type discard_empty: bool + :param discard_empty: True if any empty tokens `''` + generated by the tokenizer should be discarded. Empty + tokens can only be generated if `_gaps == True`. 
+ :type flags: int + :param flags: The regexp flags used to compile this + tokenizer's pattern. By default, the following flags are + used: `re.UNICODE | re.MULTILINE | re.DOTALL`. + + """ + + def __init__( + self, + pattern, + gaps=False, + discard_empty=True, + flags=re.UNICODE | re.MULTILINE | re.DOTALL, + ): + # If they gave us a regexp object, extract the pattern. + pattern = getattr(pattern, "pattern", pattern) + + self._pattern = pattern + self._gaps = gaps + self._discard_empty = discard_empty + self._flags = flags + self._regexp = None + + def _check_regexp(self): + if self._regexp is None: + self._regexp = re.compile(self._pattern, self._flags) + + def tokenize(self, text): + self._check_regexp() + # If our regexp matches gaps, use re.split: + if self._gaps: + if self._discard_empty: + return [tok for tok in self._regexp.split(text) if tok] + else: + return self._regexp.split(text) + + # If our regexp matches tokens, use re.findall: + else: + return self._regexp.findall(text) + + def span_tokenize(self, text): + self._check_regexp() + + if self._gaps: + for left, right in regexp_span_tokenize(text, self._regexp): + if not (self._discard_empty and left == right): + yield left, right + else: + for m in re.finditer(self._regexp, text): + yield m.span() + + def __repr__(self): + return "{}(pattern={!r}, gaps={!r}, discard_empty={!r}, flags={!r})".format( + self.__class__.__name__, + self._pattern, + self._gaps, + self._discard_empty, + self._flags, + ) + + +class WhitespaceTokenizer(RegexpTokenizer): + r""" + Tokenize a string on whitespace (space, tab, newline). + In general, users should use the string ``split()`` method instead. + + >>> from nltk.tokenize import WhitespaceTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> WhitespaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\s+", gaps=True) + + +class BlanklineTokenizer(RegexpTokenizer): + """ + Tokenize a string, treating any sequence of blank lines as a delimiter. + Blank lines are defined as lines containing no characters, except for + space or tab characters. + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\s*\n\s*\n\s*", gaps=True) + + +class WordPunctTokenizer(RegexpTokenizer): + r""" + Tokenize a text into a sequence of alphabetic and + non-alphabetic characters, using the regexp ``\w+|[^\w\s]+``. + + >>> from nltk.tokenize import WordPunctTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> WordPunctTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', + '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + """ + + def __init__(self): + RegexpTokenizer.__init__(self, r"\w+|[^\w\s]+") + + +###################################################################### +# { Tokenization Functions +###################################################################### + + +def regexp_tokenize( + text, + pattern, + gaps=False, + discard_empty=True, + flags=re.UNICODE | re.MULTILINE | re.DOTALL, +): + """ + Return a tokenized copy of *text*. See :class:`.RegexpTokenizer` + for descriptions of the arguments. 
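+
+    A short usage sketch, mirroring the patterns in the module docstring:
+
+    >>> regexp_tokenize("A $5.00 fee applies.", pattern=r'\w+|\$[\d\.]+|\S+')
+    ['A', '$5.00', 'fee', 'applies', '.']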
+ """ + tokenizer = RegexpTokenizer(pattern, gaps, discard_empty, flags) + return tokenizer.tokenize(text) + + +blankline_tokenize = BlanklineTokenizer().tokenize +wordpunct_tokenize = WordPunctTokenizer().tokenize diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/repp.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/repp.py new file mode 100644 index 0000000000000000000000000000000000000000..6e0740a94645f14ec6162814cdb3c92167f503bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/repp.py @@ -0,0 +1,149 @@ +# Natural Language Toolkit: Interface to the Repp Tokenizer +# +# Copyright (C) 2001-2015 NLTK Project +# Authors: Rebecca Dridan and Stephan Oepen +# Contributors: Liling Tan +# +# URL: +# For license information, see LICENSE.TXT + +import os +import re +import subprocess +import sys +import tempfile + +from nltk.data import ZipFilePathPointer +from nltk.internals import find_dir +from nltk.tokenize.api import TokenizerI + + +class ReppTokenizer(TokenizerI): + """ + A class for word tokenization using the REPP parser described in + Rebecca Dridan and Stephan Oepen (2012) Tokenization: Returning to a + Long Solved Problem - A Survey, Contrastive Experiment, Recommendations, + and Toolkit. In ACL. http://anthology.aclweb.org/P/P12/P12-2.pdf#page=406 + + >>> sents = ['Tokenization is widely regarded as a solved problem due to the high accuracy that rulebased tokenizers achieve.' , + ... 'But rule-based tokenizers are hard to maintain and their rules language specific.' , + ... 'We evaluated our method on three languages and obtained error rates of 0.27% (English), 0.35% (Dutch) and 0.76% (Italian) for our best models.' + ... ] + >>> tokenizer = ReppTokenizer('/home/alvas/repp/') # doctest: +SKIP + >>> for sent in sents: # doctest: +SKIP + ... tokenizer.tokenize(sent) # doctest: +SKIP + ... + (u'Tokenization', u'is', u'widely', u'regarded', u'as', u'a', u'solved', u'problem', u'due', u'to', u'the', u'high', u'accuracy', u'that', u'rulebased', u'tokenizers', u'achieve', u'.') + (u'But', u'rule-based', u'tokenizers', u'are', u'hard', u'to', u'maintain', u'and', u'their', u'rules', u'language', u'specific', u'.') + (u'We', u'evaluated', u'our', u'method', u'on', u'three', u'languages', u'and', u'obtained', u'error', u'rates', u'of', u'0.27', u'%', u'(', u'English', u')', u',', u'0.35', u'%', u'(', u'Dutch', u')', u'and', u'0.76', u'%', u'(', u'Italian', u')', u'for', u'our', u'best', u'models', u'.') + + >>> for sent in tokenizer.tokenize_sents(sents): # doctest: +SKIP + ... print(sent) # doctest: +SKIP + ... + (u'Tokenization', u'is', u'widely', u'regarded', u'as', u'a', u'solved', u'problem', u'due', u'to', u'the', u'high', u'accuracy', u'that', u'rulebased', u'tokenizers', u'achieve', u'.') + (u'But', u'rule-based', u'tokenizers', u'are', u'hard', u'to', u'maintain', u'and', u'their', u'rules', u'language', u'specific', u'.') + (u'We', u'evaluated', u'our', u'method', u'on', u'three', u'languages', u'and', u'obtained', u'error', u'rates', u'of', u'0.27', u'%', u'(', u'English', u')', u',', u'0.35', u'%', u'(', u'Dutch', u')', u'and', u'0.76', u'%', u'(', u'Italian', u')', u'for', u'our', u'best', u'models', u'.') + >>> for sent in tokenizer.tokenize_sents(sents, keep_token_positions=True): # doctest: +SKIP + ... print(sent) # doctest: +SKIP + ... 
+ [(u'Tokenization', 0, 12), (u'is', 13, 15), (u'widely', 16, 22), (u'regarded', 23, 31), (u'as', 32, 34), (u'a', 35, 36), (u'solved', 37, 43), (u'problem', 44, 51), (u'due', 52, 55), (u'to', 56, 58), (u'the', 59, 62), (u'high', 63, 67), (u'accuracy', 68, 76), (u'that', 77, 81), (u'rulebased', 82, 91), (u'tokenizers', 92, 102), (u'achieve', 103, 110), (u'.', 110, 111)] + [(u'But', 0, 3), (u'rule-based', 4, 14), (u'tokenizers', 15, 25), (u'are', 26, 29), (u'hard', 30, 34), (u'to', 35, 37), (u'maintain', 38, 46), (u'and', 47, 50), (u'their', 51, 56), (u'rules', 57, 62), (u'language', 63, 71), (u'specific', 72, 80), (u'.', 80, 81)] + [(u'We', 0, 2), (u'evaluated', 3, 12), (u'our', 13, 16), (u'method', 17, 23), (u'on', 24, 26), (u'three', 27, 32), (u'languages', 33, 42), (u'and', 43, 46), (u'obtained', 47, 55), (u'error', 56, 61), (u'rates', 62, 67), (u'of', 68, 70), (u'0.27', 71, 75), (u'%', 75, 76), (u'(', 77, 78), (u'English', 78, 85), (u')', 85, 86), (u',', 86, 87), (u'0.35', 88, 92), (u'%', 92, 93), (u'(', 94, 95), (u'Dutch', 95, 100), (u')', 100, 101), (u'and', 102, 105), (u'0.76', 106, 110), (u'%', 110, 111), (u'(', 112, 113), (u'Italian', 113, 120), (u')', 120, 121), (u'for', 122, 125), (u'our', 126, 129), (u'best', 130, 134), (u'models', 135, 141), (u'.', 141, 142)] + """ + + def __init__(self, repp_dir, encoding="utf8"): + self.repp_dir = self.find_repptokenizer(repp_dir) + # Set a directory to store the temporary files. + self.working_dir = tempfile.gettempdir() + # Set an encoding for the input strings. + self.encoding = encoding + + def tokenize(self, sentence): + """ + Use Repp to tokenize a single sentence. + + :param sentence: A single sentence string. + :type sentence: str + :return: A tuple of tokens. + :rtype: tuple(str) + """ + return next(self.tokenize_sents([sentence])) + + def tokenize_sents(self, sentences, keep_token_positions=False): + """ + Tokenize multiple sentences using Repp. + + :param sentences: A list of sentence strings. + :type sentences: list(str) + :return: A list of tuples of tokens + :rtype: iter(tuple(str)) + """ + with tempfile.NamedTemporaryFile( + prefix="repp_input.", dir=self.working_dir, mode="w", delete=False + ) as input_file: + # Write sentences to temporary input file. + for sent in sentences: + input_file.write(str(sent) + "\n") + input_file.close() + # Generate command to run REPP. + cmd = self.generate_repp_command(input_file.name) + # Decode the stdout and strips the ending newline. + repp_output = self._execute(cmd).decode(self.encoding).strip() + for tokenized_sent in self.parse_repp_outputs(repp_output): + if not keep_token_positions: + # Removes token position information. + tokenized_sent, starts, ends = zip(*tokenized_sent) + yield tokenized_sent + + def generate_repp_command(self, inputfilename): + """ + This module generates the REPP command to be used at the terminal. + + :param inputfilename: path to the input file + :type inputfilename: str + """ + cmd = [self.repp_dir + "/src/repp"] + cmd += ["-c", self.repp_dir + "/erg/repp.set"] + cmd += ["--format", "triple"] + cmd += [inputfilename] + return cmd + + @staticmethod + def _execute(cmd): + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + return stdout + + @staticmethod + def parse_repp_outputs(repp_output): + """ + This module parses the tri-tuple format that REPP outputs using the + "--format triple" option and returns an generator with tuple of string + tokens. 
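+        For example, an output line such as ``(0, 12, Tokenization)`` is
+        returned as the tuple ``('Tokenization', 0, 12)``.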
+ + :param repp_output: + :type repp_output: type + :return: an iterable of the tokenized sentences as tuples of strings + :rtype: iter(tuple) + """ + line_regex = re.compile(r"^\((\d+), (\d+), (.+)\)$", re.MULTILINE) + for section in repp_output.split("\n\n"): + words_with_positions = [ + (token, int(start), int(end)) + for start, end, token in line_regex.findall(section) + ] + words = tuple(t[2] for t in words_with_positions) + yield words_with_positions + + def find_repptokenizer(self, repp_dirname): + """ + A module to find REPP tokenizer binary and its *repp.set* config file. + """ + if os.path.exists(repp_dirname): # If a full path is given. + _repp_dir = repp_dirname + else: # Try to find path to REPP directory in environment variables. + _repp_dir = find_dir(repp_dirname, env_vars=("REPP_TOKENIZER",)) + # Checks for the REPP binary and erg/repp.set config file. + assert os.path.exists(_repp_dir + "/src/repp") + assert os.path.exists(_repp_dir + "/erg/repp.set") + return _repp_dir diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/sexpr.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/sexpr.py new file mode 100644 index 0000000000000000000000000000000000000000..0776642fbd2759c3f37352a97b18d915198cc20c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/sexpr.py @@ -0,0 +1,140 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Yoav Goldberg +# Steven Bird (minor edits) +# URL: +# For license information, see LICENSE.TXT + +""" +S-Expression Tokenizer + +``SExprTokenizer`` is used to find parenthesized expressions in a +string. In particular, it divides a string into a sequence of +substrings that are either parenthesized expressions (including any +nested parenthesized expressions), or other whitespace-separated +tokens. + + >>> from nltk.tokenize import SExprTokenizer + >>> SExprTokenizer().tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + +By default, `SExprTokenizer` will raise a ``ValueError`` exception if +used to tokenize an expression with non-matching parentheses: + + >>> SExprTokenizer().tokenize('c) d) e (f (g') + Traceback (most recent call last): + ... + ValueError: Un-matched close paren at char 1 + +The ``strict`` argument can be set to False to allow for +non-matching parentheses. Any unmatched close parentheses will be +listed as their own s-expression; and the last partial sexpr with +unmatched open parentheses will be listed as its own sexpr: + + >>> SExprTokenizer(strict=False).tokenize('c) d) e (f (g') + ['c', ')', 'd', ')', 'e', '(f (g'] + +The characters used for open and close parentheses may be customized +using the ``parens`` argument to the `SExprTokenizer` constructor: + + >>> SExprTokenizer(parens='{}').tokenize('{a b {c d}} e f {g}') + ['{a b {c d}}', 'e', 'f', '{g}'] + +The s-expression tokenizer is also available as a function: + + >>> from nltk.tokenize import sexpr_tokenize + >>> sexpr_tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + +""" + +import re + +from nltk.tokenize.api import TokenizerI + + +class SExprTokenizer(TokenizerI): + """ + A tokenizer that divides strings into s-expressions. + An s-expresion can be either: + + - a parenthesized expression, including any nested parenthesized + expressions, or + - a sequence of non-whitespace non-parenthesis characters. + + For example, the string ``(a (b c)) d e (f)`` consists of four + s-expressions: ``(a (b c))``, ``d``, ``e``, and ``(f)``. 
+ + By default, the characters ``(`` and ``)`` are treated as open and + close parentheses, but alternative strings may be specified. + + :param parens: A two-element sequence specifying the open and close parentheses + that should be used to find sexprs. This will typically be either a + two-character string, or a list of two strings. + :type parens: str or list + :param strict: If true, then raise an exception when tokenizing an ill-formed sexpr. + """ + + def __init__(self, parens="()", strict=True): + if len(parens) != 2: + raise ValueError("parens must contain exactly two strings") + self._strict = strict + self._open_paren = parens[0] + self._close_paren = parens[1] + self._paren_regexp = re.compile( + f"{re.escape(parens[0])}|{re.escape(parens[1])}" + ) + + def tokenize(self, text): + """ + Return a list of s-expressions extracted from *text*. + For example: + + >>> SExprTokenizer().tokenize('(a b (c d)) e f (g)') + ['(a b (c d))', 'e', 'f', '(g)'] + + All parentheses are assumed to mark s-expressions. + (No special processing is done to exclude parentheses that occur + inside strings, or following backslash characters.) + + If the given expression contains non-matching parentheses, + then the behavior of the tokenizer depends on the ``strict`` + parameter to the constructor. If ``strict`` is ``True``, then + raise a ``ValueError``. If ``strict`` is ``False``, then any + unmatched close parentheses will be listed as their own + s-expression; and the last partial s-expression with unmatched open + parentheses will be listed as its own s-expression: + + >>> SExprTokenizer(strict=False).tokenize('c) d) e (f (g') + ['c', ')', 'd', ')', 'e', '(f (g'] + + :param text: the string to be tokenized + :type text: str or iter(str) + :rtype: iter(str) + """ + result = [] + pos = 0 + depth = 0 + for m in self._paren_regexp.finditer(text): + paren = m.group() + if depth == 0: + result += text[pos : m.start()].split() + pos = m.start() + if paren == self._open_paren: + depth += 1 + if paren == self._close_paren: + if self._strict and depth == 0: + raise ValueError("Un-matched close paren at char %d" % m.start()) + depth = max(0, depth - 1) + if depth == 0: + result.append(text[pos : m.end()]) + pos = m.end() + if self._strict and depth > 0: + raise ValueError("Un-matched open paren at char %d" % pos) + if pos < len(text): + result.append(text[pos:]) + return result + + +sexpr_tokenize = SExprTokenizer().tokenize diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/simple.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/simple.py new file mode 100644 index 0000000000000000000000000000000000000000..f87b60a274c8121303ff60f203e1f3b991da1547 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/simple.py @@ -0,0 +1,137 @@ +# Natural Language Toolkit: Simple Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +r""" +Simple Tokenizers + +These tokenizers divide strings into substrings using the string +``split()`` method. +When tokenizing using a particular delimiter string, use +the string ``split()`` method directly, as this is more efficient. + +The simple tokenizers are *not* available as separate functions; +instead, you should just use the string ``split()`` method directly: + + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." 
+ >>> s.split() # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', + 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.'] + >>> s.split(' ') # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88\nin', 'New', 'York.', '', + 'Please', 'buy', 'me\ntwo', 'of', 'them.\n\nThanks.'] + >>> s.split('\n') # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88', 'in New York. Please buy me', + 'two of them.', '', 'Thanks.'] + +The simple tokenizers are mainly useful because they follow the +standard ``TokenizerI`` interface, and so can be used with any code +that expects a tokenizer. For example, these tokenizers can be used +to specify the tokenization conventions when building a `CorpusReader`. + +""" + +from nltk.tokenize.api import StringTokenizer, TokenizerI +from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize + + +class SpaceTokenizer(StringTokenizer): + r"""Tokenize a string using the space character as a delimiter, + which is the same as ``s.split(' ')``. + + >>> from nltk.tokenize import SpaceTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> SpaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$3.88\nin', 'New', 'York.', '', + 'Please', 'buy', 'me\ntwo', 'of', 'them.\n\nThanks.'] + """ + + _string = " " + + +class TabTokenizer(StringTokenizer): + r"""Tokenize a string use the tab character as a delimiter, + the same as ``s.split('\t')``. + + >>> from nltk.tokenize import TabTokenizer + >>> TabTokenizer().tokenize('a\tb c\n\t d') + ['a', 'b c\n', ' d'] + """ + + _string = "\t" + + +class CharTokenizer(StringTokenizer): + """Tokenize a string into individual characters. If this functionality + is ever required directly, use ``for char in string``. + """ + + def tokenize(self, s): + return list(s) + + def span_tokenize(self, s): + yield from enumerate(range(1, len(s) + 1)) + + +class LineTokenizer(TokenizerI): + r"""Tokenize a string into its lines, optionally discarding blank lines. + This is similar to ``s.split('\n')``. + + >>> from nltk.tokenize import LineTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks." + >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88', 'in New York. Please buy me', + 'two of them.', '', 'Thanks.'] + >>> # same as [l for l in s.split('\n') if l.strip()]: + >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good muffins cost $3.88', 'in New York. Please buy me', + 'two of them.', 'Thanks.'] + + :param blanklines: Indicates how blank lines should be handled. Valid values are: + + - ``discard``: strip blank lines out of the token list before returning it. + A line is considered blank if it contains only whitespace characters. + - ``keep``: leave all blank lines in the token list. + - ``discard-eof``: if the string ends with a newline, then do not generate + a corresponding token ``''`` after that newline. + """ + + def __init__(self, blanklines="discard"): + valid_blanklines = ("discard", "keep", "discard-eof") + if blanklines not in valid_blanklines: + raise ValueError( + "Blank lines must be one of: %s" % " ".join(valid_blanklines) + ) + + self._blanklines = blanklines + + def tokenize(self, s): + lines = s.splitlines() + # If requested, strip off blank lines. 
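+        # ("discard" removes every blank line, "discard-eof" removes only a
+        # trailing blank line, and "keep" returns the lines untouched.)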
+ if self._blanklines == "discard": + lines = [l for l in lines if l.rstrip()] + elif self._blanklines == "discard-eof": + if lines and not lines[-1].strip(): + lines.pop() + return lines + + # discard-eof not implemented + def span_tokenize(self, s): + if self._blanklines == "keep": + yield from string_span_tokenize(s, r"\n") + else: + yield from regexp_span_tokenize(s, r"\n(\s+\n)*") + + +###################################################################### +# { Tokenization Functions +###################################################################### +# XXX: it is stated in module docs that there is no function versions + + +def line_tokenize(text, blanklines="discard"): + return LineTokenizer(blanklines).tokenize(text) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py new file mode 100644 index 0000000000000000000000000000000000000000..24e43caae2dae6e3c76e66704fa9b856a6dc348c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/sonority_sequencing.py @@ -0,0 +1,194 @@ +# Natural Language Toolkit: Tokenizers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Christopher Hench +# Alex Estes +# URL: +# For license information, see LICENSE.TXT + +""" +The Sonority Sequencing Principle (SSP) is a language agnostic algorithm proposed +by Otto Jesperson in 1904. The sonorous quality of a phoneme is judged by the +openness of the lips. Syllable breaks occur before troughs in sonority. For more +on the SSP see Selkirk (1984). + +The default implementation uses the English alphabet, but the `sonority_hiearchy` +can be modified to IPA or any other alphabet for the use-case. The SSP is a +universal syllabification algorithm, but that does not mean it performs equally +across languages. Bartlett et al. (2009) is a good benchmark for English accuracy +if utilizing IPA (pg. 311). + +Importantly, if a custom hierarchy is supplied and vowels span across more than +one level, they should be given separately to the `vowels` class attribute. + +References: + +- Otto Jespersen. 1904. Lehrbuch der Phonetik. + Leipzig, Teubner. Chapter 13, Silbe, pp. 185-203. +- Elisabeth Selkirk. 1984. On the major class features and syllable theory. + In Aronoff & Oehrle (eds.) Language Sound Structure: Studies in Phonology. + Cambridge, MIT Press. pp. 107-136. +- Susan Bartlett, et al. 2009. On the Syllabification of Phonemes. + In HLT-NAACL. pp. 308-316. +""" + +import re +import warnings +from string import punctuation + +from nltk.tokenize.api import TokenizerI +from nltk.util import ngrams + + +class SyllableTokenizer(TokenizerI): + """ + Syllabifies words based on the Sonority Sequencing Principle (SSP). + + >>> from nltk.tokenize import SyllableTokenizer + >>> from nltk import word_tokenize + >>> SSP = SyllableTokenizer() + >>> SSP.tokenize('justification') + ['jus', 'ti', 'fi', 'ca', 'tion'] + >>> text = "This is a foobar-like sentence." + >>> [SSP.tokenize(token) for token in word_tokenize(text)] + [['This'], ['is'], ['a'], ['foo', 'bar', '-', 'li', 'ke'], ['sen', 'ten', 'ce'], ['.']] + """ + + def __init__(self, lang="en", sonority_hierarchy=False): + """ + :param lang: Language parameter, default is English, 'en' + :type lang: str + :param sonority_hierarchy: Sonority hierarchy according to the + Sonority Sequencing Principle. + :type sonority_hierarchy: list(str) + """ + # Sonority hierarchy should be provided in descending order. 
+ # If vowels are spread across multiple levels, they should be + # passed assigned self.vowels var together, otherwise should be + # placed in first index of hierarchy. + if not sonority_hierarchy and lang == "en": + sonority_hierarchy = [ + "aeiouy", # vowels. + "lmnrw", # nasals. + "zvsf", # fricatives. + "bcdgtkpqxhj", # stops. + ] + + self.vowels = sonority_hierarchy[0] + self.phoneme_map = {} + for i, level in enumerate(sonority_hierarchy): + for c in level: + sonority_level = len(sonority_hierarchy) - i + self.phoneme_map[c] = sonority_level + self.phoneme_map[c.upper()] = sonority_level + + def assign_values(self, token): + """ + Assigns each phoneme its value from the sonority hierarchy. + Note: Sentence/text has to be tokenized first. + + :param token: Single word or token + :type token: str + :return: List of tuples, first element is character/phoneme and + second is the soronity value. + :rtype: list(tuple(str, int)) + """ + syllables_values = [] + for c in token: + try: + syllables_values.append((c, self.phoneme_map[c])) + except KeyError: + if c not in "0123456789" and c not in punctuation: + warnings.warn( + "Character not defined in sonority_hierarchy," + " assigning as vowel: '{}'".format(c) + ) + syllables_values.append((c, max(self.phoneme_map.values()))) + if c not in self.vowels: + self.vowels += c + else: # If it's a punctuation or numbers, assign -1. + syllables_values.append((c, -1)) + return syllables_values + + def validate_syllables(self, syllable_list): + """ + Ensures each syllable has at least one vowel. + If the following syllable doesn't have vowel, add it to the current one. + + :param syllable_list: Single word or token broken up into syllables. + :type syllable_list: list(str) + :return: Single word or token broken up into syllables + (with added syllables if necessary) + :rtype: list(str) + """ + valid_syllables = [] + front = "" + vowel_pattern = re.compile("|".join(self.vowels)) + for i, syllable in enumerate(syllable_list): + if syllable in punctuation: + valid_syllables.append(syllable) + continue + if not vowel_pattern.search(syllable): + if len(valid_syllables) == 0: + front += syllable + else: + valid_syllables = valid_syllables[:-1] + [ + valid_syllables[-1] + syllable + ] + else: + if len(valid_syllables) == 0: + valid_syllables.append(front + syllable) + else: + valid_syllables.append(syllable) + + return valid_syllables + + def tokenize(self, token): + """ + Apply the SSP to return a list of syllables. + Note: Sentence/text has to be tokenized first. + + :param token: Single word or token + :type token: str + :return syllable_list: Single word or token broken up into syllables. + :rtype: list(str) + """ + # assign values from hierarchy + syllables_values = self.assign_values(token) + + # if only one vowel return word + if sum(token.count(x) for x in self.vowels) <= 1: + return [token] + + syllable_list = [] + syllable = syllables_values[0][0] # start syllable with first phoneme + for trigram in ngrams(syllables_values, n=3): + phonemes, values = zip(*trigram) + # Sonority of previous, focal and following phoneme + prev_value, focal_value, next_value = values + # Focal phoneme. + focal_phoneme = phonemes[1] + + # These cases trigger syllable break. + if focal_value == -1: # If it's a punctuation, just break. 
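+                # Close the syllable built so far and emit the punctuation
+                # mark as its own entry.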
+ syllable_list.append(syllable) + syllable_list.append(focal_phoneme) + syllable = "" + elif prev_value >= focal_value == next_value: + syllable += focal_phoneme + syllable_list.append(syllable) + syllable = "" + + elif prev_value > focal_value < next_value: + syllable_list.append(syllable) + syllable = "" + syllable += focal_phoneme + + # no syllable break + else: + syllable += focal_phoneme + + syllable += syllables_values[-1][0] # append last phoneme + syllable_list.append(syllable) + + return self.validate_syllables(syllable_list) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/stanford.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/stanford.py new file mode 100644 index 0000000000000000000000000000000000000000..81a2d8584aee1d4c39042af6a150bd41c838ee14 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/stanford.py @@ -0,0 +1,115 @@ +# Natural Language Toolkit: Interface to the Stanford Tokenizer +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Xu +# +# URL: +# For license information, see LICENSE.TXT + +import json +import os +import tempfile +import warnings +from subprocess import PIPE + +from nltk.internals import _java_options, config_java, find_jar, java +from nltk.parse.corenlp import CoreNLPParser +from nltk.tokenize.api import TokenizerI + +_stanford_url = "https://nlp.stanford.edu/software/tokenizer.shtml" + + +class StanfordTokenizer(TokenizerI): + r""" + Interface to the Stanford Tokenizer + + >>> from nltk.tokenize.stanford import StanfordTokenizer + >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks." + >>> StanfordTokenizer().tokenize(s) # doctest: +SKIP + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + >>> s = "The colour of the wall is blue." + >>> StanfordTokenizer(options={"americanize": True}).tokenize(s) # doctest: +SKIP + ['The', 'color', 'of', 'the', 'wall', 'is', 'blue', '.'] + """ + + _JAR = "stanford-postagger.jar" + + def __init__( + self, + path_to_jar=None, + encoding="utf8", + options=None, + verbose=False, + java_options="-mx1000m", + ): + # Raise deprecation warning. + warnings.warn( + str( + "\nThe StanfordTokenizer will " + "be deprecated in version 3.2.5.\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.'" + ), + DeprecationWarning, + stacklevel=2, + ) + + self._stanford_jar = find_jar( + self._JAR, + path_to_jar, + env_vars=("STANFORD_POSTAGGER",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + + self._encoding = encoding + self.java_options = java_options + + options = {} if options is None else options + self._options_cmd = ",".join(f"{key}={val}" for key, val in options.items()) + + @staticmethod + def _parse_tokenized_output(s): + return s.splitlines() + + def tokenize(self, s): + """ + Use stanford tokenizer's PTBTokenizer to tokenize multiple sentences. + """ + cmd = ["edu.stanford.nlp.process.PTBTokenizer"] + return self._parse_tokenized_output(self._execute(cmd, s)) + + def _execute(self, cmd, input_, verbose=False): + encoding = self._encoding + cmd.extend(["-charset", encoding]) + _options_cmd = self._options_cmd + if _options_cmd: + cmd.extend(["-options", self._options_cmd]) + + default_options = " ".join(_java_options) + + # Configure java. + config_java(options=self.java_options, verbose=verbose) + + # Windows is incompatible with NamedTemporaryFile() without passing in delete=False. 
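+        # With delete=False the temporary file survives the context manager,
+        # so it can be passed to the Java process; it is removed explicitly
+        # with os.unlink() once the output has been read.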
+ with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file: + # Write the actual sentences to the temporary input file + if isinstance(input_, str) and encoding: + input_ = input_.encode(encoding) + input_file.write(input_) + input_file.flush() + + cmd.append(input_file.name) + + # Run the tagger and get the output. + stdout, stderr = java( + cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE + ) + stdout = stdout.decode(encoding) + + os.unlink(input_file.name) + + # Return java configurations to their default values. + config_java(options=default_options, verbose=False) + + return stdout diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py new file mode 100644 index 0000000000000000000000000000000000000000..ff3f16621e3a3c38ee0265e817b04c655856dd70 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/stanford_segmenter.py @@ -0,0 +1,292 @@ +#!/usr/bin/env python +# Natural Language Toolkit: Interface to the Stanford Segmenter +# for Chinese and Arabic +# +# Copyright (C) 2001-2023 NLTK Project +# Author: 52nlp <52nlpcn@gmail.com> +# Casper Lehmann-Strøm +# Alex Constantin +# +# URL: +# For license information, see LICENSE.TXT + +import json +import os +import tempfile +import warnings +from subprocess import PIPE + +from nltk.internals import ( + _java_options, + config_java, + find_dir, + find_file, + find_jar, + java, +) +from nltk.tokenize.api import TokenizerI + +_stanford_url = "https://nlp.stanford.edu/software" + + +class StanfordSegmenter(TokenizerI): + """Interface to the Stanford Segmenter + + If stanford-segmenter version is older than 2016-10-31, then path_to_slf4j + should be provieded, for example:: + + seg = StanfordSegmenter(path_to_slf4j='/YOUR_PATH/slf4j-api.jar') + + >>> from nltk.tokenize.stanford_segmenter import StanfordSegmenter + >>> seg = StanfordSegmenter() # doctest: +SKIP + >>> seg.default_config('zh') # doctest: +SKIP + >>> sent = u'这是斯坦福中文分词器测试' + >>> print(seg.segment(sent)) # doctest: +SKIP + \u8fd9 \u662f \u65af\u5766\u798f \u4e2d\u6587 \u5206\u8bcd\u5668 \u6d4b\u8bd5 + + >>> seg.default_config('ar') # doctest: +SKIP + >>> sent = u'هذا هو تصنيف ستانفورد العربي للكلمات' + >>> print(seg.segment(sent.split())) # doctest: +SKIP + \u0647\u0630\u0627 \u0647\u0648 \u062a\u0635\u0646\u064a\u0641 \u0633\u062a\u0627\u0646\u0641\u0648\u0631\u062f \u0627\u0644\u0639\u0631\u0628\u064a \u0644 \u0627\u0644\u0643\u0644\u0645\u0627\u062a + + """ + + _JAR = "stanford-segmenter.jar" + + def __init__( + self, + path_to_jar=None, + path_to_slf4j=None, + java_class=None, + path_to_model=None, + path_to_dict=None, + path_to_sihan_corpora_dict=None, + sihan_post_processing="false", + keep_whitespaces="false", + encoding="UTF-8", + options=None, + verbose=False, + java_options="-mx2g", + ): + # Raise deprecation warning. 
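+        # The "always" filter forces the DeprecationWarning below to be
+        # displayed even when deprecation warnings are suppressed by default;
+        # the filter is reset to "ignore" right after the warning is issued.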
+ warnings.simplefilter("always", DeprecationWarning) + warnings.warn( + str( + "\nThe StanfordTokenizer will " + "be deprecated in version 3.2.5.\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPTokenizer\033[0m instead.'" + ), + DeprecationWarning, + stacklevel=2, + ) + warnings.simplefilter("ignore", DeprecationWarning) + + stanford_segmenter = find_jar( + self._JAR, + path_to_jar, + env_vars=("STANFORD_SEGMENTER",), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + if path_to_slf4j is not None: + slf4j = find_jar( + "slf4j-api.jar", + path_to_slf4j, + env_vars=("SLF4J", "STANFORD_SEGMENTER"), + searchpath=(), + url=_stanford_url, + verbose=verbose, + ) + else: + slf4j = None + + # This is passed to java as the -cp option, the old version of segmenter needs slf4j. + # The new version of stanford-segmenter-2016-10-31 doesn't need slf4j + self._stanford_jar = os.pathsep.join( + _ for _ in [stanford_segmenter, slf4j] if _ is not None + ) + + self._java_class = java_class + self._model = path_to_model + self._sihan_corpora_dict = path_to_sihan_corpora_dict + self._sihan_post_processing = sihan_post_processing + self._keep_whitespaces = keep_whitespaces + self._dict = path_to_dict + + self._encoding = encoding + self.java_options = java_options + options = {} if options is None else options + self._options_cmd = ",".join( + f"{key}={json.dumps(val)}" for key, val in options.items() + ) + + def default_config(self, lang): + """ + Attempt to initialize Stanford Word Segmenter for the specified language + using the STANFORD_SEGMENTER and STANFORD_MODELS environment variables + """ + + search_path = () + if os.environ.get("STANFORD_SEGMENTER"): + search_path = {os.path.join(os.environ.get("STANFORD_SEGMENTER"), "data")} + + # init for Chinese-specific files + self._dict = None + self._sihan_corpora_dict = None + self._sihan_post_processing = "false" + + if lang == "ar": + self._java_class = ( + "edu.stanford.nlp.international.arabic.process.ArabicSegmenter" + ) + model = "arabic-segmenter-atb+bn+arztrain.ser.gz" + + elif lang == "zh": + self._java_class = "edu.stanford.nlp.ie.crf.CRFClassifier" + model = "pku.gz" + self._sihan_post_processing = "true" + + path_to_dict = "dict-chris6.ser.gz" + try: + self._dict = find_file( + path_to_dict, + searchpath=search_path, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_MODELS",), + ) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using env. " + "variables STANFORD_MODELS and /data/)" + % path_to_dict + ) from e + + sihan_dir = "./data/" + try: + path_to_sihan_dir = find_dir( + sihan_dir, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_SEGMENTER",), + ) + self._sihan_corpora_dict = os.path.join(path_to_sihan_dir, sihan_dir) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using the " + "STANFORD_SEGMENTER environment variable)" % sihan_dir + ) from e + else: + raise LookupError(f"Unsupported language {lang}") + + try: + self._model = find_file( + model, + searchpath=search_path, + url=_stanford_url, + verbose=False, + env_vars=("STANFORD_MODELS", "STANFORD_SEGMENTER"), + ) + except LookupError as e: + raise LookupError( + "Could not find '%s' (tried using env. 
" + "variables STANFORD_MODELS and /data/)" % model + ) from e + + def tokenize(self, s): + super().tokenize(s) + + def segment_file(self, input_file_path): + """ """ + cmd = [ + self._java_class, + "-loadClassifier", + self._model, + "-keepAllWhitespaces", + self._keep_whitespaces, + "-textFile", + input_file_path, + ] + if self._sihan_corpora_dict is not None: + cmd.extend( + [ + "-serDictionary", + self._dict, + "-sighanCorporaDict", + self._sihan_corpora_dict, + "-sighanPostProcessing", + self._sihan_post_processing, + ] + ) + + stdout = self._execute(cmd) + + return stdout + + def segment(self, tokens): + return self.segment_sents([tokens]) + + def segment_sents(self, sentences): + """ """ + encoding = self._encoding + # Create a temporary input file + _input_fh, self._input_file_path = tempfile.mkstemp(text=True) + + # Write the actural sentences to the temporary input file + _input_fh = os.fdopen(_input_fh, "wb") + _input = "\n".join(" ".join(x) for x in sentences) + if isinstance(_input, str) and encoding: + _input = _input.encode(encoding) + _input_fh.write(_input) + _input_fh.close() + + cmd = [ + self._java_class, + "-loadClassifier", + self._model, + "-keepAllWhitespaces", + self._keep_whitespaces, + "-textFile", + self._input_file_path, + ] + if self._sihan_corpora_dict is not None: + cmd.extend( + [ + "-serDictionary", + self._dict, + "-sighanCorporaDict", + self._sihan_corpora_dict, + "-sighanPostProcessing", + self._sihan_post_processing, + ] + ) + + stdout = self._execute(cmd) + + # Delete the temporary file + os.unlink(self._input_file_path) + + return stdout + + def _execute(self, cmd, verbose=False): + encoding = self._encoding + cmd.extend(["-inputEncoding", encoding]) + _options_cmd = self._options_cmd + if _options_cmd: + cmd.extend(["-options", self._options_cmd]) + + default_options = " ".join(_java_options) + + # Configure java. + config_java(options=self.java_options, verbose=verbose) + + stdout, _stderr = java( + cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE + ) + stdout = stdout.decode(encoding) + + # Return java configurations to their default values. + config_java(options=default_options, verbose=False) + + return stdout diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/texttiling.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/texttiling.py new file mode 100644 index 0000000000000000000000000000000000000000..b5b770b2d08a998538d85803126e74cc13139d11 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/texttiling.py @@ -0,0 +1,475 @@ +# Natural Language Toolkit: TextTiling +# +# Copyright (C) 2001-2023 NLTK Project +# Author: George Boutsioukis +# +# URL: +# For license information, see LICENSE.TXT + +import math +import re + +try: + import numpy +except ImportError: + pass + +from nltk.tokenize.api import TokenizerI + +BLOCK_COMPARISON, VOCABULARY_INTRODUCTION = 0, 1 +LC, HC = 0, 1 +DEFAULT_SMOOTHING = [0] + + +class TextTilingTokenizer(TokenizerI): + """Tokenize a document into topical sections using the TextTiling algorithm. + This algorithm detects subtopic shifts based on the analysis of lexical + co-occurrence patterns. + + The process starts by tokenizing the text into pseudosentences of + a fixed size w. Then, depending on the method used, similarity + scores are assigned at sentence gaps. The algorithm proceeds by + detecting the peak differences between these scores and marking + them as boundaries. 
The boundaries are normalized to the closest + paragraph break and the segmented text is returned. + + :param w: Pseudosentence size + :type w: int + :param k: Size (in sentences) of the block used in the block comparison method + :type k: int + :param similarity_method: The method used for determining similarity scores: + `BLOCK_COMPARISON` (default) or `VOCABULARY_INTRODUCTION`. + :type similarity_method: constant + :param stopwords: A list of stopwords that are filtered out (defaults to NLTK's stopwords corpus) + :type stopwords: list(str) + :param smoothing_method: The method used for smoothing the score plot: + `DEFAULT_SMOOTHING` (default) + :type smoothing_method: constant + :param smoothing_width: The width of the window used by the smoothing method + :type smoothing_width: int + :param smoothing_rounds: The number of smoothing passes + :type smoothing_rounds: int + :param cutoff_policy: The policy used to determine the number of boundaries: + `HC` (default) or `LC` + :type cutoff_policy: constant + + >>> from nltk.corpus import brown + >>> tt = TextTilingTokenizer(demo_mode=True) + >>> text = brown.raw()[:4000] + >>> s, ss, d, b = tt.tokenize(text) + >>> b + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0] + """ + + def __init__( + self, + w=20, + k=10, + similarity_method=BLOCK_COMPARISON, + stopwords=None, + smoothing_method=DEFAULT_SMOOTHING, + smoothing_width=2, + smoothing_rounds=1, + cutoff_policy=HC, + demo_mode=False, + ): + + if stopwords is None: + from nltk.corpus import stopwords + + stopwords = stopwords.words("english") + self.__dict__.update(locals()) + del self.__dict__["self"] + + def tokenize(self, text): + """Return a tokenized copy of *text*, where each "token" represents + a separate topic.""" + + lowercase_text = text.lower() + paragraph_breaks = self._mark_paragraph_breaks(text) + text_length = len(lowercase_text) + + # Tokenization step starts here + + # Remove punctuation + nopunct_text = "".join( + c for c in lowercase_text if re.match(r"[a-z\-' \n\t]", c) + ) + nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text) + + tokseqs = self._divide_to_tokensequences(nopunct_text) + + # The morphological stemming step mentioned in the TextTile + # paper is not implemented. A comment in the original C + # implementation states that it offers no benefit to the + # process. It might be interesting to test the existing + # stemmers though. 
+ # words = _stem_words(words) + + # Filter stopwords + for ts in tokseqs: + ts.wrdindex_list = [ + wi for wi in ts.wrdindex_list if wi[0] not in self.stopwords + ] + + token_table = self._create_token_table(tokseqs, nopunct_par_breaks) + # End of the Tokenization step + + # Lexical score determination + if self.similarity_method == BLOCK_COMPARISON: + gap_scores = self._block_comparison(tokseqs, token_table) + elif self.similarity_method == VOCABULARY_INTRODUCTION: + raise NotImplementedError("Vocabulary introduction not implemented") + else: + raise ValueError( + f"Similarity method {self.similarity_method} not recognized" + ) + + if self.smoothing_method == DEFAULT_SMOOTHING: + smooth_scores = self._smooth_scores(gap_scores) + else: + raise ValueError(f"Smoothing method {self.smoothing_method} not recognized") + # End of Lexical score Determination + + # Boundary identification + depth_scores = self._depth_scores(smooth_scores) + segment_boundaries = self._identify_boundaries(depth_scores) + + normalized_boundaries = self._normalize_boundaries( + text, segment_boundaries, paragraph_breaks + ) + # End of Boundary Identification + segmented_text = [] + prevb = 0 + + for b in normalized_boundaries: + if b == 0: + continue + segmented_text.append(text[prevb:b]) + prevb = b + + if prevb < text_length: # append any text that may be remaining + segmented_text.append(text[prevb:]) + + if not segmented_text: + segmented_text = [text] + + if self.demo_mode: + return gap_scores, smooth_scores, depth_scores, segment_boundaries + return segmented_text + + def _block_comparison(self, tokseqs, token_table): + """Implements the block comparison method""" + + def blk_frq(tok, block): + ts_occs = filter(lambda o: o[0] in block, token_table[tok].ts_occurences) + freq = sum(tsocc[1] for tsocc in ts_occs) + return freq + + gap_scores = [] + numgaps = len(tokseqs) - 1 + + for curr_gap in range(numgaps): + score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0 + score = 0.0 + # adjust window size for boundary conditions + if curr_gap < self.k - 1: + window_size = curr_gap + 1 + elif curr_gap > numgaps - self.k: + window_size = numgaps - curr_gap + else: + window_size = self.k + + b1 = [ts.index for ts in tokseqs[curr_gap - window_size + 1 : curr_gap + 1]] + b2 = [ts.index for ts in tokseqs[curr_gap + 1 : curr_gap + window_size + 1]] + + for t in token_table: + score_dividend += blk_frq(t, b1) * blk_frq(t, b2) + score_divisor_b1 += blk_frq(t, b1) ** 2 + score_divisor_b2 += blk_frq(t, b2) ** 2 + try: + score = score_dividend / math.sqrt(score_divisor_b1 * score_divisor_b2) + except ZeroDivisionError: + pass # score += 0.0 + + gap_scores.append(score) + + return gap_scores + + def _smooth_scores(self, gap_scores): + "Wraps the smooth function from the SciPy Cookbook" + return list( + smooth(numpy.array(gap_scores[:]), window_len=self.smoothing_width + 1) + ) + + def _mark_paragraph_breaks(self, text): + """Identifies indented text or line breaks as the beginning of + paragraphs""" + MIN_PARAGRAPH = 100 + pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*") + matches = pattern.finditer(text) + + last_break = 0 + pbreaks = [0] + for pb in matches: + if pb.start() - last_break < MIN_PARAGRAPH: + continue + else: + pbreaks.append(pb.start()) + last_break = pb.start() + + return pbreaks + + def _divide_to_tokensequences(self, text): + "Divides the text into pseudosentences of fixed size" + w = self.w + wrdindex_list = [] + matches = re.finditer(r"\w+", text) + for match in matches: + 
wrdindex_list.append((match.group(), match.start())) + return [ + TokenSequence(i / w, wrdindex_list[i : i + w]) + for i in range(0, len(wrdindex_list), w) + ] + + def _create_token_table(self, token_sequences, par_breaks): + "Creates a table of TokenTableFields" + token_table = {} + current_par = 0 + current_tok_seq = 0 + pb_iter = par_breaks.__iter__() + current_par_break = next(pb_iter) + if current_par_break == 0: + try: + current_par_break = next(pb_iter) # skip break at 0 + except StopIteration as e: + raise ValueError( + "No paragraph breaks were found(text too short perhaps?)" + ) from e + for ts in token_sequences: + for word, index in ts.wrdindex_list: + try: + while index > current_par_break: + current_par_break = next(pb_iter) + current_par += 1 + except StopIteration: + # hit bottom + pass + + if word in token_table: + token_table[word].total_count += 1 + + if token_table[word].last_par != current_par: + token_table[word].last_par = current_par + token_table[word].par_count += 1 + + if token_table[word].last_tok_seq != current_tok_seq: + token_table[word].last_tok_seq = current_tok_seq + token_table[word].ts_occurences.append([current_tok_seq, 1]) + else: + token_table[word].ts_occurences[-1][1] += 1 + else: # new word + token_table[word] = TokenTableField( + first_pos=index, + ts_occurences=[[current_tok_seq, 1]], + total_count=1, + par_count=1, + last_par=current_par, + last_tok_seq=current_tok_seq, + ) + + current_tok_seq += 1 + + return token_table + + def _identify_boundaries(self, depth_scores): + """Identifies boundaries at the peaks of similarity score + differences""" + + boundaries = [0 for x in depth_scores] + + avg = sum(depth_scores) / len(depth_scores) + stdev = numpy.std(depth_scores) + + if self.cutoff_policy == LC: + cutoff = avg - stdev + else: + cutoff = avg - stdev / 2.0 + + depth_tuples = sorted(zip(depth_scores, range(len(depth_scores)))) + depth_tuples.reverse() + hp = list(filter(lambda x: x[0] > cutoff, depth_tuples)) + + for dt in hp: + boundaries[dt[1]] = 1 + for dt2 in hp: # undo if there is a boundary close already + if ( + dt[1] != dt2[1] + and abs(dt2[1] - dt[1]) < 4 + and boundaries[dt2[1]] == 1 + ): + boundaries[dt[1]] = 0 + return boundaries + + def _depth_scores(self, scores): + """Calculates the depth of each gap, i.e. the average difference + between the left and right peaks and the gap's score""" + + depth_scores = [0 for x in scores] + # clip boundaries: this holds on the rule of thumb(my thumb) + # that a section shouldn't be smaller than at least 2 + # pseudosentences for small texts and around 5 for larger ones. 
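+        # For example, 40 gap scores give clip = min(max(4, 2), 5) = 4,
+        # while 12 gap scores give clip = min(max(1, 2), 5) = 2.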
+ + clip = min(max(len(scores) // 10, 2), 5) + index = clip + + for gapscore in scores[clip:-clip]: + lpeak = gapscore + for score in scores[index::-1]: + if score >= lpeak: + lpeak = score + else: + break + rpeak = gapscore + for score in scores[index:]: + if score >= rpeak: + rpeak = score + else: + break + depth_scores[index] = lpeak + rpeak - 2 * gapscore + index += 1 + + return depth_scores + + def _normalize_boundaries(self, text, boundaries, paragraph_breaks): + """Normalize the boundaries identified to the original text's + paragraph breaks""" + + norm_boundaries = [] + char_count, word_count, gaps_seen = 0, 0, 0 + seen_word = False + + for char in text: + char_count += 1 + if char in " \t\n" and seen_word: + seen_word = False + word_count += 1 + if char not in " \t\n" and not seen_word: + seen_word = True + if gaps_seen < len(boundaries) and word_count > ( + max(gaps_seen * self.w, self.w) + ): + if boundaries[gaps_seen] == 1: + # find closest paragraph break + best_fit = len(text) + for br in paragraph_breaks: + if best_fit > abs(br - char_count): + best_fit = abs(br - char_count) + bestbr = br + else: + break + if bestbr not in norm_boundaries: # avoid duplicates + norm_boundaries.append(bestbr) + gaps_seen += 1 + + return norm_boundaries + + +class TokenTableField: + """A field in the token table holding parameters for each token, + used later in the process""" + + def __init__( + self, + first_pos, + ts_occurences, + total_count=1, + par_count=1, + last_par=0, + last_tok_seq=None, + ): + self.__dict__.update(locals()) + del self.__dict__["self"] + + +class TokenSequence: + "A token list with its original length and its index" + + def __init__(self, index, wrdindex_list, original_length=None): + original_length = original_length or len(wrdindex_list) + self.__dict__.update(locals()) + del self.__dict__["self"] + + +# Pasted from the SciPy cookbook: https://www.scipy.org/Cookbook/SignalSmooth +def smooth(x, window_len=11, window="flat"): + """smooth the data using a window with requested size. + + This method is based on the convolution of a scaled window with the signal. + The signal is prepared by introducing reflected copies of the signal + (with the window size) in both ends so that transient parts are minimized + in the beginning and end part of the output signal. + + :param x: the input signal + :param window_len: the dimension of the smoothing window; should be an odd integer + :param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' + flat window will produce a moving average smoothing. + + :return: the smoothed signal + + example:: + + t=linspace(-2,2,0.1) + x=sin(t)+randn(len(t))*0.1 + y=smooth(x) + + :see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve, + scipy.signal.lfilter + + TODO: the window parameter could be the window itself if an array instead of a string + """ + + if x.ndim != 1: + raise ValueError("smooth only accepts 1 dimension arrays.") + + if x.size < window_len: + raise ValueError("Input vector needs to be bigger than window size.") + + if window_len < 3: + return x + + if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]: + raise ValueError( + "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" + ) + + s = numpy.r_[2 * x[0] - x[window_len:1:-1], x, 2 * x[-1] - x[-1:-window_len:-1]] + + # print(len(s)) + if window == "flat": # moving average + w = numpy.ones(window_len, "d") + else: + w = eval("numpy." 
+ window + "(window_len)") + + y = numpy.convolve(w / w.sum(), s, mode="same") + + return y[window_len - 1 : -window_len + 1] + + +def demo(text=None): + from matplotlib import pylab + + from nltk.corpus import brown + + tt = TextTilingTokenizer(demo_mode=True) + if text is None: + text = brown.raw()[:10000] + s, ss, d, b = tt.tokenize(text) + pylab.xlabel("Sentence Gap index") + pylab.ylabel("Gap Scores") + pylab.plot(range(len(s)), s, label="Gap Scores") + pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores") + pylab.plot(range(len(d)), d, label="Depth scores") + pylab.stem(range(len(b)), b) + pylab.legend() + pylab.show() diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/toktok.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/toktok.py new file mode 100644 index 0000000000000000000000000000000000000000..4229a7327743ad9788449a82c8d2350b9c8db392 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/toktok.py @@ -0,0 +1,179 @@ +# Natural Language Toolkit: Python port of the tok-tok.pl tokenizer. +# +# Copyright (C) 2001-2015 NLTK Project +# Author: Jon Dehdari +# Contributors: Liling Tan, Selcuk Ayguney, ikegami, Martijn Pieters +# +# URL: +# For license information, see LICENSE.TXT + +""" +The tok-tok tokenizer is a simple, general tokenizer, where the input has one +sentence per line; thus only final period is tokenized. + +Tok-tok has been tested on, and gives reasonably good results for English, +Persian, Russian, Czech, French, German, Vietnamese, Tajik, and a few others. +The input should be in UTF-8 encoding. + +Reference: +Jon Dehdari. 2014. A Neurophysiologically-Inspired Statistical Language +Model (Doctoral dissertation). Columbus, OH, USA: The Ohio State University. +""" + +import re + +from nltk.tokenize.api import TokenizerI + + +class ToktokTokenizer(TokenizerI): + """ + This is a Python port of the tok-tok.pl from + https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl + + >>> toktok = ToktokTokenizer() + >>> text = u'Is 9.5 or 525,600 my favorite number?' + >>> print(toktok.tokenize(text, return_str=True)) + Is 9.5 or 525,600 my favorite number ? + >>> text = u'The https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl is a website with/and/or slashes and sort of weird : things' + >>> print(toktok.tokenize(text, return_str=True)) + The https://github.com/jonsafari/tok-tok/blob/master/tok-tok.pl is a website with/and/or slashes and sort of weird : things + >>> text = u'\xa1This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf' + >>> expected = u'\xa1 This , is a sentence with weird \xbb symbols \u2026 appearing everywhere \xbf' + >>> assert toktok.tokenize(text, return_str=True) == expected + >>> toktok.tokenize(text) == [u'\xa1', u'This', u',', u'is', u'a', u'sentence', u'with', u'weird', u'\xbb', u'symbols', u'\u2026', u'appearing', u'everywhere', u'\xbf'] + True + """ + + # Replace non-breaking spaces with normal spaces. + NON_BREAKING = re.compile("\u00A0"), " " + + # Pad some funky punctuation. + FUNKY_PUNCT_1 = re.compile(r'([،;؛¿!"\])}»›”؟¡%٪°±©®।॥…])'), r" \1 " + # Pad more funky punctuation. + FUNKY_PUNCT_2 = re.compile(r"([({\[“‘„‚«‹「『])"), r" \1 " + # Pad En dash and em dash + EN_EM_DASHES = re.compile("([–—])"), r" \1 " + + # Replace problematic character with numeric character reference. + AMPERCENT = re.compile("& "), "& " + TAB = re.compile("\t"), " " + PIPE = re.compile(r"\|"), " | " + + # Pad numbers with commas to keep them from further tokenization. 
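+    # As the doctest above shows, "525,600" is kept as a single token; a
+    # comma that is not part of a number (e.g. in "hello,world") is meant
+    # to be padded with spaces and split off.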
+ COMMA_IN_NUM = re.compile(r"(? "something ..." + # "something." -> "something ." + FINAL_PERIOD_1 = re.compile(r"(? "... stuff ." + FINAL_PERIOD_2 = re.compile(r"""(? +# Michael Heilman (re-port from http://www.cis.upenn.edu/~treebank/tokenizer.sed) +# Tom Aarsen <> (modifications) +# +# URL: +# For license information, see LICENSE.TXT + +r""" + +Penn Treebank Tokenizer + +The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. +This implementation is a port of the tokenizer sed script written by Robert McIntyre +and available at http://www.cis.upenn.edu/~treebank/tokenizer.sed. +""" + +import re +import warnings +from typing import Iterator, List, Tuple + +from nltk.tokenize.api import TokenizerI +from nltk.tokenize.destructive import MacIntyreContractions +from nltk.tokenize.util import align_tokens + + +class TreebankWordTokenizer(TokenizerI): + r""" + The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. + + This tokenizer performs the following steps: + + - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` + - treat most punctuation characters as separate tokens + - split off commas and single quotes, when followed by whitespace + - separate periods that appear at the end of line + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> TreebankWordTokenizer().tokenize(s) + ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks', '.'] + >>> s = "They'll save and invest more." + >>> TreebankWordTokenizer().tokenize(s) + ['They', "'ll", 'save', 'and', 'invest', 'more', '.'] + >>> s = "hi, my name can't hello," + >>> TreebankWordTokenizer().tokenize(s) + ['hi', ',', 'my', 'name', 'ca', "n't", 'hello', ','] + """ + + # starting quotes + STARTING_QUOTES = [ + (re.compile(r"^\""), r"``"), + (re.compile(r"(``)"), r" \1 "), + (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "), + ] + + # punctuation + PUNCTUATION = [ + (re.compile(r"([:,])([^\d])"), r" \1 \2"), + (re.compile(r"([:,])$"), r" \1 "), + (re.compile(r"\.\.\."), r" ... "), + (re.compile(r"[;@#$%&]"), r" \g<0> "), + ( + re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'), + r"\1 \2\3 ", + ), # Handles the final period. + (re.compile(r"[?!]"), r" \g<0> "), + (re.compile(r"([^'])' "), r"\1 ' "), + ] + + # Pads parentheses + PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ") + + # Optionally: Convert parentheses, brackets and converts them to PTB symbols. + CONVERT_PARENTHESES = [ + (re.compile(r"\("), "-LRB-"), + (re.compile(r"\)"), "-RRB-"), + (re.compile(r"\["), "-LSB-"), + (re.compile(r"\]"), "-RSB-"), + (re.compile(r"\{"), "-LCB-"), + (re.compile(r"\}"), "-RCB-"), + ] + + DOUBLE_DASHES = (re.compile(r"--"), r" -- ") + + # ending quotes + ENDING_QUOTES = [ + (re.compile(r"''"), " '' "), + (re.compile(r'"'), " '' "), + (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "), + (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "), + ] + + # List of contractions adapted from Robert MacIntyre's tokenizer. + _contractions = MacIntyreContractions() + CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2)) + CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3)) + + def tokenize( + self, text: str, convert_parentheses: bool = False, return_str: bool = False + ) -> List[str]: + r"""Return a tokenized copy of `text`. 
+ + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88 (roughly 3,36 euros)\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> TreebankWordTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '(', 'roughly', '3,36', + 'euros', ')', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + >>> TreebankWordTokenizer().tokenize(s, convert_parentheses=True) # doctest: +NORMALIZE_WHITESPACE + ['Good', 'muffins', 'cost', '$', '3.88', '-LRB-', 'roughly', '3,36', + 'euros', '-RRB-', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', + 'of', 'them.', 'Thanks', '.'] + + :param text: A string with a sentence or sentences. + :type text: str + :param convert_parentheses: if True, replace parentheses to PTB symbols, + e.g. `(` to `-LRB-`. Defaults to False. + :type convert_parentheses: bool, optional + :param return_str: If True, return tokens as space-separated string, + defaults to False. + :type return_str: bool, optional + :return: List of tokens from `text`. + :rtype: List[str] + """ + if return_str is not False: + warnings.warn( + "Parameter 'return_str' has been deprecated and should no " + "longer be used.", + category=DeprecationWarning, + stacklevel=2, + ) + + for regexp, substitution in self.STARTING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp, substitution in self.PUNCTUATION: + text = regexp.sub(substitution, text) + + # Handles parentheses. + regexp, substitution = self.PARENS_BRACKETS + text = regexp.sub(substitution, text) + # Optionally convert parentheses + if convert_parentheses: + for regexp, substitution in self.CONVERT_PARENTHESES: + text = regexp.sub(substitution, text) + + # Handles double dash. + regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + # add extra space to make things easier + text = " " + text + " " + + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r" \1 \2 ", text) + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r" \1 \2 ", text) + + # We are not using CONTRACTIONS4 since + # they are also commented out in the SED scripts + # for regexp in self._contractions.CONTRACTIONS4: + # text = regexp.sub(r' \1 \2 \3 ', text) + + return text.split() + + def span_tokenize(self, text: str) -> Iterator[Tuple[int, int]]: + r""" + Returns the spans of the tokens in ``text``. + Uses the post-hoc nltk.tokens.align_tokens to return the offset spans. + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), + ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), + ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), + ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)] + >>> list(TreebankWordTokenizer().span_tokenize(s)) == expected + True + >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')', + ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.'] + >>> [s[start:end] for start, end in TreebankWordTokenizer().span_tokenize(s)] == expected + True + + :param text: A string with a sentence or sentences. 
+ :type text: str + :yield: Tuple[int, int] + """ + raw_tokens = self.tokenize(text) + + # Convert converted quotes back to original double quotes + # Do this only if original text contains double quote(s) or double + # single-quotes (because '' might be transformed to `` if it is + # treated as starting quotes). + if ('"' in text) or ("''" in text): + # Find double quotes and converted quotes + matched = [m.group() for m in re.finditer(r"``|'{2}|\"", text)] + + # Replace converted quotes back to double quotes + tokens = [ + matched.pop(0) if tok in ['"', "``", "''"] else tok + for tok in raw_tokens + ] + else: + tokens = raw_tokens + + yield from align_tokens(tokens, text) + + +class TreebankWordDetokenizer(TokenizerI): + r""" + The Treebank detokenizer uses the reverse regex operations corresponding to + the Treebank tokenizer's regexes. + + Note: + + - There're additional assumption mades when undoing the padding of ``[;@#$%&]`` + punctuation symbols that isn't presupposed in the TreebankTokenizer. + - There're additional regexes added in reversing the parentheses tokenization, + such as the ``r'([\]\)\}\>])\s([:;,.])'``, which removes the additional right + padding added to the closing parentheses precedding ``[:;,.]``. + - It's not possible to return the original whitespaces as they were because + there wasn't explicit records of where `'\n'`, `'\t'` or `'\s'` were removed at + the text.split() operation. + + >>> from nltk.tokenize.treebank import TreebankWordTokenizer, TreebankWordDetokenizer + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks.''' + >>> d = TreebankWordDetokenizer() + >>> t = TreebankWordTokenizer() + >>> toks = t.tokenize(s) + >>> d.detokenize(toks) + 'Good muffins cost $3.88 in New York. Please buy me two of them. Thanks.' + + The MXPOST parentheses substitution can be undone using the ``convert_parentheses`` + parameter: + + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected_tokens = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '-LRB-', 'York', '-RRB-', '.', 'Please', '-LRB-', 'buy', + ... '-RRB-', 'me', 'two', 'of', 'them.', '-LRB-', 'Thanks', '-RRB-', '.'] + >>> expected_tokens == t.tokenize(s, convert_parentheses=True) + True + >>> expected_detoken = 'Good muffins cost $3.88 in New (York). Please (buy) me two of them. (Thanks).' + >>> expected_detoken == d.detokenize(t.tokenize(s, convert_parentheses=True), convert_parentheses=True) + True + + During tokenization it's safe to add more spaces but during detokenization, + simply undoing the padding doesn't really help. + + - During tokenization, left and right pad is added to ``[!?]``, when + detokenizing, only left shift the ``[!?]`` is needed. + Thus ``(re.compile(r'\s([?!])'), r'\g<1>')``. + + - During tokenization ``[:,]`` are left and right padded but when detokenizing, + only left shift is necessary and we keep right pad after comma/colon + if the string after is a non-digit. + Thus ``(re.compile(r'\s([:,])\s([^\d])'), r'\1 \2')``. + + >>> from nltk.tokenize.treebank import TreebankWordDetokenizer + >>> toks = ['hello', ',', 'i', 'ca', "n't", 'feel', 'my', 'feet', '!', 'Help', '!', '!'] + >>> twd = TreebankWordDetokenizer() + >>> twd.detokenize(toks) + "hello, i can't feel my feet! Help!!" + + >>> toks = ['hello', ',', 'i', "can't", 'feel', ';', 'my', 'feet', '!', + ... 'Help', '!', '!', 'He', 'said', ':', 'Help', ',', 'help', '?', '!'] + >>> twd.detokenize(toks) + "hello, i can't feel; my feet! 
Help!! He said: Help, help?!" + """ + + _contractions = MacIntyreContractions() + CONTRACTIONS2 = [ + re.compile(pattern.replace("(?#X)", r"\s")) + for pattern in _contractions.CONTRACTIONS2 + ] + CONTRACTIONS3 = [ + re.compile(pattern.replace("(?#X)", r"\s")) + for pattern in _contractions.CONTRACTIONS3 + ] + + # ending quotes + ENDING_QUOTES = [ + (re.compile(r"([^' ])\s('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1\2 "), + (re.compile(r"([^' ])\s('[sS]|'[mM]|'[dD]|') "), r"\1\2 "), + (re.compile(r"(\S)\s(\'\')"), r"\1\2"), + ( + re.compile(r"(\'\')\s([.,:)\]>};%])"), + r"\1\2", + ), # Quotes followed by no-left-padded punctuations. + (re.compile(r"''"), '"'), + ] + + # Handles double dashes + DOUBLE_DASHES = (re.compile(r" -- "), r"--") + + # Optionally: Convert parentheses, brackets and converts them from PTB symbols. + CONVERT_PARENTHESES = [ + (re.compile("-LRB-"), "("), + (re.compile("-RRB-"), ")"), + (re.compile("-LSB-"), "["), + (re.compile("-RSB-"), "]"), + (re.compile("-LCB-"), "{"), + (re.compile("-RCB-"), "}"), + ] + + # Undo padding on parentheses. + PARENS_BRACKETS = [ + (re.compile(r"([\[\(\{\<])\s"), r"\g<1>"), + (re.compile(r"\s([\]\)\}\>])"), r"\g<1>"), + (re.compile(r"([\]\)\}\>])\s([:;,.])"), r"\1\2"), + ] + + # punctuation + PUNCTUATION = [ + (re.compile(r"([^'])\s'\s"), r"\1' "), + (re.compile(r"\s([?!])"), r"\g<1>"), # Strip left pad for [?!] + # (re.compile(r'\s([?!])\s'), r'\g<1>'), + (re.compile(r'([^\.])\s(\.)([\]\)}>"\']*)\s*$'), r"\1\2\3"), + # When tokenizing, [;@#$%&] are padded with whitespace regardless of + # whether there are spaces before or after them. + # But during detokenization, we need to distinguish between left/right + # pad, so we split this up. + (re.compile(r"([#$])\s"), r"\g<1>"), # Left pad. + (re.compile(r"\s([;%])"), r"\g<1>"), # Right pad. + # (re.compile(r"\s([&*])\s"), r" \g<1> "), # Unknown pad. + (re.compile(r"\s\.\.\.\s"), r"..."), + # (re.compile(r"\s([:,])\s$"), r"\1"), # .strip() takes care of it. + ( + re.compile(r"\s([:,])"), + r"\1", + ), # Just remove left padding. Punctuation in numbers won't be padded. + ] + + # starting quotes + STARTING_QUOTES = [ + (re.compile(r"([ (\[{<])\s``"), r"\1``"), + (re.compile(r"(``)\s"), r"\1"), + (re.compile(r"``"), r'"'), + ] + + def tokenize(self, tokens: List[str], convert_parentheses: bool = False) -> str: + """ + Treebank detokenizer, created by undoing the regexes from + the TreebankWordTokenizer.tokenize. + + :param tokens: A list of strings, i.e. tokenized text. + :type tokens: List[str] + :param convert_parentheses: if True, replace PTB symbols with parentheses, + e.g. `-LRB-` to `(`. Defaults to False. + :type convert_parentheses: bool, optional + :return: str + """ + text = " ".join(tokens) + + # Add extra space to make things easier + text = " " + text + " " + + # Reverse the contractions regexes. + # Note: CONTRACTIONS4 are not used in tokenization. + for regexp in self.CONTRACTIONS3: + text = regexp.sub(r"\1\2", text) + for regexp in self.CONTRACTIONS2: + text = regexp.sub(r"\1\2", text) + + # Reverse the regexes applied for ending quotes. + for regexp, substitution in self.ENDING_QUOTES: + text = regexp.sub(substitution, text) + + # Undo the space padding. + text = text.strip() + + # Reverse the padding on double dashes. 
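+        # DOUBLE_DASHES turns " -- " back into "--": e.g. the tokens
+        # ['far', '--', 'away'] were joined as "far -- away" above and are
+        # collapsed to "far--away" here.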
+ regexp, substitution = self.DOUBLE_DASHES + text = regexp.sub(substitution, text) + + if convert_parentheses: + for regexp, substitution in self.CONVERT_PARENTHESES: + text = regexp.sub(substitution, text) + + # Reverse the padding regexes applied for parenthesis/brackets. + for regexp, substitution in self.PARENS_BRACKETS: + text = regexp.sub(substitution, text) + + # Reverse the regexes applied for punctuations. + for regexp, substitution in self.PUNCTUATION: + text = regexp.sub(substitution, text) + + # Reverse the regexes applied for starting quotes. + for regexp, substitution in self.STARTING_QUOTES: + text = regexp.sub(substitution, text) + + return text.strip() + + def detokenize(self, tokens: List[str], convert_parentheses: bool = False) -> str: + """Duck-typing the abstract *tokenize()*.""" + return self.tokenize(tokens, convert_parentheses) diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/util.py b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/util.py new file mode 100644 index 0000000000000000000000000000000000000000..e496e0169aa89569b8f0428096b972d4776a0b2e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/tokenize/util.py @@ -0,0 +1,295 @@ +# Natural Language Toolkit: Tokenizer Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from re import finditer +from xml.sax.saxutils import escape, unescape + + +def string_span_tokenize(s, sep): + r""" + Return the offsets of the tokens in *s*, as a sequence of ``(start, end)`` + tuples, by splitting the string at each occurrence of *sep*. + + >>> from nltk.tokenize.util import string_span_tokenize + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... two of them.\n\nThanks.''' + >>> list(string_span_tokenize(s, " ")) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (5, 12), (13, 17), (18, 26), (27, 30), (31, 36), (37, 37), + (38, 44), (45, 48), (49, 55), (56, 58), (59, 73)] + + :param s: the string to be tokenized + :type s: str + :param sep: the token separator + :type sep: str + :rtype: iter(tuple(int, int)) + """ + if len(sep) == 0: + raise ValueError("Token delimiter must not be empty") + left = 0 + while True: + try: + right = s.index(sep, left) + if right != 0: + yield left, right + except ValueError: + if left != len(s): + yield left, len(s) + break + + left = right + len(sep) + + +def regexp_span_tokenize(s, regexp): + r""" + Return the offsets of the tokens in *s*, as a sequence of ``(start, end)`` + tuples, by splitting the string at each successive match of *regexp*. + + >>> from nltk.tokenize.util import regexp_span_tokenize + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... two of them.\n\nThanks.''' + >>> list(regexp_span_tokenize(s, r'\s')) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), + (38, 44), (45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)] + + :param s: the string to be tokenized + :type s: str + :param regexp: regular expression that matches token separators (must not be empty) + :type regexp: str + :rtype: iter(tuple(int, int)) + """ + left = 0 + for m in finditer(regexp, s): + right, next = m.span() + if right != left: + yield left, right + left = next + yield left, len(s) + + +def spans_to_relative(spans): + r""" + Return a sequence of relative spans, given a sequence of spans. 
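+    Each yielded tuple is ``(offset, length)``, where ``offset`` is the
+    distance from the end of the previous span (from position 0 for the
+    first span) and ``length`` is the length of the token itself.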
+ + >>> from nltk.tokenize import WhitespaceTokenizer + >>> from nltk.tokenize.util import spans_to_relative + >>> s = '''Good muffins cost $3.88\nin New York. Please buy me + ... two of them.\n\nThanks.''' + >>> list(spans_to_relative(WhitespaceTokenizer().span_tokenize(s))) # doctest: +NORMALIZE_WHITESPACE + [(0, 4), (1, 7), (1, 4), (1, 5), (1, 2), (1, 3), (1, 5), (2, 6), + (1, 3), (1, 2), (1, 3), (1, 2), (1, 5), (2, 7)] + + :param spans: a sequence of (start, end) offsets of the tokens + :type spans: iter(tuple(int, int)) + :rtype: iter(tuple(int, int)) + """ + prev = 0 + for left, right in spans: + yield left - prev, right - left + prev = right + + +class CJKChars: + """ + An object that enumerates the code points of the CJK characters as listed on + https://en.wikipedia.org/wiki/Basic_Multilingual_Plane#Basic_Multilingual_Plane + + This is a Python port of the CJK code point enumerations of Moses tokenizer: + https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/detokenizer.perl#L309 + """ + + # Hangul Jamo (1100–11FF) + Hangul_Jamo = (4352, 4607) # (ord(u"\u1100"), ord(u"\u11ff")) + + # CJK Radicals Supplement (2E80–2EFF) + # Kangxi Radicals (2F00–2FDF) + # Ideographic Description Characters (2FF0–2FFF) + # CJK Symbols and Punctuation (3000–303F) + # Hiragana (3040–309F) + # Katakana (30A0–30FF) + # Bopomofo (3100–312F) + # Hangul Compatibility Jamo (3130–318F) + # Kanbun (3190–319F) + # Bopomofo Extended (31A0–31BF) + # CJK Strokes (31C0–31EF) + # Katakana Phonetic Extensions (31F0–31FF) + # Enclosed CJK Letters and Months (3200–32FF) + # CJK Compatibility (3300–33FF) + # CJK Unified Ideographs Extension A (3400–4DBF) + # Yijing Hexagram Symbols (4DC0–4DFF) + # CJK Unified Ideographs (4E00–9FFF) + # Yi Syllables (A000–A48F) + # Yi Radicals (A490–A4CF) + CJK_Radicals = (11904, 42191) # (ord(u"\u2e80"), ord(u"\ua4cf")) + + # Phags-pa (A840–A87F) + Phags_Pa = (43072, 43135) # (ord(u"\ua840"), ord(u"\ua87f")) + + # Hangul Syllables (AC00–D7AF) + Hangul_Syllables = (44032, 55215) # (ord(u"\uAC00"), ord(u"\uD7AF")) + + # CJK Compatibility Ideographs (F900–FAFF) + CJK_Compatibility_Ideographs = (63744, 64255) # (ord(u"\uF900"), ord(u"\uFAFF")) + + # CJK Compatibility Forms (FE30–FE4F) + CJK_Compatibility_Forms = (65072, 65103) # (ord(u"\uFE30"), ord(u"\uFE4F")) + + # Range U+FF65–FFDC encodes halfwidth forms, of Katakana and Hangul characters + Katakana_Hangul_Halfwidth = (65381, 65500) # (ord(u"\uFF65"), ord(u"\uFFDC")) + + # Supplementary Ideographic Plane 20000–2FFFF + Supplementary_Ideographic_Plane = ( + 131072, + 196607, + ) # (ord(u"\U00020000"), ord(u"\U0002FFFF")) + + ranges = [ + Hangul_Jamo, + CJK_Radicals, + Phags_Pa, + Hangul_Syllables, + CJK_Compatibility_Ideographs, + CJK_Compatibility_Forms, + Katakana_Hangul_Halfwidth, + Supplementary_Ideographic_Plane, + ] + + +def is_cjk(character): + """ + Python port of Moses' code to check for CJK character. + + >>> CJKChars().ranges + [(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215), (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)] + >>> is_cjk(u'\u33fe') + True + >>> is_cjk(u'\uFE5F') + False + + :param character: The character that needs to be checked. 
+ :type character: char + :return: bool + """ + return any( + [ + start <= ord(character) <= end + for start, end in [ + (4352, 4607), + (11904, 42191), + (43072, 43135), + (44032, 55215), + (63744, 64255), + (65072, 65103), + (65381, 65500), + (131072, 196607), + ] + ] + ) + + +def xml_escape(text): + """ + This function transforms the input text into an "escaped" version suitable + for well-formed XML formatting. + + Note that the default xml.sax.saxutils.escape() function don't escape + some characters that Moses does so we have to manually add them to the + entities dictionary. + + >>> input_str = ''')| & < > ' " ] [''' + >>> expected_output = ''')| & < > ' " ] [''' + >>> escape(input_str) == expected_output + True + >>> xml_escape(input_str) + ')| & < > ' " ] [' + + :param text: The text that needs to be escaped. + :type text: str + :rtype: str + """ + return escape( + text, + entities={ + r"'": r"'", + r'"': r""", + r"|": r"|", + r"[": r"[", + r"]": r"]", + }, + ) + + +def xml_unescape(text): + """ + This function transforms the "escaped" version suitable + for well-formed XML formatting into humanly-readable string. + + Note that the default xml.sax.saxutils.unescape() function don't unescape + some characters that Moses does so we have to manually add them to the + entities dictionary. + + >>> from xml.sax.saxutils import unescape + >>> s = ')| & < > ' " ] [' + >>> expected = ''')| & < > \' " ] [''' + >>> xml_unescape(s) == expected + True + + :param text: The text that needs to be unescaped. + :type text: str + :rtype: str + """ + return unescape( + text, + entities={ + r"'": r"'", + r""": r'"', + r"|": r"|", + r"[": r"[", + r"]": r"]", + }, + ) + + +def align_tokens(tokens, sentence): + """ + This module attempt to find the offsets of the tokens in *s*, as a sequence + of ``(start, end)`` tuples, given the tokens and also the source string. + + >>> from nltk.tokenize import TreebankWordTokenizer + >>> from nltk.tokenize.util import align_tokens + >>> s = str("The plane, bound for St Petersburg, crashed in Egypt's " + ... "Sinai desert just 23 minutes after take-off from Sharm el-Sheikh " + ... "on Saturday.") + >>> tokens = TreebankWordTokenizer().tokenize(s) + >>> expected = [(0, 3), (4, 9), (9, 10), (11, 16), (17, 20), (21, 23), + ... (24, 34), (34, 35), (36, 43), (44, 46), (47, 52), (52, 54), + ... (55, 60), (61, 67), (68, 72), (73, 75), (76, 83), (84, 89), + ... (90, 98), (99, 103), (104, 109), (110, 119), (120, 122), + ... (123, 131), (131, 132)] + >>> output = list(align_tokens(tokens, s)) + >>> len(tokens) == len(expected) == len(output) # Check that length of tokens and tuples are the same. + True + >>> expected == list(align_tokens(tokens, s)) # Check that the output is as expected. + True + >>> tokens == [s[start:end] for start, end in output] # Check that the slices of the string corresponds to the tokens. 
+ True + + :param tokens: The list of strings that are the result of tokenization + :type tokens: list(str) + :param sentence: The original string + :type sentence: str + :rtype: list(tuple(int,int)) + """ + point = 0 + offsets = [] + for token in tokens: + try: + start = sentence.index(token, point) + except ValueError as e: + raise ValueError(f'substring "{token}" not found in "{sentence}"') from e + point = start + len(token) + offsets.append((start, point)) + return offsets diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__init__.py b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cd14ffb4703bf38bb349cc19cca2d97b6df29f77 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/__init__.py @@ -0,0 +1,35 @@ +# Natural Language Toolkit: Twitter +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# URL: +# For license information, see LICENSE.TXT + +""" +NLTK Twitter Package + +This package contains classes for retrieving Tweet documents using the +Twitter API. + +""" +try: + import twython +except ImportError: + import warnings + + warnings.warn( + "The twython library has not been installed. " + "Some functionality from the twitter package will not be available." + ) +else: + from nltk.twitter.util import Authenticate, credsfromfile + from nltk.twitter.twitterclient import ( + Streamer, + Query, + Twitter, + TweetViewer, + TweetWriter, + ) + + +from nltk.twitter.common import json2csv diff --git a/env-llmeval/lib/python3.10/site-packages/nltk/twitter/api.py b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/api.py new file mode 100644 index 0000000000000000000000000000000000000000..71248b176340abd0d0d7d51e8ed68700f7948e13 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/nltk/twitter/api.py @@ -0,0 +1,145 @@ +# Natural Language Toolkit: Twitter API +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Ewan Klein +# Lorenzo Rubio +# URL: +# For license information, see LICENSE.TXT + +""" +This module provides an interface for TweetHandlers, and support for timezone +handling. +""" + +import time as _time +from abc import ABCMeta, abstractmethod +from datetime import datetime, timedelta, timezone, tzinfo + + +class LocalTimezoneOffsetWithUTC(tzinfo): + """ + This is not intended to be a general purpose class for dealing with the + local timezone. In particular: + + * it assumes that the date passed has been created using + `datetime(..., tzinfo=Local)`, where `Local` is an instance of + the object `LocalTimezoneOffsetWithUTC`; + * for such an object, it returns the offset with UTC, used for date comparisons. + + Reference: https://docs.python.org/3/library/datetime.html + """ + + STDOFFSET = timedelta(seconds=-_time.timezone) + + if _time.daylight: + DSTOFFSET = timedelta(seconds=-_time.altzone) + else: + DSTOFFSET = STDOFFSET + + def utcoffset(self, dt): + """ + Access the relevant time offset. + """ + return self.DSTOFFSET + + +LOCAL = LocalTimezoneOffsetWithUTC() + + +class BasicTweetHandler(metaclass=ABCMeta): + """ + Minimal implementation of `TweetHandler`. + + Counts the number of Tweets and decides when the client should stop + fetching them. + """ + + def __init__(self, limit=20): + self.limit = limit + self.counter = 0 + + """ + A flag to indicate to the client whether to stop fetching data given + some condition (e.g., reaching a date limit). 
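+        This flag is checked together with the counter-based limit in
+        `do_continue()`.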
+ """ + self.do_stop = False + + """ + Stores the id of the last fetched Tweet to handle pagination. + """ + self.max_id = None + + def do_continue(self): + """ + Returns `False` if the client should stop fetching Tweets. + """ + return self.counter < self.limit and not self.do_stop + + +class TweetHandlerI(BasicTweetHandler): + """ + Interface class whose subclasses should implement a handle method that + Twitter clients can delegate to. + """ + + def __init__(self, limit=20, upper_date_limit=None, lower_date_limit=None): + """ + :param int limit: The number of data items to process in the current\ + round of processing. + + :param tuple upper_date_limit: The date at which to stop collecting\ + new data. This should be entered as a tuple which can serve as the\ + argument to `datetime.datetime`.\ + E.g. `date_limit=(2015, 4, 1, 12, 40)` for 12:30 pm on April 1 2015. + + :param tuple lower_date_limit: The date at which to stop collecting\ + new data. See `upper_data_limit` for formatting. + """ + BasicTweetHandler.__init__(self, limit) + + self.upper_date_limit = None + self.lower_date_limit = None + if upper_date_limit: + self.upper_date_limit = datetime(*upper_date_limit, tzinfo=LOCAL) + if lower_date_limit: + self.lower_date_limit = datetime(*lower_date_limit, tzinfo=LOCAL) + + self.startingup = True + + @abstractmethod + def handle(self, data): + """ + Deal appropriately with data returned by the Twitter API + """ + + @abstractmethod + def on_finish(self): + """ + Actions when the tweet limit has been reached + """ + + def check_date_limit(self, data, verbose=False): + """ + Validate date limits. + """ + if self.upper_date_limit or self.lower_date_limit: + date_fmt = "%a %b %d %H:%M:%S +0000 %Y" + tweet_date = datetime.strptime(data["created_at"], date_fmt).replace( + tzinfo=timezone.utc + ) + if (self.upper_date_limit and tweet_date > self.upper_date_limit) or ( + self.lower_date_limit and tweet_date < self.lower_date_limit + ): + if self.upper_date_limit: + message = "earlier" + date_limit = self.upper_date_limit + else: + message = "later" + date_limit = self.lower_date_limit + if verbose: + print( + "Date limit {} is {} than date of current tweet {}".format( + date_limit, message, tweet_date + ) + ) + self.do_stop = True
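+
+
+# Illustrative sketch (not part of NLTK itself): a concrete handler would
+# subclass TweetHandlerI, implement handle() and on_finish(), and rely on
+# check_date_limit() to flip do_stop. The PrintHandler name below is
+# hypothetical.
+#
+#     class PrintHandler(TweetHandlerI):
+#         def handle(self, data):
+#             self.check_date_limit(data)
+#             if self.do_stop:
+#                 return
+#             print(data["text"])
+#             self.counter += 1
+#
+#         def on_finish(self):
+#             print(f"Processed {self.counter} Tweets")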