# nltk/tokenize/treebank.py
# Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Edward Loper <[email protected]>
#         Michael Heilman <[email protected]> (re-port from http://www.cis.upenn.edu/~treebank/tokenizer.sed)
#         Tom Aarsen <> (modifications)
#
# URL: <https://www.nltk.org>
# For license information, see LICENSE.TXT

r"""
Penn Treebank Tokenizer

The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank.
This implementation is a port of the tokenizer sed script written by Robert McIntyre
and available at http://www.cis.upenn.edu/~treebank/tokenizer.sed.
"""

import re
import warnings
from typing import Iterator, List, Tuple

from nltk.tokenize.api import TokenizerI
from nltk.tokenize.destructive import MacIntyreContractions
from nltk.tokenize.util import align_tokens


class TreebankWordTokenizer(TokenizerI):
    r"""
    The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank.

    This tokenizer performs the following steps:

    - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll``
    - treat most punctuation characters as separate tokens
    - split off commas and single quotes, when followed by whitespace
    - separate periods that appear at the end of a line

    >>> from nltk.tokenize import TreebankWordTokenizer
    >>> s = '''Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\nThanks.'''
    >>> TreebankWordTokenizer().tokenize(s)
    ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks', '.']
    >>> s = "They'll save and invest more."
    >>> TreebankWordTokenizer().tokenize(s)
    ['They', "'ll", 'save', 'and', 'invest', 'more', '.']
    >>> s = "hi, my name can't hello,"
    >>> TreebankWordTokenizer().tokenize(s)
    ['hi', ',', 'my', 'name', 'ca', "n't", 'hello', ',']
    """

    # starting quotes
    STARTING_QUOTES = [
        (re.compile(r"^\""), r"``"),
        (re.compile(r"(``)"), r" \1 "),
        (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 `` "),
    ]
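    # e.g. the first two rules map '"Hello"' to ' `` Hello"'; the closing '"'
    # is later rewritten to " '' " by ENDING_QUOTES below.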

    # punctuation
    PUNCTUATION = [
        (re.compile(r"([:,])([^\d])"), r" \1 \2"),
        (re.compile(r"([:,])$"), r" \1 "),
        (re.compile(r"\.\.\."), r" ... "),
        (re.compile(r"[;@#$%&]"), r" \g<0> "),
        (
            re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'),
            r"\1 \2\3 ",
        ),  # Handles the final period.
        (re.compile(r"[?!]"), r" \g<0> "),
        (re.compile(r"([^'])' "), r"\1 ' "),
    ]
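    # e.g. after these substitutions, 'well, then: go!' splits into
    # ['well', ',', 'then', ':', 'go', '!']; ',' and ':' inside numbers
    # such as '3,36' are deliberately left attached.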

    # Pads parentheses
    PARENS_BRACKETS = (re.compile(r"[\]\[\(\)\{\}\<\>]"), r" \g<0> ")

    # Optionally: convert parentheses and brackets to PTB symbols.
    CONVERT_PARENTHESES = [
        (re.compile(r"\("), "-LRB-"),
        (re.compile(r"\)"), "-RRB-"),
        (re.compile(r"\["), "-LSB-"),
        (re.compile(r"\]"), "-RSB-"),
        (re.compile(r"\{"), "-LCB-"),
        (re.compile(r"\}"), "-RCB-"),
    ]
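    # e.g. with convert_parentheses=True, '(buy)' becomes '-LRB- buy -RRB-'
    # after PARENS_BRACKETS padding and this substitution.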

    DOUBLE_DASHES = (re.compile(r"--"), r" -- ")

    # ending quotes
    ENDING_QUOTES = [
        (re.compile(r"''"), " '' "),
        (re.compile(r'"'), " '' "),
        (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "),
        (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "),
    ]
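    # e.g. "she's " -> "she 's " and "they'll " -> "they 'll "; the trailing
    # space is guaranteed by the padding added in tokenize() below.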

    # List of contractions adapted from Robert MacIntyre's tokenizer.
    _contractions = MacIntyreContractions()
    CONTRACTIONS2 = list(map(re.compile, _contractions.CONTRACTIONS2))
    CONTRACTIONS3 = list(map(re.compile, _contractions.CONTRACTIONS3))
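    # e.g. these split fused forms such as "cannot" -> "can not" and
    # "gonna" -> "gon na" (CONTRACTIONS2), or "'twas" -> "'t was" (CONTRACTIONS3).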

    def tokenize(
        self, text: str, convert_parentheses: bool = False, return_str: bool = False
    ) -> List[str]:
        r"""Return a tokenized copy of `text`.

        >>> from nltk.tokenize import TreebankWordTokenizer
        >>> s = '''Good muffins cost $3.88 (roughly 3,36 euros)\nin New York.  Please buy me\ntwo of them.\nThanks.'''
        >>> TreebankWordTokenizer().tokenize(s)  # doctest: +NORMALIZE_WHITESPACE
        ['Good', 'muffins', 'cost', '$', '3.88', '(', 'roughly', '3,36',
        'euros', ')', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two',
        'of', 'them.', 'Thanks', '.']
        >>> TreebankWordTokenizer().tokenize(s, convert_parentheses=True)  # doctest: +NORMALIZE_WHITESPACE
        ['Good', 'muffins', 'cost', '$', '3.88', '-LRB-', 'roughly', '3,36',
        'euros', '-RRB-', 'in', 'New', 'York.', 'Please', 'buy', 'me', 'two',
        'of', 'them.', 'Thanks', '.']

        :param text: A string with a sentence or sentences.
        :type text: str
        :param convert_parentheses: if True, replace parentheses with PTB symbols,
            e.g. `(` to `-LRB-`. Defaults to False.
        :type convert_parentheses: bool, optional
        :param return_str: If True, return tokens as a space-separated string.
            Defaults to False.
        :type return_str: bool, optional
        :return: List of tokens from `text`.
        :rtype: List[str]
        """
        if return_str is not False:
            warnings.warn(
                "Parameter 'return_str' has been deprecated and should no "
                "longer be used.",
                category=DeprecationWarning,
                stacklevel=2,
            )

        for regexp, substitution in self.STARTING_QUOTES:
            text = regexp.sub(substitution, text)

        for regexp, substitution in self.PUNCTUATION:
            text = regexp.sub(substitution, text)

        # Handles parentheses.
        regexp, substitution = self.PARENS_BRACKETS
        text = regexp.sub(substitution, text)

        # Optionally convert parentheses
        if convert_parentheses:
            for regexp, substitution in self.CONVERT_PARENTHESES:
                text = regexp.sub(substitution, text)

        # Handles double dash.
        regexp, substitution = self.DOUBLE_DASHES
        text = regexp.sub(substitution, text)

        # add extra space to make things easier
        text = " " + text + " "

        for regexp, substitution in self.ENDING_QUOTES:
            text = regexp.sub(substitution, text)

        for regexp in self.CONTRACTIONS2:
            text = regexp.sub(r" \1 \2 ", text)
        for regexp in self.CONTRACTIONS3:
            text = regexp.sub(r" \1 \2 ", text)

        # We are not using CONTRACTIONS4 since
        # they are also commented out in the SED scripts
        # for regexp in self._contractions.CONTRACTIONS4:
        #     text = regexp.sub(r' \1 \2 \3 ', text)

        return text.split()

    def span_tokenize(self, text: str) -> Iterator[Tuple[int, int]]:
        r"""
        Returns the spans of the tokens in ``text``.
        Uses the post-hoc ``nltk.tokenize.util.align_tokens`` to return the offset spans.

        >>> from nltk.tokenize import TreebankWordTokenizer
        >>> s = '''Good muffins cost $3.88\nin New (York).  Please (buy) me\ntwo of them.\n(Thanks).'''
        >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23),
        ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38),
        ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59),
        ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)]
        >>> list(TreebankWordTokenizer().span_tokenize(s)) == expected
        True
        >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in',
        ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')',
        ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.']
        >>> [s[start:end] for start, end in TreebankWordTokenizer().span_tokenize(s)] == expected
        True

        :param text: A string with a sentence or sentences.
        :type text: str
        :yield: Tuple[int, int]
        """
        raw_tokens = self.tokenize(text)

        # Convert converted quotes back to original double quotes.
        # Do this only if the original text contains double quote(s) or double
        # single-quotes (because '' might be transformed to `` if it is
        # treated as starting quotes).
        if ('"' in text) or ("''" in text):
            # Find double quotes and converted quotes.
            matched = [m.group() for m in re.finditer(r"``|'{2}|\"", text)]

            # Replace converted quotes back to double quotes.
            tokens = [
                matched.pop(0) if tok in ['"', "``", "''"] else tok
                for tok in raw_tokens
            ]
        else:
            tokens = raw_tokens

        yield from align_tokens(tokens, text)


class TreebankWordDetokenizer(TokenizerI):
    r"""
    The Treebank detokenizer uses the reverse regex operations corresponding to
    the Treebank tokenizer's regexes.

    Note:

    - There are additional assumptions made when undoing the padding of the
      ``[;@#$%&]`` punctuation symbols, assumptions that aren't presupposed by
      the TreebankTokenizer.
    - There are additional regexes added in reversing the parentheses tokenization,
      such as ``r'([\]\)\}\>])\s([:;,.])'``, which removes the right padding added
      to a closing parenthesis preceding ``[:;,.]``.
    - It's not possible to restore the original whitespace exactly, because no
      record is kept of where ``'\n'``, ``'\t'`` or ``'\s'`` characters were
      removed by the ``text.split()`` operation.

    >>> from nltk.tokenize.treebank import TreebankWordTokenizer, TreebankWordDetokenizer
    >>> s = '''Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\nThanks.'''
    >>> d = TreebankWordDetokenizer()
    >>> t = TreebankWordTokenizer()
    >>> toks = t.tokenize(s)
    >>> d.detokenize(toks)
    'Good muffins cost $3.88 in New York. Please buy me two of them. Thanks.'

    The MXPOST parentheses substitution can be undone using the
    ``convert_parentheses`` parameter:

    >>> s = '''Good muffins cost $3.88\nin New (York).  Please (buy) me\ntwo of them.\n(Thanks).'''
    >>> expected_tokens = ['Good', 'muffins', 'cost', '$', '3.88', 'in',
    ... 'New', '-LRB-', 'York', '-RRB-', '.', 'Please', '-LRB-', 'buy',
    ... '-RRB-', 'me', 'two', 'of', 'them.', '-LRB-', 'Thanks', '-RRB-', '.']
    >>> expected_tokens == t.tokenize(s, convert_parentheses=True)
    True
    >>> expected_detoken = 'Good muffins cost $3.88 in New (York). Please (buy) me two of them. (Thanks).'
    >>> expected_detoken == d.detokenize(t.tokenize(s, convert_parentheses=True), convert_parentheses=True)
    True

    During tokenization it's safe to add more spaces, but during detokenization
    simply undoing the padding doesn't really help.

    - During tokenization, left and right padding is added to ``[!?]``; when
      detokenizing, only a left shift of ``[!?]`` is needed.
      Thus ``(re.compile(r'\s([?!])'), r'\g<1>')``.
    - During tokenization ``[:,]`` are left and right padded, but when detokenizing
      only a left shift is necessary, and we keep the right pad after a comma/colon
      if the following string is a non-digit.
      Thus ``(re.compile(r'\s([:,])\s([^\d])'), r'\1 \2')``.

    >>> from nltk.tokenize.treebank import TreebankWordDetokenizer
    >>> toks = ['hello', ',', 'i', 'ca', "n't", 'feel', 'my', 'feet', '!', 'Help', '!', '!']
    >>> twd = TreebankWordDetokenizer()
    >>> twd.detokenize(toks)
    "hello, i can't feel my feet! Help!!"

    >>> toks = ['hello', ',', 'i', "can't", 'feel', ';', 'my', 'feet', '!',
    ... 'Help', '!', '!', 'He', 'said', ':', 'Help', ',', 'help', '?', '!']
    >>> twd.detokenize(toks)
    "hello, i can't feel; my feet! Help!! He said: Help, help?!"
    """

    _contractions = MacIntyreContractions()
    CONTRACTIONS2 = [
        re.compile(pattern.replace("(?#X)", r"\s"))
        for pattern in _contractions.CONTRACTIONS2
    ]
    CONTRACTIONS3 = [
        re.compile(pattern.replace("(?#X)", r"\s"))
        for pattern in _contractions.CONTRACTIONS3
    ]

    # ending quotes
    ENDING_QUOTES = [
        (re.compile(r"([^' ])\s('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1\2 "),
        (re.compile(r"([^' ])\s('[sS]|'[mM]|'[dD]|') "), r"\1\2 "),
        (re.compile(r"(\S)\s(\'\')"), r"\1\2"),
        (
            re.compile(r"(\'\')\s([.,:)\]>};%])"),
            r"\1\2",
        ),  # Quotes followed by no-left-padded punctuations.
        (re.compile(r"''"), '"'),
    ]
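    # e.g. "they 'll " -> "they'll " and "do n't " -> "don't ", reversing the
    # contraction splits made by TreebankWordTokenizer.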

    # Handles double dashes
    DOUBLE_DASHES = (re.compile(r" -- "), r"--")

    # Optionally: convert PTB symbols back to parentheses and brackets.
    CONVERT_PARENTHESES = [
        (re.compile("-LRB-"), "("),
        (re.compile("-RRB-"), ")"),
        (re.compile("-LSB-"), "["),
        (re.compile("-RSB-"), "]"),
        (re.compile("-LCB-"), "{"),
        (re.compile("-RCB-"), "}"),
    ]

    # Undo padding on parentheses.
    PARENS_BRACKETS = [
        (re.compile(r"([\[\(\{\<])\s"), r"\g<1>"),
        (re.compile(r"\s([\]\)\}\>])"), r"\g<1>"),
        (re.compile(r"([\]\)\}\>])\s([:;,.])"), r"\1\2"),
    ]
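    # e.g. "( buy )" -> "(buy)", and ") ." -> ")." so a closing bracket hugs
    # the sentence-final punctuation that follows it.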

    # punctuation
    PUNCTUATION = [
        (re.compile(r"([^'])\s'\s"), r"\1' "),
        (re.compile(r"\s([?!])"), r"\g<1>"),  # Strip left pad for [?!]
        # (re.compile(r'\s([?!])\s'), r'\g<1>'),
        (re.compile(r'([^\.])\s(\.)([\]\)}>"\']*)\s*$'), r"\1\2\3"),
        # When tokenizing, [;@#$%&] are padded with whitespace regardless of
        # whether there are spaces before or after them.
        # But during detokenization, we need to distinguish between left/right
        # pad, so we split this up.
        (re.compile(r"([#$])\s"), r"\g<1>"),  # Left pad.
        (re.compile(r"\s([;%])"), r"\g<1>"),  # Right pad.
        # (re.compile(r"\s([&*])\s"), r" \g<1> "),  # Unknown pad.
        (re.compile(r"\s\.\.\.\s"), r"..."),
        # (re.compile(r"\s([:,])\s$"), r"\1"),  # .strip() takes care of it.
        (
            re.compile(r"\s([:,])"),
            r"\1",
        ),  # Just remove left padding. Punctuation in numbers won't be padded.
    ]
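    # e.g. "cost $ 3.88" -> "cost $3.88" and "feet !" -> "feet!".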

    # starting quotes
    STARTING_QUOTES = [
        (re.compile(r"([ (\[{<])\s``"), r"\1``"),
        (re.compile(r"(``)\s"), r"\1"),
        (re.compile(r"``"), r'"'),
    ]

    def tokenize(self, tokens: List[str], convert_parentheses: bool = False) -> str:
        """
        Treebank detokenizer, created by undoing the regexes from
        the TreebankWordTokenizer.tokenize.

        :param tokens: A list of strings, i.e. tokenized text.
        :type tokens: List[str]
        :param convert_parentheses: if True, replace PTB symbols with parentheses,
            e.g. `-LRB-` to `(`. Defaults to False.
        :type convert_parentheses: bool, optional
        :return: str
        """
        text = " ".join(tokens)

        # Add extra space to make things easier
        text = " " + text + " "

        # Reverse the contractions regexes.
        # Note: CONTRACTIONS4 are not used in tokenization.
        for regexp in self.CONTRACTIONS3:
            text = regexp.sub(r"\1\2", text)
        for regexp in self.CONTRACTIONS2:
            text = regexp.sub(r"\1\2", text)

        # Reverse the regexes applied for ending quotes.
        for regexp, substitution in self.ENDING_QUOTES:
            text = regexp.sub(substitution, text)

        # Undo the space padding.
        text = text.strip()

        # Reverse the padding on double dashes.
        regexp, substitution = self.DOUBLE_DASHES
        text = regexp.sub(substitution, text)

        if convert_parentheses:
            for regexp, substitution in self.CONVERT_PARENTHESES:
                text = regexp.sub(substitution, text)

        # Reverse the padding regexes applied for parenthesis/brackets.
        for regexp, substitution in self.PARENS_BRACKETS:
            text = regexp.sub(substitution, text)

        # Reverse the regexes applied for punctuations.
        for regexp, substitution in self.PUNCTUATION:
            text = regexp.sub(substitution, text)

        # Reverse the regexes applied for starting quotes.
        for regexp, substitution in self.STARTING_QUOTES:
            text = regexp.sub(substitution, text)

        return text.strip()

    def detokenize(self, tokens: List[str], convert_parentheses: bool = False) -> str:
        """Duck-typing the abstract *tokenize()*."""
        return self.tokenize(tokens, convert_parentheses)
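

# A minimal usage sketch mirroring the doctests above: round-trip a sentence
# through the tokenizer and the detokenizer.
if __name__ == "__main__":
    tokenizer = TreebankWordTokenizer()
    detokenizer = TreebankWordDetokenizer()
    tokens = tokenizer.tokenize("They'll save and invest more.")
    print(tokens)  # ['They', "'ll", 'save', 'and', 'invest', 'more', '.']
    print(detokenizer.detokenize(tokens))  # They'll save and invest more.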